dma-buf: abstract fence locking v2

Add dma_fence_lock_irqsave() and dma_fence_unlock_irqrestore() wrappers
and mechanically apply them everywhere.

Just a prerequisite cleanup for a follow-up patch.

v2: add some missing i915 bits, add abstraction for lockdep assertion as
    well
v3: one more suggestion by Tvrtko

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
Link: https://lore.kernel.org/r/20260219160822.1529-4-christian.koenig@amd.com
Christian König 2025-10-09 10:40:06 +02:00
parent 541c8f2468
commit 3e5067931b
12 changed files with 96 additions and 56 deletions
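
The conversion itself is mechanical. As a sketch of the before/after
pattern (illustrative only, mirroring the dma_fence_signal_timestamp()
hunk below):

	unsigned long flags;

	/* Before: drivers dereference the fence spinlock directly. */
	spin_lock_irqsave(fence->lock, flags);
	dma_fence_signal_timestamp_locked(fence, timestamp);
	spin_unlock_irqrestore(fence->lock, flags);

	/*
	 * After: the wrappers hide how the lock is reached, so the
	 * follow-up patch can change the locking scheme in one place.
	 */
	dma_fence_lock_irqsave(fence, flags);
	dma_fence_signal_timestamp_locked(fence, timestamp);
	dma_fence_unlock_irqrestore(fence, flags);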

drivers/dma-buf/dma-fence.c

@@ -366,7 +366,7 @@ void dma_fence_signal_timestamp_locked(struct dma_fence *fence,
 	struct dma_fence_cb *cur, *tmp;
 	struct list_head cb_list;
 
-	lockdep_assert_held(fence->lock);
+	dma_fence_assert_held(fence);
 
 	if (unlikely(test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
 				      &fence->flags)))
@@ -414,9 +414,9 @@ void dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp)
 	if (WARN_ON(!fence))
 		return;
 
-	spin_lock_irqsave(fence->lock, flags);
+	dma_fence_lock_irqsave(fence, flags);
 	dma_fence_signal_timestamp_locked(fence, timestamp);
-	spin_unlock_irqrestore(fence->lock, flags);
+	dma_fence_unlock_irqrestore(fence, flags);
 }
 
 EXPORT_SYMBOL(dma_fence_signal_timestamp);
@@ -475,9 +475,9 @@ bool dma_fence_check_and_signal(struct dma_fence *fence)
 	unsigned long flags;
 	bool ret;
 
-	spin_lock_irqsave(fence->lock, flags);
+	dma_fence_lock_irqsave(fence, flags);
 	ret = dma_fence_check_and_signal_locked(fence);
-	spin_unlock_irqrestore(fence->lock, flags);
+	dma_fence_unlock_irqrestore(fence, flags);
 
 	return ret;
 }
@@ -503,9 +503,9 @@ void dma_fence_signal(struct dma_fence *fence)
 
 	tmp = dma_fence_begin_signalling();
 
-	spin_lock_irqsave(fence->lock, flags);
+	dma_fence_lock_irqsave(fence, flags);
 	dma_fence_signal_timestamp_locked(fence, ktime_get());
-	spin_unlock_irqrestore(fence->lock, flags);
+	dma_fence_unlock_irqrestore(fence, flags);
 
 	dma_fence_end_signalling(tmp);
 }
@@ -606,10 +606,10 @@ void dma_fence_release(struct kref *kref)
 		 * don't leave chains dangling. We set the error flag first
 		 * so that the callbacks know this signal is due to an error.
 		 */
-		spin_lock_irqsave(fence->lock, flags);
+		dma_fence_lock_irqsave(fence, flags);
 		fence->error = -EDEADLK;
 		dma_fence_signal_locked(fence);
-		spin_unlock_irqrestore(fence->lock, flags);
+		dma_fence_unlock_irqrestore(fence, flags);
 	}
 
 	ops = rcu_dereference(fence->ops);
@@ -639,7 +639,7 @@ static bool __dma_fence_enable_signaling(struct dma_fence *fence)
 	const struct dma_fence_ops *ops;
 	bool was_set;
 
-	lockdep_assert_held(fence->lock);
+	dma_fence_assert_held(fence);
 
 	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
 				   &fence->flags);
@@ -675,9 +675,9 @@ void dma_fence_enable_sw_signaling(struct dma_fence *fence)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(fence->lock, flags);
+	dma_fence_lock_irqsave(fence, flags);
 	__dma_fence_enable_signaling(fence);
-	spin_unlock_irqrestore(fence->lock, flags);
+	dma_fence_unlock_irqrestore(fence, flags);
 }
 
 EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
@@ -717,8 +717,7 @@ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
 		return -ENOENT;
 	}
 
-	spin_lock_irqsave(fence->lock, flags);
-
+	dma_fence_lock_irqsave(fence, flags);
 	if (__dma_fence_enable_signaling(fence)) {
 		cb->func = func;
 		list_add_tail(&cb->node, &fence->cb_list);
@@ -726,8 +725,7 @@ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
 		INIT_LIST_HEAD(&cb->node);
 		ret = -ENOENT;
 	}
-
-	spin_unlock_irqrestore(fence->lock, flags);
+	dma_fence_unlock_irqrestore(fence, flags);
 
 	return ret;
 }
@@ -750,9 +748,9 @@ int dma_fence_get_status(struct dma_fence *fence)
 	unsigned long flags;
 	int status;
 
-	spin_lock_irqsave(fence->lock, flags);
+	dma_fence_lock_irqsave(fence, flags);
 	status = dma_fence_get_status_locked(fence);
-	spin_unlock_irqrestore(fence->lock, flags);
+	dma_fence_unlock_irqrestore(fence, flags);
 
 	return status;
 }
@@ -782,13 +780,11 @@ dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
 	unsigned long flags;
 	bool ret;
 
-	spin_lock_irqsave(fence->lock, flags);
-
+	dma_fence_lock_irqsave(fence, flags);
 	ret = !list_empty(&cb->node);
 	if (ret)
 		list_del_init(&cb->node);
-
-	spin_unlock_irqrestore(fence->lock, flags);
+	dma_fence_unlock_irqrestore(fence, flags);
 
 	return ret;
 }
@@ -827,7 +823,7 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
 	unsigned long flags;
 	signed long ret = timeout ? timeout : 1;
 
-	spin_lock_irqsave(fence->lock, flags);
+	dma_fence_lock_irqsave(fence, flags);
 
 	if (dma_fence_test_signaled_flag(fence))
 		goto out;
@@ -851,11 +847,11 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
 			__set_current_state(TASK_INTERRUPTIBLE);
 		else
 			__set_current_state(TASK_UNINTERRUPTIBLE);
-		spin_unlock_irqrestore(fence->lock, flags);
+		dma_fence_unlock_irqrestore(fence, flags);
 
 		ret = schedule_timeout(ret);
 
-		spin_lock_irqsave(fence->lock, flags);
+		dma_fence_lock_irqsave(fence, flags);
 		if (ret > 0 && intr && signal_pending(current))
 			ret = -ERESTARTSYS;
 	}
@@ -865,7 +861,7 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
 	__set_current_state(TASK_RUNNING);
 
 out:
-	spin_unlock_irqrestore(fence->lock, flags);
+	dma_fence_unlock_irqrestore(fence, flags);
 	return ret;
 }
 EXPORT_SYMBOL(dma_fence_default_wait);

drivers/dma-buf/st-dma-fence.c

@@ -410,8 +410,10 @@ struct race_thread {
 
 static void __wait_for_callbacks(struct dma_fence *f)
 {
-	spin_lock_irq(f->lock);
-	spin_unlock_irq(f->lock);
+	unsigned long flags;
+
+	dma_fence_lock_irqsave(f, flags);
+	dma_fence_unlock_irqrestore(f, flags);
 }
 
 static int thread_signal_callback(void *arg)

drivers/dma-buf/sw_sync.c

@@ -156,12 +156,12 @@ static void timeline_fence_release(struct dma_fence *fence)
 	struct sync_timeline *parent = dma_fence_parent(fence);
 	unsigned long flags;
 
-	spin_lock_irqsave(fence->lock, flags);
+	dma_fence_lock_irqsave(fence, flags);
 	if (!list_empty(&pt->link)) {
 		list_del(&pt->link);
 		rb_erase(&pt->node, &parent->pt_tree);
 	}
-	spin_unlock_irqrestore(fence->lock, flags);
+	dma_fence_unlock_irqrestore(fence, flags);
 
 	sync_timeline_put(parent);
 	dma_fence_free(fence);
@@ -179,7 +179,7 @@ static void timeline_fence_set_deadline(struct dma_fence *fence, ktime_t deadlin
 	struct sync_pt *pt = dma_fence_to_sync_pt(fence);
 	unsigned long flags;
 
-	spin_lock_irqsave(fence->lock, flags);
+	dma_fence_lock_irqsave(fence, flags);
 	if (test_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags)) {
 		if (ktime_before(deadline, pt->deadline))
 			pt->deadline = deadline;
@@ -187,7 +187,7 @@ static void timeline_fence_set_deadline(struct dma_fence *fence, ktime_t deadlin
 		pt->deadline = deadline;
 		__set_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags);
 	}
-	spin_unlock_irqrestore(fence->lock, flags);
+	dma_fence_unlock_irqrestore(fence, flags);
 }
 
 static const struct dma_fence_ops timeline_fence_ops = {
@@ -431,13 +431,13 @@ static int sw_sync_ioctl_get_deadline(struct sync_timeline *obj, unsigned long a
 		goto put_fence;
 	}
 
-	spin_lock_irqsave(fence->lock, flags);
+	dma_fence_lock_irqsave(fence, flags);
 	if (!test_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags)) {
 		ret = -ENOENT;
 		goto unlock;
 	}
 	data.deadline_ns = ktime_to_ns(pt->deadline);
 
-	spin_unlock_irqrestore(fence->lock, flags);
+	dma_fence_unlock_irqrestore(fence, flags);
 
 	dma_fence_put(fence);
@@ -450,7 +450,7 @@ static int sw_sync_ioctl_get_deadline(struct sync_timeline *obj, unsigned long a
 	return 0;
 
 unlock:
-	spin_unlock_irqrestore(fence->lock, flags);
+	dma_fence_unlock_irqrestore(fence, flags);
 put_fence:
 	dma_fence_put(fence);

drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c

@@ -479,10 +479,10 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
 	if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery || !fence)
 		return false;
 
-	spin_lock_irqsave(fence->lock, flags);
+	dma_fence_lock_irqsave(fence, flags);
 	if (!dma_fence_is_signaled_locked(fence))
 		dma_fence_set_error(fence, -ENODATA);
-	spin_unlock_irqrestore(fence->lock, flags);
+	dma_fence_unlock_irqrestore(fence, flags);
 
 	while (!dma_fence_is_signaled(fence) &&
 	       ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0)

drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

@@ -2785,8 +2785,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	dma_fence_put(vm->last_unlocked);
 	dma_fence_wait(vm->last_tlb_flush, false);
 	/* Make sure that all fence callbacks have completed */
-	spin_lock_irqsave(vm->last_tlb_flush->lock, flags);
-	spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags);
+	dma_fence_lock_irqsave(vm->last_tlb_flush, flags);
+	dma_fence_unlock_irqrestore(vm->last_tlb_flush, flags);
 	dma_fence_put(vm->last_tlb_flush);
 
 	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {

drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h

@@ -639,7 +639,7 @@ static inline uint64_t amdgpu_vm_tlb_seq(struct amdgpu_vm *vm)
 	 * sure that the dma_fence structure isn't freed up.
 	 */
 	rcu_read_lock();
-	lock = vm->last_tlb_flush->lock;
+	lock = dma_fence_spinlock(vm->last_tlb_flush);
 	rcu_read_unlock();
 
 	spin_lock_irqsave(lock, flags);
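
The amdgpu_vm_tlb_seq() hunk above preserves an existing RCU trick rather
than converting to the lock/unlock wrappers: the lock pointer is sampled
while rcu_read_lock() keeps the fence memory alive, then taken outside the
RCU section, because the spinlock itself lives in a longer-lived driver
structure. A minimal sketch of that pattern (the fence pointer `f` is a
placeholder):

	spinlock_t *lock;
	unsigned long flags;

	rcu_read_lock();
	lock = dma_fence_spinlock(f);	/* f cannot be freed under RCU */
	rcu_read_unlock();

	spin_lock_irqsave(lock, flags);
	/* ... read state serialised by the fence lock ... */
	spin_unlock_irqrestore(lock, flags);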

drivers/gpu/drm/i915/i915_request.c

@@ -148,7 +148,7 @@ __dma_fence_signal__notify(struct dma_fence *fence,
 {
 	struct dma_fence_cb *cur, *tmp;
 
-	lockdep_assert_held(fence->lock);
+	dma_fence_assert_held(fence);
 
 	list_for_each_entry_safe(cur, tmp, list, node) {
 		INIT_LIST_HEAD(&cur->node);

drivers/gpu/drm/i915/i915_active.c

@@ -1045,9 +1045,10 @@ __i915_active_fence_set(struct i915_active_fence *active,
 	 * nesting rules for the fence->lock; the inner lock is always the
 	 * older lock.
 	 */
-	spin_lock_irqsave(fence->lock, flags);
+	dma_fence_lock_irqsave(fence, flags);
 	if (prev)
-		spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
+		spin_lock_nested(dma_fence_spinlock(prev),
+				 SINGLE_DEPTH_NESTING);
 
 	/*
 	 * A does the cmpxchg first, and so it sees C or NULL, as before, or
@@ -1061,17 +1062,18 @@ __i915_active_fence_set(struct i915_active_fence *active,
 	 */
 	while (cmpxchg(__active_fence_slot(active), prev, fence) != prev) {
 		if (prev) {
-			spin_unlock(prev->lock);
+			spin_unlock(dma_fence_spinlock(prev));
 			dma_fence_put(prev);
 		}
-		spin_unlock_irqrestore(fence->lock, flags);
+		dma_fence_unlock_irqrestore(fence, flags);
 
 		prev = i915_active_fence_get(active);
 		GEM_BUG_ON(prev == fence);
 
-		spin_lock_irqsave(fence->lock, flags);
+		dma_fence_lock_irqsave(fence, flags);
 		if (prev)
-			spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
+			spin_lock_nested(dma_fence_spinlock(prev),
+					 SINGLE_DEPTH_NESTING);
 	}
 
 	/*
@@ -1088,10 +1090,11 @@ __i915_active_fence_set(struct i915_active_fence *active,
 	 */
 	if (prev) {
 		__list_del_entry(&active->cb.node);
-		spin_unlock(prev->lock); /* serialise with prev->cb_list */
+		/* serialise with prev->cb_list */
+		spin_unlock(dma_fence_spinlock(prev));
 	}
 	list_add_tail(&active->cb.node, &fence->cb_list);
-	spin_unlock_irqrestore(fence->lock, flags);
+	dma_fence_unlock_irqrestore(fence, flags);
 
 	return prev;
 }
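
__i915_active_fence_set() is the one non-mechanical conversion: it holds
two fence locks at once, so only the outer lock can use the wrapper, while
the inner one still needs spin_lock_nested() because the two locks may
belong to the same lock class. A sketch of the ordering (with `fence` and
`prev` as placeholders, as in the hunks above):

	dma_fence_lock_irqsave(fence, flags);	/* outer: newer fence */
	if (prev)
		spin_lock_nested(dma_fence_spinlock(prev),
				 SINGLE_DEPTH_NESTING);	/* inner: older */

	/* ... move the callback between the two cb_lists ... */

	if (prev)
		spin_unlock(dma_fence_spinlock(prev));
	dma_fence_unlock_irqrestore(fence, flags);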

drivers/gpu/drm/nouveau/nouveau_drm.c

@@ -156,12 +156,13 @@ nouveau_name(struct drm_device *dev)
 static inline bool
 nouveau_cli_work_ready(struct dma_fence *fence)
 {
+	unsigned long flags;
 	bool ret = true;
 
-	spin_lock_irq(fence->lock);
+	dma_fence_lock_irqsave(fence, flags);
 	if (!dma_fence_is_signaled_locked(fence))
 		ret = false;
-	spin_unlock_irq(fence->lock);
+	dma_fence_unlock_irqrestore(fence, flags);
 
 	if (ret == true)
 		dma_fence_put(fence);

drivers/gpu/drm/scheduler/sched_fence.c

@@ -156,19 +156,19 @@ static void drm_sched_fence_set_deadline_finished(struct dma_fence *f,
 	struct dma_fence *parent;
 	unsigned long flags;
 
-	spin_lock_irqsave(&fence->lock, flags);
+	dma_fence_lock_irqsave(f, flags);
 
 	/* If we already have an earlier deadline, keep it: */
 	if (test_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT, &f->flags) &&
 	    ktime_before(fence->deadline, deadline)) {
-		spin_unlock_irqrestore(&fence->lock, flags);
+		dma_fence_unlock_irqrestore(f, flags);
 		return;
 	}
 
 	fence->deadline = deadline;
 	set_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT, &f->flags);
 
-	spin_unlock_irqrestore(&fence->lock, flags);
+	dma_fence_unlock_irqrestore(f, flags);
 
 	/*
 	 * smp_load_aquire() to ensure that if we are racing another

drivers/gpu/drm/xe/xe_sched_job.c

@@ -190,11 +190,11 @@ static bool xe_fence_set_error(struct dma_fence *fence, int error)
 	unsigned long irq_flags;
 	bool signaled;
 
-	spin_lock_irqsave(fence->lock, irq_flags);
+	dma_fence_lock_irqsave(fence, irq_flags);
 	signaled = test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags);
 	if (!signaled)
 		dma_fence_set_error(fence, error);
-	spin_unlock_irqrestore(fence->lock, irq_flags);
+	dma_fence_unlock_irqrestore(fence, irq_flags);
 
 	return signaled;
 }

include/linux/dma-fence.h

@@ -377,6 +377,44 @@ dma_fence_get_rcu_safe(struct dma_fence __rcu **fencep)
 	} while (1);
 }
 
+/**
+ * dma_fence_spinlock - return pointer to the spinlock protecting the fence
+ * @fence: the fence to get the lock from
+ *
+ * Return the pointer to the external lock.
+ */
+static inline spinlock_t *dma_fence_spinlock(struct dma_fence *fence)
+{
+	return fence->lock;
+}
+
+/**
+ * dma_fence_lock_irqsave - lock the fence, saving the CPU flags
+ * @fence: the fence to lock
+ * @flags: where to store the CPU flags
+ *
+ * Lock the fence, preventing it from changing to the signaled state.
+ */
+#define dma_fence_lock_irqsave(fence, flags)	\
+	spin_lock_irqsave(dma_fence_spinlock(fence), flags)
+
+/**
+ * dma_fence_unlock_irqrestore - unlock the fence, restoring the CPU flags
+ * @fence: the fence to unlock
+ * @flags: the CPU flags to restore
+ *
+ * Unlock the fence, allowing it to change its state to signaled again.
+ */
+#define dma_fence_unlock_irqrestore(fence, flags)	\
+	spin_unlock_irqrestore(dma_fence_spinlock(fence), flags)
+
+/**
+ * dma_fence_assert_held - lockdep assertion that the fence lock is held
+ * @fence: the fence which should be locked
+ */
+#define dma_fence_assert_held(fence)	\
+	lockdep_assert_held(dma_fence_spinlock(fence))
+
 #ifdef CONFIG_LOCKDEP
 bool dma_fence_begin_signalling(void);
 void dma_fence_end_signalling(bool cookie);
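
Taken together, a driver-side pair of helpers would use the new API like
this (a usage sketch with hypothetical my_fence_* names, not part of this
patch):

	static void my_fence_mark_error_locked(struct dma_fence *fence, int error)
	{
		/* Documents the locking contract without naming fence->lock. */
		dma_fence_assert_held(fence);

		if (!dma_fence_is_signaled_locked(fence))
			dma_fence_set_error(fence, error);
	}

	static void my_fence_mark_error(struct dma_fence *fence, int error)
	{
		unsigned long flags;

		dma_fence_lock_irqsave(fence, flags);
		my_fence_mark_error_locked(fence, error);
		dma_fence_unlock_irqrestore(fence, flags);
	}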