locking/local_lock: Support Clang's context analysis

Add support for Clang's context analysis for local_lock_t and
local_trylock_t.
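
With the analysis enabled, accesses to data marked as guarded by a
local_lock_t are checked at compile time. A minimal sketch of the
resulting usage, mirroring the tests added by this patch (the struct
and function names here are illustrative only):

	struct pcpu_data {
		local_lock_t lock;
		int counter __guarded_by(&lock);
	};

	static DEFINE_PER_CPU(struct pcpu_data, pcpu_data) = {
		.lock = INIT_LOCAL_LOCK(lock),
	};

	static void inc_counter(void)
	{
		local_lock(&pcpu_data.lock);
		this_cpu_add(pcpu_data.counter, 1);
		local_unlock(&pcpu_data.lock);
	}

Dropping the local_lock()/local_unlock() pair around the this_cpu_add()
would produce a compiler warning when building with
WARN_CONTEXT_ANALYSIS.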

Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://patch.msgid.link/20251219154418.3592607-20-elver@google.com
Author:    Marco Elver <elver@google.com>
Date:      2025-12-19 16:40:08 +01:00
Committer: Peter Zijlstra
Commit:    d3febf16de (parent: 8c9c8566e1)

4 changed files with 159 additions and 34 deletions

diff --git a/Documentation/dev-tools/context-analysis.rst b/Documentation/dev-tools/context-analysis.rst
@@ -80,7 +80,7 @@ Supported Kernel Primitives
 
 Currently the following synchronization primitives are supported:
 `raw_spinlock_t`, `spinlock_t`, `rwlock_t`, `mutex`, `seqlock_t`,
-`bit_spinlock`, RCU, SRCU (`srcu_struct`), `rw_semaphore`.
+`bit_spinlock`, RCU, SRCU (`srcu_struct`), `rw_semaphore`, `local_lock_t`.
 
 For context locks with an initialization function (e.g., `spin_lock_init()`),
 calling this function before initializing any guarded members or globals

diff --git a/include/linux/local_lock.h b/include/linux/local_lock.h
@@ -14,13 +14,13 @@
  * local_lock - Acquire a per CPU local lock
  * @lock:	The lock variable
  */
-#define local_lock(lock)		__local_lock(this_cpu_ptr(lock))
+#define local_lock(lock)		__local_lock(__this_cpu_local_lock(lock))
 
 /**
  * local_lock_irq - Acquire a per CPU local lock and disable interrupts
  * @lock:	The lock variable
  */
-#define local_lock_irq(lock)		__local_lock_irq(this_cpu_ptr(lock))
+#define local_lock_irq(lock)		__local_lock_irq(__this_cpu_local_lock(lock))
 
 /**
  * local_lock_irqsave - Acquire a per CPU local lock, save and disable
@@ -29,19 +29,19 @@
  * @flags:	Storage for interrupt flags
  */
 #define local_lock_irqsave(lock, flags)				\
-	__local_lock_irqsave(this_cpu_ptr(lock), flags)
+	__local_lock_irqsave(__this_cpu_local_lock(lock), flags)
 
 /**
  * local_unlock - Release a per CPU local lock
  * @lock:	The lock variable
  */
-#define local_unlock(lock)		__local_unlock(this_cpu_ptr(lock))
+#define local_unlock(lock)		__local_unlock(__this_cpu_local_lock(lock))
 
 /**
  * local_unlock_irq - Release a per CPU local lock and enable interrupts
  * @lock:	The lock variable
  */
-#define local_unlock_irq(lock)		__local_unlock_irq(this_cpu_ptr(lock))
+#define local_unlock_irq(lock)		__local_unlock_irq(__this_cpu_local_lock(lock))
 
 /**
  * local_unlock_irqrestore - Release a per CPU local lock and restore
@@ -50,7 +50,7 @@
  * @flags:	Interrupt flags to restore
  */
 #define local_unlock_irqrestore(lock, flags)			\
-	__local_unlock_irqrestore(this_cpu_ptr(lock), flags)
+	__local_unlock_irqrestore(__this_cpu_local_lock(lock), flags)
 
 /**
  * local_trylock_init - Runtime initialize a lock instance
@@ -66,7 +66,7 @@
  * locking constrains it will _always_ fail to acquire the lock in NMI or
  * HARDIRQ context on PREEMPT_RT.
  */
-#define local_trylock(lock)		__local_trylock(this_cpu_ptr(lock))
+#define local_trylock(lock)		__local_trylock(__this_cpu_local_lock(lock))
 
 #define local_lock_is_locked(lock) __local_lock_is_locked(lock)
 
@@ -81,27 +81,36 @@
  * HARDIRQ context on PREEMPT_RT.
  */
 #define local_trylock_irqsave(lock, flags)			\
-	__local_trylock_irqsave(this_cpu_ptr(lock), flags)
+	__local_trylock_irqsave(__this_cpu_local_lock(lock), flags)
 
-DEFINE_GUARD(local_lock, local_lock_t __percpu*,
-	     local_lock(_T),
-	     local_unlock(_T))
-DEFINE_GUARD(local_lock_irq, local_lock_t __percpu*,
-	     local_lock_irq(_T),
-	     local_unlock_irq(_T))
+DEFINE_LOCK_GUARD_1(local_lock, local_lock_t __percpu,
+		    local_lock(_T->lock),
+		    local_unlock(_T->lock))
+DEFINE_LOCK_GUARD_1(local_lock_irq, local_lock_t __percpu,
+		    local_lock_irq(_T->lock),
+		    local_unlock_irq(_T->lock))
 DEFINE_LOCK_GUARD_1(local_lock_irqsave, local_lock_t __percpu,
 		    local_lock_irqsave(_T->lock, _T->flags),
 		    local_unlock_irqrestore(_T->lock, _T->flags),
 		    unsigned long flags)
 
 #define local_lock_nested_bh(_lock)				\
-	__local_lock_nested_bh(this_cpu_ptr(_lock))
+	__local_lock_nested_bh(__this_cpu_local_lock(_lock))
 #define local_unlock_nested_bh(_lock)				\
-	__local_unlock_nested_bh(this_cpu_ptr(_lock))
+	__local_unlock_nested_bh(__this_cpu_local_lock(_lock))
 
-DEFINE_GUARD(local_lock_nested_bh, local_lock_t __percpu*,
-	     local_lock_nested_bh(_T),
-	     local_unlock_nested_bh(_T))
+DEFINE_LOCK_GUARD_1(local_lock_nested_bh, local_lock_t __percpu,
+		    local_lock_nested_bh(_T->lock),
+		    local_unlock_nested_bh(_T->lock))
+
+DECLARE_LOCK_GUARD_1_ATTRS(local_lock, __acquires(_T), __releases(*(local_lock_t __percpu **)_T))
+#define class_local_lock_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(local_lock_irq, __acquires(_T), __releases(*(local_lock_t __percpu **)_T))
+#define class_local_lock_irq_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock_irq, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(local_lock_irqsave, __acquires(_T), __releases(*(local_lock_t __percpu **)_T))
+#define class_local_lock_irqsave_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock_irqsave, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(local_lock_nested_bh, __acquires(_T), __releases(*(local_lock_t __percpu **)_T))
+#define class_local_lock_nested_bh_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock_nested_bh, _T)
 
 #endif
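
The conversion from DEFINE_GUARD to DEFINE_LOCK_GUARD_1, together with
the DECLARE_LOCK_GUARD_1_ATTRS declarations above, lets the analysis
track scope-based guards as well. A sketch, reusing the illustrative
pcpu_data from the commit message:

	static void inc_counter_scoped(void)
	{
		/* Acquired here, released automatically at end of scope. */
		guard(local_lock)(&pcpu_data.lock);
		this_cpu_add(pcpu_data.counter, 1);
	}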

diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
@@ -10,21 +10,23 @@
 #ifndef CONFIG_PREEMPT_RT
 
-typedef struct {
+context_lock_struct(local_lock) {
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map	dep_map;
 	struct task_struct	*owner;
 #endif
-} local_lock_t;
+};
+typedef struct local_lock local_lock_t;
 
 /* local_trylock() and local_trylock_irqsave() only work with local_trylock_t */
-typedef struct {
+context_lock_struct(local_trylock) {
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map	dep_map;
 	struct task_struct	*owner;
 #endif
 	u8 acquired;
-} local_trylock_t;
+};
+typedef struct local_trylock local_trylock_t;
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # define LOCAL_LOCK_DEBUG_INIT(lockname)		\
@@ -84,9 +86,14 @@ do {							\
 			      0, LD_WAIT_CONFIG, LD_WAIT_INV,	\
 			      LD_LOCK_PERCPU);			\
 	local_lock_debug_init(lock);				\
+	__assume_ctx_lock(lock);				\
 } while (0)
 
-#define __local_trylock_init(lock) __local_lock_init((local_lock_t *)lock)
+#define __local_trylock_init(lock)			\
+do {							\
+	__local_lock_init((local_lock_t *)lock);	\
+	__assume_ctx_lock(lock);			\
+} while (0)
 
 #define __spinlock_nested_bh_init(lock)			\
 do {							\
@@ -97,6 +104,7 @@ do {							\
 			      0, LD_WAIT_CONFIG, LD_WAIT_INV,	\
 			      LD_LOCK_NORMAL);			\
 	local_lock_debug_init(lock);				\
+	__assume_ctx_lock(lock);				\
 } while (0)
 
 #define __local_lock_acquire(lock)			\
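
The __assume_ctx_lock() in the init paths is what makes the documented
initialization pattern work: after local_lock_init() or
local_trylock_init(), the analysis assumes the lock context is held, so
initializing guarded members does not warn. Mirroring the test added at
the end of this patch:

	local_lock_init(&d->lock);
	d->counter = 0;		/* OK: init implies the lock context */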
@@ -119,22 +127,25 @@ do {						\
 do {							\
 	preempt_disable();				\
 	__local_lock_acquire(lock);			\
+	__acquire(lock);				\
 } while (0)
 
 #define __local_lock_irq(lock)				\
 do {							\
 	local_irq_disable();				\
 	__local_lock_acquire(lock);			\
+	__acquire(lock);				\
 } while (0)
 
 #define __local_lock_irqsave(lock, flags)		\
 do {							\
 	local_irq_save(flags);				\
 	__local_lock_acquire(lock);			\
+	__acquire(lock);				\
 } while (0)
 
 #define __local_trylock(lock)				\
-({							\
+	__try_acquire_ctx_lock(lock, ({			\
 	local_trylock_t *__tl;				\
 							\
 	preempt_disable();				\
@@ -148,10 +159,10 @@ do {						\
 			(local_lock_t *)__tl);		\
 	}						\
 	!!__tl;						\
-})
+}))
 
 #define __local_trylock_irqsave(lock, flags)		\
-({							\
+	__try_acquire_ctx_lock(lock, ({			\
 	local_trylock_t *__tl;				\
 							\
 	local_irq_save(flags);				\
@@ -165,7 +176,7 @@ do {						\
 			(local_lock_t *)__tl);		\
 	}						\
 	!!__tl;						\
-})
+}))
 
 /* preemption or migration must be disabled before calling __local_lock_is_locked */
 #define __local_lock_is_locked(lock)	READ_ONCE(this_cpu_ptr(lock)->acquired)
@@ -188,18 +199,21 @@ do {						\
 #define __local_unlock(lock)				\
 do {							\
+	__release(lock);				\
 	__local_lock_release(lock);			\
 	preempt_enable();				\
 } while (0)
 
 #define __local_unlock_irq(lock)			\
 do {							\
+	__release(lock);				\
 	__local_lock_release(lock);			\
 	local_irq_enable();				\
 } while (0)
 
 #define __local_unlock_irqrestore(lock, flags)		\
 do {							\
+	__release(lock);				\
 	__local_lock_release(lock);			\
 	local_irq_restore(flags);			\
 } while (0)
 
@@ -208,13 +222,19 @@ do {						\
 do {							\
 	lockdep_assert_in_softirq();			\
 	local_lock_acquire((lock));			\
+	__acquire(lock);				\
 } while (0)
 
 #define __local_unlock_nested_bh(lock)			\
-	local_lock_release((lock))
+do {							\
+	__release(lock);				\
+	local_lock_release((lock));			\
+} while (0)
 
 #else /* !CONFIG_PREEMPT_RT */
 
+#include <linux/spinlock.h>
+
 /*
  * On PREEMPT_RT local_lock maps to a per CPU spinlock, which protects the
  * critical section while staying preemptible.
@@ -269,7 +289,7 @@
 } while (0)
 
 #define __local_trylock(lock)				\
-({							\
+	__try_acquire_ctx_lock(lock, context_unsafe(({	\
 	int __locked;					\
 							\
 	if (in_nmi() | in_hardirq()) {			\
@@ -281,17 +301,40 @@ do {						\
 		migrate_enable();			\
 	}						\
 	__locked;					\
-})
+})))
 
 #define __local_trylock_irqsave(lock, flags)		\
-({							\
+	__try_acquire_ctx_lock(lock, ({			\
 	typecheck(unsigned long, flags);		\
 	flags = 0;					\
 	__local_trylock(lock);				\
-})
+}))
 
 /* migration must be disabled before calling __local_lock_is_locked */
 #define __local_lock_is_locked(__lock)			\
 	(rt_mutex_owner(&this_cpu_ptr(__lock)->lock) == current)
 
 #endif /* CONFIG_PREEMPT_RT */
+
+#if defined(WARN_CONTEXT_ANALYSIS)
+/*
+ * Because the compiler only knows about the base per-CPU variable, use this
+ * helper function to make the compiler think we lock/unlock the @base variable,
+ * and hide the fact we actually pass the per-CPU instance to lock/unlock
+ * functions.
+ */
+static __always_inline local_lock_t *__this_cpu_local_lock(local_lock_t __percpu *base)
+	__returns_ctx_lock(base) __attribute__((overloadable))
+{
+	return this_cpu_ptr(base);
+}
+#ifndef CONFIG_PREEMPT_RT
+static __always_inline local_trylock_t *__this_cpu_local_lock(local_trylock_t __percpu *base)
+	__returns_ctx_lock(base) __attribute__((overloadable))
+{
+	return this_cpu_ptr(base);
+}
+#endif /* CONFIG_PREEMPT_RT */
+#else /* WARN_CONTEXT_ANALYSIS */
+#define __this_cpu_local_lock(base) this_cpu_ptr(base)
+#endif /* WARN_CONTEXT_ANALYSIS */

diff --git a/lib/test_context-analysis.c b/lib/test_context-analysis.c
@@ -6,7 +6,9 @@
 #include <linux/bit_spinlock.h>
 #include <linux/build_bug.h>
+#include <linux/local_lock.h>
 #include <linux/mutex.h>
+#include <linux/percpu.h>
 #include <linux/rcupdate.h>
 #include <linux/rwsem.h>
 #include <linux/seqlock.h>
@@ -458,3 +460,74 @@ static void __used test_srcu_guard(struct test_srcu_data *d)
 	{ guard(srcu_fast)(&d->srcu); (void)srcu_dereference(d->data, &d->srcu); }
 	{ guard(srcu_fast_notrace)(&d->srcu); (void)srcu_dereference(d->data, &d->srcu); }
 }
+
+struct test_local_lock_data {
+	local_lock_t lock;
+	int counter __guarded_by(&lock);
+};
+
+static DEFINE_PER_CPU(struct test_local_lock_data, test_local_lock_data) = {
+	.lock = INIT_LOCAL_LOCK(lock),
+};
+
+static void __used test_local_lock_init(struct test_local_lock_data *d)
+{
+	local_lock_init(&d->lock);
+	d->counter = 0;
+}
+
+static void __used test_local_lock(void)
+{
+	unsigned long flags;
+
+	local_lock(&test_local_lock_data.lock);
+	this_cpu_add(test_local_lock_data.counter, 1);
+	local_unlock(&test_local_lock_data.lock);
+
+	local_lock_irq(&test_local_lock_data.lock);
+	this_cpu_add(test_local_lock_data.counter, 1);
+	local_unlock_irq(&test_local_lock_data.lock);
+
+	local_lock_irqsave(&test_local_lock_data.lock, flags);
+	this_cpu_add(test_local_lock_data.counter, 1);
+	local_unlock_irqrestore(&test_local_lock_data.lock, flags);
+
+	local_lock_nested_bh(&test_local_lock_data.lock);
+	this_cpu_add(test_local_lock_data.counter, 1);
+	local_unlock_nested_bh(&test_local_lock_data.lock);
+}
+
+static void __used test_local_lock_guard(void)
+{
+	{ guard(local_lock)(&test_local_lock_data.lock); this_cpu_add(test_local_lock_data.counter, 1); }
+	{ guard(local_lock_irq)(&test_local_lock_data.lock); this_cpu_add(test_local_lock_data.counter, 1); }
+	{ guard(local_lock_irqsave)(&test_local_lock_data.lock); this_cpu_add(test_local_lock_data.counter, 1); }
+	{ guard(local_lock_nested_bh)(&test_local_lock_data.lock); this_cpu_add(test_local_lock_data.counter, 1); }
+}
+
+struct test_local_trylock_data {
+	local_trylock_t lock;
+	int counter __guarded_by(&lock);
+};
+
+static DEFINE_PER_CPU(struct test_local_trylock_data, test_local_trylock_data) = {
+	.lock = INIT_LOCAL_TRYLOCK(lock),
+};
+
+static void __used test_local_trylock_init(struct test_local_trylock_data *d)
+{
+	local_trylock_init(&d->lock);
+	d->counter = 0;
+}
+
+static void __used test_local_trylock(void)
+{
+	local_lock(&test_local_trylock_data.lock);
+	this_cpu_add(test_local_trylock_data.counter, 1);
+	local_unlock(&test_local_trylock_data.lock);
+
+	if (local_trylock(&test_local_trylock_data.lock)) {
+		this_cpu_add(test_local_trylock_data.counter, 1);
+		local_unlock(&test_local_trylock_data.lock);
+	}
+}
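
For completeness, a sketch of the class of bug the new annotations
catch (hypothetical function; the exact diagnostic text depends on the
Clang version):

	static void buggy(void)
	{
		/*
		 * Lock not held: with WARN_CONTEXT_ANALYSIS enabled, Clang
		 * warns that 'counter' is guarded by 'lock' and requires the
		 * local_lock to be held here.
		 */
		this_cpu_add(test_local_lock_data.counter, 1);
	}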