Merge tag 'locking-core-2023-06-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:

 - Introduce cmpxchg128() -- aka. the demise of cmpxchg_double()

   The cmpxchg128() family of functions is basically & functionally
   the same as cmpxchg_double(), but with a saner interface.

   Instead of a 6-parameter horror that forced u128 - u64/u64-halves
   layout details on the interface and exposed users to complexity,
   fragility & bugs, use a natural 3-parameter interface with u128
   types.

 - Restructure the generated atomic headers, and add kerneldoc comments
   for all of the generic atomic{,64,_long}_t operations.

   The generated definitions are much cleaner now, and come with
   documentation.

 - Implement lock_set_cmp_fn() on lockdep, for defining an ordering
   when taking multiple locks of the same type.

   This gets rid of one use of lockdep_set_novalidate_class() in the
   bcache code.

 - Fix raw_cpu_generic_try_cmpxchg() bug due to an unintended variable
   shadowing generating garbage code on Clang on certain ARM builds.

* tag 'locking-core-2023-06-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (43 commits)
  locking/atomic: scripts: fix ${atomic}_dec_if_positive() kerneldoc
  percpu: Fix self-assignment of __old in raw_cpu_generic_try_cmpxchg()
  locking/atomic: treewide: delete arch_atomic_*() kerneldoc
  locking/atomic: docs: Add atomic operations to the driver basic API documentation
  locking/atomic: scripts: generate kerneldoc comments
  docs: scripts: kernel-doc: accept bitwise negation like ~@var
  locking/atomic: scripts: simplify raw_atomic*() definitions
  locking/atomic: scripts: simplify raw_atomic_long*() definitions
  locking/atomic: scripts: split pfx/name/sfx/order
  locking/atomic: scripts: restructure fallback ifdeffery
  locking/atomic: scripts: build raw_atomic_long*() directly
  locking/atomic: treewide: use raw_atomic*_<op>()
  locking/atomic: scripts: add trivial raw_atomic*_<op>()
  locking/atomic: scripts: factor out order template generation
  locking/atomic: scripts: remove leftover "${mult}"
  locking/atomic: scripts: remove bogus order parameter
  locking/atomic: xtensa: add preprocessor symbols
  locking/atomic: x86: add preprocessor symbols
  locking/atomic: sparc: add preprocessor symbols
  locking/atomic: sh: add preprocessor symbols
  ...
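The cmpxchg128() item above is purely an interface change; as a hedged illustration (not part of this merge), here is how the new 3-parameter, u128-typed call reads. The helper name and the idea of treating the slot as one packed 128-bit value are made up; only cmpxchg128() itself and its compare-old/store-new semantics come from the merge message.

    /* assumes <linux/atomic.h> for cmpxchg128() and <linux/types.h> for u128 */
    static inline bool publish_pair(u128 *slot, u128 expected, u128 new)
    {
            /* cmpxchg128() returns the value found at *slot; the store happened iff it matched */
            return cmpxchg128(slot, expected, new) == expected;
    }

Compare with cmpxchg_double(), which spelled the same operation as six arguments: two u64 halves each for the location, the expected value and the new value.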
kernel/context_tracking.c
@@ -510,7 +510,7 @@ void noinstr __ct_user_enter(enum ctx_state state)
          * In this we case we don't care about any concurrency/ordering.
          */
         if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE))
-            arch_atomic_set(&ct->state, state);
+            raw_atomic_set(&ct->state, state);
     } else {
         /*
          * Even if context tracking is disabled on this CPU, because it's outside
@@ -527,7 +527,7 @@ void noinstr __ct_user_enter(enum ctx_state state)
          */
         if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE)) {
             /* Tracking for vtime only, no concurrent RCU EQS accounting */
-            arch_atomic_set(&ct->state, state);
+            raw_atomic_set(&ct->state, state);
         } else {
             /*
              * Tracking for vtime and RCU EQS. Make sure we don't race
@@ -535,7 +535,7 @@ void noinstr __ct_user_enter(enum ctx_state state)
              * RCU only requires RCU_DYNTICKS_IDX increments to be fully
              * ordered.
              */
-            arch_atomic_add(state, &ct->state);
+            raw_atomic_add(state, &ct->state);
         }
     }
 }
@@ -630,12 +630,12 @@ void noinstr __ct_user_exit(enum ctx_state state)
          * In this we case we don't care about any concurrency/ordering.
          */
         if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE))
-            arch_atomic_set(&ct->state, CONTEXT_KERNEL);
+            raw_atomic_set(&ct->state, CONTEXT_KERNEL);

     } else {
         if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE)) {
             /* Tracking for vtime only, no concurrent RCU EQS accounting */
-            arch_atomic_set(&ct->state, CONTEXT_KERNEL);
+            raw_atomic_set(&ct->state, CONTEXT_KERNEL);
         } else {
             /*
              * Tracking for vtime and RCU EQS. Make sure we don't race
@@ -643,7 +643,7 @@ void noinstr __ct_user_exit(enum ctx_state state)
              * RCU only requires RCU_DYNTICKS_IDX increments to be fully
              * ordered.
              */
-            arch_atomic_sub(state, &ct->state);
+            raw_atomic_sub(state, &ct->state);
         }
     }
 }
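The hunks above are part of the treewide "locking/atomic: treewide: use raw_atomic*_<op>()" conversion listed in the merge message: noinstr context-tracking code moves from the old arch_atomic_*() spellings to the new raw_atomic_*() wrappers, which are likewise the non-instrumented variants and therefore safe in noinstr paths. A minimal sketch of the call shape; the struct and helper below are hypothetical, not the real context-tracking code.

    /* assumes <linux/atomic.h>; 'struct foo' is illustrative only */
    struct foo {
            atomic_t state;
    };

    static __always_inline void foo_set_state(struct foo *f, int state)
    {
            /* same arguments as atomic_set(), but no KASAN/KCSAN instrumentation emitted */
            raw_atomic_set(&f->state, state);
    }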
kernel/locking/lockdep.c
@@ -709,7 +709,7 @@ void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
     usage[i] = '\0';
 }

-static void __print_lock_name(struct lock_class *class)
+static void __print_lock_name(struct held_lock *hlock, struct lock_class *class)
 {
     char str[KSYM_NAME_LEN];
     const char *name;
@@ -724,17 +724,19 @@ static void __print_lock_name(struct lock_class *class)
             printk(KERN_CONT "#%d", class->name_version);
         if (class->subclass)
             printk(KERN_CONT "/%d", class->subclass);
+        if (hlock && class->print_fn)
+            class->print_fn(hlock->instance);
     }
 }

-static void print_lock_name(struct lock_class *class)
+static void print_lock_name(struct held_lock *hlock, struct lock_class *class)
 {
     char usage[LOCK_USAGE_CHARS];

     get_usage_chars(class, usage);

     printk(KERN_CONT " (");
-    __print_lock_name(class);
+    __print_lock_name(hlock, class);
     printk(KERN_CONT "){%s}-{%d:%d}", usage,
            class->wait_type_outer ?: class->wait_type_inner,
            class->wait_type_inner);
@@ -772,7 +774,7 @@ static void print_lock(struct held_lock *hlock)
     }

     printk(KERN_CONT "%px", hlock->instance);
-    print_lock_name(lock);
+    print_lock_name(hlock, lock);
     printk(KERN_CONT ", at: %pS\n", (void *)hlock->acquire_ip);
 }

@@ -1868,7 +1870,7 @@ print_circular_bug_entry(struct lock_list *target, int depth)
     if (debug_locks_silent)
         return;
     printk("\n-> #%u", depth);
-    print_lock_name(target->class);
+    print_lock_name(NULL, target->class);
     printk(KERN_CONT ":\n");
     print_lock_trace(target->trace, 6);
 }
@@ -1899,11 +1901,11 @@ print_circular_lock_scenario(struct held_lock *src,
      */
     if (parent != source) {
         printk("Chain exists of:\n ");
-        __print_lock_name(source);
+        __print_lock_name(src, source);
         printk(KERN_CONT " --> ");
-        __print_lock_name(parent);
+        __print_lock_name(NULL, parent);
         printk(KERN_CONT " --> ");
-        __print_lock_name(target);
+        __print_lock_name(tgt, target);
         printk(KERN_CONT "\n\n");
     }

@@ -1914,13 +1916,13 @@ print_circular_lock_scenario(struct held_lock *src,
         printk(" rlock(");
     else
         printk(" lock(");
-    __print_lock_name(target);
+    __print_lock_name(tgt, target);
     printk(KERN_CONT ");\n");
     printk(" lock(");
-    __print_lock_name(parent);
+    __print_lock_name(NULL, parent);
     printk(KERN_CONT ");\n");
     printk(" lock(");
-    __print_lock_name(target);
+    __print_lock_name(tgt, target);
     printk(KERN_CONT ");\n");
     if (src_read != 0)
         printk(" rlock(");
@@ -1928,7 +1930,7 @@ print_circular_lock_scenario(struct held_lock *src,
         printk(" sync(");
     else
         printk(" lock(");
-    __print_lock_name(source);
+    __print_lock_name(src, source);
     printk(KERN_CONT ");\n");
     printk("\n *** DEADLOCK ***\n\n");
 }
@@ -2154,6 +2156,8 @@ check_path(struct held_lock *target, struct lock_list *src_entry,
     return ret;
 }

+static void print_deadlock_bug(struct task_struct *, struct held_lock *, struct held_lock *);
+
 /*
  * Prove that the dependency graph starting at <src> can not
  * lead to <target>. If it can, there is a circle when adding
@@ -2185,7 +2189,10 @@ check_noncircular(struct held_lock *src, struct held_lock *target,
             *trace = save_trace();
         }

-        print_circular_bug(&src_entry, target_entry, src, target);
+        if (src->class_idx == target->class_idx)
+            print_deadlock_bug(current, src, target);
+        else
+            print_circular_bug(&src_entry, target_entry, src, target);
     }

     return ret;
@@ -2346,7 +2353,7 @@ static void print_lock_class_header(struct lock_class *class, int depth)
     int bit;

     printk("%*s->", depth, "");
-    print_lock_name(class);
+    print_lock_name(NULL, class);
 #ifdef CONFIG_DEBUG_LOCKDEP
     printk(KERN_CONT " ops: %lu", debug_class_ops_read(class));
 #endif
@@ -2528,11 +2535,11 @@ print_irq_lock_scenario(struct lock_list *safe_entry,
      */
     if (middle_class != unsafe_class) {
         printk("Chain exists of:\n ");
-        __print_lock_name(safe_class);
+        __print_lock_name(NULL, safe_class);
         printk(KERN_CONT " --> ");
-        __print_lock_name(middle_class);
+        __print_lock_name(NULL, middle_class);
         printk(KERN_CONT " --> ");
-        __print_lock_name(unsafe_class);
+        __print_lock_name(NULL, unsafe_class);
         printk(KERN_CONT "\n\n");
     }

@@ -2540,18 +2547,18 @@ print_irq_lock_scenario(struct lock_list *safe_entry,
     printk(" CPU0 CPU1\n");
     printk(" ---- ----\n");
     printk(" lock(");
-    __print_lock_name(unsafe_class);
+    __print_lock_name(NULL, unsafe_class);
     printk(KERN_CONT ");\n");
     printk(" local_irq_disable();\n");
     printk(" lock(");
-    __print_lock_name(safe_class);
+    __print_lock_name(NULL, safe_class);
     printk(KERN_CONT ");\n");
     printk(" lock(");
-    __print_lock_name(middle_class);
+    __print_lock_name(NULL, middle_class);
     printk(KERN_CONT ");\n");
     printk(" <Interrupt>\n");
     printk(" lock(");
-    __print_lock_name(safe_class);
+    __print_lock_name(NULL, safe_class);
     printk(KERN_CONT ");\n");
     printk("\n *** DEADLOCK ***\n\n");
 }
@@ -2588,20 +2595,20 @@ print_bad_irq_dependency(struct task_struct *curr,
     pr_warn("\nand this task is already holding:\n");
     print_lock(prev);
     pr_warn("which would create a new lock dependency:\n");
-    print_lock_name(hlock_class(prev));
+    print_lock_name(prev, hlock_class(prev));
     pr_cont(" ->");
-    print_lock_name(hlock_class(next));
+    print_lock_name(next, hlock_class(next));
     pr_cont("\n");

     pr_warn("\nbut this new dependency connects a %s-irq-safe lock:\n",
         irqclass);
-    print_lock_name(backwards_entry->class);
+    print_lock_name(NULL, backwards_entry->class);
     pr_warn("\n... which became %s-irq-safe at:\n", irqclass);

     print_lock_trace(backwards_entry->class->usage_traces[bit1], 1);

     pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
-    print_lock_name(forwards_entry->class);
+    print_lock_name(NULL, forwards_entry->class);
     pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);
     pr_warn("...");

@@ -2971,10 +2978,10 @@ print_deadlock_scenario(struct held_lock *nxt, struct held_lock *prv)
     printk(" CPU0\n");
     printk(" ----\n");
     printk(" lock(");
-    __print_lock_name(prev);
+    __print_lock_name(prv, prev);
     printk(KERN_CONT ");\n");
     printk(" lock(");
-    __print_lock_name(next);
+    __print_lock_name(nxt, next);
     printk(KERN_CONT ");\n");
     printk("\n *** DEADLOCK ***\n\n");
     printk(" May be due to missing lock nesting notation\n\n");
@@ -2984,6 +2991,8 @@ static void
 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
            struct held_lock *next)
 {
+    struct lock_class *class = hlock_class(prev);
+
     if (!debug_locks_off_graph_unlock() || debug_locks_silent)
         return;

@@ -2998,6 +3007,11 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
     pr_warn("\nbut task is already holding lock:\n");
     print_lock(prev);

+    if (class->cmp_fn) {
+        pr_warn("and the lock comparison function returns %i:\n",
+            class->cmp_fn(prev->instance, next->instance));
+    }
+
     pr_warn("\nother info that might help us debug this:\n");
     print_deadlock_scenario(next, prev);
     lockdep_print_held_locks(curr);
@@ -3019,6 +3033,7 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
 static int
 check_deadlock(struct task_struct *curr, struct held_lock *next)
 {
+    struct lock_class *class;
     struct held_lock *prev;
     struct held_lock *nest = NULL;
     int i;
@@ -3039,6 +3054,12 @@ check_deadlock(struct task_struct *curr, struct held_lock *next)
         if ((next->read == 2) && prev->read)
             continue;

+        class = hlock_class(prev);
+
+        if (class->cmp_fn &&
+            class->cmp_fn(prev->instance, next->instance) < 0)
+            continue;
+
         /*
          * We're holding the nest_lock, which serializes this lock's
          * nesting behaviour.
@@ -3100,6 +3121,14 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
         return 2;
     }

+    if (prev->class_idx == next->class_idx) {
+        struct lock_class *class = hlock_class(prev);
+
+        if (class->cmp_fn &&
+            class->cmp_fn(prev->instance, next->instance) < 0)
+            return 2;
+    }
+
     /*
      * Prove that the new <prev> -> <next> dependency would not
      * create a circular dependency in the graph. (We do this by
@@ -3576,7 +3605,7 @@ static void print_chain_keys_chain(struct lock_chain *chain)
         hlock_id = chain_hlocks[chain->base + i];
         chain_key = print_chain_key_iteration(hlock_id, chain_key);

-        print_lock_name(lock_classes + chain_hlock_class_idx(hlock_id));
+        print_lock_name(NULL, lock_classes + chain_hlock_class_idx(hlock_id));
         printk("\n");
     }
 }
@@ -3933,11 +3962,11 @@ static void print_usage_bug_scenario(struct held_lock *lock)
     printk(" CPU0\n");
     printk(" ----\n");
     printk(" lock(");
-    __print_lock_name(class);
+    __print_lock_name(lock, class);
     printk(KERN_CONT ");\n");
     printk(" <Interrupt>\n");
     printk(" lock(");
-    __print_lock_name(class);
+    __print_lock_name(lock, class);
     printk(KERN_CONT ");\n");
     printk("\n *** DEADLOCK ***\n\n");
 }
@@ -4023,7 +4052,7 @@ print_irq_inversion_bug(struct task_struct *curr,
         pr_warn("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
     else
         pr_warn("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
-    print_lock_name(other->class);
+    print_lock_name(NULL, other->class);
     pr_warn("\n\nand interrupts could create inverse lock ordering between them.\n\n");

     pr_warn("\nother info that might help us debug this:\n");
@@ -4896,6 +4925,33 @@ EXPORT_SYMBOL_GPL(lockdep_init_map_type);
 struct lock_class_key __lockdep_no_validate__;
 EXPORT_SYMBOL_GPL(__lockdep_no_validate__);

+#ifdef CONFIG_PROVE_LOCKING
+void lockdep_set_lock_cmp_fn(struct lockdep_map *lock, lock_cmp_fn cmp_fn,
+                 lock_print_fn print_fn)
+{
+    struct lock_class *class = lock->class_cache[0];
+    unsigned long flags;
+
+    raw_local_irq_save(flags);
+    lockdep_recursion_inc();
+
+    if (!class)
+        class = register_lock_class(lock, 0, 0);
+
+    if (class) {
+        WARN_ON(class->cmp_fn && class->cmp_fn != cmp_fn);
+        WARN_ON(class->print_fn && class->print_fn != print_fn);
+
+        class->cmp_fn = cmp_fn;
+        class->print_fn = print_fn;
+    }
+
+    lockdep_recursion_finish();
+    raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(lockdep_set_lock_cmp_fn);
+#endif
+
 static void
 print_lock_nested_lock_not_held(struct task_struct *curr,
                 struct held_lock *hlock)
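The lockdep_set_lock_cmp_fn() added above is the backend for the lock_set_cmp_fn() item in the merge message. Below is a hedged usage sketch, not taken from this commit: the structure, field names and ordering rule are invented, and the const-qualified callback signatures are inferred from how cmp_fn/print_fn are invoked in the diff. The comparison function is called as cmp_fn(held lock, lock being acquired); a negative return tells lockdep that this particular nesting of two same-class locks is allowed, which is exactly what the new checks in check_deadlock() and check_prev_add() test.

    /* assumes <linux/lockdep.h> and <linux/spinlock.h>, with CONFIG_PROVE_LOCKING=y */
    struct tree_node {
            spinlock_t      lock;
            unsigned int    level;          /* leaves are level 0; hypothetical */
    };

    /* called as cmp_fn(held, new); < 0 means "taking 'new' after 'held' is fine" */
    static int tree_node_cmp_fn(const struct lockdep_map *a,
                                const struct lockdep_map *b)
    {
            const struct tree_node *na = container_of(a, struct tree_node, lock.dep_map);
            const struct tree_node *nb = container_of(b, struct tree_node, lock.dep_map);

            if (na->level == nb->level)
                    return 0;                               /* same level: still reported */
            return na->level > nb->level ? -1 : 1;          /* parents before children */
    }

    static void tree_node_print_fn(const struct lockdep_map *map)
    {
            const struct tree_node *n = container_of(map, struct tree_node, lock.dep_map);

            printk(KERN_CONT " level=%u", n->level);
    }

    static void tree_node_init(struct tree_node *n, unsigned int level)
    {
            spin_lock_init(&n->lock);
            n->level = level;
            lockdep_set_lock_cmp_fn(&n->lock.dep_map, tree_node_cmp_fn,
                                    tree_node_print_fn);
    }

In a report, the print_fn() output is appended after the class name (the new "if (hlock && class->print_fn)" branch in __print_lock_name() above), so the splat shows which instances of the class were involved.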
kernel/sched/clock.c
@@ -287,7 +287,7 @@ again:
     clock = wrap_max(clock, min_clock);
     clock = wrap_min(clock, max_clock);

-    if (!arch_try_cmpxchg64(&scd->clock, &old_clock, clock))
+    if (!raw_try_cmpxchg64(&scd->clock, &old_clock, clock))
         goto again;

     return clock;
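The sched_clock hunk keeps the existing retry loop and only switches to the raw_ spelling. For reference, a hedged sketch of the try_cmpxchg64() idiom it relies on (a generic illustration with made-up names, not this file's code): on failure the expected value is refreshed with the current contents, so the caller just recomputes and retries.

    /* assumes <linux/atomic.h> and <linux/minmax.h>; helper name is made up */
    static u64 publish_clamped(u64 *slot, u64 lo, u64 hi)
    {
            u64 old = READ_ONCE(*slot), new;

            do {
                    new = clamp(old, lo, hi);       /* recompute from the refreshed 'old' */
            } while (!try_cmpxchg64(slot, &old, new));

            return new;
    }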