From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 20 Aug 2024 08:36:01 +0206
Subject: [PATCH 35/54] lockdep: Mark emergency sections in lockdep splats
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.11/older/patches-6.11-rt7.tar.xz

Mark emergency sections wherever multiple lines of lock debugging
output are generated. In an emergency section, every printk() call
will attempt to directly flush to the consoles using the EMERGENCY
priority.
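
A minimal sketch of the resulting pattern (illustrative only; the
helper function below is made up, while nbcon_cpu_emergency_enter()
and nbcon_cpu_emergency_exit() are the <linux/console.h> interfaces
this patch uses):

  #include <linux/console.h>
  #include <linux/printk.h>

  /* Hypothetical helper emitting a multi-line report. */
  static void example_report(void)
  {
          nbcon_cpu_emergency_enter();

          /* Every printk() in this section attempts a direct
           * flush to the consoles at EMERGENCY priority.
           */
          pr_warn("WARNING: example condition detected\n");
          dump_stack();

          nbcon_cpu_emergency_exit();
  }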
Note that debug_show_all_locks() and lockdep_print_held_locks()
rely on their callers to enter the emergency section. This is
because these functions can also be called in non-emergency
situations (such as sysrq).
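
For instance, the __lock_acquire() change below brackets the whole
report at the call site (sketch mirroring that hunk):

  nbcon_cpu_emergency_enter();
  print_lockdep_off("BUG: MAX_LOCK_DEPTH too low!");
  lockdep_print_held_locks(current);
  debug_show_all_locks();
  dump_stack();
  nbcon_cpu_emergency_exit();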

Signed-off-by: John Ogness <john.ogness@linutronix.de>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Link: https://lore.kernel.org/r/20240820063001.36405-36-john.ogness@linutronix.de
Signed-off-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 kernel/locking/lockdep.c | 83 +++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 81 insertions(+), 2 deletions(-)

--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -56,6 +56,7 @@
 #include <linux/kprobes.h>
 #include <linux/lockdep.h>
 #include <linux/context_tracking.h>
+#include <linux/console.h>
 
 #include <asm/sections.h>
 
@@ -573,8 +574,10 @@ static struct lock_trace *save_trace(voi
 		if (!debug_locks_off_graph_unlock())
 			return NULL;
 
+		nbcon_cpu_emergency_enter();
 		print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
 		dump_stack();
+		nbcon_cpu_emergency_exit();
 
 		return NULL;
 	}
@@ -887,11 +890,13 @@ look_up_lock_class(const struct lockdep_
 	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
 		instrumentation_begin();
 		debug_locks_off();
+		nbcon_cpu_emergency_enter();
 		printk(KERN_ERR
 		       "BUG: looking up invalid subclass: %u\n", subclass);
 		printk(KERN_ERR
 		       "turning off the locking correctness validator.\n");
 		dump_stack();
+		nbcon_cpu_emergency_exit();
 		instrumentation_end();
 		return NULL;
 	}
@@ -968,11 +973,13 @@ static bool assign_lock_key(struct lockd
 	else {
 		/* Debug-check: all keys must be persistent! */
 		debug_locks_off();
+		nbcon_cpu_emergency_enter();
 		pr_err("INFO: trying to register non-static key.\n");
 		pr_err("The code is fine but needs lockdep annotation, or maybe\n");
 		pr_err("you didn't initialize this object before use?\n");
 		pr_err("turning off the locking correctness validator.\n");
 		dump_stack();
+		nbcon_cpu_emergency_exit();
 		return false;
 	}
 
@@ -1316,8 +1323,10 @@ register_lock_class(struct lockdep_map *
 			return NULL;
 		}
 
+		nbcon_cpu_emergency_enter();
 		print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
 		dump_stack();
+		nbcon_cpu_emergency_exit();
 		return NULL;
 	}
 	nr_lock_classes++;
@@ -1349,11 +1358,13 @@ register_lock_class(struct lockdep_map *
 	if (verbose(class)) {
 		graph_unlock();
 
+		nbcon_cpu_emergency_enter();
 		printk("\nnew class %px: %s", class->key, class->name);
 		if (class->name_version > 1)
 			printk(KERN_CONT "#%d", class->name_version);
 		printk(KERN_CONT "\n");
 		dump_stack();
+		nbcon_cpu_emergency_exit();
 
 		if (!graph_lock()) {
 			return NULL;
@@ -1392,8 +1403,10 @@ static struct lock_list *alloc_list_entr
 		if (!debug_locks_off_graph_unlock())
 			return NULL;
 
+		nbcon_cpu_emergency_enter();
 		print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!");
 		dump_stack();
+		nbcon_cpu_emergency_exit();
 		return NULL;
 	}
 	nr_list_entries++;
@@ -2039,6 +2052,8 @@ static noinline void print_circular_bug(
 
 	depth = get_lock_depth(target);
 
+	nbcon_cpu_emergency_enter();
+
 	print_circular_bug_header(target, depth, check_src, check_tgt);
 
 	parent = get_lock_parent(target);
@@ -2057,6 +2072,8 @@ static noinline void print_circular_bug(
 
 	printk("\nstack backtrace:\n");
 	dump_stack();
+
+	nbcon_cpu_emergency_exit();
 }
 
 static noinline void print_bfs_bug(int ret)
@@ -2569,6 +2586,8 @@ print_bad_irq_dependency(struct task_str
 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return;
 
+	nbcon_cpu_emergency_enter();
+
 	pr_warn("\n");
 	pr_warn("=====================================================\n");
 	pr_warn("WARNING: %s-safe -> %s-unsafe lock order detected\n",
@@ -2618,11 +2637,13 @@ print_bad_irq_dependency(struct task_str
 	pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
 	next_root->trace = save_trace();
 	if (!next_root->trace)
-		return;
+		goto out;
 	print_shortest_lock_dependencies(forwards_entry, next_root);
 
 	pr_warn("\nstack backtrace:\n");
 	dump_stack();
+out:
+	nbcon_cpu_emergency_exit();
 }
 
 static const char *state_names[] = {
@@ -2987,6 +3008,8 @@ print_deadlock_bug(struct task_struct *c
 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return;
 
+	nbcon_cpu_emergency_enter();
+
 	pr_warn("\n");
 	pr_warn("============================================\n");
 	pr_warn("WARNING: possible recursive locking detected\n");
@@ -3009,6 +3032,8 @@ print_deadlock_bug(struct task_struct *c
 
 	pr_warn("\nstack backtrace:\n");
 	dump_stack();
+
+	nbcon_cpu_emergency_exit();
 }
 
 /*
@@ -3606,6 +3631,8 @@ static void print_collision(struct task_
 			struct held_lock *hlock_next,
 			struct lock_chain *chain)
 {
+	nbcon_cpu_emergency_enter();
+
 	pr_warn("\n");
 	pr_warn("============================\n");
 	pr_warn("WARNING: chain_key collision\n");
@@ -3622,6 +3649,8 @@ static void print_collision(struct task_
 
 	pr_warn("\nstack backtrace:\n");
 	dump_stack();
+
+	nbcon_cpu_emergency_exit();
 }
 #endif
 
@@ -3712,8 +3741,10 @@ static inline int add_chain_cache(struct
 		if (!debug_locks_off_graph_unlock())
 			return 0;
 
+		nbcon_cpu_emergency_enter();
 		print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
 		dump_stack();
+		nbcon_cpu_emergency_exit();
 		return 0;
 	}
 	chain->chain_key = chain_key;
@@ -3730,8 +3761,10 @@ static inline int add_chain_cache(struct
 		if (!debug_locks_off_graph_unlock())
 			return 0;
 
+		nbcon_cpu_emergency_enter();
 		print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
 		dump_stack();
+		nbcon_cpu_emergency_exit();
 		return 0;
 	}
 
@@ -3970,6 +4003,8 @@ print_usage_bug(struct task_struct *curr
 	if (!debug_locks_off() || debug_locks_silent)
 		return;
 
+	nbcon_cpu_emergency_enter();
+
 	pr_warn("\n");
 	pr_warn("================================\n");
 	pr_warn("WARNING: inconsistent lock state\n");
@@ -3998,6 +4033,8 @@ print_usage_bug(struct task_struct *curr
 
 	pr_warn("\nstack backtrace:\n");
 	dump_stack();
+
+	nbcon_cpu_emergency_exit();
 }
 
 /*
@@ -4032,6 +4069,8 @@ print_irq_inversion_bug(struct task_stru
 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return;
 
+	nbcon_cpu_emergency_enter();
+
 	pr_warn("\n");
 	pr_warn("========================================================\n");
 	pr_warn("WARNING: possible irq lock inversion dependency detected\n");
@@ -4072,11 +4111,13 @@ print_irq_inversion_bug(struct task_stru
 	pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
 	root->trace = save_trace();
 	if (!root->trace)
-		return;
+		goto out;
 	print_shortest_lock_dependencies(other, root);
 
 	pr_warn("\nstack backtrace:\n");
 	dump_stack();
+out:
+	nbcon_cpu_emergency_exit();
 }
 
 /*
@@ -4153,6 +4194,8 @@ void print_irqtrace_events(struct task_s
 {
 	const struct irqtrace_events *trace = &curr->irqtrace;
 
+	nbcon_cpu_emergency_enter();
+
 	printk("irq event stamp: %u\n", trace->irq_events);
 	printk("hardirqs last enabled at (%u): [<%px>] %pS\n",
 		trace->hardirq_enable_event, (void *)trace->hardirq_enable_ip,
@@ -4166,6 +4209,8 @@ void print_irqtrace_events(struct task_s
 	printk("softirqs last disabled at (%u): [<%px>] %pS\n",
 		trace->softirq_disable_event, (void *)trace->softirq_disable_ip,
 		(void *)trace->softirq_disable_ip);
+
+	nbcon_cpu_emergency_exit();
 }
 
 static int HARDIRQ_verbose(struct lock_class *class)
@@ -4686,10 +4731,12 @@ static int mark_lock(struct task_struct
 	 * We must printk outside of the graph_lock:
 	 */
 	if (ret == 2) {
+		nbcon_cpu_emergency_enter();
 		printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
 		print_lock(this);
 		print_irqtrace_events(curr);
 		dump_stack();
+		nbcon_cpu_emergency_exit();
 	}
 
 	return ret;
@@ -4730,6 +4777,8 @@ print_lock_invalid_wait_context(struct t
 	if (debug_locks_silent)
 		return 0;
 
+	nbcon_cpu_emergency_enter();
+
 	pr_warn("\n");
 	pr_warn("=============================\n");
 	pr_warn("[ BUG: Invalid wait context ]\n");
@@ -4749,6 +4798,8 @@ print_lock_invalid_wait_context(struct t
 	pr_warn("stack backtrace:\n");
 	dump_stack();
 
+	nbcon_cpu_emergency_exit();
+
 	return 0;
 }
 
@@ -4956,6 +5007,8 @@ print_lock_nested_lock_not_held(struct t
 	if (debug_locks_silent)
 		return;
 
+	nbcon_cpu_emergency_enter();
+
 	pr_warn("\n");
 	pr_warn("==================================\n");
 	pr_warn("WARNING: Nested lock was not taken\n");
@@ -4976,6 +5029,8 @@ print_lock_nested_lock_not_held(struct t
 
 	pr_warn("\nstack backtrace:\n");
 	dump_stack();
+
+	nbcon_cpu_emergency_exit();
 }
 
 static int __lock_is_held(const struct lockdep_map *lock, int read);
@@ -5024,11 +5079,13 @@ static int __lock_acquire(struct lockdep
 	debug_class_ops_inc(class);
 
 	if (very_verbose(class)) {
+		nbcon_cpu_emergency_enter();
 		printk("\nacquire class [%px] %s", class->key, class->name);
 		if (class->name_version > 1)
 			printk(KERN_CONT "#%d", class->name_version);
 		printk(KERN_CONT "\n");
 		dump_stack();
+		nbcon_cpu_emergency_exit();
 	}
 
 	/*
@@ -5155,6 +5212,7 @@ static int __lock_acquire(struct lockdep
 #endif
 	if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
 		debug_locks_off();
+		nbcon_cpu_emergency_enter();
 		print_lockdep_off("BUG: MAX_LOCK_DEPTH too low!");
 		printk(KERN_DEBUG "depth: %i max: %lu!\n",
 		       curr->lockdep_depth, MAX_LOCK_DEPTH);
@@ -5162,6 +5220,7 @@ static int __lock_acquire(struct lockdep
 		lockdep_print_held_locks(current);
 		debug_show_all_locks();
 		dump_stack();
+		nbcon_cpu_emergency_exit();
 
 		return 0;
 	}
@@ -5181,6 +5240,8 @@ static void print_unlock_imbalance_bug(s
 	if (debug_locks_silent)
 		return;
 
+	nbcon_cpu_emergency_enter();
+
 	pr_warn("\n");
 	pr_warn("=====================================\n");
 	pr_warn("WARNING: bad unlock balance detected!\n");
@@ -5197,6 +5258,8 @@ static void print_unlock_imbalance_bug(s
 
 	pr_warn("\nstack backtrace:\n");
 	dump_stack();
+
+	nbcon_cpu_emergency_exit();
 }
 
 static noinstr int match_held_lock(const struct held_lock *hlock,
@@ -5901,6 +5964,8 @@ static void print_lock_contention_bug(st
 	if (debug_locks_silent)
 		return;
 
+	nbcon_cpu_emergency_enter();
+
 	pr_warn("\n");
 	pr_warn("=================================\n");
 	pr_warn("WARNING: bad contention detected!\n");
@@ -5917,6 +5982,8 @@ static void print_lock_contention_bug(st
 
 	pr_warn("\nstack backtrace:\n");
 	dump_stack();
+
+	nbcon_cpu_emergency_exit();
 }
 
 static void
@@ -6536,6 +6603,8 @@ print_freed_lock_bug(struct task_struct
 	if (debug_locks_silent)
 		return;
 
+	nbcon_cpu_emergency_enter();
+
 	pr_warn("\n");
 	pr_warn("=========================\n");
 	pr_warn("WARNING: held lock freed!\n");
@@ -6548,6 +6617,8 @@ print_freed_lock_bug(struct task_struct
 
 	pr_warn("\nstack backtrace:\n");
 	dump_stack();
+
+	nbcon_cpu_emergency_exit();
 }
 
 static inline int not_in_range(const void* mem_from, unsigned long mem_len,
@@ -6594,6 +6665,8 @@ static void print_held_locks_bug(void)
 	if (debug_locks_silent)
 		return;
 
+	nbcon_cpu_emergency_enter();
+
 	pr_warn("\n");
 	pr_warn("====================================\n");
 	pr_warn("WARNING: %s/%d still has locks held!\n",
@@ -6603,6 +6676,8 @@ static void print_held_locks_bug(void)
 	lockdep_print_held_locks(current);
 	pr_warn("\nstack backtrace:\n");
 	dump_stack();
+
+	nbcon_cpu_emergency_exit();
 }
 
 void debug_check_no_locks_held(void)
@@ -6660,6 +6735,7 @@ asmlinkage __visible void lockdep_sys_ex
 	if (unlikely(curr->lockdep_depth)) {
 		if (!debug_locks_off())
 			return;
+		nbcon_cpu_emergency_enter();
 		pr_warn("\n");
 		pr_warn("================================================\n");
 		pr_warn("WARNING: lock held when returning to user space!\n");
@@ -6668,6 +6744,7 @@ asmlinkage __visible void lockdep_sys_ex
 		pr_warn("%s/%d is leaving the kernel with locks still held!\n",
 			curr->comm, curr->pid);
 		lockdep_print_held_locks(curr);
+		nbcon_cpu_emergency_exit();
 	}
 
 	/*
@@ -6684,6 +6761,7 @@ void lockdep_rcu_suspicious(const char *
 	bool rcu = warn_rcu_enter();
 
 	/* Note: the following can be executed concurrently, so be careful. */
+	nbcon_cpu_emergency_enter();
 	pr_warn("\n");
 	pr_warn("=============================\n");
 	pr_warn("WARNING: suspicious RCU usage\n");
@@ -6722,6 +6800,7 @@ void lockdep_rcu_suspicious(const char *
 	lockdep_print_held_locks(curr);
 	pr_warn("\nstack backtrace:\n");
 	dump_stack();
+	nbcon_cpu_emergency_exit();
 	warn_rcu_exit(rcu);
 }
 EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);