From 62b8eb7367de30fd149137b08ffbff5aadf02541 Mon Sep 17 00:00:00 2001
From: Steven Rostedt <rostedt@goodmis.org>
Date: Fri, 22 Aug 2025 18:36:06 -0400
Subject: ftrace: Also allocate and copy hash for reading of filter files

Currently the reader of set_ftrace_filter and set_ftrace_notrace just
stores the pointer to the global tracer hash in its iterator. Unlike the
writer, which allocates a copy of the hash, the reader keeps the pointer
to the filter hashes. This is problematic because the iterator holds on
to that pointer across function calls that release the locks protecting
the global tracer hashes, so those hashes can be updated or freed while
the reader still references them. This can cause use-after-free (UAF)
and similar bugs.
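
The failure mode is the classic stale-pointer use-after-free. As an
illustration only (the names below are made up and are not the real
ftrace functions or structures), the old read path effectively did this:

	/* Illustrative sketch only -- made-up names, not the kernel code */
	struct my_hash { unsigned long *entries; };
	struct my_iter { struct my_hash *hash; };

	static struct my_hash *global_hash;	/* shared hash, replaced by writers */

	static void reader_open(struct my_iter *iter)
	{
		iter->hash = global_hash;	/* old behavior: cache the bare pointer */
	}

	static void writer_update(struct my_hash *new_hash)
	{
		struct my_hash *old = global_hash;

		global_hash = new_hash;		/* publish an updated hash ... */
		kfree(old);			/* ... and free the old one */
	}

	/*
	 * If writer_update() runs while a reader still holds iter->hash
	 * (the read side drops the relevant locks between calls), the
	 * next access through iter->hash is a use-after-free.
	 */
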
Allocate and copy the hash for reading the filter files, just as is done
for the writers. This not only fixes the UAF bugs, but also makes the
code a bit simpler, as it no longer has to differentiate between writers
and readers when deciding how to free the iterator's hash.
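
Continuing the illustrative sketch above (again with made-up names, not
the actual kernel code), the fixed read path takes a private copy on open
and frees it unconditionally on release; the real code deep-copies the
hash via alloc_and_copy_ftrace_hash():

	/* Illustrative sketch only -- made-up names, not the kernel code */
	static int reader_open_fixed(struct my_iter *iter)
	{
		/*
		 * Take a private snapshot instead of caching the shared
		 * pointer. (A real hash needs a deep copy, which is what
		 * alloc_and_copy_ftrace_hash() provides.)
		 */
		iter->hash = kmemdup(global_hash, sizeof(*global_hash), GFP_KERNEL);
		if (!iter->hash)
			return -ENOMEM;
		return 0;
	}

	static void reader_release_fixed(struct my_iter *iter)
	{
		/* the iterator always owns its hash now, so it is always freed */
		kfree(iter->hash);
	}
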
Cc: stable@vger.kernel.org
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Nathan Chancellor <nathan@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: https://lore.kernel.org/20250822183606.12962cc3@batman.local.home
Fixes: c20489dad156 ("ftrace: Assign iter->hash to filter or notrace hashes on seq read")
Closes: https://lore.kernel.org/all/20250813023044.2121943-1-wutengda@huaweicloud.com/
Closes: https://lore.kernel.org/all/20250822192437.GA458494@ax162/
Reported-by: Tengda Wu <wutengda@huaweicloud.com>
Tested-by: Tengda Wu <wutengda@huaweicloud.com>
Tested-by: Nathan Chancellor <nathan@kernel.org>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
---
 kernel/trace/ftrace.c | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -4665,13 +4665,17 @@ ftrace_regex_open(struct ftrace_ops *ops
 		} else {
 			iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
 		}
+	} else {
+		if (hash)
+			iter->hash = alloc_and_copy_ftrace_hash(hash->size_bits, hash);
+		else
+			iter->hash = EMPTY_HASH;
+	}
 
-		if (!iter->hash) {
-			trace_parser_put(&iter->parser);
-			goto out_unlock;
-		}
-	} else
-		iter->hash = hash;
+	if (!iter->hash) {
+		trace_parser_put(&iter->parser);
+		goto out_unlock;
+	}
 
 	ret = 0;
 
@@ -6547,9 +6551,6 @@ int ftrace_regex_release(struct inode *i
 		ftrace_hash_move_and_update_ops(iter->ops, orig_hash,
 						iter->hash, filter_hash);
 		mutex_unlock(&ftrace_lock);
-	} else {
-		/* For read only, the hash is the ops hash */
-		iter->hash = NULL;
 	}
 
 	mutex_unlock(&iter->ops->func_hash->regex_lock);
|