--- /dev/null
+From e18eb8783ec4949adebc7d7b0fdb65f65bfeefd9 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Google)" <rostedt@goodmis.org>
+Date: Wed, 23 Nov 2022 14:25:57 -0500
+Subject: tracing: Add tracing_reset_all_online_cpus_unlocked() function
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+commit e18eb8783ec4949adebc7d7b0fdb65f65bfeefd9 upstream.
+
+Currently the tracing_reset_all_online_cpus() requires the
+trace_types_lock held. But only one caller of this function actually has
+that lock held before calling it, and the other just takes the lock so
+that it can call it. More users of this function are needed where the
+lock is not held.
+
+Add a tracing_reset_all_online_cpus_unlocked() function for the one use
+case that calls it without the lock being held, and also add a
+lockdep_assert to make sure it is held when called.
+
+Then have tracing_reset_all_online_cpus() take the lock internally, such
+that callers do not need to worry about taking it.
+
+Link: https://lkml.kernel.org/r/20221123192741.658273220@goodmis.org
+
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Zheng Yejian <zhengyejian1@huawei.com>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace.c | 11 ++++++++++-
+ kernel/trace/trace.h | 1 +
+ kernel/trace/trace_events.c | 2 +-
+ kernel/trace/trace_events_synth.c | 2 --
+ 4 files changed, 12 insertions(+), 4 deletions(-)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -2178,10 +2178,12 @@ void tracing_reset_online_cpus(struct ar
+ }
+
+ /* Must have trace_types_lock held */
+-void tracing_reset_all_online_cpus(void)
++void tracing_reset_all_online_cpus_unlocked(void)
+ {
+ struct trace_array *tr;
+
++ lockdep_assert_held(&trace_types_lock);
++
+ list_for_each_entry(tr, &ftrace_trace_arrays, list) {
+ if (!tr->clear_trace)
+ continue;
+@@ -2193,6 +2195,13 @@ void tracing_reset_all_online_cpus(void)
+ }
+ }
+
++void tracing_reset_all_online_cpus(void)
++{
++ mutex_lock(&trace_types_lock);
++ tracing_reset_all_online_cpus_unlocked();
++ mutex_unlock(&trace_types_lock);
++}
++
+ /*
+ * The tgid_map array maps from pid to tgid; i.e. the value stored at index i
+ * is the tgid last observed corresponding to pid=i.
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -725,6 +725,7 @@ int tracing_is_enabled(void);
+ void tracing_reset_online_cpus(struct array_buffer *buf);
+ void tracing_reset_current(int cpu);
+ void tracing_reset_all_online_cpus(void);
++void tracing_reset_all_online_cpus_unlocked(void);
+ int tracing_open_generic(struct inode *inode, struct file *filp);
+ int tracing_open_generic_tr(struct inode *inode, struct file *filp);
+ bool tracing_is_disabled(void);
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -2661,7 +2661,7 @@ static void trace_module_remove_events(s
+ * over from this module may be passed to the new module events and
+ * unexpected results may occur.
+ */
+- tracing_reset_all_online_cpus();
++ tracing_reset_all_online_cpus_unlocked();
+ }
+
+ static int trace_module_notify(struct notifier_block *self,
+--- a/kernel/trace/trace_events_synth.c
++++ b/kernel/trace/trace_events_synth.c
+@@ -1363,7 +1363,6 @@ int synth_event_delete(const char *event
+ mutex_unlock(&event_mutex);
+
+ if (mod) {
+- mutex_lock(&trace_types_lock);
+ /*
+ * It is safest to reset the ring buffer if the module
+ * being unloaded registered any events that were
+@@ -1375,7 +1374,6 @@ int synth_event_delete(const char *event
+ * occur.
+ */
+ tracing_reset_all_online_cpus();
+- mutex_unlock(&trace_types_lock);
+ }
+
+ return ret;