--- /dev/null
+From 0e069265bce5a40c4eee52e2364bbbd4dabee94a Mon Sep 17 00:00:00 2001
+From: Lino Sanfilippo <l.sanfilippo@kunbus.com>
+Date: Thu, 24 Nov 2022 14:55:35 +0100
+Subject: tpm, tpm_tis: Claim locality in interrupt handler
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Lino Sanfilippo <l.sanfilippo@kunbus.com>
+
+commit 0e069265bce5a40c4eee52e2364bbbd4dabee94a upstream.
+
+Writing the TPM_INT_STATUS register in the interrupt handler to clear the
+interrupts only has effect if a locality is held. Since this is not
+guaranteed at the time the interrupt is fired, claim the locality
+explicitly in the handler.
+
+Signed-off-by: Lino Sanfilippo <l.sanfilippo@kunbus.com>
+Tested-by: Michael Niewöhner <linux@mniewoehner.de>
+Tested-by: Jarkko Sakkinen <jarkko@kernel.org>
+Reviewed-by: Jarkko Sakkinen <jarkko@kernel.org>
+Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/char/tpm/tpm_tis_core.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/char/tpm/tpm_tis_core.c
++++ b/drivers/char/tpm/tpm_tis_core.c
+@@ -731,7 +731,9 @@ static irqreturn_t tis_int_handler(int d
+ wake_up_interruptible(&priv->int_queue);
+
+ /* Clear interrupts handled with TPM_EOI */
++ tpm_tis_request_locality(chip, 0);
+ rc = tpm_tis_write32(priv, TPM_INT_STATUS(priv->locality), interrupt);
++ tpm_tis_relinquish_locality(chip, 0);
+ if (rc < 0)
+ return IRQ_NONE;
+
--- /dev/null
+From e18eb8783ec4949adebc7d7b0fdb65f65bfeefd9 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Google)" <rostedt@goodmis.org>
+Date: Wed, 23 Nov 2022 14:25:57 -0500
+Subject: tracing: Add tracing_reset_all_online_cpus_unlocked() function
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+commit e18eb8783ec4949adebc7d7b0fdb65f65bfeefd9 upstream.
+
+Currently the tracing_reset_all_online_cpus() requires the
+trace_types_lock held. But only one caller of this function actually has
+that lock held before calling it, and the other just takes the lock so
+that it can call it. More users of this function are needed where the lock
+is not held.
+
+Add a tracing_reset_all_online_cpus_unlocked() function for the one use
+case that calls it without being held, and also add a lockdep_assert to
+make sure it is held when called.
+
+Then have tracing_reset_all_online_cpus() take the lock internally, such
+that callers do not need to worry about taking it.
+
+Link: https://lkml.kernel.org/r/20221123192741.658273220@goodmis.org
+
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Zheng Yejian <zhengyejian1@huawei.com>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace.c | 11 ++++++++++-
+ kernel/trace/trace.h | 1 +
+ kernel/trace/trace_events.c | 2 +-
+ kernel/trace/trace_events_synth.c | 2 --
+ 4 files changed, 12 insertions(+), 4 deletions(-)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -2175,10 +2175,12 @@ void tracing_reset_online_cpus(struct ar
+ }
+
+ /* Must have trace_types_lock held */
+-void tracing_reset_all_online_cpus(void)
++void tracing_reset_all_online_cpus_unlocked(void)
+ {
+ struct trace_array *tr;
+
++ lockdep_assert_held(&trace_types_lock);
++
+ list_for_each_entry(tr, &ftrace_trace_arrays, list) {
+ if (!tr->clear_trace)
+ continue;
+@@ -2190,6 +2192,13 @@ void tracing_reset_all_online_cpus(void)
+ }
+ }
+
++void tracing_reset_all_online_cpus(void)
++{
++ mutex_lock(&trace_types_lock);
++ tracing_reset_all_online_cpus_unlocked();
++ mutex_unlock(&trace_types_lock);
++}
++
+ /*
+ * The tgid_map array maps from pid to tgid; i.e. the value stored at index i
+ * is the tgid last observed corresponding to pid=i.
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -580,6 +580,7 @@ int tracing_is_enabled(void);
+ void tracing_reset_online_cpus(struct array_buffer *buf);
+ void tracing_reset_current(int cpu);
+ void tracing_reset_all_online_cpus(void);
++void tracing_reset_all_online_cpus_unlocked(void);
+ int tracing_open_generic(struct inode *inode, struct file *filp);
+ int tracing_open_generic_tr(struct inode *inode, struct file *filp);
+ bool tracing_is_disabled(void);
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -2974,7 +2974,7 @@ static void trace_module_remove_events(s
+ * over from this module may be passed to the new module events and
+ * unexpected results may occur.
+ */
+- tracing_reset_all_online_cpus();
++ tracing_reset_all_online_cpus_unlocked();
+ }
+
+ static int trace_module_notify(struct notifier_block *self,
+--- a/kernel/trace/trace_events_synth.c
++++ b/kernel/trace/trace_events_synth.c
+@@ -1416,7 +1416,6 @@ int synth_event_delete(const char *event
+ mutex_unlock(&event_mutex);
+
+ if (mod) {
+- mutex_lock(&trace_types_lock);
+ /*
+ * It is safest to reset the ring buffer if the module
+ * being unloaded registered any events that were
+@@ -1428,7 +1427,6 @@ int synth_event_delete(const char *event
+ * occur.
+ */
+ tracing_reset_all_online_cpus();
+- mutex_unlock(&trace_types_lock);
+ }
+
+ return ret;