--- /dev/null
+From 0689d0b4c56f13eb7e57ed05293341faf8c9e709 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Feb 2023 09:36:04 -0800
+Subject: maple_tree: fix write memory barrier of nodes once dead for RCU mode
+
+From: Liam R. Howlett <Liam.Howlett@oracle.com>
+
+[ Upstream commit c13af03de46ba27674dd9fb31a17c0d480081139 ]
+
+During the development of the maple tree, the strategy of freeing multiple
+nodes changed and, in the process, the pivots were reused to store
+pointers to dead nodes. To ensure the readers see accurate pivots, the
+writers need to mark the nodes as dead and call smp_wmb() to ensure any
+readers can identify the node as dead before using the pivot values.
+
+There were two places where the old method of marking the node as dead
+without smp_wmb() was being used, which resulted in RCU readers seeing
+the wrong pivot value before seeing the node was dead. Fix this race
+condition by using mte_set_node_dead() which has the smp_wmb() call to
+ensure the race is closed.
+
+Add a WARN_ON() to the ma_free_rcu() call to ensure all nodes being freed
+are marked as dead to ensure there are no other call paths besides the two
+updated paths.
+
+This is necessary for the RCU mode of the maple tree.
+
+Link: https://lkml.kernel.org/r/20230227173632.3292573-6-surenb@google.com
+Fixes: 54a611b60590 ("Maple Tree: add new data structure")
+Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
+Signed-off-by: Suren Baghdasaryan <surenb@google.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ lib/maple_tree.c | 7 +++++--
+ tools/testing/radix-tree/maple.c | 16 ++++++++++++++++
+ 2 files changed, 21 insertions(+), 2 deletions(-)
+
+diff --git a/lib/maple_tree.c b/lib/maple_tree.c
+index fb452873914f2..022573f499578 100644
+--- a/lib/maple_tree.c
++++ b/lib/maple_tree.c
+@@ -178,7 +178,7 @@ static void mt_free_rcu(struct rcu_head *head)
+ */
+ static void ma_free_rcu(struct maple_node *node)
+ {
+- node->parent = ma_parent_ptr(node);
++ WARN_ON(node->parent != ma_parent_ptr(node));
+ call_rcu(&node->rcu, mt_free_rcu);
+ }
+
+@@ -1785,8 +1785,10 @@ static inline void mas_replace(struct ma_state *mas, bool advanced)
+ rcu_assign_pointer(slots[offset], mas->node);
+ }
+
+- if (!advanced)
++ if (!advanced) {
++ mte_set_node_dead(old_enode);
+ mas_free(mas, old_enode);
++ }
+ }
+
+ /*
+@@ -4221,6 +4223,7 @@ static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas)
+ done:
+ mas_leaf_set_meta(mas, newnode, dst_pivots, maple_leaf_64, new_end);
+ if (in_rcu) {
++ mte_set_node_dead(mas->node);
+ mas->node = mt_mk_node(newnode, wr_mas->type);
+ mas_replace(mas, false);
+ } else {
+diff --git a/tools/testing/radix-tree/maple.c b/tools/testing/radix-tree/maple.c
+index 1f36bc1c5d362..2a16939cf0285 100644
+--- a/tools/testing/radix-tree/maple.c
++++ b/tools/testing/radix-tree/maple.c
+@@ -108,6 +108,7 @@ static noinline void check_new_node(struct maple_tree *mt)
+ MT_BUG_ON(mt, mn->slot[1] != NULL);
+ MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+
++ mn->parent = ma_parent_ptr(mn);
+ ma_free_rcu(mn);
+ mas.node = MAS_START;
+ mas_nomem(&mas, GFP_KERNEL);
+@@ -160,6 +161,7 @@ static noinline void check_new_node(struct maple_tree *mt)
+ MT_BUG_ON(mt, mas_allocated(&mas) != i);
+ MT_BUG_ON(mt, !mn);
+ MT_BUG_ON(mt, not_empty(mn));
++ mn->parent = ma_parent_ptr(mn);
+ ma_free_rcu(mn);
+ }
+
+@@ -192,6 +194,7 @@ static noinline void check_new_node(struct maple_tree *mt)
+ MT_BUG_ON(mt, not_empty(mn));
+ MT_BUG_ON(mt, mas_allocated(&mas) != i - 1);
+ MT_BUG_ON(mt, !mn);
++ mn->parent = ma_parent_ptr(mn);
+ ma_free_rcu(mn);
+ }
+
+@@ -210,6 +213,7 @@ static noinline void check_new_node(struct maple_tree *mt)
+ mn = mas_pop_node(&mas);
+ MT_BUG_ON(mt, not_empty(mn));
+ MT_BUG_ON(mt, mas_allocated(&mas) != j - 1);
++ mn->parent = ma_parent_ptr(mn);
+ ma_free_rcu(mn);
+ }
+ MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+@@ -233,6 +237,7 @@ static noinline void check_new_node(struct maple_tree *mt)
+ MT_BUG_ON(mt, mas_allocated(&mas) != i - j);
+ mn = mas_pop_node(&mas);
+ MT_BUG_ON(mt, not_empty(mn));
++ mn->parent = ma_parent_ptr(mn);
+ ma_free_rcu(mn);
+ MT_BUG_ON(mt, mas_allocated(&mas) != i - j - 1);
+ }
+@@ -269,6 +274,7 @@ static noinline void check_new_node(struct maple_tree *mt)
+ mn = mas_pop_node(&mas); /* get the next node. */
+ MT_BUG_ON(mt, mn == NULL);
+ MT_BUG_ON(mt, not_empty(mn));
++ mn->parent = ma_parent_ptr(mn);
+ ma_free_rcu(mn);
+ }
+ MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+@@ -294,6 +300,7 @@ static noinline void check_new_node(struct maple_tree *mt)
+ mn = mas_pop_node(&mas2); /* get the next node. */
+ MT_BUG_ON(mt, mn == NULL);
+ MT_BUG_ON(mt, not_empty(mn));
++ mn->parent = ma_parent_ptr(mn);
+ ma_free_rcu(mn);
+ }
+ MT_BUG_ON(mt, mas_allocated(&mas2) != 0);
+@@ -334,10 +341,12 @@ static noinline void check_new_node(struct maple_tree *mt)
+ MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 2);
+ mn = mas_pop_node(&mas);
+ MT_BUG_ON(mt, not_empty(mn));
++ mn->parent = ma_parent_ptr(mn);
+ ma_free_rcu(mn);
+ for (i = 1; i <= MAPLE_ALLOC_SLOTS + 1; i++) {
+ mn = mas_pop_node(&mas);
+ MT_BUG_ON(mt, not_empty(mn));
++ mn->parent = ma_parent_ptr(mn);
+ ma_free_rcu(mn);
+ }
+ MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+@@ -375,6 +384,7 @@ static noinline void check_new_node(struct maple_tree *mt)
+ mas_node_count(&mas, i); /* Request */
+ mas_nomem(&mas, GFP_KERNEL); /* Fill request */
+ mn = mas_pop_node(&mas); /* get the next node. */
++ mn->parent = ma_parent_ptr(mn);
+ ma_free_rcu(mn);
+ mas_destroy(&mas);
+
+@@ -382,10 +392,13 @@ static noinline void check_new_node(struct maple_tree *mt)
+ mas_node_count(&mas, i); /* Request */
+ mas_nomem(&mas, GFP_KERNEL); /* Fill request */
+ mn = mas_pop_node(&mas); /* get the next node. */
++ mn->parent = ma_parent_ptr(mn);
+ ma_free_rcu(mn);
+ mn = mas_pop_node(&mas); /* get the next node. */
++ mn->parent = ma_parent_ptr(mn);
+ ma_free_rcu(mn);
+ mn = mas_pop_node(&mas); /* get the next node. */
++ mn->parent = ma_parent_ptr(mn);
+ ma_free_rcu(mn);
+ mas_destroy(&mas);
+ }
+@@ -35369,6 +35382,7 @@ static noinline void check_prealloc(struct maple_tree *mt)
+ MT_BUG_ON(mt, allocated != 1 + height * 3);
+ mn = mas_pop_node(&mas);
+ MT_BUG_ON(mt, mas_allocated(&mas) != allocated - 1);
++ mn->parent = ma_parent_ptr(mn);
+ ma_free_rcu(mn);
+ MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
+ mas_destroy(&mas);
+@@ -35386,6 +35400,7 @@ static noinline void check_prealloc(struct maple_tree *mt)
+ mas_destroy(&mas);
+ allocated = mas_allocated(&mas);
+ MT_BUG_ON(mt, allocated != 0);
++ mn->parent = ma_parent_ptr(mn);
+ ma_free_rcu(mn);
+
+ MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
+@@ -35756,6 +35771,7 @@ void farmer_tests(void)
+ tree.ma_root = mt_mk_node(node, maple_leaf_64);
+ mt_dump(&tree);
+
++ node->parent = ma_parent_ptr(node);
+ ma_free_rcu(node);
+
+ /* Check things that will make lockdep angry */
+--
+2.39.2
+
--- /dev/null
+From 92d196fc8f06fc604d4fe1b2802b41041d7b3639 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Feb 2023 12:28:52 -0500
+Subject: tracing: Add trace_array_puts() to write into instance
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+[ Upstream commit d503b8f7474fe7ac616518f7fc49773cbab49f36 ]
+
+Add a generic trace_array_puts() that can be used to "trace_puts()" into
+an allocated trace_array instance. This is just another variant of
+trace_array_printk().
+
+Link: https://lkml.kernel.org/r/20230207173026.584717290@goodmis.org
+
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Reviewed-by: Ross Zwisler <zwisler@google.com>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Stable-dep-of: 9d52727f8043 ("tracing: Have tracing_snapshot_instance_cond() write errors to the appropriate instance")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/trace.h | 12 ++++++++++++
+ kernel/trace/trace.c | 27 +++++++++++++++++----------
+ 2 files changed, 29 insertions(+), 10 deletions(-)
+
+diff --git a/include/linux/trace.h b/include/linux/trace.h
+index 80ffda8717491..2a70a447184c9 100644
+--- a/include/linux/trace.h
++++ b/include/linux/trace.h
+@@ -33,6 +33,18 @@ struct trace_array;
+ int register_ftrace_export(struct trace_export *export);
+ int unregister_ftrace_export(struct trace_export *export);
+
++/**
++ * trace_array_puts - write a constant string into the trace buffer.
++ * @tr: The trace array to write to
++ * @str: The constant string to write
++ */
++#define trace_array_puts(tr, str) \
++ ({ \
++ str ? __trace_array_puts(tr, _THIS_IP_, str, strlen(str)) : -1; \
++ })
++int __trace_array_puts(struct trace_array *tr, unsigned long ip,
++ const char *str, int size);
++
+ void trace_printk_init_buffers(void);
+ __printf(3, 4)
+ int trace_array_printk(struct trace_array *tr, unsigned long ip,
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 1a931896ba042..13c46787ba5fa 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -1001,13 +1001,8 @@ __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *ev
+ ring_buffer_unlock_commit(buffer);
+ }
+
+-/**
+- * __trace_puts - write a constant string into the trace buffer.
+- * @ip: The address of the caller
+- * @str: The constant string to write
+- * @size: The size of the string.
+- */
+-int __trace_puts(unsigned long ip, const char *str, int size)
++int __trace_array_puts(struct trace_array *tr, unsigned long ip,
++ const char *str, int size)
+ {
+ struct ring_buffer_event *event;
+ struct trace_buffer *buffer;
+@@ -1015,7 +1010,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
+ unsigned int trace_ctx;
+ int alloc;
+
+- if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
++ if (!(tr->trace_flags & TRACE_ITER_PRINTK))
+ return 0;
+
+ if (unlikely(tracing_selftest_running || tracing_disabled))
+@@ -1024,7 +1019,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
+ alloc = sizeof(*entry) + size + 2; /* possible \n added */
+
+ trace_ctx = tracing_gen_ctx();
+- buffer = global_trace.array_buffer.buffer;
++ buffer = tr->array_buffer.buffer;
+ ring_buffer_nest_start(buffer);
+ event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
+ trace_ctx);
+@@ -1046,11 +1041,23 @@ int __trace_puts(unsigned long ip, const char *str, int size)
+ entry->buf[size] = '\0';
+
+ __buffer_unlock_commit(buffer, event);
+- ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
++ ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
+ out:
+ ring_buffer_nest_end(buffer);
+ return size;
+ }
++EXPORT_SYMBOL_GPL(__trace_array_puts);
++
++/**
++ * __trace_puts - write a constant string into the trace buffer.
++ * @ip: The address of the caller
++ * @str: The constant string to write
++ * @size: The size of the string.
++ */
++int __trace_puts(unsigned long ip, const char *str, int size)
++{
++ return __trace_array_puts(&global_trace, ip, str, size);
++}
+ EXPORT_SYMBOL_GPL(__trace_puts);
+
+ /**
+--
+2.39.2
+
--- /dev/null
+From a141ffdfc797f5533fffbe9a06da53ec49ca401e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Apr 2023 22:21:14 -0400
+Subject: tracing: Have tracing_snapshot_instance_cond() write errors to the
+ appropriate instance
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+[ Upstream commit 9d52727f8043cfda241ae96896628d92fa9c50bb ]
+
+If a trace instance has a failure with its snapshot code, the error
+message is to be written to that instance's buffer. But currently, the
+message is written to the top level buffer. Worse yet, it may also disable
+the top level buffer and not the instance that had the issue.
+
+Link: https://lkml.kernel.org/r/20230405022341.688730321@goodmis.org
+
+Cc: stable@vger.kernel.org
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Ross Zwisler <zwisler@google.com>
+Fixes: 2824f50332486 ("tracing: Make the snapshot trigger work with instances")
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/trace.c | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 13c46787ba5fa..13b324f008256 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -1111,22 +1111,22 @@ static void tracing_snapshot_instance_cond(struct trace_array *tr,
+ unsigned long flags;
+
+ if (in_nmi()) {
+- internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
+- internal_trace_puts("*** snapshot is being ignored ***\n");
++ trace_array_puts(tr, "*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
++ trace_array_puts(tr, "*** snapshot is being ignored ***\n");
+ return;
+ }
+
+ if (!tr->allocated_snapshot) {
+- internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
+- internal_trace_puts("*** stopping trace here! ***\n");
+- tracing_off();
++ trace_array_puts(tr, "*** SNAPSHOT NOT ALLOCATED ***\n");
++ trace_array_puts(tr, "*** stopping trace here! ***\n");
++ tracer_tracing_off(tr);
+ return;
+ }
+
+ /* Note, snapshot can not be used when the tracer uses it */
+ if (tracer->use_max_tr) {
+- internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
+- internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
++ trace_array_puts(tr, "*** LATENCY TRACER ACTIVE ***\n");
++ trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
+ return;
+ }
+
+--
+2.39.2
+