5.15-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 18 Dec 2023 07:40:29 +0000 (08:40 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 18 Dec 2023 07:40:29 +0000 (08:40 +0100)
added patches:
ring-buffer-do-not-try-to-put-back-write_stamp.patch
ring-buffer-do-not-update-before-stamp-when-switching-sub-buffers.patch
ring-buffer-fix-a-race-in-rb_time_cmpxchg-for-32-bit-archs.patch
ring-buffer-fix-memory-leak-of-free-page.patch
ring-buffer-fix-writing-to-the-buffer-with-max_data_size.patch
ring-buffer-have-saved-event-hold-the-entire-event.patch
tracing-update-snapshot-buffer-on-resize-if-it-is-allocated.patch

queue-5.15/ring-buffer-do-not-try-to-put-back-write_stamp.patch [new file with mode: 0644]
queue-5.15/ring-buffer-do-not-update-before-stamp-when-switching-sub-buffers.patch [new file with mode: 0644]
queue-5.15/ring-buffer-fix-a-race-in-rb_time_cmpxchg-for-32-bit-archs.patch [new file with mode: 0644]
queue-5.15/ring-buffer-fix-memory-leak-of-free-page.patch [new file with mode: 0644]
queue-5.15/ring-buffer-fix-writing-to-the-buffer-with-max_data_size.patch [new file with mode: 0644]
queue-5.15/ring-buffer-have-saved-event-hold-the-entire-event.patch [new file with mode: 0644]
queue-5.15/series
queue-5.15/tracing-update-snapshot-buffer-on-resize-if-it-is-allocated.patch [new file with mode: 0644]

diff --git a/queue-5.15/ring-buffer-do-not-try-to-put-back-write_stamp.patch b/queue-5.15/ring-buffer-do-not-try-to-put-back-write_stamp.patch
new file mode 100644 (file)
index 0000000..ce51413
--- /dev/null
@@ -0,0 +1,98 @@
+From dd939425707898da992e59ab0fcfae4652546910 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Google)" <rostedt@goodmis.org>
+Date: Thu, 14 Dec 2023 22:29:21 -0500
+Subject: ring-buffer: Do not try to put back write_stamp
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+commit dd939425707898da992e59ab0fcfae4652546910 upstream.
+
+If an update to an event is interrupted by another event between the time
+the initial event allocated its buffer and where it wrote to the
+write_stamp, the code tries to reset the write stamp back to what it had
+just overwritten. It detects the overwrite by checking the before_stamp:
+if the before_stamp no longer matches what the event wrote to it before
+allocating its space, the event knows it was overwritten.
+
+To put back the write_stamp, it uses the before_stamp it read. The problem
+here is that by writing the before_stamp to the write_stamp it makes the
+two equal again, which means that the write_stamp can be considered valid
+as the last timestamp written to the ring buffer. But this is not
+necessarily true. The event that interrupted this event could itself have
+been interrupted, and can end up leaving with an invalid write_stamp. But
+if that happens and control returns to this context, which uses the
+before_stamp to update the write_stamp again, it can incorrectly make the
+write_stamp look valid, causing later events to have incorrect time
+stamps.
+
+As it is OK to leave this function with an invalid write_stamp (one that
+doesn't match the before_stamp), there's no reason to try to make it valid
+again in this case. If this race happens, then just leave with the invalid
+write_stamp and the next event to come along will just add an absolute
+timestamp and validate everything again.
+
+Bonus points: This gets rid of another cmpxchg64!
+
+Link: https://lore.kernel.org/linux-trace-kernel/20231214222921.193037a7@gandalf.local.home
+
+Cc: stable@vger.kernel.org
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Cc: Joel Fernandes <joel@joelfernandes.org>
+Cc: Vincent Donnefort <vdonnefort@google.com>
+Fixes: a389d86f7fd09 ("ring-buffer: Have nested events still record running time stamp")
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/ring_buffer.c |   29 ++++++-----------------------
+ 1 file changed, 6 insertions(+), 23 deletions(-)
+
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -3568,14 +3568,14 @@ __rb_reserve_next(struct ring_buffer_per
+       }
+       if (likely(tail == w)) {
+-              u64 save_before;
+-              bool s_ok;
+-
+               /* Nothing interrupted us between A and C */
+  /*D*/                rb_time_set(&cpu_buffer->write_stamp, info->ts);
+-              barrier();
+- /*E*/                s_ok = rb_time_read(&cpu_buffer->before_stamp, &save_before);
+-              RB_WARN_ON(cpu_buffer, !s_ok);
++              /*
++               * If something came in between C and D, the write stamp
++               * may now not be in sync. But that's fine as the before_stamp
++               * will be different and then next event will just be forced
++               * to use an absolute timestamp.
++               */
+               if (likely(!(info->add_timestamp &
+                            (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE))))
+                       /* This did not interrupt any time update */
+@@ -3583,24 +3583,7 @@ __rb_reserve_next(struct ring_buffer_per
+               else
+                       /* Just use full timestamp for interrupting event */
+                       info->delta = info->ts;
+-              barrier();
+               check_buffer(cpu_buffer, info, tail);
+-              if (unlikely(info->ts != save_before)) {
+-                      /* SLOW PATH - Interrupted between C and E */
+-
+-                      a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
+-                      RB_WARN_ON(cpu_buffer, !a_ok);
+-
+-                      /* Write stamp must only go forward */
+-                      if (save_before > info->after) {
+-                              /*
+-                               * We do not care about the result, only that
+-                               * it gets updated atomically.
+-                               */
+-                              (void)rb_time_cmpxchg(&cpu_buffer->write_stamp,
+-                                                    info->after, save_before);
+-                      }
+-              }
+       } else {
+               u64 ts;
+               /* SLOW PATH - Interrupted between A and C */
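
To illustrate the reasoning above, here is a minimal user-space sketch (not the kernel code; struct and function names are made up for illustration) of why an out-of-sync write_stamp is harmless: the next writer notices that before_stamp and write_stamp disagree and falls back to an absolute timestamp, which re-synchronizes both stamps.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the ring buffer's per-CPU stamps. */
struct stamps {
        uint64_t before_stamp;  /* written before an event is reserved  */
        uint64_t write_stamp;   /* written after an event is committed  */
};

/*
 * Sketch of the fast-path decision: if the two stamps agree, the new
 * event can store a small delta; if they do not (a previous writer was
 * interrupted and left write_stamp stale), force an absolute timestamp,
 * which brings both stamps back in sync.
 */
static uint64_t reserve_event(struct stamps *s, uint64_t ts, bool *absolute)
{
        uint64_t value;

        *absolute = (s->before_stamp != s->write_stamp);
        value = *absolute ? ts : ts - s->write_stamp;

        s->before_stamp = ts;
        s->write_stamp = ts;
        return value;
}

int main(void)
{
        /* A previously interrupted writer left the stamps out of sync. */
        struct stamps s = { .before_stamp = 150, .write_stamp = 100 };
        bool absolute;
        uint64_t v = reserve_event(&s, 200, &absolute);

        printf("absolute=%d value=%llu\n", absolute, (unsigned long long)v);
        return 0;
}
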
diff --git a/queue-5.15/ring-buffer-do-not-update-before-stamp-when-switching-sub-buffers.patch b/queue-5.15/ring-buffer-do-not-update-before-stamp-when-switching-sub-buffers.patch
new file mode 100644 (file)
index 0000000..aea559f
--- /dev/null
@@ -0,0 +1,60 @@
+From 9e45e39dc249c970d99d2681f6bcb55736fd725c Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Google)" <rostedt@goodmis.org>
+Date: Mon, 11 Dec 2023 11:44:20 -0500
+Subject: ring-buffer: Do not update before stamp when switching sub-buffers
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+commit 9e45e39dc249c970d99d2681f6bcb55736fd725c upstream.
+
+The ring buffer timestamps are synchronized by two timestamp placeholders.
+One is the "before_stamp" and the other is the "write_stamp" (sometimes
+referred to as the "after stamp", but only in the comments). These two
+stamps are key to knowing how to handle nested events coming in with a
+lockless system.
+
+When moving across sub-buffers, the before stamp is updated but the write
+stamp is not. There's an effort to put the before stamp back to something
+that seems logical in case there are nested events. But as the current event
+is about to cross sub-buffers, and so will any new nested event that happens,
+updating the before stamp is useless, and could even introduce new race
+conditions.
+
+The first event on a sub-buffer simply uses the sub-buffer's timestamp
+and keeps a "delta" of zero. The "before_stamp" and "write_stamp" are not
+used in the algorithm in this case. There's no reason to try to fix the
+before_stamp when this happens.
+
+As a bonus, it removes a cmpxchg() when crossing sub-buffers!
+
+Link: https://lore.kernel.org/linux-trace-kernel/20231211114420.36dde01b@gandalf.local.home
+
+Cc: stable@vger.kernel.org
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Fixes: a389d86f7fd09 ("ring-buffer: Have nested events still record running time stamp")
+Reviewed-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/ring_buffer.c |    9 +--------
+ 1 file changed, 1 insertion(+), 8 deletions(-)
+
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -3558,14 +3558,7 @@ __rb_reserve_next(struct ring_buffer_per
+       /* See if we shot pass the end of this buffer page */
+       if (unlikely(write > BUF_PAGE_SIZE)) {
+-              /* before and after may now different, fix it up*/
+-              b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before);
+-              a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
+-              if (a_ok && b_ok && info->before != info->after)
+-                      (void)rb_time_cmpxchg(&cpu_buffer->before_stamp,
+-                                            info->before, info->after);
+-              if (a_ok && b_ok)
+-                      check_buffer(cpu_buffer, info, CHECK_FULL_PAGE);
++              check_buffer(cpu_buffer, info, CHECK_FULL_PAGE);
+               return rb_move_tail(cpu_buffer, tail, info);
+       }
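
A similar user-space sketch (simplified; names and sizes are illustrative, not the kernel's) of why nothing needs repairing when a write spills past the end of a sub-buffer: the first event on the next sub-buffer takes the sub-buffer's own absolute timestamp and a delta of zero, so before_stamp and write_stamp are never consulted for it.

#include <stdint.h>
#include <stdio.h>

#define SUB_BUF_SIZE 4096  /* stand-in for the sub-buffer data size */

struct sub_buffer {
        uint64_t page_ts;   /* absolute timestamp of this sub-buffer     */
        uint32_t used;      /* bytes already written to this sub-buffer  */
};

/*
 * Reserve 'len' bytes at time 'ts'. If the event does not fit, move to
 * a fresh sub-buffer: the new sub-buffer records 'ts' itself and the
 * event's delta is simply zero, so no before_stamp/write_stamp repair
 * is needed on the way out.
 */
static uint64_t reserve(struct sub_buffer *sb, uint32_t len, uint64_t ts,
                        uint64_t write_stamp)
{
        if (sb->used + len > SUB_BUF_SIZE) {
                sb->page_ts = ts;   /* sub-buffer carries the absolute time */
                sb->used = len;
                return 0;           /* first event on the page: delta == 0  */
        }
        sb->used += len;
        return ts - write_stamp;    /* normal case: delta from write_stamp  */
}

int main(void)
{
        struct sub_buffer sb = { .page_ts = 1000, .used = 4000 };
        uint64_t delta = reserve(&sb, 200, 1500, 1400);

        printf("delta=%llu page_ts=%llu\n",
               (unsigned long long)delta, (unsigned long long)sb.page_ts);
        return 0;
}
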
diff --git a/queue-5.15/ring-buffer-fix-a-race-in-rb_time_cmpxchg-for-32-bit-archs.patch b/queue-5.15/ring-buffer-fix-a-race-in-rb_time_cmpxchg-for-32-bit-archs.patch
new file mode 100644 (file)
index 0000000..be3faf0
--- /dev/null
@@ -0,0 +1,74 @@
+From fff88fa0fbc7067ba46dde570912d63da42c59a9 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Google)" <rostedt@goodmis.org>
+Date: Tue, 12 Dec 2023 11:53:01 -0500
+Subject: ring-buffer: Fix a race in rb_time_cmpxchg() for 32 bit archs
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+commit fff88fa0fbc7067ba46dde570912d63da42c59a9 upstream.
+
+Mathieu Desnoyers pointed out an issue in the rb_time_cmpxchg() for 32 bit
+architectures. That is:
+
+ static bool rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
+ {
+       unsigned long cnt, top, bottom, msb;
+       unsigned long cnt2, top2, bottom2, msb2;
+       u64 val;
+
+       /* The cmpxchg always fails if it interrupted an update */
+        if (!__rb_time_read(t, &val, &cnt2))
+                return false;
+
+        if (val != expect)
+                return false;
+
+<<<< interrupted here!
+
+        cnt = local_read(&t->cnt);
+
+The problem is that the synchronization counter in the rb_time_t is read
+*after* the value of the timestamp is read. That means if an interrupt
+were to come in between the value being read and the counter being read,
+it can change both the value and the counter, and the interrupted process
+would be clueless about it!
+
+The counter needs to be read first and then the value. That way it is easy
+to tell if the value is stale or not. If the counter hasn't been updated,
+then the value is still good.
+
+Link: https://lore.kernel.org/linux-trace-kernel/20231211201324.652870-1-mathieu.desnoyers@efficios.com/
+Link: https://lore.kernel.org/linux-trace-kernel/20231212115301.7a9c9a64@gandalf.local.home
+
+Cc: stable@vger.kernel.org
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Fixes: 10464b4aa605e ("ring-buffer: Add rb_time_t 64 bit operations for speeding up 32 bit")
+Reported-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/ring_buffer.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -697,6 +697,9 @@ static int rb_time_cmpxchg(rb_time_t *t,
+       unsigned long cnt2, top2, bottom2;
+       u64 val;
++      /* Any interruptions in this function should cause a failure */
++      cnt = local_read(&t->cnt);
++
+       /* The cmpxchg always fails if it interrupted an update */
+        if (!__rb_time_read(t, &val, &cnt2))
+                return false;
+@@ -704,7 +707,6 @@ static int rb_time_cmpxchg(rb_time_t *t,
+        if (val != expect)
+                return false;
+-       cnt = local_read(&t->cnt);
+        if ((cnt & 3) != cnt2)
+                return false;
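
The ordering requirement is the usual sequence-counter pattern: snapshot the counter, read the data, then compare the counter again. A minimal single-threaded model (illustrative names, no real atomics) of why reading the counter first lets an interrupted read be detected and the cmpxchg reported as a failure:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for rb_time_t on 32-bit: value + update counter. */
struct rb_time_model {
        unsigned long cnt;  /* bumped on every update                       */
        uint64_t val;       /* the 64-bit timestamp, split up in the kernel */
};

/*
 * Sketch of the fixed ordering: snapshot the counter *before* reading
 * the value. If an update (e.g. from an interrupt) happens anywhere
 * after that snapshot, the final counter comparison fails and the
 * cmpxchg reports failure instead of silently acting on a stale value.
 */
static bool time_cmpxchg_model(struct rb_time_model *t, uint64_t expect,
                               uint64_t set)
{
        unsigned long cnt = t->cnt;  /* read the counter first */
        uint64_t val = t->val;       /* then read the value    */

        if (val != expect)
                return false;
        if (cnt != t->cnt)           /* counter moved: we were interrupted */
                return false;

        t->val = set;
        t->cnt++;
        return true;
}

int main(void)
{
        struct rb_time_model t = { .cnt = 0, .val = 100 };

        printf("%d\n", time_cmpxchg_model(&t, 100, 200));  /* succeeds      */
        printf("%d\n", time_cmpxchg_model(&t, 100, 300));  /* stale expect  */
        return 0;
}
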
diff --git a/queue-5.15/ring-buffer-fix-memory-leak-of-free-page.patch b/queue-5.15/ring-buffer-fix-memory-leak-of-free-page.patch
new file mode 100644 (file)
index 0000000..8749c6f
--- /dev/null
@@ -0,0 +1,48 @@
+From 17d801758157bec93f26faaf5ff1a8b9a552d67a Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Google)" <rostedt@goodmis.org>
+Date: Sun, 10 Dec 2023 22:12:50 -0500
+Subject: ring-buffer: Fix memory leak of free page
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+commit 17d801758157bec93f26faaf5ff1a8b9a552d67a upstream.
+
+Reading the ring buffer does a swap of a sub-buffer within the ring buffer
+with an empty sub-buffer. This allows the reader to have full access to the
+content of the sub-buffer that was swapped out without having to worry
+about contention with the writer.
+
+The readers call ring_buffer_alloc_read_page() to allocate a page that
+will be used to swap with the ring buffer. When the code is finished with
+the reader page, it calls ring_buffer_free_read_page(). Instead of freeing
+the page, it stores it as a spare. The next call to
+ring_buffer_alloc_read_page() will return this spare instead of calling
+into the memory management system to allocate a new page.
+
+Unfortunately, when the ring buffer is freed, this spare page is not
+freed, causing a memory leak.
+
+Link: https://lore.kernel.org/linux-trace-kernel/20231210221250.7b9cc83c@rorschach.local.home
+
+Cc: stable@vger.kernel.org
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Fixes: 73a757e63114d ("ring-buffer: Return reader page back into existing ring buffer")
+Acked-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/ring_buffer.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -1765,6 +1765,8 @@ static void rb_free_cpu_buffer(struct ri
+               free_buffer_page(bpage);
+       }
++      free_page((unsigned long)cpu_buffer->free_page);
++
+       kfree(cpu_buffer);
+ }
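
A small stand-alone sketch of the spare-page pattern this fixes, using plain malloc()/free() and hypothetical names: an allocator that caches one returned buffer as a spare must also release that spare in its teardown path, otherwise the last buffer handed back is leaked.

#include <stdlib.h>

#define PAGE_SZ 4096

struct reader_cache {
        void *spare;   /* one cached "reader page", or NULL */
};

/* Hand out the cached page if there is one, otherwise allocate. */
static void *alloc_read_page(struct reader_cache *c)
{
        void *page = c->spare;

        if (page) {
                c->spare = NULL;
                return page;
        }
        return malloc(PAGE_SZ);
}

/* Keep the returned page as the spare instead of freeing it. */
static void free_read_page(struct reader_cache *c, void *page)
{
        if (!c->spare)
                c->spare = page;
        else
                free(page);
}

/* Teardown must release the spare too, which is what the patch adds. */
static void destroy_cache(struct reader_cache *c)
{
        free(c->spare);
        c->spare = NULL;
}

int main(void)
{
        struct reader_cache c = { .spare = NULL };
        void *p = alloc_read_page(&c);

        free_read_page(&c, p);   /* p is now cached as the spare       */
        destroy_cache(&c);       /* without this, the spare would leak */
        return 0;
}
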
diff --git a/queue-5.15/ring-buffer-fix-writing-to-the-buffer-with-max_data_size.patch b/queue-5.15/ring-buffer-fix-writing-to-the-buffer-with-max_data_size.patch
new file mode 100644 (file)
index 0000000..6587218
--- /dev/null
@@ -0,0 +1,87 @@
+From b3ae7b67b87fed771fa5bf95389df06b0433603e Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Google)" <rostedt@goodmis.org>
+Date: Tue, 12 Dec 2023 11:16:17 -0500
+Subject: ring-buffer: Fix writing to the buffer with max_data_size
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+commit b3ae7b67b87fed771fa5bf95389df06b0433603e upstream.
+
+The maximum ring buffer data size is the maximum size of data that can be
+recorded on the ring buffer. Events must be smaller than the sub buffer
+data size minus any meta data. This size is checked before trying to
+allocate from the ring buffer because the allocation assumes that the size
+will fit on the sub buffer.
+
+The maximum size was calculated as the size of a sub buffer page (which is
+currently PAGE_SIZE minus the sub buffer header) minus the size of the
+meta data of an individual event. But it did not account for the possible
+addition of a time stamp for events that are added far enough apart that
+the event meta data can't hold the time delta.
+
+When an event is added that is greater than the current BUF_MAX_DATA_SIZE
+minus the size of a time stamp, but still less than or equal to
+BUF_MAX_DATA_SIZE, the ring buffer would go into an infinite loop, looking
+for a page that can hold the event. Luckily, there is a check for this
+loop: after 1000 iterations, a warning is emitted and the ring buffer is
+disabled. But this should never happen.
+
+This can happen when a large event is added first, or after a long period
+where an absolute timestamp is prefixed to the event, increasing its size
+by 8 bytes. This passes the check and then goes into the algorithm that
+causes the infinite loop.
+
+For events that are the first event on the sub-buffer, it does not need to
+add a timestamp, because the sub-buffer itself contains an absolute
+timestamp, and adding one is redundant.
+
+The fix is to check if the event is to be the first event on the
+sub-buffer, and if it is, then do not add a timestamp.
+
+This also fixes the 32-bit case of adding a timestamp when a read of
+before_stamp or write_stamp is interrupted. There's still no need to add
+that timestamp if the event is going to be the first event on the sub buffer.
+
+Also, if the buffer has "time_stamp_abs" set, then also check if the
+length plus the timestamp is greater than the BUF_MAX_DATA_SIZE.
+
+Link: https://lore.kernel.org/all/20231212104549.58863438@gandalf.local.home/
+Link: https://lore.kernel.org/linux-trace-kernel/20231212071837.5fdd6c13@gandalf.local.home
+Link: https://lore.kernel.org/linux-trace-kernel/20231212111617.39e02849@gandalf.local.home
+
+Cc: stable@vger.kernel.org
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Fixes: a4543a2fa9ef3 ("ring-buffer: Get timestamp after event is allocated")
+Fixes: 58fbc3c63275c ("ring-buffer: Consolidate add_timestamp to remove some branches")
+Reported-by: Kent Overstreet <kent.overstreet@linux.dev> # (on IRC)
+Acked-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/ring_buffer.c |    7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -3535,7 +3535,10 @@ __rb_reserve_next(struct ring_buffer_per
+                * absolute timestamp.
+                * Don't bother if this is the start of a new page (w == 0).
+                */
+-              if (unlikely(!a_ok || !b_ok || (info->before != info->after && w))) {
++              if (!w) {
++                      /* Use the sub-buffer timestamp */
++                      info->delta = 0;
++              } else if (unlikely(!a_ok || !b_ok || info->before != info->after)) {
+                       info->add_timestamp |= RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND;
+                       info->length += RB_LEN_TIME_EXTEND;
+               } else {
+@@ -3686,6 +3689,8 @@ rb_reserve_next_event(struct trace_buffe
+       if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) {
+               add_ts_default = RB_ADD_STAMP_ABSOLUTE;
+               info.length += RB_LEN_TIME_EXTEND;
++              if (info.length > BUF_MAX_DATA_SIZE)
++                      goto out_fail;
+       } else {
+               add_ts_default = RB_ADD_STAMP_NONE;
+       }
diff --git a/queue-5.15/ring-buffer-have-saved-event-hold-the-entire-event.patch b/queue-5.15/ring-buffer-have-saved-event-hold-the-entire-event.patch
new file mode 100644 (file)
index 0000000..b90bd59
--- /dev/null
@@ -0,0 +1,54 @@
+From b049525855fdd0024881c9b14b8fbec61c3f53d3 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Google)" <rostedt@goodmis.org>
+Date: Tue, 12 Dec 2023 07:25:58 -0500
+Subject: ring-buffer: Have saved event hold the entire event
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+commit b049525855fdd0024881c9b14b8fbec61c3f53d3 upstream.
+
+For the ring buffer iterator (non-consuming read), the event needs to be
+copied into the iterator buffer to make sure that a writer does not
+overwrite it while the user is reading it. If a write happens during the
+copy, the buffer is simply discarded.
+
+But the temp buffer itself was not big enough. The allocation of the
+buffer was only BUF_MAX_DATA_SIZE, which is the maximum data size that can
+be passed into the ring buffer and saved. But the temp buffer needs to
+hold the meta data as well. That would be BUF_PAGE_SIZE and not
+BUF_MAX_DATA_SIZE.
+
+Link: https://lore.kernel.org/linux-trace-kernel/20231212072558.61f76493@gandalf.local.home
+
+Cc: stable@vger.kernel.org
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Fixes: 785888c544e04 ("ring-buffer: Have rb_iter_head_event() handle concurrent writer")
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/ring_buffer.c |    5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -2367,7 +2367,7 @@ rb_iter_head_event(struct ring_buffer_it
+        */
+       barrier();
+-      if ((iter->head + length) > commit || length > BUF_MAX_DATA_SIZE)
++      if ((iter->head + length) > commit || length > BUF_PAGE_SIZE)
+               /* Writer corrupted the read? */
+               goto reset;
+@@ -5066,7 +5066,8 @@ ring_buffer_read_prepare(struct trace_bu
+       if (!iter)
+               return NULL;
+-      iter->event = kmalloc(BUF_MAX_DATA_SIZE, flags);
++      /* Holds the entire event: data and meta data */
++      iter->event = kmalloc(BUF_PAGE_SIZE, flags);
+       if (!iter->event) {
+               kfree(iter);
+               return NULL;
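
A sketch (hypothetical types and sizes) of the sizing rule behind this fix: the iterator's copy target has to hold the largest complete on-buffer event, header plus payload, so it must be sized by the sub-buffer size rather than by the maximum payload alone.

#include <stdlib.h>

/* Illustrative layout: every stored event = small header + payload. */
struct event_header {
        unsigned int type_len;
        unsigned int time_delta;
};

#define BUF_PAGE_SZ      4096                      /* whole sub-buffer */
#define BUF_MAX_DATA_SZ (BUF_PAGE_SZ - 2 * sizeof(struct event_header))

struct iter {
        void *event;   /* copy of the event being read: header + payload */
};

/*
 * The copy target must be able to hold the largest on-buffer event,
 * which is header plus payload, so size it by the sub-buffer size
 * (BUF_PAGE_SZ here) rather than by the maximum payload alone
 * (BUF_MAX_DATA_SZ).
 */
static struct iter *iter_alloc(void)
{
        struct iter *it = malloc(sizeof(*it));

        if (!it)
                return NULL;
        it->event = malloc(BUF_PAGE_SZ);   /* not BUF_MAX_DATA_SZ */
        if (!it->event) {
                free(it);
                return NULL;
        }
        return it;
}

int main(void)
{
        struct iter *it = iter_alloc();

        if (it) {
                free(it->event);
                free(it);
        }
        return 0;
}
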
diff --git a/queue-5.15/series b/queue-5.15/series
index 88b4e55823662ddc3e52b62e940e6f12f20a7f1c..3fe26e38d0d62c8995bb806f81c32f52b20e84a6 100644 (file)
@@ -75,3 +75,10 @@ ext4-prevent-the-normalized-size-from-exceeding-ext_max_blocks.patch
 arm64-mm-always-make-sw-dirty-ptes-hw-dirty-in-pte_modify.patch
 team-fix-use-after-free-when-an-option-instance-allocation-fails.patch
 drm-amdgpu-sdma5.2-add-begin-end_use-ring-callbacks.patch
+ring-buffer-fix-memory-leak-of-free-page.patch
+tracing-update-snapshot-buffer-on-resize-if-it-is-allocated.patch
+ring-buffer-do-not-update-before-stamp-when-switching-sub-buffers.patch
+ring-buffer-have-saved-event-hold-the-entire-event.patch
+ring-buffer-fix-writing-to-the-buffer-with-max_data_size.patch
+ring-buffer-fix-a-race-in-rb_time_cmpxchg-for-32-bit-archs.patch
+ring-buffer-do-not-try-to-put-back-write_stamp.patch
diff --git a/queue-5.15/tracing-update-snapshot-buffer-on-resize-if-it-is-allocated.patch b/queue-5.15/tracing-update-snapshot-buffer-on-resize-if-it-is-allocated.patch
new file mode 100644 (file)
index 0000000..d64c36e
--- /dev/null
@@ -0,0 +1,59 @@
+From d06aff1cb13d2a0d52b48e605462518149c98c81 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Google)" <rostedt@goodmis.org>
+Date: Sun, 10 Dec 2023 22:54:47 -0500
+Subject: tracing: Update snapshot buffer on resize if it is allocated
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+commit d06aff1cb13d2a0d52b48e605462518149c98c81 upstream.
+
+The snapshot buffer is to mimic the main buffer so that when a snapshot is
+needed, the snapshot and main buffer are swapped. When the snapshot buffer
+is first created, it is set to the minimal size that the ring buffer may
+be at and still be functional. When it is allocated, it becomes the same
+size as the main ring buffer, and when the main ring buffer changes in
+size, the snapshot buffer should change size as well.
+
+Currently, the resize only updates the snapshot buffer if it's used by the
+current tracer (i.e. the preemptirqsoff tracer). But it needs to be updated
+anytime it is allocated.
+
+When changing the size of the main buffer, instead of looking to see if
+the current tracer is utilizing the snapshot buffer, just check if it is
+allocated to know if it should be updated or not.
+
+Also fix a typo in the comment just above the code change.
+
+Link: https://lore.kernel.org/linux-trace-kernel/20231210225447.48476a6a@rorschach.local.home
+
+Cc: stable@vger.kernel.org
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Fixes: ad909e21bbe69 ("tracing: Add internal tracing_snapshot() functions")
+Reviewed-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -6243,7 +6243,7 @@ static int __tracing_resize_ring_buffer(
+       if (!tr->array_buffer.buffer)
+               return 0;
+-      /* Do not allow tracing while resizng ring buffer */
++      /* Do not allow tracing while resizing ring buffer */
+       tracing_stop_tr(tr);
+       ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
+@@ -6251,7 +6251,7 @@ static int __tracing_resize_ring_buffer(
+               goto out_start;
+ #ifdef CONFIG_TRACER_MAX_TRACE
+-      if (!tr->current_trace->use_max_tr)
++      if (!tr->allocated_snapshot)
+               goto out;
+       ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
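
A sketch of the condition change, using a made-up model of the relevant trace_array fields: the decision to resize the snapshot (max) buffer should key off whether it is allocated, not whether the current tracer happens to swap into it.

#include <stdbool.h>
#include <stdio.h>

/* Minimal stand-in for the pieces of struct trace_array involved here. */
struct trace_array_model {
        unsigned long main_size;
        unsigned long snap_size;
        bool allocated_snapshot;      /* snapshot buffer exists         */
        bool tracer_uses_max_tr;      /* current tracer swaps into it   */
};

/* Resize the main buffer, and the snapshot buffer whenever it exists. */
static void resize(struct trace_array_model *tr, unsigned long size)
{
        tr->main_size = size;

        /*
         * The old logic keyed off tracer_uses_max_tr and could leave an
         * allocated snapshot buffer at the wrong size.
         */
        if (tr->allocated_snapshot)
                tr->snap_size = size;
}

int main(void)
{
        struct trace_array_model tr = {
                .main_size = 1 << 20, .snap_size = 1 << 20,
                .allocated_snapshot = true, .tracer_uses_max_tr = false,
        };

        resize(&tr, 2 << 20);
        printf("main=%lu snap=%lu\n", tr.main_size, tr.snap_size);
        return 0;
}
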