git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.15-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 3 Jan 2024 10:18:56 +0000 (11:18 +0100)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 3 Jan 2024 10:18:56 +0000 (11:18 +0100)
added patches:
ksmbd-fix-slab-out-of-bounds-in-smb_strndup_from_utf16.patch
ring-buffer-fix-wake-ups-when-buffer_percent-is-set-to-100.patch
ring-buffer-remove-useless-update-to-write_stamp-in-rb_try_to_discard.patch
tracing-fix-blocked-reader-of-snapshot-buffer.patch

queue-5.15/ksmbd-fix-slab-out-of-bounds-in-smb_strndup_from_utf16.patch [new file with mode: 0644]
queue-5.15/ring-buffer-fix-wake-ups-when-buffer_percent-is-set-to-100.patch [new file with mode: 0644]
queue-5.15/ring-buffer-remove-useless-update-to-write_stamp-in-rb_try_to_discard.patch [new file with mode: 0644]
queue-5.15/series
queue-5.15/tracing-fix-blocked-reader-of-snapshot-buffer.patch [new file with mode: 0644]

diff --git a/queue-5.15/ksmbd-fix-slab-out-of-bounds-in-smb_strndup_from_utf16.patch b/queue-5.15/ksmbd-fix-slab-out-of-bounds-in-smb_strndup_from_utf16.patch
new file mode 100644 (file)
index 0000000..1dedd45
--- /dev/null
@@ -0,0 +1,56 @@
+From d10c77873ba1e9e6b91905018e29e196fd5f863d Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Wed, 20 Dec 2023 15:52:11 +0900
+Subject: ksmbd: fix slab-out-of-bounds in smb_strndup_from_utf16()
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit d10c77873ba1e9e6b91905018e29e196fd5f863d upstream.
+
+If ->NameOffset/Length is bigger than ->CreateContextsOffset/Length,
+ksmbd_check_message doesn't validate the request buffer correctly,
+so a slab-out-of-bounds warning from calling smb_strndup_from_utf16()
+in smb2_open() could happen. If ->NameLength is non-zero, set the larger
+of the two sums (Name and CreateContext size) as the offset and length of
+the data area.
+
+Reported-by: Yang Chaoming <lometsj@live.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ksmbd/smb2misc.c |   15 ++++++++++++---
+ 1 file changed, 12 insertions(+), 3 deletions(-)
+
+--- a/fs/ksmbd/smb2misc.c
++++ b/fs/ksmbd/smb2misc.c
+@@ -107,16 +107,25 @@ static int smb2_get_data_area_len(unsign
+               break;
+       case SMB2_CREATE:
+       {
++              unsigned short int name_off =
++                      le16_to_cpu(((struct smb2_create_req *)hdr)->NameOffset);
++              unsigned short int name_len =
++                      le16_to_cpu(((struct smb2_create_req *)hdr)->NameLength);
++
+               if (((struct smb2_create_req *)hdr)->CreateContextsLength) {
+                       *off = le32_to_cpu(((struct smb2_create_req *)
+                               hdr)->CreateContextsOffset);
+                       *len = le32_to_cpu(((struct smb2_create_req *)
+                               hdr)->CreateContextsLength);
+-                      break;
++                      if (!name_len)
++                              break;
++
++                      if (name_off + name_len < (u64)*off + *len)
++                              break;
+               }
+-              *off = le16_to_cpu(((struct smb2_create_req *)hdr)->NameOffset);
+-              *len = le16_to_cpu(((struct smb2_create_req *)hdr)->NameLength);
++              *off = name_off;
++              *len = name_len;
+               break;
+       }
+       case SMB2_QUERY_INFO:
diff --git a/queue-5.15/ring-buffer-fix-wake-ups-when-buffer_percent-is-set-to-100.patch b/queue-5.15/ring-buffer-fix-wake-ups-when-buffer_percent-is-set-to-100.patch
new file mode 100644 (file)
index 0000000..c6ce708
--- /dev/null
@@ -0,0 +1,73 @@
+From 623b1f896fa8a669a277ee5a258307a16c7377a3 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Google)" <rostedt@goodmis.org>
+Date: Tue, 26 Dec 2023 12:59:02 -0500
+Subject: ring-buffer: Fix wake ups when buffer_percent is set to 100
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+commit 623b1f896fa8a669a277ee5a258307a16c7377a3 upstream.
+
+The tracefs file "buffer_percent" is to allow user space to set a
+water-mark on how much of the tracing ring buffer needs to be filled in
+order to wake up a blocked reader.
+
+ 0 - is to wait until any data is in the buffer
+ 1 - is to wait for 1% of the sub buffers to be filled
+ 50 - would be half of the sub buffers are filled with data
+ 100 - is not to wake the waiter until the ring buffer is completely full
+
+Unfortunately the test for being full was:
+
+       dirty = ring_buffer_nr_dirty_pages(buffer, cpu);
+       return (dirty * 100) > (full * nr_pages);
+
+Where "full" is the value for "buffer_percent".
+
+There are two issues with the above when full == 100.
+
+1. dirty * 100 > 100 * nr_pages will never be true
+   That is, the above is basically saying that if the user sets
+   buffer_percent to 100, more pages need to be dirty than exist in the
+   ring buffer!
+
+2. The page that the writer is on is never considered dirty, as dirty
+   pages are only those that are full. When the writer goes to a new
+   sub-buffer, it clears the contents of that sub-buffer.
+
+That is, even if the check was ">=" it would still not be equal as the
+most pages that can be considered "dirty" is nr_pages - 1.
+
+To fix this, add one to dirty and use ">=" in the compare.
+
+Link: https://lore.kernel.org/linux-trace-kernel/20231226125902.4a057f1d@gandalf.local.home
+
+Cc: stable@vger.kernel.org
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Acked-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Fixes: 03329f9939781 ("tracing: Add tracefs file buffer_percentage")
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/ring_buffer.c |    9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -897,9 +897,14 @@ static __always_inline bool full_hit(str
+       if (!nr_pages || !full)
+               return true;
+-      dirty = ring_buffer_nr_dirty_pages(buffer, cpu);
++      /*
++       * Add one as dirty will never equal nr_pages, as the sub-buffer
++       * that the writer is on is not counted as dirty.
++       * This is needed if "buffer_percent" is set to 100.
++       */
++      dirty = ring_buffer_nr_dirty_pages(buffer, cpu) + 1;
+-      return (dirty * 100) > (full * nr_pages);
++      return (dirty * 100) >= (full * nr_pages);
+ }
+ /*
diff --git a/queue-5.15/ring-buffer-remove-useless-update-to-write_stamp-in-rb_try_to_discard.patch b/queue-5.15/ring-buffer-remove-useless-update-to-write_stamp-in-rb_try_to_discard.patch
new file mode 100644 (file)
index 0000000..ee829d6
--- /dev/null
@@ -0,0 +1,144 @@
+From 083e9f65bd215582bf8f6a920db729fadf16704f Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Google)" <rostedt@goodmis.org>
+Date: Fri, 15 Dec 2023 08:18:10 -0500
+Subject: ring-buffer: Remove useless update to write_stamp in rb_try_to_discard()
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+commit 083e9f65bd215582bf8f6a920db729fadf16704f upstream.
+
+When filtering is enabled, a temporary buffer is created to place the
+content of the trace event output so that the filter logic can decide
+from the trace event output if the trace event should be filtered out or
+not. If it is to be filtered out, the content in the temporary buffer is
+simply discarded, otherwise it is written into the trace buffer.
+
+But if an interrupt were to come in while a previous event was using that
+temporary buffer, the event written by the interrupt would actually go
+into the ring buffer itself to prevent corrupting the data on the
+temporary buffer. If the event is to be filtered out, the event in the
+ring buffer is discarded, or if it fails to discard because another event
+were to have already come in, it is turned into padding.
+
+The update to the write_stamp in the rb_try_to_discard() happens after a
+fix was made to force the next event after the discard to use an absolute
+timestamp by setting the before_stamp to zero so it does not match the
+write_stamp (which causes an event to use the absolute timestamp).
+
+But there's an effort in rb_try_to_discard() to put back the write_stamp
+to what it was before the event was added. But this is useless and
+wasteful because nothing is going to be using that write_stamp for
+calculations as it still will not match the before_stamp.
+
+Remove this useless update, and in doing so, we remove another
+cmpxchg64()!
+
+Also update the comments to reflect this change as well as remove some
+extra white space in another comment.
+
+Link: https://lore.kernel.org/linux-trace-kernel/20231215081810.1f4f38fe@rorschach.local.home
+
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Cc: Joel Fernandes <joel@joelfernandes.org>
+Cc: Vincent Donnefort   <vdonnefort@google.com>
+Fixes: b2dd797543cf ("ring-buffer: Force absolute timestamp on discard of event")
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/ring_buffer.c |   47 ++++++++++-----------------------------------
+ 1 file changed, 11 insertions(+), 36 deletions(-)
+
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -2942,25 +2942,6 @@ static unsigned rb_calculate_event_lengt
+       return length;
+ }
+-static u64 rb_time_delta(struct ring_buffer_event *event)
+-{
+-      switch (event->type_len) {
+-      case RINGBUF_TYPE_PADDING:
+-              return 0;
+-
+-      case RINGBUF_TYPE_TIME_EXTEND:
+-              return rb_event_time_stamp(event);
+-
+-      case RINGBUF_TYPE_TIME_STAMP:
+-              return 0;
+-
+-      case RINGBUF_TYPE_DATA:
+-              return event->time_delta;
+-      default:
+-              return 0;
+-      }
+-}
+-
+ static inline int
+ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
+                 struct ring_buffer_event *event)
+@@ -2969,8 +2950,6 @@ rb_try_to_discard(struct ring_buffer_per
+       struct buffer_page *bpage;
+       unsigned long index;
+       unsigned long addr;
+-      u64 write_stamp;
+-      u64 delta;
+       new_index = rb_event_index(event);
+       old_index = new_index + rb_event_ts_length(event);
+@@ -2979,14 +2958,10 @@ rb_try_to_discard(struct ring_buffer_per
+       bpage = READ_ONCE(cpu_buffer->tail_page);
+-      delta = rb_time_delta(event);
+-
+-      if (!rb_time_read(&cpu_buffer->write_stamp, &write_stamp))
+-              return 0;
+-
+-      /* Make sure the write stamp is read before testing the location */
+-      barrier();
+-
++      /*
++       * Make sure the tail_page is still the same and
++       * the next write location is the end of this event
++       */
+       if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
+               unsigned long write_mask =
+                       local_read(&bpage->write) & ~RB_WRITE_MASK;
+@@ -2997,20 +2972,20 @@ rb_try_to_discard(struct ring_buffer_per
+                * to make sure that the next event adds an absolute
+                * value and does not rely on the saved write stamp, which
+                * is now going to be bogus.
++               *
++               * By setting the before_stamp to zero, the next event
++               * is not going to use the write_stamp and will instead
++               * create an absolute timestamp. This means there's no
++               * reason to update the write_stamp!
+                */
+               rb_time_set(&cpu_buffer->before_stamp, 0);
+-              /* Something came in, can't discard */
+-              if (!rb_time_cmpxchg(&cpu_buffer->write_stamp,
+-                                     write_stamp, write_stamp - delta))
+-                      return 0;
+-
+               /*
+                * If an event were to come in now, it would see that the
+                * write_stamp and the before_stamp are different, and assume
+                * that this event just added itself before updating
+                * the write stamp. The interrupting event will fix the
+-               * write stamp for us, and use the before stamp as its delta.
++               * write stamp for us, and use an absolute timestamp.
+                */
+               /*
+@@ -3449,7 +3424,7 @@ static void check_buffer(struct ring_buf
+               return;
+       /*
+-       * If this interrupted another event, 
++       * If this interrupted another event,
+        */
+       if (atomic_inc_return(this_cpu_ptr(&checking)) != 1)
+               goto out;
index 667253f8b8e20390a2226f0bf6e51c020f37685a..6b161482809009442f9e0f842aba74504ce9c07d 100644 (file)
@@ -84,3 +84,7 @@ smb-client-fix-oob-in-smbcalcsize.patch
 bluetooth-af_bluetooth-fix-use-after-free-in-bt_sock.patch
 device-property-add-const-qualifier-to-device_get_ma.patch
 mm-filemap-avoid-buffered-read-write-race-to-read-inconsistent-data.patch
+ring-buffer-fix-wake-ups-when-buffer_percent-is-set-to-100.patch
+tracing-fix-blocked-reader-of-snapshot-buffer.patch
+ring-buffer-remove-useless-update-to-write_stamp-in-rb_try_to_discard.patch
+ksmbd-fix-slab-out-of-bounds-in-smb_strndup_from_utf16.patch
diff --git a/queue-5.15/tracing-fix-blocked-reader-of-snapshot-buffer.patch b/queue-5.15/tracing-fix-blocked-reader-of-snapshot-buffer.patch
new file mode 100644 (file)
index 0000000..d6a4207
--- /dev/null
@@ -0,0 +1,105 @@
+From 39a7dc23a1ed0fe81141792a09449d124c5953bd Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Google)" <rostedt@goodmis.org>
+Date: Thu, 28 Dec 2023 09:51:49 -0500
+Subject: tracing: Fix blocked reader of snapshot buffer
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+commit 39a7dc23a1ed0fe81141792a09449d124c5953bd upstream.
+
+If an application blocks on the snapshot or snapshot_raw files, expecting
+to be woken up when a snapshot occurs, it will not happen. Or it may
+happen with an unexpected result.
+
+That result is that the application will be reading the main buffer
+instead of the snapshot buffer. That is because when the snapshot occurs,
+the main and snapshot buffers are swapped. But the reader has a descriptor
+still pointing to the buffer that it originally connected to.
+
+This is fine for the main buffer readers, as they may be blocked waiting
+for a watermark to be hit, and when a snapshot occurs, the data that the
+main readers want is now on the snapshot buffer.
+
+But for waiters of the snapshot buffer, they are waiting for an event to
+occur that will trigger the snapshot and they can then consume it quickly
+to save the snapshot before the next snapshot occurs. But to do this, they
+need to read the new snapshot buffer, not the old one that is now
+receiving new data.
+
+Also, it does not make sense to have a watermark "buffer_percent" on the
+snapshot buffer, as the snapshot buffer is static and does not receive new
+data except all at once.
+
+Link: https://lore.kernel.org/linux-trace-kernel/20231228095149.77f5b45d@gandalf.local.home
+
+Cc: stable@vger.kernel.org
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Fixes: debdd57f5145f ("tracing: Make a snapshot feature available from userspace")
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/ring_buffer.c |    3 ++-
+ kernel/trace/trace.c       |   20 +++++++++++++++++---
+ 2 files changed, 19 insertions(+), 4 deletions(-)
+
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -964,7 +964,8 @@ void ring_buffer_wake_waiters(struct tra
+       /* make sure the waiters see the new index */
+       smp_wmb();
+-      rb_wake_up_waiters(&rbwork->work);
++      /* This can be called in any context */
++      irq_work_queue(&rbwork->work);
+ }
+ /**
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -1837,6 +1837,9 @@ update_max_tr(struct trace_array *tr, st
+       __update_max_tr(tr, tsk, cpu);
+       arch_spin_unlock(&tr->max_lock);
++
++      /* Any waiters on the old snapshot buffer need to wake up */
++      ring_buffer_wake_waiters(tr->array_buffer.buffer, RING_BUFFER_ALL_CPUS);
+ }
+ /**
+@@ -1888,12 +1891,23 @@ update_max_tr_single(struct trace_array
+ static int wait_on_pipe(struct trace_iterator *iter, int full)
+ {
++      int ret;
++
+       /* Iterators are static, they should be filled or empty */
+       if (trace_buffer_iter(iter, iter->cpu_file))
+               return 0;
+-      return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
+-                              full);
++      ret = ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file, full);
++
++#ifdef CONFIG_TRACER_MAX_TRACE
++      /*
++       * Make sure this is still the snapshot buffer, as if a snapshot were
++       * to happen, this would now be the main buffer.
++       */
++      if (iter->snapshot)
++              iter->array_buffer = &iter->tr->max_buffer;
++#endif
++      return ret;
+ }
+ #ifdef CONFIG_FTRACE_STARTUP_TEST
+@@ -8383,7 +8397,7 @@ tracing_buffers_splice_read(struct file
+               wait_index = READ_ONCE(iter->wait_index);
+-              ret = wait_on_pipe(iter, iter->tr->buffer_percent);
++              ret = wait_on_pipe(iter, iter->snapshot ? 0 : iter->tr->buffer_percent);
+               if (ret)
+                       goto out;