git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.17-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 13 Oct 2025 08:37:02 +0000 (10:37 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 13 Oct 2025 08:37:02 +0000 (10:37 +0200)
added patches:
tracing-fix-tracing_mark_raw_write-to-use-buf-and-not-ubuf.patch
tracing-stop-fortify-string-from-warning-in-tracing_mark_raw_write.patch

queue-6.17/series
queue-6.17/tracing-fix-tracing_mark_raw_write-to-use-buf-and-not-ubuf.patch [new file with mode: 0644]
queue-6.17/tracing-stop-fortify-string-from-warning-in-tracing_mark_raw_write.patch [new file with mode: 0644]

index 30024c472df5fcec97b3c6674c9f5d7deb835c7e..06dd8842bb942e6b0922c81ccffc4d46da7c3366 100644 (file)
@@ -519,3 +519,5 @@ tracing-fix-race-condition-in-kprobe-initialization-causing-null-pointer-derefer
 tracing-fix-wakeup-tracers-on-failure-of-acquiring-calltime.patch
 tracing-fix-irqoff-tracers-on-failure-of-acquiring-calltime.patch
 tracing-have-trace_marker-use-per-cpu-data-to-read-user-space.patch
+tracing-fix-tracing_mark_raw_write-to-use-buf-and-not-ubuf.patch
+tracing-stop-fortify-string-from-warning-in-tracing_mark_raw_write.patch
diff --git a/queue-6.17/tracing-fix-tracing_mark_raw_write-to-use-buf-and-not-ubuf.patch b/queue-6.17/tracing-fix-tracing_mark_raw_write-to-use-buf-and-not-ubuf.patch
new file mode 100644 (file)
index 0000000..e83bdea
--- /dev/null
@@ -0,0 +1,56 @@
+From bda745ee8fbb63330d8f2f2ea4157229a5df959e Mon Sep 17 00:00:00 2001
+From: Steven Rostedt <rostedt@goodmis.org>
+Date: Fri, 10 Oct 2025 23:51:42 -0400
+Subject: tracing: Fix tracing_mark_raw_write() to use buf and not ubuf
+
+From: Steven Rostedt <rostedt@goodmis.org>
+
+commit bda745ee8fbb63330d8f2f2ea4157229a5df959e upstream.
+
+The fix to use a per CPU buffer to read user space tested only the writes
+to trace_marker. But it appears that the selftests are missing tests to
+the trace_marker_raw file. The trace_marker_raw file is used by applications
+that write data structures and not strings into the file, and the tools
+read the raw ring buffer to process the structures it writes.
+
+The fix that reads the per CPU buffers passes the new per CPU buffer to
+the trace_marker file writes, but the update to the trace_marker_raw write
+read the data from user space into the per CPU buffer, but then still
+passed the user space address to the function that records the data.
+
+Pass in the per CPU buffer and not the user space address.
+
+TODO: Add a test to better test trace_marker_raw.
+
+Cc: stable@vger.kernel.org
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Link: https://lore.kernel.org/20251011035243.386098147@kernel.org
+Fixes: 64cf7d058a00 ("tracing: Have trace_marker use per-cpu data to read user space")
+Reported-by: syzbot+9a2ede1643175f350105@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/68e973f5.050a0220.1186a4.0010.GAE@google.com/
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -7497,12 +7497,12 @@ tracing_mark_raw_write(struct file *filp
+       if (tr == &global_trace) {
+               guard(rcu)();
+               list_for_each_entry_rcu(tr, &marker_copies, marker_list) {
+-                      written = write_raw_marker_to_buffer(tr, ubuf, cnt);
++                      written = write_raw_marker_to_buffer(tr, buf, cnt);
+                       if (written < 0)
+                               break;
+               }
+       } else {
+-              written = write_raw_marker_to_buffer(tr, ubuf, cnt);
++              written = write_raw_marker_to_buffer(tr, buf, cnt);
+       }
+       return written;
diff --git a/queue-6.17/tracing-stop-fortify-string-from-warning-in-tracing_mark_raw_write.patch b/queue-6.17/tracing-stop-fortify-string-from-warning-in-tracing_mark_raw_write.patch
new file mode 100644 (file)
index 0000000..d74b8ec
--- /dev/null
@@ -0,0 +1,120 @@
+From 54b91e54b113d4f15ab023a44f508251db6e22e7 Mon Sep 17 00:00:00 2001
+From: Steven Rostedt <rostedt@goodmis.org>
+Date: Sat, 11 Oct 2025 11:20:32 -0400
+Subject: tracing: Stop fortify-string from warning in tracing_mark_raw_write()
+
+From: Steven Rostedt <rostedt@goodmis.org>
+
+commit 54b91e54b113d4f15ab023a44f508251db6e22e7 upstream.
+
+The way tracing_mark_raw_write() records its data is that it has the
+following structure:
+
+  struct {
+       struct trace_entry;
+       int id;
+       char buf[];
+  };
+
+But memcpy(&entry->id, buf, size) triggers the following warning when the
+size is greater than the id:
+
+ ------------[ cut here ]------------
+ memcpy: detected field-spanning write (size 6) of single field "&entry->id" at kernel/trace/trace.c:7458 (size 4)
+ WARNING: CPU: 7 PID: 995 at kernel/trace/trace.c:7458 write_raw_marker_to_buffer.isra.0+0x1f9/0x2e0
+ Modules linked in:
+ CPU: 7 UID: 0 PID: 995 Comm: bash Not tainted 6.17.0-test-00007-g60b82183e78a-dirty #211 PREEMPT(voluntary)
+ Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.17.0-debian-1.17.0-1 04/01/2014
+ RIP: 0010:write_raw_marker_to_buffer.isra.0+0x1f9/0x2e0
+ Code: 04 00 75 a7 b9 04 00 00 00 48 89 de 48 89 04 24 48 c7 c2 e0 b1 d1 b2 48 c7 c7 40 b2 d1 b2 c6 05 2d 88 6a 04 01 e8 f7 e8 bd ff <0f> 0b 48 8b 04 24 e9 76 ff ff ff 49 8d 7c 24 04 49 8d 5c 24 08 48
+ RSP: 0018:ffff888104c3fc78 EFLAGS: 00010292
+ RAX: 0000000000000000 RBX: 0000000000000006 RCX: 0000000000000000
+ RDX: 0000000000000000 RSI: 1ffffffff6b363b4 RDI: 0000000000000001
+ RBP: ffff888100058a00 R08: ffffffffb041d459 R09: ffffed1020987f40
+ R10: 0000000000000007 R11: 0000000000000001 R12: ffff888100bb9010
+ R13: 0000000000000000 R14: 00000000000003e3 R15: ffff888134800000
+ FS:  00007fa61d286740(0000) GS:ffff888286cad000(0000) knlGS:0000000000000000
+ CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 0000560d28d509f1 CR3: 00000001047a4006 CR4: 0000000000172ef0
+ Call Trace:
+  <TASK>
+  tracing_mark_raw_write+0x1fe/0x290
+  ? __pfx_tracing_mark_raw_write+0x10/0x10
+  ? security_file_permission+0x50/0xf0
+  ? rw_verify_area+0x6f/0x4b0
+  vfs_write+0x1d8/0xdd0
+  ? __pfx_vfs_write+0x10/0x10
+  ? __pfx_css_rstat_updated+0x10/0x10
+  ? count_memcg_events+0xd9/0x410
+  ? fdget_pos+0x53/0x5e0
+  ksys_write+0x182/0x200
+  ? __pfx_ksys_write+0x10/0x10
+  ? do_user_addr_fault+0x4af/0xa30
+  do_syscall_64+0x63/0x350
+  entry_SYSCALL_64_after_hwframe+0x76/0x7e
+ RIP: 0033:0x7fa61d318687
+ Code: 48 89 fa 4c 89 df e8 58 b3 00 00 8b 93 08 03 00 00 59 5e 48 83 f8 fc 74 1a 5b c3 0f 1f 84 00 00 00 00 00 48 8b 44 24 10 0f 05 <5b> c3 0f 1f 80 00 00 00 00 83 e2 39 83 fa 08 75 de e8 23 ff ff ff
+ RSP: 002b:00007ffd87fe0120 EFLAGS: 00000202 ORIG_RAX: 0000000000000001
+ RAX: ffffffffffffffda RBX: 00007fa61d286740 RCX: 00007fa61d318687
+ RDX: 0000000000000006 RSI: 0000560d28d509f0 RDI: 0000000000000001
+ RBP: 0000560d28d509f0 R08: 0000000000000000 R09: 0000000000000000
+ R10: 0000000000000000 R11: 0000000000000202 R12: 0000000000000006
+ R13: 00007fa61d4715c0 R14: 00007fa61d46ee80 R15: 0000000000000000
+  </TASK>
+ ---[ end trace 0000000000000000 ]---
+
+This is because fortify string sees that the size of entry->id is only 4
+bytes, but it is writing more than that. But this is OK as the
+dynamic_array is allocated to handle that copy.
+
+The size allocated on the ring buffer was actually a bit too big:
+
+  size = sizeof(*entry) + cnt;
+
+But cnt includes the 'id' and the buffer data, so adding cnt to the size
+of *entry actually allocates too much on the ring buffer.
+
+Change the allocation to:
+
+  size = struct_size(entry, buf, cnt - sizeof(entry->id));
+
+and the memcpy() to unsafe_memcpy() with an added justification.
+
+Cc: stable@vger.kernel.org
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Link: https://lore.kernel.org/20251011112032.77be18e4@gandalf.local.home
+Fixes: 64cf7d058a00 ("tracing: Have trace_marker use per-cpu data to read user space")
+Reported-by: syzbot+9a2ede1643175f350105@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/68e973f5.050a0220.1186a4.0010.GAE@google.com/
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace.c |    8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -7441,7 +7441,8 @@ static ssize_t write_raw_marker_to_buffe
+       ssize_t written;
+       size_t size;
+-      size = sizeof(*entry) + cnt;
++      /* cnt includes both the entry->id and the data behind it. */
++      size = struct_size(entry, buf, cnt - sizeof(entry->id));
+       buffer = tr->array_buffer.buffer;
+@@ -7455,7 +7456,10 @@ static ssize_t write_raw_marker_to_buffe
+               return -EBADF;
+       entry = ring_buffer_event_data(event);
+-      memcpy(&entry->id, buf, cnt);
++      unsafe_memcpy(&entry->id, buf, cnt,
++                    "id and content already reserved on ring buffer"
++                    "'buf' includes the 'id' and the data."
++                    "'entry' was allocated with cnt from 'id'.");
+       written = cnt;
+       __buffer_unlock_commit(buffer, event);