6.18-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Sun, 8 Feb 2026 12:11:09 +0000 (13:11 +0100)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Sun, 8 Feb 2026 12:11:09 +0000 (13:11 +0100)
added patches:
binder-fix-br_frozen_reply-error-log.patch
binder-fix-uaf-in-binder_netlink_report.patch
binderfs-fix-ida_alloc_max-upper-bound.patch
rust_binder-add-additional-alignment-checks.patch
rust_binder-correctly-handle-fda-objects-of-length-zero.patch
rust_binderfs-fix-ida_alloc_max-upper-bound.patch
tracing-fix-ftrace-event-field-alignments.patch

queue-6.18/binder-fix-br_frozen_reply-error-log.patch [new file with mode: 0644]
queue-6.18/binder-fix-uaf-in-binder_netlink_report.patch [new file with mode: 0644]
queue-6.18/binderfs-fix-ida_alloc_max-upper-bound.patch [new file with mode: 0644]
queue-6.18/rust_binder-add-additional-alignment-checks.patch [new file with mode: 0644]
queue-6.18/rust_binder-correctly-handle-fda-objects-of-length-zero.patch [new file with mode: 0644]
queue-6.18/rust_binderfs-fix-ida_alloc_max-upper-bound.patch [new file with mode: 0644]
queue-6.18/series
queue-6.18/tracing-fix-ftrace-event-field-alignments.patch [new file with mode: 0644]

diff --git a/queue-6.18/binder-fix-br_frozen_reply-error-log.patch b/queue-6.18/binder-fix-br_frozen_reply-error-log.patch
new file mode 100644 (file)
index 0000000..71afaf4
--- /dev/null
@@ -0,0 +1,39 @@
+From 1769f90e5ba2a6d24bb46b85da33fe861c68f005 Mon Sep 17 00:00:00 2001
+From: Carlos Llamas <cmllamas@google.com>
+Date: Fri, 23 Jan 2026 17:57:02 +0000
+Subject: binder: fix BR_FROZEN_REPLY error log
+
+From: Carlos Llamas <cmllamas@google.com>
+
+commit 1769f90e5ba2a6d24bb46b85da33fe861c68f005 upstream.
+
+The error logging for failed transactions is misleading as it always
+reports "dead process or thread" even when the target is actually
+frozen. Additionally, the pid and tid are reversed, which can further
+confuse debugging efforts. Fix both issues.
+
+Cc: stable@kernel.org
+Cc: Steven Moreland <smoreland@google.com>
+Fixes: a15dac8b2286 ("binder: additional transaction error logs")
+Signed-off-by: Carlos Llamas <cmllamas@google.com>
+Reviewed-by: Alice Ryhl <aliceryhl@google.com>
+Link: https://patch.msgid.link/20260123175702.2154348-1-cmllamas@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/android/binder.c |    5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -3824,8 +3824,9 @@ static void binder_transaction(struct bi
+       return;
+ err_dead_proc_or_thread:
+-      binder_txn_error("%d:%d dead process or thread\n",
+-              thread->pid, proc->pid);
++      binder_txn_error("%d:%d %s process or thread\n",
++                       proc->pid, thread->pid,
++                       return_error == BR_FROZEN_REPLY ? "frozen" : "dead");
+       return_error_line = __LINE__;
+       binder_dequeue_work(proc, tcomplete);
+ err_translate_failed:
diff --git a/queue-6.18/binder-fix-uaf-in-binder_netlink_report.patch b/queue-6.18/binder-fix-uaf-in-binder_netlink_report.patch
new file mode 100644 (file)
index 0000000..b3e9b3f
--- /dev/null
@@ -0,0 +1,98 @@
+From 5e8a3d01544282e50d887d76f30d1496a0a53562 Mon Sep 17 00:00:00 2001
+From: Carlos Llamas <cmllamas@google.com>
+Date: Thu, 22 Jan 2026 18:02:02 +0000
+Subject: binder: fix UAF in binder_netlink_report()
+
+From: Carlos Llamas <cmllamas@google.com>
+
+commit 5e8a3d01544282e50d887d76f30d1496a0a53562 upstream.
+
+Oneway transactions sent to frozen targets via binder_proc_transaction()
+return a BR_TRANSACTION_PENDING_FROZEN error but they are still treated
+as successful since the target is expected to thaw at some point. It is
+then not safe to access 't' after BR_TRANSACTION_PENDING_FROZEN errors
+as the transaction could have been consumed by the now thawed target.
+
+This is the case for binder_netlink_report(), which dereferences 't'
+after a pending frozen error, as pointed out by the following KASAN
+report:
+
+  ==================================================================
+  BUG: KASAN: slab-use-after-free in binder_netlink_report.isra.0+0x694/0x6c8
+  Read of size 8 at addr ffff00000f98ba38 by task binder-util/522
+
+  CPU: 4 UID: 0 PID: 522 Comm: binder-util Not tainted 6.19.0-rc6-00015-gc03e9c42ae8f #1 PREEMPT
+  Hardware name: linux,dummy-virt (DT)
+  Call trace:
+   binder_netlink_report.isra.0+0x694/0x6c8
+   binder_transaction+0x66e4/0x79b8
+   binder_thread_write+0xab4/0x4440
+   binder_ioctl+0x1fd4/0x2940
+   [...]
+
+  Allocated by task 522:
+   __kmalloc_cache_noprof+0x17c/0x50c
+   binder_transaction+0x584/0x79b8
+   binder_thread_write+0xab4/0x4440
+   binder_ioctl+0x1fd4/0x2940
+   [...]
+
+  Freed by task 488:
+   kfree+0x1d0/0x420
+   binder_free_transaction+0x150/0x234
+   binder_thread_read+0x2d08/0x3ce4
+   binder_ioctl+0x488/0x2940
+   [...]
+  ==================================================================
+
+Instead, make a transaction copy so the data can be safely accessed by
+binder_netlink_report() after a pending frozen error. While here, add a
+comment about not using t->buffer in binder_netlink_report().
+
+Cc: stable@vger.kernel.org
+Fixes: 63740349eba7 ("binder: introduce transaction reports via netlink")
+Signed-off-by: Carlos Llamas <cmllamas@google.com>
+Reviewed-by: Alice Ryhl <aliceryhl@google.com>
+Link: https://patch.msgid.link/20260122180203.1502637-1-cmllamas@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/android/binder.c |   14 +++++++++++++-
+ 1 file changed, 13 insertions(+), 1 deletion(-)
+
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -2991,6 +2991,10 @@ static void binder_set_txn_from_error(st
+  * @t:                the binder transaction that failed
+  * @data_size:        the user provided data size for the transaction
+  * @error:    enum binder_driver_return_protocol returned to sender
++ *
++ * Note that t->buffer is not safe to access here, as it may have been
++ * released (or not yet allocated). Callers should guarantee all the
++ * transaction items used here are safe to access.
+  */
+ static void binder_netlink_report(struct binder_proc *proc,
+                                 struct binder_transaction *t,
+@@ -3780,6 +3784,14 @@ static void binder_transaction(struct bi
+                       goto err_dead_proc_or_thread;
+               }
+       } else {
++              /*
++               * Make a transaction copy. It is not safe to access 't' after
++               * binder_proc_transaction() reported a pending frozen. The
++               * target could thaw and consume the transaction at any point.
++               * Instead, use a safe 't_copy' for binder_netlink_report().
++               */
++              struct binder_transaction t_copy = *t;
++
+               BUG_ON(target_node == NULL);
+               BUG_ON(t->buffer->async_transaction != 1);
+               return_error = binder_proc_transaction(t, target_proc, NULL);
+@@ -3790,7 +3802,7 @@ static void binder_transaction(struct bi
+                */
+               if (return_error == BR_TRANSACTION_PENDING_FROZEN) {
+                       tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
+-                      binder_netlink_report(proc, t, tr->data_size,
++                      binder_netlink_report(proc, &t_copy, tr->data_size,
+                                             return_error);
+               }
+               binder_enqueue_thread_work(thread, tcomplete);
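
To illustrate the ownership hazard the copy avoids, here is a minimal userspace sketch, not the driver code: deliver() and report() are hypothetical stand-ins for binder_proc_transaction() and binder_netlink_report(), and the free() inside deliver() models the thawed target consuming the transaction on another thread.

#include <stdio.h>
#include <stdlib.h>

struct txn {
        int debug_id;
        size_t data_size;
};

/* Stand-in for handing the transaction to the target: ownership moves,
 * so the caller must not touch 't' afterwards. Freeing it here models
 * the thawed target consuming it before the caller runs again. */
static int deliver(struct txn *t)
{
        free(t);
        return 1;       /* pretend BR_TRANSACTION_PENDING_FROZEN came back */
}

static void report(const struct txn *t)
{
        printf("reporting txn %d, %zu bytes\n", t->debug_id, t->data_size);
}

int main(void)
{
        struct txn *t = malloc(sizeof(*t));

        t->debug_id = 42;
        t->data_size = 128;

        struct txn copy = *t;   /* snapshot before ownership moves */
        if (deliver(t))         /* 't' may already be gone here */
                report(&copy);  /* safe: reads only the local copy */
        return 0;
}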
diff --git a/queue-6.18/binderfs-fix-ida_alloc_max-upper-bound.patch b/queue-6.18/binderfs-fix-ida_alloc_max-upper-bound.patch
new file mode 100644 (file)
index 0000000..f8f453b
--- /dev/null
@@ -0,0 +1,47 @@
+From ec4ddc90d201d09ef4e4bef8a2c6d9624525ad68 Mon Sep 17 00:00:00 2001
+From: Carlos Llamas <cmllamas@google.com>
+Date: Tue, 27 Jan 2026 23:55:11 +0000
+Subject: binderfs: fix ida_alloc_max() upper bound
+
+From: Carlos Llamas <cmllamas@google.com>
+
+commit ec4ddc90d201d09ef4e4bef8a2c6d9624525ad68 upstream.
+
+The 'max' argument of ida_alloc_max() takes the maximum valid ID and not
+the "count". Using an ID of BINDERFS_MAX_MINOR (1 << 20) for dev->minor
+would exceed the limits of minor numbers (20 bits). Fix this off-by-one
+error by subtracting 1 from the 'max'.
+
+Cc: stable@vger.kernel.org
+Fixes: 3ad20fe393b3 ("binder: implement binderfs")
+Signed-off-by: Carlos Llamas <cmllamas@google.com>
+Link: https://patch.msgid.link/20260127235545.2307876-2-cmllamas@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/android/binderfs.c |    8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/android/binderfs.c
++++ b/drivers/android/binderfs.c
+@@ -132,8 +132,8 @@ static int binderfs_binder_device_create
+       mutex_lock(&binderfs_minors_mutex);
+       if (++info->device_count <= info->mount_opts.max)
+               minor = ida_alloc_max(&binderfs_minors,
+-                                    use_reserve ? BINDERFS_MAX_MINOR :
+-                                                  BINDERFS_MAX_MINOR_CAPPED,
++                                    use_reserve ? BINDERFS_MAX_MINOR - 1 :
++                                                  BINDERFS_MAX_MINOR_CAPPED - 1,
+                                     GFP_KERNEL);
+       else
+               minor = -ENOSPC;
+@@ -424,8 +424,8 @@ static int binderfs_binder_ctl_create(st
+       /* Reserve a new minor number for the new device. */
+       mutex_lock(&binderfs_minors_mutex);
+       minor = ida_alloc_max(&binderfs_minors,
+-                            use_reserve ? BINDERFS_MAX_MINOR :
+-                                          BINDERFS_MAX_MINOR_CAPPED,
++                            use_reserve ? BINDERFS_MAX_MINOR - 1 :
++                                          BINDERFS_MAX_MINOR_CAPPED - 1,
+                             GFP_KERNEL);
+       mutex_unlock(&binderfs_minors_mutex);
+       if (minor < 0) {
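
The off-by-one is easiest to see against the ida_alloc_max() contract: 'max' is the largest ID the allocator may return, inclusive. Below is a toy userspace model written under that assumption, not the kernel IDA itself.

#include <stdio.h>

#define MAX_MINOR       (1 << 20)       /* mirrors BINDERFS_MAX_MINOR */
#define MINORBITS       20

/* Toy allocator with the same contract as ida_alloc_max(): returns IDs
 * in [0, max] inclusive, or -1 when the range is exhausted. */
static int toy_ida_alloc_max(int *next, int max)
{
        return (*next <= max) ? (*next)++ : -1;
}

int main(void)
{
        int next = MAX_MINOR;   /* pretend minors 0..2^20-1 are taken */
        int minor = toy_ida_alloc_max(&next, MAX_MINOR);

        /* With 'max' == MAX_MINOR the allocator hands out 1 << 20, which
         * no longer fits in the 20-bit minor space; with MAX_MINOR - 1 it
         * would return -1 (ENOSPC in the driver) instead. */
        printf("minor %d fits in %d bits: %s\n", minor, MINORBITS,
               (minor >> MINORBITS) == 0 ? "yes" : "no");
        return 0;
}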
diff --git a/queue-6.18/rust_binder-add-additional-alignment-checks.patch b/queue-6.18/rust_binder-add-additional-alignment-checks.patch
new file mode 100644 (file)
index 0000000..0623788
--- /dev/null
@@ -0,0 +1,159 @@
+From d047248190d86a52164656d47bec9bfba61dc71e Mon Sep 17 00:00:00 2001
+From: Alice Ryhl <aliceryhl@google.com>
+Date: Fri, 23 Jan 2026 16:23:56 +0000
+Subject: rust_binder: add additional alignment checks
+
+From: Alice Ryhl <aliceryhl@google.com>
+
+commit d047248190d86a52164656d47bec9bfba61dc71e upstream.
+
+This adds some alignment checks to match C Binder more closely. This
+causes the driver to reject more transactions. I don't think any of the
+transactions in question are harmful, but it's still a bug because it's
+the wrong uapi to accept them.
+
+The cases where usize is changed to u64 affect only 32-bit kernels.
+
+Cc: stable@vger.kernel.org
+Fixes: eafedbc7c050 ("rust_binder: add Rust Binder driver")
+Signed-off-by: Alice Ryhl <aliceryhl@google.com>
+Acked-by: Carlos Llamas <cmllamas@google.com>
+Link: https://patch.msgid.link/20260123-binder-alignment-more-checks-v1-1-7e1cea77411d@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/android/binder/thread.rs | 50 +++++++++++++++++++++++---------
+ 1 file changed, 36 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/android/binder/thread.rs b/drivers/android/binder/thread.rs
+index dcd47e10aeb8..e0ea33ccfe58 100644
+--- a/drivers/android/binder/thread.rs
++++ b/drivers/android/binder/thread.rs
+@@ -39,6 +39,10 @@
+     sync::atomic::{AtomicU32, Ordering},
+ };
++fn is_aligned(value: usize, to: usize) -> bool {
++    value % to == 0
++}
++
+ /// Stores the layout of the scatter-gather entries. This is used during the `translate_objects`
+ /// call and is discarded when it returns.
+ struct ScatterGatherState {
+@@ -795,6 +799,10 @@ fn translate_object(
+                 let num_fds = usize::try_from(obj.num_fds).map_err(|_| EINVAL)?;
+                 let fds_len = num_fds.checked_mul(size_of::<u32>()).ok_or(EINVAL)?;
++                if !is_aligned(parent_offset, size_of::<u32>()) {
++                    return Err(EINVAL.into());
++                }
++
+                 let info = sg_state.validate_parent_fixup(parent_index, parent_offset, fds_len)?;
+                 view.alloc.info_add_fd_reserve(num_fds)?;
+@@ -809,6 +817,10 @@ fn translate_object(
+                     }
+                 };
++                if !is_aligned(parent_entry.sender_uaddr, size_of::<u32>()) {
++                    return Err(EINVAL.into());
++                }
++
+                 parent_entry.fixup_min_offset = info.new_min_offset;
+                 parent_entry
+                     .pointer_fixups
+@@ -825,6 +837,7 @@ fn translate_object(
+                     .sender_uaddr
+                     .checked_add(parent_offset)
+                     .ok_or(EINVAL)?;
++
+                 let mut fda_bytes = KVec::new();
+                 UserSlice::new(UserPtr::from_addr(fda_uaddr as _), fds_len)
+                     .read_all(&mut fda_bytes, GFP_KERNEL)?;
+@@ -958,25 +971,30 @@ pub(crate) fn copy_transaction_data(
+         let data_size = trd.data_size.try_into().map_err(|_| EINVAL)?;
+         let aligned_data_size = ptr_align(data_size).ok_or(EINVAL)?;
+-        let offsets_size = trd.offsets_size.try_into().map_err(|_| EINVAL)?;
+-        let aligned_offsets_size = ptr_align(offsets_size).ok_or(EINVAL)?;
+-        let buffers_size = tr.buffers_size.try_into().map_err(|_| EINVAL)?;
+-        let aligned_buffers_size = ptr_align(buffers_size).ok_or(EINVAL)?;
++        let offsets_size: usize = trd.offsets_size.try_into().map_err(|_| EINVAL)?;
++        let buffers_size: usize = tr.buffers_size.try_into().map_err(|_| EINVAL)?;
+         let aligned_secctx_size = match secctx.as_ref() {
+             Some((_offset, ctx)) => ptr_align(ctx.len()).ok_or(EINVAL)?,
+             None => 0,
+         };
++        if !is_aligned(offsets_size, size_of::<u64>()) {
++            return Err(EINVAL.into());
++        }
++        if !is_aligned(buffers_size, size_of::<u64>()) {
++            return Err(EINVAL.into());
++        }
++
+         // This guarantees that at least `sizeof(usize)` bytes will be allocated.
+         let len = usize::max(
+             aligned_data_size
+-                .checked_add(aligned_offsets_size)
+-                .and_then(|sum| sum.checked_add(aligned_buffers_size))
++                .checked_add(offsets_size)
++                .and_then(|sum| sum.checked_add(buffers_size))
+                 .and_then(|sum| sum.checked_add(aligned_secctx_size))
+                 .ok_or(ENOMEM)?,
+-            size_of::<usize>(),
++            size_of::<u64>(),
+         );
+-        let secctx_off = aligned_data_size + aligned_offsets_size + aligned_buffers_size;
++        let secctx_off = aligned_data_size + offsets_size + buffers_size;
+         let mut alloc =
+             match to_process.buffer_alloc(debug_id, len, is_oneway, self.process.task.pid()) {
+                 Ok(alloc) => alloc,
+@@ -1008,13 +1026,13 @@ pub(crate) fn copy_transaction_data(
+             }
+             let offsets_start = aligned_data_size;
+-            let offsets_end = aligned_data_size + aligned_offsets_size;
++            let offsets_end = aligned_data_size + offsets_size;
+             // This state is used for BINDER_TYPE_PTR objects.
+             let sg_state = sg_state.insert(ScatterGatherState {
+                 unused_buffer_space: UnusedBufferSpace {
+                     offset: offsets_end,
+-                    limit: len,
++                    limit: offsets_end + buffers_size,
+                 },
+                 sg_entries: KVec::new(),
+                 ancestors: KVec::new(),
+@@ -1023,12 +1041,16 @@ pub(crate) fn copy_transaction_data(
+             // Traverse the objects specified.
+             let mut view = AllocationView::new(&mut alloc, data_size);
+             for (index, index_offset) in (offsets_start..offsets_end)
+-                .step_by(size_of::<usize>())
++                .step_by(size_of::<u64>())
+                 .enumerate()
+             {
+-                let offset = view.alloc.read(index_offset)?;
++                let offset: usize = view
++                    .alloc
++                    .read::<u64>(index_offset)?
++                    .try_into()
++                    .map_err(|_| EINVAL)?;
+-                if offset < end_of_previous_object {
++                if offset < end_of_previous_object || !is_aligned(offset, size_of::<u32>()) {
+                     pr_warn!("Got transaction with invalid offset.");
+                     return Err(EINVAL.into());
+                 }
+@@ -1060,7 +1082,7 @@ pub(crate) fn copy_transaction_data(
+                 }
+                 // Update the indexes containing objects to clean up.
+-                let offset_after_object = index_offset + size_of::<usize>();
++                let offset_after_object = index_offset + size_of::<u64>();
+                 view.alloc
+                     .set_info_offsets(offsets_start..offset_after_object);
+             }
+-- 
+2.53.0
+
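
To make the usize-to-u64 change concrete: the binder uapi stores buffer offsets as 64-bit values, so a 32-bit kernel that walks the offsets array with a native-word stride splits every entry in half. A small little-endian sketch in plain C, illustrative only and not the driver code:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        const uint64_t offsets[] = { 0, 24, 48 };  /* as sent by userspace */
        const unsigned char *p = (const unsigned char *)offsets;

        /* Correct: one 8-byte entry per step. */
        for (size_t i = 0; i < sizeof(offsets); i += sizeof(uint64_t)) {
                uint64_t v;

                memcpy(&v, p + i, sizeof(v));
                printf("u64 stride: %llu\n", (unsigned long long)v);
        }

        /* A 4-byte (32-bit usize) stride visits each entry twice: low
         * half, then high half, yielding 0 0 24 0 48 0 on little-endian. */
        for (size_t i = 0; i < sizeof(offsets); i += sizeof(uint32_t)) {
                uint32_t v;

                memcpy(&v, p + i, sizeof(v));
                printf("u32 stride: %u\n", (unsigned)v);
        }
        return 0;
}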
diff --git a/queue-6.18/rust_binder-correctly-handle-fda-objects-of-length-zero.patch b/queue-6.18/rust_binder-correctly-handle-fda-objects-of-length-zero.patch
new file mode 100644 (file)
index 0000000..ae63d50
--- /dev/null
@@ -0,0 +1,157 @@
+From 8f589c9c3be539d6c2b393c82940c3783831082f Mon Sep 17 00:00:00 2001
+From: Alice Ryhl <aliceryhl@google.com>
+Date: Mon, 29 Dec 2025 15:38:14 +0000
+Subject: rust_binder: correctly handle FDA objects of length zero
+
+From: Alice Ryhl <aliceryhl@google.com>
+
+commit 8f589c9c3be539d6c2b393c82940c3783831082f upstream.
+
+Fix a bug where an empty FDA (fd array) object with 0 fds would cause an
+out-of-bounds error. The previous implementation used `skip == 0` to
+mean "this is a pointer fixup", but 0 is also the correct skip length
+for an empty FDA. If the FDA is at the end of the buffer, then this
+results in an attempt to write 8-bytes out of bounds. This is caught and
+results in an EINVAL error being returned to userspace.
+
+The pattern of using `skip == 0` as a special value originates from the
+C implementation of Binder. As part of fixing this bug, this pattern is
+replaced with a Rust enum.
+
+I considered the alternate option of not pushing a fixup when the length
+is zero, but I think it's cleaner to just get rid of the zero-is-special
+stuff.
+
+The root cause of this bug was diagnosed by Gemini CLI on first try. I
+used the following prompt:
+
+> There appears to be a bug in @drivers/android/binder/thread.rs where
+> the Fixups oob bug is triggered with 316 304 316 324. This implies
+> that we somehow ended up with a fixup where buffer A has a pointer to
+> buffer B, but the pointer is located at an index in buffer A that is
+> out of bounds. Please investigate the code to find the bug. You may
+> compare with @drivers/android/binder.c that implements this correctly.
+
+Cc: stable@vger.kernel.org
+Reported-by: DeepChirp <DeepChirp@outlook.com>
+Closes: https://github.com/waydroid/waydroid/issues/2157
+Fixes: eafedbc7c050 ("rust_binder: add Rust Binder driver")
+Tested-by: DeepChirp <DeepChirp@outlook.com>
+Signed-off-by: Alice Ryhl <aliceryhl@google.com>
+Acked-by: Carlos Llamas <cmllamas@google.com>
+Link: https://patch.msgid.link/20251229-fda-zero-v1-1-58a41cb0e7ec@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/android/binder/thread.rs | 59 ++++++++++++++++++--------------
+ 1 file changed, 34 insertions(+), 25 deletions(-)
+
+diff --git a/drivers/android/binder/thread.rs b/drivers/android/binder/thread.rs
+index 1a8e6fdc0dc4..dcd47e10aeb8 100644
+--- a/drivers/android/binder/thread.rs
++++ b/drivers/android/binder/thread.rs
+@@ -69,17 +69,24 @@ struct ScatterGatherEntry {
+ }
+ /// This entry specifies that a fixup should happen at `target_offset` of the
+-/// buffer. If `skip` is nonzero, then the fixup is a `binder_fd_array_object`
+-/// and is applied later. Otherwise if `skip` is zero, then the size of the
+-/// fixup is `sizeof::<u64>()` and `pointer_value` is written to the buffer.
+-struct PointerFixupEntry {
+-    /// The number of bytes to skip, or zero for a `binder_buffer_object` fixup.
+-    skip: usize,
+-    /// The translated pointer to write when `skip` is zero.
+-    pointer_value: u64,
+-    /// The offset at which the value should be written. The offset is relative
+-    /// to the original buffer.
+-    target_offset: usize,
++/// buffer.
++enum PointerFixupEntry {
++    /// A fixup for a `binder_buffer_object`.
++    Fixup {
++        /// The translated pointer to write.
++        pointer_value: u64,
++        /// The offset at which the value should be written. The offset is relative
++        /// to the original buffer.
++        target_offset: usize,
++    },
++    /// A skip for a `binder_fd_array_object`.
++    Skip {
++        /// The number of bytes to skip.
++        skip: usize,
++        /// The offset at which the skip should happen. The offset is relative
++        /// to the original buffer.
++        target_offset: usize,
++    },
+ }
+ /// Return type of `apply_and_validate_fixup_in_parent`.
+@@ -762,8 +769,7 @@ fn translate_object(
+                     parent_entry.fixup_min_offset = info.new_min_offset;
+                     parent_entry.pointer_fixups.push(
+-                        PointerFixupEntry {
+-                            skip: 0,
++                        PointerFixupEntry::Fixup {
+                             pointer_value: buffer_ptr_in_user_space,
+                             target_offset: info.target_offset,
+                         },
+@@ -807,9 +813,8 @@ fn translate_object(
+                 parent_entry
+                     .pointer_fixups
+                     .push(
+-                        PointerFixupEntry {
++                        PointerFixupEntry::Skip {
+                             skip: fds_len,
+-                            pointer_value: 0,
+                             target_offset: info.target_offset,
+                         },
+                         GFP_KERNEL,
+@@ -871,17 +876,21 @@ fn apply_sg(&self, alloc: &mut Allocation, sg_state: &mut ScatterGatherState) ->
+             let mut reader =
+                 UserSlice::new(UserPtr::from_addr(sg_entry.sender_uaddr), sg_entry.length).reader();
+             for fixup in &mut sg_entry.pointer_fixups {
+-                let fixup_len = if fixup.skip == 0 {
+-                    size_of::<u64>()
+-                } else {
+-                    fixup.skip
++                let (fixup_len, fixup_offset) = match fixup {
++                    PointerFixupEntry::Fixup { target_offset, .. } => {
++                        (size_of::<u64>(), *target_offset)
++                    }
++                    PointerFixupEntry::Skip {
++                        skip,
++                        target_offset,
++                    } => (*skip, *target_offset),
+                 };
+-                let target_offset_end = fixup.target_offset.checked_add(fixup_len).ok_or(EINVAL)?;
+-                if fixup.target_offset < end_of_previous_fixup || offset_end < target_offset_end {
++                let target_offset_end = fixup_offset.checked_add(fixup_len).ok_or(EINVAL)?;
++                if fixup_offset < end_of_previous_fixup || offset_end < target_offset_end {
+                     pr_warn!(
+                         "Fixups oob {} {} {} {}",
+-                        fixup.target_offset,
++                        fixup_offset,
+                         end_of_previous_fixup,
+                         offset_end,
+                         target_offset_end
+@@ -890,13 +899,13 @@ fn apply_sg(&self, alloc: &mut Allocation, sg_state: &mut ScatterGatherState) ->
+                 }
+                 let copy_off = end_of_previous_fixup;
+-                let copy_len = fixup.target_offset - end_of_previous_fixup;
++                let copy_len = fixup_offset - end_of_previous_fixup;
+                 if let Err(err) = alloc.copy_into(&mut reader, copy_off, copy_len) {
+                     pr_warn!("Failed copying into alloc: {:?}", err);
+                     return Err(err.into());
+                 }
+-                if fixup.skip == 0 {
+-                    let res = alloc.write::<u64>(fixup.target_offset, &fixup.pointer_value);
++                if let PointerFixupEntry::Fixup { pointer_value, .. } = fixup {
++                    let res = alloc.write::<u64>(fixup_offset, pointer_value);
+                     if let Err(err) = res {
+                         pr_warn!("Failed copying ptr into alloc: {:?}", err);
+                         return Err(err.into());
+-- 
+2.53.0
+
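
The root cause is the sentinel itself. A hedged C sketch of the pattern (hypothetical names, not the C driver's actual fixup structures) shows how a legitimately empty fd array collides with skip == 0 meaning "pointer fixup":

#include <stdint.h>
#include <stdio.h>

struct fixup {
        size_t   target_offset;
        size_t   skip;          /* 0 was overloaded to mean "pointer fixup" */
        uint64_t pointer_value;
};

/* How many bytes this fixup occupies in the target buffer. */
static size_t fixup_len(const struct fixup *f)
{
        return f->skip == 0 ? sizeof(uint64_t) : f->skip;
}

int main(void)
{
        /* An fd array with zero fds: its skip length really is 0, so the
         * sentinel misreads it as an 8-byte pointer write -- out of bounds
         * when the entry sits at the end of the buffer. */
        struct fixup empty_fda = { .target_offset = 316, .skip = 0 };

        printf("treated length: %zu (should be 0)\n", fixup_len(&empty_fda));
        return 0;
}

Encoding the two cases as separate enum variants, as the patch does, makes this ambiguity unrepresentable.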
diff --git a/queue-6.18/rust_binderfs-fix-ida_alloc_max-upper-bound.patch b/queue-6.18/rust_binderfs-fix-ida_alloc_max-upper-bound.patch
new file mode 100644 (file)
index 0000000..bf234b6
--- /dev/null
@@ -0,0 +1,50 @@
+From d6ba734814266bbf7ee01f9030436597116805f3 Mon Sep 17 00:00:00 2001
+From: Carlos Llamas <cmllamas@google.com>
+Date: Tue, 27 Jan 2026 23:55:10 +0000
+Subject: rust_binderfs: fix ida_alloc_max() upper bound
+
+From: Carlos Llamas <cmllamas@google.com>
+
+commit d6ba734814266bbf7ee01f9030436597116805f3 upstream.
+
+The 'max' argument of ida_alloc_max() takes the maximum valid ID and not
+the "count". Using an ID of BINDERFS_MAX_MINOR (1 << 20) for dev->minor
+would exceed the limits of minor numbers (20 bits). Fix this off-by-one
+error by subtracting 1 from the 'max'.
+
+Cc: stable@vger.kernel.org
+Fixes: eafedbc7c050 ("rust_binder: add Rust Binder driver")
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/r/202512181203.IOv6IChH-lkp@intel.com/
+Signed-off-by: Carlos Llamas <cmllamas@google.com>
+Reviewed-by: Alice Ryhl <aliceryhl@google.com>
+Link: https://patch.msgid.link/20260127235545.2307876-1-cmllamas@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/android/binder/rust_binderfs.c |    8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/android/binder/rust_binderfs.c
++++ b/drivers/android/binder/rust_binderfs.c
+@@ -132,8 +132,8 @@ static int binderfs_binder_device_create
+       mutex_lock(&binderfs_minors_mutex);
+       if (++info->device_count <= info->mount_opts.max)
+               minor = ida_alloc_max(&binderfs_minors,
+-                                    use_reserve ? BINDERFS_MAX_MINOR :
+-                                                  BINDERFS_MAX_MINOR_CAPPED,
++                                    use_reserve ? BINDERFS_MAX_MINOR - 1 :
++                                                  BINDERFS_MAX_MINOR_CAPPED - 1,
+                                     GFP_KERNEL);
+       else
+               minor = -ENOSPC;
+@@ -416,8 +416,8 @@ static int binderfs_binder_ctl_create(st
+       /* Reserve a new minor number for the new device. */
+       mutex_lock(&binderfs_minors_mutex);
+       minor = ida_alloc_max(&binderfs_minors,
+-                            use_reserve ? BINDERFS_MAX_MINOR :
+-                                          BINDERFS_MAX_MINOR_CAPPED,
++                            use_reserve ? BINDERFS_MAX_MINOR - 1 :
++                                          BINDERFS_MAX_MINOR_CAPPED - 1,
+                             GFP_KERNEL);
+       mutex_unlock(&binderfs_minors_mutex);
+       if (minor < 0) {
diff --git a/queue-6.18/series b/queue-6.18/series
index c3af463a468131c527b9f0e57afb494db3d56242..351406aaaa29cc485bae8252792ddacfa93170c4 100644 (file)
--- a/queue-6.18/series
@@ -38,3 +38,10 @@ treewide-drop-pci_save_state-after-pci_restore_state.patch
 bus-mhi-host-pci_generic-add-telit-fe990b40-modem-support.patch
 sched-fair-skip-sched_balance_running-cmpxchg-when-balance-is-not-due.patch
 sched-fair-have-sd_serialize-affect-newidle-balancing.patch
+rust_binder-correctly-handle-fda-objects-of-length-zero.patch
+rust_binder-add-additional-alignment-checks.patch
+rust_binderfs-fix-ida_alloc_max-upper-bound.patch
+binder-fix-uaf-in-binder_netlink_report.patch
+binder-fix-br_frozen_reply-error-log.patch
+binderfs-fix-ida_alloc_max-upper-bound.patch
+tracing-fix-ftrace-event-field-alignments.patch
diff --git a/queue-6.18/tracing-fix-ftrace-event-field-alignments.patch b/queue-6.18/tracing-fix-ftrace-event-field-alignments.patch
new file mode 100644 (file)
index 0000000..606dc23
--- /dev/null
@@ -0,0 +1,220 @@
+From stable+bounces-214804-greg=kroah.com@vger.kernel.org Sat Feb  7 16:56:23 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat,  7 Feb 2026 10:56:15 -0500
+Subject: tracing: Fix ftrace event field alignments
+To: stable@vger.kernel.org
+Cc: Steven Rostedt <rostedt@goodmis.org>, Mathieu Desnoyers <mathieu.desnoyers@efficios.com>, Mark Rutland <mark.rutland@arm.com>, "Masami Hiramatsu (Google)" <mhiramat@kernel.org>, "jempty.liang" <imntjempty@163.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260207155615.406518-1-sashal@kernel.org>
+
+From: Steven Rostedt <rostedt@goodmis.org>
+
+[ Upstream commit 033c55fe2e326bea022c3cc5178ecf3e0e459b82 ]
+
+The fields of ftrace specific events (events used to save ftrace internal
+events like function traces and trace_printk) are generated similarly to
+how normal trace event fields are generated. That is, the fields are added
+to a trace_events_fields array that saves the name, offset, size,
+alignment and signness of the field. It is used to produce the output in
+the format file in tracefs so that tooling knows how to parse the binary
+data of the trace events.
+
+The issue is that some of the ftrace event structures are packed. The
+function graph exit event structures are one of them. The 64 bit calltime
+and rettime fields end up 4 byte aligned, but the algorithm to show to
+userspace shows them as 8 byte aligned.
+
+The macros that create the ftrace events has one for embedded structure
+fields. There's two macros for theses fields:
+
+  __field_desc() and __field_packed()
+
+The difference of the latter macro is that it treats the field as packed.
+
+Rename that macro to __field_desc_packed(), turn __field_packed() into a
+normal field that is packed, and have the calltime and rettime fields use
+it.
+
+This showed up on 32-bit architectures for function graph time fields. It
+had:
+
+ ~# cat /sys/kernel/tracing/events/ftrace/funcgraph_exit/format
+[..]
+        field:unsigned long func;       offset:8;       size:4; signed:0;
+        field:unsigned int depth;       offset:12;      size:4; signed:0;
+        field:unsigned int overrun;     offset:16;      size:4; signed:0;
+        field:unsigned long long calltime;      offset:24;      size:8; signed:0;
+        field:unsigned long long rettime;       offset:32;      size:8; signed:0;
+
+Notice that overrun is at offset 16 with size 4, where in the structure
+calltime is at offset 20 (16 + 4), but it shows the offset at 24. That's
+because it used the alignment of unsigned long long when used as a
+declaration and not as a member of a structure where it would be aligned
+by word size (in this case 4).
+
+By using the proper structure alignment, the format has it at the correct
+offset:
+
+ ~# cat /sys/kernel/tracing/events/ftrace/funcgraph_exit/format
+[..]
+        field:unsigned long func;       offset:8;       size:4; signed:0;
+        field:unsigned int depth;       offset:12;      size:4; signed:0;
+        field:unsigned int overrun;     offset:16;      size:4; signed:0;
+        field:unsigned long long calltime;      offset:20;      size:8; signed:0;
+        field:unsigned long long rettime;       offset:28;      size:8; signed:0;
+
+Cc: stable@vger.kernel.org
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Reported-by: "jempty.liang" <imntjempty@163.com>
+Link: https://patch.msgid.link/20260204113628.53faec78@gandalf.local.home
+Fixes: 04ae87a52074e ("ftrace: Rework event_create_dir()")
+Closes: https://lore.kernel.org/all/20260130015740.212343-1-imntjempty@163.com/
+Closes: https://lore.kernel.org/all/20260202123342.2544795-1-imntjempty@163.com/
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+[ Different variable types and some renames ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace.h         |    7 +++++--
+ kernel/trace/trace_entries.h |   32 ++++++++++++++++----------------
+ kernel/trace/trace_export.c  |   21 +++++++++++++++------
+ 3 files changed, 36 insertions(+), 24 deletions(-)
+
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -67,14 +67,17 @@ enum trace_type {
+ #undef __field_fn
+ #define __field_fn(type, item)                type    item;
++#undef __field_packed
++#define __field_packed(type, item)    type    item;
++
+ #undef __field_struct
+ #define __field_struct(type, item)    __field(type, item)
+ #undef __field_desc
+ #define __field_desc(type, container, item)
+-#undef __field_packed
+-#define __field_packed(type, container, item)
++#undef __field_desc_packed
++#define __field_desc_packed(type, container, item)
+ #undef __array
+ #define __array(type, item, size)     type    item[size];
+--- a/kernel/trace/trace_entries.h
++++ b/kernel/trace/trace_entries.h
+@@ -79,8 +79,8 @@ FTRACE_ENTRY(funcgraph_entry, ftrace_gra
+       F_STRUCT(
+               __field_struct( struct ftrace_graph_ent,        graph_ent       )
+-              __field_packed( unsigned long,  graph_ent,      func            )
+-              __field_packed( unsigned int,   graph_ent,      depth           )
++              __field_desc_packed(unsigned long,      graph_ent,      func    )
++              __field_desc_packed(unsigned int,       graph_ent,      depth   )
+               __dynamic_array(unsigned long,  args                            )
+       ),
+@@ -96,9 +96,9 @@ FTRACE_ENTRY_PACKED(fgraph_retaddr_entry
+       F_STRUCT(
+               __field_struct( struct fgraph_retaddr_ent,      graph_ent       )
+-              __field_packed( unsigned long,  graph_ent,      func            )
+-              __field_packed( unsigned int,   graph_ent,      depth           )
+-              __field_packed( unsigned long,  graph_ent,      retaddr         )
++              __field_desc_packed(    unsigned long,  graph_ent,      func    )
++              __field_desc_packed(    unsigned int,   graph_ent,      depth   )
++              __field_desc_packed(    unsigned long,  graph_ent,      retaddr )
+       ),
+       F_printk("--> %ps (%u) <- %ps", (void *)__entry->func, __entry->depth,
+@@ -122,12 +122,12 @@ FTRACE_ENTRY_PACKED(funcgraph_exit, ftra
+       F_STRUCT(
+               __field_struct( struct ftrace_graph_ret,        ret     )
+-              __field_packed( unsigned long,  ret,            func    )
+-              __field_packed( unsigned long,  ret,            retval  )
+-              __field_packed( unsigned int,   ret,            depth   )
+-              __field_packed( unsigned int,   ret,            overrun )
+-              __field(unsigned long long,     calltime                )
+-              __field(unsigned long long,     rettime                 )
++              __field_desc_packed(    unsigned long,  ret,    func    )
++              __field_desc_packed(    unsigned long,  ret,    retval  )
++              __field_desc_packed(    unsigned int,   ret,    depth   )
++              __field_desc_packed(    unsigned int,   ret,    overrun )
++              __field_packed(unsigned long long,      calltime)
++              __field_packed(unsigned long long,      rettime )
+       ),
+       F_printk("<-- %ps (%u) (start: %llx  end: %llx) over: %u retval: %lx",
+@@ -145,11 +145,11 @@ FTRACE_ENTRY_PACKED(funcgraph_exit, ftra
+       F_STRUCT(
+               __field_struct( struct ftrace_graph_ret,        ret     )
+-              __field_packed( unsigned long,  ret,            func    )
+-              __field_packed( unsigned int,   ret,            depth   )
+-              __field_packed( unsigned int,   ret,            overrun )
+-              __field(unsigned long long,     calltime                )
+-              __field(unsigned long long,     rettime                 )
++              __field_desc_packed(    unsigned long,  ret,    func    )
++              __field_desc_packed(    unsigned int,   ret,    depth   )
++              __field_desc_packed(    unsigned int,   ret,    overrun )
++              __field_packed(unsigned long long,      calltime        )
++              __field_packed(unsigned long long,      rettime         )
+       ),
+       F_printk("<-- %ps (%u) (start: %llx  end: %llx) over: %u",
+--- a/kernel/trace/trace_export.c
++++ b/kernel/trace/trace_export.c
+@@ -42,11 +42,14 @@ static int ftrace_event_register(struct
+ #undef __field_fn
+ #define __field_fn(type, item)                                type item;
++#undef __field_packed
++#define __field_packed(type, item)                    type item;
++
+ #undef __field_desc
+ #define __field_desc(type, container, item)           type item;
+-#undef __field_packed
+-#define __field_packed(type, container, item)         type item;
++#undef __field_desc_packed
++#define __field_desc_packed(type, container, item)    type item;
+ #undef __array
+ #define __array(type, item, size)                     type item[size];
+@@ -104,11 +107,14 @@ static void __always_unused ____ftrace_c
+ #undef __field_fn
+ #define __field_fn(_type, _item) __field_ext(_type, _item, FILTER_TRACE_FN)
++#undef __field_packed
++#define __field_packed(_type, _item) __field_ext_packed(_type, _item, FILTER_OTHER)
++
+ #undef __field_desc
+ #define __field_desc(_type, _container, _item) __field_ext(_type, _item, FILTER_OTHER)
+-#undef __field_packed
+-#define __field_packed(_type, _container, _item) __field_ext_packed(_type, _item, FILTER_OTHER)
++#undef __field_desc_packed
++#define __field_desc_packed(_type, _container, _item) __field_ext_packed(_type, _item, FILTER_OTHER)
+ #undef __array
+ #define __array(_type, _item, _len) {                                 \
+@@ -146,11 +152,14 @@ static struct trace_event_fields ftrace_
+ #undef __field_fn
+ #define __field_fn(type, item)
++#undef __field_packed
++#define __field_packed(type, item)
++
+ #undef __field_desc
+ #define __field_desc(type, container, item)
+-#undef __field_packed
+-#define __field_packed(type, container, item)
++#undef __field_desc_packed
++#define __field_desc_packed(type, container, item)
+ #undef __array
+ #define __array(type, item, len)
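
As a standalone check of the alignment reasoning in this patch, the following sketch (hypothetical field layout, not the real trace entry) shows that packing removes the padding before a 64-bit member, which is why the format file must report the in-struct offset rather than the type's natural alignment:

#include <stddef.h>
#include <stdio.h>

struct exit_unpacked {
        unsigned int depth;
        unsigned int overrun;
        unsigned int probe;             /* leaves calltime off an 8-byte boundary */
        unsigned long long calltime;
};

struct __attribute__((packed)) exit_packed {
        unsigned int depth;
        unsigned int overrun;
        unsigned int probe;
        unsigned long long calltime;
};

int main(void)
{
        /* Unpacked: padding pushes calltime to an 8-byte boundary (16 on
         * most 64-bit ABIs). Packed: calltime follows immediately at
         * offset 12, which is the offset the packed ftrace entries must
         * advertise in their format files. */
        printf("unpacked calltime offset: %zu\n",
               offsetof(struct exit_unpacked, calltime));
        printf("packed   calltime offset: %zu\n",
               offsetof(struct exit_packed, calltime));
        return 0;
}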