--- /dev/null
+From stable-owner@vger.kernel.org Wed Feb 22 13:14:03 2023
+From: Lee Jones <lee@kernel.org>
+Date: Wed, 22 Feb 2023 12:12:07 +0000
+Subject: binder: Address corner cases in deferred copy and fixup
+To: lee@kernel.org
+Cc: stable@vger.kernel.org, Alessandro Astone <ales.astone@gmail.com>, Todd Kjos <tkjos@google.com>, stable <stable@kernel.org>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Carlos Llamas <cmllamas@google.com>
+Message-ID: <20230222121208.898198-5-lee@kernel.org>
+
+From: Alessandro Astone <ales.astone@gmail.com>
+
+commit 2d1746e3fda0c3612143d7c06f8e1d1830c13e23 upstream.
+
+When handling a BINDER_TYPE_FDA object we push a parent fixup with a
+certain skip_size but no scatter-gather copy object, since the copy
+is handled standalone.
+If the BINDER_TYPE_FDA object is the last child, the scatter-gather
+copy loop never stops to skip it, so we are left with an item in the
+parent fixup list, which triggers the BUG_ON().
+
+This is reproducible in android when playing a video.
+We receive a transaction that looks like this:
+ obj[0] BINDER_TYPE_PTR, parent
+ obj[1] BINDER_TYPE_PTR, child
+ obj[2] BINDER_TYPE_PTR, child
+ obj[3] BINDER_TYPE_FDA, child
+
+Fixes: 09184ae9b575 ("binder: defer copies of pre-patched txn data")
+Acked-by: Todd Kjos <tkjos@google.com>
+Cc: stable <stable@kernel.org>
+Signed-off-by: Alessandro Astone <ales.astone@gmail.com>
+Link: https://lore.kernel.org/r/20220415120015.52684-2-ales.astone@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Carlos Llamas <cmllamas@google.com>
+Signed-off-by: Lee Jones <lee@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/android/binder.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -2331,6 +2331,7 @@ static int binder_do_deferred_txn_copies
+ {
+ int ret = 0;
+ struct binder_sg_copy *sgc, *tmpsgc;
++ struct binder_ptr_fixup *tmppf;
+ struct binder_ptr_fixup *pf =
+ list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
+ node);
+@@ -2385,7 +2386,11 @@ static int binder_do_deferred_txn_copies
+ list_del(&sgc->node);
+ kfree(sgc);
+ }
+- BUG_ON(!list_empty(pf_head));
++ list_for_each_entry_safe(pf, tmppf, pf_head, node) {
++ BUG_ON(pf->skip_size == 0);
++ list_del(&pf->node);
++ kfree(pf);
++ }
+ BUG_ON(!list_empty(sgc_head));
+
+ return ret > 0 ? -EINVAL : ret;
--- /dev/null
+From stable-owner@vger.kernel.org Wed Feb 22 13:12:33 2023
+From: Lee Jones <lee@kernel.org>
+Date: Wed, 22 Feb 2023 12:12:05 +0000
+Subject: binder: defer copies of pre-patched txn data
+To: lee@kernel.org
+Cc: stable@vger.kernel.org, Todd Kjos <tkjos@google.com>, Martijn Coenen <maco@android.com>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Message-ID: <20230222121208.898198-3-lee@kernel.org>
+
+From: Todd Kjos <tkjos@google.com>
+
+commit 09184ae9b5756cc469db6fd1d1cfdcffbf627c2d upstream.
+
+BINDER_TYPE_PTR objects point to memory areas in the
+source process to be copied into the target buffer
+as part of a transaction. This implements a scatter-
+gather model where non-contiguous memory in a source
+process is "gathered" into a contiguous region in
+the target buffer.
+
+The data can include pointers that must be fixed up
+to correctly point to the copied data. To avoid making
+source process pointers visible to the target process,
+this patch defers the copy until the fixups are known
+and then the copies and fixups are done together.
+
+There is a special case of BINDER_TYPE_FDA which applies
+the fixup later in the target process context. In this
+case the user data is skipped (so no untranslated fds
+become visible to the target).
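+
+For context, a minimal, purely illustrative sender-side sketch of such
+a scatter-gather layout (struct and flag names are from the binder UAPI
+in <linux/android/binder.h>; struct foo/bar and the values are made up).
+obj1's parent/parent_offset tell the driver which pointer inside obj0's
+data must be rewritten; with this patch that rewrite is done together
+with the deferred copy, so the sender-space value of f.child never
+appears in the target buffer:
+
+    struct bar { uint32_t b; };
+    struct foo { uint32_t a; struct bar *child; };
+
+    struct bar some_bar = { .b = 2 };
+    struct foo f = { .a = 1, .child = &some_bar };
+
+    /* the "parent": describes f itself */
+    struct binder_buffer_object obj0 = {
+            .hdr.type = BINDER_TYPE_PTR,
+            .buffer   = (binder_uintptr_t)(uintptr_t)&f,
+            .length   = sizeof(f),
+    };
+    /* the "child": describes the memory f.child points to */
+    struct binder_buffer_object obj1 = {
+            .hdr.type      = BINDER_TYPE_PTR,
+            .flags         = BINDER_BUFFER_FLAG_HAS_PARENT,
+            .buffer        = (binder_uintptr_t)(uintptr_t)f.child,
+            .length        = sizeof(*f.child),
+            .parent        = 0,                           /* index of obj0    */
+            .parent_offset = offsetof(struct foo, child), /* pointer to patch */
+    };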
+
+Reviewed-by: Martijn Coenen <maco@android.com>
+Signed-off-by: Todd Kjos <tkjos@google.com>
+Link: https://lore.kernel.org/r/20211130185152.437403-5-tkjos@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Lee Jones <lee@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/android/binder.c | 299 +++++++++++++++++++++++++++++++++++++++++++----
+ 1 file changed, 274 insertions(+), 25 deletions(-)
+
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -2269,7 +2269,246 @@ err_fd_not_accepted:
+ return ret;
+ }
+
+-static int binder_translate_fd_array(struct binder_fd_array_object *fda,
++/**
++ * struct binder_ptr_fixup - data to be fixed-up in target buffer
++ * @offset offset in target buffer to fixup
++ * @skip_size bytes to skip in copy (fixup will be written later)
++ * @fixup_data data to write at fixup offset
++ * @node list node
++ *
++ * This is used for the pointer fixup list (pf) which is created and consumed
++ * during binder_transaction() and is only accessed locally. No
++ * locking is necessary.
++ *
++ * The list is ordered by @offset.
++ */
++struct binder_ptr_fixup {
++ binder_size_t offset;
++ size_t skip_size;
++ binder_uintptr_t fixup_data;
++ struct list_head node;
++};
++
++/**
++ * struct binder_sg_copy - scatter-gather data to be copied
++ * @offset offset in target buffer
++ * @sender_uaddr user address in source buffer
++ * @length bytes to copy
++ * @node list node
++ *
++ * This is used for the sg copy list (sgc) which is created and consumed
++ * during binder_transaction() and is only accessed locally. No
++ * locking is necessary.
++ *
++ * The list is ordered by @offset.
++ */
++struct binder_sg_copy {
++ binder_size_t offset;
++ const void __user *sender_uaddr;
++ size_t length;
++ struct list_head node;
++};
++
++/**
++ * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
++ * @alloc: binder_alloc associated with @buffer
++ * @buffer: binder buffer in target process
++ * @sgc_head: list_head of scatter-gather copy list
++ * @pf_head: list_head of pointer fixup list
++ *
++ * Processes all elements of @sgc_head, applying fixups from @pf_head
++ * and copying the scatter-gather data from the source process' user
++ * buffer to the target's buffer. It is expected that the list creation
++ * and processing all occurs during binder_transaction() so these lists
++ * are only accessed in local context.
++ *
++ * Return: 0=success, else -errno
++ */
++static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
++ struct binder_buffer *buffer,
++ struct list_head *sgc_head,
++ struct list_head *pf_head)
++{
++ int ret = 0;
++ struct binder_sg_copy *sgc, *tmpsgc;
++ struct binder_ptr_fixup *pf =
++ list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
++ node);
++
++ list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
++ size_t bytes_copied = 0;
++
++ while (bytes_copied < sgc->length) {
++ size_t copy_size;
++ size_t bytes_left = sgc->length - bytes_copied;
++ size_t offset = sgc->offset + bytes_copied;
++
++ /*
++ * We copy up to the fixup (pointed to by pf)
++ */
++ copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
++ : bytes_left;
++ if (!ret && copy_size)
++ ret = binder_alloc_copy_user_to_buffer(
++ alloc, buffer,
++ offset,
++ sgc->sender_uaddr + bytes_copied,
++ copy_size);
++ bytes_copied += copy_size;
++ if (copy_size != bytes_left) {
++ BUG_ON(!pf);
++ /* we stopped at a fixup offset */
++ if (pf->skip_size) {
++ /*
++ * we are just skipping. This is for
++ * BINDER_TYPE_FDA where the translated
++ * fds will be fixed up when we get
++ * to target context.
++ */
++ bytes_copied += pf->skip_size;
++ } else {
++ /* apply the fixup indicated by pf */
++ if (!ret)
++ ret = binder_alloc_copy_to_buffer(
++ alloc, buffer,
++ pf->offset,
++ &pf->fixup_data,
++ sizeof(pf->fixup_data));
++ bytes_copied += sizeof(pf->fixup_data);
++ }
++ list_del(&pf->node);
++ kfree(pf);
++ pf = list_first_entry_or_null(pf_head,
++ struct binder_ptr_fixup, node);
++ }
++ }
++ list_del(&sgc->node);
++ kfree(sgc);
++ }
++ BUG_ON(!list_empty(pf_head));
++ BUG_ON(!list_empty(sgc_head));
++
++ return ret > 0 ? -EINVAL : ret;
++}
++
++/**
++ * binder_cleanup_deferred_txn_lists() - free specified lists
++ * @sgc_head: list_head of scatter-gather copy list
++ * @pf_head: list_head of pointer fixup list
++ *
++ * Called to clean up @sgc_head and @pf_head if there is an
++ * error.
++ */
++static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
++ struct list_head *pf_head)
++{
++ struct binder_sg_copy *sgc, *tmpsgc;
++ struct binder_ptr_fixup *pf, *tmppf;
++
++ list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
++ list_del(&sgc->node);
++ kfree(sgc);
++ }
++ list_for_each_entry_safe(pf, tmppf, pf_head, node) {
++ list_del(&pf->node);
++ kfree(pf);
++ }
++}
++
++/**
++ * binder_defer_copy() - queue a scatter-gather buffer for copy
++ * @sgc_head: list_head of scatter-gather copy list
++ * @offset: binder buffer offset in target process
++ * @sender_uaddr: user address in source process
++ * @length: bytes to copy
++ *
++ * Specify a scatter-gather block to be copied. The actual copy must
++ * be deferred until all the needed fixups are identified and queued.
++ * Then the copy and fixups are done together so un-translated values
++ * from the source are never visible in the target buffer.
++ *
++ * We are guaranteed that repeated calls to this function will have
++ * monotonically increasing @offset values so the list will naturally
++ * be ordered.
++ *
++ * Return: 0=success, else -errno
++ */
++static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
++ const void __user *sender_uaddr, size_t length)
++{
++ struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
++
++ if (!bc)
++ return -ENOMEM;
++
++ bc->offset = offset;
++ bc->sender_uaddr = sender_uaddr;
++ bc->length = length;
++ INIT_LIST_HEAD(&bc->node);
++
++ /*
++ * We are guaranteed that the deferred copies are in-order
++ * so just add to the tail.
++ */
++ list_add_tail(&bc->node, sgc_head);
++
++ return 0;
++}
++
++/**
++ * binder_add_fixup() - queue a fixup to be applied to sg copy
++ * @pf_head: list_head of binder ptr fixup list
++ * @offset: binder buffer offset in target process
++ * @fixup: bytes to be copied for fixup
++ * @skip_size: bytes to skip when copying (fixup will be applied later)
++ *
++ * Add the specified fixup to a list ordered by @offset. When copying
++ * the scatter-gather buffers, the fixup will be copied instead of
++ * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
++ * will be applied later (in target process context), so we just skip
++ * the bytes specified by @skip_size. If @skip_size is 0, we copy the
++ * value in @fixup.
++ *
++ * This function is called *mostly* in @offset order, but there are
++ * exceptions. Since out-of-order inserts are relatively uncommon,
++ * we insert the new element by searching backward from the tail of
++ * the list.
++ *
++ * Return: 0=success, else -errno
++ */
++static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
++ binder_uintptr_t fixup, size_t skip_size)
++{
++ struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
++ struct binder_ptr_fixup *tmppf;
++
++ if (!pf)
++ return -ENOMEM;
++
++ pf->offset = offset;
++ pf->fixup_data = fixup;
++ pf->skip_size = skip_size;
++ INIT_LIST_HEAD(&pf->node);
++
++ /* Fixups are *mostly* added in-order, but there are some
++ * exceptions. Look backwards through list for insertion point.
++ */
++ list_for_each_entry_reverse(tmppf, pf_head, node) {
++ if (tmppf->offset < pf->offset) {
++ list_add(&pf->node, &tmppf->node);
++ return 0;
++ }
++ }
++ /*
++ * if we get here, then the new offset is the lowest so
++ * insert at the head
++ */
++ list_add(&pf->node, pf_head);
++ return 0;
++}
++
++static int binder_translate_fd_array(struct list_head *pf_head,
++ struct binder_fd_array_object *fda,
+ const void __user *sender_ubuffer,
+ struct binder_buffer_object *parent,
+ struct binder_buffer_object *sender_uparent,
+@@ -2281,6 +2520,7 @@ static int binder_translate_fd_array(str
+ binder_size_t fda_offset;
+ const void __user *sender_ufda_base;
+ struct binder_proc *proc = thread->proc;
++ int ret;
+
+ fd_buf_size = sizeof(u32) * fda->num_fds;
+ if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
+@@ -2312,9 +2552,12 @@ static int binder_translate_fd_array(str
+ proc->pid, thread->pid);
+ return -EINVAL;
+ }
++ ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
++ if (ret)
++ return ret;
++
+ for (fdi = 0; fdi < fda->num_fds; fdi++) {
+ u32 fd;
+- int ret;
+ binder_size_t offset = fda_offset + fdi * sizeof(fd);
+ binder_size_t sender_uoffset = fdi * sizeof(fd);
+
+@@ -2328,7 +2571,8 @@ static int binder_translate_fd_array(str
+ return 0;
+ }
+
+-static int binder_fixup_parent(struct binder_transaction *t,
++static int binder_fixup_parent(struct list_head *pf_head,
++ struct binder_transaction *t,
+ struct binder_thread *thread,
+ struct binder_buffer_object *bp,
+ binder_size_t off_start_offset,
+@@ -2374,14 +2618,7 @@ static int binder_fixup_parent(struct bi
+ }
+ buffer_offset = bp->parent_offset +
+ (uintptr_t)parent->buffer - (uintptr_t)b->user_data;
+- if (binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
+- &bp->buffer, sizeof(bp->buffer))) {
+- binder_user_error("%d:%d got transaction with invalid parent offset\n",
+- proc->pid, thread->pid);
+- return -EINVAL;
+- }
+-
+- return 0;
++ return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
+ }
+
+ /**
+@@ -2523,8 +2760,12 @@ static void binder_transaction(struct bi
+ int t_debug_id = atomic_inc_return(&binder_last_id);
+ char *secctx = NULL;
+ u32 secctx_sz = 0;
++ struct list_head sgc_head;
++ struct list_head pf_head;
+ const void __user *user_buffer = (const void __user *)
+ (uintptr_t)tr->data.ptr.buffer;
++ INIT_LIST_HEAD(&sgc_head);
++ INIT_LIST_HEAD(&pf_head);
+
+ e = binder_transaction_log_add(&binder_transaction_log);
+ e->debug_id = t_debug_id;
+@@ -3041,8 +3282,8 @@ static void binder_transaction(struct bi
+ return_error_line = __LINE__;
+ goto err_bad_parent;
+ }
+- ret = binder_translate_fd_array(fda, user_buffer,
+- parent,
++ ret = binder_translate_fd_array(&pf_head, fda,
++ user_buffer, parent,
+ &user_object.bbo, t,
+ thread, in_reply_to);
+ if (!ret)
+@@ -3074,19 +3315,14 @@ static void binder_transaction(struct bi
+ return_error_line = __LINE__;
+ goto err_bad_offset;
+ }
+- if (binder_alloc_copy_user_to_buffer(
+- &target_proc->alloc,
+- t->buffer,
+- sg_buf_offset,
+- (const void __user *)
+- (uintptr_t)bp->buffer,
+- bp->length)) {
+- binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
+- proc->pid, thread->pid);
+- return_error_param = -EFAULT;
++ ret = binder_defer_copy(&sgc_head, sg_buf_offset,
++ (const void __user *)(uintptr_t)bp->buffer,
++ bp->length);
++ if (ret) {
+ return_error = BR_FAILED_REPLY;
++ return_error_param = ret;
+ return_error_line = __LINE__;
+- goto err_copy_data_failed;
++ goto err_translate_failed;
+ }
+ /* Fixup buffer pointer to target proc address space */
+ bp->buffer = (uintptr_t)
+@@ -3095,7 +3331,8 @@ static void binder_transaction(struct bi
+
+ num_valid = (buffer_offset - off_start_offset) /
+ sizeof(binder_size_t);
+- ret = binder_fixup_parent(t, thread, bp,
++ ret = binder_fixup_parent(&pf_head, t,
++ thread, bp,
+ off_start_offset,
+ num_valid,
+ last_fixup_obj_off,
+@@ -3135,6 +3372,17 @@ static void binder_transaction(struct bi
+ return_error_line = __LINE__;
+ goto err_copy_data_failed;
+ }
++
++ ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
++ &sgc_head, &pf_head);
++ if (ret) {
++ binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
++ proc->pid, thread->pid);
++ return_error = BR_FAILED_REPLY;
++ return_error_param = ret;
++ return_error_line = __LINE__;
++ goto err_copy_data_failed;
++ }
+ if (t->buffer->oneway_spam_suspect)
+ tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
+ else
+@@ -3208,6 +3456,7 @@ err_bad_object_type:
+ err_bad_offset:
+ err_bad_parent:
+ err_copy_data_failed:
++ binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
+ binder_free_txn_fixups(t);
+ trace_binder_transaction_failed_buffer_release(t->buffer);
+ binder_transaction_buffer_release(target_proc, NULL, t->buffer,
--- /dev/null
+From stable-owner@vger.kernel.org Wed Feb 22 13:12:32 2023
+From: Lee Jones <lee@kernel.org>
+Date: Wed, 22 Feb 2023 12:12:06 +0000
+Subject: binder: fix pointer cast warning
+To: lee@kernel.org
+Cc: stable@vger.kernel.org, Arnd Bergmann <arnd@arndb.de>, Todd Kjos <tkjos@google.com>, Randy Dunlap <rdunlap@infradead.org>, Christian Brauner <christian.brauner@ubuntu.com>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Carlos Llamas <cmllamas@google.com>
+Message-ID: <20230222121208.898198-4-lee@kernel.org>
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+commit 9a0a930fe2535a76ad70d3f43caeccf0d86a3009 upstream.
+
+binder_uintptr_t is not the same as uintptr_t, so converting it into a
+pointer requires a second cast:
+
+drivers/android/binder.c: In function 'binder_translate_fd_array':
+drivers/android/binder.c:2511:28: error: cast to pointer from integer of different size [-Werror=int-to-pointer-cast]
+ 2511 | sender_ufda_base = (void __user *)sender_uparent->buffer + fda->parent_offset;
+ | ^
+
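+A reduced, hypothetical example of the same pattern (user-space
+compilable, with binder_uintptr_t standing in for the UAPI typedef)
+shows why the intermediate cast is needed on 32-bit builds:
+
+    #include <stdint.h>
+
+    typedef uint64_t binder_uintptr_t;
+
+    static const void *to_ptr(binder_uintptr_t ubuf, uint64_t off)
+    {
+            /* (const void *)ubuf alone warns when pointers are 32 bits:
+             *   cast to pointer from integer of different size
+             */
+            return (const void *)(uintptr_t)ubuf + off;
+    }
+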
+Fixes: 656e01f3ab54 ("binder: read pre-translated fds from sender buffer")
+Acked-by: Todd Kjos <tkjos@google.com>
+Acked-by: Randy Dunlap <rdunlap@infradead.org> # build-tested
+Acked-by: Christian Brauner <christian.brauner@ubuntu.com>
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Link: https://lore.kernel.org/r/20211207122448.1185769-1-arnd@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Carlos Llamas <cmllamas@google.com>
+Signed-off-by: Lee Jones <lee@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/android/binder.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -2544,7 +2544,8 @@ static int binder_translate_fd_array(str
+ */
+ fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
+ fda->parent_offset;
+- sender_ufda_base = (void __user *)sender_uparent->buffer + fda->parent_offset;
++ sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
++ fda->parent_offset;
+
+ if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
+ !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
--- /dev/null
+From stable-owner@vger.kernel.org Wed Feb 22 13:12:35 2023
+From: Lee Jones <lee@kernel.org>
+Date: Wed, 22 Feb 2023 12:12:08 +0000
+Subject: binder: Gracefully handle BINDER_TYPE_FDA objects with num_fds=0
+To: lee@kernel.org
+Cc: stable@vger.kernel.org, Alessandro Astone <ales.astone@gmail.com>, Todd Kjos <tkjos@google.com>, stable <stable@kernel.org>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Carlos Llamas <cmllamas@google.com>
+Message-ID: <20230222121208.898198-6-lee@kernel.org>
+
+From: Alessandro Astone <ales.astone@gmail.com>
+
+commit ef38de9217a04c9077629a24652689d8fdb4c6c6 upstream.
+
+Some android userspace is sending BINDER_TYPE_FDA objects with
+num_fds=0. Like the previous patch, this is reproducible when
+playing a video.
+
+Before commit 09184ae9b575 BINDER_TYPE_FDA objects with num_fds=0
+were 'correctly handled', as in no fixup was performed.
+
+After commit 09184ae9b575 we aggregate fixup and skip regions in
+binder_ptr_fixup structs and distinguish between the two by using
+the skip_size field: if it's 0, then it's a fixup, otherwise skip.
+When processing BINDER_TYPE_FDA objects with num_fds=0 we add a
+skip region of skip_size=0, and this causes issues because now
+binder_do_deferred_txn_copies will think this was a fixup region.
+
+To address that, return early from binder_translate_fd_array to
+avoid adding an empty skip region.
+
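+With num_fds=0 the fixup queued by binder_translate_fd_array() (as
+introduced by 09184ae9b575) degenerates, in effect, to:
+
+    binder_add_fixup(pf_head, fda_offset, 0, 0 * sizeof(u32));
+
+i.e. skip_size == 0, so binder_do_deferred_txn_copies() treats the
+entry as a pointer fixup and, judging from the copy loop quoted earlier
+in this series, writes sizeof(binder_uintptr_t) zero bytes at
+fda_offset instead of skipping nothing. Returning early avoids queueing
+the entry in the first place.
+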
+Fixes: 09184ae9b575 ("binder: defer copies of pre-patched txn data")
+Acked-by: Todd Kjos <tkjos@google.com>
+Cc: stable <stable@kernel.org>
+Signed-off-by: Alessandro Astone <ales.astone@gmail.com>
+Link: https://lore.kernel.org/r/20220415120015.52684-1-ales.astone@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Carlos Llamas <cmllamas@google.com>
+Signed-off-by: Lee Jones <lee@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/android/binder.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -2527,6 +2527,9 @@ static int binder_translate_fd_array(str
+ struct binder_proc *proc = thread->proc;
+ int ret;
+
++ if (fda->num_fds == 0)
++ return 0;
++
+ fd_buf_size = sizeof(u32) * fda->num_fds;
+ if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
+ binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
--- /dev/null
+From stable-owner@vger.kernel.org Wed Feb 22 13:12:29 2023
+From: Lee Jones <lee@kernel.org>
+Date: Wed, 22 Feb 2023 12:12:04 +0000
+Subject: binder: read pre-translated fds from sender buffer
+To: lee@kernel.org
+Cc: stable@vger.kernel.org, Todd Kjos <tkjos@google.com>, Martijn Coenen <maco@android.com>, Christian Brauner <christian.brauner@ubuntu.com>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Carlos Llamas <cmllamas@google.com>
+Message-ID: <20230222121208.898198-2-lee@kernel.org>
+
+From: Todd Kjos <tkjos@google.com>
+
+commit 656e01f3ab54afe71bed066996fc2640881e1220 upstream.
+
+This patch prepares for an upcoming patch where we read
+pre-translated fds from the sender buffer and translate them before
+copying them to the target. It does not change run time.
+
+The patch adds two new parameters to binder_translate_fd_array() to
+hold the sender buffer and sender buffer parent. These parameters let
+us call copy_from_user() directly from the sender instead of using
+binder_alloc_copy_from_buffer() to copy from the target. Also the patch
+adds some new alignment checks. Previously the alignment checks would
+have been done in a different place, but this lets us print more
+useful error messages.
+
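+Reduced to its core (see the hunk below for the real context), the
+change to the per-fd read is:
+
+    /* before: read the fd back out of the target buffer */
+    ret = binder_alloc_copy_from_buffer(&target_proc->alloc, &fd,
+                                        t->buffer, offset, sizeof(fd));
+
+    /* after: read the fd straight from the sender's user memory */
+    ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset,
+                         sizeof(fd));
+
+where sender_ufda_base is the sender-space address of the fd array,
+computed from the new sender_uparent parameter.
+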
+Reviewed-by: Martijn Coenen <maco@android.com>
+Acked-by: Christian Brauner <christian.brauner@ubuntu.com>
+Signed-off-by: Todd Kjos <tkjos@google.com>
+Link: https://lore.kernel.org/r/20211130185152.437403-4-tkjos@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Carlos Llamas <cmllamas@google.com>
+Signed-off-by: Lee Jones <lee@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/android/binder.c | 39 ++++++++++++++++++++++++++++++++-------
+ 1 file changed, 32 insertions(+), 7 deletions(-)
+
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -2270,15 +2270,17 @@ err_fd_not_accepted:
+ }
+
+ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
++ const void __user *sender_ubuffer,
+ struct binder_buffer_object *parent,
++ struct binder_buffer_object *sender_uparent,
+ struct binder_transaction *t,
+ struct binder_thread *thread,
+ struct binder_transaction *in_reply_to)
+ {
+ binder_size_t fdi, fd_buf_size;
+ binder_size_t fda_offset;
++ const void __user *sender_ufda_base;
+ struct binder_proc *proc = thread->proc;
+- struct binder_proc *target_proc = t->to_proc;
+
+ fd_buf_size = sizeof(u32) * fda->num_fds;
+ if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
+@@ -2302,7 +2304,10 @@ static int binder_translate_fd_array(str
+ */
+ fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
+ fda->parent_offset;
+- if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) {
++ sender_ufda_base = (void __user *)sender_uparent->buffer + fda->parent_offset;
++
++ if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
++ !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
+ binder_user_error("%d:%d parent offset not aligned correctly.\n",
+ proc->pid, thread->pid);
+ return -EINVAL;
+@@ -2311,10 +2316,9 @@ static int binder_translate_fd_array(str
+ u32 fd;
+ int ret;
+ binder_size_t offset = fda_offset + fdi * sizeof(fd);
++ binder_size_t sender_uoffset = fdi * sizeof(fd);
+
+- ret = binder_alloc_copy_from_buffer(&target_proc->alloc,
+- &fd, t->buffer,
+- offset, sizeof(fd));
++ ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
+ if (!ret)
+ ret = binder_translate_fd(fd, offset, t, thread,
+ in_reply_to);
+@@ -2987,6 +2991,8 @@ static void binder_transaction(struct bi
+ case BINDER_TYPE_FDA: {
+ struct binder_object ptr_object;
+ binder_size_t parent_offset;
++ struct binder_object user_object;
++ size_t user_parent_size;
+ struct binder_fd_array_object *fda =
+ to_binder_fd_array_object(hdr);
+ size_t num_valid = (buffer_offset - off_start_offset) /
+@@ -3018,8 +3024,27 @@ static void binder_transaction(struct bi
+ return_error_line = __LINE__;
+ goto err_bad_parent;
+ }
+- ret = binder_translate_fd_array(fda, parent, t, thread,
+- in_reply_to);
++ /*
++ * We need to read the user version of the parent
++ * object to get the original user offset
++ */
++ user_parent_size =
++ binder_get_object(proc, user_buffer, t->buffer,
++ parent_offset, &user_object);
++ if (user_parent_size != sizeof(user_object.bbo)) {
++ binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
++ proc->pid, thread->pid,
++ user_parent_size,
++ sizeof(user_object.bbo));
++ return_error = BR_FAILED_REPLY;
++ return_error_param = -EINVAL;
++ return_error_line = __LINE__;
++ goto err_bad_parent;
++ }
++ ret = binder_translate_fd_array(fda, user_buffer,
++ parent,
++ &user_object.bbo, t,
++ thread, in_reply_to);
+ if (!ret)
+ ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
+ t->buffer,
--- /dev/null
+From 858f1bf65d3d9c00b5e2d8ca87dc79ed88267c98 Mon Sep 17 00:00:00 2001
+From: Zhang Wensheng <zhangwensheng5@huawei.com>
+Date: Sat, 21 May 2022 15:37:48 +0800
+Subject: nbd: fix possible overflow on 'first_minor' in nbd_dev_add()
+
+From: Zhang Wensheng <zhangwensheng5@huawei.com>
+
+commit 858f1bf65d3d9c00b5e2d8ca87dc79ed88267c98 upstream.
+
+When 'index' is a big number, it may become negative when forced
+into an 'int'. Then 'index << part_shift' might overflow to a
+positive value that is not greater than '0xfffff', and sysfs might
+complain about duplicate creation. Because of this, moving the
+'index' check to the front fixes the problem and is cleaner.
+
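+A worked example of the bound that the relocated check enforces (the
+part_shift value here is illustrative):
+
+    /* MINORMASK == (1U << 20) - 1 == 0xfffff (1048575)
+     * with part_shift == 5: MINORMASK >> part_shift == 32767
+     *
+     * index == 40000:      40000 << 5 == 1280000 > 0xfffff
+     * index == 0x80000000: the u32 from NBD_ATTR_INDEX turns the
+     *                      int 'index' negative
+     *
+     * both are now rejected before first_minor is ever computed:
+     */
+    if (index < 0 || index > MINORMASK >> part_shift) {
+            printk(KERN_ERR "nbd: illegal input index %d\n", index);
+            return -EINVAL;
+    }
+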
+Fixes: b0d9111a2d53 ("nbd: use an idr to keep track of nbd devices")
+Fixes: 940c264984fd ("nbd: fix possible overflow for 'first_minor' in nbd_dev_add()")
+Signed-off-by: Zhang Wensheng <zhangwensheng5@huawei.com>
+Signed-off-by: Yu Kuai <yukuai3@huawei.com>
+Reviewed-by: Josef Bacik <josef@toxicpanda.com>
+Link: https://lore.kernel.org/r/20220521073749.3146892-6-yukuai3@huawei.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Wen Yang <wenyang.linux@foxmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/block/nbd.c | 23 ++++++++++++-----------
+ 1 file changed, 12 insertions(+), 11 deletions(-)
+
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -1756,17 +1756,7 @@ static struct nbd_device *nbd_dev_add(in
+ refcount_set(&nbd->refs, 0);
+ INIT_LIST_HEAD(&nbd->list);
+ disk->major = NBD_MAJOR;
+-
+- /* Too big first_minor can cause duplicate creation of
+- * sysfs files/links, since index << part_shift might overflow, or
+- * MKDEV() expect that the max bits of first_minor is 20.
+- */
+ disk->first_minor = index << part_shift;
+- if (disk->first_minor < index || disk->first_minor > MINORMASK) {
+- err = -EINVAL;
+- goto out_free_work;
+- }
+-
+ disk->minors = 1 << part_shift;
+ disk->fops = &nbd_fops;
+ disk->private_data = nbd;
+@@ -1871,8 +1861,19 @@ static int nbd_genl_connect(struct sk_bu
+ if (!netlink_capable(skb, CAP_SYS_ADMIN))
+ return -EPERM;
+
+- if (info->attrs[NBD_ATTR_INDEX])
++ if (info->attrs[NBD_ATTR_INDEX]) {
+ index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
++
++ /*
++ * Too big first_minor can cause duplicate creation of
++ * sysfs files/links, since index << part_shift might overflow, or
++ * MKDEV() expect that the max bits of first_minor is 20.
++ */
++ if (index < 0 || index > MINORMASK >> part_shift) {
++ printk(KERN_ERR "nbd: illegal input index %d\n", index);
++ return -EINVAL;
++ }
++ }
+ if (!info->attrs[NBD_ATTR_SOCKETS]) {
+ printk(KERN_ERR "nbd: must specify at least one socket\n");
+ return -EINVAL;
powerpc-64s-radix-fix-rwx-mapping-with-relocated-ker.patch
revert-netfilter-conntrack-fix-bug-in-for_each_sctp_.patch
drm-i915-gvt-fix-double-free-bug-in-split_2mb_gtt_entry.patch
+uaccess-add-speculation-barrier-to-copy_from_user.patch
+binder-read-pre-translated-fds-from-sender-buffer.patch
+binder-defer-copies-of-pre-patched-txn-data.patch
+binder-fix-pointer-cast-warning.patch
+binder-address-corner-cases-in-deferred-copy-and-fixup.patch
+binder-gracefully-handle-binder_type_fda-objects-with-num_fds-0.patch
+nbd-fix-possible-overflow-on-first_minor-in-nbd_dev_add.patch
--- /dev/null
+From 74e19ef0ff8061ef55957c3abd71614ef0f42f47 Mon Sep 17 00:00:00 2001
+From: Dave Hansen <dave.hansen@linux.intel.com>
+Date: Tue, 21 Feb 2023 12:30:15 -0800
+Subject: uaccess: Add speculation barrier to copy_from_user()
+
+From: Dave Hansen <dave.hansen@linux.intel.com>
+
+commit 74e19ef0ff8061ef55957c3abd71614ef0f42f47 upstream.
+
+The results of "access_ok()" can be mis-speculated. The result is that
+you can end up speculatively executing:
+
+ if (access_ok(from, size))
+ // Right here
+
+even for bad from/size combinations. At first glance, it would be ideal
+to just add a speculation barrier to "access_ok()" so that its results
+can never be mis-speculated.
+
+But there are lots of system calls just doing access_ok() via
+"copy_to_user()" and friends (example: fstat() and friends). Those are
+generally not problematic because they do not _consume_ data from
+userspace other than the pointer. They are also very quick and common
+system calls that should not be needlessly slowed down.
+
+"copy_from_user()" on the other hand uses a user-controller pointer and
+is frequently followed up with code that might affect caches. Take
+something like this:
+
+ if (!copy_from_user(&kernelvar, uptr, size))
+ do_something_with(kernelvar);
+
+If userspace passes in an evil 'uptr' that *actually* points to a kernel
+address, and then do_something_with() has cache (or other)
+side-effects, it could allow userspace to infer kernel data values.
+
+Add a barrier to the common copy_from_user() code to prevent
+mis-speculated values from being used after the copy.
+
+Also add a stub for architectures that do not define barrier_nospec().
+This makes the macro usable in generic code.
+
+Since the barrier is now usable in generic code, the x86 #ifdef in the
+BPF code can also go away.
+
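+The resulting pattern in generic code, sketched from the usercopy hunk
+below (the new stub makes barrier_nospec() a no-op on architectures
+that do not provide their own):
+
+    #include <linux/nospec.h>
+
+    if (!should_fail_usercopy() && likely(access_ok(from, n))) {
+            /* a mis-speculated access_ok() result stops here */
+            barrier_nospec();
+            res = raw_copy_from_user(to, from, n);
+    }
+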
+Reported-by: Jordy Zomer <jordyzomer@google.com>
+Suggested-by: Linus Torvalds <torvalds@linuxfoundation.org>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Daniel Borkmann <daniel@iogearbox.net> # BPF bits
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/nospec.h | 4 ++++
+ kernel/bpf/core.c | 2 --
+ lib/usercopy.c | 7 +++++++
+ 3 files changed, 11 insertions(+), 2 deletions(-)
+
+--- a/include/linux/nospec.h
++++ b/include/linux/nospec.h
+@@ -11,6 +11,10 @@
+
+ struct task_struct;
+
++#ifndef barrier_nospec
++# define barrier_nospec() do { } while (0)
++#endif
++
+ /**
+ * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
+ * @index: array element index
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -1648,9 +1648,7 @@ out:
+ * reuse preexisting logic from Spectre v1 mitigation that
+ * happens to produce the required code on x86 for v4 as well.
+ */
+-#ifdef CONFIG_X86
+ barrier_nospec();
+-#endif
+ CONT;
+ #define LDST(SIZEOP, SIZE) \
+ STX_MEM_##SIZEOP: \
+--- a/lib/usercopy.c
++++ b/lib/usercopy.c
+@@ -3,6 +3,7 @@
+ #include <linux/fault-inject-usercopy.h>
+ #include <linux/instrumented.h>
+ #include <linux/uaccess.h>
++#include <linux/nospec.h>
+
+ /* out-of-line parts */
+
+@@ -12,6 +13,12 @@ unsigned long _copy_from_user(void *to,
+ unsigned long res = n;
+ might_fault();
+ if (!should_fail_usercopy() && likely(access_ok(from, n))) {
++ /*
++ * Ensure that bad access_ok() speculation will not
++ * lead to nasty side effects *after* the copy is
++ * finished:
++ */
++ barrier_nospec();
+ instrument_copy_from_user(to, from, n);
+ res = raw_copy_from_user(to, from, n);
+ }