git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.6-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 18 Aug 2025 10:07:40 +0000 (12:07 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 18 Aug 2025 10:07:40 +0000 (12:07 +0200)
added patches:
cifs-reset-iface-weights-when-we-cannot-find-a-candidate.patch
ext4-fix-largest-free-orders-lists-corruption-on-mb_optimize_scan-switch.patch
ext4-fix-zombie-groups-in-average-fragment-size-lists.patch
iommu-arm-smmu-qcom-add-sm6115-mdss-compatible.patch
iommufd-prevent-align-overflow.patch
iommufd-report-unmapped-bytes-in-the-error-path-of-iopt_unmap_iova_range.patch
misc-rtsx-usb-ensure-mmc-child-device-is-active-when-card-is-present.patch
usb-core-config-prevent-oob-read-in-ss-endpoint-companion-parsing.patch
usb-typec-ucsi-update-power_supply-on-power-role-change.patch

queue-6.6/cifs-reset-iface-weights-when-we-cannot-find-a-candidate.patch [new file with mode: 0644]
queue-6.6/ext4-fix-largest-free-orders-lists-corruption-on-mb_optimize_scan-switch.patch [new file with mode: 0644]
queue-6.6/ext4-fix-zombie-groups-in-average-fragment-size-lists.patch [new file with mode: 0644]
queue-6.6/iommu-arm-smmu-qcom-add-sm6115-mdss-compatible.patch [new file with mode: 0644]
queue-6.6/iommufd-prevent-align-overflow.patch [new file with mode: 0644]
queue-6.6/iommufd-report-unmapped-bytes-in-the-error-path-of-iopt_unmap_iova_range.patch [new file with mode: 0644]
queue-6.6/misc-rtsx-usb-ensure-mmc-child-device-is-active-when-card-is-present.patch [new file with mode: 0644]
queue-6.6/series
queue-6.6/usb-core-config-prevent-oob-read-in-ss-endpoint-companion-parsing.patch [new file with mode: 0644]
queue-6.6/usb-typec-ucsi-update-power_supply-on-power-role-change.patch [new file with mode: 0644]

diff --git a/queue-6.6/cifs-reset-iface-weights-when-we-cannot-find-a-candidate.patch b/queue-6.6/cifs-reset-iface-weights-when-we-cannot-find-a-candidate.patch
new file mode 100644 (file)
index 0000000..37eded8
--- /dev/null
@@ -0,0 +1,61 @@
+From 9d5eff7821f6d70f7d1b4d8a60680fba4de868a7 Mon Sep 17 00:00:00 2001
+From: Shyam Prasad N <sprasad@microsoft.com>
+Date: Thu, 17 Jul 2025 17:36:13 +0530
+Subject: cifs: reset iface weights when we cannot find a candidate
+
+From: Shyam Prasad N <sprasad@microsoft.com>
+
+commit 9d5eff7821f6d70f7d1b4d8a60680fba4de868a7 upstream.
+
+We now do a weighted selection of server interfaces when allocating
+new channels. The weights are decided based on the advertised speed.
+The fulfilled weight for an interface is a counter that is used to
+track the interface selection. It should be reset back to zero once
+all interfaces have fulfilled their weight.
+
+In cifs_chan_update_iface, this reset logic was missing. As a result,
+when the server interface list changes, the client may not be able
+to find a new candidate for other channels once all interfaces have
+fulfilled their weights.
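+
+As an illustration only (a standalone sketch with hypothetical structures
+and weights, not the actual cifs code), the selection-and-reset pattern
+the fix restores looks roughly like this:
+
+    #include <stdio.h>
+
+    struct iface {
+        const char *name;
+        unsigned int weight;            /* derived from advertised speed */
+        unsigned int weight_fulfilled;  /* channels already placed here */
+    };
+
+    static struct iface *pick_iface(struct iface *ifaces, int n)
+    {
+        int retry = 0;
+
+    again:
+        for (int i = 0; i < n; i++) {
+            if (ifaces[i].weight_fulfilled < ifaces[i].weight) {
+                ifaces[i].weight_fulfilled++;
+                return &ifaces[i];
+            }
+        }
+        /* all weights fulfilled: reset the counters and try once more */
+        for (int i = 0; i < n; i++)
+            ifaces[i].weight_fulfilled = 0;
+        if (!retry++)
+            goto again;
+        return NULL;
+    }
+
+    int main(void)
+    {
+        struct iface ifaces[] = {
+            { "10G", 2, 2 },    /* weight already fulfilled */
+            { "1G",  1, 1 },    /* weight already fulfilled */
+        };
+
+        /* without the reset, no candidate would ever be found here */
+        struct iface *sel = pick_iface(ifaces, 2);
+        printf("selected: %s\n", sel ? sel->name : "none");
+        return 0;
+    }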
+
+Fixes: a6d8fb54a515 ("cifs: distribute channels across interfaces based on speed")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Shyam Prasad N <sprasad@microsoft.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/sess.c |    9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/fs/smb/client/sess.c
++++ b/fs/smb/client/sess.c
+@@ -372,6 +372,7 @@ cifs_chan_update_iface(struct cifs_ses *
+       struct cifs_server_iface *old_iface = NULL;
+       struct cifs_server_iface *last_iface = NULL;
+       struct sockaddr_storage ss;
++      int retry = 0;
+       spin_lock(&ses->chan_lock);
+       chan_index = cifs_ses_get_chan_index(ses, server);
+@@ -400,6 +401,7 @@ cifs_chan_update_iface(struct cifs_ses *
+               return;
+       }
++try_again:
+       last_iface = list_last_entry(&ses->iface_list, struct cifs_server_iface,
+                                    iface_head);
+       iface_min_speed = last_iface->speed;
+@@ -437,6 +439,13 @@ cifs_chan_update_iface(struct cifs_ses *
+       }
+       if (list_entry_is_head(iface, &ses->iface_list, iface_head)) {
++              list_for_each_entry(iface, &ses->iface_list, iface_head)
++                      iface->weight_fulfilled = 0;
++
++              /* see if it can be satisfied in second attempt */
++              if (!retry++)
++                      goto try_again;
++
+               iface = NULL;
+               cifs_dbg(FYI, "unable to find a suitable iface\n");
+       }
diff --git a/queue-6.6/ext4-fix-largest-free-orders-lists-corruption-on-mb_optimize_scan-switch.patch b/queue-6.6/ext4-fix-largest-free-orders-lists-corruption-on-mb_optimize_scan-switch.patch
new file mode 100644 (file)
index 0000000..6489b9e
--- /dev/null
@@ -0,0 +1,92 @@
+From 7d345aa1fac4c2ec9584fbd6f389f2c2368671d5 Mon Sep 17 00:00:00 2001
+From: Baokun Li <libaokun1@huawei.com>
+Date: Mon, 14 Jul 2025 21:03:21 +0800
+Subject: ext4: fix largest free orders lists corruption on mb_optimize_scan switch
+
+From: Baokun Li <libaokun1@huawei.com>
+
+commit 7d345aa1fac4c2ec9584fbd6f389f2c2368671d5 upstream.
+
+The grp->bb_largest_free_order is updated regardless of whether
+mb_optimize_scan is enabled. This can lead to inconsistencies between
+grp->bb_largest_free_order and the actual s_mb_largest_free_orders list
+index when mb_optimize_scan is repeatedly enabled and disabled via remount.
+
+For example, suppose mb_optimize_scan is initially enabled, the largest
+free order is 3, and the group is in s_mb_largest_free_orders[3]. Then
+mb_optimize_scan is disabled via remount and block allocations occur,
+updating the largest free order to 2. Finally, mb_optimize_scan is
+re-enabled via remount, and more block allocations update the largest
+free order to 1.
+
+At this point, the group would be removed from s_mb_largest_free_orders[3]
+under the protection of s_mb_largest_free_orders_locks[2]. This lock
+mismatch can lead to list corruption.
+
+To fix this, whenever grp->bb_largest_free_order changes, we now always
+attempt to remove the group from its old order list. However, we only
+insert the group into the new order list if `mb_optimize_scan` is enabled.
+This approach helps prevent lock inconsistencies and ensures the data in
+the order lists remains reliable.
+
+Fixes: 196e402adf2e ("ext4: improve cr 0 / cr 1 group scanning")
+CC: stable@vger.kernel.org
+Suggested-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Zhang Yi <yi.zhang@huawei.com>
+Link: https://patch.msgid.link/20250714130327.1830534-12-libaokun1@huawei.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/mballoc.c |   33 ++++++++++++++-------------------
+ 1 file changed, 14 insertions(+), 19 deletions(-)
+
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1150,33 +1150,28 @@ static void
+ mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
+ {
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+-      int i;
++      int new, old = grp->bb_largest_free_order;
+-      for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--)
+-              if (grp->bb_counters[i] > 0)
++      for (new = MB_NUM_ORDERS(sb) - 1; new >= 0; new--)
++              if (grp->bb_counters[new] > 0)
+                       break;
++
+       /* No need to move between order lists? */
+-      if (!test_opt2(sb, MB_OPTIMIZE_SCAN) ||
+-          i == grp->bb_largest_free_order) {
+-              grp->bb_largest_free_order = i;
++      if (new == old)
+               return;
+-      }
+-      if (grp->bb_largest_free_order >= 0) {
+-              write_lock(&sbi->s_mb_largest_free_orders_locks[
+-                                            grp->bb_largest_free_order]);
++      if (old >= 0 && !list_empty(&grp->bb_largest_free_order_node)) {
++              write_lock(&sbi->s_mb_largest_free_orders_locks[old]);
+               list_del_init(&grp->bb_largest_free_order_node);
+-              write_unlock(&sbi->s_mb_largest_free_orders_locks[
+-                                            grp->bb_largest_free_order]);
++              write_unlock(&sbi->s_mb_largest_free_orders_locks[old]);
+       }
+-      grp->bb_largest_free_order = i;
+-      if (grp->bb_largest_free_order >= 0 && grp->bb_free) {
+-              write_lock(&sbi->s_mb_largest_free_orders_locks[
+-                                            grp->bb_largest_free_order]);
++
++      grp->bb_largest_free_order = new;
++      if (test_opt2(sb, MB_OPTIMIZE_SCAN) && new >= 0 && grp->bb_free) {
++              write_lock(&sbi->s_mb_largest_free_orders_locks[new]);
+               list_add_tail(&grp->bb_largest_free_order_node,
+-                    &sbi->s_mb_largest_free_orders[grp->bb_largest_free_order]);
+-              write_unlock(&sbi->s_mb_largest_free_orders_locks[
+-                                            grp->bb_largest_free_order]);
++                            &sbi->s_mb_largest_free_orders[new]);
++              write_unlock(&sbi->s_mb_largest_free_orders_locks[new]);
+       }
+ }
diff --git a/queue-6.6/ext4-fix-zombie-groups-in-average-fragment-size-lists.patch b/queue-6.6/ext4-fix-zombie-groups-in-average-fragment-size-lists.patch
new file mode 100644 (file)
index 0000000..4c44084
--- /dev/null
@@ -0,0 +1,85 @@
+From 1c320d8e92925bb7615f83a7b6e3f402a5c2ca63 Mon Sep 17 00:00:00 2001
+From: Baokun Li <libaokun1@huawei.com>
+Date: Mon, 14 Jul 2025 21:03:20 +0800
+Subject: ext4: fix zombie groups in average fragment size lists
+
+From: Baokun Li <libaokun1@huawei.com>
+
+commit 1c320d8e92925bb7615f83a7b6e3f402a5c2ca63 upstream.
+
+Groups with no free blocks shouldn't be in any average fragment size list.
+However, when all blocks in a group are allocated (i.e., bb_fragments or
+bb_free is 0), we currently skip updating the average fragment size, which
+means the group isn't removed from its previous s_mb_avg_fragment_size[old]
+list.
+
+This created "zombie" groups that were always skipped during traversal as
+they couldn't satisfy any block allocation requests, negatively impacting
+traversal efficiency.
+
+Therefore, when a group becomes completely full, bb_avg_fragment_size_order
+is now set to -1. If the old order was not -1, a removal operation is
+performed; if the new order is not -1, an insertion is performed.
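+
+As a toy, standalone illustration of that invariant (hypothetical bucket
+structure, not the mballoc lists): a group that becomes completely full
+must be taken off its bucket, otherwise it lingers there as a zombie:
+
+    #include <stdio.h>
+
+    #define NBUCKETS 4
+
+    struct group {
+        int free;            /* free blocks in the group */
+        int order;           /* current bucket, -1 = on no list */
+        struct group *next;  /* bucket membership */
+    };
+
+    static struct group *buckets[NBUCKETS];
+
+    static void bucket_del(struct group *g)
+    {
+        struct group **pp = &buckets[g->order];
+
+        while (*pp && *pp != g)
+            pp = &(*pp)->next;
+        if (*pp)
+            *pp = g->next;
+    }
+
+    static void update_order(struct group *g, int new_order)
+    {
+        if (new_order == g->order)
+            return;
+        if (g->order >= 0)
+            bucket_del(g);              /* always leave the old bucket */
+        g->order = new_order;
+        if (new_order >= 0) {           /* full groups (order -1) join no list */
+            g->next = buckets[new_order];
+            buckets[new_order] = g;
+        }
+    }
+
+    int main(void)
+    {
+        struct group g = { .free = 8, .order = -1 };
+
+        update_order(&g, 2);            /* group has free space: bucket 2 */
+        g.free = 0;
+        update_order(&g, -1);           /* group is now full: leave all buckets */
+        printf("bucket 2 %s\n", buckets[2] ? "still holds the group" : "is empty");
+        return 0;
+    }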
+
+Fixes: 196e402adf2e ("ext4: improve cr 0 / cr 1 group scanning")
+CC: stable@vger.kernel.org
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Zhang Yi <yi.zhang@huawei.com>
+Link: https://patch.msgid.link/20250714130327.1830534-11-libaokun1@huawei.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/mballoc.c |   34 +++++++++++++++++-----------------
+ 1 file changed, 17 insertions(+), 17 deletions(-)
+
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -841,30 +841,30 @@ static void
+ mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
+ {
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+-      int new_order;
++      int new, old;
+-      if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_fragments == 0)
++      if (!test_opt2(sb, MB_OPTIMIZE_SCAN))
+               return;
+-      new_order = mb_avg_fragment_size_order(sb,
+-                                      grp->bb_free / grp->bb_fragments);
+-      if (new_order == grp->bb_avg_fragment_size_order)
++      old = grp->bb_avg_fragment_size_order;
++      new = grp->bb_fragments == 0 ? -1 :
++            mb_avg_fragment_size_order(sb, grp->bb_free / grp->bb_fragments);
++      if (new == old)
+               return;
+-      if (grp->bb_avg_fragment_size_order != -1) {
+-              write_lock(&sbi->s_mb_avg_fragment_size_locks[
+-                                      grp->bb_avg_fragment_size_order]);
++      if (old >= 0) {
++              write_lock(&sbi->s_mb_avg_fragment_size_locks[old]);
+               list_del(&grp->bb_avg_fragment_size_node);
+-              write_unlock(&sbi->s_mb_avg_fragment_size_locks[
+-                                      grp->bb_avg_fragment_size_order]);
++              write_unlock(&sbi->s_mb_avg_fragment_size_locks[old]);
++      }
++
++      grp->bb_avg_fragment_size_order = new;
++      if (new >= 0) {
++              write_lock(&sbi->s_mb_avg_fragment_size_locks[new]);
++              list_add_tail(&grp->bb_avg_fragment_size_node,
++                              &sbi->s_mb_avg_fragment_size[new]);
++              write_unlock(&sbi->s_mb_avg_fragment_size_locks[new]);
+       }
+-      grp->bb_avg_fragment_size_order = new_order;
+-      write_lock(&sbi->s_mb_avg_fragment_size_locks[
+-                                      grp->bb_avg_fragment_size_order]);
+-      list_add_tail(&grp->bb_avg_fragment_size_node,
+-              &sbi->s_mb_avg_fragment_size[grp->bb_avg_fragment_size_order]);
+-      write_unlock(&sbi->s_mb_avg_fragment_size_locks[
+-                                      grp->bb_avg_fragment_size_order]);
+ }
+ /*
diff --git a/queue-6.6/iommu-arm-smmu-qcom-add-sm6115-mdss-compatible.patch b/queue-6.6/iommu-arm-smmu-qcom-add-sm6115-mdss-compatible.patch
new file mode 100644 (file)
index 0000000..5575e7a
--- /dev/null
@@ -0,0 +1,111 @@
+From f7fa8520f30373ce99c436c4d57c76befdacbef3 Mon Sep 17 00:00:00 2001
+From: Alexey Klimov <alexey.klimov@linaro.org>
+Date: Fri, 13 Jun 2025 18:32:38 +0100
+Subject: iommu/arm-smmu-qcom: Add SM6115 MDSS compatible
+
+From: Alexey Klimov <alexey.klimov@linaro.org>
+
+commit f7fa8520f30373ce99c436c4d57c76befdacbef3 upstream.
+
+Add the SM6115 MDSS compatible to the clients compatible list, as it
+also needs that workaround.
+Without this workaround, for example, the QRB4210 RB2, which is based
+on SM4250/SM6115, generates a lot of unhandled SMMU context faults
+during boot:
+
+arm_smmu_context_fault: 116854 callbacks suppressed
+arm-smmu c600000.iommu: Unhandled context fault: fsr=0x402,
+iova=0x5c0ec600, fsynr=0x320021, cbfrsynra=0x420, cb=5
+arm-smmu c600000.iommu: FSR    = 00000402 [Format=2 TF], SID=0x420
+arm-smmu c600000.iommu: FSYNR0 = 00320021 [S1CBNDX=50 PNU PLVL=1]
+arm-smmu c600000.iommu: Unhandled context fault: fsr=0x402,
+iova=0x5c0d7800, fsynr=0x320021, cbfrsynra=0x420, cb=5
+arm-smmu c600000.iommu: FSR    = 00000402 [Format=2 TF], SID=0x420
+
+Failed initialisation of the lontium lt9611uxc, the GPU and the DPU is
+also observed (binding of the MDSS components triggered by lt9611uxc
+has failed):
+
+ ------------[ cut here ]------------
+ !aspace
+ WARNING: CPU: 6 PID: 324 at drivers/gpu/drm/msm/msm_gem_vma.c:130 msm_gem_vma_init+0x150/0x18c [msm]
+ Modules linked in: ... (long list of modules)
+ CPU: 6 UID: 0 PID: 324 Comm: (udev-worker) Not tainted 6.15.0-03037-gaacc73ceeb8b #4 PREEMPT
+ Hardware name: Qualcomm Technologies, Inc. QRB4210 RB2 (DT)
+ pstate: 80000005 (Nzcv daif -PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+ pc : msm_gem_vma_init+0x150/0x18c [msm]
+ lr : msm_gem_vma_init+0x150/0x18c [msm]
+ sp : ffff80008144b280
+               ...
+ Call trace:
+  msm_gem_vma_init+0x150/0x18c [msm] (P)
+  get_vma_locked+0xc0/0x194 [msm]
+  msm_gem_get_and_pin_iova_range+0x4c/0xdc [msm]
+  msm_gem_kernel_new+0x48/0x160 [msm]
+  msm_gpu_init+0x34c/0x53c [msm]
+  adreno_gpu_init+0x1b0/0x2d8 [msm]
+  a6xx_gpu_init+0x1e8/0x9e0 [msm]
+  adreno_bind+0x2b8/0x348 [msm]
+  component_bind_all+0x100/0x230
+  msm_drm_bind+0x13c/0x3d0 [msm]
+  try_to_bring_up_aggregate_device+0x164/0x1d0
+  __component_add+0xa4/0x174
+  component_add+0x14/0x20
+  dsi_dev_attach+0x20/0x34 [msm]
+  dsi_host_attach+0x58/0x98 [msm]
+  devm_mipi_dsi_attach+0x34/0x90
+  lt9611uxc_attach_dsi.isra.0+0x94/0x124 [lontium_lt9611uxc]
+  lt9611uxc_probe+0x540/0x5fc [lontium_lt9611uxc]
+  i2c_device_probe+0x148/0x2a8
+  really_probe+0xbc/0x2c0
+  __driver_probe_device+0x78/0x120
+  driver_probe_device+0x3c/0x154
+  __driver_attach+0x90/0x1a0
+  bus_for_each_dev+0x68/0xb8
+  driver_attach+0x24/0x30
+  bus_add_driver+0xe4/0x208
+  driver_register+0x68/0x124
+  i2c_register_driver+0x48/0xcc
+  lt9611uxc_driver_init+0x20/0x1000 [lontium_lt9611uxc]
+  do_one_initcall+0x60/0x1d4
+  do_init_module+0x54/0x1fc
+  load_module+0x1748/0x1c8c
+  init_module_from_file+0x74/0xa0
+  __arm64_sys_finit_module+0x130/0x2f8
+  invoke_syscall+0x48/0x104
+  el0_svc_common.constprop.0+0xc0/0xe0
+  do_el0_svc+0x1c/0x28
+  el0_svc+0x2c/0x80
+  el0t_64_sync_handler+0x10c/0x138
+  el0t_64_sync+0x198/0x19c
+ ---[ end trace 0000000000000000 ]---
+ msm_dpu 5e01000.display-controller: [drm:msm_gpu_init [msm]] *ERROR* could not allocate memptrs: -22
+ msm_dpu 5e01000.display-controller: failed to load adreno gpu
+ platform a400000.remoteproc:glink-edge:apr:service@7:dais: Adding to iommu group 19
+ msm_dpu 5e01000.display-controller: failed to bind 5900000.gpu (ops a3xx_ops [msm]): -22
+ msm_dpu 5e01000.display-controller: adev bind failed: -22
+ lt9611uxc 0-002b: failed to attach dsi to host
+ lt9611uxc 0-002b: probe with driver lt9611uxc failed with error -22
+
+Suggested-by: Bjorn Andersson <andersson@kernel.org>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
+Fixes: 3581b7062cec ("drm/msm/disp/dpu1: add support for display on SM6115")
+Cc: stable@vger.kernel.org
+Signed-off-by: Alexey Klimov <alexey.klimov@linaro.org>
+Link: https://lore.kernel.org/r/20250613173238.15061-1-alexey.klimov@linaro.org
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
++++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+@@ -255,6 +255,7 @@ static const struct of_device_id qcom_sm
+       { .compatible = "qcom,sdm670-mdss" },
+       { .compatible = "qcom,sdm845-mdss" },
+       { .compatible = "qcom,sdm845-mss-pil" },
++      { .compatible = "qcom,sm6115-mdss" },
+       { .compatible = "qcom,sm6350-mdss" },
+       { .compatible = "qcom,sm6375-mdss" },
+       { .compatible = "qcom,sm8150-mdss" },
diff --git a/queue-6.6/iommufd-prevent-align-overflow.patch b/queue-6.6/iommufd-prevent-align-overflow.patch
new file mode 100644 (file)
index 0000000..af38eb3
--- /dev/null
@@ -0,0 +1,99 @@
+From b42497e3c0e74db061eafad41c0cd7243c46436b Mon Sep 17 00:00:00 2001
+From: Jason Gunthorpe <jgg@nvidia.com>
+Date: Thu, 17 Jul 2025 11:46:55 -0300
+Subject: iommufd: Prevent ALIGN() overflow
+
+From: Jason Gunthorpe <jgg@nvidia.com>
+
+commit b42497e3c0e74db061eafad41c0cd7243c46436b upstream.
+
+When allocating IOVA the candidate range gets aligned to the target
+alignment. If the range is close to ULONG_MAX then the ALIGN() can
+wrap, resulting in a corrupted iova.
+
+Open code the ALIGN() using check_add_overflow() to prevent this.
+This also simplifies the checks, as the length no longer needs to be
+checked earlier.
+
+Consolidate the two copies of this code under a single helper.
+
+This bug would allow userspace to create a mapping that overlaps with some
+other mapping or a reserved range.
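+
+To make the failure mode concrete, a small standalone sketch (illustrative
+values only; __builtin_add_overflow() stands in for the kernel's
+check_add_overflow()):
+
+    #include <stdio.h>
+    #include <limits.h>
+
+    int main(void)
+    {
+        unsigned long align = 4096;
+        unsigned long start = ULONG_MAX - 100;  /* candidate near the top */
+        unsigned long aligned, sum;
+
+        /* the plain ALIGN() form silently wraps to a tiny value */
+        aligned = (start + align - 1) & ~(align - 1);
+        printf("naive ALIGN()  : %#lx\n", aligned);      /* prints 0 */
+
+        /* the fixed form detects the wrap before masking */
+        if (__builtin_add_overflow(start, align - 1, &sum))
+            printf("checked ALIGN(): overflow, range rejected\n");
+        else
+            printf("checked ALIGN(): %#lx\n", sum & ~(align - 1));
+        return 0;
+    }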
+
+Cc: stable@vger.kernel.org
+Fixes: 51fe6141f0f6 ("iommufd: Data structure to provide IOVA to PFN mapping")
+Reported-by: syzbot+c2f65e2801743ca64e08@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/r/685af644.a00a0220.2e5631.0094.GAE@google.com
+Reviewed-by: Yi Liu <yi.l.liu@intel.com>
+Reviewed-by: Nicolin Chen <nicolinc@nvidia.com>
+Link: https://patch.msgid.link/all/1-v1-7b4a16fc390b+10f4-iommufd_alloc_overflow_jgg@nvidia.com/
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iommu/iommufd/io_pagetable.c |   41 +++++++++++++++++++++--------------
+ 1 file changed, 25 insertions(+), 16 deletions(-)
+
+--- a/drivers/iommu/iommufd/io_pagetable.c
++++ b/drivers/iommu/iommufd/io_pagetable.c
+@@ -69,36 +69,45 @@ struct iopt_area *iopt_area_contig_next(
+       return iter->area;
+ }
+-static bool __alloc_iova_check_hole(struct interval_tree_double_span_iter *span,
+-                                  unsigned long length,
+-                                  unsigned long iova_alignment,
+-                                  unsigned long page_offset)
++static bool __alloc_iova_check_range(unsigned long *start, unsigned long last,
++                                   unsigned long length,
++                                   unsigned long iova_alignment,
++                                   unsigned long page_offset)
+ {
+-      if (span->is_used || span->last_hole - span->start_hole < length - 1)
++      unsigned long aligned_start;
++
++      /* ALIGN_UP() */
++      if (check_add_overflow(*start, iova_alignment - 1, &aligned_start))
+               return false;
++      aligned_start &= ~(iova_alignment - 1);
++      aligned_start |= page_offset;
+-      span->start_hole = ALIGN(span->start_hole, iova_alignment) |
+-                         page_offset;
+-      if (span->start_hole > span->last_hole ||
+-          span->last_hole - span->start_hole < length - 1)
++      if (aligned_start >= last || last - aligned_start < length - 1)
+               return false;
++      *start = aligned_start;
+       return true;
+ }
+-static bool __alloc_iova_check_used(struct interval_tree_span_iter *span,
++static bool __alloc_iova_check_hole(struct interval_tree_double_span_iter *span,
+                                   unsigned long length,
+                                   unsigned long iova_alignment,
+                                   unsigned long page_offset)
+ {
+-      if (span->is_hole || span->last_used - span->start_used < length - 1)
++      if (span->is_used)
+               return false;
++      return __alloc_iova_check_range(&span->start_hole, span->last_hole,
++                                      length, iova_alignment, page_offset);
++}
+-      span->start_used = ALIGN(span->start_used, iova_alignment) |
+-                         page_offset;
+-      if (span->start_used > span->last_used ||
+-          span->last_used - span->start_used < length - 1)
++static bool __alloc_iova_check_used(struct interval_tree_span_iter *span,
++                                  unsigned long length,
++                                  unsigned long iova_alignment,
++                                  unsigned long page_offset)
++{
++      if (span->is_hole)
+               return false;
+-      return true;
++      return __alloc_iova_check_range(&span->start_used, span->last_used,
++                                      length, iova_alignment, page_offset);
+ }
+ /*
diff --git a/queue-6.6/iommufd-report-unmapped-bytes-in-the-error-path-of-iopt_unmap_iova_range.patch b/queue-6.6/iommufd-report-unmapped-bytes-in-the-error-path-of-iopt_unmap_iova_range.patch
new file mode 100644 (file)
index 0000000..d55837c
--- /dev/null
@@ -0,0 +1,47 @@
+From b23e09f9997771b4b739c1c694fa832b5fa2de02 Mon Sep 17 00:00:00 2001
+From: Nicolin Chen <nicolinc@nvidia.com>
+Date: Wed, 9 Jul 2025 22:58:53 -0700
+Subject: iommufd: Report unmapped bytes in the error path of iopt_unmap_iova_range
+
+From: Nicolin Chen <nicolinc@nvidia.com>
+
+commit b23e09f9997771b4b739c1c694fa832b5fa2de02 upstream.
+
+There are callers that read the unmapped bytes even when rc != 0. Thus, do
+not forget to report it in the error path too.
+
+Fixes: 8d40205f6093 ("iommufd: Add kAPI toward external drivers for kernel access")
+Link: https://patch.msgid.link/r/e2b61303bbc008ba1a4e2d7c2a2894749b59fdac.1752126748.git.nicolinc@nvidia.com
+Cc: stable@vger.kernel.org
+Reviewed-by: Kevin Tian <kevin.tian@intel.com>
+Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iommu/iommufd/io_pagetable.c |    7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/drivers/iommu/iommufd/io_pagetable.c
++++ b/drivers/iommu/iommufd/io_pagetable.c
+@@ -524,8 +524,10 @@ again:
+                       iommufd_access_notify_unmap(iopt, area_first, length);
+                       /* Something is not responding to unmap requests. */
+                       tries++;
+-                      if (WARN_ON(tries > 100))
+-                              return -EDEADLOCK;
++                      if (WARN_ON(tries > 100)) {
++                              rc = -EDEADLOCK;
++                              goto out_unmapped;
++                      }
+                       goto again;
+               }
+@@ -547,6 +549,7 @@ again:
+ out_unlock_iova:
+       up_write(&iopt->iova_rwsem);
+       up_read(&iopt->domains_rwsem);
++out_unmapped:
+       if (unmapped)
+               *unmapped = unmapped_bytes;
+       return rc;
diff --git a/queue-6.6/misc-rtsx-usb-ensure-mmc-child-device-is-active-when-card-is-present.patch b/queue-6.6/misc-rtsx-usb-ensure-mmc-child-device-is-active-when-card-is-present.patch
new file mode 100644 (file)
index 0000000..39fa304
--- /dev/null
@@ -0,0 +1,69 @@
+From 966c5cd72be8989c8a559ddef8e8ff07a37c5eb0 Mon Sep 17 00:00:00 2001
+From: Ricky Wu <ricky_wu@realtek.com>
+Date: Fri, 11 Jul 2025 22:01:43 +0800
+Subject: misc: rtsx: usb: Ensure mmc child device is active when card is present
+
+From: Ricky Wu <ricky_wu@realtek.com>
+
+commit 966c5cd72be8989c8a559ddef8e8ff07a37c5eb0 upstream.
+
+When a card is present in the reader, the driver currently defers
+autosuspend by returning -EAGAIN during the suspend callback to
+trigger USB remote wakeup signaling. However, this does not guarantee
+that the mmc child device has been resumed, which may cause issues if
+it remains suspended while the card is accessible.
+This patch ensures that all child devices, including the mmc host
+controller, are explicitly resumed before returning -EAGAIN. This
+fixes a corner case introduced by earlier remote wakeup handling,
+improving reliability of runtime PM when a card is inserted.
+
+Fixes: 883a87ddf2f1 ("misc: rtsx_usb: Use USB remote wakeup signaling for card insertion detection")
+Cc: stable@vger.kernel.org
+Signed-off-by: Ricky Wu <ricky_wu@realtek.com>
+Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>
+Link: https://lore.kernel.org/r/20250711140143.2105224-1-ricky_wu@realtek.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/misc/cardreader/rtsx_usb.c |   16 +++++++++-------
+ 1 file changed, 9 insertions(+), 7 deletions(-)
+
+--- a/drivers/misc/cardreader/rtsx_usb.c
++++ b/drivers/misc/cardreader/rtsx_usb.c
+@@ -698,6 +698,12 @@ static void rtsx_usb_disconnect(struct u
+ }
+ #ifdef CONFIG_PM
++static int rtsx_usb_resume_child(struct device *dev, void *data)
++{
++      pm_request_resume(dev);
++      return 0;
++}
++
+ static int rtsx_usb_suspend(struct usb_interface *intf, pm_message_t message)
+ {
+       struct rtsx_ucr *ucr =
+@@ -713,8 +719,10 @@ static int rtsx_usb_suspend(struct usb_i
+                       mutex_unlock(&ucr->dev_mutex);
+                       /* Defer the autosuspend if card exists */
+-                      if (val & (SD_CD | MS_CD))
++                      if (val & (SD_CD | MS_CD)) {
++                              device_for_each_child(&intf->dev, NULL, rtsx_usb_resume_child);
+                               return -EAGAIN;
++                      }
+               } else {
+                       /* There is an ongoing operation*/
+                       return -EAGAIN;
+@@ -724,12 +732,6 @@ static int rtsx_usb_suspend(struct usb_i
+       return 0;
+ }
+-static int rtsx_usb_resume_child(struct device *dev, void *data)
+-{
+-      pm_request_resume(dev);
+-      return 0;
+-}
+-
+ static int rtsx_usb_resume(struct usb_interface *intf)
+ {
+       device_for_each_child(&intf->dev, NULL, rtsx_usb_resume_child);
diff --git a/queue-6.6/series b/queue-6.6/series
index 96c72cfe92e95fd8df91d1b2acf322f30cb4e7f4..28a849e38bb091796ae62d0613a1bf4a6fa2b307 100644 (file)
--- a/queue-6.6/series
@@ -298,3 +298,12 @@ rtc-ds1307-remove-clear-of-oscillator-stop-flag-osf-.patch
 scsi-lpfc-remove-redundant-assignment-to-avoid-memor.patch
 asoc-fsl_sai-replace-regmap_write-with-regmap_update.patch
 drm-amdgpu-fix-incorrect-vm-flags-to-map-bo.patch
+cifs-reset-iface-weights-when-we-cannot-find-a-candidate.patch
+iommu-arm-smmu-qcom-add-sm6115-mdss-compatible.patch
+iommufd-report-unmapped-bytes-in-the-error-path-of-iopt_unmap_iova_range.patch
+iommufd-prevent-align-overflow.patch
+ext4-fix-zombie-groups-in-average-fragment-size-lists.patch
+ext4-fix-largest-free-orders-lists-corruption-on-mb_optimize_scan-switch.patch
+usb-core-config-prevent-oob-read-in-ss-endpoint-companion-parsing.patch
+misc-rtsx-usb-ensure-mmc-child-device-is-active-when-card-is-present.patch
+usb-typec-ucsi-update-power_supply-on-power-role-change.patch
diff --git a/queue-6.6/usb-core-config-prevent-oob-read-in-ss-endpoint-companion-parsing.patch b/queue-6.6/usb-core-config-prevent-oob-read-in-ss-endpoint-companion-parsing.patch
new file mode 100644 (file)
index 0000000..9359627
--- /dev/null
@@ -0,0 +1,41 @@
+From cf16f408364efd8a68f39011a3b073c83a03612d Mon Sep 17 00:00:00 2001
+From: Xinyu Liu <katieeliu@tencent.com>
+Date: Mon, 30 Jun 2025 10:02:56 +0800
+Subject: usb: core: config: Prevent OOB read in SS endpoint companion parsing
+
+From: Xinyu Liu <katieeliu@tencent.com>
+
+commit cf16f408364efd8a68f39011a3b073c83a03612d upstream.
+
+usb_parse_ss_endpoint_companion() checks the descriptor type before the
+length, enabling a potential out-of-bounds read past the end of the
+buffer.
+
+Fix this up by checking the size first before looking at any of the
+fields in the descriptor.
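+
+The general pattern, as a standalone sketch (hypothetical struct and
+constants mirroring usb_ss_ep_comp_descriptor / USB_DT_SS_EP_COMP_SIZE,
+not the kernel code itself):
+
+    #include <stdio.h>
+    #include <stdint.h>
+    #include <string.h>
+
+    struct ss_ep_comp {
+        uint8_t  bLength;
+        uint8_t  bDescriptorType;
+        uint8_t  bMaxBurst;
+        uint8_t  bmAttributes;
+        uint16_t wBytesPerInterval;
+    };
+
+    #define SS_EP_COMP_TYPE 0x30
+    #define SS_EP_COMP_SIZE 6
+
+    static void parse_companion(const unsigned char *buf, int size)
+    {
+        struct ss_ep_comp desc;
+
+        /* validate the remaining length before touching any field */
+        if (size < SS_EP_COMP_SIZE) {
+            fprintf(stderr, "short companion descriptor (%d bytes), skipping\n", size);
+            return;
+        }
+        memcpy(&desc, buf, SS_EP_COMP_SIZE);
+        if (desc.bDescriptorType != SS_EP_COMP_TYPE) {
+            fprintf(stderr, "not a companion descriptor, using defaults\n");
+            return;
+        }
+        printf("bMaxBurst=%u\n", (unsigned)desc.bMaxBurst);
+    }
+
+    int main(void)
+    {
+        unsigned char tail[1] = { SS_EP_COMP_TYPE };  /* one byte left in the buffer */
+
+        /* checking the type first would already read past this buffer */
+        parse_companion(tail, sizeof(tail));
+        return 0;
+    }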
+
+Signed-off-by: Xinyu Liu <katieeliu@tencent.com>
+Cc: stable <stable@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/core/config.c |   10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -81,8 +81,14 @@ static void usb_parse_ss_endpoint_compan
+        */
+       desc = (struct usb_ss_ep_comp_descriptor *) buffer;
+-      if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP ||
+-                      size < USB_DT_SS_EP_COMP_SIZE) {
++      if (size < USB_DT_SS_EP_COMP_SIZE) {
++              dev_notice(ddev,
++                         "invalid SuperSpeed endpoint companion descriptor "
++                         "of length %d, skipping\n", size);
++              return;
++      }
++
++      if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP) {
+               dev_notice(ddev, "No SuperSpeed endpoint companion for config %d "
+                               " interface %d altsetting %d ep %d: "
+                               "using minimum values\n",
diff --git a/queue-6.6/usb-typec-ucsi-update-power_supply-on-power-role-change.patch b/queue-6.6/usb-typec-ucsi-update-power_supply-on-power-role-change.patch
new file mode 100644 (file)
index 0000000..5325ddf
--- /dev/null
@@ -0,0 +1,37 @@
+From 7616f006db07017ef5d4ae410fca99279aaca7aa Mon Sep 17 00:00:00 2001
+From: Myrrh Periwinkle <myrrhperiwinkle@qtmlabs.xyz>
+Date: Mon, 21 Jul 2025 13:32:51 +0700
+Subject: usb: typec: ucsi: Update power_supply on power role change
+
+From: Myrrh Periwinkle <myrrhperiwinkle@qtmlabs.xyz>
+
+commit 7616f006db07017ef5d4ae410fca99279aaca7aa upstream.
+
+The current power direction of a USB-C port also influences the
+power_supply's online status, so a power role change should also update
+the power_supply.
+
+This fixes an issue on some systems where plugging in a normal USB
+device for the first time after a reboot causes upower to erroneously
+consider the system to be connected to AC power.
+
+Cc: stable <stable@kernel.org>
+Fixes: 0e6371fbfba3 ("usb: typec: ucsi: Report power supply changes")
+Signed-off-by: Myrrh Periwinkle <myrrhperiwinkle@qtmlabs.xyz>
+Reviewed-by: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Link: https://lore.kernel.org/r/20250721-fix-ucsi-pwr-dir-notify-v1-1-e53d5340cb38@qtmlabs.xyz
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/typec/ucsi/ucsi.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/usb/typec/ucsi/ucsi.c
++++ b/drivers/usb/typec/ucsi/ucsi.c
+@@ -910,6 +910,7 @@ static void ucsi_handle_connector_change
+       if (con->status.change & UCSI_CONSTAT_POWER_DIR_CHANGE) {
+               typec_set_pwr_role(con->port, role);
++              ucsi_port_psy_changed(con);
+               /* Complete pending power role swap */
+               if (!completion_done(&con->complete))