git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.8-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 23 Apr 2024 14:08:20 +0000 (07:08 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 23 Apr 2024 14:08:20 +0000 (07:08 -0700)
added patches:
arm64-head-disable-mmu-at-el2-before-clearing-hcr_el2.e2h.patch
arm64-hibernate-fix-level3-translation-fault-in-swsusp_save.patch
bootconfig-use-memblock_free_late-to-free-xbc-memory-to-buddy.patch
drm-amdgpu-remove-invalid-resource-start-check-v2.patch
drm-amdgpu-validate-the-parameters-of-bo-mapping-operations-more-clearly.patch
drm-amdkfd-fix-memory-leak-in-create_process-failure.patch
drm-ttm-stop-pooling-cached-numa-pages-v2.patch
drm-vmwgfx-fix-crtc-s-atomic-check-conditional.patch
drm-vmwgfx-fix-prime-import-export.patch
drm-vmwgfx-sort-primary-plane-formats-by-order-of-preference.patch
drm-xe-fix-bo-leak-in-intel_fb_bo_framebuffer_init.patch
fork-defer-linking-file-vma-until-vma-is-fully-initialized.patch
fs-sysfs-fix-reference-leak-in-sysfs_break_active_protection.patch
fuse-fix-leaked-enosys-error-on-first-statx-call.patch
init-main.c-fix-potential-static_command_line-memory-overflow.patch
kvm-x86-mmu-write-protect-l2-sptes-in-tdp-mmu-when-clearing-dirty-status.patch
kvm-x86-mmu-x86-don-t-overflow-lpage_info-when-checking-attributes.patch
kvm-x86-pmu-disable-support-for-adaptive-pebs.patch
kvm-x86-pmu-do-not-mask-lvtpc-when-handling-a-pmi-on-amd-platforms.patch
kvm-x86-snapshot-if-a-vcpu-s-vendor-model-is-amd-vs.-intel-compatible.patch
mei-me-disable-rpl-s-on-sps-and-ign-firmwares.patch
mei-vsc-unregister-interrupt-handler-for-system-suspend.patch
mm-madvise-make-madv_populate_-read-write-handle-vm_fault_retry-properly.patch
mm-memory-failure-fix-deadlock-when-hugetlb_optimize_vmemmap-is-enabled.patch
mm-shmem-inline-shmem_is_huge-for-disabled-transparent-hugepages.patch
mm-swapops-update-check-in-is_pfn_swap_entry-for-hwpoison-entries.patch
mm-userfaultfd-allow-hugetlb-change-protection-upon-poison-entry.patch
nilfs2-fix-oob-in-nilfs_set_de_type.patch
nouveau-fix-instmem-race-condition-around-ptr-stores.patch
revert-mei-vsc-call-wake_up-in-the-threaded-irq-handler.patch
revert-usb-cdc-wdm-close-race-between-read-and-workqueue.patch
sched-add-missing-memory-barrier-in-switch_mm_cid.patch
serial-8250_dw-revert-do-not-reclock-if-already-at-correct-rate.patch
serial-core-clearing-the-circular-buffer-before-nullifying-it.patch
serial-core-fix-missing-shutdown-and-startup-for-serial-base-port.patch
serial-core-fix-regression-when-runtime-pm-is-not-enabled.patch
serial-mxs-auart-add-spinlock-around-changing-cts-state.patch
serial-pmac_zilog-remove-flawed-mitigation-for-rx-irq-flood.patch
serial-stm32-reset-.throttled-state-in-.startup.patch
serial-stm32-return-irq_none-in-the-isr-if-no-handling-happend.patch
speakup-avoid-crash-on-very-long-word.patch
squashfs-check-the-inode-number-is-not-the-invalid-value-of-zero.patch
usb-disable-usb3-lpm-at-shutdown.patch
usb-dwc2-host-fix-dereference-issue-in-ddma-completion-flow.patch
usb-gadget-f_ncm-fix-uaf-ncm-object-at-re-bind-after-usb-ep-transport-error.patch
usb-serial-option-add-fibocom-fm135-gl-variants.patch
usb-serial-option-add-lonsung-u8300-u9300-product.patch
usb-serial-option-add-rolling-rw101-gl-and-rw135-gl-support.patch
usb-serial-option-add-support-for-fibocom-fm650-fg650.patch
usb-serial-option-add-telit-fn920c04-rmnet-compositions.patch
usb-serial-option-support-quectel-em060k-sub-models.patch
usb-typec-tcpm-correct-the-pdo-counting-in-pd_set.patch

53 files changed:
queue-6.8/arm64-head-disable-mmu-at-el2-before-clearing-hcr_el2.e2h.patch [new file with mode: 0644]
queue-6.8/arm64-hibernate-fix-level3-translation-fault-in-swsusp_save.patch [new file with mode: 0644]
queue-6.8/bootconfig-use-memblock_free_late-to-free-xbc-memory-to-buddy.patch [new file with mode: 0644]
queue-6.8/drm-amdgpu-remove-invalid-resource-start-check-v2.patch [new file with mode: 0644]
queue-6.8/drm-amdgpu-validate-the-parameters-of-bo-mapping-operations-more-clearly.patch [new file with mode: 0644]
queue-6.8/drm-amdkfd-fix-memory-leak-in-create_process-failure.patch [new file with mode: 0644]
queue-6.8/drm-ttm-stop-pooling-cached-numa-pages-v2.patch [new file with mode: 0644]
queue-6.8/drm-vmwgfx-fix-crtc-s-atomic-check-conditional.patch [new file with mode: 0644]
queue-6.8/drm-vmwgfx-fix-prime-import-export.patch [new file with mode: 0644]
queue-6.8/drm-vmwgfx-sort-primary-plane-formats-by-order-of-preference.patch [new file with mode: 0644]
queue-6.8/drm-xe-fix-bo-leak-in-intel_fb_bo_framebuffer_init.patch [new file with mode: 0644]
queue-6.8/fork-defer-linking-file-vma-until-vma-is-fully-initialized.patch [new file with mode: 0644]
queue-6.8/fs-sysfs-fix-reference-leak-in-sysfs_break_active_protection.patch [new file with mode: 0644]
queue-6.8/fuse-fix-leaked-enosys-error-on-first-statx-call.patch [new file with mode: 0644]
queue-6.8/init-main.c-fix-potential-static_command_line-memory-overflow.patch [new file with mode: 0644]
queue-6.8/kvm-x86-mmu-write-protect-l2-sptes-in-tdp-mmu-when-clearing-dirty-status.patch [new file with mode: 0644]
queue-6.8/kvm-x86-mmu-x86-don-t-overflow-lpage_info-when-checking-attributes.patch [new file with mode: 0644]
queue-6.8/kvm-x86-pmu-disable-support-for-adaptive-pebs.patch [new file with mode: 0644]
queue-6.8/kvm-x86-pmu-do-not-mask-lvtpc-when-handling-a-pmi-on-amd-platforms.patch [new file with mode: 0644]
queue-6.8/kvm-x86-snapshot-if-a-vcpu-s-vendor-model-is-amd-vs.-intel-compatible.patch [new file with mode: 0644]
queue-6.8/mei-me-disable-rpl-s-on-sps-and-ign-firmwares.patch [new file with mode: 0644]
queue-6.8/mei-vsc-unregister-interrupt-handler-for-system-suspend.patch [new file with mode: 0644]
queue-6.8/mm-madvise-make-madv_populate_-read-write-handle-vm_fault_retry-properly.patch [new file with mode: 0644]
queue-6.8/mm-memory-failure-fix-deadlock-when-hugetlb_optimize_vmemmap-is-enabled.patch [new file with mode: 0644]
queue-6.8/mm-shmem-inline-shmem_is_huge-for-disabled-transparent-hugepages.patch [new file with mode: 0644]
queue-6.8/mm-swapops-update-check-in-is_pfn_swap_entry-for-hwpoison-entries.patch [new file with mode: 0644]
queue-6.8/mm-userfaultfd-allow-hugetlb-change-protection-upon-poison-entry.patch [new file with mode: 0644]
queue-6.8/nilfs2-fix-oob-in-nilfs_set_de_type.patch [new file with mode: 0644]
queue-6.8/nouveau-fix-instmem-race-condition-around-ptr-stores.patch [new file with mode: 0644]
queue-6.8/revert-mei-vsc-call-wake_up-in-the-threaded-irq-handler.patch [new file with mode: 0644]
queue-6.8/revert-usb-cdc-wdm-close-race-between-read-and-workqueue.patch [new file with mode: 0644]
queue-6.8/sched-add-missing-memory-barrier-in-switch_mm_cid.patch [new file with mode: 0644]
queue-6.8/serial-8250_dw-revert-do-not-reclock-if-already-at-correct-rate.patch [new file with mode: 0644]
queue-6.8/serial-core-clearing-the-circular-buffer-before-nullifying-it.patch [new file with mode: 0644]
queue-6.8/serial-core-fix-missing-shutdown-and-startup-for-serial-base-port.patch [new file with mode: 0644]
queue-6.8/serial-core-fix-regression-when-runtime-pm-is-not-enabled.patch [new file with mode: 0644]
queue-6.8/serial-mxs-auart-add-spinlock-around-changing-cts-state.patch [new file with mode: 0644]
queue-6.8/serial-pmac_zilog-remove-flawed-mitigation-for-rx-irq-flood.patch [new file with mode: 0644]
queue-6.8/serial-stm32-reset-.throttled-state-in-.startup.patch [new file with mode: 0644]
queue-6.8/serial-stm32-return-irq_none-in-the-isr-if-no-handling-happend.patch [new file with mode: 0644]
queue-6.8/series
queue-6.8/speakup-avoid-crash-on-very-long-word.patch [new file with mode: 0644]
queue-6.8/squashfs-check-the-inode-number-is-not-the-invalid-value-of-zero.patch [new file with mode: 0644]
queue-6.8/usb-disable-usb3-lpm-at-shutdown.patch [new file with mode: 0644]
queue-6.8/usb-dwc2-host-fix-dereference-issue-in-ddma-completion-flow.patch [new file with mode: 0644]
queue-6.8/usb-gadget-f_ncm-fix-uaf-ncm-object-at-re-bind-after-usb-ep-transport-error.patch [new file with mode: 0644]
queue-6.8/usb-serial-option-add-fibocom-fm135-gl-variants.patch [new file with mode: 0644]
queue-6.8/usb-serial-option-add-lonsung-u8300-u9300-product.patch [new file with mode: 0644]
queue-6.8/usb-serial-option-add-rolling-rw101-gl-and-rw135-gl-support.patch [new file with mode: 0644]
queue-6.8/usb-serial-option-add-support-for-fibocom-fm650-fg650.patch [new file with mode: 0644]
queue-6.8/usb-serial-option-add-telit-fn920c04-rmnet-compositions.patch [new file with mode: 0644]
queue-6.8/usb-serial-option-support-quectel-em060k-sub-models.patch [new file with mode: 0644]
queue-6.8/usb-typec-tcpm-correct-the-pdo-counting-in-pd_set.patch [new file with mode: 0644]

diff --git a/queue-6.8/arm64-head-disable-mmu-at-el2-before-clearing-hcr_el2.e2h.patch b/queue-6.8/arm64-head-disable-mmu-at-el2-before-clearing-hcr_el2.e2h.patch
new file mode 100644 (file)
index 0000000..1ff0a63
--- /dev/null
@@ -0,0 +1,58 @@
+From 34e526cb7d46726b2ae5f83f2892d00ebb088509 Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Mon, 15 Apr 2024 09:54:15 +0200
+Subject: arm64/head: Disable MMU at EL2 before clearing HCR_EL2.E2H
+
+From: Ard Biesheuvel <ardb@kernel.org>
+
+commit 34e526cb7d46726b2ae5f83f2892d00ebb088509 upstream.
+
+Even though the boot protocol stipulates otherwise, an exception has
+been made for the EFI stub, and entering the core kernel with the MMU
+enabled is permitted. This allows a substantial amount of cache
+maintenance to be elided, which is significant when fast boot times are
+critical (e.g., for booting micro-VMs).
+
+Once the initial ID map has been populated, the MMU is disabled as part
+of the logic sequence that puts all system registers into a known state.
+Any code that needs to execute within the window where the MMU is off is
+cleaned to the PoC explicitly, which includes all of HYP text when
+entering at EL2.
+
+However, the current sequence of initializing the EL2 system registers
+is not safe: HCR_EL2 is set to its nVHE initial state before SCTLR_EL2
+is reprogrammed, and this means that a VHE-to-nVHE switch may occur
+while the MMU is enabled. This switch causes some system registers as
+well as page table descriptors to be interpreted in a different way,
+potentially resulting in spurious exceptions relating to MMU
+translation.
+
+So disable the MMU explicitly first when entering in EL2 with the MMU
+and caches enabled.
+
+Fixes: 617861703830 ("efi: arm64: enter with MMU and caches enabled")
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Cc: <stable@vger.kernel.org> # 6.3.x
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Marc Zyngier <maz@kernel.org>
+Link: https://lore.kernel.org/r/20240415075412.2347624-6-ardb+git@google.com
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/head.S |    5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -569,6 +569,11 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
+       adr_l   x1, __hyp_text_end
+       adr_l   x2, dcache_clean_poc
+       blr     x2
++
++      mov_q   x0, INIT_SCTLR_EL2_MMU_OFF
++      pre_disable_mmu_workaround
++      msr     sctlr_el2, x0
++      isb
+ 0:
+       mov_q   x0, HCR_HOST_NVHE_FLAGS
+       msr     hcr_el2, x0
diff --git a/queue-6.8/arm64-hibernate-fix-level3-translation-fault-in-swsusp_save.patch b/queue-6.8/arm64-hibernate-fix-level3-translation-fault-in-swsusp_save.patch
new file mode 100644 (file)
index 0000000..26ad8a6
--- /dev/null
@@ -0,0 +1,96 @@
+From 50449ca66cc5a8cbc64749cf4b9f3d3fc5f4b457 Mon Sep 17 00:00:00 2001
+From: Yaxiong Tian <tianyaxiong@kylinos.cn>
+Date: Wed, 17 Apr 2024 10:52:48 +0800
+Subject: arm64: hibernate: Fix level3 translation fault in swsusp_save()
+
+From: Yaxiong Tian <tianyaxiong@kylinos.cn>
+
+commit 50449ca66cc5a8cbc64749cf4b9f3d3fc5f4b457 upstream.
+
+On arm64 machines, swsusp_save() faults if it attempts to access
+MEMBLOCK_NOMAP memory ranges. This can be reproduced in QEMU using UEFI
+when booting with rodata=off debug_pagealloc=off and CONFIG_KFENCE=n:
+
+  Unable to handle kernel paging request at virtual address ffffff8000000000
+  Mem abort info:
+    ESR = 0x0000000096000007
+    EC = 0x25: DABT (current EL), IL = 32 bits
+    SET = 0, FnV = 0
+    EA = 0, S1PTW = 0
+    FSC = 0x07: level 3 translation fault
+  Data abort info:
+    ISV = 0, ISS = 0x00000007, ISS2 = 0x00000000
+    CM = 0, WnR = 0, TnD = 0, TagAccess = 0
+    GCS = 0, Overlay = 0, DirtyBit = 0, Xs = 0
+  swapper pgtable: 4k pages, 39-bit VAs, pgdp=00000000eeb0b000
+  [ffffff8000000000] pgd=180000217fff9803, p4d=180000217fff9803, pud=180000217fff9803, pmd=180000217fff8803, pte=0000000000000000
+  Internal error: Oops: 0000000096000007 [#1] SMP
+  Internal error: Oops: 0000000096000007 [#1] SMP
+  Modules linked in: xt_multiport ipt_REJECT nf_reject_ipv4 xt_conntrack nf_conntrack nf_defrag_ipv6 nf_defrag_ipv4 libcrc32c iptable_filter bpfilter rfkill at803x snd_hda_codec_hdmi snd_hda_intel snd_intel_dspcfg dwmac_generic stmmac_platform snd_hda_codec stmmac joydev pcs_xpcs snd_hda_core phylink ppdev lp parport ramoops reed_solomon ip_tables x_tables nls_iso8859_1 vfat multipath linear amdgpu amdxcp drm_exec gpu_sched drm_buddy hid_generic usbhid hid radeon video drm_suballoc_helper drm_ttm_helper ttm i2c_algo_bit drm_display_helper cec drm_kms_helper drm
+  CPU: 0 PID: 3663 Comm: systemd-sleep Not tainted 6.6.2+ #76
+  Source Version: 4e22ed63a0a48e7a7cff9b98b7806d8d4add7dc0
+  Hardware name: Greatwall GW-XXXXXX-XXX/GW-XXXXXX-XXX, BIOS KunLun BIOS V4.0 01/19/2021
+  pstate: 600003c5 (nZCv DAIF -PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+  pc : swsusp_save+0x280/0x538
+  lr : swsusp_save+0x280/0x538
+  sp : ffffffa034a3fa40
+  x29: ffffffa034a3fa40 x28: ffffff8000001000 x27: 0000000000000000
+  x26: ffffff8001400000 x25: ffffffc08113e248 x24: 0000000000000000
+  x23: 0000000000080000 x22: ffffffc08113e280 x21: 00000000000c69f2
+  x20: ffffff8000000000 x19: ffffffc081ae2500 x18: 0000000000000000
+  x17: 6666662074736420 x16: 3030303030303030 x15: 3038666666666666
+  x14: 0000000000000b69 x13: ffffff9f89088530 x12: 00000000ffffffea
+  x11: 00000000ffff7fff x10: 00000000ffff7fff x9 : ffffffc08193f0d0
+  x8 : 00000000000bffe8 x7 : c0000000ffff7fff x6 : 0000000000000001
+  x5 : ffffffa0fff09dc8 x4 : 0000000000000000 x3 : 0000000000000027
+  x2 : 0000000000000000 x1 : 0000000000000000 x0 : 000000000000004e
+  Call trace:
+   swsusp_save+0x280/0x538
+   swsusp_arch_suspend+0x148/0x190
+   hibernation_snapshot+0x240/0x39c
+   hibernate+0xc4/0x378
+   state_store+0xf0/0x10c
+   kobj_attr_store+0x14/0x24
+
+The reason is swsusp_save() -> copy_data_pages() -> page_is_saveable()
+-> kernel_page_present() assuming that a page is always present when
+can_set_direct_map() is false (all of rodata_full,
+debug_pagealloc_enabled() and arm64_kfence_can_set_direct_map() false),
+irrespective of the MEMBLOCK_NOMAP ranges. Such MEMBLOCK_NOMAP regions
+should not be saved during hibernation.
+
+This problem was introduced by changes to the pfn_valid() logic in
+commit a7d9f306ba70 ("arm64: drop pfn_valid_within() and simplify
+pfn_valid()").
+
+Similar to other architectures, drop the !can_set_direct_map() check in
+kernel_page_present() so that page_is_saveable() skips such pages.
+
+Fixes: a7d9f306ba70 ("arm64: drop pfn_valid_within() and simplify pfn_valid()")
+Cc: <stable@vger.kernel.org> # 5.14.x
+Suggested-by: Mike Rapoport <rppt@kernel.org>
+Suggested-by: Catalin Marinas <catalin.marinas@arm.com>
+Co-developed-by: xiongxin <xiongxin@kylinos.cn>
+Signed-off-by: xiongxin <xiongxin@kylinos.cn>
+Signed-off-by: Yaxiong Tian <tianyaxiong@kylinos.cn>
+Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>
+Link: https://lore.kernel.org/r/20240417025248.386622-1-tianyaxiong@kylinos.cn
+[catalin.marinas@arm.com: rework commit message]
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/mm/pageattr.c |    3 ---
+ 1 file changed, 3 deletions(-)
+
+--- a/arch/arm64/mm/pageattr.c
++++ b/arch/arm64/mm/pageattr.c
+@@ -219,9 +219,6 @@ bool kernel_page_present(struct page *pa
+       pte_t *ptep;
+       unsigned long addr = (unsigned long)page_address(page);
+-      if (!can_set_direct_map())
+-              return true;
+-
+       pgdp = pgd_offset_k(addr);
+       if (pgd_none(READ_ONCE(*pgdp)))
+               return false;
diff --git a/queue-6.8/bootconfig-use-memblock_free_late-to-free-xbc-memory-to-buddy.patch b/queue-6.8/bootconfig-use-memblock_free_late-to-free-xbc-memory-to-buddy.patch
new file mode 100644 (file)
index 0000000..bf8c9c1
--- /dev/null
@@ -0,0 +1,155 @@
+From 89f9a1e876b5a7ad884918c03a46831af202c8a0 Mon Sep 17 00:00:00 2001
+From: Qiang Zhang <qiang4.zhang@intel.com>
+Date: Sun, 14 Apr 2024 19:49:45 +0800
+Subject: bootconfig: use memblock_free_late to free xbc memory to buddy
+
+From: Qiang Zhang <qiang4.zhang@intel.com>
+
+commit 89f9a1e876b5a7ad884918c03a46831af202c8a0 upstream.
+
+By the time xbc memory is freed in xbc_exit(), memblock may have already
+handed the memory over to the buddy allocator, so it doesn't make sense
+to free it back to memblock. memblock_free() called by xbc_exit() even
+causes UAF bugs on architectures with CONFIG_ARCH_KEEP_MEMBLOCK disabled,
+such as x86. The following KASAN log shows this case.
+
+This patch fixes the xbc memory free problem by calling memblock_free()
+in the early xbc init error rewind path and calling memblock_free_late()
+in the xbc exit path to free memory to the buddy allocator.
+
+[    9.410890] ==================================================================
+[    9.418962] BUG: KASAN: use-after-free in memblock_isolate_range+0x12d/0x260
+[    9.426850] Read of size 8 at addr ffff88845dd30000 by task swapper/0/1
+
+[    9.435901] CPU: 9 PID: 1 Comm: swapper/0 Tainted: G     U             6.9.0-rc3-00208-g586b5dfb51b9 #5
+[    9.446403] Hardware name: Intel Corporation RPLP LP5 (CPU:RaptorLake)/RPLP LP5 (ID:13), BIOS IRPPN02.01.01.00.00.19.015.D-00000000 Dec 28 2023
+[    9.460789] Call Trace:
+[    9.463518]  <TASK>
+[    9.465859]  dump_stack_lvl+0x53/0x70
+[    9.469949]  print_report+0xce/0x610
+[    9.473944]  ? __virt_addr_valid+0xf5/0x1b0
+[    9.478619]  ? memblock_isolate_range+0x12d/0x260
+[    9.483877]  kasan_report+0xc6/0x100
+[    9.487870]  ? memblock_isolate_range+0x12d/0x260
+[    9.493125]  memblock_isolate_range+0x12d/0x260
+[    9.498187]  memblock_phys_free+0xb4/0x160
+[    9.502762]  ? __pfx_memblock_phys_free+0x10/0x10
+[    9.508021]  ? mutex_unlock+0x7e/0xd0
+[    9.512111]  ? __pfx_mutex_unlock+0x10/0x10
+[    9.516786]  ? kernel_init_freeable+0x2d4/0x430
+[    9.521850]  ? __pfx_kernel_init+0x10/0x10
+[    9.526426]  xbc_exit+0x17/0x70
+[    9.529935]  kernel_init+0x38/0x1e0
+[    9.533829]  ? _raw_spin_unlock_irq+0xd/0x30
+[    9.538601]  ret_from_fork+0x2c/0x50
+[    9.542596]  ? __pfx_kernel_init+0x10/0x10
+[    9.547170]  ret_from_fork_asm+0x1a/0x30
+[    9.551552]  </TASK>
+
+[    9.555649] The buggy address belongs to the physical page:
+[    9.561875] page: refcount:0 mapcount:0 mapping:0000000000000000 index:0x1 pfn:0x45dd30
+[    9.570821] flags: 0x200000000000000(node=0|zone=2)
+[    9.576271] page_type: 0xffffffff()
+[    9.580167] raw: 0200000000000000 ffffea0011774c48 ffffea0012ba1848 0000000000000000
+[    9.588823] raw: 0000000000000001 0000000000000000 00000000ffffffff 0000000000000000
+[    9.597476] page dumped because: kasan: bad access detected
+
+[    9.605362] Memory state around the buggy address:
+[    9.610714]  ffff88845dd2ff00: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+[    9.618786]  ffff88845dd2ff80: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+[    9.626857] >ffff88845dd30000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff
+[    9.634930]                    ^
+[    9.638534]  ffff88845dd30080: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff
+[    9.646605]  ffff88845dd30100: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff
+[    9.654675] ==================================================================
+
+Link: https://lore.kernel.org/all/20240414114944.1012359-1-qiang4.zhang@linux.intel.com/
+
+Fixes: 40caa127f3c7 ("init: bootconfig: Remove all bootconfig data when the init memory is removed")
+Cc: Stable@vger.kernel.org
+Signed-off-by: Qiang Zhang <qiang4.zhang@intel.com>
+Acked-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/bootconfig.h |    7 ++++++-
+ lib/bootconfig.c           |   19 +++++++++++--------
+ 2 files changed, 17 insertions(+), 9 deletions(-)
+
+--- a/include/linux/bootconfig.h
++++ b/include/linux/bootconfig.h
+@@ -288,7 +288,12 @@ int __init xbc_init(const char *buf, siz
+ int __init xbc_get_info(int *node_size, size_t *data_size);
+ /* XBC cleanup data structures */
+-void __init xbc_exit(void);
++void __init _xbc_exit(bool early);
++
++static inline void xbc_exit(void)
++{
++      _xbc_exit(false);
++}
+ /* XBC embedded bootconfig data in kernel */
+ #ifdef CONFIG_BOOT_CONFIG_EMBED
+--- a/lib/bootconfig.c
++++ b/lib/bootconfig.c
+@@ -61,9 +61,12 @@ static inline void * __init xbc_alloc_me
+       return memblock_alloc(size, SMP_CACHE_BYTES);
+ }
+-static inline void __init xbc_free_mem(void *addr, size_t size)
++static inline void __init xbc_free_mem(void *addr, size_t size, bool early)
+ {
+-      memblock_free(addr, size);
++      if (early)
++              memblock_free(addr, size);
++      else if (addr)
++              memblock_free_late(__pa(addr), size);
+ }
+ #else /* !__KERNEL__ */
+@@ -73,7 +76,7 @@ static inline void *xbc_alloc_mem(size_t
+       return malloc(size);
+ }
+-static inline void xbc_free_mem(void *addr, size_t size)
++static inline void xbc_free_mem(void *addr, size_t size, bool early)
+ {
+       free(addr);
+ }
+@@ -904,13 +907,13 @@ static int __init xbc_parse_tree(void)
+  * If you need to reuse xbc_init() with new boot config, you can
+  * use this.
+  */
+-void __init xbc_exit(void)
++void __init _xbc_exit(bool early)
+ {
+-      xbc_free_mem(xbc_data, xbc_data_size);
++      xbc_free_mem(xbc_data, xbc_data_size, early);
+       xbc_data = NULL;
+       xbc_data_size = 0;
+       xbc_node_num = 0;
+-      xbc_free_mem(xbc_nodes, sizeof(struct xbc_node) * XBC_NODE_MAX);
++      xbc_free_mem(xbc_nodes, sizeof(struct xbc_node) * XBC_NODE_MAX, early);
+       xbc_nodes = NULL;
+       brace_index = 0;
+ }
+@@ -963,7 +966,7 @@ int __init xbc_init(const char *data, si
+       if (!xbc_nodes) {
+               if (emsg)
+                       *emsg = "Failed to allocate bootconfig nodes";
+-              xbc_exit();
++              _xbc_exit(true);
+               return -ENOMEM;
+       }
+       memset(xbc_nodes, 0, sizeof(struct xbc_node) * XBC_NODE_MAX);
+@@ -977,7 +980,7 @@ int __init xbc_init(const char *data, si
+                       *epos = xbc_err_pos;
+               if (emsg)
+                       *emsg = xbc_err_msg;
+-              xbc_exit();
++              _xbc_exit(true);
+       } else
+               ret = xbc_node_num;
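
To make the split concrete, here is a small userspace sketch of the pattern: one free helper dispatches to the phase-appropriate deallocator, and a public wrapper keeps the old signature, mirroring xbc_exit() calling _xbc_exit(false). The early_free()/late_free() names are made-up stand-ins for memblock_free()/memblock_free_late(), not real APIs.

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-ins for the two deallocators: one is only valid while the
     * early allocator still owns the memory, the other once ownership
     * has moved to the main allocator. */
    static void early_free(void *p, size_t size)
    {
        printf("early free of %zu bytes\n", size);
        free(p);
    }

    static void late_free(void *p, size_t size)
    {
        printf("late free of %zu bytes\n", size);
        free(p);
    }

    static void free_mem(void *p, size_t size, bool early)
    {
        if (early)
            early_free(p, size);
        else if (p)
            late_free(p, size);
    }

    /* Internal teardown takes the boot phase explicitly... */
    static void teardown_phase(void *p, size_t size, bool early)
    {
        free_mem(p, size, early);
    }

    /* ...while the public helper keeps its old signature and always uses
     * the late path, like xbc_exit() wrapping _xbc_exit(false). */
    static void teardown(void *p, size_t size)
    {
        teardown_phase(p, size, false);
    }

    int main(void)
    {
        teardown(malloc(64), 64);                /* normal (late) teardown */
        teardown_phase(malloc(64), 64, true);    /* early error-rewind path */
        return 0;
    }
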
diff --git a/queue-6.8/drm-amdgpu-remove-invalid-resource-start-check-v2.patch b/queue-6.8/drm-amdgpu-remove-invalid-resource-start-check-v2.patch
new file mode 100644 (file)
index 0000000..efce8b6
--- /dev/null
@@ -0,0 +1,51 @@
+From ca7c4507ba87e9fc22e0ecfa819c3664b3e8287b Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Fri, 15 Mar 2024 13:07:53 +0100
+Subject: drm/amdgpu: remove invalid resource->start check v2
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Christian König <christian.koenig@amd.com>
+
+commit ca7c4507ba87e9fc22e0ecfa819c3664b3e8287b upstream.
+
+The majority of those were removed in commit aed01a68047b
+("drm/amdgpu: Remove TTM resource->start visible VRAM condition v2")
+
+But this one was missed because it's working on the resource and not the
+BO. Since we also no longer use a fake start address for visible BOs
+this will now trigger invalid mapping errors.
+
+v2: also remove the unused variable
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Fixes: aed01a68047b ("drm/amdgpu: Remove TTM resource->start visible VRAM condition v2")
+CC: stable@vger.kernel.org
+Acked-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c |    4 ----
+ 1 file changed, 4 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -562,7 +562,6 @@ static int amdgpu_ttm_io_mem_reserve(str
+                                    struct ttm_resource *mem)
+ {
+       struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
+-      size_t bus_size = (size_t)mem->size;
+       switch (mem->mem_type) {
+       case TTM_PL_SYSTEM:
+@@ -573,9 +572,6 @@ static int amdgpu_ttm_io_mem_reserve(str
+               break;
+       case TTM_PL_VRAM:
+               mem->bus.offset = mem->start << PAGE_SHIFT;
+-              /* check if it's visible */
+-              if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size)
+-                      return -EINVAL;
+               if (adev->mman.aper_base_kaddr &&
+                   mem->placement & TTM_PL_FLAG_CONTIGUOUS)
diff --git a/queue-6.8/drm-amdgpu-validate-the-parameters-of-bo-mapping-operations-more-clearly.patch b/queue-6.8/drm-amdgpu-validate-the-parameters-of-bo-mapping-operations-more-clearly.patch
new file mode 100644 (file)
index 0000000..8d26de0
--- /dev/null
@@ -0,0 +1,141 @@
+From 6fef2d4c00b5b8561ad68dd2b68173f5c6af1e75 Mon Sep 17 00:00:00 2001
+From: xinhui pan <xinhui.pan@amd.com>
+Date: Thu, 11 Apr 2024 11:11:38 +0800
+Subject: drm/amdgpu: validate the parameters of bo mapping operations more clearly
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: xinhui pan <xinhui.pan@amd.com>
+
+commit 6fef2d4c00b5b8561ad68dd2b68173f5c6af1e75 upstream.
+
+Verify the parameters of
+amdgpu_vm_bo_(map/replace_map/clear_mappings) in one common place.
+
+Fixes: dc54d3d1744d ("drm/amdgpu: implement AMDGPU_VA_OP_CLEAR v2")
+Cc: stable@vger.kernel.org
+Reported-by: Vlad Stolyarov <hexed@google.com>
+Suggested-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: xinhui pan <xinhui.pan@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c |   72 +++++++++++++++++++++------------
+ 1 file changed, 46 insertions(+), 26 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -1559,6 +1559,37 @@ static void amdgpu_vm_bo_insert_map(stru
+       trace_amdgpu_vm_bo_map(bo_va, mapping);
+ }
++/* Validate operation parameters to prevent potential abuse */
++static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
++                                        struct amdgpu_bo *bo,
++                                        uint64_t saddr,
++                                        uint64_t offset,
++                                        uint64_t size)
++{
++      uint64_t tmp, lpfn;
++
++      if (saddr & AMDGPU_GPU_PAGE_MASK
++          || offset & AMDGPU_GPU_PAGE_MASK
++          || size & AMDGPU_GPU_PAGE_MASK)
++              return -EINVAL;
++
++      if (check_add_overflow(saddr, size, &tmp)
++          || check_add_overflow(offset, size, &tmp)
++          || size == 0 /* which also leads to end < begin */)
++              return -EINVAL;
++
++      /* make sure object fit at this offset */
++      if (bo && offset + size > amdgpu_bo_size(bo))
++              return -EINVAL;
++
++      /* Ensure last pfn not exceed max_pfn */
++      lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
++      if (lpfn >= adev->vm_manager.max_pfn)
++              return -EINVAL;
++
++      return 0;
++}
++
+ /**
+  * amdgpu_vm_bo_map - map bo inside a vm
+  *
+@@ -1585,21 +1616,14 @@ int amdgpu_vm_bo_map(struct amdgpu_devic
+       struct amdgpu_bo *bo = bo_va->base.bo;
+       struct amdgpu_vm *vm = bo_va->base.vm;
+       uint64_t eaddr;
++      int r;
+-      /* validate the parameters */
+-      if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
+-              return -EINVAL;
+-      if (saddr + size <= saddr || offset + size <= offset)
+-              return -EINVAL;
+-
+-      /* make sure object fit at this offset */
+-      eaddr = saddr + size - 1;
+-      if ((bo && offset + size > amdgpu_bo_size(bo)) ||
+-          (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
+-              return -EINVAL;
++      r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
++      if (r)
++              return r;
+       saddr /= AMDGPU_GPU_PAGE_SIZE;
+-      eaddr /= AMDGPU_GPU_PAGE_SIZE;
++      eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
+       tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
+       if (tmp) {
+@@ -1652,17 +1676,9 @@ int amdgpu_vm_bo_replace_map(struct amdg
+       uint64_t eaddr;
+       int r;
+-      /* validate the parameters */
+-      if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
+-              return -EINVAL;
+-      if (saddr + size <= saddr || offset + size <= offset)
+-              return -EINVAL;
+-
+-      /* make sure object fit at this offset */
+-      eaddr = saddr + size - 1;
+-      if ((bo && offset + size > amdgpu_bo_size(bo)) ||
+-          (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
+-              return -EINVAL;
++      r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
++      if (r)
++              return r;
+       /* Allocate all the needed memory */
+       mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
+@@ -1676,7 +1692,7 @@ int amdgpu_vm_bo_replace_map(struct amdg
+       }
+       saddr /= AMDGPU_GPU_PAGE_SIZE;
+-      eaddr /= AMDGPU_GPU_PAGE_SIZE;
++      eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
+       mapping->start = saddr;
+       mapping->last = eaddr;
+@@ -1763,10 +1779,14 @@ int amdgpu_vm_bo_clear_mappings(struct a
+       struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
+       LIST_HEAD(removed);
+       uint64_t eaddr;
++      int r;
++
++      r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
++      if (r)
++              return r;
+-      eaddr = saddr + size - 1;
+       saddr /= AMDGPU_GPU_PAGE_SIZE;
+-      eaddr /= AMDGPU_GPU_PAGE_SIZE;
++      eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
+       /* Allocate all the needed memory */
+       before = kzalloc(sizeof(*before), GFP_KERNEL);
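
A minimal userspace sketch of the overflow-safe range validation that the new amdgpu_vm_verify_parameters() performs, using GCC/Clang's __builtin_add_overflow in place of the kernel's check_add_overflow(); the constants and helper name below are made up for illustration, not the amdgpu ones.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define GPU_PAGE_SIZE 4096ULL
    #define GPU_PAGE_MASK (GPU_PAGE_SIZE - 1)

    /* Alignment first, then overflow-safe end computation, then bounds. */
    static bool range_is_valid(uint64_t start, uint64_t offset, uint64_t size,
                               uint64_t obj_size, uint64_t max_addr)
    {
        uint64_t end;

        if ((start & GPU_PAGE_MASK) || (offset & GPU_PAGE_MASK) ||
            (size & GPU_PAGE_MASK))
            return false;   /* misaligned */

        if (size == 0 ||
            __builtin_add_overflow(start, size, &end) ||
            __builtin_add_overflow(offset, size, &end))
            return false;   /* empty or wrapping range */

        if (offset + size > obj_size)
            return false;   /* does not fit inside the object */

        return (start + size - 1) < max_addr;   /* last byte within the limit */
    }

    int main(void)
    {
        /* A wrapping range is rejected instead of slipping past naive checks. */
        printf("%d\n", range_is_valid(UINT64_MAX - 4095, 0, 8192, 8192, UINT64_MAX));
        /* A well-formed range passes. */
        printf("%d\n", range_is_valid(0x10000, 0, 0x2000, 0x4000, 1ULL << 40));
        return 0;
    }
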
diff --git a/queue-6.8/drm-amdkfd-fix-memory-leak-in-create_process-failure.patch b/queue-6.8/drm-amdkfd-fix-memory-leak-in-create_process-failure.patch
new file mode 100644 (file)
index 0000000..56d4561
--- /dev/null
@@ -0,0 +1,39 @@
+From 18921b205012568b45760753ad3146ddb9e2d4e2 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <felix.kuehling@amd.com>
+Date: Wed, 10 Apr 2024 15:52:10 -0400
+Subject: drm/amdkfd: Fix memory leak in create_process failure
+
+From: Felix Kuehling <felix.kuehling@amd.com>
+
+commit 18921b205012568b45760753ad3146ddb9e2d4e2 upstream.
+
+Fix memory leak due to a leaked mmget reference on an error handling
+code path that is triggered when attempting to create KFD processes
+while a GPU reset is in progress.
+
+Fixes: 0ab2d7532b05 ("drm/amdkfd: prepare per-process debug enable and disable")
+CC: Xiaogang Chen <xiaogang.chen@amd.com>
+Signed-off-by: Felix Kuehling <felix.kuehling@amd.com>
+Tested-by: Harish Kasiviswanthan <Harish.Kasiviswanthan@amd.com>
+Reviewed-by: Mukul Joshi <mukul.joshi@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_process.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -819,9 +819,9 @@ struct kfd_process *kfd_create_process(s
+       mutex_lock(&kfd_processes_mutex);
+       if (kfd_is_locked()) {
+-              mutex_unlock(&kfd_processes_mutex);
+               pr_debug("KFD is locked! Cannot create process");
+-              return ERR_PTR(-EINVAL);
++              process = ERR_PTR(-EINVAL);
++              goto out;
+       }
+       /* A prior open of /dev/kfd could have already created the process. */
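
The shape of the fix, sketched in plain C: every exit funnels through one label so shared teardown (the unlock, and the reference drop the commit message mentions) can never be skipped by an early return. The lock and refcount below are generic stand-ins, not the KFD code.

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int refcount;

    static int create_object(int blocked)
    {
        int ret = 0;

        refcount++;                      /* stand-in for an mmget()-style get */
        pthread_mutex_lock(&lock);

        if (blocked) {
            ret = -EINVAL;               /* error path: no early return */
            goto out;
        }

        /* ... normal creation work ... */

    out:
        pthread_mutex_unlock(&lock);
        refcount--;                      /* the matching put always runs */
        return ret;
    }

    int main(void)
    {
        create_object(1);
        printf("refcount after failed create: %d\n", refcount);   /* 0 */
        return 0;
    }
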
diff --git a/queue-6.8/drm-ttm-stop-pooling-cached-numa-pages-v2.patch b/queue-6.8/drm-ttm-stop-pooling-cached-numa-pages-v2.patch
new file mode 100644 (file)
index 0000000..1dd9a95
--- /dev/null
@@ -0,0 +1,107 @@
+From b6976f323a8687cc0d55bc92c2086fd934324ed5 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <ckoenig.leichtzumerken@gmail.com>
+Date: Mon, 15 Apr 2024 15:48:21 +0200
+Subject: drm/ttm: stop pooling cached NUMA pages v2
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Christian König <ckoenig.leichtzumerken@gmail.com>
+
+commit b6976f323a8687cc0d55bc92c2086fd934324ed5 upstream.
+
+We only pool write combined and uncached allocations because they
+require extra overhead on allocation and release.
+
+If we also pool cached NUMA allocations, it not only means some extra
+unnecessary overhead, but also that under memory pressure it can happen
+that pages from the wrong NUMA node enter the pool and are re-used
+over and over again.
+
+This can lead to performance reduction after running into memory
+pressure.
+
+v2: restructure and cleanup the code a bit from the internal hack to
+    test this.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Fixes: 4482d3c94d7f ("drm/ttm: add NUMA node id to the pool")
+CC: stable@vger.kernel.org
+Reviewed-by: Felix Kuehling <felix.kuehling@amd.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240415134821.1919-1-christian.koenig@amd.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/ttm/ttm_pool.c |   38 ++++++++++++++++++++++++++++----------
+ 1 file changed, 28 insertions(+), 10 deletions(-)
+
+--- a/drivers/gpu/drm/ttm/ttm_pool.c
++++ b/drivers/gpu/drm/ttm/ttm_pool.c
+@@ -288,17 +288,23 @@ static struct ttm_pool_type *ttm_pool_se
+                                                 enum ttm_caching caching,
+                                                 unsigned int order)
+ {
+-      if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE)
++      if (pool->use_dma_alloc)
+               return &pool->caching[caching].orders[order];
+ #ifdef CONFIG_X86
+       switch (caching) {
+       case ttm_write_combined:
++              if (pool->nid != NUMA_NO_NODE)
++                      return &pool->caching[caching].orders[order];
++
+               if (pool->use_dma32)
+                       return &global_dma32_write_combined[order];
+               return &global_write_combined[order];
+       case ttm_uncached:
++              if (pool->nid != NUMA_NO_NODE)
++                      return &pool->caching[caching].orders[order];
++
+               if (pool->use_dma32)
+                       return &global_dma32_uncached[order];
+@@ -566,11 +572,17 @@ void ttm_pool_init(struct ttm_pool *pool
+       pool->use_dma_alloc = use_dma_alloc;
+       pool->use_dma32 = use_dma32;
+-      if (use_dma_alloc || nid != NUMA_NO_NODE) {
+-              for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
+-                      for (j = 0; j < NR_PAGE_ORDERS; ++j)
+-                              ttm_pool_type_init(&pool->caching[i].orders[j],
+-                                                 pool, i, j);
++      for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
++              for (j = 0; j < NR_PAGE_ORDERS; ++j) {
++                      struct ttm_pool_type *pt;
++
++                      /* Initialize only pool types which are actually used */
++                      pt = ttm_pool_select_type(pool, i, j);
++                      if (pt != &pool->caching[i].orders[j])
++                              continue;
++
++                      ttm_pool_type_init(pt, pool, i, j);
++              }
+       }
+ }
+ EXPORT_SYMBOL(ttm_pool_init);
+@@ -599,10 +611,16 @@ void ttm_pool_fini(struct ttm_pool *pool
+ {
+       unsigned int i, j;
+-      if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE) {
+-              for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
+-                      for (j = 0; j < NR_PAGE_ORDERS; ++j)
+-                              ttm_pool_type_fini(&pool->caching[i].orders[j]);
++      for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
++              for (j = 0; j < NR_PAGE_ORDERS; ++j) {
++                      struct ttm_pool_type *pt;
++
++                      pt = ttm_pool_select_type(pool, i, j);
++                      if (pt != &pool->caching[i].orders[j])
++                              continue;
++
++                      ttm_pool_type_fini(pt);
++              }
+       }
+       /* We removed the pool types from the LRU, but we need to also make sure
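
Sketched generically, the init/fini pattern the patch introduces asks the selector which pool type a combination actually maps to and initializes the local entry only when the selector points back at it. The structures below are made up for illustration, not the TTM ones.

    #include <stdbool.h>
    #include <stdio.h>

    #define NUM_CACHING 3
    #define NUM_ORDERS  4

    struct pool_type { bool initialized; };

    static struct pool_type global_pool[NUM_CACHING][NUM_ORDERS];

    struct pool {
        bool use_local;   /* e.g. NUMA-bound or DMA-alloc pools */
        struct pool_type local[NUM_CACHING][NUM_ORDERS];
    };

    /* Mirror of the selector: either the pool's own entry or a shared global. */
    static struct pool_type *select_type(struct pool *p, int caching, int order)
    {
        return p->use_local ? &p->local[caching][order]
                            : &global_pool[caching][order];
    }

    static void pool_init(struct pool *p)
    {
        for (int i = 0; i < NUM_CACHING; i++) {
            for (int j = 0; j < NUM_ORDERS; j++) {
                struct pool_type *pt = select_type(p, i, j);

                /* Initialize only entries this pool will really use;
                 * shared globals are managed elsewhere. */
                if (pt != &p->local[i][j])
                    continue;
                pt->initialized = true;
            }
        }
    }

    int main(void)
    {
        struct pool shared = { .use_local = false }, numa = { .use_local = true };

        pool_init(&shared);
        pool_init(&numa);
        printf("shared local[0][0]=%d numa local[0][0]=%d\n",
               shared.local[0][0].initialized, numa.local[0][0].initialized);
        return 0;
    }
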
diff --git a/queue-6.8/drm-vmwgfx-fix-crtc-s-atomic-check-conditional.patch b/queue-6.8/drm-vmwgfx-fix-crtc-s-atomic-check-conditional.patch
new file mode 100644 (file)
index 0000000..d40e8ff
--- /dev/null
@@ -0,0 +1,58 @@
+From a60ccade88f926e871a57176e86a34bbf0db0098 Mon Sep 17 00:00:00 2001
+From: Zack Rusin <zack.rusin@broadcom.com>
+Date: Thu, 11 Apr 2024 22:55:10 -0400
+Subject: drm/vmwgfx: Fix crtc's atomic check conditional
+
+From: Zack Rusin <zack.rusin@broadcom.com>
+
+commit a60ccade88f926e871a57176e86a34bbf0db0098 upstream.
+
+The conditional was supposed to prevent enabling of a crtc state
+without a set primary plane. Accidentally, it also prevented disabling
+a crtc state with a set primary plane. Neither is correct.
+
+Fix the conditional and just driver-warn when a crtc state has been
+enabled without a primary plane, which will help debug broken userspace.
+
+Fixes IGT's kms_atomic_interruptible and kms_atomic_transition tests.
+
+Signed-off-by: Zack Rusin <zack.rusin@broadcom.com>
+Fixes: 06ec41909e31 ("drm/vmwgfx: Add and connect CRTC helper functions")
+Cc: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
+Cc: dri-devel@lists.freedesktop.org
+Cc: <stable@vger.kernel.org> # v4.12+
+Reviewed-by: Ian Forbes <ian.forbes@broadcom.com>
+Reviewed-by: Martin Krastev <martin.krastev@broadcom.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240412025511.78553-5-zack.rusin@broadcom.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/vmwgfx/vmwgfx_kms.c |   11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -932,6 +932,7 @@ int vmw_du_cursor_plane_atomic_check(str
+ int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
+                            struct drm_atomic_state *state)
+ {
++      struct vmw_private *vmw = vmw_priv(crtc->dev);
+       struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
+                                                                        crtc);
+       struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
+@@ -939,9 +940,13 @@ int vmw_du_crtc_atomic_check(struct drm_
+       bool has_primary = new_state->plane_mask &
+                          drm_plane_mask(crtc->primary);
+-      /* We always want to have an active plane with an active CRTC */
+-      if (has_primary != new_state->enable)
+-              return -EINVAL;
++      /*
++       * This is fine in general, but broken userspace might expect
++       * some actual rendering so give a clue as why it's blank.
++       */
++      if (new_state->enable && !has_primary)
++              drm_dbg_driver(&vmw->drm,
++                             "CRTC without a primary plane will be blank.\n");
+       if (new_state->connector_mask != connector_mask &&
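
The conditional change can be seen as a truth table: the old check (has_primary != enable) rejected both enabling without a primary and disabling while a primary was still attached, whereas the new code only warns about the former. A tiny standalone program to enumerate the four cases (plain C, not driver code):

    #include <stdbool.h>
    #include <stdio.h>

    /* Old check: reject whenever enable and has_primary disagree. */
    static bool old_check_rejects(bool enable, bool has_primary)
    {
        return has_primary != enable;
    }

    /* New behaviour: never reject, only warn when enabling without a primary. */
    static bool new_check_warns(bool enable, bool has_primary)
    {
        return enable && !has_primary;
    }

    int main(void)
    {
        for (int enable = 0; enable <= 1; enable++)
            for (int has_primary = 0; has_primary <= 1; has_primary++)
                printf("enable=%d primary=%d  old rejects=%d  new warns=%d\n",
                       enable, has_primary,
                       old_check_rejects(enable, has_primary),
                       new_check_warns(enable, has_primary));
        return 0;
    }

The (enable=0, primary=1) row is the case the old conditional rejected by accident.
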
diff --git a/queue-6.8/drm-vmwgfx-fix-prime-import-export.patch b/queue-6.8/drm-vmwgfx-fix-prime-import-export.patch
new file mode 100644 (file)
index 0000000..ef46aa4
--- /dev/null
@@ -0,0 +1,319 @@
+From b32233accefff1338806f064fb9b62cf5bc0609f Mon Sep 17 00:00:00 2001
+From: Zack Rusin <zack.rusin@broadcom.com>
+Date: Thu, 11 Apr 2024 22:55:09 -0400
+Subject: drm/vmwgfx: Fix prime import/export
+
+From: Zack Rusin <zack.rusin@broadcom.com>
+
+commit b32233accefff1338806f064fb9b62cf5bc0609f upstream.
+
+vmwgfx never supported prime import of external buffers. Furthermore,
+the driver exposes two different objects to userspace: vmw_surfaces and
+gem buffers, but prime import/export only worked with vmw_surfaces.
+
+Because gem buffers are used through the dumb_buffer interface, this
+meant that driver-created buffers couldn't have been prime exported or
+imported.
+
+Fix prime import/export. Makes IGT's kms_prime pass.
+
+Signed-off-by: Zack Rusin <zack.rusin@broadcom.com>
+Fixes: 8afa13a0583f ("drm/vmwgfx: Implement DRIVER_GEM")
+Cc: <stable@vger.kernel.org> # v6.6+
+Reviewed-by: Martin Krastev <martin.krastev@broadcom.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240412025511.78553-4-zack.rusin@broadcom.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/vmwgfx/vmwgfx_blit.c       |   35 +++++++++++++++++++++--
+ drivers/gpu/drm/vmwgfx/vmwgfx_bo.c         |    7 ++--
+ drivers/gpu/drm/vmwgfx/vmwgfx_bo.h         |    2 +
+ drivers/gpu/drm/vmwgfx/vmwgfx_drv.c        |    1 
+ drivers/gpu/drm/vmwgfx/vmwgfx_drv.h        |    3 +
+ drivers/gpu/drm/vmwgfx/vmwgfx_gem.c        |   32 +++++++++++++++++++++
+ drivers/gpu/drm/vmwgfx/vmwgfx_prime.c      |   15 ++++++++-
+ drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c |   44 +++++++++++++++++++----------
+ 8 files changed, 117 insertions(+), 22 deletions(-)
+
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+@@ -456,8 +456,10 @@ int vmw_bo_cpu_blit(struct ttm_buffer_ob
+               .no_wait_gpu = false
+       };
+       u32 j, initial_line = dst_offset / dst_stride;
+-      struct vmw_bo_blit_line_data d;
++      struct vmw_bo_blit_line_data d = {0};
+       int ret = 0;
++      struct page **dst_pages = NULL;
++      struct page **src_pages = NULL;
+       /* Buffer objects need to be either pinned or reserved: */
+       if (!(dst->pin_count))
+@@ -477,12 +479,35 @@ int vmw_bo_cpu_blit(struct ttm_buffer_ob
+                       return ret;
+       }
++      if (!src->ttm->pages && src->ttm->sg) {
++              src_pages = kvmalloc_array(src->ttm->num_pages,
++                                         sizeof(struct page *), GFP_KERNEL);
++              if (!src_pages)
++                      return -ENOMEM;
++              ret = drm_prime_sg_to_page_array(src->ttm->sg, src_pages,
++                                               src->ttm->num_pages);
++              if (ret)
++                      goto out;
++      }
++      if (!dst->ttm->pages && dst->ttm->sg) {
++              dst_pages = kvmalloc_array(dst->ttm->num_pages,
++                                         sizeof(struct page *), GFP_KERNEL);
++              if (!dst_pages) {
++                      ret = -ENOMEM;
++                      goto out;
++              }
++              ret = drm_prime_sg_to_page_array(dst->ttm->sg, dst_pages,
++                                               dst->ttm->num_pages);
++              if (ret)
++                      goto out;
++      }
++
+       d.mapped_dst = 0;
+       d.mapped_src = 0;
+       d.dst_addr = NULL;
+       d.src_addr = NULL;
+-      d.dst_pages = dst->ttm->pages;
+-      d.src_pages = src->ttm->pages;
++      d.dst_pages = dst->ttm->pages ? dst->ttm->pages : dst_pages;
++      d.src_pages = src->ttm->pages ? src->ttm->pages : src_pages;
+       d.dst_num_pages = PFN_UP(dst->resource->size);
+       d.src_num_pages = PFN_UP(src->resource->size);
+       d.dst_prot = ttm_io_prot(dst, dst->resource, PAGE_KERNEL);
+@@ -504,6 +529,10 @@ out:
+               kunmap_atomic(d.src_addr);
+       if (d.dst_addr)
+               kunmap_atomic(d.dst_addr);
++      if (src_pages)
++              kvfree(src_pages);
++      if (dst_pages)
++              kvfree(dst_pages);
+       return ret;
+ }
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+@@ -377,7 +377,8 @@ static int vmw_bo_init(struct vmw_privat
+ {
+       struct ttm_operation_ctx ctx = {
+               .interruptible = params->bo_type != ttm_bo_type_kernel,
+-              .no_wait_gpu = false
++              .no_wait_gpu = false,
++              .resv = params->resv,
+       };
+       struct ttm_device *bdev = &dev_priv->bdev;
+       struct drm_device *vdev = &dev_priv->drm;
+@@ -394,8 +395,8 @@ static int vmw_bo_init(struct vmw_privat
+       vmw_bo_placement_set(vmw_bo, params->domain, params->busy_domain);
+       ret = ttm_bo_init_reserved(bdev, &vmw_bo->tbo, params->bo_type,
+-                                 &vmw_bo->placement, 0, &ctx, NULL,
+-                                 NULL, destroy);
++                                 &vmw_bo->placement, 0, &ctx,
++                                 params->sg, params->resv, destroy);
+       if (unlikely(ret))
+               return ret;
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
+@@ -55,6 +55,8 @@ struct vmw_bo_params {
+       enum ttm_bo_type bo_type;
+       size_t size;
+       bool pin;
++      struct dma_resv *resv;
++      struct sg_table *sg;
+ };
+ /**
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+@@ -1628,6 +1628,7 @@ static const struct drm_driver driver =
+       .prime_fd_to_handle = vmw_prime_fd_to_handle,
+       .prime_handle_to_fd = vmw_prime_handle_to_fd,
++      .gem_prime_import_sg_table = vmw_prime_import_sg_table,
+       .fops = &vmwgfx_driver_fops,
+       .name = VMWGFX_DRIVER_NAME,
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+@@ -1131,6 +1131,9 @@ extern int vmw_prime_handle_to_fd(struct
+                                 struct drm_file *file_priv,
+                                 uint32_t handle, uint32_t flags,
+                                 int *prime_fd);
++struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
++                                               struct dma_buf_attachment *attach,
++                                               struct sg_table *table);
+ /*
+  * MemoryOBject management -  vmwgfx_mob.c
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+@@ -149,6 +149,38 @@ out_no_bo:
+       return ret;
+ }
++struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
++                                               struct dma_buf_attachment *attach,
++                                               struct sg_table *table)
++{
++      int ret;
++      struct vmw_private *dev_priv = vmw_priv(dev);
++      struct drm_gem_object *gem = NULL;
++      struct vmw_bo *vbo;
++      struct vmw_bo_params params = {
++              .domain = (dev_priv->has_mob) ? VMW_BO_DOMAIN_SYS : VMW_BO_DOMAIN_VRAM,
++              .busy_domain = VMW_BO_DOMAIN_SYS,
++              .bo_type = ttm_bo_type_sg,
++              .size = attach->dmabuf->size,
++              .pin = false,
++              .resv = attach->dmabuf->resv,
++              .sg = table,
++
++      };
++
++      dma_resv_lock(params.resv, NULL);
++
++      ret = vmw_bo_create(dev_priv, &params, &vbo);
++      if (ret != 0)
++              goto out_no_bo;
++
++      vbo->tbo.base.funcs = &vmw_gem_object_funcs;
++
++      gem = &vbo->tbo.base;
++out_no_bo:
++      dma_resv_unlock(params.resv);
++      return gem;
++}
+ int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
+                               struct drm_file *filp)
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
+@@ -75,8 +75,12 @@ int vmw_prime_fd_to_handle(struct drm_de
+                          int fd, u32 *handle)
+ {
+       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
++      int ret = ttm_prime_fd_to_handle(tfile, fd, handle);
+-      return ttm_prime_fd_to_handle(tfile, fd, handle);
++      if (ret)
++              ret = drm_gem_prime_fd_to_handle(dev, file_priv, fd, handle);
++
++      return ret;
+ }
+ int vmw_prime_handle_to_fd(struct drm_device *dev,
+@@ -85,5 +89,12 @@ int vmw_prime_handle_to_fd(struct drm_de
+                          int *prime_fd)
+ {
+       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+-      return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
++      int ret;
++
++      if (handle > VMWGFX_NUM_MOB)
++              ret = ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
++      else
++              ret = drm_gem_prime_handle_to_fd(dev, file_priv, handle, flags, prime_fd);
++
++      return ret;
+ }
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+@@ -220,13 +220,18 @@ static int vmw_ttm_map_dma(struct vmw_tt
+       switch (dev_priv->map_mode) {
+       case vmw_dma_map_bind:
+       case vmw_dma_map_populate:
+-              vsgt->sgt = &vmw_tt->sgt;
+-              ret = sg_alloc_table_from_pages_segment(
+-                      &vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
+-                      (unsigned long)vsgt->num_pages << PAGE_SHIFT,
+-                      dma_get_max_seg_size(dev_priv->drm.dev), GFP_KERNEL);
+-              if (ret)
+-                      goto out_sg_alloc_fail;
++              if (vmw_tt->dma_ttm.page_flags  & TTM_TT_FLAG_EXTERNAL) {
++                      vsgt->sgt = vmw_tt->dma_ttm.sg;
++              } else {
++                      vsgt->sgt = &vmw_tt->sgt;
++                      ret = sg_alloc_table_from_pages_segment(&vmw_tt->sgt,
++                              vsgt->pages, vsgt->num_pages, 0,
++                              (unsigned long)vsgt->num_pages << PAGE_SHIFT,
++                              dma_get_max_seg_size(dev_priv->drm.dev),
++                              GFP_KERNEL);
++                      if (ret)
++                              goto out_sg_alloc_fail;
++              }
+               ret = vmw_ttm_map_for_dma(vmw_tt);
+               if (unlikely(ret != 0))
+@@ -241,8 +246,9 @@ static int vmw_ttm_map_dma(struct vmw_tt
+       return 0;
+ out_map_fail:
+-      sg_free_table(vmw_tt->vsgt.sgt);
+-      vmw_tt->vsgt.sgt = NULL;
++      drm_warn(&dev_priv->drm, "VSG table map failed!");
++      sg_free_table(vsgt->sgt);
++      vsgt->sgt = NULL;
+ out_sg_alloc_fail:
+       return ret;
+ }
+@@ -388,15 +394,17 @@ static void vmw_ttm_destroy(struct ttm_d
+ static int vmw_ttm_populate(struct ttm_device *bdev,
+                           struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
+ {
+-      int ret;
++      bool external = (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
+-      /* TODO: maybe completely drop this ? */
+       if (ttm_tt_is_populated(ttm))
+               return 0;
+-      ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
++      if (external && ttm->sg)
++              return  drm_prime_sg_to_dma_addr_array(ttm->sg,
++                                                     ttm->dma_address,
++                                                     ttm->num_pages);
+-      return ret;
++      return ttm_pool_alloc(&bdev->pool, ttm, ctx);
+ }
+ static void vmw_ttm_unpopulate(struct ttm_device *bdev,
+@@ -404,6 +412,10 @@ static void vmw_ttm_unpopulate(struct tt
+ {
+       struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
+                                                dma_ttm);
++      bool external = (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
++
++      if (external)
++              return;
+       vmw_ttm_unbind(bdev, ttm);
+@@ -422,6 +434,7 @@ static struct ttm_tt *vmw_ttm_tt_create(
+ {
+       struct vmw_ttm_tt *vmw_be;
+       int ret;
++      bool external = bo->type == ttm_bo_type_sg;
+       vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
+       if (!vmw_be)
+@@ -430,7 +443,10 @@ static struct ttm_tt *vmw_ttm_tt_create(
+       vmw_be->dev_priv = vmw_priv_from_ttm(bo->bdev);
+       vmw_be->mob = NULL;
+-      if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
++      if (external)
++              page_flags |= TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE;
++
++      if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent || external)
+               ret = ttm_sg_tt_init(&vmw_be->dma_ttm, bo, page_flags,
+                                    ttm_cached);
+       else
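
A small sketch of the primary-then-fallback lookup the prime fd-to-handle ioctl now uses (try the TTM path first, fall back to the GEM helper when it fails). The translate functions below are hypothetical stand-ins, not the real TTM/GEM APIs.

    #include <errno.h>
    #include <stdio.h>

    /* Hypothetical stand-ins: each returns 0 on success, -ENOENT if it
     * does not know the object. */
    static int legacy_fd_to_handle(int fd, unsigned int *handle)
    {
        (void)fd;
        (void)handle;
        return -ENOENT;   /* pretend the legacy path cannot resolve it */
    }

    static int gem_fd_to_handle(int fd, unsigned int *handle)
    {
        *handle = (unsigned int)fd + 100;   /* fake translation */
        return 0;
    }

    /* Try the older path first; only if it fails, fall back to the newer
     * one, so both object types keep working through one ioctl. */
    static int prime_fd_to_handle(int fd, unsigned int *handle)
    {
        int ret = legacy_fd_to_handle(fd, handle);

        if (ret)
            ret = gem_fd_to_handle(fd, handle);
        return ret;
    }

    int main(void)
    {
        unsigned int handle;

        if (!prime_fd_to_handle(7, &handle))
            printf("resolved to handle %u via fallback\n", handle);
        return 0;
    }
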
diff --git a/queue-6.8/drm-vmwgfx-sort-primary-plane-formats-by-order-of-preference.patch b/queue-6.8/drm-vmwgfx-sort-primary-plane-formats-by-order-of-preference.patch
new file mode 100644 (file)
index 0000000..35a5dd0
--- /dev/null
@@ -0,0 +1,48 @@
+From d4c972bff3129a9dd4c22a3999fd8eba1a81531a Mon Sep 17 00:00:00 2001
+From: Zack Rusin <zack.rusin@broadcom.com>
+Date: Thu, 11 Apr 2024 22:55:11 -0400
+Subject: drm/vmwgfx: Sort primary plane formats by order of preference
+
+From: Zack Rusin <zack.rusin@broadcom.com>
+
+commit d4c972bff3129a9dd4c22a3999fd8eba1a81531a upstream.
+
+The table of primary plane formats wasn't sorted at all, leading to
+applications picking our least desirable formats by default.
+
+Sort the primary plane formats according to our order of preference.
+
+A nice side-effect of this change is that it makes IGT's kms_atomic
+plane-invalid-params test pass: the test picks the first listed format,
+which for vmwgfx was DRM_FORMAT_XRGB1555, and uses fb's with odd sizes,
+which made Pixman (which IGT depends on) assert, because our 16bpp
+formats aren't 32-bit aligned the way Pixman requires all formats
+to be.
+
+Signed-off-by: Zack Rusin <zack.rusin@broadcom.com>
+Fixes: 36cc79bc9077 ("drm/vmwgfx: Add universal plane support")
+Cc: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
+Cc: dri-devel@lists.freedesktop.org
+Cc: <stable@vger.kernel.org> # v4.12+
+Acked-by: Pekka Paalanen <pekka.paalanen@collabora.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240412025511.78553-6-zack.rusin@broadcom.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/vmwgfx/vmwgfx_kms.h |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+@@ -243,10 +243,10 @@ struct vmw_framebuffer_bo {
+ static const uint32_t __maybe_unused vmw_primary_plane_formats[] = {
+-      DRM_FORMAT_XRGB1555,
+-      DRM_FORMAT_RGB565,
+       DRM_FORMAT_XRGB8888,
+       DRM_FORMAT_ARGB8888,
++      DRM_FORMAT_RGB565,
++      DRM_FORMAT_XRGB1555,
+ };
+ static const uint32_t __maybe_unused vmw_cursor_plane_formats[] = {
diff --git a/queue-6.8/drm-xe-fix-bo-leak-in-intel_fb_bo_framebuffer_init.patch b/queue-6.8/drm-xe-fix-bo-leak-in-intel_fb_bo_framebuffer_init.patch
new file mode 100644 (file)
index 0000000..5521964
--- /dev/null
@@ -0,0 +1,59 @@
+From 652ead9b746a63e4e79d7ad66d3edf0a8a5b0c2f Mon Sep 17 00:00:00 2001
+From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Date: Thu, 4 Apr 2024 11:03:02 +0200
+Subject: drm/xe: Fix bo leak in intel_fb_bo_framebuffer_init
+
+From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+
+commit 652ead9b746a63e4e79d7ad66d3edf0a8a5b0c2f upstream.
+
+Add an unreference of the bo in the error path, to prevent leaking a bo ref.
+
+Return 0 on success to clarify the success path.
+
+Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Fixes: 44e694958b95 ("drm/xe/display: Implement display support")
+Cc: <stable@vger.kernel.org> # v6.8+
+Reviewed-by: Nirmoy Das <nirmoy.das@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240404090302.68422-1-maarten.lankhorst@linux.intel.com
+(cherry picked from commit a2f3d731be3893e730417ae3190760fcaffdf549)
+Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/xe/display/intel_fb_bo.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/xe/display/intel_fb_bo.c b/drivers/gpu/drm/xe/display/intel_fb_bo.c
+index b21da7b745a5..a9c1f9885c6b 100644
+--- a/drivers/gpu/drm/xe/display/intel_fb_bo.c
++++ b/drivers/gpu/drm/xe/display/intel_fb_bo.c
+@@ -31,7 +31,7 @@ int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
+       ret = ttm_bo_reserve(&bo->ttm, true, false, NULL);
+       if (ret)
+-              return ret;
++              goto err;
+       if (!(bo->flags & XE_BO_SCANOUT_BIT)) {
+               /*
+@@ -42,12 +42,16 @@ int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
+                */
+               if (XE_IOCTL_DBG(i915, !list_empty(&bo->ttm.base.gpuva.list))) {
+                       ttm_bo_unreserve(&bo->ttm);
+-                      return -EINVAL;
++                      ret = -EINVAL;
++                      goto err;
+               }
+               bo->flags |= XE_BO_SCANOUT_BIT;
+       }
+       ttm_bo_unreserve(&bo->ttm);
++      return 0;
++err:
++      xe_bo_put(bo);
+       return ret;
+ }
+-- 
+2.44.0
+
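The fix follows the usual acquire-then-unwind shape: a reference taken before
the first failure point must be dropped on every error path, and the success
path returns 0 explicitly. A minimal userspace sketch of that shape, with
invented names standing in for the xe/ttm calls:

#include <stdio.h>
#include <stdlib.h>

struct obj {
        int refcount;
};

static void obj_put(struct obj *o)      /* stands in for xe_bo_put() */
{
        if (--o->refcount == 0)
                free(o);
}

/* Returns 0 on success; every failure path drops the reference taken by the caller. */
static int framebuffer_init(struct obj *o, int reserve_ok, int scanout_ok)
{
        int ret;

        if (!reserve_ok) {
                ret = -5;               /* e.g. an error from the reserve step */
                goto err;
        }
        if (!scanout_ok) {
                ret = -22;              /* -EINVAL: object cannot be used for scanout */
                goto err;
        }
        return 0;

err:
        obj_put(o);                     /* prevent the reference leak */
        return ret;
}

int main(void)
{
        struct obj *o = calloc(1, sizeof(*o));

        o->refcount = 1;                /* reference handed to framebuffer_init() */
        printf("init: %d\n", framebuffer_init(o, 1, 0));
        return 0;
}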
diff --git a/queue-6.8/fork-defer-linking-file-vma-until-vma-is-fully-initialized.patch b/queue-6.8/fork-defer-linking-file-vma-until-vma-is-fully-initialized.patch
new file mode 100644 (file)
index 0000000..5fbe9a1
--- /dev/null
@@ -0,0 +1,107 @@
+From 35e351780fa9d8240dd6f7e4f245f9ea37e96c19 Mon Sep 17 00:00:00 2001
+From: Miaohe Lin <linmiaohe@huawei.com>
+Date: Wed, 10 Apr 2024 17:14:41 +0800
+Subject: fork: defer linking file vma until vma is fully initialized
+
+From: Miaohe Lin <linmiaohe@huawei.com>
+
+commit 35e351780fa9d8240dd6f7e4f245f9ea37e96c19 upstream.
+
+Thorvald reported a WARNING [1].  The root cause is the race below:
+
+ CPU 1                                 CPU 2
+ fork                                  hugetlbfs_fallocate
+  dup_mmap                              hugetlbfs_punch_hole
+   i_mmap_lock_write(mapping);
+   vma_interval_tree_insert_after -- Child vma is visible through i_mmap tree.
+   i_mmap_unlock_write(mapping);
+   hugetlb_dup_vma_private -- Clear vma_lock outside i_mmap_rwsem!
+                                        i_mmap_lock_write(mapping);
+                                        hugetlb_vmdelete_list
+                                         vma_interval_tree_foreach
+                                          hugetlb_vma_trylock_write -- Vma_lock is cleared.
+   tmp->vm_ops->open -- Alloc new vma_lock outside i_mmap_rwsem!
+                                          hugetlb_vma_unlock_write -- Vma_lock is assigned!!!
+                                        i_mmap_unlock_write(mapping);
+
+hugetlb_dup_vma_private() and hugetlb_vm_op_open() are called outside the
+i_mmap_rwsem lock while the vma lock can be used at the same time.  Fix
+this by deferring linking the file vma until the vma is fully initialized.
+Those vmas should be fully initialized before they can be used.
+
+Link: https://lkml.kernel.org/r/20240410091441.3539905-1-linmiaohe@huawei.com
+Fixes: 8d9bfb260814 ("hugetlb: add vma based lock for pmd sharing")
+Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
+Reported-by: Thorvald Natvig <thorvald@google.com>
+Closes: https://lore.kernel.org/linux-mm/20240129161735.6gmjsswx62o4pbja@revolver/T/ [1]
+Reviewed-by: Jane Chu <jane.chu@oracle.com>
+Cc: Christian Brauner <brauner@kernel.org>
+Cc: Heiko Carstens <hca@linux.ibm.com>
+Cc: Kent Overstreet <kent.overstreet@linux.dev>
+Cc: Liam R. Howlett <Liam.Howlett@oracle.com>
+Cc: Mateusz Guzik <mjguzik@gmail.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Miaohe Lin <linmiaohe@huawei.com>
+Cc: Muchun Song <muchun.song@linux.dev>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Peng Zhang <zhangpeng.00@bytedance.com>
+Cc: Tycho Andersen <tandersen@netflix.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/fork.c |   33 +++++++++++++++++----------------
+ 1 file changed, 17 insertions(+), 16 deletions(-)
+
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -712,6 +712,23 @@ static __latent_entropy int dup_mmap(str
+               } else if (anon_vma_fork(tmp, mpnt))
+                       goto fail_nomem_anon_vma_fork;
+               vm_flags_clear(tmp, VM_LOCKED_MASK);
++              /*
++               * Copy/update hugetlb private vma information.
++               */
++              if (is_vm_hugetlb_page(tmp))
++                      hugetlb_dup_vma_private(tmp);
++
++              /*
++               * Link the vma into the MT. After using __mt_dup(), memory
++               * allocation is not necessary here, so it cannot fail.
++               */
++              vma_iter_bulk_store(&vmi, tmp);
++
++              mm->map_count++;
++
++              if (tmp->vm_ops && tmp->vm_ops->open)
++                      tmp->vm_ops->open(tmp);
++
+               file = tmp->vm_file;
+               if (file) {
+                       struct address_space *mapping = file->f_mapping;
+@@ -728,25 +745,9 @@ static __latent_entropy int dup_mmap(str
+                       i_mmap_unlock_write(mapping);
+               }
+-              /*
+-               * Copy/update hugetlb private vma information.
+-               */
+-              if (is_vm_hugetlb_page(tmp))
+-                      hugetlb_dup_vma_private(tmp);
+-
+-              /*
+-               * Link the vma into the MT. After using __mt_dup(), memory
+-               * allocation is not necessary here, so it cannot fail.
+-               */
+-              vma_iter_bulk_store(&vmi, tmp);
+-
+-              mm->map_count++;
+               if (!(tmp->vm_flags & VM_WIPEONFORK))
+                       retval = copy_page_range(tmp, mpnt);
+-              if (tmp->vm_ops && tmp->vm_ops->open)
+-                      tmp->vm_ops->open(tmp);
+-
+               if (retval) {
+                       mpnt = vma_next(&vmi);
+                       goto loop_out;
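The underlying rule is the usual publish-after-init ordering for shared data:
finish initializing an object before linking it into a structure other threads
can walk. A minimal pthread sketch of that ordering, assuming a simple
lock-protected list rather than the i_mmap interval tree:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
        int ready_data;         /* stands in for the vma's hugetlb private state */
        struct node *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *list_head;

static void publish(struct node *n)
{
        /* 1. Fully initialize the object first... */
        n->ready_data = 42;

        /* 2. ...then link it where other threads can find it. */
        pthread_mutex_lock(&list_lock);
        n->next = list_head;
        list_head = n;
        pthread_mutex_unlock(&list_lock);
}

static void *reader(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&list_lock);
        for (struct node *n = list_head; n; n = n->next)
                printf("saw node with data %d\n", n->ready_data);  /* never sees a half-built node */
        pthread_mutex_unlock(&list_lock);
        return NULL;
}

int main(void)
{
        pthread_t t;

        publish(calloc(1, sizeof(struct node)));
        pthread_create(&t, NULL, reader, NULL);
        pthread_join(t, NULL);
        return 0;
}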
diff --git a/queue-6.8/fs-sysfs-fix-reference-leak-in-sysfs_break_active_protection.patch b/queue-6.8/fs-sysfs-fix-reference-leak-in-sysfs_break_active_protection.patch
new file mode 100644 (file)
index 0000000..c4837f0
--- /dev/null
@@ -0,0 +1,42 @@
+From a90bca2228c0646fc29a72689d308e5fe03e6d78 Mon Sep 17 00:00:00 2001
+From: Alan Stern <stern@rowland.harvard.edu>
+Date: Wed, 13 Mar 2024 17:43:41 -0400
+Subject: fs: sysfs: Fix reference leak in sysfs_break_active_protection()
+
+From: Alan Stern <stern@rowland.harvard.edu>
+
+commit a90bca2228c0646fc29a72689d308e5fe03e6d78 upstream.
+
+The sysfs_break_active_protection() routine has an obvious reference
+leak in its error path.  If the call to kernfs_find_and_get() fails then
+kn will be NULL, so the companion sysfs_unbreak_active_protection()
+routine won't get called (and would only cause an access violation by
+trying to dereference kn->parent if it was called).  As a result, the
+reference to kobj acquired at the start of the function will never be
+released.
+
+Fix the leak by adding an explicit kobject_put() call when kn is NULL.
+
+Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
+Fixes: 2afc9166f79b ("scsi: sysfs: Introduce sysfs_{un,}break_active_protection()")
+Cc: Bart Van Assche <bvanassche@acm.org>
+Cc: stable@vger.kernel.org
+Reviewed-by: Bart Van Assche <bvanassche@acm.org>
+Acked-by: Tejun Heo <tj@kernel.org>
+Link: https://lore.kernel.org/r/8a4d3f0f-c5e3-4b70-a188-0ca433f9e6f9@rowland.harvard.edu
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/sysfs/file.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/fs/sysfs/file.c
++++ b/fs/sysfs/file.c
+@@ -463,6 +463,8 @@ struct kernfs_node *sysfs_break_active_p
+       kn = kernfs_find_and_get(kobj->sd, attr->name);
+       if (kn)
+               kernfs_break_active_protection(kn);
++      else
++              kobject_put(kobj);
+       return kn;
+ }
+ EXPORT_SYMBOL_GPL(sysfs_break_active_protection);
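The invariant being restored is that every reference taken by
sysfs_break_active_protection() is eventually dropped: on success by the
companion sysfs_unbreak_active_protection(), and on failure by the function
itself, since the caller never gets a handle to pass to the companion. A
compact userspace sketch of that pairing, with stub names rather than the real
kobject/kernfs API:

#include <stdio.h>

static int refs;

static void get(void) { refs++; }       /* stands in for kobject_get() */
static void put(void) { refs--; }       /* stands in for kobject_put() */

/* Returns nonzero on success; the caller then calls unbreak_protection() to drop the ref. */
static int break_protection(int lookup_ok)
{
        get();
        if (!lookup_ok) {
                put();                  /* the fix: no handle returned, so unbreak never runs */
                return 0;
        }
        return 1;
}

static void unbreak_protection(void)
{
        put();
}

int main(void)
{
        if (break_protection(0) == 0)
                printf("lookup failed, refs balanced: %d\n", refs);     /* prints 0 */

        if (break_protection(1))
                unbreak_protection();
        printf("success path, refs balanced: %d\n", refs);             /* prints 0 */
        return 0;
}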
diff --git a/queue-6.8/fuse-fix-leaked-enosys-error-on-first-statx-call.patch b/queue-6.8/fuse-fix-leaked-enosys-error-on-first-statx-call.patch
new file mode 100644 (file)
index 0000000..de864ec
--- /dev/null
@@ -0,0 +1,46 @@
+From eb4b691b9115fae4c844f5941418335575cf667f Mon Sep 17 00:00:00 2001
+From: Danny Lin <danny@orbstack.dev>
+Date: Sat, 13 Apr 2024 17:34:31 -0700
+Subject: fuse: fix leaked ENOSYS error on first statx call
+
+From: Danny Lin <danny@orbstack.dev>
+
+commit eb4b691b9115fae4c844f5941418335575cf667f upstream.
+
+FUSE attempts to detect server support for statx by trying it once and
+setting no_statx=1 if it fails with ENOSYS, but consider the following
+scenario:
+
+- Userspace (e.g. sh) calls stat() on a file
+  * succeeds
+- Userspace (e.g. lsd) calls statx(BTIME) on the same file
+  - request_mask = STATX_BASIC_STATS | STATX_BTIME
+  - first pass: sync=true due to differing cache_mask
+  - statx fails and returns ENOSYS
+  - set no_statx and retry
+  - retry sets mask = STATX_BASIC_STATS
+  - now mask == cache_mask; sync=false (time_before: still valid)
+  - so we take the "else if (stat)" path
+  - "err" is still ENOSYS from the failed statx call
+
+Fix this by zeroing "err" before retrying the failed call.
+
+Fixes: d3045530bdd2 ("fuse: implement statx")
+Cc: stable@vger.kernel.org # v6.6
+Signed-off-by: Danny Lin <danny@orbstack.dev>
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/fuse/dir.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -1317,6 +1317,7 @@ retry:
+                       err = fuse_do_statx(inode, file, stat);
+                       if (err == -ENOSYS) {
+                               fc->no_statx = 1;
++                              err = 0;
+                               goto retry;
+                       }
+               } else {
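The bug is a generic one: a local error variable is set by a failed first
attempt, the code falls back to a path that succeeds without touching it, and
the stale error is returned. A minimal sketch of the pattern and the fix,
assuming a hypothetical do_statx() stub rather than the real FUSE internals:

#include <stdio.h>
#include <errno.h>

static int no_statx;

static int do_statx(void) { return -ENOSYS; }   /* server doesn't implement statx */

static int getattr(void)
{
        int err = 0;

retry:
        if (!no_statx) {
                err = do_statx();
                if (err == -ENOSYS) {
                        no_statx = 1;
                        err = 0;        /* the fix: don't leak ENOSYS into the retry */
                        goto retry;
                }
        } else {
                /* retry path: attributes are considered valid, err is not reassigned */
        }
        return err;
}

int main(void)
{
        printf("first getattr: %d\n", getattr());  /* 0 with the fix, -ENOSYS without it */
        return 0;
}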
diff --git a/queue-6.8/init-main.c-fix-potential-static_command_line-memory-overflow.patch b/queue-6.8/init-main.c-fix-potential-static_command_line-memory-overflow.patch
new file mode 100644 (file)
index 0000000..5680f68
--- /dev/null
@@ -0,0 +1,43 @@
+From 46dad3c1e57897ab9228332f03e1c14798d2d3b9 Mon Sep 17 00:00:00 2001
+From: Yuntao Wang <ytcoode@gmail.com>
+Date: Fri, 12 Apr 2024 16:17:32 +0800
+Subject: init/main.c: Fix potential static_command_line memory overflow
+
+From: Yuntao Wang <ytcoode@gmail.com>
+
+commit 46dad3c1e57897ab9228332f03e1c14798d2d3b9 upstream.
+
+We allocate memory of size 'xlen + strlen(boot_command_line) + 1' for
+static_command_line, but the strings copied into static_command_line are
+extra_command_line and command_line, rather than extra_command_line and
+boot_command_line.
+
+When strlen(command_line) > strlen(boot_command_line), static_command_line
+will overflow.
+
+This patch just restores the use of strlen(command_line), which was
+mistakenly consolidated with strlen(boot_command_line) in commit
+f5c7310ac73e ("init/main: add checks for the return value of
+memblock_alloc*()").
+
+Link: https://lore.kernel.org/all/20240412081733.35925-2-ytcoode@gmail.com/
+
+Fixes: f5c7310ac73e ("init/main: add checks for the return value of memblock_alloc*()")
+Cc: stable@vger.kernel.org
+Signed-off-by: Yuntao Wang <ytcoode@gmail.com>
+Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ init/main.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/init/main.c
++++ b/init/main.c
+@@ -635,6 +635,8 @@ static void __init setup_command_line(ch
+       if (!saved_command_line)
+               panic("%s: Failed to allocate %zu bytes\n", __func__, len + ilen);
++      len = xlen + strlen(command_line) + 1;
++
+       static_command_line = memblock_alloc(len, SMP_CACHE_BYTES);
+       if (!static_command_line)
+               panic("%s: Failed to allocate %zu bytes\n", __func__, len);
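The bug class is sizing a buffer from one string and then copying a different,
possibly longer one into it. A minimal userspace sketch of the mismatch and of
the fix of recomputing the length for the string actually copied; all variable
names and contents here are made up:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        const char *extra = "extra ";
        const char *boot_cmdline = "console=ttyS0";                    /* what the size was based on */
        const char *cmdline = "console=ttyS0 root=/dev/sda1 quiet";    /* what actually gets copied */

        size_t xlen = strlen(extra);
        size_t len = xlen + strlen(boot_cmdline) + 1;   /* buggy size: based on the wrong string */

        len = xlen + strlen(cmdline) + 1;               /* the fix: size for the copied string */

        char *static_cmdline = malloc(len);
        if (!static_cmdline)
                return 1;
        snprintf(static_cmdline, len, "%s%s", extra, cmdline);  /* now fits without overflow */
        printf("%s\n", static_cmdline);
        free(static_cmdline);
        return 0;
}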
diff --git a/queue-6.8/kvm-x86-mmu-write-protect-l2-sptes-in-tdp-mmu-when-clearing-dirty-status.patch b/queue-6.8/kvm-x86-mmu-write-protect-l2-sptes-in-tdp-mmu-when-clearing-dirty-status.patch
new file mode 100644 (file)
index 0000000..0c1945d
--- /dev/null
@@ -0,0 +1,92 @@
+From 2673dfb591a359c75080dd5af3da484b89320d22 Mon Sep 17 00:00:00 2001
+From: David Matlack <dmatlack@google.com>
+Date: Fri, 15 Mar 2024 16:05:38 -0700
+Subject: KVM: x86/mmu: Write-protect L2 SPTEs in TDP MMU when clearing dirty status
+
+From: David Matlack <dmatlack@google.com>
+
+commit 2673dfb591a359c75080dd5af3da484b89320d22 upstream.
+
+Check kvm_mmu_page_ad_need_write_protect() when deciding whether to
+write-protect or clear D-bits on TDP MMU SPTEs, so that the TDP MMU
+accounts for any role-specific reasons for disabling D-bit dirty logging.
+
+Specifically, TDP MMU SPTEs must be write-protected when the TDP MMU is
+being used to run an L2 (i.e. L1 has disabled EPT) and PML is enabled.
+KVM always disables PML when running L2, even when L1 and L2 GPAs are in
+the same domain, so failing to write-protect TDP MMU SPTEs will cause
+writes made by L2 to not be reflected in the dirty log.
+
+Reported-by: syzbot+900d58a45dcaab9e4821@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=900d58a45dcaab9e4821
+Fixes: 5982a5392663 ("KVM: x86/mmu: Use kvm_ad_enabled() to determine if TDP MMU SPTEs need wrprot")
+Cc: stable@vger.kernel.org
+Cc: Vipin Sharma <vipinsh@google.com>
+Cc: Sean Christopherson <seanjc@google.com>
+Signed-off-by: David Matlack <dmatlack@google.com>
+Link: https://lore.kernel.org/r/20240315230541.1635322-2-dmatlack@google.com
+[sean: massage shortlog and changelog, tweak ternary op formatting]
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/mmu/tdp_mmu.c |   21 ++++++++++++++++-----
+ 1 file changed, 16 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kvm/mmu/tdp_mmu.c
++++ b/arch/x86/kvm/mmu/tdp_mmu.c
+@@ -1498,6 +1498,16 @@ void kvm_tdp_mmu_try_split_huge_pages(st
+       }
+ }
++static bool tdp_mmu_need_write_protect(struct kvm_mmu_page *sp)
++{
++      /*
++       * All TDP MMU shadow pages share the same role as their root, aside
++       * from level, so it is valid to key off any shadow page to determine if
++       * write protection is needed for an entire tree.
++       */
++      return kvm_mmu_page_ad_need_write_protect(sp) || !kvm_ad_enabled();
++}
++
+ /*
+  * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
+  * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
+@@ -1508,7 +1518,8 @@ void kvm_tdp_mmu_try_split_huge_pages(st
+ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
+                          gfn_t start, gfn_t end)
+ {
+-      u64 dbit = kvm_ad_enabled() ? shadow_dirty_mask : PT_WRITABLE_MASK;
++      const u64 dbit = tdp_mmu_need_write_protect(root) ? PT_WRITABLE_MASK :
++                                                          shadow_dirty_mask;
+       struct tdp_iter iter;
+       bool spte_set = false;
+@@ -1523,7 +1534,7 @@ retry:
+               if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
+                       continue;
+-              KVM_MMU_WARN_ON(kvm_ad_enabled() &&
++              KVM_MMU_WARN_ON(dbit == shadow_dirty_mask &&
+                               spte_ad_need_write_protect(iter.old_spte));
+               if (!(iter.old_spte & dbit))
+@@ -1570,8 +1581,8 @@ bool kvm_tdp_mmu_clear_dirty_slot(struct
+ static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
+                                 gfn_t gfn, unsigned long mask, bool wrprot)
+ {
+-      u64 dbit = (wrprot || !kvm_ad_enabled()) ? PT_WRITABLE_MASK :
+-                                                 shadow_dirty_mask;
++      const u64 dbit = (wrprot || tdp_mmu_need_write_protect(root)) ? PT_WRITABLE_MASK :
++                                                                      shadow_dirty_mask;
+       struct tdp_iter iter;
+       lockdep_assert_held_write(&kvm->mmu_lock);
+@@ -1583,7 +1594,7 @@ static void clear_dirty_pt_masked(struct
+               if (!mask)
+                       break;
+-              KVM_MMU_WARN_ON(kvm_ad_enabled() &&
++              KVM_MMU_WARN_ON(dbit == shadow_dirty_mask &&
+                               spte_ad_need_write_protect(iter.old_spte));
+               if (iter.level > PG_LEVEL_4K ||
diff --git a/queue-6.8/kvm-x86-mmu-x86-don-t-overflow-lpage_info-when-checking-attributes.patch b/queue-6.8/kvm-x86-mmu-x86-don-t-overflow-lpage_info-when-checking-attributes.patch
new file mode 100644 (file)
index 0000000..05399c2
--- /dev/null
@@ -0,0 +1,114 @@
+From 992b54bd083c5bee24ff7cc35991388ab08598c4 Mon Sep 17 00:00:00 2001
+From: Rick Edgecombe <rick.p.edgecombe@intel.com>
+Date: Thu, 14 Mar 2024 14:29:02 -0700
+Subject: KVM: x86/mmu: x86: Don't overflow lpage_info when checking attributes
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Rick Edgecombe <rick.p.edgecombe@intel.com>
+
+commit 992b54bd083c5bee24ff7cc35991388ab08598c4 upstream.
+
+Fix KVM_SET_MEMORY_ATTRIBUTES to not overflow lpage_info array and trigger
+KASAN splat, as seen in the private_mem_conversions_test selftest.
+
+When memory attributes are set on a GFN range, that range will have
+specific properties applied to the TDP. A huge page cannot be used when
+the attributes are inconsistent, so they are disabled for those
+specific huge pages. For internal KVM reasons, huge pages are also not
+allowed to span adjacent memslots regardless of whether the backing memory
+could be mapped as huge.
+
+Which huge page sizes each GFN supports is tracked by 'lpage_info', an
+array of arrays of 'kvm_lpage_info' structs on the memslot. Each index of
+lpage_info contains a vmalloc allocated array of these for a specific
+supported page size. The kvm_lpage_info denotes whether a specific huge
+page (GFN and page size) on the memslot is supported. These arrays include
+indices for unaligned head and tail huge pages.
+
+Preventing huge pages from spanning adjacent memslots is covered by
+incrementing the count in head and tail kvm_lpage_info when the memslot is
+allocated, but disallowing huge pages for memory that has mixed attributes
+has to be done in a more complicated way. During the
+KVM_SET_MEMORY_ATTRIBUTES ioctl KVM updates lpage_info for each memslot in
+the range that has mismatched attributes. KVM does this a memslot at a
+time, and marks a special bit, KVM_LPAGE_MIXED_FLAG, in the kvm_lpage_info
+for any huge page. This bit is essentially a permanently elevated count.
+So huge pages will not be mapped for the GFN at that page size if the
+count is elevated in either case: a huge head or tail page unaligned to
+the memslot or if KVM_LPAGE_MIXED_FLAG is set because it has mixed
+attributes.
+
+To determine whether a huge page has consistent attributes, the
+KVM_SET_MEMORY_ATTRIBUTES operation checks an xarray to make sure it
+consistently has the incoming attribute. Since level - 1 huge pages are
+aligned to level huge pages, it employs an optimization. As long as the
+level - 1 huge pages are checked first, it can just check these and assume
+that if each level - 1 huge page contained within the level sized huge
+page is not mixed, then the level size huge page is not mixed. This
+optimization happens in the helper hugepage_has_attrs().
+
+Unfortunately, although the kvm_lpage_info array representing page size
+'level' will contain an entry for an unaligned tail page of size level,
+the array for level - 1 will not contain an entry for each GFN at page
+size level. The level - 1 array will only contain an index for any
+unaligned region covered by level - 1 huge page size, which can be a
+smaller region. So this causes the optimization to overflow the level - 1
+kvm_lpage_info and perform a vmalloc out of bounds read.
+
+In some cases of head and tail pages where an overflow could happen,
+callers skip the operation completely as KVM_LPAGE_MIXED_FLAG is not
+required to prevent huge pages as discussed earlier. But for memslots that
+are smaller than the 1GB page size, it does call hugepage_has_attrs(). In
+this case the huge page is both the head and tail page. The issue can be
+observed simply by compiling the kernel with CONFIG_KASAN_VMALLOC and
+running the selftest “private_mem_conversions_test”, which produces the
+output like the following:
+
+BUG: KASAN: vmalloc-out-of-bounds in hugepage_has_attrs+0x7e/0x110
+Read of size 4 at addr ffffc900000a3008 by task private_mem_con/169
+Call Trace:
+  dump_stack_lvl
+  print_report
+  ? __virt_addr_valid
+  ? hugepage_has_attrs
+  ? hugepage_has_attrs
+  kasan_report
+  ? hugepage_has_attrs
+  hugepage_has_attrs
+  kvm_arch_post_set_memory_attributes
+  kvm_vm_ioctl
+
+It is a little ambiguous whether the unaligned head page (in the bug case
+also the tail page) should be expected to have KVM_LPAGE_MIXED_FLAG set.
+It is not functionally required, as the unaligned head/tail pages will
+already have their kvm_lpage_info count incremented. The comments imply
+not setting it on unaligned head pages is intentional, so fix the callers
+to skip trying to set KVM_LPAGE_MIXED_FLAG in this case, and in doing so
+not call hugepage_has_attrs().
+
+Cc: stable@vger.kernel.org
+Fixes: 90b4fe17981e ("KVM: x86: Disallow hugepages when memory attributes are mixed")
+Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
+Reviewed-by: Kai Huang <kai.huang@intel.com>
+Reviewed-by: Chao Peng <chao.p.peng@linux.intel.com>
+Link: https://lore.kernel.org/r/20240314212902.2762507-1-rick.p.edgecombe@intel.com
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/mmu/mmu.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -7388,7 +7388,8 @@ bool kvm_arch_post_set_memory_attributes
+                        * by the memslot, KVM can't use a hugepage due to the
+                        * misaligned address regardless of memory attributes.
+                        */
+-                      if (gfn >= slot->base_gfn) {
++                      if (gfn >= slot->base_gfn &&
++                          gfn + nr_pages <= slot->base_gfn + slot->npages) {
+                               if (hugepage_has_attrs(kvm, slot, gfn, level, attrs))
+                                       hugepage_clear_mixed(slot, gfn, level);
+                               else
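The added bounds check expresses a simple containment condition: only walk the
finer-granularity metadata when the whole huge-page-sized range
[gfn, gfn + nr_pages) lies inside the memslot [base_gfn, base_gfn + npages).
A small standalone sketch of that check, using made-up numbers rather than real
KVM state:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* True only if [gfn, gfn + nr_pages) is fully contained in the memslot. */
static bool range_in_slot(uint64_t gfn, uint64_t nr_pages,
                          uint64_t base_gfn, uint64_t npages)
{
        return gfn >= base_gfn && gfn + nr_pages <= base_gfn + npages;
}

int main(void)
{
        uint64_t base_gfn = 0x200, npages = 0x80;       /* small memslot: [0x200, 0x280) */
        uint64_t gfn = 0x200, nr_pages = 0x200;         /* aligned huge page: [0x200, 0x400) */

        /* "gfn >= base_gfn" alone is true here, so before the fix the per-slot
         * array (sized for only 0x80 pages) was walked and overrun; the full
         * containment check skips the walk instead. */
        printf("walk finer-level metadata: %s\n",
               range_in_slot(gfn, nr_pages, base_gfn, npages) ? "yes" : "no");
        return 0;
}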
diff --git a/queue-6.8/kvm-x86-pmu-disable-support-for-adaptive-pebs.patch b/queue-6.8/kvm-x86-pmu-disable-support-for-adaptive-pebs.patch
new file mode 100644 (file)
index 0000000..7cff685
--- /dev/null
@@ -0,0 +1,104 @@
+From 9e985cbf2942a1bb8fcef9adc2a17d90fd7ca8ee Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Wed, 6 Mar 2024 16:58:33 -0800
+Subject: KVM: x86/pmu: Disable support for adaptive PEBS
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit 9e985cbf2942a1bb8fcef9adc2a17d90fd7ca8ee upstream.
+
+Drop support for virtualizing adaptive PEBS, as KVM's implementation is
+architecturally broken without an obvious/easy path forward, and because
+exposing adaptive PEBS can leak host LBRs to the guest, i.e. can leak
+host kernel addresses to the guest.
+
+Bug #1 is that KVM doesn't account for the upper 32 bits of
+IA32_FIXED_CTR_CTRL when (re)programming fixed counters, e.g.
+fixed_ctrl_field() drops the upper bits, reprogram_fixed_counters()
+stores local variables as u8s and truncates the upper bits too, etc.
+
+Bug #2 is that, because KVM _always_ sets precise_ip to a non-zero value
+for PEBS events, perf will _always_ generate an adaptive record, even if
+the guest requested a basic record.  Note, KVM will also enable adaptive
+PEBS in individual *counter*, even if adaptive PEBS isn't exposed to the
+guest, but this is benign as MSR_PEBS_DATA_CFG is guaranteed to be zero,
+i.e. the guest will only ever see Basic records.
+
+Bug #3 is in perf.  intel_pmu_disable_fixed() doesn't clear the upper
+bits either, i.e. leaves ICL_FIXED_0_ADAPTIVE set, and
+intel_pmu_enable_fixed() effectively doesn't clear ICL_FIXED_0_ADAPTIVE
+either.  I.e. perf _always_ enables ADAPTIVE counters, regardless of what
+KVM requests.
+
+Bug #4 is that adaptive PEBS *might* effectively bypass event filters set
+by the host, as "Updated Memory Access Info Group" records information
+that might be disallowed by userspace via KVM_SET_PMU_EVENT_FILTER.
+
+Bug #5 is that KVM doesn't ensure LBR MSRs hold guest values (or at least
+zeros) when entering a vCPU with adaptive PEBS, which allows the guest
+to read host LBRs, i.e. host RIPs/addresses, by enabling "LBR Entries"
+records.
+
+Disable adaptive PEBS support as an immediate fix due to the severity of
+the LBR leak in particular, and because fixing all of the bugs will be
+non-trivial, e.g. not suitable for backporting to stable kernels.
+
+Note!  This will break live migration, but trying to make KVM play nice
+with live migration would be quite complicated, wouldn't be guaranteed to
+work (i.e. KVM might still kill/confuse the guest), and it's not clear
+that there are any publicly available VMMs that support adaptive PEBS,
+let alone live migrate VMs that support adaptive PEBS, e.g. QEMU doesn't
+support PEBS in any capacity.
+
+Link: https://lore.kernel.org/all/20240306230153.786365-1-seanjc@google.com
+Link: https://lore.kernel.org/all/ZeepGjHCeSfadANM@google.com
+Fixes: c59a1f106f5c ("KVM: x86/pmu: Add IA32_PEBS_ENABLE MSR emulation for extended PEBS")
+Cc: stable@vger.kernel.org
+Cc: Like Xu <like.xu.linux@gmail.com>
+Cc: Mingwei Zhang <mizhang@google.com>
+Cc: Zhenyu Wang <zhenyuw@linux.intel.com>
+Cc: Zhang Xiong <xiong.y.zhang@intel.com>
+Cc: Lv Zhiyuan <zhiyuan.lv@intel.com>
+Cc: Dapeng Mi <dapeng1.mi@intel.com>
+Cc: Jim Mattson <jmattson@google.com>
+Acked-by: Like Xu <likexu@tencent.com>
+Link: https://lore.kernel.org/r/20240307005833.827147-1-seanjc@google.com
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx/vmx.c |   24 ++++++++++++++++++++++--
+ 1 file changed, 22 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -7857,8 +7857,28 @@ static u64 vmx_get_perf_capabilities(voi
+       if (vmx_pebs_supported()) {
+               perf_cap |= host_perf_cap & PERF_CAP_PEBS_MASK;
+-              if ((perf_cap & PERF_CAP_PEBS_FORMAT) < 4)
+-                      perf_cap &= ~PERF_CAP_PEBS_BASELINE;
++
++              /*
++               * Disallow adaptive PEBS as it is functionally broken, can be
++               * used by the guest to read *host* LBRs, and can be used to
++               * bypass userspace event filters.  To correctly and safely
++               * support adaptive PEBS, KVM needs to:
++               *
++               * 1. Account for the ADAPTIVE flag when (re)programming fixed
++               *    counters.
++               *
++               * 2. Gain support from perf (or take direct control of counter
++               *    programming) to support events without adaptive PEBS
++               *    enabled for the hardware counter.
++               *
++               * 3. Ensure LBR MSRs cannot hold host data on VM-Entry with
++               *    adaptive PEBS enabled and MSR_PEBS_DATA_CFG.LBRS=1.
++               *
++               * 4. Document which PMU events are effectively exposed to the
++               *    guest via adaptive PEBS, and make adaptive PEBS mutually
++               *    exclusive with KVM_SET_PMU_EVENT_FILTER if necessary.
++               */
++              perf_cap &= ~PERF_CAP_PEBS_BASELINE;
+       }
+       return perf_cap;
diff --git a/queue-6.8/kvm-x86-pmu-do-not-mask-lvtpc-when-handling-a-pmi-on-amd-platforms.patch b/queue-6.8/kvm-x86-pmu-do-not-mask-lvtpc-when-handling-a-pmi-on-amd-platforms.patch
new file mode 100644 (file)
index 0000000..ee09ae7
--- /dev/null
@@ -0,0 +1,58 @@
+From 49ff3b4aec51e3abfc9369997cc603319b02af9a Mon Sep 17 00:00:00 2001
+From: Sandipan Das <sandipan.das@amd.com>
+Date: Fri, 5 Apr 2024 16:55:55 -0700
+Subject: KVM: x86/pmu: Do not mask LVTPC when handling a PMI on AMD platforms
+
+From: Sandipan Das <sandipan.das@amd.com>
+
+commit 49ff3b4aec51e3abfc9369997cc603319b02af9a upstream.
+
+On AMD and Hygon platforms, the local APIC does not automatically set
+the mask bit of the LVTPC register when handling a PMI and there is
+no need to clear it in the kernel's PMI handler.
+
+For guests, the mask bit is currently set by kvm_apic_local_deliver()
+and unless it is cleared by the guest kernel's PMI handler, PMIs stop
+arriving and break use-cases like sampling with perf record.
+
+This does not affect non-PerfMonV2 guests because PMIs are handled in
+the guest kernel by x86_pmu_handle_irq() which always clears the LVTPC
+mask bit irrespective of the vendor.
+
+Before:
+
+  $ perf record -e cycles:u true
+  [ perf record: Woken up 1 times to write data ]
+  [ perf record: Captured and wrote 0.001 MB perf.data (1 samples) ]
+
+After:
+
+  $ perf record -e cycles:u true
+  [ perf record: Woken up 1 times to write data ]
+  [ perf record: Captured and wrote 0.002 MB perf.data (19 samples) ]
+
+Fixes: a16eb25b09c0 ("KVM: x86: Mask LVTPC when handling a PMI")
+Cc: stable@vger.kernel.org
+Signed-off-by: Sandipan Das <sandipan.das@amd.com>
+Reviewed-by: Jim Mattson <jmattson@google.com>
+[sean: use is_intel_compatible instead of !is_amd_or_hygon()]
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-ID: <20240405235603.1173076-3-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/lapic.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -2771,7 +2771,8 @@ int kvm_apic_local_deliver(struct kvm_la
+               trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
+               r = __apic_accept_irq(apic, mode, vector, 1, trig_mode, NULL);
+-              if (r && lvt_type == APIC_LVTPC)
++              if (r && lvt_type == APIC_LVTPC &&
++                  guest_cpuid_is_intel_compatible(apic->vcpu))
+                       kvm_lapic_set_reg(apic, APIC_LVTPC, reg | APIC_LVT_MASKED);
+               return r;
+       }
diff --git a/queue-6.8/kvm-x86-snapshot-if-a-vcpu-s-vendor-model-is-amd-vs.-intel-compatible.patch b/queue-6.8/kvm-x86-snapshot-if-a-vcpu-s-vendor-model-is-amd-vs.-intel-compatible.patch
new file mode 100644 (file)
index 0000000..0509d1b
--- /dev/null
@@ -0,0 +1,112 @@
+From fd706c9b1674e2858766bfbf7430534c2b26fbef Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Fri, 5 Apr 2024 16:55:54 -0700
+Subject: KVM: x86: Snapshot if a vCPU's vendor model is AMD vs. Intel compatible
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit fd706c9b1674e2858766bfbf7430534c2b26fbef upstream.
+
+Add kvm_vcpu_arch.is_amd_compatible to cache if a vCPU's vendor model is
+compatible with AMD, i.e. if the vCPU vendor is AMD or Hygon, along with
+helpers to check if a vCPU is compatible AMD vs. Intel.  To handle Intel
+vs. AMD behavior related to masking the LVTPC entry, KVM will need to
+check for vendor compatibility on every PMI injection, i.e. querying for
+AMD will soon be a moderately hot path.
+
+Note!  This subtly (or maybe not-so-subtly) makes "Intel compatible" KVM's
+default behavior, both if userspace omits (or never sets) CPUID 0x0 and if
+userspace sets a completely unknown vendor.  One could argue that KVM
+should treat such vCPUs as not being compatible with Intel *or* AMD, but
+that would add useless complexity to KVM.
+
+KVM needs to do *something* in the face of vendor specific behavior, and
+so unless KVM conjured up a magic third option, choosing to treat unknown
+vendors as neither Intel nor AMD means that checks on AMD compatibility
+would yield Intel behavior, and checks for Intel compatibility would yield
+AMD behavior.  And that's far worse as it would effectively yield random
+behavior depending on whether KVM checked for AMD vs. Intel vs. !AMD vs.
+!Intel.  And practically speaking, all x86 CPUs follow either Intel or AMD
+architecture, i.e. "supporting" an unknown third architecture adds no
+value.
+
+Deliberately don't convert any of the existing guest_cpuid_is_intel()
+checks, as the Intel side of things is messier due to some flows explicitly
+checking for exactly vendor==Intel, versus some flows assuming anything
+that isn't "AMD compatible" gets Intel behavior.  The Intel code will be
+cleaned up in the future.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-ID: <20240405235603.1173076-2-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/kvm_host.h |    1 +
+ arch/x86/kvm/cpuid.c            |    1 +
+ arch/x86/kvm/cpuid.h            |   10 ++++++++++
+ arch/x86/kvm/mmu/mmu.c          |    2 +-
+ arch/x86/kvm/x86.c              |    2 +-
+ 5 files changed, 14 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -854,6 +854,7 @@ struct kvm_vcpu_arch {
+       int cpuid_nent;
+       struct kvm_cpuid_entry2 *cpuid_entries;
+       struct kvm_hypervisor_cpuid kvm_cpuid;
++      bool is_amd_compatible;
+       /*
+        * FIXME: Drop this macro and use KVM_NR_GOVERNED_FEATURES directly
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -366,6 +366,7 @@ static void kvm_vcpu_after_set_cpuid(str
+       kvm_update_pv_runtime(vcpu);
++      vcpu->arch.is_amd_compatible = guest_cpuid_is_amd_or_hygon(vcpu);
+       vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
+       vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);
+--- a/arch/x86/kvm/cpuid.h
++++ b/arch/x86/kvm/cpuid.h
+@@ -120,6 +120,16 @@ static inline bool guest_cpuid_is_intel(
+       return best && is_guest_vendor_intel(best->ebx, best->ecx, best->edx);
+ }
++static inline bool guest_cpuid_is_amd_compatible(struct kvm_vcpu *vcpu)
++{
++      return vcpu->arch.is_amd_compatible;
++}
++
++static inline bool guest_cpuid_is_intel_compatible(struct kvm_vcpu *vcpu)
++{
++      return !guest_cpuid_is_amd_compatible(vcpu);
++}
++
+ static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
+ {
+       struct kvm_cpuid_entry2 *best;
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -4922,7 +4922,7 @@ static void reset_guest_rsvds_bits_mask(
+                               context->cpu_role.base.level, is_efer_nx(context),
+                               guest_can_use(vcpu, X86_FEATURE_GBPAGES),
+                               is_cr4_pse(context),
+-                              guest_cpuid_is_amd_or_hygon(vcpu));
++                              guest_cpuid_is_amd_compatible(vcpu));
+ }
+ static void __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3422,7 +3422,7 @@ static bool is_mci_status_msr(u32 msr)
+ static bool can_set_mci_status(struct kvm_vcpu *vcpu)
+ {
+       /* McStatusWrEn enabled? */
+-      if (guest_cpuid_is_amd_or_hygon(vcpu))
++      if (guest_cpuid_is_amd_compatible(vcpu))
+               return !!(vcpu->arch.msr_hwcr & BIT_ULL(18));
+       return false;
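The optimization is the familiar one of computing a derived property once, at
configuration time, and reading a cached boolean on the hot path instead of
re-deriving it from the source data on every use. A tiny sketch of that caching
pattern; the struct layout and the string comparison are stand-ins, not KVM's
actual CPUID handling:

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

struct vcpu {
        char vendor[16];
        bool is_amd_compatible;         /* cached when the vendor is configured */
};

static void set_cpuid(struct vcpu *v, const char *vendor)
{
        snprintf(v->vendor, sizeof(v->vendor), "%s", vendor);
        /* Snapshot once; anything else defaults to "Intel compatible". */
        v->is_amd_compatible = !strcmp(vendor, "AuthenticAMD") ||
                               !strcmp(vendor, "HygonGenuine");
}

/* Hot path (e.g. PMI injection) just reads the cached flag. */
static bool is_amd_compatible(const struct vcpu *v)
{
        return v->is_amd_compatible;
}

int main(void)
{
        struct vcpu v;

        set_cpuid(&v, "AuthenticAMD");
        printf("amd compatible: %d\n", is_amd_compatible(&v));
        return 0;
}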
diff --git a/queue-6.8/mei-me-disable-rpl-s-on-sps-and-ign-firmwares.patch b/queue-6.8/mei-me-disable-rpl-s-on-sps-and-ign-firmwares.patch
new file mode 100644 (file)
index 0000000..f063f92
--- /dev/null
@@ -0,0 +1,34 @@
+From 0dc04112bee6fdd6eb847ccb32214703022c0269 Mon Sep 17 00:00:00 2001
+From: Alexander Usyskin <alexander.usyskin@intel.com>
+Date: Tue, 12 Mar 2024 07:19:58 +0200
+Subject: mei: me: disable RPL-S on SPS and IGN firmwares
+
+From: Alexander Usyskin <alexander.usyskin@intel.com>
+
+commit 0dc04112bee6fdd6eb847ccb32214703022c0269 upstream.
+
+Extend the quirk to disable the MEI interface on Intel PCH Ignition (IGN)
+and SPS firmwares for RPL-S devices. These firmwares do not support
+the MEI protocol.
+
+Fixes: 3ed8c7d39cfe ("mei: me: add raptor lake point S DID")
+Cc: stable@vger.kernel.org
+Signed-off-by: Alexander Usyskin <alexander.usyskin@intel.com>
+Signed-off-by: Tomas Winkler <tomas.winkler@intel.com>
+Link: https://lore.kernel.org/r/20240312051958.118478-1-tomas.winkler@intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/misc/mei/pci-me.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/misc/mei/pci-me.c
++++ b/drivers/misc/mei/pci-me.c
+@@ -116,7 +116,7 @@ static const struct pci_device_id mei_me
+       {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_N, MEI_ME_PCH15_CFG)},
+-      {MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)},
++      {MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_SPS_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_MTL_M, MEI_ME_PCH15_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_ARL_S, MEI_ME_PCH15_CFG)},
diff --git a/queue-6.8/mei-vsc-unregister-interrupt-handler-for-system-suspend.patch b/queue-6.8/mei-vsc-unregister-interrupt-handler-for-system-suspend.patch
new file mode 100644 (file)
index 0000000..524bdec
--- /dev/null
@@ -0,0 +1,214 @@
+From f6085a96c97387154be7eaebd1a5420eb3cd55dc Mon Sep 17 00:00:00 2001
+From: Sakari Ailus <sakari.ailus@linux.intel.com>
+Date: Wed, 3 Apr 2024 13:13:41 +0800
+Subject: mei: vsc: Unregister interrupt handler for system suspend
+
+From: Sakari Ailus <sakari.ailus@linux.intel.com>
+
+commit f6085a96c97387154be7eaebd1a5420eb3cd55dc upstream.
+
+Unregister the MEI VSC interrupt handler before system suspend and
+re-register it at system resume time. This mirrors implementation of other
+MEI devices.
+
+This patch fixes the bug that causes a continuous stream of MEI VSC errors
+after system resume.
+
+Fixes: 386a766c4169 ("mei: Add MEI hardware support for IVSC device")
+Cc: stable@vger.kernel.org # for 6.8
+Reported-by: Dominik Brodowski <linux@dominikbrodowski.net>
+Signed-off-by: Wentong Wu <wentong.wu@intel.com>
+Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Acked-by: Tomas Winkler <tomas.winkler@intel.com>
+Link: https://lore.kernel.org/r/20240403051341.3534650-2-wentong.wu@intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/misc/mei/platform-vsc.c |   17 +++++++-
+ drivers/misc/mei/vsc-tp.c       |   84 ++++++++++++++++++++++++++++------------
+ drivers/misc/mei/vsc-tp.h       |    3 +
+ 3 files changed, 78 insertions(+), 26 deletions(-)
+
+--- a/drivers/misc/mei/platform-vsc.c
++++ b/drivers/misc/mei/platform-vsc.c
+@@ -402,25 +402,40 @@ static int mei_vsc_remove(struct platfor
+ static int mei_vsc_suspend(struct device *dev)
+ {
+       struct mei_device *mei_dev = dev_get_drvdata(dev);
++      struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+       mei_stop(mei_dev);
++      mei_disable_interrupts(mei_dev);
++
++      vsc_tp_free_irq(hw->tp);
++
+       return 0;
+ }
+ static int mei_vsc_resume(struct device *dev)
+ {
+       struct mei_device *mei_dev = dev_get_drvdata(dev);
++      struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+       int ret;
+-      ret = mei_restart(mei_dev);
++      ret = vsc_tp_request_irq(hw->tp);
+       if (ret)
+               return ret;
++      ret = mei_restart(mei_dev);
++      if (ret)
++              goto err_free;
++
+       /* start timer if stopped in suspend */
+       schedule_delayed_work(&mei_dev->timer_work, HZ);
+       return 0;
++
++err_free:
++      vsc_tp_free_irq(hw->tp);
++
++      return ret;
+ }
+ static DEFINE_SIMPLE_DEV_PM_OPS(mei_vsc_pm_ops, mei_vsc_suspend, mei_vsc_resume);
+--- a/drivers/misc/mei/vsc-tp.c
++++ b/drivers/misc/mei/vsc-tp.c
+@@ -94,6 +94,27 @@ static const struct acpi_gpio_mapping vs
+       {}
+ };
++static irqreturn_t vsc_tp_isr(int irq, void *data)
++{
++      struct vsc_tp *tp = data;
++
++      atomic_inc(&tp->assert_cnt);
++
++      wake_up(&tp->xfer_wait);
++
++      return IRQ_WAKE_THREAD;
++}
++
++static irqreturn_t vsc_tp_thread_isr(int irq, void *data)
++{
++      struct vsc_tp *tp = data;
++
++      if (tp->event_notify)
++              tp->event_notify(tp->event_notify_context);
++
++      return IRQ_HANDLED;
++}
++
+ /* wakeup firmware and wait for response */
+ static int vsc_tp_wakeup_request(struct vsc_tp *tp)
+ {
+@@ -384,6 +405,37 @@ int vsc_tp_register_event_cb(struct vsc_
+ EXPORT_SYMBOL_NS_GPL(vsc_tp_register_event_cb, VSC_TP);
+ /**
++ * vsc_tp_request_irq - request irq for vsc_tp device
++ * @tp: vsc_tp device handle
++ */
++int vsc_tp_request_irq(struct vsc_tp *tp)
++{
++      struct spi_device *spi = tp->spi;
++      struct device *dev = &spi->dev;
++      int ret;
++
++      irq_set_status_flags(spi->irq, IRQ_DISABLE_UNLAZY);
++      ret = request_threaded_irq(spi->irq, vsc_tp_isr, vsc_tp_thread_isr,
++                                 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
++                                 dev_name(dev), tp);
++      if (ret)
++              return ret;
++
++      return 0;
++}
++EXPORT_SYMBOL_NS_GPL(vsc_tp_request_irq, VSC_TP);
++
++/**
++ * vsc_tp_free_irq - free irq for vsc_tp device
++ * @tp: vsc_tp device handle
++ */
++void vsc_tp_free_irq(struct vsc_tp *tp)
++{
++      free_irq(tp->spi->irq, tp);
++}
++EXPORT_SYMBOL_NS_GPL(vsc_tp_free_irq, VSC_TP);
++
++/**
+  * vsc_tp_intr_synchronize - synchronize vsc_tp interrupt
+  * @tp: vsc_tp device handle
+  */
+@@ -413,27 +465,6 @@ void vsc_tp_intr_disable(struct vsc_tp *
+ }
+ EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_disable, VSC_TP);
+-static irqreturn_t vsc_tp_isr(int irq, void *data)
+-{
+-      struct vsc_tp *tp = data;
+-
+-      atomic_inc(&tp->assert_cnt);
+-
+-      wake_up(&tp->xfer_wait);
+-
+-      return IRQ_WAKE_THREAD;
+-}
+-
+-static irqreturn_t vsc_tp_thread_isr(int irq, void *data)
+-{
+-      struct vsc_tp *tp = data;
+-
+-      if (tp->event_notify)
+-              tp->event_notify(tp->event_notify_context);
+-
+-      return IRQ_HANDLED;
+-}
+-
+ static int vsc_tp_match_any(struct acpi_device *adev, void *data)
+ {
+       struct acpi_device **__adev = data;
+@@ -485,10 +516,9 @@ static int vsc_tp_probe(struct spi_devic
+       tp->spi = spi;
+       irq_set_status_flags(spi->irq, IRQ_DISABLE_UNLAZY);
+-      ret = devm_request_threaded_irq(dev, spi->irq, vsc_tp_isr,
+-                                      vsc_tp_thread_isr,
+-                                      IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+-                                      dev_name(dev), tp);
++      ret = request_threaded_irq(spi->irq, vsc_tp_isr, vsc_tp_thread_isr,
++                                 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
++                                 dev_name(dev), tp);
+       if (ret)
+               return ret;
+@@ -522,6 +552,8 @@ static int vsc_tp_probe(struct spi_devic
+ err_destroy_lock:
+       mutex_destroy(&tp->mutex);
++      free_irq(spi->irq, tp);
++
+       return ret;
+ }
+@@ -532,6 +564,8 @@ static void vsc_tp_remove(struct spi_dev
+       platform_device_unregister(tp->pdev);
+       mutex_destroy(&tp->mutex);
++
++      free_irq(spi->irq, tp);
+ }
+ static const struct acpi_device_id vsc_tp_acpi_ids[] = {
+--- a/drivers/misc/mei/vsc-tp.h
++++ b/drivers/misc/mei/vsc-tp.h
+@@ -37,6 +37,9 @@ int vsc_tp_xfer(struct vsc_tp *tp, u8 cm
+ int vsc_tp_register_event_cb(struct vsc_tp *tp, vsc_tp_event_cb_t event_cb,
+                            void *context);
++int vsc_tp_request_irq(struct vsc_tp *tp);
++void vsc_tp_free_irq(struct vsc_tp *tp);
++
+ void vsc_tp_intr_enable(struct vsc_tp *tp);
+ void vsc_tp_intr_disable(struct vsc_tp *tp);
+ void vsc_tp_intr_synchronize(struct vsc_tp *tp);
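The shape of the fix is symmetric teardown and setup around suspend and
resume: release the interrupt handler before the system sleeps, re-request it
first thing on resume, and if the later restart step fails, release it again so
nothing is leaked. A userspace analogue of that ordering, with stub functions
standing in for the real IRQ and MEI calls:

#include <stdio.h>

static int irq_requested;

static int request_irq_stub(void) { irq_requested = 1; return 0; }
static void free_irq_stub(void)   { irq_requested = 0; }

static int device_restart(int ok) { return ok ? 0 : -5; }       /* error on failure */

static int suspend(void)
{
        free_irq_stub();                /* release before the system sleeps */
        return 0;
}

static int resume(int restart_ok)
{
        int ret = request_irq_stub();   /* re-acquire first */
        if (ret)
                return ret;

        ret = device_restart(restart_ok);
        if (ret)
                goto err_free;          /* don't leak the handler on failure */
        return 0;

err_free:
        free_irq_stub();
        return ret;
}

int main(void)
{
        suspend();
        printf("resume: %d, irq requested: %d\n", resume(1), irq_requested);
        return 0;
}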
diff --git a/queue-6.8/mm-madvise-make-madv_populate_-read-write-handle-vm_fault_retry-properly.patch b/queue-6.8/mm-madvise-make-madv_populate_-read-write-handle-vm_fault_retry-properly.patch
new file mode 100644 (file)
index 0000000..4a27760
--- /dev/null
@@ -0,0 +1,234 @@
+From 631426ba1d45a8672b177ee85ad4cabe760dd131 Mon Sep 17 00:00:00 2001
+From: David Hildenbrand <david@redhat.com>
+Date: Thu, 14 Mar 2024 17:12:59 +0100
+Subject: mm/madvise: make MADV_POPULATE_(READ|WRITE) handle VM_FAULT_RETRY properly
+
+From: David Hildenbrand <david@redhat.com>
+
+commit 631426ba1d45a8672b177ee85ad4cabe760dd131 upstream.
+
+Darrick reports that in some cases where pread() would fail with -EIO and
+mmap()+access would generate a SIGBUS signal, MADV_POPULATE_READ /
+MADV_POPULATE_WRITE will keep retrying forever and not fail with -EFAULT.
+
+While the madvise() call can be interrupted by a signal, this is not the
+desired behavior.  MADV_POPULATE_READ / MADV_POPULATE_WRITE should behave
+like page faults in that case: fail and not retry forever.
+
+A reproducer can be found at [1].
+
+The reason is that __get_user_pages(), as called by
+faultin_vma_page_range(), will not handle VM_FAULT_RETRY in a proper way:
+it will simply return 0 when VM_FAULT_RETRY happened, making
+madvise_populate()->faultin_vma_page_range() retry again and again, never
+setting FOLL_TRIED->FAULT_FLAG_TRIED for __get_user_pages().
+
+__get_user_pages_locked() does what we want, but duplicating that logic in
+faultin_vma_page_range() feels wrong.
+
+So let's use __get_user_pages_locked() instead, that will detect
+VM_FAULT_RETRY and set FOLL_TRIED when retrying, making the fault handler
+return VM_FAULT_SIGBUS (VM_FAULT_ERROR) at some point, propagating -EFAULT
+from faultin_page() to __get_user_pages(), all the way to
+madvise_populate().
+
+But, there is an issue: __get_user_pages_locked() will end up re-taking
+the MM lock and then __get_user_pages() will do another VMA lookup.  In
+the meantime, the VMA layout could have changed and we'd fail with
+different error codes than we'd want to.
+
+As __get_user_pages() will currently do a new VMA lookup either way, let
+it do the VMA handling in a different way, controlled by a new
+FOLL_MADV_POPULATE flag, effectively moving these checks from
+madvise_populate() + faultin_page_range() in there.
+
+With this change, Darrick's reproducer properly fails with -EFAULT, as
+documented for MADV_POPULATE_READ / MADV_POPULATE_WRITE.
+
+[1] https://lore.kernel.org/all/20240313171936.GN1927156@frogsfrogsfrogs/
+
+Link: https://lkml.kernel.org/r/20240314161300.382526-1-david@redhat.com
+Link: https://lkml.kernel.org/r/20240314161300.382526-2-david@redhat.com
+Fixes: 4ca9b3859dac ("mm/madvise: introduce MADV_POPULATE_(READ|WRITE) to prefault page tables")
+Signed-off-by: David Hildenbrand <david@redhat.com>
+Reported-by: Darrick J. Wong <djwong@kernel.org>
+Closes: https://lore.kernel.org/all/20240311223815.GW1927156@frogsfrogsfrogs/
+Cc: Darrick J. Wong <djwong@kernel.org>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Jason Gunthorpe <jgg@nvidia.com>
+Cc: John Hubbard <jhubbard@nvidia.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/gup.c      |   54 ++++++++++++++++++++++++++++++++----------------------
+ mm/internal.h |   10 ++++++----
+ mm/madvise.c  |   17 ++---------------
+ 3 files changed, 40 insertions(+), 41 deletions(-)
+
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -1206,6 +1206,22 @@ static long __get_user_pages(struct mm_s
+               /* first iteration or cross vma bound */
+               if (!vma || start >= vma->vm_end) {
++                      /*
++                       * MADV_POPULATE_(READ|WRITE) wants to handle VMA
++                       * lookups+error reporting differently.
++                       */
++                      if (gup_flags & FOLL_MADV_POPULATE) {
++                              vma = vma_lookup(mm, start);
++                              if (!vma) {
++                                      ret = -ENOMEM;
++                                      goto out;
++                              }
++                              if (check_vma_flags(vma, gup_flags)) {
++                                      ret = -EINVAL;
++                                      goto out;
++                              }
++                              goto retry;
++                      }
+                       vma = gup_vma_lookup(mm, start);
+                       if (!vma && in_gate_area(mm, start)) {
+                               ret = get_gate_page(mm, start & PAGE_MASK,
+@@ -1683,35 +1699,35 @@ long populate_vma_page_range(struct vm_a
+ }
+ /*
+- * faultin_vma_page_range() - populate (prefault) page tables inside the
+- *                          given VMA range readable/writable
++ * faultin_page_range() - populate (prefault) page tables inside the
++ *                      given range readable/writable
+  *
+  * This takes care of mlocking the pages, too, if VM_LOCKED is set.
+  *
+- * @vma: target vma
++ * @mm: the mm to populate page tables in
+  * @start: start address
+  * @end: end address
+  * @write: whether to prefault readable or writable
+  * @locked: whether the mmap_lock is still held
+  *
+- * Returns either number of processed pages in the vma, or a negative error
+- * code on error (see __get_user_pages()).
++ * Returns either number of processed pages in the MM, or a negative error
++ * code on error (see __get_user_pages()). Note that this function reports
++ * errors related to VMAs, such as incompatible mappings, as expected by
++ * MADV_POPULATE_(READ|WRITE).
+  *
+- * vma->vm_mm->mmap_lock must be held. The range must be page-aligned and
+- * covered by the VMA. If it's released, *@locked will be set to 0.
++ * The range must be page-aligned.
++ *
++ * mm->mmap_lock must be held. If it's released, *@locked will be set to 0.
+  */
+-long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
+-                          unsigned long end, bool write, int *locked)
++long faultin_page_range(struct mm_struct *mm, unsigned long start,
++                      unsigned long end, bool write, int *locked)
+ {
+-      struct mm_struct *mm = vma->vm_mm;
+       unsigned long nr_pages = (end - start) / PAGE_SIZE;
+       int gup_flags;
+       long ret;
+       VM_BUG_ON(!PAGE_ALIGNED(start));
+       VM_BUG_ON(!PAGE_ALIGNED(end));
+-      VM_BUG_ON_VMA(start < vma->vm_start, vma);
+-      VM_BUG_ON_VMA(end > vma->vm_end, vma);
+       mmap_assert_locked(mm);
+       /*
+@@ -1723,19 +1739,13 @@ long faultin_vma_page_range(struct vm_ar
+        *                a poisoned page.
+        * !FOLL_FORCE: Require proper access permissions.
+        */
+-      gup_flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_UNLOCKABLE;
++      gup_flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_UNLOCKABLE |
++                  FOLL_MADV_POPULATE;
+       if (write)
+               gup_flags |= FOLL_WRITE;
+-      /*
+-       * We want to report -EINVAL instead of -EFAULT for any permission
+-       * problems or incompatible mappings.
+-       */
+-      if (check_vma_flags(vma, gup_flags))
+-              return -EINVAL;
+-
+-      ret = __get_user_pages(mm, start, nr_pages, gup_flags,
+-                             NULL, locked);
++      ret = __get_user_pages_locked(mm, start, nr_pages, NULL, locked,
++                                    gup_flags);
+       lru_add_drain();
+       return ret;
+ }
+--- a/mm/internal.h
++++ b/mm/internal.h
+@@ -590,9 +590,8 @@ struct anon_vma *folio_anon_vma(struct f
+ void unmap_mapping_folio(struct folio *folio);
+ extern long populate_vma_page_range(struct vm_area_struct *vma,
+               unsigned long start, unsigned long end, int *locked);
+-extern long faultin_vma_page_range(struct vm_area_struct *vma,
+-                                 unsigned long start, unsigned long end,
+-                                 bool write, int *locked);
++extern long faultin_page_range(struct mm_struct *mm, unsigned long start,
++              unsigned long end, bool write, int *locked);
+ extern bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
+                              unsigned long bytes);
+@@ -1031,10 +1030,13 @@ enum {
+       FOLL_FAST_ONLY = 1 << 20,
+       /* allow unlocking the mmap lock */
+       FOLL_UNLOCKABLE = 1 << 21,
++      /* VMA lookup+checks compatible with MADV_POPULATE_(READ|WRITE) */
++      FOLL_MADV_POPULATE = 1 << 22,
+ };
+ #define INTERNAL_GUP_FLAGS (FOLL_TOUCH | FOLL_TRIED | FOLL_REMOTE | FOLL_PIN | \
+-                          FOLL_FAST_ONLY | FOLL_UNLOCKABLE)
++                          FOLL_FAST_ONLY | FOLL_UNLOCKABLE | \
++                          FOLL_MADV_POPULATE)
+ /*
+  * Indicates for which pages that are write-protected in the page table,
+--- a/mm/madvise.c
++++ b/mm/madvise.c
+@@ -908,27 +908,14 @@ static long madvise_populate(struct vm_a
+ {
+       const bool write = behavior == MADV_POPULATE_WRITE;
+       struct mm_struct *mm = vma->vm_mm;
+-      unsigned long tmp_end;
+       int locked = 1;
+       long pages;
+       *prev = vma;
+       while (start < end) {
+-              /*
+-               * We might have temporarily dropped the lock. For example,
+-               * our VMA might have been split.
+-               */
+-              if (!vma || start >= vma->vm_end) {
+-                      vma = vma_lookup(mm, start);
+-                      if (!vma)
+-                              return -ENOMEM;
+-              }
+-
+-              tmp_end = min_t(unsigned long, end, vma->vm_end);
+               /* Populate (prefault) page tables readable/writable. */
+-              pages = faultin_vma_page_range(vma, start, tmp_end, write,
+-                                             &locked);
++              pages = faultin_page_range(mm, start, end, write, &locked);
+               if (!locked) {
+                       mmap_read_lock(mm);
+                       locked = 1;
+@@ -949,7 +936,7 @@ static long madvise_populate(struct vm_a
+                               pr_warn_once("%s: unhandled return value: %ld\n",
+                                            __func__, pages);
+                               fallthrough;
+-                      case -ENOMEM:
++                      case -ENOMEM: /* No VMA or out of memory. */
+                               return -ENOMEM;
+                       }
+               }
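The core of the fix is a fault-in loop that records "already retried" so a
second failure becomes a hard error instead of an endless loop. A stripped-down
sketch of that retry-state pattern, independent of the real GUP flags and with
a hypothetical fault_in() backend:

#include <stdio.h>

enum fault_result { FAULT_OK, FAULT_RETRY, FAULT_SIGBUS };

/* Hypothetical backend: always asks for a retry once, then fails for good. */
static enum fault_result fault_in(int tried)
{
        return tried ? FAULT_SIGBUS : FAULT_RETRY;
}

static int populate_range(void)
{
        int tried = 0;

        for (;;) {
                switch (fault_in(tried)) {
                case FAULT_OK:
                        return 0;
                case FAULT_RETRY:
                        tried = 1;      /* remember the retry; the buggy loop forgot this */
                        continue;
                case FAULT_SIGBUS:
                        return -14;     /* -EFAULT, as documented for MADV_POPULATE_* */
                }
        }
}

int main(void)
{
        printf("populate: %d\n", populate_range());     /* terminates with -14 (-EFAULT) */
        return 0;
}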
diff --git a/queue-6.8/mm-memory-failure-fix-deadlock-when-hugetlb_optimize_vmemmap-is-enabled.patch b/queue-6.8/mm-memory-failure-fix-deadlock-when-hugetlb_optimize_vmemmap-is-enabled.patch
new file mode 100644 (file)
index 0000000..e5c7e69
--- /dev/null
@@ -0,0 +1,167 @@
+From 1983184c22dd84a4d95a71e5c6775c2638557dc7 Mon Sep 17 00:00:00 2001
+From: Miaohe Lin <linmiaohe@huawei.com>
+Date: Sun, 7 Apr 2024 16:54:56 +0800
+Subject: mm/memory-failure: fix deadlock when hugetlb_optimize_vmemmap is enabled
+
+From: Miaohe Lin <linmiaohe@huawei.com>
+
+commit 1983184c22dd84a4d95a71e5c6775c2638557dc7 upstream.
+
+When doing hard offline tests with hugetlb pages, the deadlock below occurs:
+
+======================================================
+WARNING: possible circular locking dependency detected
+6.8.0-11409-gf6cef5f8c37f #1 Not tainted
+------------------------------------------------------
+bash/46904 is trying to acquire lock:
+ffffffffabe68910 (cpu_hotplug_lock){++++}-{0:0}, at: static_key_slow_dec+0x16/0x60
+
+but task is already holding lock:
+ffffffffabf92ea8 (pcp_batch_high_lock){+.+.}-{3:3}, at: zone_pcp_disable+0x16/0x40
+
+which lock already depends on the new lock.
+
+the existing dependency chain (in reverse order) is:
+
+-> #1 (pcp_batch_high_lock){+.+.}-{3:3}:
+       __mutex_lock+0x6c/0x770
+       page_alloc_cpu_online+0x3c/0x70
+       cpuhp_invoke_callback+0x397/0x5f0
+       __cpuhp_invoke_callback_range+0x71/0xe0
+       _cpu_up+0xeb/0x210
+       cpu_up+0x91/0xe0
+       cpuhp_bringup_mask+0x49/0xb0
+       bringup_nonboot_cpus+0xb7/0xe0
+       smp_init+0x25/0xa0
+       kernel_init_freeable+0x15f/0x3e0
+       kernel_init+0x15/0x1b0
+       ret_from_fork+0x2f/0x50
+       ret_from_fork_asm+0x1a/0x30
+
+-> #0 (cpu_hotplug_lock){++++}-{0:0}:
+       __lock_acquire+0x1298/0x1cd0
+       lock_acquire+0xc0/0x2b0
+       cpus_read_lock+0x2a/0xc0
+       static_key_slow_dec+0x16/0x60
+       __hugetlb_vmemmap_restore_folio+0x1b9/0x200
+       dissolve_free_huge_page+0x211/0x260
+       __page_handle_poison+0x45/0xc0
+       memory_failure+0x65e/0xc70
+       hard_offline_page_store+0x55/0xa0
+       kernfs_fop_write_iter+0x12c/0x1d0
+       vfs_write+0x387/0x550
+       ksys_write+0x64/0xe0
+       do_syscall_64+0xca/0x1e0
+       entry_SYSCALL_64_after_hwframe+0x6d/0x75
+
+other info that might help us debug this:
+
+ Possible unsafe locking scenario:
+
+       CPU0                    CPU1
+       ----                    ----
+  lock(pcp_batch_high_lock);
+                               lock(cpu_hotplug_lock);
+                               lock(pcp_batch_high_lock);
+  rlock(cpu_hotplug_lock);
+
+ *** DEADLOCK ***
+
+5 locks held by bash/46904:
+ #0: ffff98f6c3bb23f0 (sb_writers#5){.+.+}-{0:0}, at: ksys_write+0x64/0xe0
+ #1: ffff98f6c328e488 (&of->mutex){+.+.}-{3:3}, at: kernfs_fop_write_iter+0xf8/0x1d0
+ #2: ffff98ef83b31890 (kn->active#113){.+.+}-{0:0}, at: kernfs_fop_write_iter+0x100/0x1d0
+ #3: ffffffffabf9db48 (mf_mutex){+.+.}-{3:3}, at: memory_failure+0x44/0xc70
+ #4: ffffffffabf92ea8 (pcp_batch_high_lock){+.+.}-{3:3}, at: zone_pcp_disable+0x16/0x40
+
+stack backtrace:
+CPU: 10 PID: 46904 Comm: bash Kdump: loaded Not tainted 6.8.0-11409-gf6cef5f8c37f #1
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.14.0-0-g155821a1990b-prebuilt.qemu.org 04/01/2014
+Call Trace:
+ <TASK>
+ dump_stack_lvl+0x68/0xa0
+ check_noncircular+0x129/0x140
+ __lock_acquire+0x1298/0x1cd0
+ lock_acquire+0xc0/0x2b0
+ cpus_read_lock+0x2a/0xc0
+ static_key_slow_dec+0x16/0x60
+ __hugetlb_vmemmap_restore_folio+0x1b9/0x200
+ dissolve_free_huge_page+0x211/0x260
+ __page_handle_poison+0x45/0xc0
+ memory_failure+0x65e/0xc70
+ hard_offline_page_store+0x55/0xa0
+ kernfs_fop_write_iter+0x12c/0x1d0
+ vfs_write+0x387/0x550
+ ksys_write+0x64/0xe0
+ do_syscall_64+0xca/0x1e0
+ entry_SYSCALL_64_after_hwframe+0x6d/0x75
+RIP: 0033:0x7fc862314887
+Code: 10 00 f7 d8 64 89 02 48 c7 c0 ff ff ff ff eb b7 0f 1f 00 f3 0f 1e fa 64 8b 04 25 18 00 00 00 85 c0 75 10 b8 01 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 51 c3 48 83 ec 28 48 89 54 24 18 48 89 74 24
+RSP: 002b:00007fff19311268 EFLAGS: 00000246 ORIG_RAX: 0000000000000001
+RAX: ffffffffffffffda RBX: 000000000000000c RCX: 00007fc862314887
+RDX: 000000000000000c RSI: 000056405645fe10 RDI: 0000000000000001
+RBP: 000056405645fe10 R08: 00007fc8623d1460 R09: 000000007fffffff
+R10: 0000000000000000 R11: 0000000000000246 R12: 000000000000000c
+R13: 00007fc86241b780 R14: 00007fc862417600 R15: 00007fc862416a00
+
+In short, the scenario below breaks the lock dependency chain:
+
+ memory_failure
+  __page_handle_poison
+   zone_pcp_disable -- lock(pcp_batch_high_lock)
+   dissolve_free_huge_page
+    __hugetlb_vmemmap_restore_folio
+     static_key_slow_dec
+      cpus_read_lock -- rlock(cpu_hotplug_lock)
+
+Fix this by calling drain_all_pages() instead.
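+
+As a generic illustration of the AB-BA inversion lockdep is warning about,
+here is a userspace pthread sketch (not kernel code; the two mutexes only
+stand in for pcp_batch_high_lock and cpu_hotplug_lock, and the program can
+genuinely deadlock if the two threads interleave):
+
+/* Userspace sketch of the AB-BA pattern above; pthreads, not kernel code. */
+#include <pthread.h>
+#include <stdio.h>
+
+static pthread_mutex_t pcp_lock = PTHREAD_MUTEX_INITIALIZER;     /* ~ pcp_batch_high_lock */
+static pthread_mutex_t hotplug_lock = PTHREAD_MUTEX_INITIALIZER; /* ~ cpu_hotplug_lock */
+
+static void *offline_path(void *arg)    /* ~ memory_failure() -> __page_handle_poison() */
+{
+        pthread_mutex_lock(&pcp_lock);      /* ~ zone_pcp_disable() */
+        pthread_mutex_lock(&hotplug_lock);  /* ~ static_key_slow_dec() -> cpus_read_lock() */
+        pthread_mutex_unlock(&hotplug_lock);
+        pthread_mutex_unlock(&pcp_lock);
+        return NULL;
+}
+
+static void *hotplug_path(void *arg)    /* ~ cpu_up() */
+{
+        pthread_mutex_lock(&hotplug_lock);  /* ~ cpus_write_lock() */
+        pthread_mutex_lock(&pcp_lock);      /* ~ page_alloc_cpu_online() */
+        pthread_mutex_unlock(&pcp_lock);
+        pthread_mutex_unlock(&hotplug_lock);
+        return NULL;
+}
+
+int main(void)
+{
+        pthread_t t1, t2;
+
+        pthread_create(&t1, NULL, offline_path, NULL);
+        pthread_create(&t2, NULL, hotplug_path, NULL);
+        pthread_join(t1, NULL);
+        pthread_join(t2, NULL);
+        puts("no deadlock this time");
+        return 0;
+}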
+
+This issue did not occur before commit a6b40850c442 ("mm: hugetlb: replace
+hugetlb_free_vmemmap_enabled with a static_key"), as that commit introduced
+rlock(cpu_hotplug_lock) in the dissolve_free_huge_page() code path while
+lock(pcp_batch_high_lock) is already held in __page_handle_poison().
+
+[linmiaohe@huawei.com: extend comment per Oscar]
+[akpm@linux-foundation.org: reflow block comment]
+Link: https://lkml.kernel.org/r/20240407085456.2798193-1-linmiaohe@huawei.com
+Fixes: a6b40850c442 ("mm: hugetlb: replace hugetlb_free_vmemmap_enabled with a static_key")
+Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
+Acked-by: Oscar Salvador <osalvador@suse.de>
+Reviewed-by: Jane Chu <jane.chu@oracle.com>
+Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/memory-failure.c |   18 +++++++++++++++---
+ 1 file changed, 15 insertions(+), 3 deletions(-)
+
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -154,11 +154,23 @@ static int __page_handle_poison(struct p
+ {
+       int ret;
+-      zone_pcp_disable(page_zone(page));
++      /*
++       * zone_pcp_disable() can't be used here. It will
++       * hold pcp_batch_high_lock and dissolve_free_huge_page() might hold
++       * cpu_hotplug_lock via static_key_slow_dec() when hugetlb vmemmap
++       * optimization is enabled. This will break current lock dependency
++       * chain and leads to deadlock.
++       * Disabling pcp before dissolving the page was a deterministic
++       * approach because we made sure that those pages cannot end up in any
++       * PCP list. Draining PCP lists expels those pages to the buddy system,
++       * but nothing guarantees that those pages do not get back to a PCP
++       * queue if we need to refill those.
++       */
+       ret = dissolve_free_huge_page(page);
+-      if (!ret)
++      if (!ret) {
++              drain_all_pages(page_zone(page));
+               ret = take_page_off_buddy(page);
+-      zone_pcp_enable(page_zone(page));
++      }
+       return ret;
+ }
diff --git a/queue-6.8/mm-shmem-inline-shmem_is_huge-for-disabled-transparent-hugepages.patch b/queue-6.8/mm-shmem-inline-shmem_is_huge-for-disabled-transparent-hugepages.patch
new file mode 100644 (file)
index 0000000..a93ef4b
--- /dev/null
@@ -0,0 +1,76 @@
+From 1f737846aa3c45f07a06fa0d018b39e1afb8084a Mon Sep 17 00:00:00 2001
+From: Sumanth Korikkar <sumanthk@linux.ibm.com>
+Date: Tue, 9 Apr 2024 17:54:07 +0200
+Subject: mm/shmem: inline shmem_is_huge() for disabled transparent hugepages
+
+From: Sumanth Korikkar <sumanthk@linux.ibm.com>
+
+commit 1f737846aa3c45f07a06fa0d018b39e1afb8084a upstream.
+
+In order to minimize code size (CONFIG_CC_OPTIMIZE_FOR_SIZE=y), the
+compiler might choose to make a regular function call (out-of-line) for
+shmem_is_huge() instead of inlining it. When transparent hugepages are
+disabled (CONFIG_TRANSPARENT_HUGEPAGE=n), this can cause a compilation
+error.
+
+mm/shmem.c: In function `shmem_getattr':
+./include/linux/huge_mm.h:383:27: note: in expansion of macro `BUILD_BUG'
+  383 | #define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })
+      |                           ^~~~~~~~~
+mm/shmem.c:1148:33: note: in expansion of macro `HPAGE_PMD_SIZE'
+ 1148 |                 stat->blksize = HPAGE_PMD_SIZE;
+
+To prevent the possible error, always inline shmem_is_huge() when
+transparent hugepages are disabled.
+
+Link: https://lkml.kernel.org/r/20240409155407.2322714-1-sumanthk@linux.ibm.com
+Signed-off-by: Sumanth Korikkar <sumanthk@linux.ibm.com>
+Acked-by: David Hildenbrand <david@redhat.com>
+Cc: Alexander Gordeev <agordeev@linux.ibm.com>
+Cc: Heiko Carstens <hca@linux.ibm.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Ilya Leoshkevich <iii@linux.ibm.com>
+Cc: Vasily Gorbik <gor@linux.ibm.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/shmem_fs.h |    9 +++++++++
+ mm/shmem.c               |    6 ------
+ 2 files changed, 9 insertions(+), 6 deletions(-)
+
+--- a/include/linux/shmem_fs.h
++++ b/include/linux/shmem_fs.h
+@@ -114,8 +114,17 @@ extern struct page *shmem_read_mapping_p
+ extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
+ int shmem_unuse(unsigned int type);
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ extern bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
+                         struct mm_struct *mm, unsigned long vm_flags);
++#else
++static __always_inline bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
++                                        struct mm_struct *mm, unsigned long vm_flags)
++{
++      return false;
++}
++#endif
++
+ #ifdef CONFIG_SHMEM
+ extern unsigned long shmem_swap_usage(struct vm_area_struct *vma);
+ #else
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -742,12 +742,6 @@ static long shmem_unused_huge_count(stru
+ #define shmem_huge SHMEM_HUGE_DENY
+-bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
+-                 struct mm_struct *mm, unsigned long vm_flags)
+-{
+-      return false;
+-}
+-
+ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
+               struct shrink_control *sc, unsigned long nr_to_split)
+ {
diff --git a/queue-6.8/mm-swapops-update-check-in-is_pfn_swap_entry-for-hwpoison-entries.patch b/queue-6.8/mm-swapops-update-check-in-is_pfn_swap_entry-for-hwpoison-entries.patch
new file mode 100644 (file)
index 0000000..2d486ea
--- /dev/null
@@ -0,0 +1,137 @@
+From 07a57a338adb6ec9e766d6a6790f76527f45ceb5 Mon Sep 17 00:00:00 2001
+From: Oscar Salvador <osalvador@suse.de>
+Date: Sun, 7 Apr 2024 15:05:37 +0200
+Subject: mm,swapops: update check in is_pfn_swap_entry for hwpoison entries
+
+From: Oscar Salvador <osalvador@suse.de>
+
+commit 07a57a338adb6ec9e766d6a6790f76527f45ceb5 upstream.
+
+Tony reported that the Machine check recovery was broken in v6.9-rc1, as
+he was hitting a VM_BUG_ON when injecting uncorrectable memory errors to
+DRAM.
+
+After some more digging and debugging on his side, he realized that this
+went back to v6.1, with the introduction of 'commit 0d206b5d2e0d
+("mm/swap: add swp_offset_pfn() to fetch PFN from swap entry")'.  That
+commit, among other things, introduced swp_offset_pfn(), replacing
+hwpoison_entry_to_pfn() in its favour.
+
+The patch also introduced a VM_BUG_ON() check for is_pfn_swap_entry(), but
+is_pfn_swap_entry() never got updated to cover hwpoison entries, which
+means that we would hit the VM_BUG_ON whenever we would call
+swp_offset_pfn() for such entries on environments with CONFIG_DEBUG_VM
+set.  Fix this by updating the check to cover hwpoison entries as well,
+and update the comment while we are at it.
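+
+To see the bug class in isolation, here is a toy userspace model (it does
+not use the kernel's swp_entry_t encoding; the names are illustrative): an
+assertion over "entries that carry a PFN" trips as soon as one PFN-bearing
+type is missing from the membership check, which is what happened with
+hwpoison entries.
+
+/* Toy userspace model of the check; not the kernel's swp_entry_t layout. */
+#include <assert.h>
+#include <stdbool.h>
+#include <stdio.h>
+
+enum entry_type { MIGRATION, DEVICE_PRIVATE, DEVICE_EXCLUSIVE, HWPOISON };
+
+struct entry {
+        enum entry_type type;
+        unsigned long pfn;
+};
+
+static bool is_pfn_entry(struct entry e)
+{
+        return e.type == MIGRATION || e.type == DEVICE_PRIVATE ||
+               e.type == DEVICE_EXCLUSIVE ||
+               e.type == HWPOISON;      /* the case missing before the fix */
+}
+
+static unsigned long entry_to_pfn(struct entry e)
+{
+        assert(is_pfn_entry(e));        /* stands in for the VM_BUG_ON() */
+        return e.pfn;
+}
+
+int main(void)
+{
+        struct entry e = { .type = HWPOISON, .pfn = 0x1234 };
+
+        printf("pfn: %#lx\n", entry_to_pfn(e));
+        return 0;
+}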
+
+Link: https://lkml.kernel.org/r/20240407130537.16977-1-osalvador@suse.de
+Fixes: 0d206b5d2e0d ("mm/swap: add swp_offset_pfn() to fetch PFN from swap entry")
+Signed-off-by: Oscar Salvador <osalvador@suse.de>
+Reported-by: Tony Luck <tony.luck@intel.com>
+Closes: https://lore.kernel.org/all/Zg8kLSl2yAlA3o5D@agluck-desk3/
+Tested-by: Tony Luck <tony.luck@intel.com>
+Reviewed-by: Peter Xu <peterx@redhat.com>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Acked-by: Miaohe Lin <linmiaohe@huawei.com>
+Cc: <stable@vger.kernel.org>   [6.1.x]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/swapops.h |   65 ++++++++++++++++++++++++------------------------
+ 1 file changed, 33 insertions(+), 32 deletions(-)
+
+--- a/include/linux/swapops.h
++++ b/include/linux/swapops.h
+@@ -390,6 +390,35 @@ static inline bool is_migration_entry_di
+ }
+ #endif        /* CONFIG_MIGRATION */
++#ifdef CONFIG_MEMORY_FAILURE
++
++/*
++ * Support for hardware poisoned pages
++ */
++static inline swp_entry_t make_hwpoison_entry(struct page *page)
++{
++      BUG_ON(!PageLocked(page));
++      return swp_entry(SWP_HWPOISON, page_to_pfn(page));
++}
++
++static inline int is_hwpoison_entry(swp_entry_t entry)
++{
++      return swp_type(entry) == SWP_HWPOISON;
++}
++
++#else
++
++static inline swp_entry_t make_hwpoison_entry(struct page *page)
++{
++      return swp_entry(0, 0);
++}
++
++static inline int is_hwpoison_entry(swp_entry_t swp)
++{
++      return 0;
++}
++#endif
++
+ typedef unsigned long pte_marker;
+ #define  PTE_MARKER_UFFD_WP                   BIT(0)
+@@ -470,8 +499,9 @@ static inline struct page *pfn_swap_entr
+ /*
+  * A pfn swap entry is a special type of swap entry that always has a pfn stored
+- * in the swap offset. They are used to represent unaddressable device memory
+- * and to restrict access to a page undergoing migration.
++ * in the swap offset. They can either be used to represent unaddressable device
++ * memory, to restrict access to a page undergoing migration or to represent a
++ * pfn which has been hwpoisoned and unmapped.
+  */
+ static inline bool is_pfn_swap_entry(swp_entry_t entry)
+ {
+@@ -479,7 +509,7 @@ static inline bool is_pfn_swap_entry(swp
+       BUILD_BUG_ON(SWP_TYPE_SHIFT < SWP_PFN_BITS);
+       return is_migration_entry(entry) || is_device_private_entry(entry) ||
+-             is_device_exclusive_entry(entry);
++             is_device_exclusive_entry(entry) || is_hwpoison_entry(entry);
+ }
+ struct page_vma_mapped_walk;
+@@ -548,35 +578,6 @@ static inline int is_pmd_migration_entry
+ }
+ #endif  /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
+-#ifdef CONFIG_MEMORY_FAILURE
+-
+-/*
+- * Support for hardware poisoned pages
+- */
+-static inline swp_entry_t make_hwpoison_entry(struct page *page)
+-{
+-      BUG_ON(!PageLocked(page));
+-      return swp_entry(SWP_HWPOISON, page_to_pfn(page));
+-}
+-
+-static inline int is_hwpoison_entry(swp_entry_t entry)
+-{
+-      return swp_type(entry) == SWP_HWPOISON;
+-}
+-
+-#else
+-
+-static inline swp_entry_t make_hwpoison_entry(struct page *page)
+-{
+-      return swp_entry(0, 0);
+-}
+-
+-static inline int is_hwpoison_entry(swp_entry_t swp)
+-{
+-      return 0;
+-}
+-#endif
+-
+ static inline int non_swap_entry(swp_entry_t entry)
+ {
+       return swp_type(entry) >= MAX_SWAPFILES;
diff --git a/queue-6.8/mm-userfaultfd-allow-hugetlb-change-protection-upon-poison-entry.patch b/queue-6.8/mm-userfaultfd-allow-hugetlb-change-protection-upon-poison-entry.patch
new file mode 100644 (file)
index 0000000..5873ac7
--- /dev/null
@@ -0,0 +1,61 @@
+From c5977c95dff182d6ee06f4d6f60bcb0284912969 Mon Sep 17 00:00:00 2001
+From: Peter Xu <peterx@redhat.com>
+Date: Fri, 5 Apr 2024 19:19:20 -0400
+Subject: mm/userfaultfd: allow hugetlb change protection upon poison entry
+
+From: Peter Xu <peterx@redhat.com>
+
+commit c5977c95dff182d6ee06f4d6f60bcb0284912969 upstream.
+
+After UFFDIO_POISON, there can be two kinds of hugetlb pte markers, either
+the POISON one or UFFD_WP one.
+
+Allow change protection to run on a poisoned marker just like in the !hugetlb
+cases, ignoring the marker regardless of the permission.
+
+Here the two bits are mutually exclusive.  For example, when installing a
+poisoned entry it must not be UFFD_WP already (by checking pte_none()
+before such an install).  It also means that if UFFD_WP is set there must be
+no POISON bit set.  This makes sense because UFFD_WP is a bit to reflect
+permission, and permissions do not apply if the pte is poisoned and
+destined to sigbus.
+
+So here we simply check the uffd_wp bit first, and do nothing otherwise.
+
+Attach the Fixes to UFFDIO_POISON work, as before that it should not be
+possible to have a poison entry for hugetlb (e.g., hugetlb doesn't do swap,
+so no chance of swapin errors).
+
+Link: https://lkml.kernel.org/r/20240405231920.1772199-1-peterx@redhat.com
+Link: https://lore.kernel.org/r/000000000000920d5e0615602dd1@google.com
+Fixes: fc71884a5f59 ("mm: userfaultfd: add new UFFDIO_POISON ioctl")
+Signed-off-by: Peter Xu <peterx@redhat.com>
+Reported-by: syzbot+b07c8ac8eee3d4d8440f@syzkaller.appspotmail.com
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Axel Rasmussen <axelrasmussen@google.com>
+Cc: <stable@vger.kernel.org>   [6.6+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/hugetlb.c |   10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -6943,9 +6943,13 @@ long hugetlb_change_protection(struct vm
+                       if (!pte_same(pte, newpte))
+                               set_huge_pte_at(mm, address, ptep, newpte, psize);
+               } else if (unlikely(is_pte_marker(pte))) {
+-                      /* No other markers apply for now. */
+-                      WARN_ON_ONCE(!pte_marker_uffd_wp(pte));
+-                      if (uffd_wp_resolve)
++                      /*
++                       * Do nothing on a poison marker; page is
++                       * corrupted, permissons do not apply.  Here
++                       * pte_marker_uffd_wp()==true implies !poison
++                       * because they're mutual exclusive.
++                       */
++                      if (pte_marker_uffd_wp(pte) && uffd_wp_resolve)
+                               /* Safe to modify directly (non-present->none). */
+                               huge_pte_clear(mm, address, ptep, psize);
+               } else if (!huge_pte_none(pte)) {
diff --git a/queue-6.8/nilfs2-fix-oob-in-nilfs_set_de_type.patch b/queue-6.8/nilfs2-fix-oob-in-nilfs_set_de_type.patch
new file mode 100644 (file)
index 0000000..37a7125
--- /dev/null
@@ -0,0 +1,53 @@
+From c4a7dc9523b59b3e73fd522c73e95e072f876b16 Mon Sep 17 00:00:00 2001
+From: Jeongjun Park <aha310510@gmail.com>
+Date: Tue, 16 Apr 2024 03:20:48 +0900
+Subject: nilfs2: fix OOB in nilfs_set_de_type
+
+From: Jeongjun Park <aha310510@gmail.com>
+
+commit c4a7dc9523b59b3e73fd522c73e95e072f876b16 upstream.
+
+The size of the nilfs_type_by_mode array in the fs/nilfs2/dir.c file is
+defined as "S_IFMT >> S_SHIFT", but the nilfs_set_de_type() function,
+which uses this array, specifies the index to read from the array in the
+same way as "(mode & S_IFMT) >> S_SHIFT".
+
+static void nilfs_set_de_type(struct nilfs_dir_entry *de, struct inode
+ *inode)
+{
+       umode_t mode = inode->i_mode;
+
+       de->file_type = nilfs_type_by_mode[(mode & S_IFMT)>>S_SHIFT]; // oob
+}
+
+However, when the index is determined this way, an out-of-bounds (OOB)
+access occurs when the condition "(mode & S_IFMT) == S_IFMT" is satisfied:
+the index then equals the array size, one past the last valid slot.  Therefore, a
+patch to resize the nilfs_type_by_mode array should be applied to prevent
+OOB errors.
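+
+A quick userspace sketch of the arithmetic (standard <sys/stat.h> values,
+not nilfs2 code): with S_IFMT = 0170000 and S_SHIFT = 12, the old array
+size is 15, while the worst-case index is also 15, i.e. one past the last
+valid slot.
+
+/* Userspace sketch of the index arithmetic; not nilfs2 code. */
+#include <stdio.h>
+#include <sys/stat.h>
+
+#define S_SHIFT 12
+
+int main(void)
+{
+        unsigned int mode = S_IFMT;                        /* all type bits set */
+        unsigned int old_size = S_IFMT >> S_SHIFT;         /* 15 */
+        unsigned int worst_index = (mode & S_IFMT) >> S_SHIFT; /* also 15 */
+
+        printf("old array size: %u, worst-case index: %u\n",
+               old_size, worst_index);
+        printf("fixed array size: %u\n", (S_IFMT >> S_SHIFT) + 1); /* 16 */
+        return 0;
+}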
+
+Link: https://lkml.kernel.org/r/20240415182048.7144-1-konishi.ryusuke@gmail.com
+Reported-by: syzbot+2e22057de05b9f3b30d8@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=2e22057de05b9f3b30d8
+Fixes: 2ba466d74ed7 ("nilfs2: directory entry operations")
+Signed-off-by: Jeongjun Park <aha310510@gmail.com>
+Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Tested-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nilfs2/dir.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/nilfs2/dir.c
++++ b/fs/nilfs2/dir.c
+@@ -240,7 +240,7 @@ nilfs_filetype_table[NILFS_FT_MAX] = {
+ #define S_SHIFT 12
+ static unsigned char
+-nilfs_type_by_mode[S_IFMT >> S_SHIFT] = {
++nilfs_type_by_mode[(S_IFMT >> S_SHIFT) + 1] = {
+       [S_IFREG >> S_SHIFT]    = NILFS_FT_REG_FILE,
+       [S_IFDIR >> S_SHIFT]    = NILFS_FT_DIR,
+       [S_IFCHR >> S_SHIFT]    = NILFS_FT_CHRDEV,
diff --git a/queue-6.8/nouveau-fix-instmem-race-condition-around-ptr-stores.patch b/queue-6.8/nouveau-fix-instmem-race-condition-around-ptr-stores.patch
new file mode 100644 (file)
index 0000000..efd67dc
--- /dev/null
@@ -0,0 +1,96 @@
+From fff1386cc889d8fb4089d285f883f8cba62d82ce Mon Sep 17 00:00:00 2001
+From: Dave Airlie <airlied@redhat.com>
+Date: Thu, 11 Apr 2024 11:15:09 +1000
+Subject: nouveau: fix instmem race condition around ptr stores
+
+From: Dave Airlie <airlied@redhat.com>
+
+commit fff1386cc889d8fb4089d285f883f8cba62d82ce upstream.
+
+Running a lot of VK CTS in parallel against nouveau, once every
+few hours you might see something like this crash.
+
+BUG: kernel NULL pointer dereference, address: 0000000000000008
+PGD 8000000114e6e067 P4D 8000000114e6e067 PUD 109046067 PMD 0
+Oops: 0000 [#1] PREEMPT SMP PTI
+CPU: 7 PID: 53891 Comm: deqp-vk Not tainted 6.8.0-rc6+ #27
+Hardware name: Gigabyte Technology Co., Ltd. Z390 I AORUS PRO WIFI/Z390 I AORUS PRO WIFI-CF, BIOS F8 11/05/2021
+RIP: 0010:gp100_vmm_pgt_mem+0xe3/0x180 [nouveau]
+Code: c7 48 01 c8 49 89 45 58 85 d2 0f 84 95 00 00 00 41 0f b7 46 12 49 8b 7e 08 89 da 42 8d 2c f8 48 8b 47 08 41 83 c7 01 48 89 ee <48> 8b 40 08 ff d0 0f 1f 00 49 8b 7e 08 48 89 d9 48 8d 75 04 48 c1
+RSP: 0000:ffffac20c5857838 EFLAGS: 00010202
+RAX: 0000000000000000 RBX: 00000000004d8001 RCX: 0000000000000001
+RDX: 00000000004d8001 RSI: 00000000000006d8 RDI: ffffa07afe332180
+RBP: 00000000000006d8 R08: ffffac20c5857ad0 R09: 0000000000ffff10
+R10: 0000000000000001 R11: ffffa07af27e2de0 R12: 000000000000001c
+R13: ffffac20c5857ad0 R14: ffffa07a96fe9040 R15: 000000000000001c
+FS:  00007fe395eed7c0(0000) GS:ffffa07e2c980000(0000) knlGS:0000000000000000
+CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 0000000000000008 CR3: 000000011febe001 CR4: 00000000003706f0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+Call Trace:
+
+...
+
+ ? gp100_vmm_pgt_mem+0xe3/0x180 [nouveau]
+ ? gp100_vmm_pgt_mem+0x37/0x180 [nouveau]
+ nvkm_vmm_iter+0x351/0xa20 [nouveau]
+ ? __pfx_nvkm_vmm_ref_ptes+0x10/0x10 [nouveau]
+ ? __pfx_gp100_vmm_pgt_mem+0x10/0x10 [nouveau]
+ ? __pfx_gp100_vmm_pgt_mem+0x10/0x10 [nouveau]
+ ? __lock_acquire+0x3ed/0x2170
+ ? __pfx_gp100_vmm_pgt_mem+0x10/0x10 [nouveau]
+ nvkm_vmm_ptes_get_map+0xc2/0x100 [nouveau]
+ ? __pfx_nvkm_vmm_ref_ptes+0x10/0x10 [nouveau]
+ ? __pfx_gp100_vmm_pgt_mem+0x10/0x10 [nouveau]
+ nvkm_vmm_map_locked+0x224/0x3a0 [nouveau]
+
+Adding any sort of useful debug usually makes it go away, so I hand
+wrote the function in a line, and debugged the asm.
+
+Every so often pt->memory->ptrs is NULL. This ptrs ptr is set in
+the nv50_instobj_acquire called from nvkm_kmap.
+
+If Thread A and Thread B both get to nv50_instobj_acquire around
+the same time, and Thread A hits the refcount_set line, and in
+lockstep thread B succeeds at refcount_inc_not_zero, there is a
+chance the ptrs value won't have been stored since refcount_set
+is unordered. Force a memory barrier here; I picked smp_mb, since
+we want it on all CPUs and it's a write followed by a read.
+
+v2: use paired smp_rmb/smp_wmb.
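+
+As a userspace analog of the resulting publish/observe ordering (C11
+fences, not the nouveau code; the names are illustrative), the release
+fence plays the role of smp_wmb() and the acquire fence that of smp_rmb():
+
+/* Userspace analog of the paired-barrier fix; C11 fences, not nouveau code. */
+#include <stdatomic.h>
+#include <stddef.h>
+
+struct obj {
+        void *ptrs;              /* plain data, published by the mapper */
+        atomic_int maps;         /* stands in for iobj->maps */
+};
+
+static void publish(struct obj *o, void *p)
+{
+        o->ptrs = p;                                       /* plain store  */
+        atomic_thread_fence(memory_order_release);         /* ~ smp_wmb()  */
+        atomic_store_explicit(&o->maps, 1, memory_order_relaxed);
+}
+
+static void *observe(struct obj *o)
+{
+        if (atomic_load_explicit(&o->maps, memory_order_relaxed)) {
+                atomic_thread_fence(memory_order_acquire); /* ~ smp_rmb()  */
+                return o->ptrs;  /* guaranteed to see the published value  */
+        }
+        return NULL;
+}
+
+int main(void)
+{
+        static struct obj o;
+        static int payload;
+
+        /* In the real bug the two calls race on different CPUs. */
+        publish(&o, &payload);
+        return observe(&o) == &payload ? 0 : 1;
+}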
+
+Cc: <stable@vger.kernel.org>
+Fixes: be55287aa5ba ("drm/nouveau/imem/nv50: embed nvkm_instobj directly into nv04_instobj")
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Danilo Krummrich <dakr@redhat.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240411011510.2546857-1-airlied@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c |    7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+@@ -222,8 +222,11 @@ nv50_instobj_acquire(struct nvkm_memory
+       void __iomem *map = NULL;
+       /* Already mapped? */
+-      if (refcount_inc_not_zero(&iobj->maps))
++      if (refcount_inc_not_zero(&iobj->maps)) {
++              /* read barrier match the wmb on refcount set */
++              smp_rmb();
+               return iobj->map;
++      }
+       /* Take the lock, and re-check that another thread hasn't
+        * already mapped the object in the meantime.
+@@ -250,6 +253,8 @@ nv50_instobj_acquire(struct nvkm_memory
+                       iobj->base.memory.ptrs = &nv50_instobj_fast;
+               else
+                       iobj->base.memory.ptrs = &nv50_instobj_slow;
++              /* barrier to ensure the ptrs are written before refcount is set */
++              smp_wmb();
+               refcount_set(&iobj->maps, 1);
+       }
diff --git a/queue-6.8/revert-mei-vsc-call-wake_up-in-the-threaded-irq-handler.patch b/queue-6.8/revert-mei-vsc-call-wake_up-in-the-threaded-irq-handler.patch
new file mode 100644 (file)
index 0000000..43d9e54
--- /dev/null
@@ -0,0 +1,44 @@
+From e3dc66d998d2b0c2734db9ca1d6c94c97349529a Mon Sep 17 00:00:00 2001
+From: Sakari Ailus <sakari.ailus@linux.intel.com>
+Date: Wed, 3 Apr 2024 13:13:40 +0800
+Subject: Revert "mei: vsc: Call wake_up() in the threaded IRQ handler"
+
+From: Sakari Ailus <sakari.ailus@linux.intel.com>
+
+commit e3dc66d998d2b0c2734db9ca1d6c94c97349529a upstream.
+
+This reverts commit 058a38acba15fd8e7b262ec6e17c4204cb15f984.
+
+It's not necessary to avoid a spinlock, a sleeping lock on PREEMPT_RT, in
+an interrupt handler as the interrupt handler itself would be called in a
+process context if PREEMPT_RT is enabled. So revert the patch.
+
+Cc: stable@vger.kernel.org # for 6.8
+Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Acked-by: Tomas Winkler <tomas.winkler@intel.com>
+Link: https://lore.kernel.org/r/20240403051341.3534650-1-wentong.wu@intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/misc/mei/vsc-tp.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/misc/mei/vsc-tp.c
++++ b/drivers/misc/mei/vsc-tp.c
+@@ -419,6 +419,8 @@ static irqreturn_t vsc_tp_isr(int irq, v
+       atomic_inc(&tp->assert_cnt);
++      wake_up(&tp->xfer_wait);
++
+       return IRQ_WAKE_THREAD;
+ }
+@@ -426,8 +428,6 @@ static irqreturn_t vsc_tp_thread_isr(int
+ {
+       struct vsc_tp *tp = data;
+-      wake_up(&tp->xfer_wait);
+-
+       if (tp->event_notify)
+               tp->event_notify(tp->event_notify_context);
diff --git a/queue-6.8/revert-usb-cdc-wdm-close-race-between-read-and-workqueue.patch b/queue-6.8/revert-usb-cdc-wdm-close-race-between-read-and-workqueue.patch
new file mode 100644 (file)
index 0000000..41f33fa
--- /dev/null
@@ -0,0 +1,50 @@
+From 1607830dadeefc407e4956336d9fcd9e9defd810 Mon Sep 17 00:00:00 2001
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Thu, 18 Apr 2024 16:33:28 +0200
+Subject: Revert "usb: cdc-wdm: close race between read and workqueue"
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+commit 1607830dadeefc407e4956336d9fcd9e9defd810 upstream.
+
+This reverts commit 339f83612f3a569b194680768b22bf113c26a29d.
+
+It has been found to cause problems in a number of Chromebook devices,
+so revert the change until it can be brought back in a safe way.
+
+Link: https://lore.kernel.org/r/385a3519-b45d-48c5-a6fd-a3fdb6bec92f@chromium.org
+Reported-by: Aleksander Morgado <aleksandermj@chromium.org>
+Fixes: 339f83612f3a ("usb: cdc-wdm: close race between read and workqueue")
+Cc: stable <stable@kernel.org>
+Cc: Oliver Neukum <oneukum@suse.com>
+Cc: Bjørn Mork <bjorn@mork.no>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/class/cdc-wdm.c |    6 +-----
+ 1 file changed, 1 insertion(+), 5 deletions(-)
+
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -485,7 +485,6 @@ out_free_mem:
+ static int service_outstanding_interrupt(struct wdm_device *desc)
+ {
+       int rv = 0;
+-      int used;
+       /* submit read urb only if the device is waiting for it */
+       if (!desc->resp_count || !--desc->resp_count)
+@@ -500,10 +499,7 @@ static int service_outstanding_interrupt
+               goto out;
+       }
+-      used = test_and_set_bit(WDM_RESPONDING, &desc->flags);
+-      if (used)
+-              goto out;
+-
++      set_bit(WDM_RESPONDING, &desc->flags);
+       spin_unlock_irq(&desc->iuspin);
+       rv = usb_submit_urb(desc->response, GFP_KERNEL);
+       spin_lock_irq(&desc->iuspin);
diff --git a/queue-6.8/sched-add-missing-memory-barrier-in-switch_mm_cid.patch b/queue-6.8/sched-add-missing-memory-barrier-in-switch_mm_cid.patch
new file mode 100644 (file)
index 0000000..abaea40
--- /dev/null
@@ -0,0 +1,125 @@
+From fe90f3967bdb3e13f133e5f44025e15f943a99c5 Mon Sep 17 00:00:00 2001
+From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Date: Mon, 15 Apr 2024 11:21:13 -0400
+Subject: sched: Add missing memory barrier in switch_mm_cid
+
+From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+
+commit fe90f3967bdb3e13f133e5f44025e15f943a99c5 upstream.
+
+Many architectures' switch_mm() (e.g. arm64) do not have an smp_mb()
+which the core scheduler code has depended upon since commit:
+
+    commit 223baf9d17f25 ("sched: Fix performance regression introduced by mm_cid")
+
+If switch_mm() doesn't call smp_mb(), sched_mm_cid_remote_clear() can
+unset the actively used cid when it fails to observe active task after it
+sets lazy_put.
+
+There *is* a memory barrier between storing to rq->curr and _return to
+userspace_ (as required by membarrier), but the rseq mm_cid has stricter
+requirements: the barrier needs to be issued between store to rq->curr
+and switch_mm_cid(), which happens earlier than:
+
+  - spin_unlock(),
+  - switch_to().
+
+So it's fine when the architecture switch_mm() happens to have that
+barrier already, but less so when the architecture only provides the
+full barrier in switch_to() or spin_unlock().
+
+It is a bug in the rseq switch_mm_cid() implementation. All architectures
+that don't have memory barriers in switch_mm(), but rather have the full
+barrier either in finish_lock_switch() or switch_to() have them too late
+for the needs of switch_mm_cid().
+
+Introduce a new smp_mb__after_switch_mm(), defined as smp_mb() in the
+generic barrier.h header, and use it in switch_mm_cid() for scheduler
+transitions where switch_mm() is expected to provide a memory barrier.
+
+Architectures can override smp_mb__after_switch_mm() if their
+switch_mm() implementation provides an implicit memory barrier.
+Override it with a no-op on x86 which implicitly provide this memory
+barrier by writing to CR3.
+
+Fixes: 223baf9d17f2 ("sched: Fix performance regression introduced by mm_cid")
+Reported-by: levi.yun <yeoreum.yun@arm.com>
+Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com> # for arm64
+Acked-by: Dave Hansen <dave.hansen@linux.intel.com> # for x86
+Cc: <stable@vger.kernel.org> # 6.4.x
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Link: https://lore.kernel.org/r/20240415152114.59122-2-mathieu.desnoyers@efficios.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/barrier.h |    3 +++
+ include/asm-generic/barrier.h  |    8 ++++++++
+ kernel/sched/sched.h           |   20 ++++++++++++++------
+ 3 files changed, 25 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/include/asm/barrier.h
++++ b/arch/x86/include/asm/barrier.h
+@@ -79,6 +79,9 @@ do {                                                                 \
+ #define __smp_mb__before_atomic()     do { } while (0)
+ #define __smp_mb__after_atomic()      do { } while (0)
++/* Writing to CR3 provides a full memory barrier in switch_mm(). */
++#define smp_mb__after_switch_mm()     do { } while (0)
++
+ #include <asm-generic/barrier.h>
+ #endif /* _ASM_X86_BARRIER_H */
+--- a/include/asm-generic/barrier.h
++++ b/include/asm-generic/barrier.h
+@@ -296,5 +296,13 @@ do {                                                                      \
+ #define io_stop_wc() do { } while (0)
+ #endif
++/*
++ * Architectures that guarantee an implicit smp_mb() in switch_mm()
++ * can override smp_mb__after_switch_mm.
++ */
++#ifndef smp_mb__after_switch_mm
++# define smp_mb__after_switch_mm()    smp_mb()
++#endif
++
+ #endif /* !__ASSEMBLY__ */
+ #endif /* __ASM_GENERIC_BARRIER_H */
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -79,6 +79,8 @@
+ # include <asm/paravirt_api_clock.h>
+ #endif
++#include <asm/barrier.h>
++
+ #include "cpupri.h"
+ #include "cpudeadline.h"
+@@ -3445,13 +3447,19 @@ static inline void switch_mm_cid(struct
+                * between rq->curr store and load of {prev,next}->mm->pcpu_cid[cpu].
+                * Provide it here.
+                */
+-              if (!prev->mm)                          // from kernel
++              if (!prev->mm) {                        // from kernel
+                       smp_mb();
+-              /*
+-               * user -> user transition guarantees a memory barrier through
+-               * switch_mm() when current->mm changes. If current->mm is
+-               * unchanged, no barrier is needed.
+-               */
++              } else {                                // from user
++                      /*
++                       * user->user transition relies on an implicit
++                       * memory barrier in switch_mm() when
++                       * current->mm changes. If the architecture
++                       * switch_mm() does not have an implicit memory
++                       * barrier, it is emitted here.  If current->mm
++                       * is unchanged, no barrier is needed.
++                       */
++                      smp_mb__after_switch_mm();
++              }
+       }
+       if (prev->mm_cid_active) {
+               mm_cid_snapshot_time(rq, prev->mm);
diff --git a/queue-6.8/serial-8250_dw-revert-do-not-reclock-if-already-at-correct-rate.patch b/queue-6.8/serial-8250_dw-revert-do-not-reclock-if-already-at-correct-rate.patch
new file mode 100644 (file)
index 0000000..56d3f2e
--- /dev/null
@@ -0,0 +1,71 @@
+From 7dfae6cbadc1ac99e38ad19fb08810b31ff167be Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Sun, 17 Mar 2024 22:41:23 +0100
+Subject: serial: 8250_dw: Revert: Do not reclock if already at correct rate
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+commit 7dfae6cbadc1ac99e38ad19fb08810b31ff167be upstream.
+
+Commit e5d6bd25f93d ("serial: 8250_dw: Do not reclock if already at
+correct rate") breaks the dw UARTs on Intel Bay Trail (BYT) and
+Cherry Trail (CHT) SoCs.
+
+Before this change the RTL8732BS Bluetooth HCI which is found
+connected over the dw UART on both BYT and CHT boards works properly:
+
+Bluetooth: hci0: RTL: examining hci_ver=06 hci_rev=000b lmp_ver=06 lmp_subver=8723
+Bluetooth: hci0: RTL: rom_version status=0 version=1
+Bluetooth: hci0: RTL: loading rtl_bt/rtl8723bs_fw.bin
+Bluetooth: hci0: RTL: loading rtl_bt/rtl8723bs_config-OBDA8723.bin
+Bluetooth: hci0: RTL: cfg_sz 64, total sz 24508
+Bluetooth: hci0: RTL: fw version 0x365d462e
+
+where as after this change probing it fails:
+
+Bluetooth: hci0: RTL: examining hci_ver=06 hci_rev=000b lmp_ver=06 lmp_subver=8723
+Bluetooth: hci0: RTL: rom_version status=0 version=1
+Bluetooth: hci0: RTL: loading rtl_bt/rtl8723bs_fw.bin
+Bluetooth: hci0: RTL: loading rtl_bt/rtl8723bs_config-OBDA8723.bin
+Bluetooth: hci0: RTL: cfg_sz 64, total sz 24508
+Bluetooth: hci0: command 0xfc20 tx timeout
+Bluetooth: hci0: RTL: download fw command failed (-110)
+
+Revert the changes to fix this regression.
+
+Fixes: e5d6bd25f93d ("serial: 8250_dw: Do not reclock if already at correct rate")
+Cc: stable@vger.kernel.org
+Cc: Peter Collingbourne <pcc@google.com>
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Acked-by: Peter Collingbourne <pcc@google.com>
+Link: https://lore.kernel.org/r/20240317214123.34482-1-hdegoede@redhat.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tty/serial/8250/8250_dw.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/tty/serial/8250/8250_dw.c
++++ b/drivers/tty/serial/8250/8250_dw.c
+@@ -357,9 +357,9 @@ static void dw8250_set_termios(struct ua
+       long rate;
+       int ret;
++      clk_disable_unprepare(d->clk);
+       rate = clk_round_rate(d->clk, newrate);
+-      if (rate > 0 && p->uartclk != rate) {
+-              clk_disable_unprepare(d->clk);
++      if (rate > 0) {
+               /*
+                * Note that any clock-notifer worker will block in
+                * serial8250_update_uartclk() until we are done.
+@@ -367,8 +367,8 @@ static void dw8250_set_termios(struct ua
+               ret = clk_set_rate(d->clk, newrate);
+               if (!ret)
+                       p->uartclk = rate;
+-              clk_prepare_enable(d->clk);
+       }
++      clk_prepare_enable(d->clk);
+       dw8250_do_set_termios(p, termios, old);
+ }
diff --git a/queue-6.8/serial-core-clearing-the-circular-buffer-before-nullifying-it.patch b/queue-6.8/serial-core-clearing-the-circular-buffer-before-nullifying-it.patch
new file mode 100644 (file)
index 0000000..9658b39
--- /dev/null
@@ -0,0 +1,55 @@
+From 9cf7ea2eeb745213dc2a04103e426b960e807940 Mon Sep 17 00:00:00 2001
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Date: Thu, 4 Apr 2024 17:59:26 +0300
+Subject: serial: core: Clearing the circular buffer before NULLifying it
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+commit 9cf7ea2eeb745213dc2a04103e426b960e807940 upstream.
+
+The circular buffer is NULLified in uart_tty_port_shutdown()
+under the spin lock. However, PM or other timer-based callbacks
+may still trigger after this event without knowing that the buffer pointer
+is no longer valid. Since the serial code is a bit inconsistent in checking
+the buffer state (some paths rely on the head-tail positions, some on the
+buffer pointer), it's better to have both aligned, i.e. the buffer pointer
+set to NULL and the head-tail positions equal, meaning the buffer is empty.
+This will prevent asynchronous calls from dereferencing a NULL pointer, as
+reported recently in the 8250 case:
+
+  BUG: kernel NULL pointer dereference, address: 00000cf5
+  Workqueue: pm pm_runtime_work
+  EIP: serial8250_tx_chars (drivers/tty/serial/8250/8250_port.c:1809)
+  ...
+  ? serial8250_tx_chars (drivers/tty/serial/8250/8250_port.c:1809)
+  __start_tx (drivers/tty/serial/8250/8250_port.c:1551)
+  serial8250_start_tx (drivers/tty/serial/8250/8250_port.c:1654)
+  serial_port_runtime_suspend (include/linux/serial_core.h:667 drivers/tty/serial/serial_port.c:63)
+  __rpm_callback (drivers/base/power/runtime.c:393)
+  ? serial_port_remove (drivers/tty/serial/serial_port.c:50)
+  rpm_suspend (drivers/base/power/runtime.c:447)
+
+The proposed change will prevent ->start_tx() from being called during
+suspend on a shut-down port.
+
+Fixes: 43066e32227e ("serial: port: Don't suspend if the port is still busy")
+Cc: stable <stable@kernel.org>
+Reported-by: kernel test robot <oliver.sang@intel.com>
+Closes: https://lore.kernel.org/oe-lkp/202404031607.2e92eebe-lkp@intel.com
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Link: https://lore.kernel.org/r/20240404150034.41648-1-andriy.shevchenko@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tty/serial/serial_core.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -1788,6 +1788,7 @@ static void uart_tty_port_shutdown(struc
+        * Free the transmit buffer.
+        */
+       uart_port_lock_irq(uport);
++      uart_circ_clear(&state->xmit);
+       buf = state->xmit.buf;
+       state->xmit.buf = NULL;
+       uart_port_unlock_irq(uport);
diff --git a/queue-6.8/serial-core-fix-missing-shutdown-and-startup-for-serial-base-port.patch b/queue-6.8/serial-core-fix-missing-shutdown-and-startup-for-serial-base-port.patch
new file mode 100644 (file)
index 0000000..f83ac8f
--- /dev/null
@@ -0,0 +1,158 @@
+From 1aa4ad4eb695bac1b0a7ba542a16d6833c9c8dd8 Mon Sep 17 00:00:00 2001
+From: Tony Lindgren <tony@atomide.com>
+Date: Thu, 11 Apr 2024 08:58:45 +0300
+Subject: serial: core: Fix missing shutdown and startup for serial base port
+
+From: Tony Lindgren <tony@atomide.com>
+
+commit 1aa4ad4eb695bac1b0a7ba542a16d6833c9c8dd8 upstream.
+
+We are seeing start_tx being called after port shutdown as noted by Jiri.
+This happens because we are missing the startup and shutdown related
+functions for the serial base port.
+
+Let's fix the issue by adding startup and shutdown functions for the
+serial base port, so that tx flushing is blocked when the port is not in
+use.
+
+Fixes: 84a9582fd203 ("serial: core: Start managing serial controllers to enable runtime PM")
+Cc: stable <stable@kernel.org>
+Reported-by: Jiri Slaby <jirislaby@kernel.org>
+Signed-off-by: Tony Lindgren <tony@atomide.com>
+Link: https://lore.kernel.org/r/20240411055848.38190-1-tony@atomide.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tty/serial/serial_base.h |    4 ++++
+ drivers/tty/serial/serial_core.c |   20 +++++++++++++++++---
+ drivers/tty/serial/serial_port.c |   34 ++++++++++++++++++++++++++++++++++
+ 3 files changed, 55 insertions(+), 3 deletions(-)
+
+--- a/drivers/tty/serial/serial_base.h
++++ b/drivers/tty/serial/serial_base.h
+@@ -22,6 +22,7 @@ struct serial_ctrl_device {
+ struct serial_port_device {
+       struct device dev;
+       struct uart_port *port;
++      unsigned int tx_enabled:1;
+ };
+ int serial_base_ctrl_init(void);
+@@ -30,6 +31,9 @@ void serial_base_ctrl_exit(void);
+ int serial_base_port_init(void);
+ void serial_base_port_exit(void);
++void serial_base_port_startup(struct uart_port *port);
++void serial_base_port_shutdown(struct uart_port *port);
++
+ int serial_base_driver_register(struct device_driver *driver);
+ void serial_base_driver_unregister(struct device_driver *driver);
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -323,16 +323,26 @@ static int uart_startup(struct tty_struc
+                       bool init_hw)
+ {
+       struct tty_port *port = &state->port;
++      struct uart_port *uport;
+       int retval;
+       if (tty_port_initialized(port))
+-              return 0;
++              goto out_base_port_startup;
+       retval = uart_port_startup(tty, state, init_hw);
+-      if (retval)
++      if (retval) {
+               set_bit(TTY_IO_ERROR, &tty->flags);
++              return retval;
++      }
+-      return retval;
++out_base_port_startup:
++      uport = uart_port_check(state);
++      if (!uport)
++              return -EIO;
++
++      serial_base_port_startup(uport);
++
++      return 0;
+ }
+ /*
+@@ -355,6 +365,9 @@ static void uart_shutdown(struct tty_str
+       if (tty)
+               set_bit(TTY_IO_ERROR, &tty->flags);
++      if (uport)
++              serial_base_port_shutdown(uport);
++
+       if (tty_port_initialized(port)) {
+               tty_port_set_initialized(port, false);
+@@ -1775,6 +1788,7 @@ static void uart_tty_port_shutdown(struc
+       uport->ops->stop_rx(uport);
+       uart_port_unlock_irq(uport);
++      serial_base_port_shutdown(uport);
+       uart_port_shutdown(port);
+       /*
+--- a/drivers/tty/serial/serial_port.c
++++ b/drivers/tty/serial/serial_port.c
+@@ -36,8 +36,12 @@ static int serial_port_runtime_resume(st
+       /* Flush any pending TX for the port */
+       uart_port_lock_irqsave(port, &flags);
++      if (!port_dev->tx_enabled)
++              goto unlock;
+       if (__serial_port_busy(port))
+               port->ops->start_tx(port);
++
++unlock:
+       uart_port_unlock_irqrestore(port, flags);
+ out:
+@@ -57,6 +61,11 @@ static int serial_port_runtime_suspend(s
+               return 0;
+       uart_port_lock_irqsave(port, &flags);
++      if (!port_dev->tx_enabled) {
++              uart_port_unlock_irqrestore(port, flags);
++              return 0;
++      }
++
+       busy = __serial_port_busy(port);
+       if (busy)
+               port->ops->start_tx(port);
+@@ -68,6 +77,31 @@ static int serial_port_runtime_suspend(s
+       return busy ? -EBUSY : 0;
+ }
++static void serial_base_port_set_tx(struct uart_port *port,
++                                  struct serial_port_device *port_dev,
++                                  bool enabled)
++{
++      unsigned long flags;
++
++      uart_port_lock_irqsave(port, &flags);
++      port_dev->tx_enabled = enabled;
++      uart_port_unlock_irqrestore(port, flags);
++}
++
++void serial_base_port_startup(struct uart_port *port)
++{
++      struct serial_port_device *port_dev = port->port_dev;
++
++      serial_base_port_set_tx(port, port_dev, true);
++}
++
++void serial_base_port_shutdown(struct uart_port *port)
++{
++      struct serial_port_device *port_dev = port->port_dev;
++
++      serial_base_port_set_tx(port, port_dev, false);
++}
++
+ static DEFINE_RUNTIME_DEV_PM_OPS(serial_port_pm,
+                                serial_port_runtime_suspend,
+                                serial_port_runtime_resume, NULL);
diff --git a/queue-6.8/serial-core-fix-regression-when-runtime-pm-is-not-enabled.patch b/queue-6.8/serial-core-fix-regression-when-runtime-pm-is-not-enabled.patch
new file mode 100644 (file)
index 0000000..ce5e9ce
--- /dev/null
@@ -0,0 +1,42 @@
+From 5555980571cc744cd99b6455e3e388b54519db8f Mon Sep 17 00:00:00 2001
+From: Tony Lindgren <tony@atomide.com>
+Date: Mon, 25 Mar 2024 09:16:47 +0200
+Subject: serial: core: Fix regression when runtime PM is not enabled
+
+From: Tony Lindgren <tony@atomide.com>
+
+commit 5555980571cc744cd99b6455e3e388b54519db8f upstream.
+
+Commit 45a3a8ef8129 ("serial: core: Revert checks for tx runtime PM state")
+caused a regression for Sun Ultra 60 for the sunsab driver as reported by
+Nick Bowler <nbowler@draconx.ca>.
+
+We need to add back the check for the runtime PM enabled state of the serial
+port controller device; I wrongly assumed earlier that we could just remove it.
+
+Fixes: 45a3a8ef8129 ("serial: core: Revert checks for tx runtime PM state")
+Cc: stable <stable@kernel.org>
+Reported-by: Nick Bowler <nbowler@draconx.ca>
+Signed-off-by: Tony Lindgren <tony@atomide.com>
+Link: https://lore.kernel.org/r/20240325071649.27040-1-tony@atomide.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tty/serial/serial_core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
+index ff85ebd3a007..25a83820927a 100644
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -156,7 +156,7 @@ static void __uart_start(struct uart_state *state)
+        * enabled, serial_port_runtime_resume() calls start_tx() again
+        * after enabling the device.
+        */
+-      if (pm_runtime_active(&port_dev->dev))
++      if (!pm_runtime_enabled(port->dev) || pm_runtime_active(&port_dev->dev))
+               port->ops->start_tx(port);
+       pm_runtime_mark_last_busy(&port_dev->dev);
+       pm_runtime_put_autosuspend(&port_dev->dev);
+-- 
+2.44.0
+
diff --git a/queue-6.8/serial-mxs-auart-add-spinlock-around-changing-cts-state.patch b/queue-6.8/serial-mxs-auart-add-spinlock-around-changing-cts-state.patch
new file mode 100644 (file)
index 0000000..1e783fb
--- /dev/null
@@ -0,0 +1,61 @@
+From 54c4ec5f8c471b7c1137a1f769648549c423c026 Mon Sep 17 00:00:00 2001
+From: Emil Kronborg <emil.kronborg@protonmail.com>
+Date: Wed, 20 Mar 2024 12:15:36 +0000
+Subject: serial: mxs-auart: add spinlock around changing cts state
+
+From: Emil Kronborg <emil.kronborg@protonmail.com>
+
+commit 54c4ec5f8c471b7c1137a1f769648549c423c026 upstream.
+
+The uart_handle_cts_change() function in serial_core expects the caller
+to hold uport->lock. For example, I have seen the below kernel splat,
+when the Bluetooth driver is loaded on an i.MX28 board.
+
+    [   85.119255] ------------[ cut here ]------------
+    [   85.124413] WARNING: CPU: 0 PID: 27 at /drivers/tty/serial/serial_core.c:3453 uart_handle_cts_change+0xb4/0xec
+    [   85.134694] Modules linked in: hci_uart bluetooth ecdh_generic ecc wlcore_sdio configfs
+    [   85.143314] CPU: 0 PID: 27 Comm: kworker/u3:0 Not tainted 6.6.3-00021-gd62a2f068f92 #1
+    [   85.151396] Hardware name: Freescale MXS (Device Tree)
+    [   85.156679] Workqueue: hci0 hci_power_on [bluetooth]
+    (...)
+    [   85.191765]  uart_handle_cts_change from mxs_auart_irq_handle+0x380/0x3f4
+    [   85.198787]  mxs_auart_irq_handle from __handle_irq_event_percpu+0x88/0x210
+    (...)
+
+Cc: stable@vger.kernel.org
+Fixes: 4d90bb147ef6 ("serial: core: Document and assert lock requirements for irq helpers")
+Reviewed-by: Frank Li <Frank.Li@nxp.com>
+Signed-off-by: Emil Kronborg <emil.kronborg@protonmail.com>
+Link: https://lore.kernel.org/r/20240320121530.11348-1-emil.kronborg@protonmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tty/serial/mxs-auart.c |    8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/drivers/tty/serial/mxs-auart.c
++++ b/drivers/tty/serial/mxs-auart.c
+@@ -1086,11 +1086,13 @@ static void mxs_auart_set_ldisc(struct u
+ static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
+ {
+-      u32 istat;
++      u32 istat, stat;
+       struct mxs_auart_port *s = context;
+       u32 mctrl_temp = s->mctrl_prev;
+-      u32 stat = mxs_read(s, REG_STAT);
++      uart_port_lock(&s->port);
++
++      stat = mxs_read(s, REG_STAT);
+       istat = mxs_read(s, REG_INTR);
+       /* ack irq */
+@@ -1126,6 +1128,8 @@ static irqreturn_t mxs_auart_irq_handle(
+               istat &= ~AUART_INTR_TXIS;
+       }
++      uart_port_unlock(&s->port);
++
+       return IRQ_HANDLED;
+ }
diff --git a/queue-6.8/serial-pmac_zilog-remove-flawed-mitigation-for-rx-irq-flood.patch b/queue-6.8/serial-pmac_zilog-remove-flawed-mitigation-for-rx-irq-flood.patch
new file mode 100644 (file)
index 0000000..df14969
--- /dev/null
@@ -0,0 +1,86 @@
+From 1be3226445362bfbf461c92a5bcdb1723f2e4907 Mon Sep 17 00:00:00 2001
+From: Finn Thain <fthain@linux-m68k.org>
+Date: Mon, 8 Apr 2024 19:23:43 +1000
+Subject: serial/pmac_zilog: Remove flawed mitigation for rx irq flood
+
+From: Finn Thain <fthain@linux-m68k.org>
+
+commit 1be3226445362bfbf461c92a5bcdb1723f2e4907 upstream.
+
+The mitigation was intended to stop the irq completely. That may be
+better than a hard lock-up but it turns out that you get a crash anyway
+if you're using pmac_zilog as a serial console:
+
+ttyPZ0: pmz: rx irq flood !
+BUG: spinlock recursion on CPU#0, swapper/0
+
+That's because the pr_err() call in pmz_receive_chars() results in
+pmz_console_write() attempting to lock a spinlock already locked in
+pmz_interrupt(). With CONFIG_DEBUG_SPINLOCK=y, this produces a fatal
+BUG splat. The spinlock in question is the one in struct uart_port.
+
+Even when it's not fatal, the serial port rx function ceases to work.
+Also, the iteration limit doesn't play nicely with QEMU, as can be
+seen in the bug report linked below.
+
+A web search for other reports of the error message "pmz: rx irq flood"
+didn't produce anything. So I don't think this code is needed any more.
+Remove it.
+
+Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Cc: Michael Ellerman <mpe@ellerman.id.au>
+Cc: Nicholas Piggin <npiggin@gmail.com>
+Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
+Cc: Aneesh Kumar K.V <aneesh.kumar@kernel.org>
+Cc: Naveen N. Rao <naveen.n.rao@linux.ibm.com>
+Cc: Andy Shevchenko <andy.shevchenko@gmail.com>
+Cc: stable@kernel.org
+Cc: linux-m68k@lists.linux-m68k.org
+Link: https://github.com/vivier/qemu-m68k/issues/44
+Link: https://lore.kernel.org/all/1078874617.9746.36.camel@gaston/
+Acked-by: Michael Ellerman <mpe@ellerman.id.au>
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Cc: stable <stable@kernel.org>
+Signed-off-by: Finn Thain <fthain@linux-m68k.org>
+Link: https://lore.kernel.org/r/e853cf2c762f23101cd2ddec0cc0c2be0e72685f.1712568223.git.fthain@linux-m68k.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tty/serial/pmac_zilog.c |   14 --------------
+ 1 file changed, 14 deletions(-)
+
+--- a/drivers/tty/serial/pmac_zilog.c
++++ b/drivers/tty/serial/pmac_zilog.c
+@@ -210,7 +210,6 @@ static bool pmz_receive_chars(struct uar
+ {
+       struct tty_port *port;
+       unsigned char ch, r1, drop, flag;
+-      int loops = 0;
+       /* Sanity check, make sure the old bug is no longer happening */
+       if (uap->port.state == NULL) {
+@@ -291,25 +290,12 @@ static bool pmz_receive_chars(struct uar
+               if (r1 & Rx_OVR)
+                       tty_insert_flip_char(port, 0, TTY_OVERRUN);
+       next_char:
+-              /* We can get stuck in an infinite loop getting char 0 when the
+-               * line is in a wrong HW state, we break that here.
+-               * When that happens, I disable the receive side of the driver.
+-               * Note that what I've been experiencing is a real irq loop where
+-               * I'm getting flooded regardless of the actual port speed.
+-               * Something strange is going on with the HW
+-               */
+-              if ((++loops) > 1000)
+-                      goto flood;
+               ch = read_zsreg(uap, R0);
+               if (!(ch & Rx_CH_AV))
+                       break;
+       }
+       return true;
+- flood:
+-      pmz_interrupt_control(uap, 0);
+-      pmz_error("pmz: rx irq flood !\n");
+-      return true;
+ }
+ static void pmz_status_handle(struct uart_pmac_port *uap)
diff --git a/queue-6.8/serial-stm32-reset-.throttled-state-in-.startup.patch b/queue-6.8/serial-stm32-reset-.throttled-state-in-.startup.patch
new file mode 100644 (file)
index 0000000..51643a7
--- /dev/null
@@ -0,0 +1,39 @@
+From ea2624b5b829b8f93c0dce25721d835969b34faf Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= <u.kleine-koenig@pengutronix.de>
+Date: Wed, 17 Apr 2024 11:03:28 +0200
+Subject: serial: stm32: Reset .throttled state in .startup()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+
+commit ea2624b5b829b8f93c0dce25721d835969b34faf upstream.
+
+When a UART is opened that still has .throttled set from a previous
+open, the RX interrupt is enabled but the irq handler doesn't consider
+it. This easily results in a stuck irq, with the effect of occupying the
+CPU in a tight loop.
+
+So reset the throttle state in .startup() to ensure that RX irqs are
+handled.
+
+Fixes: d1ec8a2eabe9 ("serial: stm32: update throttle and unthrottle ops for dma mode")
+Cc: stable@vger.kernel.org
+Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Link: https://lore.kernel.org/r/a784f80d3414f7db723b2ec66efc56e1ad666cbf.1713344161.git.u.kleine-koenig@pengutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tty/serial/stm32-usart.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/tty/serial/stm32-usart.c
++++ b/drivers/tty/serial/stm32-usart.c
+@@ -1088,6 +1088,7 @@ static int stm32_usart_startup(struct ua
+               val |= USART_CR2_SWAP;
+               writel_relaxed(val, port->membase + ofs->cr2);
+       }
++      stm32_port->throttled = false;
+       /* RX FIFO Flush */
+       if (ofs->rqr != UNDEF_REG)
diff --git a/queue-6.8/serial-stm32-return-irq_none-in-the-isr-if-no-handling-happend.patch b/queue-6.8/serial-stm32-return-irq_none-in-the-isr-if-no-handling-happend.patch
new file mode 100644 (file)
index 0000000..5ad9d78
--- /dev/null
@@ -0,0 +1,92 @@
+From 13c785323b36b845300b256d0e5963c3727667d7 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= <u.kleine-koenig@pengutronix.de>
+Date: Wed, 17 Apr 2024 11:03:27 +0200
+Subject: serial: stm32: Return IRQ_NONE in the ISR if no handling happend
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+
+commit 13c785323b36b845300b256d0e5963c3727667d7 upstream.
+
+If there is a stuck irq that the handler doesn't address, returning
+IRQ_HANDLED unconditionally makes it impossible for the irq core to
+detect the problem and disable the irq. So only return IRQ_HANDLED if
+an event was handled.
+
+A stuck irq is still problematic, but with this change at least it only
+makes the UART nonfunctional instead of occupying the (usually only) CPU
+at 100% and so stalling the whole machine.
+
+Fixes: 48a6092fb41f ("serial: stm32-usart: Add STM32 USART Driver")
+Cc: stable@vger.kernel.org
+Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Link: https://lore.kernel.org/r/5f92603d0dfd8a5b8014b2b10a902d91e0bb881f.1713344161.git.u.kleine-koenig@pengutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tty/serial/stm32-usart.c |   12 ++++++++++--
+ 1 file changed, 10 insertions(+), 2 deletions(-)
+
+--- a/drivers/tty/serial/stm32-usart.c
++++ b/drivers/tty/serial/stm32-usart.c
+@@ -857,6 +857,7 @@ static irqreturn_t stm32_usart_interrupt
+       const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+       u32 sr;
+       unsigned int size;
++      irqreturn_t ret = IRQ_NONE;
+       sr = readl_relaxed(port->membase + ofs->isr);
+@@ -865,11 +866,14 @@ static irqreturn_t stm32_usart_interrupt
+           (sr & USART_SR_TC)) {
+               stm32_usart_tc_interrupt_disable(port);
+               stm32_usart_rs485_rts_disable(port);
++              ret = IRQ_HANDLED;
+       }
+-      if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG)
++      if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG) {
+               writel_relaxed(USART_ICR_RTOCF,
+                              port->membase + ofs->icr);
++              ret = IRQ_HANDLED;
++      }
+       if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG) {
+               /* Clear wake up flag and disable wake up interrupt */
+@@ -878,6 +882,7 @@ static irqreturn_t stm32_usart_interrupt
+               stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
+               if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
+                       pm_wakeup_event(tport->tty->dev, 0);
++              ret = IRQ_HANDLED;
+       }
+       /*
+@@ -892,6 +897,7 @@ static irqreturn_t stm32_usart_interrupt
+                       uart_unlock_and_check_sysrq(port);
+                       if (size)
+                               tty_flip_buffer_push(tport);
++                      ret = IRQ_HANDLED;
+               }
+       }
+@@ -899,6 +905,7 @@ static irqreturn_t stm32_usart_interrupt
+               uart_port_lock(port);
+               stm32_usart_transmit_chars(port);
+               uart_port_unlock(port);
++              ret = IRQ_HANDLED;
+       }
+       /* Receiver timeout irq for DMA RX */
+@@ -908,9 +915,10 @@ static irqreturn_t stm32_usart_interrupt
+               uart_unlock_and_check_sysrq(port);
+               if (size)
+                       tty_flip_buffer_push(tport);
++              ret = IRQ_HANDLED;
+       }
+-      return IRQ_HANDLED;
++      return ret;
+ }
+ static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl)
index dd75eec26687510afc9364cb05f9132e0c3289f1..4839a2eee1802729ddbd850e7f75fb0431ac719b 100644 (file)
@@ -96,3 +96,55 @@ thunderbolt-avoid-notify-pm-core-about-runtime-pm-resume.patch
 thunderbolt-fix-wake-configurations-after-device-unplug.patch
 thunderbolt-do-not-create-displayport-tunnels-on-adapters-of-the-same-router.patch
 comedi-vmk80xx-fix-incomplete-endpoint-checking.patch
+serial-mxs-auart-add-spinlock-around-changing-cts-state.patch
+serial-pmac_zilog-remove-flawed-mitigation-for-rx-irq-flood.patch
+serial-8250_dw-revert-do-not-reclock-if-already-at-correct-rate.patch
+serial-stm32-return-irq_none-in-the-isr-if-no-handling-happend.patch
+serial-stm32-reset-.throttled-state-in-.startup.patch
+serial-core-fix-regression-when-runtime-pm-is-not-enabled.patch
+serial-core-clearing-the-circular-buffer-before-nullifying-it.patch
+serial-core-fix-missing-shutdown-and-startup-for-serial-base-port.patch
+usb-serial-option-add-fibocom-fm135-gl-variants.patch
+usb-serial-option-add-support-for-fibocom-fm650-fg650.patch
+usb-serial-option-add-lonsung-u8300-u9300-product.patch
+usb-serial-option-support-quectel-em060k-sub-models.patch
+usb-serial-option-add-rolling-rw101-gl-and-rw135-gl-support.patch
+usb-serial-option-add-telit-fn920c04-rmnet-compositions.patch
+revert-usb-cdc-wdm-close-race-between-read-and-workqueue.patch
+revert-mei-vsc-call-wake_up-in-the-threaded-irq-handler.patch
+usb-dwc2-host-fix-dereference-issue-in-ddma-completion-flow.patch
+usb-disable-usb3-lpm-at-shutdown.patch
+usb-gadget-f_ncm-fix-uaf-ncm-object-at-re-bind-after-usb-ep-transport-error.patch
+usb-typec-tcpm-correct-the-pdo-counting-in-pd_set.patch
+mei-vsc-unregister-interrupt-handler-for-system-suspend.patch
+mei-me-disable-rpl-s-on-sps-and-ign-firmwares.patch
+speakup-avoid-crash-on-very-long-word.patch
+fs-sysfs-fix-reference-leak-in-sysfs_break_active_protection.patch
+sched-add-missing-memory-barrier-in-switch_mm_cid.patch
+kvm-x86-snapshot-if-a-vcpu-s-vendor-model-is-amd-vs.-intel-compatible.patch
+kvm-x86-pmu-disable-support-for-adaptive-pebs.patch
+kvm-x86-pmu-do-not-mask-lvtpc-when-handling-a-pmi-on-amd-platforms.patch
+kvm-x86-mmu-x86-don-t-overflow-lpage_info-when-checking-attributes.patch
+kvm-x86-mmu-write-protect-l2-sptes-in-tdp-mmu-when-clearing-dirty-status.patch
+arm64-head-disable-mmu-at-el2-before-clearing-hcr_el2.e2h.patch
+arm64-hibernate-fix-level3-translation-fault-in-swsusp_save.patch
+init-main.c-fix-potential-static_command_line-memory-overflow.patch
+mm-madvise-make-madv_populate_-read-write-handle-vm_fault_retry-properly.patch
+mm-userfaultfd-allow-hugetlb-change-protection-upon-poison-entry.patch
+mm-swapops-update-check-in-is_pfn_swap_entry-for-hwpoison-entries.patch
+mm-memory-failure-fix-deadlock-when-hugetlb_optimize_vmemmap-is-enabled.patch
+mm-shmem-inline-shmem_is_huge-for-disabled-transparent-hugepages.patch
+fuse-fix-leaked-enosys-error-on-first-statx-call.patch
+drm-amdgpu-validate-the-parameters-of-bo-mapping-operations-more-clearly.patch
+drm-amdkfd-fix-memory-leak-in-create_process-failure.patch
+drm-amdgpu-remove-invalid-resource-start-check-v2.patch
+drm-ttm-stop-pooling-cached-numa-pages-v2.patch
+drm-xe-fix-bo-leak-in-intel_fb_bo_framebuffer_init.patch
+drm-vmwgfx-fix-prime-import-export.patch
+drm-vmwgfx-sort-primary-plane-formats-by-order-of-preference.patch
+drm-vmwgfx-fix-crtc-s-atomic-check-conditional.patch
+nouveau-fix-instmem-race-condition-around-ptr-stores.patch
+bootconfig-use-memblock_free_late-to-free-xbc-memory-to-buddy.patch
+squashfs-check-the-inode-number-is-not-the-invalid-value-of-zero.patch
+nilfs2-fix-oob-in-nilfs_set_de_type.patch
+fork-defer-linking-file-vma-until-vma-is-fully-initialized.patch
diff --git a/queue-6.8/speakup-avoid-crash-on-very-long-word.patch b/queue-6.8/speakup-avoid-crash-on-very-long-word.patch
new file mode 100644 (file)
index 0000000..26ce554
--- /dev/null
@@ -0,0 +1,32 @@
+From c8d2f34ea96ea3bce6ba2535f867f0d4ee3b22e1 Mon Sep 17 00:00:00 2001
+From: Samuel Thibault <samuel.thibault@ens-lyon.org>
+Date: Sat, 23 Mar 2024 17:48:43 +0100
+Subject: speakup: Avoid crash on very long word
+
+From: Samuel Thibault <samuel.thibault@ens-lyon.org>
+
+commit c8d2f34ea96ea3bce6ba2535f867f0d4ee3b22e1 upstream.
+
+If a console is set up very wide and contains a very long word
+(> 256 characters), we have to stop before exceeding the length of the word
+buffer.
+
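+A self-contained sketch of the bound being added (plain C; the fixed-size
+array and line below merely stand in for speakup's word buffer and the
+console line):
+
+	#include <stdio.h>
+	#include <string.h>
+
+	int main(void)
+	{
+		const char *line = "a-very-long-word-that-exceeds-the-buffer";
+		char buf[16];	/* stand-in for the fixed-size word buffer */
+		size_t cols = strlen(line);
+		size_t x = 0, cnt = 0;
+
+		/* Stop at the end of the line *and* before the buffer is full,
+		 * leaving room for the terminating NUL. */
+		while (x < cols && cnt < sizeof(buf) - 1)
+			buf[cnt++] = line[x++];
+		buf[cnt] = '\0';
+
+		printf("copied %zu of %zu characters\n", cnt, cols);
+		return 0;
+	}
+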
+Signed-off-by: Samuel Thibault <samuel.thibault@ens-lyon.org>
+Fixes: c6e3fd22cd538 ("Staging: add speakup to the staging directory")
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20240323164843.1426997-1-samuel.thibault@ens-lyon.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/accessibility/speakup/main.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/accessibility/speakup/main.c
++++ b/drivers/accessibility/speakup/main.c
+@@ -574,7 +574,7 @@ static u_long get_word(struct vc_data *v
+       }
+       attr_ch = get_char(vc, (u_short *)tmp_pos, &spk_attr);
+       buf[cnt++] = attr_ch;
+-      while (tmpx < vc->vc_cols - 1) {
++      while (tmpx < vc->vc_cols - 1 && cnt < sizeof(buf) - 1) {
+               tmp_pos += 2;
+               tmpx++;
+               ch = get_char(vc, (u_short *)tmp_pos, &temp);
diff --git a/queue-6.8/squashfs-check-the-inode-number-is-not-the-invalid-value-of-zero.patch b/queue-6.8/squashfs-check-the-inode-number-is-not-the-invalid-value-of-zero.patch
new file mode 100644 (file)
index 0000000..3386096
--- /dev/null
@@ -0,0 +1,67 @@
+From 9253c54e01b6505d348afbc02abaa4d9f8a01395 Mon Sep 17 00:00:00 2001
+From: Phillip Lougher <phillip@squashfs.org.uk>
+Date: Mon, 8 Apr 2024 23:02:06 +0100
+Subject: Squashfs: check the inode number is not the invalid value of zero
+
+From: Phillip Lougher <phillip@squashfs.org.uk>
+
+commit 9253c54e01b6505d348afbc02abaa4d9f8a01395 upstream.
+
+Syskiller has produced an out of bounds access in fill_meta_index().
+
+That out-of-bounds access is ultimately caused by the inode having an inode
+number of zero, an invalid value that was not checked.
+
+The reason this causes the out-of-bounds access is the following sequence
+of events:
+
+1. Fill_meta_index() is called to allocate (via empty_meta_index())
+   and fill a metadata index.  It however suffers a data read error
+   and aborts, invalidating the newly returned empty metadata index.
+   It does this by setting the inode number of the index to zero,
+   which means unused (zero is not a valid inode number).
+
+2. When fill_meta_index() is subsequently called again on another
+   read operation, locate_meta_index() returns the previous index
+   because it matches the inode number of 0.  Because this index
+   has been returned it is expected to have been filled, and because
+   it hasn't been, an out of bounds access is performed.
+
+This patch adds a sanity check that the inode number is not zero when the
+inode is created and returns -EINVAL if it is.
+
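+For reference, a sketch of the added check (mirroring the hunk below): the
+on-disk inode number is validated before any other state is derived from it,
+because squashfs reuses the value zero internally to mean "unused":
+
+	inode->i_ino = le32_to_cpu(sqsh_ino->inode_number);
+	if (inode->i_ino == 0)	/* zero is the "unused" sentinel, never valid */
+		return -EINVAL;
+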
+[phillip@squashfs.org.uk: whitespace fix]
+  Link: https://lkml.kernel.org/r/20240409204723.446925-1-phillip@squashfs.org.uk
+Link: https://lkml.kernel.org/r/20240408220206.435788-1-phillip@squashfs.org.uk
+Signed-off-by: Phillip Lougher <phillip@squashfs.org.uk>
+Reported-by: "Ubisectech Sirius" <bugreport@ubisectech.com>
+Closes: https://lore.kernel.org/lkml/87f5c007-b8a5-41ae-8b57-431e924c5915.bugreport@ubisectech.com/
+Cc: Christian Brauner <brauner@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/squashfs/inode.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/fs/squashfs/inode.c
++++ b/fs/squashfs/inode.c
+@@ -48,6 +48,10 @@ static int squashfs_new_inode(struct sup
+       gid_t i_gid;
+       int err;
++      inode->i_ino = le32_to_cpu(sqsh_ino->inode_number);
++      if (inode->i_ino == 0)
++              return -EINVAL;
++
+       err = squashfs_get_id(sb, le16_to_cpu(sqsh_ino->uid), &i_uid);
+       if (err)
+               return err;
+@@ -58,7 +62,6 @@ static int squashfs_new_inode(struct sup
+       i_uid_write(inode, i_uid);
+       i_gid_write(inode, i_gid);
+-      inode->i_ino = le32_to_cpu(sqsh_ino->inode_number);
+       inode_set_mtime(inode, le32_to_cpu(sqsh_ino->mtime), 0);
+       inode_set_atime(inode, inode_get_mtime_sec(inode), 0);
+       inode_set_ctime(inode, inode_get_mtime_sec(inode), 0);
diff --git a/queue-6.8/usb-disable-usb3-lpm-at-shutdown.patch b/queue-6.8/usb-disable-usb3-lpm-at-shutdown.patch
new file mode 100644 (file)
index 0000000..3c8080b
--- /dev/null
@@ -0,0 +1,87 @@
+From d920a2ed8620be04a3301e1a9c2b7cc1de65f19d Mon Sep 17 00:00:00 2001
+From: Kai-Heng Feng <kai.heng.feng@canonical.com>
+Date: Tue, 5 Mar 2024 14:51:38 +0800
+Subject: usb: Disable USB3 LPM at shutdown
+
+From: Kai-Heng Feng <kai.heng.feng@canonical.com>
+
+commit d920a2ed8620be04a3301e1a9c2b7cc1de65f19d upstream.
+
+SanDisk USB3 storage may disappear after a system reboot:
+
+usb usb2-port3: link state change
+xhci_hcd 0000:00:14.0: clear port3 link state change, portsc: 0x2c0
+usb usb2-port3: do warm reset, port only
+xhci_hcd 0000:00:14.0: xhci_hub_status_data: stopping usb2 port polling
+xhci_hcd 0000:00:14.0: Get port status 2-3 read: 0x2b0, return 0x2b0
+usb usb2-port3: not warm reset yet, waiting 50ms
+xhci_hcd 0000:00:14.0: Get port status 2-3 read: 0x2f0, return 0x2f0
+usb usb2-port3: not warm reset yet, waiting 200ms
+...
+xhci_hcd 0000:00:14.0: Get port status 2-3 read: 0x6802c0, return 0x7002c0
+usb usb2-port3: not warm reset yet, waiting 200ms
+xhci_hcd 0000:00:14.0: clear port3 reset change, portsc: 0x4802c0
+xhci_hcd 0000:00:14.0: clear port3 warm(BH) reset change, portsc: 0x4002c0
+xhci_hcd 0000:00:14.0: clear port3 link state change, portsc: 0x2c0
+xhci_hcd 0000:00:14.0: Get port status 2-3 read: 0x2c0, return 0x2c0
+usb usb2-port3: not enabled, trying warm reset again...
+
+This is because the USB device can still raise port change events after the
+xHCI controller has been shut down:
+
+xhci_hcd 0000:38:00.0: // Setting command ring address to 0xffffe001
+xhci_hcd 0000:38:00.0: xhci_resume: starting usb3 port polling.
+xhci_hcd 0000:38:00.0: xhci_hub_status_data: stopping usb4 port polling
+xhci_hcd 0000:38:00.0: xhci_hub_status_data: stopping usb3 port polling
+xhci_hcd 0000:38:00.0: hcd_pci_runtime_resume: 0
+xhci_hcd 0000:38:00.0: xhci_shutdown: stopping usb3 port polling.
+xhci_hcd 0000:38:00.0: // Halt the HC
+xhci_hcd 0000:38:00.0: xhci_shutdown completed - status = 1
+xhci_hcd 0000:00:14.0: xhci_shutdown: stopping usb1 port polling.
+xhci_hcd 0000:00:14.0: // Halt the HC
+xhci_hcd 0000:00:14.0: xhci_shutdown completed - status = 1
+xhci_hcd 0000:00:14.0: Get port status 2-3 read: 0x1203, return 0x203
+xhci_hcd 0000:00:14.0: set port reset, actual port 2-3 status  = 0x1311
+xhci_hcd 0000:00:14.0: Get port status 2-3 read: 0x201203, return 0x100203
+xhci_hcd 0000:00:14.0: clear port3 reset change, portsc: 0x1203
+xhci_hcd 0000:00:14.0: clear port3 warm(BH) reset change, portsc: 0x1203
+xhci_hcd 0000:00:14.0: clear port3 link state change, portsc: 0x1203
+xhci_hcd 0000:00:14.0: clear port3 connect change, portsc: 0x1203
+xhci_hcd 0000:00:14.0: Get port status 2-3 read: 0x1203, return 0x203
+usb 2-3: device not accepting address 2, error -108
+xhci_hcd 0000:00:14.0: xHCI dying or halted, can't queue_command
+xhci_hcd 0000:00:14.0: Set port 2-3 link state, portsc: 0x1203, write 0x11261
+xhci_hcd 0000:00:14.0: Get port status 2-3 read: 0x1263, return 0x263
+xhci_hcd 0000:00:14.0: set port reset, actual port 2-3 status  = 0x1271
+xhci_hcd 0000:00:14.0: Get port status 2-3 read: 0x12b1, return 0x2b1
+usb usb2-port3: not reset yet, waiting 60ms
+ACPI: PM: Preparing to enter system sleep state S5
+xhci_hcd 0000:00:14.0: Get port status 2-3 read: 0x12f1, return 0x2f1
+usb usb2-port3: not reset yet, waiting 200ms
+reboot: Restarting system
+
+The port change event is caused by an LPM transition, so disable LPM at
+shutdown to make sure the device stays in U0 for the warm boot.
+
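+A sketch of the resulting shutdown path (mirroring the hunk below; both the
+USB2 hardware LPM and the USB3 link power management of the child device are
+disabled so no LPM-driven port change events fire across the reboot):
+
+	static void usb_port_shutdown(struct device *dev)
+	{
+		struct usb_port *port_dev = to_usb_port(dev);
+
+		if (port_dev->child) {
+			usb_disable_usb2_hardware_lpm(port_dev->child);
+			usb_unlocked_disable_lpm(port_dev->child);
+		}
+	}
+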
+Signed-off-by: Kai-Heng Feng <kai.heng.feng@canonical.com>
+Cc: stable <stable@kernel.org>
+Link: https://lore.kernel.org/r/20240305065140.66801-1-kai.heng.feng@canonical.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/core/port.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/usb/core/port.c
++++ b/drivers/usb/core/port.c
+@@ -448,8 +448,10 @@ static void usb_port_shutdown(struct dev
+ {
+       struct usb_port *port_dev = to_usb_port(dev);
+-      if (port_dev->child)
++      if (port_dev->child) {
+               usb_disable_usb2_hardware_lpm(port_dev->child);
++              usb_unlocked_disable_lpm(port_dev->child);
++      }
+ }
+ static const struct dev_pm_ops usb_port_pm_ops = {
diff --git a/queue-6.8/usb-dwc2-host-fix-dereference-issue-in-ddma-completion-flow.patch b/queue-6.8/usb-dwc2-host-fix-dereference-issue-in-ddma-completion-flow.patch
new file mode 100644 (file)
index 0000000..879bb6f
--- /dev/null
@@ -0,0 +1,41 @@
+From eed04fa96c48790c1cce73c8a248e9d460b088f8 Mon Sep 17 00:00:00 2001
+From: Minas Harutyunyan <Minas.Harutyunyan@synopsys.com>
+Date: Tue, 9 Apr 2024 12:27:54 +0000
+Subject: usb: dwc2: host: Fix dereference issue in DDMA completion flow.
+
+From: Minas Harutyunyan <Minas.Harutyunyan@synopsys.com>
+
+commit eed04fa96c48790c1cce73c8a248e9d460b088f8 upstream.
+
+Fix a dereference of qtd->urb that happened before the corresponding NULL
+check in the DDMA completion flow.
+
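+The essence of the fix is to perform the NULL check before the dereference; a
+self-contained sketch (plain C, with hypothetical stand-in types rather than
+the dwc2 structures):
+
+	#include <errno.h>
+	#include <stddef.h>
+
+	struct xfer_urb { void *priv; };
+	struct xfer_qtd { struct xfer_urb *urb; };
+
+	static int complete_desc(struct xfer_qtd *qtd)
+	{
+		void *usb_urb;
+
+		if (!qtd->urb)			/* validate first ...              */
+			return -EINVAL;
+
+		usb_urb = qtd->urb->priv;	/* ... dereference only afterwards */
+		(void)usb_urb;
+		return 0;
+	}
+
+	int main(void)
+	{
+		struct xfer_qtd qtd = { .urb = NULL };
+
+		return complete_desc(&qtd) == -EINVAL ? 0 : 1;
+	}
+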
+Fixes: b258e4268850 ("usb: dwc2: host: Fix ISOC flow in DDMA mode")
+CC: stable@vger.kernel.org
+Reported-by: Dan Carpenter <dan.carpenter@linaro.org>
+Closes: https://lore.kernel.org/linux-usb/2024040834-ethically-rumble-701f@gregkh/T/#m4c4b83bef0ebb4b67fe2e0a7d6466cbb6f416e39
+Signed-off-by: Minas Harutyunyan <Minas.Harutyunyan@synopsys.com>
+Link: https://lore.kernel.org/r/cc826d3ef53c934d8e6d98870f17f3cdc3d2755d.1712665387.git.Minas.Harutyunyan@synopsys.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/dwc2/hcd_ddma.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/usb/dwc2/hcd_ddma.c
++++ b/drivers/usb/dwc2/hcd_ddma.c
+@@ -867,13 +867,15 @@ static int dwc2_cmpl_host_isoc_dma_desc(
+       struct dwc2_dma_desc *dma_desc;
+       struct dwc2_hcd_iso_packet_desc *frame_desc;
+       u16 frame_desc_idx;
+-      struct urb *usb_urb = qtd->urb->priv;
++      struct urb *usb_urb;
+       u16 remain = 0;
+       int rc = 0;
+       if (!qtd->urb)
+               return -EINVAL;
++      usb_urb = qtd->urb->priv;
++
+       dma_sync_single_for_cpu(hsotg->dev, qh->desc_list_dma + (idx *
+                               sizeof(struct dwc2_dma_desc)),
+                               sizeof(struct dwc2_dma_desc),
diff --git a/queue-6.8/usb-gadget-f_ncm-fix-uaf-ncm-object-at-re-bind-after-usb-ep-transport-error.patch b/queue-6.8/usb-gadget-f_ncm-fix-uaf-ncm-object-at-re-bind-after-usb-ep-transport-error.patch
new file mode 100644 (file)
index 0000000..a8ea709
--- /dev/null
@@ -0,0 +1,77 @@
+From 6334b8e4553cc69f51e383c9de545082213d785e Mon Sep 17 00:00:00 2001
+From: Norihiko Hama <Norihiko.Hama@alpsalpine.com>
+Date: Wed, 27 Mar 2024 11:35:50 +0900
+Subject: usb: gadget: f_ncm: Fix UAF ncm object at re-bind after usb ep transport error
+
+From: Norihiko Hama <Norihiko.Hama@alpsalpine.com>
+
+commit 6334b8e4553cc69f51e383c9de545082213d785e upstream.
+
+When the ncm function is working and the usb0 interface is then stopped for
+link down, eth_stop() is called. If a usb transport error happens to occur in
+usb_ep_enable() at this point, 'in_ep' and/or 'out_ep' may not be enabled.
+
+After that, ncm_disable() is called to disable the function for ncm unbind,
+but gether_disconnect() is never called since 'in_ep' is not enabled.
+
+As a result, the ncm object is released in ncm unbind, but 'dev->port_usb'
+associated with 'ncm->port' is not NULL.
+
+When ncm is bound again to recover the netdev, a new ncm object is allocated,
+but the usb0 interface is still associated with the previously released ncm
+object.
+
+Therefore, once the usb0 interface is up and eth_start_xmit() is called, the
+released ncm object is dereferenced, which can cause a use-after-free.
+
+[function unlink via configfs]
+  usb0: eth_stop dev->port_usb=ffffff9b179c3200
+  --> error happens in usb_ep_enable().
+  NCM: ncm_disable: ncm=ffffff9b179c3200
+  --> no gether_disconnect() since ncm->port.in_ep->enabled is false.
+  NCM: ncm_unbind: ncm unbind ncm=ffffff9b179c3200
+  NCM: ncm_free: ncm free ncm=ffffff9b179c3200   <-- released ncm
+
+[function link via configfs]
+  NCM: ncm_alloc: ncm alloc ncm=ffffff9ac4f8a000
+  NCM: ncm_bind: ncm bind ncm=ffffff9ac4f8a000
+  NCM: ncm_set_alt: ncm=ffffff9ac4f8a000 alt=0
+  usb0: eth_open dev->port_usb=ffffff9b179c3200  <-- previous released ncm
+  usb0: eth_start dev->port_usb=ffffff9b179c3200 <--
+  eth_start_xmit()
+  --> dev->wrap()
+  Unable to handle kernel paging request at virtual address dead00000000014f
+
+This patch addresses the issue by checking that 'ncm->netdev' is not NULL in
+ncm_disable() before calling gether_disconnect() to deassociate
+'dev->port_usb'. Checking 'ncm->netdev' is a better condition for calling
+gether_connect()/gether_disconnect() than 'ncm->port.in_ep->enabled', since
+the endpoint might not be enabled even though the gether connection has been
+established.
+
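+A sketch of the guard used in both places (mirroring the hunks below;
+'ncm->netdev' is assigned from gether_connect() in ncm_set_alt(), so it
+tracks whether the gether link was actually established):
+
+	if (ncm->netdev) {		/* link established earlier */
+		ncm->netdev = NULL;
+		gether_disconnect(&ncm->port);
+	}
+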
+Signed-off-by: Norihiko Hama <Norihiko.Hama@alpsalpine.com>
+Cc: stable <stable@kernel.org>
+Link: https://lore.kernel.org/r/20240327023550.51214-1-Norihiko.Hama@alpsalpine.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/gadget/function/f_ncm.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/usb/gadget/function/f_ncm.c
++++ b/drivers/usb/gadget/function/f_ncm.c
+@@ -878,7 +878,7 @@ static int ncm_set_alt(struct usb_functi
+               if (alt > 1)
+                       goto fail;
+-              if (ncm->port.in_ep->enabled) {
++              if (ncm->netdev) {
+                       DBG(cdev, "reset ncm\n");
+                       ncm->netdev = NULL;
+                       gether_disconnect(&ncm->port);
+@@ -1367,7 +1367,7 @@ static void ncm_disable(struct usb_funct
+       DBG(cdev, "ncm deactivated\n");
+-      if (ncm->port.in_ep->enabled) {
++      if (ncm->netdev) {
+               ncm->netdev = NULL;
+               gether_disconnect(&ncm->port);
+       }
diff --git a/queue-6.8/usb-serial-option-add-fibocom-fm135-gl-variants.patch b/queue-6.8/usb-serial-option-add-fibocom-fm135-gl-variants.patch
new file mode 100644 (file)
index 0000000..2408433
--- /dev/null
@@ -0,0 +1,62 @@
+From 356952b13af5b2c338df1e06889fd1b5e12cbbf4 Mon Sep 17 00:00:00 2001
+From: bolan wang <bolan.wang@fibocom.com>
+Date: Wed, 6 Mar 2024 19:03:39 +0800
+Subject: USB: serial: option: add Fibocom FM135-GL variants
+
+From: bolan wang <bolan.wang@fibocom.com>
+
+commit 356952b13af5b2c338df1e06889fd1b5e12cbbf4 upstream.
+
+Update the USB serial option driver support for the Fibocom
+FM135-GL LTE modules.
+- VID:PID 2cb7:0115, FM135-GL for laptop debug M.2 cards (with MBIM
+interface for Linux/Chrome OS)
+
+0x0115: mbim, diag, at, pipe
+
+Here are the outputs of usb-devices:
+T:  Bus=01 Lev=01 Prnt=01 Port=02 Cnt=01 Dev#= 16 Spd=480 MxCh= 0
+D:  Ver= 2.01 Cls=00(>ifc ) Sub=00 Prot=00 MxPS=64 #Cfgs=  1
+P:  Vendor=2cb7 ProdID=0115 Rev=05.15
+S:  Manufacturer=Fibocom Wireless Inc.
+S:  Product=Fibocom Module
+S:  SerialNumber=12345678
+C:  #Ifs= 6 Cfg#= 1 Atr=a0 MxPwr=500mA
+I:  If#= 0 Alt= 0 #EPs= 1 Cls=02(commc) Sub=0e Prot=00 Driver=cdc_mbim
+E:  Ad=82(I) Atr=03(Int.) MxPS=  64 Ivl=32ms
+I:  If#= 1 Alt= 1 #EPs= 2 Cls=0a(data ) Sub=00 Prot=02 Driver=cdc_mbim
+E:  Ad=01(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=81(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:  If#= 2 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=30 Driver=option
+E:  Ad=02(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=83(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:  If#= 3 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=40 Driver=option
+E:  Ad=03(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=84(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=85(I) Atr=03(Int.) MxPS=  10 Ivl=32ms
+I:  If#= 4 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E:  Ad=04(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=86(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:  If#= 5 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=42 Prot=01 Driver=usbfs
+E:  Ad=05(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=87(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+
+Signed-off-by: bolan wang <bolan.wang@fibocom.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/serial/option.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -2272,6 +2272,8 @@ static const struct usb_device_id option
+       { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) },    /* Fibocom FG150 Diag */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) },          /* Fibocom FG150 AT */
+       { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0111, 0xff) },                   /* Fibocom FM160 (MBIM mode) */
++      { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0115, 0xff),                     /* Fibocom FM135 (laptop MBIM) */
++        .driver_info = RSVD(5) },
+       { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) },                   /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
+       { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) },                   /* Fibocom FM101-GL (laptop MBIM) */
+       { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a3, 0xff) },                   /* Fibocom FM101-GL (laptop MBIM) */
diff --git a/queue-6.8/usb-serial-option-add-lonsung-u8300-u9300-product.patch b/queue-6.8/usb-serial-option-add-lonsung-u8300-u9300-product.patch
new file mode 100644 (file)
index 0000000..4058517
--- /dev/null
@@ -0,0 +1,114 @@
+From cf16ffa17c398434a77b8a373e69287c95b60de2 Mon Sep 17 00:00:00 2001
+From: Coia Prant <coiaprant@gmail.com>
+Date: Mon, 15 Apr 2024 07:26:25 -0700
+Subject: USB: serial: option: add Lonsung U8300/U9300 product
+
+From: Coia Prant <coiaprant@gmail.com>
+
+commit cf16ffa17c398434a77b8a373e69287c95b60de2 upstream.
+
+Update the USB serial option driver to support Longsung U8300/U9300.
+
+For U8300
+
+Interface 4 is used for the QMI interface in the stock firmware of the
+router which uses the U8300 modem.
+Interface 5 is used for the ADB interface in the stock firmware of the
+router which uses the U8300 modem.
+
+Interface mapping is:
+0: unknown (Debug), 1: AT (Modem), 2: AT, 3: PPP (NDIS / Pipe), 4: QMI, 5: ADB
+
+T:  Bus=05 Lev=01 Prnt=03 Port=02 Cnt=01 Dev#=  4 Spd=480 MxCh= 0
+D:  Ver= 2.00 Cls=00(>ifc ) Sub=00 Prot=00 MxPS=64 #Cfgs=  1
+P:  Vendor=1c9e ProdID=9b05 Rev=03.18
+S:  Manufacturer=Android
+S:  Product=Android
+C:  #Ifs= 6 Cfg#= 1 Atr=80 MxPwr=500mA
+I:  If#= 0 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=ff Driver=option
+E:  Ad=01(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=81(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:  If#= 1 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E:  Ad=02(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=82(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=83(I) Atr=03(Int.) MxPS=  10 Ivl=32ms
+I:  If#= 2 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E:  Ad=03(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=84(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=85(I) Atr=03(Int.) MxPS=  10 Ivl=32ms
+I:  If#= 3 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E:  Ad=04(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=86(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=87(I) Atr=03(Int.) MxPS=  10 Ivl=32ms
+I:  If#= 4 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=ff Driver=qmi_wwan
+E:  Ad=05(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=88(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=89(I) Atr=03(Int.) MxPS=   8 Ivl=32ms
+I:  If#= 5 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=42 Prot=01 Driver=(none)
+E:  Ad=06(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=8a(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+
+For U9300
+
+Interface 1 is used for the ADB interface in the stock firmware of the
+router which uses the U9300 modem.
+Interface 4 is used for the QMI interface in the stock firmware of the
+router which uses the U9300 modem.
+
+Interface mapping is:
+0: ADB, 1: AT (Modem), 2: AT, 3: PPP (NDIS / Pipe), 4: QMI
+
+Note: Interface 3 of some models of the U9300 series can send AT commands.
+
+T:  Bus=05 Lev=01 Prnt=05 Port=04 Cnt=01 Dev#=  6 Spd=480 MxCh= 0
+D:  Ver= 2.00 Cls=00(>ifc ) Sub=00 Prot=00 MxPS=64 #Cfgs=  1
+P:  Vendor=1c9e ProdID=9b3c Rev=03.18
+S:  Manufacturer=Android
+S:  Product=Android
+C:  #Ifs= 5 Cfg#= 1 Atr=80 MxPwr=500mA
+I:  If#= 0 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=42 Prot=01 Driver=(none)
+E:  Ad=01(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=81(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:  If#= 1 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E:  Ad=02(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=82(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=83(I) Atr=03(Int.) MxPS=  10 Ivl=32ms
+I:  If#= 2 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E:  Ad=03(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=84(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=85(I) Atr=03(Int.) MxPS=  10 Ivl=32ms
+I:  If#= 3 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E:  Ad=04(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=86(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=87(I) Atr=03(Int.) MxPS=  10 Ivl=32ms
+I:  If#= 4 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=ff Driver=qmi_wwan
+E:  Ad=05(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=88(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=89(I) Atr=03(Int.) MxPS=   8 Ivl=32ms
+
+Tested successfully using Modem Manager on U9300.
+Tested AT commands successfully using If=1, If=2 and If=3 on U9300.
+
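+For reference, a sketch of how these reservations are expressed in option.c's
+device ID table (mirroring the hunk below; RSVD(n) marks interface n as
+reserved so the option TTY driver does not bind it, leaving it free for
+qmi_wwan or ADB):
+
+	{ USB_DEVICE(LONGCHEER_VENDOR_ID, 0x9b05),	/* Longsung U8300 */
+	  .driver_info = RSVD(4) | RSVD(5) },		/* skip QMI (4) and ADB (5) */
+	{ USB_DEVICE(LONGCHEER_VENDOR_ID, 0x9b3c),	/* Longsung U9300 */
+	  .driver_info = RSVD(0) | RSVD(4) },		/* skip ADB (0) and QMI (4) */
+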
+Signed-off-by: Coia Prant <coiaprant@gmail.com>
+Reviewed-by: Lars Melin <larsm17@gmail.com>
+[ johan: drop product defines, trim commit message ]
+Cc: stable@vger.kernel.org
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/serial/option.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -2052,6 +2052,10 @@ static const struct usb_device_id option
+         .driver_info = RSVD(3) },
+       { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9803, 0xff),
+         .driver_info = RSVD(4) },
++      { USB_DEVICE(LONGCHEER_VENDOR_ID, 0x9b05),      /* Longsung U8300 */
++        .driver_info = RSVD(4) | RSVD(5) },
++      { USB_DEVICE(LONGCHEER_VENDOR_ID, 0x9b3c),      /* Longsung U9300 */
++        .driver_info = RSVD(0) | RSVD(4) },
+       { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
+       { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
+       { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
diff --git a/queue-6.8/usb-serial-option-add-rolling-rw101-gl-and-rw135-gl-support.patch b/queue-6.8/usb-serial-option-add-rolling-rw101-gl-and-rw135-gl-support.patch
new file mode 100644 (file)
index 0000000..75fbfe7
--- /dev/null
@@ -0,0 +1,175 @@
+From 311f97a4c7c22a01f8897bddf00428dfd0668e79 Mon Sep 17 00:00:00 2001
+From: Vanillan Wang <vanillanwang@163.com>
+Date: Tue, 16 Apr 2024 18:02:55 +0800
+Subject: USB: serial: option: add Rolling RW101-GL and RW135-GL support
+
+From: Vanillan Wang <vanillanwang@163.com>
+
+commit 311f97a4c7c22a01f8897bddf00428dfd0668e79 upstream.
+
+Update the USB serial option driver support for the Rolling
+LTE modules.
+
+- VID:PID 33f8:01a2, RW101-GL for laptop debug M.2 cards (with MBIM
+interface for Linux/Chrome OS)
+0x01a2: mbim, diag, at, pipe
+- VID:PID 33f8:01a3, RW101-GL for laptop debug M.2 cards (with MBIM
+interface for Linux/Chrome OS)
+0x01a3: mbim, pipe
+- VID:PID 33f8:01a4, RW101-GL for laptop debug M.2 cards (with MBIM
+interface for Linux/Chrome OS)
+0x01a4: mbim, diag, at, pipe
+- VID:PID 33f8:0104, RW101-GL for laptop debug M.2 cards (with RMNET
+interface for Linux/Chrome OS)
+0x0104: RMNET, diag, at, pipe
+- VID:PID 33f8:0115, RW135-GL for laptop debug M.2 cards (with MBIM
+interface for Linux/Chrome OS)
+0x0115: MBIM, diag, at, pipe
+
+Here are the outputs of usb-devices:
+T:  Bus=01 Lev=01 Prnt=01 Port=02 Cnt=01 Dev#=  5 Spd=480 MxCh= 0
+D:  Ver= 2.01 Cls=00(>ifc ) Sub=00 Prot=00 MxPS=64 #Cfgs=  1
+P:  Vendor=33f8 ProdID=01a2 Rev=05.15
+S:  Manufacturer=Rolling Wireless S.a.r.l.
+S:  Product=Rolling Module
+S:  SerialNumber=12345678
+C:  #Ifs= 5 Cfg#= 1 Atr=a0 MxPwr=500mA
+I:  If#= 0 Alt= 0 #EPs= 1 Cls=02(commc) Sub=0e Prot=00 Driver=cdc_mbim
+E:  Ad=82(I) Atr=03(Int.) MxPS=  64 Ivl=32ms
+I:  If#= 1 Alt= 1 #EPs= 2 Cls=0a(data ) Sub=00 Prot=02 Driver=cdc_mbim
+E:  Ad=01(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=81(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:  If#= 2 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=40 Driver=option
+E:  Ad=02(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=83(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=84(I) Atr=03(Int.) MxPS=  10 Ivl=32ms
+I:  If#= 3 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=30 Driver=option
+E:  Ad=03(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=85(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:  If#= 4 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E:  Ad=04(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=86(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+
+T:  Bus=01 Lev=01 Prnt=01 Port=02 Cnt=01 Dev#=  8 Spd=480 MxCh= 0
+D:  Ver= 2.01 Cls=00(>ifc ) Sub=00 Prot=00 MxPS=64 #Cfgs=  1
+P:  Vendor=33f8 ProdID=01a3 Rev=05.15
+S:  Manufacturer=Rolling Wireless S.a.r.l.
+S:  Product=Rolling Module
+S:  SerialNumber=12345678
+C:  #Ifs= 3 Cfg#= 1 Atr=a0 MxPwr=500mA
+I:  If#= 0 Alt= 0 #EPs= 1 Cls=02(commc) Sub=0e Prot=00 Driver=cdc_mbim
+E:  Ad=82(I) Atr=03(Int.) MxPS=  64 Ivl=32ms
+I:  If#= 1 Alt= 1 #EPs= 2 Cls=0a(data ) Sub=00 Prot=02 Driver=cdc_mbim
+E:  Ad=01(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=81(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:  If#= 2 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E:  Ad=02(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=83(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+
+T:  Bus=01 Lev=01 Prnt=01 Port=02 Cnt=01 Dev#= 17 Spd=480 MxCh= 0
+D:  Ver= 2.01 Cls=00(>ifc ) Sub=00 Prot=00 MxPS=64 #Cfgs=  1
+P:  Vendor=33f8 ProdID=01a4 Rev=05.15
+S:  Manufacturer=Rolling Wireless S.a.r.l.
+S:  Product=Rolling Module
+S:  SerialNumber=12345678
+C:  #Ifs= 6 Cfg#= 1 Atr=a0 MxPwr=500mA
+I:  If#= 0 Alt= 0 #EPs= 1 Cls=02(commc) Sub=0e Prot=00 Driver=cdc_mbim
+E:  Ad=82(I) Atr=03(Int.) MxPS=  64 Ivl=32ms
+I:  If#= 1 Alt= 1 #EPs= 2 Cls=0a(data ) Sub=00 Prot=02 Driver=cdc_mbim
+E:  Ad=01(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=81(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:  If#= 2 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=30 Driver=option
+E:  Ad=02(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=83(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:  If#= 3 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=40 Driver=option
+E:  Ad=03(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=84(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=85(I) Atr=03(Int.) MxPS=  10 Ivl=32ms
+I:  If#= 4 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=42 Prot=01 Driver=usbfs
+E:  Ad=04(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=86(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:  If#= 5 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E:  Ad=05(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=87(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+
+T:  Bus=04 Lev=01 Prnt=01 Port=00 Cnt=01 Dev#=  2 Spd=5000 MxCh= 0
+D:  Ver= 3.20 Cls=00(>ifc ) Sub=00 Prot=00 MxPS= 9 #Cfgs=  1
+P:  Vendor=33f8 ProdID=0104 Rev=05.04
+S:  Manufacturer=Rolling Wireless S.a.r.l.
+S:  Product=Rolling Module
+S:  SerialNumber=ba2eb033
+C:  #Ifs= 6 Cfg#= 1 Atr=a0 MxPwr=896mA
+I:  If#= 0 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=30 Driver=option
+E:  Ad=01(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+E:  Ad=81(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+I:  If#= 1 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=40 Driver=option
+E:  Ad=02(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+E:  Ad=82(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+E:  Ad=83(I) Atr=03(Int.) MxPS=  10 Ivl=32ms
+I:  If#= 2 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=40 Driver=option
+E:  Ad=03(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+E:  Ad=84(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+E:  Ad=85(I) Atr=03(Int.) MxPS=  10 Ivl=32ms
+I:  If#= 3 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=40 Driver=option
+E:  Ad=04(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+E:  Ad=86(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+E:  Ad=87(I) Atr=03(Int.) MxPS=  10 Ivl=32ms
+I:  If#= 4 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=50 Driver=qmi_wwan
+E:  Ad=0f(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+E:  Ad=88(I) Atr=03(Int.) MxPS=   8 Ivl=32ms
+E:  Ad=8e(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+I:  If#= 5 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=42 Prot=01 Driver=usbfs
+E:  Ad=05(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+E:  Ad=89(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+
+T:  Bus=01 Lev=01 Prnt=01 Port=02 Cnt=01 Dev#= 16 Spd=480 MxCh= 0
+D:  Ver= 2.01 Cls=00(>ifc ) Sub=00 Prot=00 MxPS=64 #Cfgs=  1
+P:  Vendor=33f8 ProdID=0115 Rev=05.15
+S:  Manufacturer=Rolling Wireless S.a.r.l.
+S:  Product=Rolling Module
+S:  SerialNumber=12345678
+C:  #Ifs= 6 Cfg#= 1 Atr=a0 MxPwr=500mA
+I:  If#= 0 Alt= 0 #EPs= 1 Cls=02(commc) Sub=0e Prot=00 Driver=cdc_mbim
+E:  Ad=82(I) Atr=03(Int.) MxPS=  64 Ivl=32ms
+I:  If#= 1 Alt= 1 #EPs= 2 Cls=0a(data ) Sub=00 Prot=02 Driver=cdc_mbim
+E:  Ad=01(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=81(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:  If#= 2 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=30 Driver=option
+E:  Ad=02(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=83(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:  If#= 3 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=40 Driver=option
+E:  Ad=03(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=84(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=85(I) Atr=03(Int.) MxPS=  10 Ivl=32ms
+I:  If#= 4 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E:  Ad=04(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=86(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:  If#= 5 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=42 Prot=01 Driver=usbfs
+E:  Ad=05(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=87(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+
+Signed-off-by: Vanillan Wang <vanillanwang@163.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/serial/option.c |    8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -2307,6 +2307,14 @@ static const struct usb_device_id option
+       { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) },                   /* GosunCn GM500 RNDIS */
+       { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) },                   /* GosunCn GM500 MBIM */
+       { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) },                   /* GosunCn GM500 ECM/NCM */
++      { USB_DEVICE(0x33f8, 0x0104),                                           /* Rolling RW101-GL (laptop RMNET) */
++        .driver_info = RSVD(4) | RSVD(5) },
++      { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x01a2, 0xff) },                   /* Rolling RW101-GL (laptop MBIM) */
++      { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x01a3, 0xff) },                   /* Rolling RW101-GL (laptop MBIM) */
++      { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x01a4, 0xff),                     /* Rolling RW101-GL (laptop MBIM) */
++        .driver_info = RSVD(4) },
++      { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x0115, 0xff),                     /* Rolling RW135-GL (laptop MBIM) */
++        .driver_info = RSVD(5) },
+       { USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
+       { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) },
+       { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) },
diff --git a/queue-6.8/usb-serial-option-add-support-for-fibocom-fm650-fg650.patch b/queue-6.8/usb-serial-option-add-support-for-fibocom-fm650-fg650.patch
new file mode 100644 (file)
index 0000000..a395d09
--- /dev/null
@@ -0,0 +1,139 @@
+From fb1f4584b1215e8c209f6b3a4028ed8351a0e961 Mon Sep 17 00:00:00 2001
+From: Chuanhong Guo <gch981213@gmail.com>
+Date: Tue, 12 Mar 2024 14:29:12 +0800
+Subject: USB: serial: option: add support for Fibocom FM650/FG650
+
+From: Chuanhong Guo <gch981213@gmail.com>
+
+commit fb1f4584b1215e8c209f6b3a4028ed8351a0e961 upstream.
+
+Fibocom FM650/FG650 are 5G modems with ECM/NCM/RNDIS/MBIM modes.
+This patch adds support for all 4 modes.
+
+In all 4 modes, the first serial port is the AT console while the other
+3 appear to be diagnostic interfaces for dumping modem logs.
+
+usb-devices output for all modes:
+
+ECM:
+T:  Bus=04 Lev=01 Prnt=01 Port=00 Cnt=01 Dev#=  5 Spd=5000 MxCh= 0
+D:  Ver= 3.10 Cls=00(>ifc ) Sub=00 Prot=00 MxPS= 9 #Cfgs=  1
+P:  Vendor=2cb7 ProdID=0a04 Rev=04.04
+S:  Manufacturer=Fibocom Wireless Inc.
+S:  Product=FG650 Module
+S:  SerialNumber=0123456789ABCDEF
+C:  #Ifs= 5 Cfg#= 1 Atr=c0 MxPwr=504mA
+I:  If#= 0 Alt= 0 #EPs= 1 Cls=02(commc) Sub=06 Prot=00 Driver=cdc_ether
+E:  Ad=82(I) Atr=03(Int.) MxPS=  16 Ivl=32ms
+I:  If#= 1 Alt= 1 #EPs= 2 Cls=0a(data ) Sub=00 Prot=00 Driver=cdc_ether
+E:  Ad=01(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+E:  Ad=81(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+I:  If#= 2 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E:  Ad=02(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+E:  Ad=83(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+I:  If#= 3 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E:  Ad=03(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+E:  Ad=84(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+I:  If#= 4 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E:  Ad=04(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+E:  Ad=85(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+
+NCM:
+T:  Bus=04 Lev=01 Prnt=01 Port=00 Cnt=01 Dev#=  6 Spd=5000 MxCh= 0
+D:  Ver= 3.10 Cls=00(>ifc ) Sub=00 Prot=00 MxPS= 9 #Cfgs=  1
+P:  Vendor=2cb7 ProdID=0a05 Rev=04.04
+S:  Manufacturer=Fibocom Wireless Inc.
+S:  Product=FG650 Module
+S:  SerialNumber=0123456789ABCDEF
+C:  #Ifs= 6 Cfg#= 1 Atr=c0 MxPwr=504mA
+I:  If#= 0 Alt= 0 #EPs= 1 Cls=02(commc) Sub=0d Prot=00 Driver=cdc_ncm
+E:  Ad=82(I) Atr=03(Int.) MxPS=  16 Ivl=32ms
+I:  If#= 1 Alt= 1 #EPs= 2 Cls=0a(data ) Sub=00 Prot=01 Driver=cdc_ncm
+E:  Ad=01(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+E:  Ad=81(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+I:  If#= 2 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E:  Ad=02(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+E:  Ad=83(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+I:  If#= 3 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E:  Ad=03(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+E:  Ad=84(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+I:  If#= 4 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E:  Ad=04(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+E:  Ad=85(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+I:  If#= 5 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E:  Ad=05(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+E:  Ad=86(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+
+RNDIS:
+T:  Bus=04 Lev=01 Prnt=01 Port=00 Cnt=01 Dev#=  4 Spd=5000 MxCh= 0
+D:  Ver= 3.10 Cls=00(>ifc ) Sub=00 Prot=00 MxPS= 9 #Cfgs=  1
+P:  Vendor=2cb7 ProdID=0a06 Rev=04.04
+S:  Manufacturer=Fibocom Wireless Inc.
+S:  Product=FG650 Module
+S:  SerialNumber=0123456789ABCDEF
+C:  #Ifs= 6 Cfg#= 1 Atr=c0 MxPwr=504mA
+I:  If#= 0 Alt= 0 #EPs= 1 Cls=e0(wlcon) Sub=01 Prot=03 Driver=rndis_host
+E:  Ad=82(I) Atr=03(Int.) MxPS=   8 Ivl=32ms
+I:  If#= 1 Alt= 0 #EPs= 2 Cls=0a(data ) Sub=00 Prot=00 Driver=rndis_host
+E:  Ad=01(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+E:  Ad=81(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+I:  If#= 2 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E:  Ad=02(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+E:  Ad=83(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+I:  If#= 3 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E:  Ad=03(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+E:  Ad=84(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+I:  If#= 4 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E:  Ad=04(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+E:  Ad=85(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+I:  If#= 5 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E:  Ad=05(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+E:  Ad=86(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+
+MBIM:
+T:  Bus=04 Lev=01 Prnt=01 Port=00 Cnt=01 Dev#=  7 Spd=5000 MxCh= 0
+D:  Ver= 3.10 Cls=00(>ifc ) Sub=00 Prot=00 MxPS= 9 #Cfgs=  1
+P:  Vendor=2cb7 ProdID=0a07 Rev=04.04
+S:  Manufacturer=Fibocom Wireless Inc.
+S:  Product=FG650 Module
+S:  SerialNumber=0123456789ABCDEF
+C:  #Ifs= 6 Cfg#= 1 Atr=c0 MxPwr=504mA
+I:  If#= 0 Alt= 0 #EPs= 1 Cls=02(commc) Sub=0e Prot=00 Driver=cdc_mbim
+E:  Ad=82(I) Atr=03(Int.) MxPS=  64 Ivl=32ms
+I:  If#= 1 Alt= 1 #EPs= 2 Cls=0a(data ) Sub=00 Prot=02 Driver=cdc_mbim
+E:  Ad=01(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+E:  Ad=81(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+I:  If#= 2 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E:  Ad=02(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+E:  Ad=83(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+I:  If#= 3 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E:  Ad=03(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+E:  Ad=84(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+I:  If#= 4 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E:  Ad=04(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+E:  Ad=85(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+I:  If#= 5 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E:  Ad=05(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+E:  Ad=86(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+
+Signed-off-by: Chuanhong Guo <gch981213@gmail.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/serial/option.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -2279,6 +2279,10 @@ static const struct usb_device_id option
+       { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a3, 0xff) },                   /* Fibocom FM101-GL (laptop MBIM) */
+       { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff),                     /* Fibocom FM101-GL (laptop MBIM) */
+         .driver_info = RSVD(4) },
++      { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a04, 0xff) },                   /* Fibocom FM650-CN (ECM mode) */
++      { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a05, 0xff) },                   /* Fibocom FM650-CN (NCM mode) */
++      { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a06, 0xff) },                   /* Fibocom FM650-CN (RNDIS mode) */
++      { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a07, 0xff) },                   /* Fibocom FM650-CN (MBIM mode) */
+       { USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) },                   /* LongSung M5710 */
+       { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) },                   /* GosunCn GM500 RNDIS */
+       { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) },                   /* GosunCn GM500 MBIM */
diff --git a/queue-6.8/usb-serial-option-add-telit-fn920c04-rmnet-compositions.patch b/queue-6.8/usb-serial-option-add-telit-fn920c04-rmnet-compositions.patch
new file mode 100644 (file)
index 0000000..2172320
--- /dev/null
@@ -0,0 +1,107 @@
+From 582ee2f9d268d302595db3e36b985e5cbb93284d Mon Sep 17 00:00:00 2001
+From: Daniele Palmas <dnlplm@gmail.com>
+Date: Thu, 18 Apr 2024 13:34:30 +0200
+Subject: USB: serial: option: add Telit FN920C04 rmnet compositions
+
+From: Daniele Palmas <dnlplm@gmail.com>
+
+commit 582ee2f9d268d302595db3e36b985e5cbb93284d upstream.
+
+Add the following Telit FN920C04 compositions:
+
+0x10a0: rmnet + tty (AT/NMEA) + tty (AT) + tty (diag)
+T:  Bus=03 Lev=01 Prnt=03 Port=06 Cnt=01 Dev#=  5 Spd=480  MxCh= 0
+D:  Ver= 2.01 Cls=00(>ifc ) Sub=00 Prot=00 MxPS=64 #Cfgs=  1
+P:  Vendor=1bc7 ProdID=10a0 Rev=05.15
+S:  Manufacturer=Telit Cinterion
+S:  Product=FN920
+S:  SerialNumber=92c4c4d8
+C:  #Ifs= 4 Cfg#= 1 Atr=e0 MxPwr=500mA
+I:  If#= 0 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=50 Driver=qmi_wwan
+E:  Ad=01(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=81(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=82(I) Atr=03(Int.) MxPS=   8 Ivl=32ms
+I:  If#= 1 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=60 Driver=option
+E:  Ad=02(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=83(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=84(I) Atr=03(Int.) MxPS=  10 Ivl=32ms
+I:  If#= 2 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=40 Driver=option
+E:  Ad=03(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=85(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=86(I) Atr=03(Int.) MxPS=  10 Ivl=32ms
+I:  If#= 3 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=30 Driver=option
+E:  Ad=04(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=87(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+
+0x10a4: rmnet + tty (AT) + tty (AT) + tty (diag)
+T:  Bus=03 Lev=01 Prnt=03 Port=06 Cnt=01 Dev#=  8 Spd=480  MxCh= 0
+D:  Ver= 2.01 Cls=00(>ifc ) Sub=00 Prot=00 MxPS=64 #Cfgs=  1
+P:  Vendor=1bc7 ProdID=10a4 Rev=05.15
+S:  Manufacturer=Telit Cinterion
+S:  Product=FN920
+S:  SerialNumber=92c4c4d8
+C:  #Ifs= 4 Cfg#= 1 Atr=e0 MxPwr=500mA
+I:  If#= 0 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=50 Driver=qmi_wwan
+E:  Ad=01(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=81(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=82(I) Atr=03(Int.) MxPS=   8 Ivl=32ms
+I:  If#= 1 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=40 Driver=option
+E:  Ad=02(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=83(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=84(I) Atr=03(Int.) MxPS=  10 Ivl=32ms
+I:  If#= 2 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=40 Driver=option
+E:  Ad=03(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=85(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=86(I) Atr=03(Int.) MxPS=  10 Ivl=32ms
+I:  If#= 3 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=30 Driver=option
+E:  Ad=04(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=87(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+
+0x10a9: rmnet + tty (AT) + tty (diag) + DPL (data packet logging) + adb
+T:  Bus=03 Lev=01 Prnt=03 Port=06 Cnt=01 Dev#=  9 Spd=480  MxCh= 0
+D:  Ver= 2.01 Cls=00(>ifc ) Sub=00 Prot=00 MxPS=64 #Cfgs=  1
+P:  Vendor=1bc7 ProdID=10a9 Rev=05.15
+S:  Manufacturer=Telit Cinterion
+S:  Product=FN920
+S:  SerialNumber=92c4c4d8
+C:  #Ifs= 5 Cfg#= 1 Atr=e0 MxPwr=500mA
+I:  If#= 0 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=50 Driver=qmi_wwan
+E:  Ad=01(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=81(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=82(I) Atr=03(Int.) MxPS=   8 Ivl=32ms
+I:  If#= 1 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=40 Driver=option
+E:  Ad=02(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=83(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=84(I) Atr=03(Int.) MxPS=  10 Ivl=32ms
+I:  If#= 2 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=30 Driver=option
+E:  Ad=03(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=85(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:  If#= 3 Alt= 0 #EPs= 1 Cls=ff(vend.) Sub=ff Prot=80 Driver=(none)
+E:  Ad=86(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:  If#= 4 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=42 Prot=01 Driver=(none)
+E:  Ad=04(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=87(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+
+Signed-off-by: Daniele Palmas <dnlplm@gmail.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/serial/option.c |    6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -1376,6 +1376,12 @@ static const struct usb_device_id option
+         .driver_info = NCTRL(2) | RSVD(3) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1083, 0xff),    /* Telit FE990 (ECM) */
+         .driver_info = NCTRL(0) | RSVD(1) },
++      { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a0, 0xff),    /* Telit FN20C04 (rmnet) */
++        .driver_info = RSVD(0) | NCTRL(3) },
++      { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a4, 0xff),    /* Telit FN20C04 (rmnet) */
++        .driver_info = RSVD(0) | NCTRL(3) },
++      { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a9, 0xff),    /* Telit FN20C04 (rmnet) */
++        .driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
+       { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
+         .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
+       { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
diff --git a/queue-6.8/usb-serial-option-support-quectel-em060k-sub-models.patch b/queue-6.8/usb-serial-option-support-quectel-em060k-sub-models.patch
new file mode 100644 (file)
index 0000000..85c4823
--- /dev/null
@@ -0,0 +1,86 @@
+From c840244aba7ad2b83ed904378b36bd6aef25511c Mon Sep 17 00:00:00 2001
+From: Jerry Meng <jerry-meng@foxmail.com>
+Date: Mon, 15 Apr 2024 15:04:29 +0800
+Subject: USB: serial: option: support Quectel EM060K sub-models
+
+From: Jerry Meng <jerry-meng@foxmail.com>
+
+commit c840244aba7ad2b83ed904378b36bd6aef25511c upstream.
+
+EM060K_129, EM060K_12a, EM060K_12b and EM060K_12c are EM060K sub-models,
+having the same name "Quectel EM060K-GL" and the same interface layout.
+
+MBIM + GNSS + DIAG + NMEA + AT + QDSS + DPL
+
+T:  Bus=03 Lev=01 Prnt=01 Port=01 Cnt=02 Dev#=  8 Spd=480  MxCh= 0
+D:  Ver= 2.00 Cls=00(>ifc ) Sub=00 Prot=00 MxPS=64 #Cfgs=  1
+P:  Vendor=2c7c ProdID=0129 Rev= 5.04
+S:  Manufacturer=Quectel
+S:  Product=Quectel EM060K-GL
+S:  SerialNumber=f6fa08b6
+C:* #Ifs= 8 Cfg#= 1 Atr=a0 MxPwr=500mA
+A:  FirstIf#= 0 IfCount= 2 Cls=02(comm.) Sub=0e Prot=00
+I:* If#= 0 Alt= 0 #EPs= 1 Cls=02(comm.) Sub=0e Prot=00 Driver=cdc_mbim
+E:  Ad=81(I) Atr=03(Int.) MxPS=  64 Ivl=32ms
+I:  If#= 1 Alt= 0 #EPs= 0 Cls=0a(data ) Sub=00 Prot=02 Driver=cdc_mbim
+I:* If#= 1 Alt= 1 #EPs= 2 Cls=0a(data ) Sub=00 Prot=02 Driver=cdc_mbim
+E:  Ad=8e(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=0f(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:* If#= 2 Alt= 0 #EPs= 1 Cls=ff(vend.) Sub=ff Prot=ff Driver=(none)
+E:  Ad=82(I) Atr=03(Int.) MxPS=  64 Ivl=32ms
+I:* If#= 3 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=30 Driver=option
+E:  Ad=01(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=83(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:* If#= 4 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=40 Driver=option
+E:  Ad=85(I) Atr=03(Int.) MxPS=  10 Ivl=32ms
+E:  Ad=84(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=02(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:* If#= 5 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=40 Driver=option
+E:  Ad=87(I) Atr=03(Int.) MxPS=  10 Ivl=32ms
+E:  Ad=86(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=03(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:* If#= 6 Alt= 0 #EPs= 1 Cls=ff(vend.) Sub=ff Prot=70 Driver=(none)
+E:  Ad=88(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:* If#= 7 Alt= 0 #EPs= 1 Cls=ff(vend.) Sub=ff Prot=80 Driver=(none)
+E:  Ad=8f(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+
+Signed-off-by: Jerry Meng <jerry-meng@foxmail.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/serial/option.c |   16 ++++++++++++++++
+ 1 file changed, 16 insertions(+)
+
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -255,6 +255,10 @@ static void option_instat_callback(struc
+ #define QUECTEL_PRODUCT_EM061K_LMS            0x0124
+ #define QUECTEL_PRODUCT_EC25                  0x0125
+ #define QUECTEL_PRODUCT_EM060K_128            0x0128
++#define QUECTEL_PRODUCT_EM060K_129            0x0129
++#define QUECTEL_PRODUCT_EM060K_12a            0x012a
++#define QUECTEL_PRODUCT_EM060K_12b            0x012b
++#define QUECTEL_PRODUCT_EM060K_12c            0x012c
+ #define QUECTEL_PRODUCT_EG91                  0x0191
+ #define QUECTEL_PRODUCT_EG95                  0x0195
+ #define QUECTEL_PRODUCT_BG96                  0x0296
+@@ -1218,6 +1222,18 @@ static const struct usb_device_id option
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0xff, 0x30) },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0x00, 0x40) },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0xff, 0x40) },
++      { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_129, 0xff, 0xff, 0x30) },
++      { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_129, 0xff, 0x00, 0x40) },
++      { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_129, 0xff, 0xff, 0x40) },
++      { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12a, 0xff, 0xff, 0x30) },
++      { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12a, 0xff, 0x00, 0x40) },
++      { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12a, 0xff, 0xff, 0x40) },
++      { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12b, 0xff, 0xff, 0x30) },
++      { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12b, 0xff, 0x00, 0x40) },
++      { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12b, 0xff, 0xff, 0x40) },
++      { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12c, 0xff, 0xff, 0x30) },
++      { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12c, 0xff, 0x00, 0x40) },
++      { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12c, 0xff, 0xff, 0x40) },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x30) },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0x00, 0x40) },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x40) },
diff --git a/queue-6.8/usb-typec-tcpm-correct-the-pdo-counting-in-pd_set.patch b/queue-6.8/usb-typec-tcpm-correct-the-pdo-counting-in-pd_set.patch
new file mode 100644 (file)
index 0000000..7b8a1fa
--- /dev/null
@@ -0,0 +1,56 @@
+From c4128304c2169b4664ed6fb6200f228cead2ab70 Mon Sep 17 00:00:00 2001
+From: Kyle Tso <kyletso@google.com>
+Date: Thu, 4 Apr 2024 21:35:17 +0800
+Subject: usb: typec: tcpm: Correct the PDO counting in pd_set
+
+From: Kyle Tso <kyletso@google.com>
+
+commit c4128304c2169b4664ed6fb6200f228cead2ab70 upstream.
+
+Off-by-one errors happen because nr_snk_pdo and nr_src_pdo are incorrectly
+incremented by one. When the loop is left, its index is already equal to the
+number of PDOs to be updated, so it does not need to be incremented.
+
+When doing the power negotiation, TCPM relies on "nr_snk_pdo" as the size of
+the local sink PDO array to match against the Source capabilities of the
+partner port. If the off-by-one overflow occurs, a wrong RDO might be sent
+and unexpected power transfer might happen, such as a higher voltage or
+current than expected.
+
+"nr_src_pdo" is used to set the Rp level when the port is in Source
+role. It is also the array size of the local Source capabilities when
+filling up the buffer which will be sent as the Source PDOs (such as
+in Power Negotiation). If the off-by-one overflow occurs, a wrong Rp
+level might be set and wrong Source PDOs will be sent to the partner
+port. This could potentially cause over current or port resets.
+
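+A self-contained illustration of why the post-loop index already equals the
+element count (plain C; the toy values merely stand in for PDOs in the
+fixed-size array, and PDO_MAX_OBJECTS is 7 as in the PD headers):
+
+	#include <stdio.h>
+
+	#define PDO_MAX_OBJECTS	7
+
+	int main(void)
+	{
+		unsigned int pdo[PDO_MAX_OBJECTS] = { 0x0a0191f4, 0x2c9141f4, 0 };
+		int i;
+
+		/* Same loop shape as tcpm_pd_set(): stop at the first zero entry. */
+		for (i = 0; i < PDO_MAX_OBJECTS && pdo[i]; i++)
+			;
+
+		/* i is 2 here: it already equals the number of valid PDOs,
+		 * so "i + 1" would over-count by one. */
+		printf("valid PDOs: %d\n", i);
+		return 0;
+	}
+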
+Fixes: cd099cde4ed2 ("usb: typec: tcpm: Support multiple capabilities")
+Cc: stable@vger.kernel.org
+Signed-off-by: Kyle Tso <kyletso@google.com>
+Reviewed-by: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Link: https://lore.kernel.org/r/20240404133517.2707955-1-kyletso@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/typec/tcpm/tcpm.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -6111,14 +6111,14 @@ static int tcpm_pd_set(struct typec_port
+       if (data->sink_desc.pdo[0]) {
+               for (i = 0; i < PDO_MAX_OBJECTS && data->sink_desc.pdo[i]; i++)
+                       port->snk_pdo[i] = data->sink_desc.pdo[i];
+-              port->nr_snk_pdo = i + 1;
++              port->nr_snk_pdo = i;
+               port->operating_snk_mw = data->operating_snk_mw;
+       }
+       if (data->source_desc.pdo[0]) {
+               for (i = 0; i < PDO_MAX_OBJECTS && data->source_desc.pdo[i]; i++)
+                       port->src_pdo[i] = data->source_desc.pdo[i];
+-              port->nr_src_pdo = i + 1;
++              port->nr_src_pdo = i;
+       }
+       switch (port->state) {