From 4b546ffb4e3fb6fbc734eace4509e486cf2400d8 Mon Sep 17 00:00:00 2001 From: Sasha Levin Date: Sat, 6 Jan 2024 20:58:49 -0500 Subject: [PATCH] Fixes for 6.6 Signed-off-by: Sasha Levin --- .../accel-qaic-fix-gem-import-path-code.patch | 53 +++ ...c-implement-quirk-for-soc_hw_version.patch | 59 +++ ...-acpi_thermal_unregister_thermal_zon.patch | 57 +++ ...e_mount-mediation-by-detecting-if-so.patch | 71 +++ ...x-array-index-out-of-bounds-read-in-.patch | 64 +++ ...x-add-check-for-usbnet_get_endpoints.patch | 38 ++ ...ix-error-handler-with-pm_runtime_ena.patch | 65 +++ ...8186-fix-aud_pad_top-register-and-of.patch | 39 ++ ...n-g12a-toacodec-fix-event-generation.patch | 39 ++ ...toacodec-validate-written-enum-value.patch | 40 ++ ...tohdmitx-fix-event-generation-for-s-.patch | 39 ++ ...tohdmitx-validate-written-enum-value.patch | 50 +++ ...is-applied-code-from-bnxt_cfg_ntp_fi.patch | 47 ++ ...ftool-align-output-skeleton-elf-code.patch | 70 +++ .../bpftool-fix-wcast-qual-warning.patch | 45 ++ ...ip-rk3128-fix-aclk_peri_src-s-parent.patch | 76 ++++ ...p-rk3128-fix-sclk_sdmmc-s-clock-name.patch | 41 ++ ...ease-stack-based-print-buffer-size-i.patch | 67 +++ ...ilicon-qm-fix-eq-aeq-interrupt-issue.patch | 208 +++++++++ ...pto-qat-fix-double-free-during-reset.patch | 41 ++ ...pawn-for-underlying-single-block-cip.patch | 144 +++++++ ...xl-add-cxl_decoders_committed-helper.patch | 125 ++++++ ...hold-region_rwsem-while-reading-pois.patch | 100 +++++ ...pmu-ensure-put_device-on-pmu-devices.patch | 78 ++++ ...ma-add-judgment-on-enabling-round-ro.patch | 60 +++ ...ma-do-not-suspend-and-resume-the-mas.patch | 88 ++++ ...ma-fix-wrong-pointer-check-in-fsl_ed.patch | 40 ++ ...rotect-int_handle-field-in-hw-descri.patch | 59 +++ ...ne-ti-k3-psil-am62-fix-spi-pdma-data.patch | 70 +++ ...e-ti-k3-psil-am62a-fix-spi-pdma-data.patch | 70 +++ ...increase-frame-warning-limit-with-ka.patch | 47 ++ ...ay-increase-num-voltage-states-to-40.patch | 42 ++ ...e-ps8640-never-store-more-than-msg-s.patch | 66 +++ ...8640-fix-size-mismatch-warning-w-len.patch | 51 +++ ...65dsi86-never-store-more-than-msg-si.patch | 55 +++ ...tel_pre_plane_updates-also-for-pipes.patch | 49 +++ ...passing-the-correct-dpcd_rev-for-drm.patch | 42 ++ ...date-handling-of-mmio-triggered-repo.patch | 88 ++++ ...input-checks-to-prevent-config-with-.patch | 53 +++ ...e-after-free-in-i40e_aqc_add_filters.patch | 120 ++++++ ...tore-vf-msi-x-state-during-pci-reset.patch | 104 +++++ .../ice-fix-link_down_on_close-message.patch | 55 +++ ...-vsi-with-link-down-on-close-enabled.patch | 40 ++ queue-6.6/igc-check-vlan-ethertype-mask.patch | 72 ++++ queue-6.6/igc-check-vlan-tci-mask.patch | 141 ++++++ queue-6.6/igc-fix-hicredit-calculation.patch | 45 ++ ...vlan-ethertype-matching-back-to-user.patch | 75 ++++ ...s16475-use-bit-numbers-in-assign_bit.patch | 43 ++ ...rt-enforce_cache_coherency-only-for-.patch | 84 ++++ ...increment-by-align-value-in-get_free.patch | 53 +++ ...ie-fix-wrong-vir-37-when-mso-is-used.patch | 70 +++ .../media-qcom-camss-fix-genpd-cleanup.patch | 113 +++++ ...s-fix-v4l2-async-notifier-error-path.patch | 92 ++++ ...ge-fix-receive-packet-race-condition.patch | 63 +++ ...ock-unlock-page-to-lock-unlock-folio.patch | 202 +++++++++ ...e-pass-the-folio-and-the-page-to-col.patch | 115 +++++ ...-fcs-generation-for-fragmented-skbuf.patch | 46 ++ ...sk_dst_get-and-__sk_dst_get-argument.patch | 42 ++ ...ssing-getsockopt-so_timestamping_new.patch | 60 +++ ...ssing-so_timestamping_new-cmsg-suppo.patch | 40 ++ ...t-libwx-fix-memory-leak-on-free-page.patch | 
176 ++++++++ ...-prevent-mss-overflow-in-skb_segment.patch | 117 +++++ ...potential-memleak-in-ql_alloc_buffer.patch | 44 ++ ...ait-for-operating-mode-to-be-applied.patch | 181 ++++++++ ...-restore-msg_namelen-in-sock_sendmsg.patch | 55 +++ ...t-fix-possible-memory-leak-in-em_tex.patch | 40 ++ ...lid-link-access-in-dumping-smc-r-con.patch | 91 ++++ ...-fix-action-not-being-set-for-all-ct.patch | 54 +++ ...les-set-transport-offset-from-mac-he.patch | 75 ++++ ...mediate-drop-chain-reference-counter.patch | 36 ++ ...ld-a-ref-to-llcp_local-dev-when-hold.patch | 128 ++++++ ...ays-configure-nix-tx-link-credits-ba.patch | 184 ++++++++ ...-marking-couple-of-structure-as-__pa.patch | 46 ++ ...enable-mac-tx-in-otx2_stop-processin.patch | 93 ++++ ...i-mt8183-fix-minimal-supported-frequ.patch | 40 ++ ...rn-negative-error-code-in-sp_usb_phy.patch | 37 ++ ...fix-register-offset-when-parent-is-n.patch | 59 +++ ...vas-migration-suspend-waits-for-no-i.patch | 240 +++++++++++ ...r8169-fix-pci-error-on-system-resume.patch | 49 +++ ...rcu-break-rcu_node_0-rq-__lock-order.patch | 148 +++++++ queue-6.6/rcu-introduce-rcu_cpu_online.patch | 66 +++ ...u-tasks-handle-new-pf_idle-semantics.patch | 103 +++++ ...s-trace-handle-new-pf_idle-semantics.patch | 51 +++ .../rdma-mlx5-fix-mkey-cache-wq-flush.patch | 49 +++ ...e-unaligned-access-speed-if-already-.patch | 42 ++ ...-external-interrupt-atomically-for-i.patch | 108 +++++ ...g-do-not-set-port-down-when-adding-t.patch | 53 +++ queue-6.6/series | 94 ++++ ...double-free-bug-in-efx_probe_filters.patch | 51 +++ .../tcp-derive-delack_max-from-rto_min.patch | 119 +++++ ...tty-overhaul-mtty-interrupt-handling.patch | 406 ++++++++++++++++++ ...avoid-data-races-on-dev-stats-fields.patch | 142 ++++++ ...net-fix-missing-dma-unmap-for-resize.patch | 164 +++++++ ...pcie-don-t-synchronize-irqs-from-irq.patch | 170 ++++++++ ...ffer-support-for-sockets-sharing-ume.patch | 74 ++++ 95 files changed, 7796 insertions(+) create mode 100644 queue-6.6/accel-qaic-fix-gem-import-path-code.patch create mode 100644 queue-6.6/accel-qaic-implement-quirk-for-soc_hw_version.patch create mode 100644 queue-6.6/acpi-thermal-fix-acpi_thermal_unregister_thermal_zon.patch create mode 100644 queue-6.6/apparmor-fix-move_mount-mediation-by-detecting-if-so.patch create mode 100644 queue-6.6/arm-sun9i-smp-fix-array-index-out-of-bounds-read-in-.patch create mode 100644 queue-6.6/asix-add-check-for-usbnet_get_endpoints.patch create mode 100644 queue-6.6/asoc-fsl_rpmsg-fix-error-handler-with-pm_runtime_ena.patch create mode 100644 queue-6.6/asoc-mediatek-mt8186-fix-aud_pad_top-register-and-of.patch create mode 100644 queue-6.6/asoc-meson-g12a-toacodec-fix-event-generation.patch create mode 100644 queue-6.6/asoc-meson-g12a-toacodec-validate-written-enum-value.patch create mode 100644 queue-6.6/asoc-meson-g12a-tohdmitx-fix-event-generation-for-s-.patch create mode 100644 queue-6.6/asoc-meson-g12a-tohdmitx-validate-written-enum-value.patch create mode 100644 queue-6.6/bnxt_en-remove-mis-applied-code-from-bnxt_cfg_ntp_fi.patch create mode 100644 queue-6.6/bpftool-align-output-skeleton-elf-code.patch create mode 100644 queue-6.6/bpftool-fix-wcast-qual-warning.patch create mode 100644 queue-6.6/clk-rockchip-rk3128-fix-aclk_peri_src-s-parent.patch create mode 100644 queue-6.6/clk-rockchip-rk3128-fix-sclk_sdmmc-s-clock-name.patch create mode 100644 queue-6.6/clk-si521xx-increase-stack-based-print-buffer-size-i.patch create mode 100644 queue-6.6/crypto-hisilicon-qm-fix-eq-aeq-interrupt-issue.patch create mode 100644 
queue-6.6/crypto-qat-fix-double-free-during-reset.patch create mode 100644 queue-6.6/crypto-xts-use-spawn-for-underlying-single-block-cip.patch create mode 100644 queue-6.6/cxl-add-cxl_decoders_committed-helper.patch create mode 100644 queue-6.6/cxl-core-always-hold-region_rwsem-while-reading-pois.patch create mode 100644 queue-6.6/cxl-pmu-ensure-put_device-on-pmu-devices.patch create mode 100644 queue-6.6/dmaengine-fsl-edma-add-judgment-on-enabling-round-ro.patch create mode 100644 queue-6.6/dmaengine-fsl-edma-do-not-suspend-and-resume-the-mas.patch create mode 100644 queue-6.6/dmaengine-fsl-edma-fix-wrong-pointer-check-in-fsl_ed.patch create mode 100644 queue-6.6/dmaengine-idxd-protect-int_handle-field-in-hw-descri.patch create mode 100644 queue-6.6/dmaengine-ti-k3-psil-am62-fix-spi-pdma-data.patch create mode 100644 queue-6.6/dmaengine-ti-k3-psil-am62a-fix-spi-pdma-data.patch create mode 100644 queue-6.6/drm-amd-display-increase-frame-warning-limit-with-ka.patch create mode 100644 queue-6.6/drm-amd-display-increase-num-voltage-states-to-40.patch create mode 100644 queue-6.6/drm-bridge-parade-ps8640-never-store-more-than-msg-s.patch create mode 100644 queue-6.6/drm-bridge-ps8640-fix-size-mismatch-warning-w-len.patch create mode 100644 queue-6.6/drm-bridge-ti-sn65dsi86-never-store-more-than-msg-si.patch create mode 100644 queue-6.6/drm-i915-call-intel_pre_plane_updates-also-for-pipes.patch create mode 100644 queue-6.6/drm-i915-dp-fix-passing-the-correct-dpcd_rev-for-drm.patch create mode 100644 queue-6.6/drm-i915-perf-update-handling-of-mmio-triggered-repo.patch create mode 100644 queue-6.6/i40e-fix-filter-input-checks-to-prevent-config-with-.patch create mode 100644 queue-6.6/i40e-fix-use-after-free-in-i40e_aqc_add_filters.patch create mode 100644 queue-6.6/i40e-restore-vf-msi-x-state-during-pci-reset.patch create mode 100644 queue-6.6/ice-fix-link_down_on_close-message.patch create mode 100644 queue-6.6/ice-shut-down-vsi-with-link-down-on-close-enabled.patch create mode 100644 queue-6.6/igc-check-vlan-ethertype-mask.patch create mode 100644 queue-6.6/igc-check-vlan-tci-mask.patch create mode 100644 queue-6.6/igc-fix-hicredit-calculation.patch create mode 100644 queue-6.6/igc-report-vlan-ethertype-matching-back-to-user.patch create mode 100644 queue-6.6/iio-imu-adis16475-use-bit-numbers-in-assign_bit.patch create mode 100644 queue-6.6/iommu-vt-d-support-enforce_cache_coherency-only-for-.patch create mode 100644 queue-6.6/kernel-resource-increment-by-align-value-in-get_free.patch create mode 100644 queue-6.6/kvm-s390-vsie-fix-wrong-vir-37-when-mso-is-used.patch create mode 100644 queue-6.6/media-qcom-camss-fix-genpd-cleanup.patch create mode 100644 queue-6.6/media-qcom-camss-fix-v4l2-async-notifier-error-path.patch create mode 100644 queue-6.6/mlxbf_gige-fix-receive-packet-race-condition.patch create mode 100644 queue-6.6/mm-convert-dax-lock-unlock-page-to-lock-unlock-folio.patch create mode 100644 queue-6.6/mm-memory-failure-pass-the-folio-and-the-page-to-col.patch create mode 100644 queue-6.6/net-bcmgenet-fix-fcs-generation-for-fragmented-skbuf.patch create mode 100644 queue-6.6/net-constify-sk_dst_get-and-__sk_dst_get-argument.patch create mode 100644 queue-6.6/net-implement-missing-getsockopt-so_timestamping_new.patch create mode 100644 queue-6.6/net-implement-missing-so_timestamping_new-cmsg-suppo.patch create mode 100644 queue-6.6/net-libwx-fix-memory-leak-on-free-page.patch create mode 100644 queue-6.6/net-prevent-mss-overflow-in-skb_segment.patch create mode 100644 
queue-6.6/net-qla3xxx-fix-potential-memleak-in-ql_alloc_buffer.patch create mode 100644 queue-6.6/net-ravb-wait-for-operating-mode-to-be-applied.patch create mode 100644 queue-6.6/net-save-and-restore-msg_namelen-in-sock_sendmsg.patch create mode 100644 queue-6.6/net-sched-em_text-fix-possible-memory-leak-in-em_tex.patch create mode 100644 queue-6.6/net-smc-fix-invalid-link-access-in-dumping-smc-r-con.patch create mode 100644 queue-6.6/netfilter-nf_nat-fix-action-not-being-set-for-all-ct.patch create mode 100644 queue-6.6/netfilter-nf_tables-set-transport-offset-from-mac-he.patch create mode 100644 queue-6.6/netfilter-nft_immediate-drop-chain-reference-counter.patch create mode 100644 queue-6.6/nfc-llcp_core-hold-a-ref-to-llcp_local-dev-when-hold.patch create mode 100644 queue-6.6/octeontx2-af-always-configure-nix-tx-link-credits-ba.patch create mode 100644 queue-6.6/octeontx2-af-fix-marking-couple-of-structure-as-__pa.patch create mode 100644 queue-6.6/octeontx2-af-re-enable-mac-tx-in-otx2_stop-processin.patch create mode 100644 queue-6.6/phy-mediatek-mipi-mt8183-fix-minimal-supported-frequ.patch create mode 100644 queue-6.6/phy-sunplus-return-negative-error-code-in-sp_usb_phy.patch create mode 100644 queue-6.6/phy-ti-gmii-sel-fix-register-offset-when-parent-is-n.patch create mode 100644 queue-6.6/powerpc-pseries-vas-migration-suspend-waits-for-no-i.patch create mode 100644 queue-6.6/r8169-fix-pci-error-on-system-resume.patch create mode 100644 queue-6.6/rcu-break-rcu_node_0-rq-__lock-order.patch create mode 100644 queue-6.6/rcu-introduce-rcu_cpu_online.patch create mode 100644 queue-6.6/rcu-tasks-handle-new-pf_idle-semantics.patch create mode 100644 queue-6.6/rcu-tasks-trace-handle-new-pf_idle-semantics.patch create mode 100644 queue-6.6/rdma-mlx5-fix-mkey-cache-wq-flush.patch create mode 100644 queue-6.6/riscv-don-t-probe-unaligned-access-speed-if-already-.patch create mode 100644 queue-6.6/riscv-kvm-update-external-interrupt-atomically-for-i.patch create mode 100644 queue-6.6/selftests-bonding-do-not-set-port-down-when-adding-t.patch create mode 100644 queue-6.6/sfc-fix-a-double-free-bug-in-efx_probe_filters.patch create mode 100644 queue-6.6/tcp-derive-delack_max-from-rto_min.patch create mode 100644 queue-6.6/vfio-mtty-overhaul-mtty-interrupt-handling.patch create mode 100644 queue-6.6/virtio_net-avoid-data-races-on-dev-stats-fields.patch create mode 100644 queue-6.6/virtio_net-fix-missing-dma-unmap-for-resize.patch create mode 100644 queue-6.6/wifi-iwlwifi-pcie-don-t-synchronize-irqs-from-irq.patch create mode 100644 queue-6.6/xsk-add-multi-buffer-support-for-sockets-sharing-ume.patch diff --git a/queue-6.6/accel-qaic-fix-gem-import-path-code.patch b/queue-6.6/accel-qaic-fix-gem-import-path-code.patch new file mode 100644 index 00000000000..32be264f439 --- /dev/null +++ b/queue-6.6/accel-qaic-fix-gem-import-path-code.patch @@ -0,0 +1,53 @@ +From 072d455b76f60ec8e1f75eb0ac4be5ebcfa30c83 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 8 Dec 2023 09:31:00 -0700 +Subject: accel/qaic: Fix GEM import path code + +From: Pranjal Ramajor Asha Kanojiya + +[ Upstream commit c8b6f4ad2ff9c6d88cdeb9acf16d0c4a323dd499 ] + +Do not modify the size of dmabuf as it is immutable. 
+ +Fixes: ff13be830333 ("accel/qaic: Add datapath") +Signed-off-by: Pranjal Ramajor Asha Kanojiya +Reviewed-by: Jeffrey Hugo +Signed-off-by: Jeffrey Hugo +Reviewed-by: Jacek Lawrynowicz +Link: https://patchwork.freedesktop.org/patch/msgid/20231208163101.1295769-2-quic_jhugo@quicinc.com +Signed-off-by: Sasha Levin +--- + drivers/accel/qaic/qaic_data.c | 6 ++---- + 1 file changed, 2 insertions(+), 4 deletions(-) + +diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c +index f4b06792c6f1c..ed1a5af434f24 100644 +--- a/drivers/accel/qaic/qaic_data.c ++++ b/drivers/accel/qaic/qaic_data.c +@@ -766,7 +766,6 @@ struct drm_gem_object *qaic_gem_prime_import(struct drm_device *dev, struct dma_ + struct dma_buf_attachment *attach; + struct drm_gem_object *obj; + struct qaic_bo *bo; +- size_t size; + int ret; + + bo = qaic_alloc_init_bo(); +@@ -784,13 +783,12 @@ struct drm_gem_object *qaic_gem_prime_import(struct drm_device *dev, struct dma_ + goto attach_fail; + } + +- size = PAGE_ALIGN(attach->dmabuf->size); +- if (size == 0) { ++ if (!attach->dmabuf->size) { + ret = -EINVAL; + goto size_align_fail; + } + +- drm_gem_private_object_init(dev, obj, size); ++ drm_gem_private_object_init(dev, obj, attach->dmabuf->size); + /* + * skipping dma_buf_map_attachment() as we do not know the direction + * just yet. Once the direction is known in the subsequent IOCTL to +-- +2.43.0 + diff --git a/queue-6.6/accel-qaic-implement-quirk-for-soc_hw_version.patch b/queue-6.6/accel-qaic-implement-quirk-for-soc_hw_version.patch new file mode 100644 index 00000000000..2df630d8946 --- /dev/null +++ b/queue-6.6/accel-qaic-implement-quirk-for-soc_hw_version.patch @@ -0,0 +1,59 @@ +From c745226221609caebb9e68d1a19e76e74f1bae26 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 8 Dec 2023 09:31:01 -0700 +Subject: accel/qaic: Implement quirk for SOC_HW_VERSION + +From: Jeffrey Hugo + +[ Upstream commit 4c8874c2a6512b9fe7285cab1a6910d9211a6cfb ] + +The SOC_HW_VERSION register in the BHI space is not correctly initialized +by the device and in many cases contains uninitialized data. The register +could contain 0xFFFFFFFF which is a special value to indicate a link +error in PCIe, therefore if observed, we could incorrectly think the +device is down. + +Intercept reads for this register, and provide the correct value - every +production instance would read 0x60110200 if the device was operating as +intended. + +Fixes: a36bf7af868b ("accel/qaic: Add MHI controller") +Signed-off-by: Jeffrey Hugo +Reviewed-by: Pranjal Ramajor Asha Kanojiya +Reviewed-by: Jacek Lawrynowicz +Link: https://patchwork.freedesktop.org/patch/msgid/20231208163101.1295769-3-quic_jhugo@quicinc.com +Signed-off-by: Sasha Levin +--- + drivers/accel/qaic/mhi_controller.c | 15 ++++++++++++++- + 1 file changed, 14 insertions(+), 1 deletion(-) + +diff --git a/drivers/accel/qaic/mhi_controller.c b/drivers/accel/qaic/mhi_controller.c +index 5036e58e7235b..1405623b03e4e 100644 +--- a/drivers/accel/qaic/mhi_controller.c ++++ b/drivers/accel/qaic/mhi_controller.c +@@ -404,8 +404,21 @@ static struct mhi_controller_config aic100_config = { + + static int mhi_read_reg(struct mhi_controller *mhi_cntrl, void __iomem *addr, u32 *out) + { +- u32 tmp = readl_relaxed(addr); ++ u32 tmp; + ++ /* ++ * SOC_HW_VERSION quirk ++ * The SOC_HW_VERSION register (offset 0x224) is not reliable and ++ * may contain uninitialized values, including 0xFFFFFFFF. This could ++ * cause a false positive link down error. 
Instead, intercept any ++ * reads and provide the correct value of the register. ++ */ ++ if (addr - mhi_cntrl->regs == 0x224) { ++ *out = 0x60110200; ++ return 0; ++ } ++ ++ tmp = readl_relaxed(addr); + if (tmp == U32_MAX) + return -EIO; + +-- +2.43.0 + diff --git a/queue-6.6/acpi-thermal-fix-acpi_thermal_unregister_thermal_zon.patch b/queue-6.6/acpi-thermal-fix-acpi_thermal_unregister_thermal_zon.patch new file mode 100644 index 00000000000..a3cf2da826e --- /dev/null +++ b/queue-6.6/acpi-thermal-fix-acpi_thermal_unregister_thermal_zon.patch @@ -0,0 +1,57 @@ +From 4d1a2397dfb5b11d74fd77072755b929cac3d4f2 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 31 Oct 2023 12:53:06 +0300 +Subject: ACPI: thermal: Fix acpi_thermal_unregister_thermal_zone() cleanup + +From: Dan Carpenter + +[ Upstream commit 4b27d5c420335dad7aea1aa6e799fe1d05c63b7e ] + +The acpi_thermal_unregister_thermal_zone() is paired with +acpi_thermal_register_thermal_zone() so it should mirror it. It should +clean up all the resources that the register function allocated and +leave the stuff that was allocated elsewhere. + +Unfortunately, it doesn't call thermal_zone_device_disable(). Also it +calls kfree(tz->trip_table) when it shouldn't. That was allocated in +acpi_thermal_add(). Putting the kfree() here leads to a double free +in the acpi_thermal_add() clean up function. + +Likewise, the acpi_thermal_remove() should mirror acpi_thermal_add() so +it should have an explicit kfree(tz->trip_table) as well. + +Fixes: ec23c1c462de ("ACPI: thermal: Use trip point table to register thermal zones") +Signed-off-by: Dan Carpenter +Signed-off-by: Rafael J. Wysocki +Signed-off-by: Sasha Levin +--- + drivers/acpi/thermal.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c +index 312730f8272ee..8263508415a8d 100644 +--- a/drivers/acpi/thermal.c ++++ b/drivers/acpi/thermal.c +@@ -778,9 +778,9 @@ static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz) + + static void acpi_thermal_unregister_thermal_zone(struct acpi_thermal *tz) + { ++ thermal_zone_device_disable(tz->thermal_zone); + acpi_thermal_zone_sysfs_remove(tz); + thermal_zone_device_unregister(tz->thermal_zone); +- kfree(tz->trip_table); + tz->thermal_zone = NULL; + } + +@@ -985,7 +985,7 @@ static void acpi_thermal_remove(struct acpi_device *device) + + flush_workqueue(acpi_thermal_pm_queue); + acpi_thermal_unregister_thermal_zone(tz); +- ++ kfree(tz->trip_table); + kfree(tz); + } + +-- +2.43.0 + diff --git a/queue-6.6/apparmor-fix-move_mount-mediation-by-detecting-if-so.patch b/queue-6.6/apparmor-fix-move_mount-mediation-by-detecting-if-so.patch new file mode 100644 index 00000000000..04d7a565b0f --- /dev/null +++ b/queue-6.6/apparmor-fix-move_mount-mediation-by-detecting-if-so.patch @@ -0,0 +1,71 @@ +From 56bf9ad98514889ec9638d6dfe57310f53bd29ec Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 18 Dec 2023 01:10:03 -0800 +Subject: apparmor: Fix move_mount mediation by detecting if source is detached + +From: John Johansen + +[ Upstream commit 8026e40608b4d552216d2a818ca7080a4264bb44 ] + +Prevent move_mount from applying the attach_disconnected flag +to move_mount(). This prevents detached mounts from appearing +as / when applying mount mediation, which is not only incorrect +but could result in bad policy being generated. + +Basic mount rules like + allow mount, + allow mount options=(move) -> /target/, + +will allow detached mounts, allowing older policy to continue +to function. 
New policy gains the ability to specify `detached` as +a source option + allow mount detached -> /target/, + +In addition make sure support of move_mount is advertised as +a feature to userspace so that applications that generate policy +can respond to the addition. + +Note: this fixes mediation of move_mount when a detached mount is used, + it does not fix the broader regression of apparmor mediation of + mounts under the new mount api. + +Link: https://lore.kernel.org/all/68c166b8-5b4d-4612-8042-1dee3334385b@leemhuis.info/T/#mb35fdde37f999f08f0b02d58dc1bf4e6b65b8da2 +Fixes: 157a3537d6bc ("apparmor: Fix regression in mount mediation") +Reviewed-by: Georgia Garcia +Signed-off-by: John Johansen +Signed-off-by: Sasha Levin +--- + security/apparmor/apparmorfs.c | 1 + + security/apparmor/mount.c | 4 ++++ + 2 files changed, 5 insertions(+) + +diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c +index 261cef4c622fb..63ddefb6ddd1c 100644 +--- a/security/apparmor/apparmorfs.c ++++ b/security/apparmor/apparmorfs.c +@@ -2364,6 +2364,7 @@ static struct aa_sfs_entry aa_sfs_entry_policy[] = { + + static struct aa_sfs_entry aa_sfs_entry_mount[] = { + AA_SFS_FILE_STRING("mask", "mount umount pivot_root"), ++ AA_SFS_FILE_STRING("move_mount", "detached"), + { } + }; + +diff --git a/security/apparmor/mount.c b/security/apparmor/mount.c +index f2a114e540079..cb0fdbdb82d94 100644 +--- a/security/apparmor/mount.c ++++ b/security/apparmor/mount.c +@@ -499,6 +499,10 @@ int aa_move_mount(const struct cred *subj_cred, + error = -ENOMEM; + if (!to_buffer || !from_buffer) + goto out; ++ ++ if (!our_mnt(from_path->mnt)) ++ /* moving a mount detached from the namespace */ ++ from_path = NULL; + error = fn_for_each_confined(label, profile, + match_mnt(subj_cred, profile, to_path, to_buffer, + from_path, from_buffer, +-- +2.43.0 + diff --git a/queue-6.6/arm-sun9i-smp-fix-array-index-out-of-bounds-read-in-.patch b/queue-6.6/arm-sun9i-smp-fix-array-index-out-of-bounds-read-in-.patch new file mode 100644 index 00000000000..0ff035374b6 --- /dev/null +++ b/queue-6.6/arm-sun9i-smp-fix-array-index-out-of-bounds-read-in-.patch @@ -0,0 +1,64 @@ +From da98582747521a605fffcb6713cc72a878795e6c Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 28 Dec 2023 20:39:02 +0100 +Subject: ARM: sun9i: smp: Fix array-index-out-of-bounds read in + sunxi_mc_smp_init + +From: Stefan Wahren + +[ Upstream commit 72ad3b772b6d393701df58ba1359b0bb346a19ed ] + +Running a multi-arch kernel (multi_v7_defconfig) on a Raspberry Pi 3B+ +with enabled CONFIG_UBSAN triggers the following warning: + + UBSAN: array-index-out-of-bounds in arch/arm/mach-sunxi/mc_smp.c:810:29 + index 2 is out of range for type 'sunxi_mc_smp_data [2]' + CPU: 0 PID: 1 Comm: swapper/0 Not tainted 6.7.0-rc6-00248-g5254c0cbc92d + Hardware name: BCM2835 + unwind_backtrace from show_stack+0x10/0x14 + show_stack from dump_stack_lvl+0x40/0x4c + dump_stack_lvl from ubsan_epilogue+0x8/0x34 + ubsan_epilogue from __ubsan_handle_out_of_bounds+0x78/0x80 + __ubsan_handle_out_of_bounds from sunxi_mc_smp_init+0xe4/0x4cc + sunxi_mc_smp_init from do_one_initcall+0xa0/0x2fc + do_one_initcall from kernel_init_freeable+0xf4/0x2f4 + kernel_init_freeable from kernel_init+0x18/0x158 + kernel_init from ret_from_fork+0x14/0x28 + +Since the enabled method couldn't match with any entry from +sunxi_mc_smp_data, the value of the index shouldn't be used right after +the loop. So move it after the check of ret in order to have a valid +index. 
+ +Fixes: 1631090e34f5 ("ARM: sun9i: smp: Add is_a83t field") +Signed-off-by: Stefan Wahren +Link: https://lore.kernel.org/r/20231228193903.9078-1-wahrenst@gmx.net +Reviewed-by: Chen-Yu Tsai +Signed-off-by: Arnd Bergmann +Signed-off-by: Sasha Levin +--- + arch/arm/mach-sunxi/mc_smp.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/arch/arm/mach-sunxi/mc_smp.c b/arch/arm/mach-sunxi/mc_smp.c +index cb63921232a6f..6ec3445f3c723 100644 +--- a/arch/arm/mach-sunxi/mc_smp.c ++++ b/arch/arm/mach-sunxi/mc_smp.c +@@ -807,12 +807,12 @@ static int __init sunxi_mc_smp_init(void) + break; + } + +- is_a83t = sunxi_mc_smp_data[i].is_a83t; +- + of_node_put(node); + if (ret) + return -ENODEV; + ++ is_a83t = sunxi_mc_smp_data[i].is_a83t; ++ + if (!sunxi_mc_smp_cpu_table_init()) + return -EINVAL; + +-- +2.43.0 + diff --git a/queue-6.6/asix-add-check-for-usbnet_get_endpoints.patch b/queue-6.6/asix-add-check-for-usbnet_get_endpoints.patch new file mode 100644 index 00000000000..2ed53e33fbc --- /dev/null +++ b/queue-6.6/asix-add-check-for-usbnet_get_endpoints.patch @@ -0,0 +1,38 @@ +From 371ef84244be0667d855b23dc2c1d386dd2f8314 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 3 Jan 2024 03:35:34 +0000 +Subject: asix: Add check for usbnet_get_endpoints + +From: Chen Ni + +[ Upstream commit eaac6a2d26b65511e164772bec6918fcbc61938e ] + +Add check for usbnet_get_endpoints() and return the error if it fails +in order to transfer the error. + +Fixes: 16626b0cc3d5 ("asix: Add a new driver for the AX88172A") +Signed-off-by: Chen Ni +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + drivers/net/usb/ax88172a.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c +index 3777c7e2e6fc0..e47bb125048d4 100644 +--- a/drivers/net/usb/ax88172a.c ++++ b/drivers/net/usb/ax88172a.c +@@ -161,7 +161,9 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf) + u8 buf[ETH_ALEN]; + struct ax88172a_private *priv; + +- usbnet_get_endpoints(dev, intf); ++ ret = usbnet_get_endpoints(dev, intf); ++ if (ret) ++ return ret; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) +-- +2.43.0 + diff --git a/queue-6.6/asoc-fsl_rpmsg-fix-error-handler-with-pm_runtime_ena.patch b/queue-6.6/asoc-fsl_rpmsg-fix-error-handler-with-pm_runtime_ena.patch new file mode 100644 index 00000000000..5aaa316e18e --- /dev/null +++ b/queue-6.6/asoc-fsl_rpmsg-fix-error-handler-with-pm_runtime_ena.patch @@ -0,0 +1,65 @@ +From 6e0989017daf3e796687a121b843d88f211b2a34 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 25 Dec 2023 17:06:08 +0900 +Subject: ASoC: fsl_rpmsg: Fix error handler with pm_runtime_enable + +From: Chancel Liu + +[ Upstream commit f9d378fc68c43fd41b35133edec9cd902ec334ec ] + +There is error message when defer probe happens: + +fsl_rpmsg rpmsg_audio: Unbalanced pm_runtime_enable! + +Fix the error handler with pm_runtime_enable. 
+ +Fixes: b73d9e6225e8 ("ASoC: fsl_rpmsg: Add CPU DAI driver for audio base on rpmsg") +Signed-off-by: Chancel Liu +Acked-by: Shengjiu Wang +Link: https://lore.kernel.org/r/20231225080608.967953-1-chancel.liu@nxp.com +Signed-off-by: Mark Brown +Signed-off-by: Sasha Levin +--- + sound/soc/fsl/fsl_rpmsg.c | 10 ++++++++-- + 1 file changed, 8 insertions(+), 2 deletions(-) + +diff --git a/sound/soc/fsl/fsl_rpmsg.c b/sound/soc/fsl/fsl_rpmsg.c +index abe19a8a7aa72..f7180f1959dd0 100644 +--- a/sound/soc/fsl/fsl_rpmsg.c ++++ b/sound/soc/fsl/fsl_rpmsg.c +@@ -239,7 +239,7 @@ static int fsl_rpmsg_probe(struct platform_device *pdev) + ret = devm_snd_soc_register_component(&pdev->dev, &fsl_component, + &fsl_rpmsg_dai, 1); + if (ret) +- return ret; ++ goto err_pm_disable; + + rpmsg->card_pdev = platform_device_register_data(&pdev->dev, + "imx-audio-rpmsg", +@@ -249,16 +249,22 @@ static int fsl_rpmsg_probe(struct platform_device *pdev) + if (IS_ERR(rpmsg->card_pdev)) { + dev_err(&pdev->dev, "failed to register rpmsg card\n"); + ret = PTR_ERR(rpmsg->card_pdev); +- return ret; ++ goto err_pm_disable; + } + + return 0; ++ ++err_pm_disable: ++ pm_runtime_disable(&pdev->dev); ++ return ret; + } + + static void fsl_rpmsg_remove(struct platform_device *pdev) + { + struct fsl_rpmsg *rpmsg = platform_get_drvdata(pdev); + ++ pm_runtime_disable(&pdev->dev); ++ + if (rpmsg->card_pdev) + platform_device_unregister(rpmsg->card_pdev); + } +-- +2.43.0 + diff --git a/queue-6.6/asoc-mediatek-mt8186-fix-aud_pad_top-register-and-of.patch b/queue-6.6/asoc-mediatek-mt8186-fix-aud_pad_top-register-and-of.patch new file mode 100644 index 00000000000..b2960917eb3 --- /dev/null +++ b/queue-6.6/asoc-mediatek-mt8186-fix-aud_pad_top-register-and-of.patch @@ -0,0 +1,39 @@ +From 900cb6ec08f9885a4135d4daf0282dff8d3848ea Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 29 Dec 2023 13:43:42 +0200 +Subject: ASoC: mediatek: mt8186: fix AUD_PAD_TOP register and offset + +From: Eugen Hristev + +[ Upstream commit 38744c3fa00109c51076121c2deb4f02e2f09194 ] + +AUD_PAD_TOP widget's correct register is AFE_AUD_PAD_TOP , and not zero. +Having a zero as register, it would mean that the `snd_soc_dapm_new_widgets` +would try to read the register at offset zero when trying to get the power +status of this widget, which is incorrect. 
+ +Fixes: b65c466220b3 ("ASoC: mediatek: mt8186: support adda in platform driver") +Signed-off-by: Eugen Hristev +Link: https://lore.kernel.org/r/20231229114342.195867-1-eugen.hristev@collabora.com +Signed-off-by: Mark Brown +Signed-off-by: Sasha Levin +--- + sound/soc/mediatek/mt8186/mt8186-dai-adda.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/sound/soc/mediatek/mt8186/mt8186-dai-adda.c b/sound/soc/mediatek/mt8186/mt8186-dai-adda.c +index 247ab8df941f7..ab61e597c9a0f 100644 +--- a/sound/soc/mediatek/mt8186/mt8186-dai-adda.c ++++ b/sound/soc/mediatek/mt8186/mt8186-dai-adda.c +@@ -499,7 +499,7 @@ static const struct snd_soc_dapm_widget mtk_dai_adda_widgets[] = { + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), + + SND_SOC_DAPM_SUPPLY_S("AUD_PAD_TOP", SUPPLY_SEQ_ADDA_AUD_PAD_TOP, +- 0, 0, 0, ++ AFE_AUD_PAD_TOP, RG_RX_FIFO_ON_SFT, 0, + mtk_adda_pad_top_event, + SND_SOC_DAPM_PRE_PMU), + SND_SOC_DAPM_SUPPLY_S("ADDA_MTKAIF_CFG", SUPPLY_SEQ_ADDA_MTKAIF_CFG, +-- +2.43.0 + diff --git a/queue-6.6/asoc-meson-g12a-toacodec-fix-event-generation.patch b/queue-6.6/asoc-meson-g12a-toacodec-fix-event-generation.patch new file mode 100644 index 00000000000..2bc1e232d20 --- /dev/null +++ b/queue-6.6/asoc-meson-g12a-toacodec-fix-event-generation.patch @@ -0,0 +1,39 @@ +From 76cb4994c1480fd0f36dfb2d30285f7a71a5af8f Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 3 Jan 2024 18:34:03 +0000 +Subject: ASoC: meson: g12a-toacodec: Fix event generation + +From: Mark Brown + +[ Upstream commit 172c88244b5f2d3375403ebb504d407be0fded59 ] + +When a control changes value the return value from _put() should be 1 so +we get events generated to userspace notifying applications of the change. +We are checking if there has been a change and exiting early if not but we +are not providing the correct return value in the latter case, fix this. + +Fixes: af2618a2eee8 ("ASoC: meson: g12a: add internal DAC glue driver") +Signed-off-by: Mark Brown +Link: https://lore.kernel.org/r/20240103-meson-enum-val-v1-3-424af7a8fb91@kernel.org +Signed-off-by: Mark Brown +Signed-off-by: Sasha Levin +--- + sound/soc/meson/g12a-toacodec.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/sound/soc/meson/g12a-toacodec.c b/sound/soc/meson/g12a-toacodec.c +index dd7f07de2685a..531bb8707a3ec 100644 +--- a/sound/soc/meson/g12a-toacodec.c ++++ b/sound/soc/meson/g12a-toacodec.c +@@ -104,7 +104,7 @@ static int g12a_toacodec_mux_put_enum(struct snd_kcontrol *kcontrol, + + snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL); + +- return 0; ++ return 1; + } + + static SOC_ENUM_SINGLE_DECL(g12a_toacodec_mux_enum, TOACODEC_CTRL0, +-- +2.43.0 + diff --git a/queue-6.6/asoc-meson-g12a-toacodec-validate-written-enum-value.patch b/queue-6.6/asoc-meson-g12a-toacodec-validate-written-enum-value.patch new file mode 100644 index 00000000000..3f4835380df --- /dev/null +++ b/queue-6.6/asoc-meson-g12a-toacodec-validate-written-enum-value.patch @@ -0,0 +1,40 @@ +From e231b97dea07712672d74260163c757a9792c808 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 3 Jan 2024 18:34:01 +0000 +Subject: ASoC: meson: g12a-toacodec: Validate written enum values + +From: Mark Brown + +[ Upstream commit 3150b70e944ead909260285dfb5707d0bedcf87b ] + +When writing to an enum we need to verify that the value written is valid +for the enumeration, the helper function snd_soc_item_enum_to_val() doesn't +do it since it needs to return an unsigned (and in any case we'd need to +check the return value). 
+ +Fixes: af2618a2eee8 ("ASoC: meson: g12a: add internal DAC glue driver") +Signed-off-by: Mark Brown +Link: https://lore.kernel.org/r/20240103-meson-enum-val-v1-1-424af7a8fb91@kernel.org +Signed-off-by: Mark Brown +Signed-off-by: Sasha Levin +--- + sound/soc/meson/g12a-toacodec.c | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/sound/soc/meson/g12a-toacodec.c b/sound/soc/meson/g12a-toacodec.c +index 6c4503766fdca..dd7f07de2685a 100644 +--- a/sound/soc/meson/g12a-toacodec.c ++++ b/sound/soc/meson/g12a-toacodec.c +@@ -71,6 +71,9 @@ static int g12a_toacodec_mux_put_enum(struct snd_kcontrol *kcontrol, + struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; + unsigned int mux, reg; + ++ if (ucontrol->value.enumerated.item[0] >= e->items) ++ return -EINVAL; ++ + mux = snd_soc_enum_item_to_val(e, ucontrol->value.enumerated.item[0]); + regmap_field_read(priv->field_dat_sel, ®); + +-- +2.43.0 + diff --git a/queue-6.6/asoc-meson-g12a-tohdmitx-fix-event-generation-for-s-.patch b/queue-6.6/asoc-meson-g12a-tohdmitx-fix-event-generation-for-s-.patch new file mode 100644 index 00000000000..0e21feb2052 --- /dev/null +++ b/queue-6.6/asoc-meson-g12a-tohdmitx-fix-event-generation-for-s-.patch @@ -0,0 +1,39 @@ +From afda9763e1ea5b55af7c424be19df7d2ea389675 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 3 Jan 2024 18:34:04 +0000 +Subject: ASoC: meson: g12a-tohdmitx: Fix event generation for S/PDIF mux + +From: Mark Brown + +[ Upstream commit b036d8ef3120b996751495ce25994eea58032a98 ] + +When a control changes value the return value from _put() should be 1 so +we get events generated to userspace notifying applications of the change. +While the I2S mux gets this right the S/PDIF mux does not, fix the return +value. + +Fixes: c8609f3870f7 ("ASoC: meson: add g12a tohdmitx control") +Signed-off-by: Mark Brown +Link: https://lore.kernel.org/r/20240103-meson-enum-val-v1-4-424af7a8fb91@kernel.org +Signed-off-by: Mark Brown +Signed-off-by: Sasha Levin +--- + sound/soc/meson/g12a-tohdmitx.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/sound/soc/meson/g12a-tohdmitx.c b/sound/soc/meson/g12a-tohdmitx.c +index 51b7703e1834f..b92434125face 100644 +--- a/sound/soc/meson/g12a-tohdmitx.c ++++ b/sound/soc/meson/g12a-tohdmitx.c +@@ -118,7 +118,7 @@ static int g12a_tohdmitx_spdif_mux_put_enum(struct snd_kcontrol *kcontrol, + + snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL); + +- return 0; ++ return 1; + } + + static SOC_ENUM_SINGLE_DECL(g12a_tohdmitx_spdif_mux_enum, TOHDMITX_CTRL0, +-- +2.43.0 + diff --git a/queue-6.6/asoc-meson-g12a-tohdmitx-validate-written-enum-value.patch b/queue-6.6/asoc-meson-g12a-tohdmitx-validate-written-enum-value.patch new file mode 100644 index 00000000000..06fd388c0b7 --- /dev/null +++ b/queue-6.6/asoc-meson-g12a-tohdmitx-validate-written-enum-value.patch @@ -0,0 +1,50 @@ +From d0ffbcc1d6bfa4db7149ab2b29ffdec1c4d4c065 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 3 Jan 2024 18:34:02 +0000 +Subject: ASoC: meson: g12a-tohdmitx: Validate written enum values + +From: Mark Brown + +[ Upstream commit 1e001206804be3f3d21f4a1cf16e5d059d75643f ] + +When writing to an enum we need to verify that the value written is valid +for the enumeration, the helper function snd_soc_item_enum_to_val() doesn't +do it since it needs to return an unsigned (and in any case we'd need to +check the return value). 
+ +Fixes: c8609f3870f7 ("ASoC: meson: add g12a tohdmitx control") +Signed-off-by: Mark Brown +Link: https://lore.kernel.org/r/20240103-meson-enum-val-v1-2-424af7a8fb91@kernel.org +Signed-off-by: Mark Brown +Signed-off-by: Sasha Levin +--- + sound/soc/meson/g12a-tohdmitx.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/sound/soc/meson/g12a-tohdmitx.c b/sound/soc/meson/g12a-tohdmitx.c +index f7ef9aa1eed8d..51b7703e1834f 100644 +--- a/sound/soc/meson/g12a-tohdmitx.c ++++ b/sound/soc/meson/g12a-tohdmitx.c +@@ -45,6 +45,9 @@ static int g12a_tohdmitx_i2s_mux_put_enum(struct snd_kcontrol *kcontrol, + struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; + unsigned int mux, changed; + ++ if (ucontrol->value.enumerated.item[0] >= e->items) ++ return -EINVAL; ++ + mux = snd_soc_enum_item_to_val(e, ucontrol->value.enumerated.item[0]); + changed = snd_soc_component_test_bits(component, e->reg, + CTRL0_I2S_DAT_SEL, +@@ -93,6 +96,9 @@ static int g12a_tohdmitx_spdif_mux_put_enum(struct snd_kcontrol *kcontrol, + struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; + unsigned int mux, changed; + ++ if (ucontrol->value.enumerated.item[0] >= e->items) ++ return -EINVAL; ++ + mux = snd_soc_enum_item_to_val(e, ucontrol->value.enumerated.item[0]); + changed = snd_soc_component_test_bits(component, TOHDMITX_CTRL0, + CTRL0_SPDIF_SEL, +-- +2.43.0 + diff --git a/queue-6.6/bnxt_en-remove-mis-applied-code-from-bnxt_cfg_ntp_fi.patch b/queue-6.6/bnxt_en-remove-mis-applied-code-from-bnxt_cfg_ntp_fi.patch new file mode 100644 index 00000000000..ec221934140 --- /dev/null +++ b/queue-6.6/bnxt_en-remove-mis-applied-code-from-bnxt_cfg_ntp_fi.patch @@ -0,0 +1,47 @@ +From fd87db231303ce7ff1f685dabaf13da0685d9431 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 3 Jan 2024 16:59:24 -0800 +Subject: bnxt_en: Remove mis-applied code from bnxt_cfg_ntp_filters() + +From: Michael Chan + +[ Upstream commit e009b2efb7a8850498796b360043ac25c8d3d28f ] + +The 2 lines to check for the BNXT_HWRM_PF_UNLOAD_SP_EVENT bit was +mis-applied to bnxt_cfg_ntp_filters() and should have been applied to +bnxt_sp_task(). + +Fixes: 19241368443f ("bnxt_en: Send PF driver unload notification to all VFs.") +Reviewed-by: Andy Gospodarek +Signed-off-by: Michael Chan +Signed-off-by: David S. 
Miller +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/broadcom/bnxt/bnxt.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +index f811d59fd71fd..6039886a8544f 100644 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +@@ -12073,6 +12073,8 @@ static void bnxt_sp_task(struct work_struct *work) + bnxt_cfg_ntp_filters(bp); + if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) + bnxt_hwrm_exec_fwd_req(bp); ++ if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event)) ++ netdev_info(bp->dev, "Receive PF driver unload event!\n"); + if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) { + bnxt_hwrm_port_qstats(bp, 0); + bnxt_hwrm_port_qstats_ext(bp, 0); +@@ -13052,8 +13054,6 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp) + } + } + } +- if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event)) +- netdev_info(bp->dev, "Receive PF driver unload event!\n"); + } + + #else +-- +2.43.0 + diff --git a/queue-6.6/bpftool-align-output-skeleton-elf-code.patch b/queue-6.6/bpftool-align-output-skeleton-elf-code.patch new file mode 100644 index 00000000000..b401a37e879 --- /dev/null +++ b/queue-6.6/bpftool-align-output-skeleton-elf-code.patch @@ -0,0 +1,70 @@ +From a6bff5e0aa733d4f6915ee46d120eab73a6927e8 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 6 Oct 2023 21:44:38 -0700 +Subject: bpftool: Align output skeleton ELF code + +From: Ian Rogers + +[ Upstream commit 23671f4dfd10b48b4a2fee4768886f0d8ec55b7e ] + +libbpf accesses the ELF data requiring at least 8 byte alignment, +however, the data is generated into a C string that doesn't guarantee +alignment. Fix this by assigning to an aligned char array. Use sizeof +on the array, less one for the \0 terminator, rather than generating a +constant. 
+ +Fixes: a6cc6b34b93e ("bpftool: Provide a helper method for accessing skeleton's embedded ELF data") +Signed-off-by: Ian Rogers +Signed-off-by: Andrii Nakryiko +Reviewed-by: Alan Maguire +Acked-by: Quentin Monnet +Link: https://lore.kernel.org/bpf/20231007044439.25171-1-irogers@google.com +Signed-off-by: Sasha Levin +--- + tools/bpf/bpftool/gen.c | 15 +++++++++------ + 1 file changed, 9 insertions(+), 6 deletions(-) + +diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c +index 04c47745b3ea5..882bf8e6e70e4 100644 +--- a/tools/bpf/bpftool/gen.c ++++ b/tools/bpf/bpftool/gen.c +@@ -1209,7 +1209,7 @@ static int do_skeleton(int argc, char **argv) + codegen("\ + \n\ + \n\ +- s->data = %2$s__elf_bytes(&s->data_sz); \n\ ++ s->data = %1$s__elf_bytes(&s->data_sz); \n\ + \n\ + obj->skeleton = s; \n\ + return 0; \n\ +@@ -1218,12 +1218,12 @@ static int do_skeleton(int argc, char **argv) + return err; \n\ + } \n\ + \n\ +- static inline const void *%2$s__elf_bytes(size_t *sz) \n\ ++ static inline const void *%1$s__elf_bytes(size_t *sz) \n\ + { \n\ +- *sz = %1$d; \n\ +- return (const void *)\"\\ \n\ +- " +- , file_sz, obj_name); ++ static const char data[] __attribute__((__aligned__(8))) = \"\\\n\ ++ ", ++ obj_name ++ ); + + /* embed contents of BPF object file */ + print_hex(obj_data, file_sz); +@@ -1231,6 +1231,9 @@ static int do_skeleton(int argc, char **argv) + codegen("\ + \n\ + \"; \n\ ++ \n\ ++ *sz = sizeof(data) - 1; \n\ ++ return (const void *)data; \n\ + } \n\ + \n\ + #ifdef __cplusplus \n\ +-- +2.43.0 + diff --git a/queue-6.6/bpftool-fix-wcast-qual-warning.patch b/queue-6.6/bpftool-fix-wcast-qual-warning.patch new file mode 100644 index 00000000000..bfb871088a8 --- /dev/null +++ b/queue-6.6/bpftool-fix-wcast-qual-warning.patch @@ -0,0 +1,45 @@ +From 7a8244029a18615c673d37807f075c201c5858cf Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 7 Sep 2023 02:02:10 -0700 +Subject: bpftool: Fix -Wcast-qual warning + +From: Denys Zagorui + +[ Upstream commit ebc8484d0e6da9e6c9e8cfa1f40bf94e9c6fc512 ] + +This cast was made by purpose for older libbpf where the +bpf_object_skeleton field is void * instead of const void * +to eliminate a warning (as i understand +-Wincompatible-pointer-types-discards-qualifiers) but this +cast introduces another warning (-Wcast-qual) for libbpf +where data field is const void * + +It makes sense for bpftool to be in sync with libbpf from +kernel sources + +Signed-off-by: Denys Zagorui +Signed-off-by: Andrii Nakryiko +Acked-by: Quentin Monnet +Link: https://lore.kernel.org/bpf/20230907090210.968612-1-dzagorui@cisco.com +Stable-dep-of: 23671f4dfd10 ("bpftool: Align output skeleton ELF code") +Signed-off-by: Sasha Levin +--- + tools/bpf/bpftool/gen.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c +index 2883660d6b672..04c47745b3ea5 100644 +--- a/tools/bpf/bpftool/gen.c ++++ b/tools/bpf/bpftool/gen.c +@@ -1209,7 +1209,7 @@ static int do_skeleton(int argc, char **argv) + codegen("\ + \n\ + \n\ +- s->data = (void *)%2$s__elf_bytes(&s->data_sz); \n\ ++ s->data = %2$s__elf_bytes(&s->data_sz); \n\ + \n\ + obj->skeleton = s; \n\ + return 0; \n\ +-- +2.43.0 + diff --git a/queue-6.6/clk-rockchip-rk3128-fix-aclk_peri_src-s-parent.patch b/queue-6.6/clk-rockchip-rk3128-fix-aclk_peri_src-s-parent.patch new file mode 100644 index 00000000000..ec3fc7ed6d8 --- /dev/null +++ b/queue-6.6/clk-rockchip-rk3128-fix-aclk_peri_src-s-parent.patch @@ -0,0 +1,76 @@ +From 15401886245a8e2f8123a74f21f510fe3e4f42db 
Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 27 Nov 2023 19:14:16 +0100 +Subject: clk: rockchip: rk3128: Fix aclk_peri_src's parent + +From: Finley Xiao + +[ Upstream commit 98dcc6be3859fb15257750b8e1d4e0eefd2c5e1e ] + +According to the TRM there are no specific gpll_peri, cpll_peri, +gpll_div2_peri or gpll_div3_peri gates, but a single clk_peri_src gate. +Instead mux_clk_peri_src directly connects to the plls respectively the pll +divider clocks. +Fix this by creating a single gated composite. + +Also rename all occurrences of aclk_peri_src to clk_peri_src, since it +is the parent for peri aclks, pclks and hclks. That name also matches +the one used in the TRM. + +Fixes: f6022e88faca ("clk: rockchip: add clock controller for rk3128") +Signed-off-by: Finley Xiao +[renamed aclk_peri_src -> clk_peri_src and added commit message] +Signed-off-by: Alex Bee +Link: https://lore.kernel.org/r/20231127181415.11735-4-knaerzche@gmail.com +Signed-off-by: Heiko Stuebner +Signed-off-by: Sasha Levin +--- + drivers/clk/rockchip/clk-rk3128.c | 20 +++++++------------- + 1 file changed, 7 insertions(+), 13 deletions(-) + +diff --git a/drivers/clk/rockchip/clk-rk3128.c b/drivers/clk/rockchip/clk-rk3128.c +index aa53797dbfc14..fcacfe758829c 100644 +--- a/drivers/clk/rockchip/clk-rk3128.c ++++ b/drivers/clk/rockchip/clk-rk3128.c +@@ -138,7 +138,7 @@ PNAME(mux_pll_src_5plls_p) = { "cpll", "gpll", "gpll_div2", "gpll_div3", "usb480 + PNAME(mux_pll_src_4plls_p) = { "cpll", "gpll", "gpll_div2", "usb480m" }; + PNAME(mux_pll_src_3plls_p) = { "cpll", "gpll", "gpll_div2" }; + +-PNAME(mux_aclk_peri_src_p) = { "gpll_peri", "cpll_peri", "gpll_div2_peri", "gpll_div3_peri" }; ++PNAME(mux_clk_peri_src_p) = { "gpll", "cpll", "gpll_div2", "gpll_div3" }; + PNAME(mux_mmc_src_p) = { "cpll", "gpll", "gpll_div2", "xin24m" }; + PNAME(mux_clk_cif_out_src_p) = { "clk_cif_src", "xin24m" }; + PNAME(mux_sclk_vop_src_p) = { "cpll", "gpll", "gpll_div2", "gpll_div3" }; +@@ -275,23 +275,17 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = { + RK2928_CLKGATE_CON(0), 11, GFLAGS), + + /* PD_PERI */ +- GATE(0, "gpll_peri", "gpll", CLK_IGNORE_UNUSED, ++ COMPOSITE(0, "clk_peri_src", mux_clk_peri_src_p, 0, ++ RK2928_CLKSEL_CON(10), 14, 2, MFLAGS, 0, 5, DFLAGS, + RK2928_CLKGATE_CON(2), 0, GFLAGS), +- GATE(0, "cpll_peri", "cpll", CLK_IGNORE_UNUSED, +- RK2928_CLKGATE_CON(2), 0, GFLAGS), +- GATE(0, "gpll_div2_peri", "gpll_div2", CLK_IGNORE_UNUSED, +- RK2928_CLKGATE_CON(2), 0, GFLAGS), +- GATE(0, "gpll_div3_peri", "gpll_div3", CLK_IGNORE_UNUSED, +- RK2928_CLKGATE_CON(2), 0, GFLAGS), +- COMPOSITE_NOGATE(0, "aclk_peri_src", mux_aclk_peri_src_p, 0, +- RK2928_CLKSEL_CON(10), 14, 2, MFLAGS, 0, 5, DFLAGS), +- COMPOSITE_NOMUX(PCLK_PERI, "pclk_peri", "aclk_peri_src", 0, ++ ++ COMPOSITE_NOMUX(PCLK_PERI, "pclk_peri", "clk_peri_src", 0, + RK2928_CLKSEL_CON(10), 12, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO, + RK2928_CLKGATE_CON(2), 3, GFLAGS), +- COMPOSITE_NOMUX(HCLK_PERI, "hclk_peri", "aclk_peri_src", 0, ++ COMPOSITE_NOMUX(HCLK_PERI, "hclk_peri", "clk_peri_src", 0, + RK2928_CLKSEL_CON(10), 8, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO, + RK2928_CLKGATE_CON(2), 2, GFLAGS), +- GATE(ACLK_PERI, "aclk_peri", "aclk_peri_src", 0, ++ GATE(ACLK_PERI, "aclk_peri", "clk_peri_src", 0, + RK2928_CLKGATE_CON(2), 1, GFLAGS), + + GATE(SCLK_TIMER0, "sclk_timer0", "xin24m", 0, +-- +2.43.0 + diff --git a/queue-6.6/clk-rockchip-rk3128-fix-sclk_sdmmc-s-clock-name.patch b/queue-6.6/clk-rockchip-rk3128-fix-sclk_sdmmc-s-clock-name.patch new file mode 100644 index 
00000000000..fdad85507b1 --- /dev/null +++ b/queue-6.6/clk-rockchip-rk3128-fix-sclk_sdmmc-s-clock-name.patch @@ -0,0 +1,41 @@ +From 054db749f3a26ece5c1e3b35644ff2e55f99a28f Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 27 Nov 2023 19:14:18 +0100 +Subject: clk: rockchip: rk3128: Fix SCLK_SDMMC's clock name + +From: Alex Bee + +[ Upstream commit 99fe9ee56bd2f7358f1bc72551c2f3a6bbddf80a ] + +SCLK_SDMMC is the parent for SCLK_SDMMC_DRV and SCLK_SDMMC_SAMPLE, but +used with the (more) correct name sclk_sdmmc. SD card tuning does currently +fail as the parent can't be found under that name. +There is no need to suffix the name with '0' since RK312x SoCs do have a +single sdmmc controller - so rename it to the name which is already used +by it's children. + +Fixes: f6022e88faca ("clk: rockchip: add clock controller for rk3128") +Signed-off-by: Alex Bee +Link: https://lore.kernel.org/r/20231127181415.11735-6-knaerzche@gmail.com +Signed-off-by: Heiko Stuebner +Signed-off-by: Sasha Levin +--- + drivers/clk/rockchip/clk-rk3128.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/clk/rockchip/clk-rk3128.c b/drivers/clk/rockchip/clk-rk3128.c +index fcacfe758829c..22e7522360307 100644 +--- a/drivers/clk/rockchip/clk-rk3128.c ++++ b/drivers/clk/rockchip/clk-rk3128.c +@@ -310,7 +310,7 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = { + GATE(SCLK_MIPI_24M, "clk_mipi_24m", "xin24m", CLK_IGNORE_UNUSED, + RK2928_CLKGATE_CON(2), 15, GFLAGS), + +- COMPOSITE(SCLK_SDMMC, "sclk_sdmmc0", mux_mmc_src_p, 0, ++ COMPOSITE(SCLK_SDMMC, "sclk_sdmmc", mux_mmc_src_p, 0, + RK2928_CLKSEL_CON(11), 6, 2, MFLAGS, 0, 6, DFLAGS, + RK2928_CLKGATE_CON(2), 11, GFLAGS), + +-- +2.43.0 + diff --git a/queue-6.6/clk-si521xx-increase-stack-based-print-buffer-size-i.patch b/queue-6.6/clk-si521xx-increase-stack-based-print-buffer-size-i.patch new file mode 100644 index 00000000000..b00b782df57 --- /dev/null +++ b/queue-6.6/clk-si521xx-increase-stack-based-print-buffer-size-i.patch @@ -0,0 +1,67 @@ +From d7efc3ba15d1498b4254812e09ff627a572aa53c Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 27 Oct 2023 10:58:24 +0200 +Subject: clk: si521xx: Increase stack based print buffer size in probe + +From: Marek Vasut + +[ Upstream commit 7e52b1164a474dc7b90f68fbb40e35ccd7f7e2e2 ] + +Increase the size of temporary print buffer on stack to fix the +following warnings reported by LKP. + +Since all the input parameters of snprintf() are under control +of this driver, it is not possible to trigger and overflow here, +but since the print buffer is on stack and discarded once driver +probe() finishes, it is not an issue to increase it by 10 bytes +and fix the warning in the process. Make it so. 
+ +" + drivers/clk/clk-si521xx.c: In function 'si521xx_probe': +>> drivers/clk/clk-si521xx.c:318:26: warning: '%d' directive output may be truncated writing between 1 and 10 bytes into a region of size 2 [-Wformat-truncation=] + snprintf(name, 6, "DIFF%d", i); + ^~ + drivers/clk/clk-si521xx.c:318:21: note: directive argument in the range [0, 2147483647] + snprintf(name, 6, "DIFF%d", i); + ^~~~~~~~ + drivers/clk/clk-si521xx.c:318:3: note: 'snprintf' output between 6 and 15 bytes into a destination of size 6 + snprintf(name, 6, "DIFF%d", i); + ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +" + +Fixes: edc12763a3a2 ("clk: si521xx: Clock driver for Skyworks Si521xx I2C PCIe clock generators") +Reported-by: kernel test robot +Closes: https://lore.kernel.org/oe-kbuild-all/202310260412.AGASjFN4-lkp@intel.com/ +Signed-off-by: Marek Vasut +Link: https://lore.kernel.org/r/20231027085840.30098-1-marex@denx.de +Signed-off-by: Stephen Boyd +Signed-off-by: Sasha Levin +--- + drivers/clk/clk-si521xx.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/drivers/clk/clk-si521xx.c b/drivers/clk/clk-si521xx.c +index ef4ba467e747b..5886bc54aa0e7 100644 +--- a/drivers/clk/clk-si521xx.c ++++ b/drivers/clk/clk-si521xx.c +@@ -282,7 +282,7 @@ static int si521xx_probe(struct i2c_client *client) + const u16 chip_info = (u16)(uintptr_t)device_get_match_data(&client->dev); + const struct clk_parent_data clk_parent_data = { .index = 0 }; + const u8 data[3] = { SI521XX_REG_BC, 1, 1 }; +- unsigned char name[6] = "DIFF0"; ++ unsigned char name[16] = "DIFF0"; + struct clk_init_data init = {}; + struct si521xx *si; + int i, ret; +@@ -316,7 +316,7 @@ static int si521xx_probe(struct i2c_client *client) + /* Register clock */ + for (i = 0; i < hweight16(chip_info); i++) { + memset(&init, 0, sizeof(init)); +- snprintf(name, 6, "DIFF%d", i); ++ snprintf(name, sizeof(name), "DIFF%d", i); + init.name = name; + init.ops = &si521xx_diff_clk_ops; + init.parent_data = &clk_parent_data; +-- +2.43.0 + diff --git a/queue-6.6/crypto-hisilicon-qm-fix-eq-aeq-interrupt-issue.patch b/queue-6.6/crypto-hisilicon-qm-fix-eq-aeq-interrupt-issue.patch new file mode 100644 index 00000000000..2cd20bca972 --- /dev/null +++ b/queue-6.6/crypto-hisilicon-qm-fix-eq-aeq-interrupt-issue.patch @@ -0,0 +1,208 @@ +From 40cefb3fdad3eb762503be81ebe1a187ff05a698 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 13 Oct 2023 11:49:57 +0800 +Subject: crypto: hisilicon/qm - fix EQ/AEQ interrupt issue + +From: Longfang Liu + +[ Upstream commit 5acab6eb592387191c1bb745ba9b815e1e076db5 ] + +During hisilicon accelerator live migration operation. In order to +prevent the problem of EQ/AEQ interrupt loss. Migration driver will +trigger an EQ/AEQ doorbell at the end of the migration. + +This operation may cause double interruption of EQ/AEQ events. +To ensure that the EQ/AEQ interrupt processing function is normal. +The interrupt handling functionality of EQ/AEQ needs to be updated. +Used to handle repeated interrupts event. 
+ +Fixes: b0eed085903e ("hisi_acc_vfio_pci: Add support for VFIO live migration") +Signed-off-by: Longfang Liu +Signed-off-by: Herbert Xu +Signed-off-by: Sasha Levin +--- + drivers/crypto/hisilicon/qm.c | 105 +++++++++++++--------------------- + include/linux/hisi_acc_qm.h | 1 + + 2 files changed, 41 insertions(+), 65 deletions(-) + +diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c +index 193b0b3a77cda..f1589eb3b46af 100644 +--- a/drivers/crypto/hisilicon/qm.c ++++ b/drivers/crypto/hisilicon/qm.c +@@ -855,47 +855,15 @@ static void qm_poll_req_cb(struct hisi_qp *qp) + qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qp->qp_status.cq_head, 1); + } + +-static int qm_get_complete_eqe_num(struct hisi_qm_poll_data *poll_data) +-{ +- struct hisi_qm *qm = poll_data->qm; +- struct qm_eqe *eqe = qm->eqe + qm->status.eq_head; +- u16 eq_depth = qm->eq_depth; +- int eqe_num = 0; +- u16 cqn; +- +- while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) { +- cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK; +- poll_data->qp_finish_id[eqe_num] = cqn; +- eqe_num++; +- +- if (qm->status.eq_head == eq_depth - 1) { +- qm->status.eqc_phase = !qm->status.eqc_phase; +- eqe = qm->eqe; +- qm->status.eq_head = 0; +- } else { +- eqe++; +- qm->status.eq_head++; +- } +- +- if (eqe_num == (eq_depth >> 1) - 1) +- break; +- } +- +- qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); +- +- return eqe_num; +-} +- + static void qm_work_process(struct work_struct *work) + { + struct hisi_qm_poll_data *poll_data = + container_of(work, struct hisi_qm_poll_data, work); + struct hisi_qm *qm = poll_data->qm; ++ u16 eqe_num = poll_data->eqe_num; + struct hisi_qp *qp; +- int eqe_num, i; ++ int i; + +- /* Get qp id of completed tasks and re-enable the interrupt. */ +- eqe_num = qm_get_complete_eqe_num(poll_data); + for (i = eqe_num - 1; i >= 0; i--) { + qp = &qm->qp_array[poll_data->qp_finish_id[i]]; + if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP)) +@@ -911,39 +879,55 @@ static void qm_work_process(struct work_struct *work) + } + } + +-static bool do_qm_eq_irq(struct hisi_qm *qm) ++static void qm_get_complete_eqe_num(struct hisi_qm *qm) + { + struct qm_eqe *eqe = qm->eqe + qm->status.eq_head; +- struct hisi_qm_poll_data *poll_data; +- u16 cqn; ++ struct hisi_qm_poll_data *poll_data = NULL; ++ u16 eq_depth = qm->eq_depth; ++ u16 cqn, eqe_num = 0; + +- if (!readl(qm->io_base + QM_VF_EQ_INT_SOURCE)) +- return false; ++ if (QM_EQE_PHASE(eqe) != qm->status.eqc_phase) { ++ atomic64_inc(&qm->debug.dfx.err_irq_cnt); ++ qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); ++ return; ++ } + +- if (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) { ++ cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK; ++ if (unlikely(cqn >= qm->qp_num)) ++ return; ++ poll_data = &qm->poll_data[cqn]; ++ ++ while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) { + cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK; +- poll_data = &qm->poll_data[cqn]; +- queue_work(qm->wq, &poll_data->work); ++ poll_data->qp_finish_id[eqe_num] = cqn; ++ eqe_num++; ++ ++ if (qm->status.eq_head == eq_depth - 1) { ++ qm->status.eqc_phase = !qm->status.eqc_phase; ++ eqe = qm->eqe; ++ qm->status.eq_head = 0; ++ } else { ++ eqe++; ++ qm->status.eq_head++; ++ } + +- return true; ++ if (eqe_num == (eq_depth >> 1) - 1) ++ break; + } + +- return false; ++ poll_data->eqe_num = eqe_num; ++ queue_work(qm->wq, &poll_data->work); ++ qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); + } + + static irqreturn_t qm_eq_irq(int irq, void *data) + { + struct hisi_qm *qm = data; +- 
bool ret; +- +- ret = do_qm_eq_irq(qm); +- if (ret) +- return IRQ_HANDLED; + +- atomic64_inc(&qm->debug.dfx.err_irq_cnt); +- qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); ++ /* Get qp id of completed tasks and re-enable the interrupt */ ++ qm_get_complete_eqe_num(qm); + +- return IRQ_NONE; ++ return IRQ_HANDLED; + } + + static irqreturn_t qm_mb_cmd_irq(int irq, void *data) +@@ -1025,6 +1009,8 @@ static irqreturn_t qm_aeq_thread(int irq, void *data) + u16 aeq_depth = qm->aeq_depth; + u32 type, qp_id; + ++ atomic64_inc(&qm->debug.dfx.aeq_irq_cnt); ++ + while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) { + type = le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT; + qp_id = le32_to_cpu(aeqe->dw0) & QM_AEQE_CQN_MASK; +@@ -1062,17 +1048,6 @@ static irqreturn_t qm_aeq_thread(int irq, void *data) + return IRQ_HANDLED; + } + +-static irqreturn_t qm_aeq_irq(int irq, void *data) +-{ +- struct hisi_qm *qm = data; +- +- atomic64_inc(&qm->debug.dfx.aeq_irq_cnt); +- if (!readl(qm->io_base + QM_VF_AEQ_INT_SOURCE)) +- return IRQ_NONE; +- +- return IRQ_WAKE_THREAD; +-} +- + static void qm_init_qp_status(struct hisi_qp *qp) + { + struct hisi_qp_status *qp_status = &qp->qp_status; +@@ -5012,8 +4987,8 @@ static int qm_register_aeq_irq(struct hisi_qm *qm) + return 0; + + irq_vector = val & QM_IRQ_VECTOR_MASK; +- ret = request_threaded_irq(pci_irq_vector(pdev, irq_vector), qm_aeq_irq, +- qm_aeq_thread, 0, qm->dev_name, qm); ++ ret = request_threaded_irq(pci_irq_vector(pdev, irq_vector), NULL, ++ qm_aeq_thread, IRQF_ONESHOT, qm->dev_name, qm); + if (ret) + dev_err(&pdev->dev, "failed to request eq irq, ret = %d", ret); + +diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h +index 9da4f3f1e6d61..7262c9993c39c 100644 +--- a/include/linux/hisi_acc_qm.h ++++ b/include/linux/hisi_acc_qm.h +@@ -276,6 +276,7 @@ struct hisi_qm_poll_data { + struct hisi_qm *qm; + struct work_struct work; + u16 *qp_finish_id; ++ u16 eqe_num; + }; + + /** +-- +2.43.0 + diff --git a/queue-6.6/crypto-qat-fix-double-free-during-reset.patch b/queue-6.6/crypto-qat-fix-double-free-during-reset.patch new file mode 100644 index 00000000000..87b9e414bab --- /dev/null +++ b/queue-6.6/crypto-qat-fix-double-free-during-reset.patch @@ -0,0 +1,41 @@ +From ba0c58777ea9a25f99f2ea3f7883bbc3fe80e5a9 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 9 Oct 2023 13:27:19 +0100 +Subject: crypto: qat - fix double free during reset + +From: Svyatoslav Pankratov + +[ Upstream commit 01aed663e6c421aeafc9c330bda630976b50a764 ] + +There is no need to free the reset_data structure if the recovery is +unsuccessful and the reset is synchronous. The function +adf_dev_aer_schedule_reset() handles the cleanup properly. Only +asynchronous resets require such structure to be freed inside the reset +worker. 
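Put differently, ownership of reset_data depends on the mode: a synchronous caller waits for the worker and then frees the structure itself, so the worker may free it only on the asynchronous path. A rough sketch of that split, using hypothetical names rather than the qat driver's real structures:

  #include <stdbool.h>
  #include <stdlib.h>

  enum reset_mode { RESET_SYNC, RESET_ASYNC };

  struct reset_data {
          enum reset_mode mode;
          /* completion object, device pointer, ... */
  };

  /* restart_ok stands in for "did the device come back after restart". */
  static void reset_worker(struct reset_data *rd, bool restart_ok)
  {
          if (!restart_ok) {
                  /* A sync caller still holds rd and frees it after its
                   * wait; freeing it here as well would be the double free.
                   */
                  if (rd->mode == RESET_ASYNC)
                          free(rd);
                  return;
          }
          /* ... signal completion; the async path frees rd once done ... */
  }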
+ +Fixes: d8cba25d2c68 ("crypto: qat - Intel(R) QAT driver framework") +Signed-off-by: Svyatoslav Pankratov +Signed-off-by: Giovanni Cabiddu +Signed-off-by: Herbert Xu +Signed-off-by: Sasha Levin +--- + drivers/crypto/intel/qat/qat_common/adf_aer.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c +index 04af32a2811c8..a39e70bd4b21b 100644 +--- a/drivers/crypto/intel/qat/qat_common/adf_aer.c ++++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c +@@ -92,7 +92,8 @@ static void adf_device_reset_worker(struct work_struct *work) + if (adf_dev_restart(accel_dev)) { + /* The device hanged and we can't restart it so stop here */ + dev_err(&GET_DEV(accel_dev), "Restart device failed\n"); +- kfree(reset_data); ++ if (reset_data->mode == ADF_DEV_RESET_ASYNC) ++ kfree(reset_data); + WARN(1, "QAT: device restart failed. Device is unusable\n"); + return; + } +-- +2.43.0 + diff --git a/queue-6.6/crypto-xts-use-spawn-for-underlying-single-block-cip.patch b/queue-6.6/crypto-xts-use-spawn-for-underlying-single-block-cip.patch new file mode 100644 index 00000000000..b624f07d647 --- /dev/null +++ b/queue-6.6/crypto-xts-use-spawn-for-underlying-single-block-cip.patch @@ -0,0 +1,144 @@ +From 170ba968d7aae1ed796f47394efcbbdbb5192520 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sun, 8 Oct 2023 19:31:16 -0700 +Subject: crypto: xts - use 'spawn' for underlying single-block cipher + +From: Eric Biggers + +[ Upstream commit bb40d32689d73c46de39a0529d551f523f21dc9b ] + +Since commit adad556efcdd ("crypto: api - Fix built-in testing +dependency failures"), the following warning appears when booting an +x86_64 kernel that is configured with +CONFIG_CRYPTO_MANAGER_EXTRA_TESTS=y and CONFIG_CRYPTO_AES_NI_INTEL=y, +even when CONFIG_CRYPTO_XTS=y and CONFIG_CRYPTO_AES=y: + + alg: skcipher: skipping comparison tests for xts-aes-aesni because xts(ecb(aes-generic)) is unavailable + +This is caused by an issue in the xts template where it allocates an +"aes" single-block cipher without declaring a dependency on it via the +crypto_spawn mechanism. This issue was exposed by the above commit +because it reversed the order that the algorithms are tested in. + +Specifically, when "xts(ecb(aes-generic))" is instantiated and tested +during the comparison tests for "xts-aes-aesni", the "xts" template +allocates an "aes" crypto_cipher for encrypting tweaks. This resolves +to "aes-aesni". (Getting "aes-aesni" instead of "aes-generic" here is a +bit weird, but it's apparently intended.) Due to the above-mentioned +commit, the testing of "aes-aesni", and the finalization of its +registration, now happens at this point instead of before. At the end +of that, crypto_remove_spawns() unregisters all algorithm instances that +depend on a lower-priority "aes" implementation such as "aes-generic" +but that do not depend on "aes-aesni". However, because "xts" does not +use the crypto_spawn mechanism for its "aes", its dependency on +"aes-aesni" is not recognized by crypto_remove_spawns(). Thus, +crypto_remove_spawns() unexpectedly unregisters "xts(ecb(aes-generic))". + +Fix this issue by making the "xts" template use the crypto_spawn +mechanism for its "aes" dependency, like what other templates do. + +Note, this fix could be applied as far back as commit f1c131b45410 +("crypto: xts - Convert to skcipher"). 
However, the issue only got +exposed by the much more recent changes to how the crypto API runs the +self-tests, so there should be no need to backport this to very old +kernels. Also, an alternative fix would be to flip the list iteration +order in crypto_start_tests() to restore the original testing order. +I'm thinking we should do that too, since the original order seems more +natural, but it shouldn't be relied on for correctness. + +Fixes: adad556efcdd ("crypto: api - Fix built-in testing dependency failures") +Signed-off-by: Eric Biggers +Signed-off-by: Herbert Xu +Signed-off-by: Sasha Levin +--- + crypto/xts.c | 23 +++++++++++++++-------- + 1 file changed, 15 insertions(+), 8 deletions(-) + +diff --git a/crypto/xts.c b/crypto/xts.c +index 548b302c6c6a0..038f60dd512d9 100644 +--- a/crypto/xts.c ++++ b/crypto/xts.c +@@ -28,7 +28,7 @@ struct xts_tfm_ctx { + + struct xts_instance_ctx { + struct crypto_skcipher_spawn spawn; +- char name[CRYPTO_MAX_ALG_NAME]; ++ struct crypto_cipher_spawn tweak_spawn; + }; + + struct xts_request_ctx { +@@ -306,7 +306,7 @@ static int xts_init_tfm(struct crypto_skcipher *tfm) + + ctx->child = child; + +- tweak = crypto_alloc_cipher(ictx->name, 0, 0); ++ tweak = crypto_spawn_cipher(&ictx->tweak_spawn); + if (IS_ERR(tweak)) { + crypto_free_skcipher(ctx->child); + return PTR_ERR(tweak); +@@ -333,11 +333,13 @@ static void xts_free_instance(struct skcipher_instance *inst) + struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst); + + crypto_drop_skcipher(&ictx->spawn); ++ crypto_drop_cipher(&ictx->tweak_spawn); + kfree(inst); + } + + static int xts_create(struct crypto_template *tmpl, struct rtattr **tb) + { ++ char name[CRYPTO_MAX_ALG_NAME]; + struct skcipher_instance *inst; + struct xts_instance_ctx *ctx; + struct skcipher_alg *alg; +@@ -363,13 +365,13 @@ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb) + cipher_name, 0, mask); + if (err == -ENOENT) { + err = -ENAMETOOLONG; +- if (snprintf(ctx->name, CRYPTO_MAX_ALG_NAME, "ecb(%s)", ++ if (snprintf(name, CRYPTO_MAX_ALG_NAME, "ecb(%s)", + cipher_name) >= CRYPTO_MAX_ALG_NAME) + goto err_free_inst; + + err = crypto_grab_skcipher(&ctx->spawn, + skcipher_crypto_instance(inst), +- ctx->name, 0, mask); ++ name, 0, mask); + } + + if (err) +@@ -398,23 +400,28 @@ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb) + if (!strncmp(cipher_name, "ecb(", 4)) { + int len; + +- len = strscpy(ctx->name, cipher_name + 4, sizeof(ctx->name)); ++ len = strscpy(name, cipher_name + 4, sizeof(name)); + if (len < 2) + goto err_free_inst; + +- if (ctx->name[len - 1] != ')') ++ if (name[len - 1] != ')') + goto err_free_inst; + +- ctx->name[len - 1] = 0; ++ name[len - 1] = 0; + + if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, +- "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) { ++ "xts(%s)", name) >= CRYPTO_MAX_ALG_NAME) { + err = -ENAMETOOLONG; + goto err_free_inst; + } + } else + goto err_free_inst; + ++ err = crypto_grab_cipher(&ctx->tweak_spawn, ++ skcipher_crypto_instance(inst), name, 0, mask); ++ if (err) ++ goto err_free_inst; ++ + inst->alg.base.cra_priority = alg->base.cra_priority; + inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE; + inst->alg.base.cra_alignmask = alg->base.cra_alignmask | +-- +2.43.0 + diff --git a/queue-6.6/cxl-add-cxl_decoders_committed-helper.patch b/queue-6.6/cxl-add-cxl_decoders_committed-helper.patch new file mode 100644 index 00000000000..aa445e3e9f0 --- /dev/null +++ b/queue-6.6/cxl-add-cxl_decoders_committed-helper.patch @@ -0,0 +1,125 @@ +From 
0e03ff05833f6d325430ef301e358f12de327a74 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 16 Oct 2023 10:57:48 -0700 +Subject: cxl: Add cxl_decoders_committed() helper + +From: Dave Jiang + +[ Upstream commit 458ba8189cb4380aa6a6cc4d52ab067f80a64829 ] + +Add a helper to retrieve the number of decoders committed for the port. +Replace all the open coding of the calculation with the helper. + +Link: https://lore.kernel.org/linux-cxl/651c98472dfed_ae7e729495@dwillia2-xfh.jf.intel.com.notmuch/ +Suggested-by: Dan Williams +Signed-off-by: Dave Jiang +Reviewed-by: Jonathan Cameron +Reviewed-by: Jim Harris +Reviewed-by: Alison Schofield +Link: https://lore.kernel.org/r/169747906849.272156.1729290904857372335.stgit@djiang5-mobl3 +Signed-off-by: Dan Williams +Stable-dep-of: 5558b92e8d39 ("cxl/core: Always hold region_rwsem while reading poison lists") +Signed-off-by: Sasha Levin +--- + drivers/cxl/core/hdm.c | 7 ++++--- + drivers/cxl/core/mbox.c | 2 +- + drivers/cxl/core/memdev.c | 4 ++-- + drivers/cxl/core/port.c | 7 +++++++ + drivers/cxl/cxl.h | 1 + + 5 files changed, 15 insertions(+), 6 deletions(-) + +diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c +index 22829267ccd86..54a535f6736bd 100644 +--- a/drivers/cxl/core/hdm.c ++++ b/drivers/cxl/core/hdm.c +@@ -634,10 +634,11 @@ static int cxl_decoder_commit(struct cxl_decoder *cxld) + if (cxld->flags & CXL_DECODER_F_ENABLE) + return 0; + +- if (port->commit_end + 1 != id) { ++ if (cxl_num_decoders_committed(port) != id) { + dev_dbg(&port->dev, + "%s: out of order commit, expected decoder%d.%d\n", +- dev_name(&cxld->dev), port->id, port->commit_end + 1); ++ dev_name(&cxld->dev), port->id, ++ cxl_num_decoders_committed(port)); + return -EBUSY; + } + +@@ -847,7 +848,7 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld, + cxld->target_type = CXL_DECODER_HOSTONLYMEM; + else + cxld->target_type = CXL_DECODER_DEVMEM; +- if (cxld->id != port->commit_end + 1) { ++ if (cxld->id != cxl_num_decoders_committed(port)) { + dev_warn(&port->dev, + "decoder%d.%d: Committed out of order\n", + port->id, cxld->id); +diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c +index b91bb98869917..b12986b968da4 100644 +--- a/drivers/cxl/core/mbox.c ++++ b/drivers/cxl/core/mbox.c +@@ -1200,7 +1200,7 @@ int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd) + * Require an endpoint to be safe otherwise the driver can not + * be sure that the device is unmapped. 
+ */ +- if (endpoint && endpoint->commit_end == -1) ++ if (endpoint && cxl_num_decoders_committed(endpoint) == 0) + rc = __cxl_mem_sanitize(mds, cmd); + else + rc = -EBUSY; +diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c +index fed9573cf355e..fc5c2b414793b 100644 +--- a/drivers/cxl/core/memdev.c ++++ b/drivers/cxl/core/memdev.c +@@ -231,7 +231,7 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd) + if (rc) + return rc; + +- if (port->commit_end == -1) { ++ if (cxl_num_decoders_committed(port) == 0) { + /* No regions mapped to this memdev */ + rc = cxl_get_poison_by_memdev(cxlmd); + } else { +@@ -282,7 +282,7 @@ static struct cxl_region *cxl_dpa_to_region(struct cxl_memdev *cxlmd, u64 dpa) + .dpa = dpa, + }; + port = cxlmd->endpoint; +- if (port && is_cxl_endpoint(port) && port->commit_end != -1) ++ if (port && is_cxl_endpoint(port) && cxl_num_decoders_committed(port)) + device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region); + + return ctx.cxlr; +diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c +index 1e0558d18b965..f430280fa6bd0 100644 +--- a/drivers/cxl/core/port.c ++++ b/drivers/cxl/core/port.c +@@ -37,6 +37,13 @@ DECLARE_RWSEM(cxl_region_rwsem); + static DEFINE_IDA(cxl_port_ida); + static DEFINE_XARRAY(cxl_root_buses); + ++int cxl_num_decoders_committed(struct cxl_port *port) ++{ ++ lockdep_assert_held(&cxl_region_rwsem); ++ ++ return port->commit_end + 1; ++} ++ + static ssize_t devtype_show(struct device *dev, struct device_attribute *attr, + char *buf) + { +diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h +index b5b015b661eae..6c6afda0e4c61 100644 +--- a/drivers/cxl/cxl.h ++++ b/drivers/cxl/cxl.h +@@ -679,6 +679,7 @@ static inline bool is_cxl_root(struct cxl_port *port) + return port->uport_dev == port->dev.parent; + } + ++int cxl_num_decoders_committed(struct cxl_port *port); + bool is_cxl_port(const struct device *dev); + struct cxl_port *to_cxl_port(const struct device *dev); + struct pci_bus; +-- +2.43.0 + diff --git a/queue-6.6/cxl-core-always-hold-region_rwsem-while-reading-pois.patch b/queue-6.6/cxl-core-always-hold-region_rwsem-while-reading-pois.patch new file mode 100644 index 00000000000..9018d6041c7 --- /dev/null +++ b/queue-6.6/cxl-core-always-hold-region_rwsem-while-reading-pois.patch @@ -0,0 +1,100 @@ +From 203884f9753f21acf3a2cd94f71156f6f30d5501 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sun, 26 Nov 2023 16:09:29 -0800 +Subject: cxl/core: Always hold region_rwsem while reading poison lists + +From: Alison Schofield + +[ Upstream commit 5558b92e8d39e18aa19619be2ee37274e9592528 ] + +A read of a device poison list is triggered via a sysfs attribute +and the results are logged as kernel trace events of type cxl_poison. +The work is managed by either: a) the region driver when one of more +regions map the device, or by b) the memdev driver when no regions +map the device. + +In the case of a) the region driver holds the region_rwsem while +reading the poison by committed endpoint decoder mappings and for +any unmapped resources. This makes sure that the cxl_poison trace +event trace reports valid region info. (Region name, HPA, and UUID). + +In the case of b) the memdev driver holds the dpa_rwsem preventing +new DPA resources from being attached to a region. However, it leaves +a gap between region attach and decoder commit actions. If a DPA in +the gap is in the poison list, the cxl_poison trace event will omit +the region info. 
+ +Close the gap by holding the region_rwsem and the dpa_rwsem when +reading poison per memdev. Since both methods now hold both locks, +down_read both from the caller. Doing so also addresses the lockdep +assert that found this issue: +Commit 458ba8189cb4 ("cxl: Add cxl_decoders_committed() helper") + +Fixes: f0832a586396 ("cxl/region: Provide region info to the cxl_poison trace event") +Signed-off-by: Alison Schofield +Reviewed-by: Davidlohr Bueso +Reviewed-by: Dave Jiang +Link: https://lore.kernel.org/r/08e8e7ec9a3413b91d51de39e385653494b1eed0.1701041440.git.alison.schofield@intel.com +Signed-off-by: Dan Williams +Signed-off-by: Sasha Levin +--- + drivers/cxl/core/memdev.c | 9 ++++++++- + drivers/cxl/core/region.c | 5 ----- + 2 files changed, 8 insertions(+), 6 deletions(-) + +diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c +index fc5c2b414793b..5ad1b13e780af 100644 +--- a/drivers/cxl/core/memdev.c ++++ b/drivers/cxl/core/memdev.c +@@ -227,10 +227,16 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd) + if (!port || !is_cxl_endpoint(port)) + return -EINVAL; + +- rc = down_read_interruptible(&cxl_dpa_rwsem); ++ rc = down_read_interruptible(&cxl_region_rwsem); + if (rc) + return rc; + ++ rc = down_read_interruptible(&cxl_dpa_rwsem); ++ if (rc) { ++ up_read(&cxl_region_rwsem); ++ return rc; ++ } ++ + if (cxl_num_decoders_committed(port) == 0) { + /* No regions mapped to this memdev */ + rc = cxl_get_poison_by_memdev(cxlmd); +@@ -239,6 +245,7 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd) + rc = cxl_get_poison_by_endpoint(port); + } + up_read(&cxl_dpa_rwsem); ++ up_read(&cxl_region_rwsem); + + return rc; + } +diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c +index 9d60020c5cb3b..e7206367ec669 100644 +--- a/drivers/cxl/core/region.c ++++ b/drivers/cxl/core/region.c +@@ -2467,10 +2467,6 @@ int cxl_get_poison_by_endpoint(struct cxl_port *port) + struct cxl_poison_context ctx; + int rc = 0; + +- rc = down_read_interruptible(&cxl_region_rwsem); +- if (rc) +- return rc; +- + ctx = (struct cxl_poison_context) { + .port = port + }; +@@ -2480,7 +2476,6 @@ int cxl_get_poison_by_endpoint(struct cxl_port *port) + rc = cxl_get_poison_unmapped(to_cxl_memdev(port->uport_dev), + &ctx); + +- up_read(&cxl_region_rwsem); + return rc; + } + +-- +2.43.0 + diff --git a/queue-6.6/cxl-pmu-ensure-put_device-on-pmu-devices.patch b/queue-6.6/cxl-pmu-ensure-put_device-on-pmu-devices.patch new file mode 100644 index 00000000000..5644bb9d46e --- /dev/null +++ b/queue-6.6/cxl-pmu-ensure-put_device-on-pmu-devices.patch @@ -0,0 +1,78 @@ +From 13e21064eeff6b62df3a4c867cd232e768aea0e8 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 16 Oct 2023 16:25:05 -0700 +Subject: cxl/pmu: Ensure put_device on pmu devices + +From: Ira Weiny + +[ Upstream commit ef3d5cf9c59cccb012aa6b93d99f4c6eb5d6648e ] + +The following kmemleaks were detected when removing the cxl module +stack: + +unreferenced object 0xffff88822616b800 (size 1024): +... + backtrace: + [<00000000bedc6f83>] kmalloc_trace+0x26/0x90 + [<00000000448d1afc>] devm_cxl_pmu_add+0x3a/0x110 [cxl_core] + [<00000000ca3bfe16>] 0xffffffffa105213b + [<00000000ba7f78dc>] local_pci_probe+0x41/0x90 + [<000000005bb027ac>] pci_device_probe+0xb0/0x1c0 +... +unreferenced object 0xffff8882260abcc0 (size 16): +... + hex dump (first 16 bytes): + 70 6d 75 5f 6d 65 6d 30 2e 30 00 26 82 88 ff ff pmu_mem0.0.&.... + backtrace: +... 
+ [<00000000152b5e98>] dev_set_name+0x43/0x50 + [<00000000c228798b>] devm_cxl_pmu_add+0x102/0x110 [cxl_core] + [<00000000ca3bfe16>] 0xffffffffa105213b + [<00000000ba7f78dc>] local_pci_probe+0x41/0x90 + [<000000005bb027ac>] pci_device_probe+0xb0/0x1c0 +... +unreferenced object 0xffff8882272af200 (size 256): +... + backtrace: + [<00000000bedc6f83>] kmalloc_trace+0x26/0x90 + [<00000000a14d1813>] device_add+0x4ea/0x890 + [<00000000a3f07b47>] devm_cxl_pmu_add+0xbe/0x110 [cxl_core] + [<00000000ca3bfe16>] 0xffffffffa105213b + [<00000000ba7f78dc>] local_pci_probe+0x41/0x90 + [<000000005bb027ac>] pci_device_probe+0xb0/0x1c0 +... + +devm_cxl_pmu_add() correctly registers a device remove function but it +only calls device_del() which is only part of device unregistration. + +Properly call device_unregister() to free up the memory associated with +the device. + +Fixes: 1ad3f701c399 ("cxl/pci: Find and register CXL PMU devices") +Cc: Jonathan Cameron +Signed-off-by: Ira Weiny +Reviewed-by: Jonathan Cameron +Reviewed-by: Dave Jiang +Link: https://lore.kernel.org/r/20231016-pmu-unregister-fix-v1-1-1e2eb2fa3c69@intel.com +Signed-off-by: Dan Williams +Signed-off-by: Sasha Levin +--- + drivers/cxl/core/pmu.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/cxl/core/pmu.c b/drivers/cxl/core/pmu.c +index 7684c843e5a59..5d8e06b0ba6e8 100644 +--- a/drivers/cxl/core/pmu.c ++++ b/drivers/cxl/core/pmu.c +@@ -23,7 +23,7 @@ const struct device_type cxl_pmu_type = { + + static void remove_dev(void *dev) + { +- device_del(dev); ++ device_unregister(dev); + } + + int devm_cxl_pmu_add(struct device *parent, struct cxl_pmu_regs *regs, +-- +2.43.0 + diff --git a/queue-6.6/dmaengine-fsl-edma-add-judgment-on-enabling-round-ro.patch b/queue-6.6/dmaengine-fsl-edma-add-judgment-on-enabling-round-ro.patch new file mode 100644 index 00000000000..3d8d1466477 --- /dev/null +++ b/queue-6.6/dmaengine-fsl-edma-add-judgment-on-enabling-round-ro.patch @@ -0,0 +1,60 @@ +From 435a415648504af1ca5e28c029aa24f5380cc8e2 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 14 Nov 2023 06:57:13 +0800 +Subject: dmaengine: fsl-edma: Add judgment on enabling round robin arbitration + +From: Xiaolei Wang + +[ Upstream commit 3448397a47c08c291c3fccb7ac5f0f429fd547e0 ] + +Add judgment on enabling round robin arbitration to avoid +exceptions if this function is not supported. 
+ +Call trace: + fsl_edma_resume_early+0x1d4/0x208 + dpm_run_callback+0xd4/0x304 + device_resume_early+0xb0/0x208 + dpm_resume_early+0x224/0x528 + suspend_devices_and_enter+0x3e4/0xd00 + pm_suspend+0x3c4/0x910 + state_store+0x90/0x124 + kobj_attr_store+0x48/0x64 + sysfs_kf_write+0x84/0xb4 + kernfs_fop_write_iter+0x19c/0x264 + vfs_write+0x664/0x858 + ksys_write+0xc8/0x180 + __arm64_sys_write+0x44/0x58 + invoke_syscall+0x5c/0x178 + el0_svc_common.constprop.0+0x11c/0x14c + do_el0_svc+0x30/0x40 + el0_svc+0x58/0xa8 + el0t_64_sync_handler+0xc0/0xc4 + el0t_64_sync+0x190/0x194 + +Fixes: 72f5801a4e2b ("dmaengine: fsl-edma: integrate v3 support") +Signed-off-by: Xiaolei Wang +Reviewed-by: Frank Li +Link: https://lore.kernel.org/r/20231113225713.1892643-3-xiaolei.wang@windriver.com +Signed-off-by: Vinod Koul +Signed-off-by: Sasha Levin +--- + drivers/dma/fsl-edma-main.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c +index 242a70cf85f4b..db6cd8431f30a 100644 +--- a/drivers/dma/fsl-edma-main.c ++++ b/drivers/dma/fsl-edma-main.c +@@ -674,7 +674,8 @@ static int fsl_edma_resume_early(struct device *dev) + fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id, true); + } + +- edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr); ++ if (!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_SPLIT_REG)) ++ edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr); + + return 0; + } +-- +2.43.0 + diff --git a/queue-6.6/dmaengine-fsl-edma-do-not-suspend-and-resume-the-mas.patch b/queue-6.6/dmaengine-fsl-edma-do-not-suspend-and-resume-the-mas.patch new file mode 100644 index 00000000000..5358a153691 --- /dev/null +++ b/queue-6.6/dmaengine-fsl-edma-do-not-suspend-and-resume-the-mas.patch @@ -0,0 +1,88 @@ +From 9285c92148afa0a1eb7e7798e654ebbdbaadbc92 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 14 Nov 2023 06:57:12 +0800 +Subject: dmaengine: fsl-edma: Do not suspend and resume the masked dma channel + when the system is sleeping + +From: Xiaolei Wang + +[ Upstream commit 2838a897654c4810153cc51646414ffa54fd23b0 ] + +Some channels may be masked. 
When the system is suspended, +if these masked channels are not filtered out, this will +lead to null pointer operations and system crash: + +Unable to handle kernel NULL pointer dereference at virtual address +Mem abort info: +ESR = 0x0000000096000004 +EC = 0x25: DABT (current EL), IL = 32 bits +SET = 0, FnV = 0 +EA = 0, S1PTW = 0 +FSC = 0x04: level 0 translation fault +Data abort info: +ISV = 0, ISS = 0x00000004, ISS2 = 0x00000000 +CM = 0, WnR = 0, TnD = 0, TagAccess = 0 +GCS = 0, Overlay = 0, DirtyBit = 0, Xs = 0 +user pgtable: 4k pages, 48-bit VAs, pgdp=0000000894300000 +[00000000000002a0] pgd=0000000000000000, p4d=0000000000000000 +Internal error: Oops: 0000000096000004 [#1] PREEMPT SMP +Modules linked in: +CPU: 1 PID: 989 Comm: sh Tainted: G B 6.6.0-16203-g557fb7a3ec4c-dirty #70 +Hardware name: Freescale i.MX8QM MEK (DT) +pstate: 400000c5 (nZcv daIF -PAN -UAO -TCO -DIT -SSBS BTYPE=--) + pc: fsl_edma_disable_request+0x3c/0x78 + lr: fsl_edma_disable_request+0x3c/0x78 + sp:ffff800089ae7690 + x29: ffff800089ae7690 x28: ffff000807ab5440 x27: ffff000807ab5830 + x26: 0000000000000008 x25: 0000000000000278 x24: 0000000000000001 + 23: ffff000807ab4328 x22: 0000000000000000 x21: 0000000000000009 + x20: ffff800082616940 x19: 0000000000000000 x18: 0000000000000000 + x17: 3d3d3d3d3d3d3d3d x16: 3d3d3d3d3d3d3d3d x15: 3d3d3d3d3d3d3d3d + x14: 3d3d3d3d3d3d3d3d x13: 3d3d3d3d3d3d3d3d x12: 1ffff00010d45724 + x11: ffff700010d45724 x10: dfff800000000000 x9: dfff800000000000 + x8: 00008fffef2ba8dc x7: 0000000000000001 x6: ffff800086a2b927 + x5: ffff800086a2b920 x4: ffff700010d45725 x3: ffff8000800d5bbc + x2 : 0000000000000000 x1 : ffff000800c1d880 x0 : 0000000000000001 + Call trace: + fsl_edma_disable_request+0x3c/0x78 + fsl_edma_suspend_late+0x128/0x12c + dpm_run_callback+0xd4/0x304 + __device_suspend_late+0xd0/0x240 + dpm_suspend_late+0x174/0x59c + suspend_devices_and_enter+0x194/0xd00 + pm_suspend+0x3c4/0x910 + +Fixes: 72f5801a4e2b ("dmaengine: fsl-edma: integrate v3 support") +Signed-off-by: Xiaolei Wang +Link: https://lore.kernel.org/r/20231113225713.1892643-2-xiaolei.wang@windriver.com +Signed-off-by: Vinod Koul +Signed-off-by: Sasha Levin +--- + drivers/dma/fsl-edma-main.c | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c +index 8c4ed7012e232..242a70cf85f4b 100644 +--- a/drivers/dma/fsl-edma-main.c ++++ b/drivers/dma/fsl-edma-main.c +@@ -640,6 +640,8 @@ static int fsl_edma_suspend_late(struct device *dev) + + for (i = 0; i < fsl_edma->n_chans; i++) { + fsl_chan = &fsl_edma->chans[i]; ++ if (fsl_edma->chan_masked & BIT(i)) ++ continue; + spin_lock_irqsave(&fsl_chan->vchan.lock, flags); + /* Make sure chan is idle or will force disable. 
*/ + if (unlikely(!fsl_chan->idle)) { +@@ -664,6 +666,8 @@ static int fsl_edma_resume_early(struct device *dev) + + for (i = 0; i < fsl_edma->n_chans; i++) { + fsl_chan = &fsl_edma->chans[i]; ++ if (fsl_edma->chan_masked & BIT(i)) ++ continue; + fsl_chan->pm_state = RUNNING; + edma_write_tcdreg(fsl_chan, 0, csr); + if (fsl_chan->slave_id != 0) +-- +2.43.0 + diff --git a/queue-6.6/dmaengine-fsl-edma-fix-wrong-pointer-check-in-fsl_ed.patch b/queue-6.6/dmaengine-fsl-edma-fix-wrong-pointer-check-in-fsl_ed.patch new file mode 100644 index 00000000000..74a4bdfdcfe --- /dev/null +++ b/queue-6.6/dmaengine-fsl-edma-fix-wrong-pointer-check-in-fsl_ed.patch @@ -0,0 +1,40 @@ +From 01ab571a6f081f36a7b56e14058689c0fcd93e55 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 29 Nov 2023 17:00:00 +0800 +Subject: dmaengine: fsl-edma: fix wrong pointer check in fsl_edma3_attach_pd() + +From: Yang Yingliang + +[ Upstream commit bffa7218dcddb80e7f18dfa545dd4b359b11dd93 ] + +device_link_add() returns NULL pointer not PTR_ERR() when it fails, +so replace the IS_ERR() check with NULL pointer check. + +Fixes: 72f5801a4e2b ("dmaengine: fsl-edma: integrate v3 support") +Signed-off-by: Yang Yingliang +Link: https://lore.kernel.org/r/20231129090000.841440-1-yangyingliang@huaweicloud.com +Signed-off-by: Vinod Koul +Signed-off-by: Sasha Levin +--- + drivers/dma/fsl-edma-main.c | 5 ++--- + 1 file changed, 2 insertions(+), 3 deletions(-) + +diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c +index db6cd8431f30a..00cb70aca34a3 100644 +--- a/drivers/dma/fsl-edma-main.c ++++ b/drivers/dma/fsl-edma-main.c +@@ -400,9 +400,8 @@ static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_eng + link = device_link_add(dev, pd_chan, DL_FLAG_STATELESS | + DL_FLAG_PM_RUNTIME | + DL_FLAG_RPM_ACTIVE); +- if (IS_ERR(link)) { +- dev_err(dev, "Failed to add device_link to %d: %ld\n", i, +- PTR_ERR(link)); ++ if (!link) { ++ dev_err(dev, "Failed to add device_link to %d\n", i); + return -EINVAL; + } + +-- +2.43.0 + diff --git a/queue-6.6/dmaengine-idxd-protect-int_handle-field-in-hw-descri.patch b/queue-6.6/dmaengine-idxd-protect-int_handle-field-in-hw-descri.patch new file mode 100644 index 00000000000..d5bf7832734 --- /dev/null +++ b/queue-6.6/dmaengine-idxd-protect-int_handle-field-in-hw-descri.patch @@ -0,0 +1,59 @@ +From 3221f3bebbde03b6b93527ea52bf0c01373f6c56 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 11 Dec 2023 13:37:03 +0800 +Subject: dmaengine: idxd: Protect int_handle field in hw descriptor + +From: Guanjun + +[ Upstream commit 778dfacc903d4b1ef5b7a9726e3a36bc15913d29 ] + +The int_handle field in hw descriptor should also be protected +by wmb() before possibly triggering a DMA read. 
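The rule being restored is the usual one for descriptor submission: complete every CPU store the device may DMA-read — including fields written late, such as the interrupt handle — then issue the write barrier, and only then perform the MMIO that lets the hardware start fetching. A condensed, kernel-style sketch with a placeholder descriptor and a generic register write standing in for the driver's portal submission:

  #include <linux/types.h>
  #include <linux/llist.h>
  #include <linux/io.h>
  #include <asm/barrier.h>

  struct my_desc {                        /* placeholder, not idxd_desc */
          struct llist_node node;
          u16 int_handle;
          u64 dma_addr;
  };

  static LLIST_HEAD(pending_list);

  static void submit_desc(struct my_desc *desc, u16 handle,
                          void __iomem *portal)
  {
          desc->int_handle = handle;              /* last CPU-side store */
          llist_add(&desc->node, &pending_list);  /* publish for the IRQ path */

          wmb();          /* stores must be visible before the device reads */

          writeq(desc->dma_addr, portal);         /* hardware may fetch now */
  }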
+ +Fixes: eb0cf33a91b4 (dmaengine: idxd: move interrupt handle assignment) +Signed-off-by: Guanjun +Reviewed-by: Dave Jiang +Reviewed-by: Fenghua Yu +Reviewed-by: Lijun Pan +Link: https://lore.kernel.org/r/20231211053704.2725417-2-guanjun@linux.alibaba.com +Signed-off-by: Vinod Koul +Signed-off-by: Sasha Levin +--- + drivers/dma/idxd/submit.c | 14 +++++++------- + 1 file changed, 7 insertions(+), 7 deletions(-) + +diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c +index c01db23e3333f..3f922518e3a52 100644 +--- a/drivers/dma/idxd/submit.c ++++ b/drivers/dma/idxd/submit.c +@@ -182,13 +182,6 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc) + + portal = idxd_wq_portal_addr(wq); + +- /* +- * The wmb() flushes writes to coherent DMA data before +- * possibly triggering a DMA read. The wmb() is necessary +- * even on UP because the recipient is a device. +- */ +- wmb(); +- + /* + * Pending the descriptor to the lockless list for the irq_entry + * that we designated the descriptor to. +@@ -199,6 +192,13 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc) + llist_add(&desc->llnode, &ie->pending_llist); + } + ++ /* ++ * The wmb() flushes writes to coherent DMA data before ++ * possibly triggering a DMA read. The wmb() is necessary ++ * even on UP because the recipient is a device. ++ */ ++ wmb(); ++ + if (wq_dedicated(wq)) { + iosubmit_cmds512(portal, desc->hw, 1); + } else { +-- +2.43.0 + diff --git a/queue-6.6/dmaengine-ti-k3-psil-am62-fix-spi-pdma-data.patch b/queue-6.6/dmaengine-ti-k3-psil-am62-fix-spi-pdma-data.patch new file mode 100644 index 00000000000..9b9dcb92821 --- /dev/null +++ b/queue-6.6/dmaengine-ti-k3-psil-am62-fix-spi-pdma-data.patch @@ -0,0 +1,70 @@ +From 053c2285d1a485905501573b5bccadc3ceac94cc Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 30 Oct 2023 20:01:13 +0100 +Subject: dmaengine: ti: k3-psil-am62: Fix SPI PDMA data + +From: Ronald Wahl + +[ Upstream commit 744f5e7b69710701dc225020769138f8ca2894df ] + +AM62x has 3 SPI channels where each channel has 4 TX and 4 RX threads. +This also fixes the thread numbers. 
+ +Signed-off-by: Ronald Wahl +Fixes: 5ac6bfb58777 ("dmaengine: ti: k3-psil: Add AM62x PSIL and PDMA data") +Reviewed-by: Jai Luthra +Acked-by: Peter Ujfalusi +Link: https://lore.kernel.org/r/20231030190113.16782-1-rwahl@gmx.de +Signed-off-by: Vinod Koul +Signed-off-by: Sasha Levin +--- + drivers/dma/ti/k3-psil-am62.c | 12 ++++++------ + 1 file changed, 6 insertions(+), 6 deletions(-) + +diff --git a/drivers/dma/ti/k3-psil-am62.c b/drivers/dma/ti/k3-psil-am62.c +index 2b6fd6e37c610..1272b1541f61e 100644 +--- a/drivers/dma/ti/k3-psil-am62.c ++++ b/drivers/dma/ti/k3-psil-am62.c +@@ -74,7 +74,9 @@ static struct psil_ep am62_src_ep_map[] = { + PSIL_SAUL(0x7505, 21, 35, 8, 36, 0), + PSIL_SAUL(0x7506, 22, 43, 8, 43, 0), + PSIL_SAUL(0x7507, 23, 43, 8, 44, 0), +- /* PDMA_MAIN0 - SPI0-3 */ ++ /* PDMA_MAIN0 - SPI0-2 */ ++ PSIL_PDMA_XY_PKT(0x4300), ++ PSIL_PDMA_XY_PKT(0x4301), + PSIL_PDMA_XY_PKT(0x4302), + PSIL_PDMA_XY_PKT(0x4303), + PSIL_PDMA_XY_PKT(0x4304), +@@ -85,8 +87,6 @@ static struct psil_ep am62_src_ep_map[] = { + PSIL_PDMA_XY_PKT(0x4309), + PSIL_PDMA_XY_PKT(0x430a), + PSIL_PDMA_XY_PKT(0x430b), +- PSIL_PDMA_XY_PKT(0x430c), +- PSIL_PDMA_XY_PKT(0x430d), + /* PDMA_MAIN1 - UART0-6 */ + PSIL_PDMA_XY_PKT(0x4400), + PSIL_PDMA_XY_PKT(0x4401), +@@ -141,7 +141,9 @@ static struct psil_ep am62_dst_ep_map[] = { + /* SAUL */ + PSIL_SAUL(0xf500, 27, 83, 8, 83, 1), + PSIL_SAUL(0xf501, 28, 91, 8, 91, 1), +- /* PDMA_MAIN0 - SPI0-3 */ ++ /* PDMA_MAIN0 - SPI0-2 */ ++ PSIL_PDMA_XY_PKT(0xc300), ++ PSIL_PDMA_XY_PKT(0xc301), + PSIL_PDMA_XY_PKT(0xc302), + PSIL_PDMA_XY_PKT(0xc303), + PSIL_PDMA_XY_PKT(0xc304), +@@ -152,8 +154,6 @@ static struct psil_ep am62_dst_ep_map[] = { + PSIL_PDMA_XY_PKT(0xc309), + PSIL_PDMA_XY_PKT(0xc30a), + PSIL_PDMA_XY_PKT(0xc30b), +- PSIL_PDMA_XY_PKT(0xc30c), +- PSIL_PDMA_XY_PKT(0xc30d), + /* PDMA_MAIN1 - UART0-6 */ + PSIL_PDMA_XY_PKT(0xc400), + PSIL_PDMA_XY_PKT(0xc401), +-- +2.43.0 + diff --git a/queue-6.6/dmaengine-ti-k3-psil-am62a-fix-spi-pdma-data.patch b/queue-6.6/dmaengine-ti-k3-psil-am62a-fix-spi-pdma-data.patch new file mode 100644 index 00000000000..32e7e9b4a57 --- /dev/null +++ b/queue-6.6/dmaengine-ti-k3-psil-am62a-fix-spi-pdma-data.patch @@ -0,0 +1,70 @@ +From 2ce7ccda57c65207b8593a0f6a91e482add25c96 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 23 Nov 2023 14:57:31 +0530 +Subject: dmaengine: ti: k3-psil-am62a: Fix SPI PDMA data + +From: Jai Luthra + +[ Upstream commit be37542afbfcd27b3bb99a135abf9b4736b96f75 ] + +AM62Ax has 3 SPI channels where each channel has 4x TX and 4x RX +threads. Also fix the thread numbers to match what the firmware expects +according to the PSI-L device description. 
+ +Link: http://downloads.ti.com/tisci/esd/latest/5_soc_doc/am62ax/psil_cfg.html [1] +Fixes: aac6db7e243a ("dmaengine: ti: k3-psil-am62a: Add AM62Ax PSIL and PDMA data") +Signed-off-by: Jai Luthra +Link: https://lore.kernel.org/r/20231123-psil_fix-v1-1-6604d80819be@ti.com +Signed-off-by: Vinod Koul +Signed-off-by: Sasha Levin +--- + drivers/dma/ti/k3-psil-am62a.c | 12 ++++++------ + 1 file changed, 6 insertions(+), 6 deletions(-) + +diff --git a/drivers/dma/ti/k3-psil-am62a.c b/drivers/dma/ti/k3-psil-am62a.c +index ca9d71f914220..4cf9123b0e932 100644 +--- a/drivers/dma/ti/k3-psil-am62a.c ++++ b/drivers/dma/ti/k3-psil-am62a.c +@@ -84,7 +84,9 @@ static struct psil_ep am62a_src_ep_map[] = { + PSIL_SAUL(0x7505, 21, 35, 8, 36, 0), + PSIL_SAUL(0x7506, 22, 43, 8, 43, 0), + PSIL_SAUL(0x7507, 23, 43, 8, 44, 0), +- /* PDMA_MAIN0 - SPI0-3 */ ++ /* PDMA_MAIN0 - SPI0-2 */ ++ PSIL_PDMA_XY_PKT(0x4300), ++ PSIL_PDMA_XY_PKT(0x4301), + PSIL_PDMA_XY_PKT(0x4302), + PSIL_PDMA_XY_PKT(0x4303), + PSIL_PDMA_XY_PKT(0x4304), +@@ -95,8 +97,6 @@ static struct psil_ep am62a_src_ep_map[] = { + PSIL_PDMA_XY_PKT(0x4309), + PSIL_PDMA_XY_PKT(0x430a), + PSIL_PDMA_XY_PKT(0x430b), +- PSIL_PDMA_XY_PKT(0x430c), +- PSIL_PDMA_XY_PKT(0x430d), + /* PDMA_MAIN1 - UART0-6 */ + PSIL_PDMA_XY_PKT(0x4400), + PSIL_PDMA_XY_PKT(0x4401), +@@ -151,7 +151,9 @@ static struct psil_ep am62a_dst_ep_map[] = { + /* SAUL */ + PSIL_SAUL(0xf500, 27, 83, 8, 83, 1), + PSIL_SAUL(0xf501, 28, 91, 8, 91, 1), +- /* PDMA_MAIN0 - SPI0-3 */ ++ /* PDMA_MAIN0 - SPI0-2 */ ++ PSIL_PDMA_XY_PKT(0xc300), ++ PSIL_PDMA_XY_PKT(0xc301), + PSIL_PDMA_XY_PKT(0xc302), + PSIL_PDMA_XY_PKT(0xc303), + PSIL_PDMA_XY_PKT(0xc304), +@@ -162,8 +164,6 @@ static struct psil_ep am62a_dst_ep_map[] = { + PSIL_PDMA_XY_PKT(0xc309), + PSIL_PDMA_XY_PKT(0xc30a), + PSIL_PDMA_XY_PKT(0xc30b), +- PSIL_PDMA_XY_PKT(0xc30c), +- PSIL_PDMA_XY_PKT(0xc30d), + /* PDMA_MAIN1 - UART0-6 */ + PSIL_PDMA_XY_PKT(0xc400), + PSIL_PDMA_XY_PKT(0xc401), +-- +2.43.0 + diff --git a/queue-6.6/drm-amd-display-increase-frame-warning-limit-with-ka.patch b/queue-6.6/drm-amd-display-increase-frame-warning-limit-with-ka.patch new file mode 100644 index 00000000000..07a27971eeb --- /dev/null +++ b/queue-6.6/drm-amd-display-increase-frame-warning-limit-with-ka.patch @@ -0,0 +1,47 @@ +From bca2539d148f6dde86836d8490315f8e5ba21629 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 30 Nov 2023 17:34:07 -0500 +Subject: drm/amd/display: Increase frame warning limit with KASAN or KCSAN in + dml + +From: Alex Deucher + +[ Upstream commit 5b750b22530fe53bf7fd6a30baacd53ada26911b ] + +Does the same thing as: +commit 6740ec97bcdb ("drm/amd/display: Increase frame warning limit with KASAN or KCSAN in dml2") + +Reviewed-by: Harry Wentland +Reported-by: kernel test robot +Closes: https://lore.kernel.org/oe-kbuild-all/202311302107.hUDXVyWT-lkp@intel.com/ +Fixes: 67e38874b85b ("drm/amd/display: Increase num voltage states to 40") +Signed-off-by: Alex Deucher +Cc: Alvin Lee +Cc: Hamza Mahfooz +Cc: Samson Tam +Cc: Harry Wentland +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/amd/display/dc/dml/Makefile | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile +index 77cf5545c94cc..c206812dc6897 100644 +--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile ++++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile +@@ -61,8 +61,12 @@ endif + endif + + ifneq ($(CONFIG_FRAME_WARN),0) ++ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y) ++frame_warn_flag := 
-Wframe-larger-than=3072 ++else + frame_warn_flag := -Wframe-larger-than=2048 + endif ++endif + + CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags) + CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_ccflags) +-- +2.43.0 + diff --git a/queue-6.6/drm-amd-display-increase-num-voltage-states-to-40.patch b/queue-6.6/drm-amd-display-increase-num-voltage-states-to-40.patch new file mode 100644 index 00000000000..f02a031a0e1 --- /dev/null +++ b/queue-6.6/drm-amd-display-increase-num-voltage-states-to-40.patch @@ -0,0 +1,42 @@ +From 281fed68e1e6d9e6e234dc7bccbb8296cf8b07d4 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 8 Nov 2023 17:16:28 -0500 +Subject: drm/amd/display: Increase num voltage states to 40 + +From: Alvin Lee + +[ Upstream commit 67e38874b85b8df7b23d29f78ac3d7ecccd9519d ] + +[Description] +If during driver init stage there are greater than 20 +intermediary voltage states while constructing the SOC +BB we could hit issues because we will index outside of the +clock_limits array and start overwriting data. Increase the +total number of states to 40 to avoid this issue. + +Cc: stable@vger.kernel.org # 6.1+ +Reviewed-by: Samson Tam +Acked-by: Hamza Mahfooz +Signed-off-by: Alvin Lee +Signed-off-by: Alex Deucher +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/amd/display/dc/dml/dc_features.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/gpu/drm/amd/display/dc/dml/dc_features.h b/drivers/gpu/drm/amd/display/dc/dml/dc_features.h +index 2cbdd75429ffd..6e669a2c5b2d4 100644 +--- a/drivers/gpu/drm/amd/display/dc/dml/dc_features.h ++++ b/drivers/gpu/drm/amd/display/dc/dml/dc_features.h +@@ -36,7 +36,7 @@ + * Define the maximum amount of states supported by the ASIC. Every ASIC has a + * specific number of states; this macro defines the maximum number of states. + */ +-#define DC__VOLTAGE_STATES 20 ++#define DC__VOLTAGE_STATES 40 + #define DC__NUM_DPP__4 1 + #define DC__NUM_DPP__0_PRESENT 1 + #define DC__NUM_DPP__1_PRESENT 1 +-- +2.43.0 + diff --git a/queue-6.6/drm-bridge-parade-ps8640-never-store-more-than-msg-s.patch b/queue-6.6/drm-bridge-parade-ps8640-never-store-more-than-msg-s.patch new file mode 100644 index 00000000000..6e7e94d588b --- /dev/null +++ b/queue-6.6/drm-bridge-parade-ps8640-never-store-more-than-msg-s.patch @@ -0,0 +1,66 @@ +From 4b3b4003280beb9ca9f950193e9244fc3aff313d Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 14 Dec 2023 12:37:51 -0800 +Subject: drm/bridge: parade-ps8640: Never store more than msg->size bytes in + AUX xfer + +From: Douglas Anderson + +[ Upstream commit 3164c8a70073d43629b4e11e083d3d2798f7750f ] + +While testing, I happened to notice a random crash that looked like: + + Kernel panic - not syncing: stack-protector: + Kernel stack is corrupted in: drm_dp_dpcd_probe+0x120/0x120 + +Analysis of drm_dp_dpcd_probe() shows that we pass in a 1-byte buffer +(allocated on the stack) to the aux->transfer() function. Presumably +if the aux->transfer() writes more than one byte to this buffer then +we're in a bad shape. + +Dropping into kgdb, I noticed that "aux->transfer" pointed at +ps8640_aux_transfer(). + +Reading through ps8640_aux_transfer(), I can see that there are cases +where it could write more bytes to msg->buffer than were specified by +msg->size. This could happen if the hardware reported back something +bogus to us. Let's fix this so we never write more than msg->size +bytes. 
We'll still read all the bytes from the hardware just in case +the hardware requires it since the aux transfer data comes through an +auto-incrementing register. + +NOTE: I have no actual way to reproduce this issue but it seems likely +this is what was happening in the crash I looked at. + +Fixes: 13afcdd7277e ("drm/bridge: parade-ps8640: Add support for AUX channel") +Reviewed-by: Stephen Boyd +Reviewed-by: Guenter Roeck +Signed-off-by: Douglas Anderson +Link: https://patchwork.freedesktop.org/patch/msgid/20231214123752.v3.1.I9d1afcaad76a3e2c0ca046dc4adbc2b632c22eda@changeid +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/bridge/parade-ps8640.c | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c +index 8161b1a1a4b12..d264b80d909de 100644 +--- a/drivers/gpu/drm/bridge/parade-ps8640.c ++++ b/drivers/gpu/drm/bridge/parade-ps8640.c +@@ -330,11 +330,12 @@ static ssize_t ps8640_aux_transfer_msg(struct drm_dp_aux *aux, + return ret; + } + +- buf[i] = data; ++ if (i < msg->size) ++ buf[i] = data; + } + } + +- return len; ++ return min(len, msg->size); + } + + static ssize_t ps8640_aux_transfer(struct drm_dp_aux *aux, +-- +2.43.0 + diff --git a/queue-6.6/drm-bridge-ps8640-fix-size-mismatch-warning-w-len.patch b/queue-6.6/drm-bridge-ps8640-fix-size-mismatch-warning-w-len.patch new file mode 100644 index 00000000000..a533e73ed54 --- /dev/null +++ b/queue-6.6/drm-bridge-ps8640-fix-size-mismatch-warning-w-len.patch @@ -0,0 +1,51 @@ +From 60f7fe0dba377efda73d4153e89f62b6df1869d6 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 18 Dec 2023 09:04:54 -0800 +Subject: drm/bridge: ps8640: Fix size mismatch warning w/ len + +From: Douglas Anderson + +[ Upstream commit 35ba6bd582cf926a082296b7e9a876ec81136cb1 ] + +After commit 26195af57798 ("drm/bridge: ps8640: Drop the ability of +ps8640 to fetch the EDID"), I got an error compiling: + + error: comparison of distinct pointer types + ('typeof (len) *' (aka 'unsigned int *') and + 'typeof (msg->size) *' (aka 'unsigned long *')) + [-Werror,-Wcompare-distinct-pointer-types] + +Fix it by declaring the `len` as size_t. + +The above error only shows up on downstream kernels without commit +d03eba99f5bf ("minmax: allow min()/max()/clamp() if the arguments have +the same signedness."), but since commit 26195af57798 ("drm/bridge: +ps8640: Drop the ability of ps8640 to fetch the EDID") is a "Fix" that +will likely be backported it seems nice to make it easy. ...plus it's +more correct to declare `len` as size_t anyway. 
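For context, the diagnostic comes from the kernel's type-checked min(): before commit d03eba99f5bf it refused mixed argument types even when both are unsigned, and msg->size is a size_t while the old local was unsigned int. An illustrative comparison (function and parameter names are made up):

  #include <linux/minmax.h>

  /* req plays the role of msg->size (a size_t). */
  static size_t clamp_bad(unsigned int hw_len, size_t req)
  {
          return min(hw_len, req);        /* pre-d03eba99f5bf: distinct-type error */
  }

  static size_t clamp_ok(size_t hw_len, size_t req)
  {
          return min(hw_len, req);        /* same type on both sides: fine */
  }

  static size_t clamp_ok_too(unsigned int hw_len, size_t req)
  {
          return min_t(size_t, hw_len, req);  /* or pick the type explicitly */
  }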
+ +Fixes: 26195af57798 ("drm/bridge: ps8640: Drop the ability of ps8640 to fetch the EDID") +Reviewed-by: Stephen Boyd +Signed-off-by: Douglas Anderson +Link: https://patchwork.freedesktop.org/patch/msgid/20231218090454.1.I5c6eb80b2f746439c4b58efab788e00701d08759@changeid +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/bridge/parade-ps8640.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c +index d264b80d909de..541e4f5afc4c8 100644 +--- a/drivers/gpu/drm/bridge/parade-ps8640.c ++++ b/drivers/gpu/drm/bridge/parade-ps8640.c +@@ -210,7 +210,7 @@ static ssize_t ps8640_aux_transfer_msg(struct drm_dp_aux *aux, + struct ps8640 *ps_bridge = aux_to_ps8640(aux); + struct regmap *map = ps_bridge->regmap[PAGE0_DP_CNTL]; + struct device *dev = &ps_bridge->page[PAGE0_DP_CNTL]->dev; +- unsigned int len = msg->size; ++ size_t len = msg->size; + unsigned int data; + unsigned int base; + int ret; +-- +2.43.0 + diff --git a/queue-6.6/drm-bridge-ti-sn65dsi86-never-store-more-than-msg-si.patch b/queue-6.6/drm-bridge-ti-sn65dsi86-never-store-more-than-msg-si.patch new file mode 100644 index 00000000000..d11c302b2bb --- /dev/null +++ b/queue-6.6/drm-bridge-ti-sn65dsi86-never-store-more-than-msg-si.patch @@ -0,0 +1,55 @@ +From bff6a729444229fb3df5470cca39757c76ae1ea7 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 14 Dec 2023 12:37:52 -0800 +Subject: drm/bridge: ti-sn65dsi86: Never store more than msg->size bytes in + AUX xfer + +From: Douglas Anderson + +[ Upstream commit aca58eac52b88138ab98c814afb389a381725cd7 ] + +For aux reads, the value `msg->size` indicates the size of the buffer +provided by `msg->buffer`. We should never in any circumstances write +more bytes to the buffer since it may overflow the buffer. + +In the ti-sn65dsi86 driver there is one code path that reads the +transfer length from hardware. Even though it's never been seen to be +a problem, we should make extra sure that the hardware isn't +increasing the length since doing so would cause us to overrun the +buffer. 
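The defensive shape is the same as in the ps8640 fix above: treat a length reported back by the hardware as untrusted and clamp it to what the caller asked for before copying anything. A minimal sketch with placeholder names (not the SN65DSI86 register flow):

  #include <stddef.h>
  #include <string.h>

  /* Copy at most 'requested' bytes even if the device claims more arrived. */
  static size_t read_reply(unsigned char *dst, size_t requested,
                           const unsigned char *hw_fifo, size_t hw_len)
  {
          size_t n = hw_len < requested ? hw_len : requested;

          memcpy(dst, hw_fifo, n);
          return n;               /* report the possibly shortened length */
  }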
+ +Fixes: 982f589bde7a ("drm/bridge: ti-sn65dsi86: Update reply on aux failures") +Reviewed-by: Stephen Boyd +Reviewed-by: Guenter Roeck +Signed-off-by: Douglas Anderson +Link: https://patchwork.freedesktop.org/patch/msgid/20231214123752.v3.2.I7b83c0f31aeedc6b1dc98c7c741d3e1f94f040f8@changeid +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/bridge/ti-sn65dsi86.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c +index 84148a79414b7..3309c01fa7153 100644 +--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c ++++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c +@@ -527,6 +527,7 @@ static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux, + u32 request_val = AUX_CMD_REQ(msg->request); + u8 *buf = msg->buffer; + unsigned int len = msg->size; ++ unsigned int short_len; + unsigned int val; + int ret; + u8 addr_len[SN_AUX_LENGTH_REG + 1 - SN_AUX_ADDR_19_16_REG]; +@@ -600,7 +601,8 @@ static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux, + } + + if (val & AUX_IRQ_STATUS_AUX_SHORT) { +- ret = regmap_read(pdata->regmap, SN_AUX_LENGTH_REG, &len); ++ ret = regmap_read(pdata->regmap, SN_AUX_LENGTH_REG, &short_len); ++ len = min(len, short_len); + if (ret) + goto exit; + } else if (val & AUX_IRQ_STATUS_NAT_I2C_FAIL) { +-- +2.43.0 + diff --git a/queue-6.6/drm-i915-call-intel_pre_plane_updates-also-for-pipes.patch b/queue-6.6/drm-i915-call-intel_pre_plane_updates-also-for-pipes.patch new file mode 100644 index 00000000000..603ee17d955 --- /dev/null +++ b/queue-6.6/drm-i915-call-intel_pre_plane_updates-also-for-pipes.patch @@ -0,0 +1,49 @@ +From 977b81657221ca1223c5cd372913472ad66e27a4 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 21 Nov 2023 07:43:15 +0200 +Subject: drm/i915: Call intel_pre_plane_updates() also for pipes getting + enabled +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Ville Syrjälä + +[ Upstream commit d21a3962d3042e6f56ad324cf18bdd64a1e6ecfa ] + +We used to call intel_pre_plane_updates() for any pipe going through +a modeset whether the pipe was previously enabled or not. This in +fact needed to apply all the necessary clock gating workarounds/etc. +Restore the correct behaviour. 
+ +Fixes: 39919997322f ("drm/i915: Disable all planes before modesetting any pipes") +Reviewed-by: Jani Nikula +Signed-off-by: Ville Syrjälä +Link: https://patchwork.freedesktop.org/patch/msgid/20231121054324.9988-3-ville.syrjala@linux.intel.com +(cherry picked from commit e0d5ce11ed0a21bb2bf328ad82fd261783c7ad88) +Signed-off-by: Jani Nikula +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/i915/display/intel_display.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c +index 2e0daad23aa61..a072fbb9872aa 100644 +--- a/drivers/gpu/drm/i915/display/intel_display.c ++++ b/drivers/gpu/drm/i915/display/intel_display.c +@@ -6670,10 +6670,11 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state) + if (!intel_crtc_needs_modeset(new_crtc_state)) + continue; + ++ intel_pre_plane_update(state, crtc); ++ + if (!old_crtc_state->hw.active) + continue; + +- intel_pre_plane_update(state, crtc); + intel_crtc_disable_planes(state, crtc); + } + +-- +2.43.0 + diff --git a/queue-6.6/drm-i915-dp-fix-passing-the-correct-dpcd_rev-for-drm.patch b/queue-6.6/drm-i915-dp-fix-passing-the-correct-dpcd_rev-for-drm.patch new file mode 100644 index 00000000000..f046d2efaa9 --- /dev/null +++ b/queue-6.6/drm-i915-dp-fix-passing-the-correct-dpcd_rev-for-drm.patch @@ -0,0 +1,42 @@ +From a9e179f9143f3d7b2652ba26934081ce52c507cb Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 13 Dec 2023 13:15:42 -0800 +Subject: drm/i915/dp: Fix passing the correct DPCD_REV for + drm_dp_set_phy_test_pattern + +From: Khaled Almahallawy + +[ Upstream commit 2bd7a06a1208aaacb4e7a2a5436c23bce8d70801 ] + +Using link_status to get DPCD_REV fails when disabling/defaulting +phy pattern. Use intel_dp->dpcd to access DPCD_REV correctly. 
+ +Fixes: 8cdf72711928 ("drm/i915/dp: Program vswing, pre-emphasis, test-pattern") +Cc: Jani Nikula +Cc: Imre Deak +Cc: Lee Shawn C +Signed-off-by: Khaled Almahallawy +Signed-off-by: Jani Nikula +Link: https://patchwork.freedesktop.org/patch/msgid/20231213211542.3585105-3-khaled.almahallawy@intel.com +(cherry picked from commit 3ee302ec22d6e1d7d1e6d381b0d507ee80f2135c) +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/i915/display/intel_dp.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c +index b4fb7ce39d06f..18ee4f2a87f9e 100644 +--- a/drivers/gpu/drm/i915/display/intel_dp.c ++++ b/drivers/gpu/drm/i915/display/intel_dp.c +@@ -3987,7 +3987,7 @@ static void intel_dp_process_phy_request(struct intel_dp *intel_dp, + intel_dp->train_set, crtc_state->lane_count); + + drm_dp_set_phy_test_pattern(&intel_dp->aux, data, +- link_status[DP_DPCD_REV]); ++ intel_dp->dpcd[DP_DPCD_REV]); + } + + static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp) +-- +2.43.0 + diff --git a/queue-6.6/drm-i915-perf-update-handling-of-mmio-triggered-repo.patch b/queue-6.6/drm-i915-perf-update-handling-of-mmio-triggered-repo.patch new file mode 100644 index 00000000000..8f0c44a5737 --- /dev/null +++ b/queue-6.6/drm-i915-perf-update-handling-of-mmio-triggered-repo.patch @@ -0,0 +1,88 @@ +From b61e62d3cdb32347aa1a823ff125f45a5f8cb531 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 18 Dec 2023 16:05:43 -0800 +Subject: drm/i915/perf: Update handling of MMIO triggered reports + +From: Umesh Nerlige Ramappa + +[ Upstream commit ee11d2d37f5c05bd7bf5ccc820a58f48423d032b ] + +On XEHP platforms user is not able to find MMIO triggered reports in the +OA buffer since i915 squashes the context ID fields. These context ID +fields hold the MMIO trigger markers. + +Update logic to not squash the context ID fields of MMIO triggered +reports. + +Fixes: cba94bbcff08 ("drm/i915/perf: Determine context valid in OA reports") +Signed-off-by: Umesh Nerlige Ramappa +Reviewed-by: Ashutosh Dixit +Link: https://patchwork.freedesktop.org/patch/msgid/20231219000543.1087706-1-umesh.nerlige.ramappa@intel.com +(cherry picked from commit 0c68132df6e66244acec1bb5b9e19b0751414389) +Signed-off-by: Jani Nikula +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/i915/i915_perf.c | 39 ++++++++++++++++++++++++++++---- + 1 file changed, 34 insertions(+), 5 deletions(-) + +diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c +index 109135fcfca28..8f4a25d2cfc24 100644 +--- a/drivers/gpu/drm/i915/i915_perf.c ++++ b/drivers/gpu/drm/i915/i915_perf.c +@@ -785,10 +785,6 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream, + * The reason field includes flags identifying what + * triggered this specific report (mostly timer + * triggered or e.g. due to a context switch). +- * +- * In MMIO triggered reports, some platforms do not set the +- * reason bit in this field and it is valid to have a reason +- * field of zero. + */ + reason = oa_report_reason(stream, report); + ctx_id = oa_context_id(stream, report32); +@@ -800,8 +796,41 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream, + * + * Note: that we don't clear the valid_ctx_bit so userspace can + * understand that the ID has been squashed by the kernel. ++ * ++ * Update: ++ * ++ * On XEHP platforms the behavior of context id valid bit has ++ * changed compared to prior platforms. 
To describe this, we ++ * define a few terms: ++ * ++ * context-switch-report: This is a report with the reason type ++ * being context-switch. It is generated when a context switches ++ * out. ++ * ++ * context-valid-bit: A bit that is set in the report ID field ++ * to indicate that a valid context has been loaded. ++ * ++ * gpu-idle: A condition characterized by a ++ * context-switch-report with context-valid-bit set to 0. ++ * ++ * On prior platforms, context-id-valid bit is set to 0 only ++ * when GPU goes idle. In all other reports, it is set to 1. ++ * ++ * On XEHP platforms, context-valid-bit is set to 1 in a context ++ * switch report if a new context switched in. For all other ++ * reports it is set to 0. ++ * ++ * This change in behavior causes an issue with MMIO triggered ++ * reports. MMIO triggered reports have the markers in the ++ * context ID field and the context-valid-bit is 0. The logic ++ * below to squash the context ID would render the report ++ * useless since the user will not be able to find it in the OA ++ * buffer. Since MMIO triggered reports exist only on XEHP, ++ * we should avoid squashing these for XEHP platforms. + */ +- if (oa_report_ctx_invalid(stream, report)) { ++ ++ if (oa_report_ctx_invalid(stream, report) && ++ GRAPHICS_VER_FULL(stream->engine->i915) < IP_VER(12, 50)) { + ctx_id = INVALID_CTX_ID; + oa_context_id_squash(stream, report32); + } +-- +2.43.0 + diff --git a/queue-6.6/i40e-fix-filter-input-checks-to-prevent-config-with-.patch b/queue-6.6/i40e-fix-filter-input-checks-to-prevent-config-with-.patch new file mode 100644 index 00000000000..3c5ebe58f1e --- /dev/null +++ b/queue-6.6/i40e-fix-filter-input-checks-to-prevent-config-with-.patch @@ -0,0 +1,53 @@ +From 2623374400f3041d5c04a0c2286ea9574953ab7f Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 29 Nov 2023 11:23:11 +0100 +Subject: i40e: Fix filter input checks to prevent config with invalid values + +From: Sudheer Mogilappagari + +[ Upstream commit 3e48041d9820c17e0a51599d12e66c6e12a8d08d ] + +Prevent VF from configuring filters with unsupported actions or use +REDIRECT action with invalid tc number. Current checks could cause +out of bounds access on PF side. 
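The hardening pattern is the usual one for host-side handling of VF requests: reject any action the PF does not actually support, and range-check indices against the resources allocated to that particular VF rather than a global maximum. A compact sketch with hypothetical field names and limits:

  #include <stdbool.h>

  struct vf_filter_req {
          unsigned int action;            /* what to do with matching packets */
          unsigned int action_meta;       /* e.g. target traffic class number */
  };

  #define ACTION_TC_REDIRECT 1U

  /* num_tc: traffic classes actually configured for this VF. */
  static bool filter_req_valid(const struct vf_filter_req *req,
                               unsigned int num_tc)
  {
          if (req->action != ACTION_TC_REDIRECT)
                  return false;           /* unsupported action */
          if (req->action_meta == 0 || req->action_meta > num_tc)
                  return false;           /* would index past this VF's TCs */
          return true;
  }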
+ +Fixes: e284fc280473 ("i40e: Add and delete cloud filter") +Reviewed-by: Andrii Staikov +Signed-off-by: Sudheer Mogilappagari +Signed-off-by: Aleksandr Loktionov +Reviewed-by: Simon Horman +Tested-by: Bharathi Sreenivas +Signed-off-by: Tony Nguyen +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +index 4441b00297f47..220dad902f9b4 100644 +--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c ++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +@@ -3519,16 +3519,16 @@ static int i40e_validate_cloud_filter(struct i40e_vf *vf, + bool found = false; + int bkt; + +- if (!tc_filter->action) { ++ if (tc_filter->action != VIRTCHNL_ACTION_TC_REDIRECT) { + dev_info(&pf->pdev->dev, +- "VF %d: Currently ADq doesn't support Drop Action\n", +- vf->vf_id); ++ "VF %d: ADQ doesn't support this action (%d)\n", ++ vf->vf_id, tc_filter->action); + goto err; + } + + /* action_meta is TC number here to which the filter is applied */ + if (!tc_filter->action_meta || +- tc_filter->action_meta > I40E_MAX_VF_VSI) { ++ tc_filter->action_meta > vf->num_tc) { + dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n", + vf->vf_id, tc_filter->action_meta); + goto err; +-- +2.43.0 + diff --git a/queue-6.6/i40e-fix-use-after-free-in-i40e_aqc_add_filters.patch b/queue-6.6/i40e-fix-use-after-free-in-i40e_aqc_add_filters.patch new file mode 100644 index 00000000000..6a9b0fc703a --- /dev/null +++ b/queue-6.6/i40e-fix-use-after-free-in-i40e_aqc_add_filters.patch @@ -0,0 +1,120 @@ +From e1144c1cd987a93af2cf23ea5f58c660898091c4 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 18 Dec 2023 15:08:50 +0800 +Subject: i40e: fix use-after-free in i40e_aqc_add_filters() + +From: Ke Xiao + +[ Upstream commit 6a15584e99db8918b60e507539c7446375dcf366 ] + +Commit 3116f59c12bd ("i40e: fix use-after-free in +i40e_sync_filters_subtask()") avoided use-after-free issues, +by increasing refcount during update the VSI filter list to +the HW. However, it missed the unicast situation. + +When deleting an unicast FDB entry, the i40e driver will release +the mac_filter, and i40e_service_task will concurrently request +firmware to add the mac_filter, which will lead to the following +use-after-free issue. + +Fix again for both netdev->uc and netdev->mc. 
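Restated, the refcount that keeps a synced address alive must be adjusted on the list the address actually lives on: unicast and link-local addresses sit on netdev->uc, everything else on netdev->mc, which is what the hunk below switches to. A simplified sketch (it omits the driver's removal of entries whose refcount drops to zero):

  #include <linux/etherdevice.h>
  #include <linux/netdevice.h>

  static void adjust_hw_addr_refcnt(struct net_device *netdev,
                                    const u8 *mac, int delta)
  {
          struct netdev_hw_addr_list *list;
          struct netdev_hw_addr *ha;

          if (is_unicast_ether_addr(mac) || is_link_local_ether_addr(mac))
                  list = &netdev->uc;     /* FDB/unicast entries live here */
          else
                  list = &netdev->mc;

          netdev_hw_addr_list_for_each(ha, list) {
                  if (ether_addr_equal(ha->addr, mac))
                          ha->refcount += delta;
          }
  }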
+ +BUG: KASAN: use-after-free in i40e_aqc_add_filters+0x55c/0x5b0 [i40e] +Read of size 2 at addr ffff888eb3452d60 by task kworker/8:7/6379 + +CPU: 8 PID: 6379 Comm: kworker/8:7 Kdump: loaded Tainted: G +Workqueue: i40e i40e_service_task [i40e] +Call Trace: + dump_stack+0x71/0xab + print_address_description+0x6b/0x290 + kasan_report+0x14a/0x2b0 + i40e_aqc_add_filters+0x55c/0x5b0 [i40e] + i40e_sync_vsi_filters+0x1676/0x39c0 [i40e] + i40e_service_task+0x1397/0x2bb0 [i40e] + process_one_work+0x56a/0x11f0 + worker_thread+0x8f/0xf40 + kthread+0x2a0/0x390 + ret_from_fork+0x1f/0x40 + +Allocated by task 21948: + kasan_kmalloc+0xa6/0xd0 + kmem_cache_alloc_trace+0xdb/0x1c0 + i40e_add_filter+0x11e/0x520 [i40e] + i40e_addr_sync+0x37/0x60 [i40e] + __hw_addr_sync_dev+0x1f5/0x2f0 + i40e_set_rx_mode+0x61/0x1e0 [i40e] + dev_uc_add_excl+0x137/0x190 + i40e_ndo_fdb_add+0x161/0x260 [i40e] + rtnl_fdb_add+0x567/0x950 + rtnetlink_rcv_msg+0x5db/0x880 + netlink_rcv_skb+0x254/0x380 + netlink_unicast+0x454/0x610 + netlink_sendmsg+0x747/0xb00 + sock_sendmsg+0xe2/0x120 + __sys_sendto+0x1ae/0x290 + __x64_sys_sendto+0xdd/0x1b0 + do_syscall_64+0xa0/0x370 + entry_SYSCALL_64_after_hwframe+0x65/0xca + +Freed by task 21948: + __kasan_slab_free+0x137/0x190 + kfree+0x8b/0x1b0 + __i40e_del_filter+0x116/0x1e0 [i40e] + i40e_del_mac_filter+0x16c/0x300 [i40e] + i40e_addr_unsync+0x134/0x1b0 [i40e] + __hw_addr_sync_dev+0xff/0x2f0 + i40e_set_rx_mode+0x61/0x1e0 [i40e] + dev_uc_del+0x77/0x90 + rtnl_fdb_del+0x6a5/0x860 + rtnetlink_rcv_msg+0x5db/0x880 + netlink_rcv_skb+0x254/0x380 + netlink_unicast+0x454/0x610 + netlink_sendmsg+0x747/0xb00 + sock_sendmsg+0xe2/0x120 + __sys_sendto+0x1ae/0x290 + __x64_sys_sendto+0xdd/0x1b0 + do_syscall_64+0xa0/0x370 + entry_SYSCALL_64_after_hwframe+0x65/0xca + +Fixes: 3116f59c12bd ("i40e: fix use-after-free in i40e_sync_filters_subtask()") +Fixes: 41c445ff0f48 ("i40e: main driver core") +Signed-off-by: Ke Xiao +Signed-off-by: Ding Hui +Cc: Di Zhu +Reviewed-by: Jan Sokolowski +Reviewed-by: Simon Horman +Reviewed-by: Jacob Keller +Tested-by: Pucha Himasekhar Reddy (A Contingent worker at Intel) +Signed-off-by: Tony Nguyen +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/intel/i40e/i40e_main.c | 8 +++++++- + 1 file changed, 7 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c +index a9f5a8a7d3f05..370825e433cc6 100644 +--- a/drivers/net/ethernet/intel/i40e/i40e_main.c ++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c +@@ -104,12 +104,18 @@ static struct workqueue_struct *i40e_wq; + static void netdev_hw_addr_refcnt(struct i40e_mac_filter *f, + struct net_device *netdev, int delta) + { ++ struct netdev_hw_addr_list *ha_list; + struct netdev_hw_addr *ha; + + if (!f || !netdev) + return; + +- netdev_for_each_mc_addr(ha, netdev) { ++ if (is_unicast_ether_addr(f->macaddr) || is_link_local_ether_addr(f->macaddr)) ++ ha_list = &netdev->uc; ++ else ++ ha_list = &netdev->mc; ++ ++ netdev_hw_addr_list_for_each(ha, ha_list) { + if (ether_addr_equal(ha->addr, f->macaddr)) { + ha->refcount += delta; + if (ha->refcount <= 0) +-- +2.43.0 + diff --git a/queue-6.6/i40e-restore-vf-msi-x-state-during-pci-reset.patch b/queue-6.6/i40e-restore-vf-msi-x-state-during-pci-reset.patch new file mode 100644 index 00000000000..f5beec0bbca --- /dev/null +++ b/queue-6.6/i40e-restore-vf-msi-x-state-during-pci-reset.patch @@ -0,0 +1,104 @@ +From f07c7fcf74e4a3a8b76ea74f2772ef211013c241 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 21 Dec 2023 
14:27:35 +0100 +Subject: i40e: Restore VF MSI-X state during PCI reset + +From: Andrii Staikov + +[ Upstream commit 371e576ff3e8580d91d49026e5d5faebf5565558 ] + +During a PCI FLR the MSI-X Enable flag in the VF PCI MSI-X capability +register will be cleared. This can lead to issues when a VF is +assigned to a VM because in these cases the VF driver receives no +indication of the PF PCI error/reset and additionally it is incapable +of restoring the cleared flag in the hypervisor configuration space +without fully reinitializing the driver interrupt functionality. + +Since the VF driver is unable to easily resolve this condition on its own, +restore the VF MSI-X flag during the PF PCI reset handling. + +Fixes: 19b7960b2da1 ("i40e: implement split PCI error reset handler") +Co-developed-by: Karen Ostrowska +Signed-off-by: Karen Ostrowska +Co-developed-by: Mateusz Palczewski +Signed-off-by: Mateusz Palczewski +Reviewed-by: Wojciech Drewek +Reviewed-by: Przemek Kitszel +Signed-off-by: Andrii Staikov +Tested-by: Rafal Romanowski +Signed-off-by: Tony Nguyen +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/intel/i40e/i40e_main.c | 3 +++ + .../ethernet/intel/i40e/i40e_virtchnl_pf.c | 26 +++++++++++++++++++ + .../ethernet/intel/i40e/i40e_virtchnl_pf.h | 3 +++ + 3 files changed, 32 insertions(+) + +diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c +index 370825e433cc6..5b20eba93d048 100644 +--- a/drivers/net/ethernet/intel/i40e/i40e_main.c ++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c +@@ -16485,6 +16485,9 @@ static void i40e_pci_error_reset_done(struct pci_dev *pdev) + return; + + i40e_reset_and_rebuild(pf, false, false); ++#ifdef CONFIG_PCI_IOV ++ i40e_restore_all_vfs_msi_state(pdev); ++#endif /* CONFIG_PCI_IOV */ + } + + /** +diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +index 220dad902f9b4..a97ca2224da0e 100644 +--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c ++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +@@ -152,6 +152,32 @@ void i40e_vc_notify_reset(struct i40e_pf *pf) + (u8 *)&pfe, sizeof(struct virtchnl_pf_event)); + } + ++#ifdef CONFIG_PCI_IOV ++void i40e_restore_all_vfs_msi_state(struct pci_dev *pdev) ++{ ++ u16 vf_id; ++ u16 pos; ++ ++ /* Continue only if this is a PF */ ++ if (!pdev->is_physfn) ++ return; ++ ++ if (!pci_num_vf(pdev)) ++ return; ++ ++ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); ++ if (pos) { ++ struct pci_dev *vf_dev = NULL; ++ ++ pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id); ++ while ((vf_dev = pci_get_device(pdev->vendor, vf_id, vf_dev))) { ++ if (vf_dev->is_virtfn && vf_dev->physfn == pdev) ++ pci_restore_msi_state(vf_dev); ++ } ++ } ++} ++#endif /* CONFIG_PCI_IOV */ ++ + /** + * i40e_vc_notify_vf_reset + * @vf: pointer to the VF structure +diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +index 895b8feb2567c..bda9ba668c41e 100644 +--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h ++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +@@ -135,6 +135,9 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable); + + void i40e_vc_notify_link_state(struct i40e_pf *pf); + void i40e_vc_notify_reset(struct i40e_pf *pf); ++#ifdef CONFIG_PCI_IOV ++void i40e_restore_all_vfs_msi_state(struct pci_dev *pdev); ++#endif /* CONFIG_PCI_IOV */ + int i40e_get_vf_stats(struct net_device *netdev, int 
vf_id, + struct ifla_vf_stats *vf_stats); + +-- +2.43.0 + diff --git a/queue-6.6/ice-fix-link_down_on_close-message.patch b/queue-6.6/ice-fix-link_down_on_close-message.patch new file mode 100644 index 00000000000..7a24d37ceab --- /dev/null +++ b/queue-6.6/ice-fix-link_down_on_close-message.patch @@ -0,0 +1,55 @@ +From ac5e5dcefb608856a2865727d24a7d31195c25f0 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 15 Dec 2023 12:01:56 +0100 +Subject: ice: Fix link_down_on_close message + +From: Katarzyna Wieczerzycka + +[ Upstream commit 6a8d8bb55e7001de2d50920381cc858f3a3e9fb7 ] + +The driver should not report an error message when for a medialess port +the link_down_on_close flag is enabled and the physical link cannot be +set down. + +Fixes: 8ac7132704f3 ("ice: Fix interface being down after reset with link-down-on-close flag on") +Reviewed-by: Przemek Kitszel +Signed-off-by: Katarzyna Wieczerzycka +Signed-off-by: Wojciech Drewek +Tested-by: Pucha Himasekhar Reddy (A Contingent worker at Intel) +Signed-off-by: Tony Nguyen +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/intel/ice/ice_main.c | 10 +++++++--- + 1 file changed, 7 insertions(+), 3 deletions(-) + +diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c +index 7784135160fd2..66f4c54d8aa5a 100644 +--- a/drivers/net/ethernet/intel/ice/ice_main.c ++++ b/drivers/net/ethernet/intel/ice/ice_main.c +@@ -2146,7 +2146,7 @@ static int ice_configure_phy(struct ice_vsi *vsi) + + /* Ensure we have media as we cannot configure a medialess port */ + if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) +- return -EPERM; ++ return -ENOMEDIUM; + + ice_print_topo_conflict(vsi); + +@@ -9173,8 +9173,12 @@ int ice_stop(struct net_device *netdev) + int link_err = ice_force_phys_link_state(vsi, false); + + if (link_err) { +- netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n", +- vsi->vsi_num, link_err); ++ if (link_err == -ENOMEDIUM) ++ netdev_info(vsi->netdev, "Skipping link reconfig - no media attached, VSI %d\n", ++ vsi->vsi_num); ++ else ++ netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n", ++ vsi->vsi_num, link_err); + return -EIO; + } + } +-- +2.43.0 + diff --git a/queue-6.6/ice-shut-down-vsi-with-link-down-on-close-enabled.patch b/queue-6.6/ice-shut-down-vsi-with-link-down-on-close-enabled.patch new file mode 100644 index 00000000000..d22b7d7beb7 --- /dev/null +++ b/queue-6.6/ice-shut-down-vsi-with-link-down-on-close-enabled.patch @@ -0,0 +1,40 @@ +From 313bcf9e29066d6317d24550da19700299917f8d Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 15 Dec 2023 12:01:57 +0100 +Subject: ice: Shut down VSI with "link-down-on-close" enabled + +From: Ngai-Mint Kwan + +[ Upstream commit 6d05ff55ef4f4954d28551236239f297bd52ea48 ] + +Disabling netdev with ethtool private flag "link-down-on-close" enabled +can cause NULL pointer dereference bug. Shut down VSI regardless of +"link-down-on-close" state. 
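A purely illustrative control-flow sketch of the intent, with every name below
invented: the VSI teardown has to run even when the optional link-down step fails,
so no per-queue state is left around to be dereferenced later.

#include <stdio.h>

static int force_link_down(void)
{
        return -1;                  /* pretend the admin-queue call failed */
}

static void vsi_close(void)
{
        printf("vsi closed\n");
}

static int model_stop(int link_down_on_close)
{
        if (link_down_on_close && force_link_down()) {
                printf("failed to set physical link down\n");
                vsi_close();        /* the fix: tear down anyway */
                return -1;
        }
        vsi_close();
        return 0;
}

int main(void)
{
        return model_stop(1) ? 1 : 0;
}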
+ +Fixes: 8ac7132704f3 ("ice: Fix interface being down after reset with link-down-on-close flag on") +Reviewed-by: Przemek Kitszel +Signed-off-by: Ngai-Mint Kwan +Signed-off-by: Wojciech Drewek +Tested-by: Pucha Himasekhar Reddy (A Contingent worker at Intel) +Signed-off-by: Tony Nguyen +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/intel/ice/ice_main.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c +index 66f4c54d8aa5a..d8d2aa4c0216a 100644 +--- a/drivers/net/ethernet/intel/ice/ice_main.c ++++ b/drivers/net/ethernet/intel/ice/ice_main.c +@@ -9179,6 +9179,8 @@ int ice_stop(struct net_device *netdev) + else + netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n", + vsi->vsi_num, link_err); ++ ++ ice_vsi_close(vsi); + return -EIO; + } + } +-- +2.43.0 + diff --git a/queue-6.6/igc-check-vlan-ethertype-mask.patch b/queue-6.6/igc-check-vlan-ethertype-mask.patch new file mode 100644 index 00000000000..f4376046ce0 --- /dev/null +++ b/queue-6.6/igc-check-vlan-ethertype-mask.patch @@ -0,0 +1,72 @@ +From d83510add615079f48d05616e32b10b2ff9ffe7b Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 6 Dec 2023 15:07:18 +0100 +Subject: igc: Check VLAN EtherType mask + +From: Kurt Kanzenbach + +[ Upstream commit 7afd49a38e73afd57ff62c8d1cf5af760c4d49c0 ] + +Currently the driver accepts VLAN EtherType steering rules regardless of +the configured mask. And things might fail silently or with confusing error +messages to the user. The VLAN EtherType can only be matched by full +mask. Therefore, add a check for that. + +For instance the following rule is invalid, but the driver accepts it and +ignores the user specified mask: +|root@host:~# ethtool -N enp3s0 flow-type ether vlan-etype 0x8100 \ +| m 0x00ff action 0 +|Added rule with ID 63 +|root@host:~# ethtool --show-ntuple enp3s0 +|4 RX rings available +|Total 1 rules +| +|Filter: 63 +| Flow Type: Raw Ethernet +| Src MAC addr: 00:00:00:00:00:00 mask: FF:FF:FF:FF:FF:FF +| Dest MAC addr: 00:00:00:00:00:00 mask: FF:FF:FF:FF:FF:FF +| Ethertype: 0x0 mask: 0xFFFF +| VLAN EtherType: 0x8100 mask: 0x0 +| VLAN: 0x0 mask: 0xffff +| User-defined: 0x0 mask: 0xffffffffffffffff +| Action: Direct to queue 0 + +After: +|root@host:~# ethtool -N enp3s0 flow-type ether vlan-etype 0x8100 \ +| m 0x00ff action 0 +|rmgr: Cannot insert RX class rule: Operation not supported + +Fixes: 2b477d057e33 ("igc: Integrate flex filter into ethtool ops") +Suggested-by: Suman Ghosh +Signed-off-by: Kurt Kanzenbach +Acked-by: Vinicius Costa Gomes +Reviewed-by: Simon Horman +Tested-by: Naama Meir +Signed-off-by: Tony Nguyen +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/intel/igc/igc_ethtool.c | 8 ++++++++ + 1 file changed, 8 insertions(+) + +diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c +index a044e4a3a39a4..f7284fa4324a4 100644 +--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c ++++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c +@@ -1356,6 +1356,14 @@ static int igc_ethtool_add_nfc_rule(struct igc_adapter *adapter, + return -EOPNOTSUPP; + } + ++ /* VLAN EtherType can only be matched by full mask. 
*/ ++ if ((fsp->flow_type & FLOW_EXT) && ++ fsp->m_ext.vlan_etype && ++ fsp->m_ext.vlan_etype != ETHER_TYPE_FULL_MASK) { ++ netdev_dbg(netdev, "VLAN EtherType mask not supported\n"); ++ return -EOPNOTSUPP; ++ } ++ + if (fsp->location >= IGC_MAX_RXNFC_RULES) { + netdev_dbg(netdev, "Invalid location\n"); + return -EINVAL; +-- +2.43.0 + diff --git a/queue-6.6/igc-check-vlan-tci-mask.patch b/queue-6.6/igc-check-vlan-tci-mask.patch new file mode 100644 index 00000000000..68bcd6c6e08 --- /dev/null +++ b/queue-6.6/igc-check-vlan-tci-mask.patch @@ -0,0 +1,141 @@ +From 8fcfac7c4f885a2b9fb22771fc836012440611e4 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 1 Dec 2023 08:50:43 +0100 +Subject: igc: Check VLAN TCI mask + +From: Kurt Kanzenbach + +[ Upstream commit b5063cbe148b829e8eb97672c2cbccc058835476 ] + +Currently the driver accepts VLAN TCI steering rules regardless of the +configured mask. And things might fail silently or with confusing error +messages to the user. + +There are two ways to handle the VLAN TCI mask: + + 1. Match on the PCP field using a VLAN prio filter + 2. Match on complete TCI field using a flex filter + +Therefore, add checks and code for that. + +For instance the following rule is invalid and will be converted into a +VLAN prio rule which is not correct: +|root@host:~# ethtool -N enp3s0 flow-type ether vlan 0x0001 m 0xf000 \ +| action 1 +|Added rule with ID 61 +|root@host:~# ethtool --show-ntuple enp3s0 +|4 RX rings available +|Total 1 rules +| +|Filter: 61 +| Flow Type: Raw Ethernet +| Src MAC addr: 00:00:00:00:00:00 mask: FF:FF:FF:FF:FF:FF +| Dest MAC addr: 00:00:00:00:00:00 mask: FF:FF:FF:FF:FF:FF +| Ethertype: 0x0 mask: 0xFFFF +| VLAN EtherType: 0x0 mask: 0xffff +| VLAN: 0x1 mask: 0x1fff +| User-defined: 0x0 mask: 0xffffffffffffffff +| Action: Direct to queue 1 + +After: +|root@host:~# ethtool -N enp3s0 flow-type ether vlan 0x0001 m 0xf000 \ +| action 1 +|rmgr: Cannot insert RX class rule: Operation not supported + +Fixes: 7991487ecb2d ("igc: Allow for Flex Filters to be installed") +Signed-off-by: Kurt Kanzenbach +Acked-by: Vinicius Costa Gomes +Reviewed-by: Simon Horman +Tested-by: Naama Meir +Signed-off-by: Tony Nguyen +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/intel/igc/igc.h | 1 + + drivers/net/ethernet/intel/igc/igc_ethtool.c | 28 +++++++++++++++++--- + 2 files changed, 26 insertions(+), 3 deletions(-) + +diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h +index f48f82d5e274b..85cc163965062 100644 +--- a/drivers/net/ethernet/intel/igc/igc.h ++++ b/drivers/net/ethernet/intel/igc/igc.h +@@ -568,6 +568,7 @@ struct igc_nfc_filter { + u16 etype; + __be16 vlan_etype; + u16 vlan_tci; ++ u16 vlan_tci_mask; + u8 src_addr[ETH_ALEN]; + u8 dst_addr[ETH_ALEN]; + u8 user_data[8]; +diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c +index fa71bfb753b6d..a044e4a3a39a4 100644 +--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c ++++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c +@@ -957,6 +957,7 @@ static int igc_ethtool_set_coalesce(struct net_device *netdev, + } + + #define ETHER_TYPE_FULL_MASK ((__force __be16)~0) ++#define VLAN_TCI_FULL_MASK ((__force __be16)~0) + static int igc_ethtool_get_nfc_rule(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) + { +@@ -988,7 +989,7 @@ static int igc_ethtool_get_nfc_rule(struct igc_adapter *adapter, + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { + fsp->flow_type |= FLOW_EXT; + fsp->h_ext.vlan_tci = 
htons(rule->filter.vlan_tci); +- fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK); ++ fsp->m_ext.vlan_tci = htons(rule->filter.vlan_tci_mask); + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) { +@@ -1223,6 +1224,7 @@ static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule, + + if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) { + rule->filter.vlan_tci = ntohs(fsp->h_ext.vlan_tci); ++ rule->filter.vlan_tci_mask = ntohs(fsp->m_ext.vlan_tci); + rule->filter.match_flags |= IGC_FILTER_FLAG_VLAN_TCI; + } + +@@ -1260,11 +1262,19 @@ static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule, + memcpy(rule->filter.user_mask, fsp->m_ext.data, sizeof(fsp->m_ext.data)); + } + +- /* When multiple filter options or user data or vlan etype is set, use a +- * flex filter. ++ /* The i225/i226 has various different filters. Flex filters provide a ++ * way to match up to the first 128 bytes of a packet. Use them for: ++ * a) For specific user data ++ * b) For VLAN EtherType ++ * c) For full TCI match ++ * d) Or in case multiple filter criteria are set ++ * ++ * Otherwise, use the simple MAC, VLAN PRIO or EtherType filters. + */ + if ((rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) || + (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) || ++ ((rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) && ++ rule->filter.vlan_tci_mask == ntohs(VLAN_TCI_FULL_MASK)) || + (rule->filter.match_flags & (rule->filter.match_flags - 1))) + rule->flex = true; + else +@@ -1334,6 +1344,18 @@ static int igc_ethtool_add_nfc_rule(struct igc_adapter *adapter, + return -EINVAL; + } + ++ /* There are two ways to match the VLAN TCI: ++ * 1. Match on PCP field and use vlan prio filter for it ++ * 2. Match on complete TCI field and use flex filter for it ++ */ ++ if ((fsp->flow_type & FLOW_EXT) && ++ fsp->m_ext.vlan_tci && ++ fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK) && ++ fsp->m_ext.vlan_tci != VLAN_TCI_FULL_MASK) { ++ netdev_dbg(netdev, "VLAN mask not supported\n"); ++ return -EOPNOTSUPP; ++ } ++ + if (fsp->location >= IGC_MAX_RXNFC_RULES) { + netdev_dbg(netdev, "Invalid location\n"); + return -EINVAL; +-- +2.43.0 + diff --git a/queue-6.6/igc-fix-hicredit-calculation.patch b/queue-6.6/igc-fix-hicredit-calculation.patch new file mode 100644 index 00000000000..e6d33b61cf2 --- /dev/null +++ b/queue-6.6/igc-fix-hicredit-calculation.patch @@ -0,0 +1,45 @@ +From bb9620af9afe1d5657173a6aa5a321739e7d70ba Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 8 Dec 2023 15:58:16 +0100 +Subject: igc: Fix hicredit calculation + +From: Rodrigo Cataldo + +[ Upstream commit 947dfc8138dfaeb6e966e2d661de89eb203e3064 ] + +According to the Intel Software Manual for I225, Section 7.5.2.7, +hicredit should be multiplied by the constant link-rate value, 0x7736. + +Currently, the old constant link-rate value, 0x7735, from the boards +supported on igb are being used, most likely due to a copy'n'paste, as +the rest of the logic is the same for both drivers. + +Update hicredit accordingly. 
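A worked example with made-up numbers shows the size of the correction: the
high-credit register value is the 0x80000000 base plus hicredit scaled by the
per-device link-rate constant, 0x7736 on i225/i226 rather than the 0x7735 carried
over from igb.

#include <stdio.h>

int main(void)
{
        unsigned int hicredit = 100;                              /* example credit value */
        unsigned int from_igb = 0x80000000u + hicredit * 0x7735u; /* old constant, copied from igb */
        unsigned int for_i225 = 0x80000000u + hicredit * 0x7736u; /* constant from the I225 manual */

        printf("old=0x%08x new=0x%08x diff=%u\n",
               from_igb, for_i225, for_i225 - from_igb);
        return 0;
}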
+ +Fixes: 1ab011b0bf07 ("igc: Add support for CBS offloading") +Reviewed-by: Kurt Kanzenbach +Signed-off-by: Rodrigo Cataldo +Acked-by: Vinicius Costa Gomes +Tested-by: Naama Meir +Signed-off-by: Tony Nguyen +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/intel/igc/igc_tsn.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c +index a9c08321aca90..22cefb1eeedfa 100644 +--- a/drivers/net/ethernet/intel/igc/igc_tsn.c ++++ b/drivers/net/ethernet/intel/igc/igc_tsn.c +@@ -227,7 +227,7 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter) + wr32(IGC_TQAVCC(i), tqavcc); + + wr32(IGC_TQAVHC(i), +- 0x80000000 + ring->hicredit * 0x7735); ++ 0x80000000 + ring->hicredit * 0x7736); + } else { + /* Disable any CBS for the queue */ + txqctl &= ~(IGC_TXQCTL_QAV_SEL_MASK); +-- +2.43.0 + diff --git a/queue-6.6/igc-report-vlan-ethertype-matching-back-to-user.patch b/queue-6.6/igc-report-vlan-ethertype-matching-back-to-user.patch new file mode 100644 index 00000000000..4cfb988d393 --- /dev/null +++ b/queue-6.6/igc-report-vlan-ethertype-matching-back-to-user.patch @@ -0,0 +1,75 @@ +From 6e2f0b25e2cd81b8a76aa009a085ba67c9773886 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 1 Dec 2023 08:50:42 +0100 +Subject: igc: Report VLAN EtherType matching back to user + +From: Kurt Kanzenbach + +[ Upstream commit 088464abd48cf3735aee91f9e211b32da9d81117 ] + +Currently the driver allows to configure matching by VLAN EtherType. +However, the retrieval function does not report it back to the user. Add +it. + +Before: +|root@host:~# ethtool -N enp3s0 flow-type ether vlan-etype 0x8100 action 0 +|Added rule with ID 63 +|root@host:~# ethtool --show-ntuple enp3s0 +|4 RX rings available +|Total 1 rules +| +|Filter: 63 +| Flow Type: Raw Ethernet +| Src MAC addr: 00:00:00:00:00:00 mask: FF:FF:FF:FF:FF:FF +| Dest MAC addr: 00:00:00:00:00:00 mask: FF:FF:FF:FF:FF:FF +| Ethertype: 0x0 mask: 0xFFFF +| Action: Direct to queue 0 + +After: +|root@host:~# ethtool -N enp3s0 flow-type ether vlan-etype 0x8100 action 0 +|Added rule with ID 63 +|root@host:~# ethtool --show-ntuple enp3s0 +|4 RX rings available +|Total 1 rules +| +|Filter: 63 +| Flow Type: Raw Ethernet +| Src MAC addr: 00:00:00:00:00:00 mask: FF:FF:FF:FF:FF:FF +| Dest MAC addr: 00:00:00:00:00:00 mask: FF:FF:FF:FF:FF:FF +| Ethertype: 0x0 mask: 0xFFFF +| VLAN EtherType: 0x8100 mask: 0x0 +| VLAN: 0x0 mask: 0xffff +| User-defined: 0x0 mask: 0xffffffffffffffff +| Action: Direct to queue 0 + +Fixes: 2b477d057e33 ("igc: Integrate flex filter into ethtool ops") +Signed-off-by: Kurt Kanzenbach +Acked-by: Vinicius Costa Gomes +Reviewed-by: Simon Horman +Tested-by: Naama Meir +Signed-off-by: Tony Nguyen +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/intel/igc/igc_ethtool.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c +index dd8a9d27a1670..fa71bfb753b6d 100644 +--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c ++++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c +@@ -979,6 +979,12 @@ static int igc_ethtool_get_nfc_rule(struct igc_adapter *adapter, + fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK; + } + ++ if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) { ++ fsp->flow_type |= FLOW_EXT; ++ fsp->h_ext.vlan_etype = rule->filter.vlan_etype; ++ fsp->m_ext.vlan_etype = ETHER_TYPE_FULL_MASK; ++ } ++ + if (rule->filter.match_flags & 
IGC_FILTER_FLAG_VLAN_TCI) { + fsp->flow_type |= FLOW_EXT; + fsp->h_ext.vlan_tci = htons(rule->filter.vlan_tci); +-- +2.43.0 + diff --git a/queue-6.6/iio-imu-adis16475-use-bit-numbers-in-assign_bit.patch b/queue-6.6/iio-imu-adis16475-use-bit-numbers-in-assign_bit.patch new file mode 100644 index 00000000000..5abb90cd019 --- /dev/null +++ b/queue-6.6/iio-imu-adis16475-use-bit-numbers-in-assign_bit.patch @@ -0,0 +1,43 @@ +From ed7fc20174dd0c8604e5bb593322b1bae8840f9c Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 6 Nov 2023 16:07:30 +0100 +Subject: iio: imu: adis16475: use bit numbers in assign_bit() + +From: Nuno Sa + +[ Upstream commit 1cd2fe4fd63e54b799a68c0856bda18f2e40caa8 ] + +assign_bit() expects a bit number and not a mask like BIT(x). Hence, +just remove the BIT() macro from the #defines. + +Reported-by: kernel test robot +Reported-by: Dan Carpenter +Closes: https://lore.kernel.org/r/202311060647.i9XyO4ej-lkp@intel.com/ +Fixes: fff7352bf7a3ce ("iio: imu: Add support for adis16475") +Signed-off-by: Nuno Sa +Link: https://lore.kernel.org/r/20231106150730.945-1-nuno.sa@analog.com +Cc: +Signed-off-by: Jonathan Cameron +Signed-off-by: Sasha Levin +--- + drivers/iio/imu/adis16475.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/drivers/iio/imu/adis16475.c b/drivers/iio/imu/adis16475.c +index 0a9951a502101..6c81dc5bf2c7a 100644 +--- a/drivers/iio/imu/adis16475.c ++++ b/drivers/iio/imu/adis16475.c +@@ -63,8 +63,8 @@ + #define ADIS16475_MAX_SCAN_DATA 20 + /* spi max speed in brust mode */ + #define ADIS16475_BURST_MAX_SPEED 1000000 +-#define ADIS16475_LSB_DEC_MASK BIT(0) +-#define ADIS16475_LSB_FIR_MASK BIT(1) ++#define ADIS16475_LSB_DEC_MASK 0 ++#define ADIS16475_LSB_FIR_MASK 1 + + enum { + ADIS16475_SYNC_DIRECT = 1, +-- +2.43.0 + diff --git a/queue-6.6/iommu-vt-d-support-enforce_cache_coherency-only-for-.patch b/queue-6.6/iommu-vt-d-support-enforce_cache_coherency-only-for-.patch new file mode 100644 index 00000000000..ddbdadf6aa0 --- /dev/null +++ b/queue-6.6/iommu-vt-d-support-enforce_cache_coherency-only-for-.patch @@ -0,0 +1,84 @@ +From 02d31a50994079cc278a191ffc74c44ead0f9378 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 22 Nov 2023 11:26:02 +0800 +Subject: iommu/vt-d: Support enforce_cache_coherency only for empty domains + +From: Lu Baolu + +[ Upstream commit e645c20e8e9cde549bc233435d3c1338e1cd27fe ] + +The enforce_cache_coherency callback ensures DMA cache coherency for +devices attached to the domain. + +Intel IOMMU supports enforced DMA cache coherency when the Snoop +Control bit in the IOMMU's extended capability register is set. +Supporting it differs between legacy and scalable modes. + +In legacy mode, it's supported page-level by setting the SNP field +in second-stage page-table entries. In scalable mode, it's supported +in PASID-table granularity by setting the PGSNP field in PASID-table +entries. + +In legacy mode, mappings before attaching to a device have SNP +fields cleared, while mappings after the callback have them set. +This means partial DMAs are cache coherent while others are not. + +One possible fix is replaying mappings and flipping SNP bits when +attaching a domain to a device. But this seems to be over-engineered, +given that all real use cases just attach an empty domain to a device. + +To meet practical needs while reducing mode differences, only support +enforce_cache_coherency on a domain without mappings if SNP field is +used. 
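A simplified model of the resulting check (this is only the shape of the logic,
not the driver code): when the domain translates through second-stage page
tables, the SNP bit would have to be present in entries that were already
written, so enforcement is refused once mappings exist; in scalable/first-level
mode the setting lives in the PASID entry and existing mappings do not matter.

#include <stdbool.h>
#include <stdio.h>

static bool can_enforce_coherency(bool hw_snoop_control,
                                  bool use_first_level, bool has_mappings)
{
        if (!hw_snoop_control)
                return false;       /* hardware cannot enforce it at all */
        if (!use_first_level && has_mappings)
                return false;       /* already-written second-stage PTEs lack SNP */
        return true;
}

int main(void)
{
        /* second-stage domain that already has mappings: refuse */
        printf("%d\n", can_enforce_coherency(true, false, true));
        /* same domain before any mapping was created: allow */
        printf("%d\n", can_enforce_coherency(true, false, false));
        return 0;
}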
+ +Fixes: fc0051cb9590 ("iommu/vt-d: Check domain force_snooping against attached devices") +Signed-off-by: Lu Baolu +Reviewed-by: Kevin Tian +Link: https://lore.kernel.org/r/20231114011036.70142-1-baolu.lu@linux.intel.com +Signed-off-by: Joerg Roedel +Signed-off-by: Sasha Levin +--- + drivers/iommu/intel/iommu.c | 5 ++++- + drivers/iommu/intel/iommu.h | 3 +++ + 2 files changed, 7 insertions(+), 1 deletion(-) + +diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c +index 4c3707384bd92..744e4e6b8d72d 100644 +--- a/drivers/iommu/intel/iommu.c ++++ b/drivers/iommu/intel/iommu.c +@@ -2204,6 +2204,8 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, + attr |= DMA_FL_PTE_DIRTY; + } + ++ domain->has_mappings = true; ++ + pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | attr; + + while (nr_pages > 0) { +@@ -4309,7 +4311,8 @@ static bool intel_iommu_enforce_cache_coherency(struct iommu_domain *domain) + return true; + + spin_lock_irqsave(&dmar_domain->lock, flags); +- if (!domain_support_force_snooping(dmar_domain)) { ++ if (!domain_support_force_snooping(dmar_domain) || ++ (!dmar_domain->use_first_level && dmar_domain->has_mappings)) { + spin_unlock_irqrestore(&dmar_domain->lock, flags); + return false; + } +diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h +index 7dac94f62b4ec..e6a3e70656166 100644 +--- a/drivers/iommu/intel/iommu.h ++++ b/drivers/iommu/intel/iommu.h +@@ -592,6 +592,9 @@ struct dmar_domain { + * otherwise, goes through the second + * level. + */ ++ u8 has_mappings:1; /* Has mappings configured through ++ * iommu_map() interface. ++ */ + + spinlock_t lock; /* Protect device tracking lists */ + struct list_head devices; /* all devices' list */ +-- +2.43.0 + diff --git a/queue-6.6/kernel-resource-increment-by-align-value-in-get_free.patch b/queue-6.6/kernel-resource-increment-by-align-value-in-get_free.patch new file mode 100644 index 00000000000..23cde95909d --- /dev/null +++ b/queue-6.6/kernel-resource-increment-by-align-value-in-get_free.patch @@ -0,0 +1,53 @@ +From 664db2cb95ad54b05bbdd4f460a44b0a9cb66d42 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 13 Nov 2023 14:13:24 -0800 +Subject: kernel/resource: Increment by align value in get_free_mem_region() + +From: Alison Schofield + +[ Upstream commit 659aa050a53817157b7459529538598a6449c1d3 ] + +Currently get_free_mem_region() searches for available capacity +in increments equal to the region size being requested. This can +cause the search to take giant steps through the resource leaving +needless gaps and missing available space. + +Specifically 'cxl create-region' fails with ERANGE even though capacity +of the given size and CXL's expected 256M x InterleaveWays alignment can +be satisfied. + +Replace the total-request-size increment with a next alignment increment +so that the next possible address is always examined for availability. 
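To see why a request-size stride can miss a usable window, here is a small
stand-alone model with an invented layout (it is not the kernel's resource code):
a 0x400-byte, 0x100-aligned request fits at 0x100, but a walk that advances by
the request size only probes 0x0 and 0x400, both of which collide with occupied
ranges.

#include <stdbool.h>
#include <stdio.h>

struct range { unsigned long start, end; };

/* two occupied blocks inside a 0x800-byte span starting at 0 */
static const struct range busy[] = { { 0x000, 0x0ff }, { 0x500, 0x7ff } };

static bool range_free(unsigned long start, unsigned long size)
{
        unsigned long end = start + size - 1;

        for (unsigned int i = 0; i < 2; i++)
                if (start <= busy[i].end && end >= busy[i].start)
                        return false;
        return true;
}

static long search(unsigned long span, unsigned long size, unsigned long step)
{
        for (unsigned long addr = 0; addr + size <= span; addr += step)
                if (range_free(addr, size))
                        return (long)addr;
        return -1;
}

int main(void)
{
        printf("step = size : %ld\n", search(0x800, 0x400, 0x400)); /* -1: window missed  */
        printf("step = align: %ld\n", search(0x800, 0x400, 0x100)); /* 256, i.e. 0x100    */
        return 0;
}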
+ +Fixes: 14b80582c43e ("resource: Introduce alloc_free_mem_region()") +Reported-by: Dmytro Adamenko +Reported-by: Dan Williams +Signed-off-by: Alison Schofield +Reviewed-by: Dave Jiang +Link: https://lore.kernel.org/r/20231113221324.1118092-1-alison.schofield@intel.com +Cc: Jason Gunthorpe +Reviewed-by: Christoph Hellwig +Signed-off-by: Dan Williams +Signed-off-by: Sasha Levin +--- + kernel/resource.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/kernel/resource.c b/kernel/resource.c +index b1763b2fd7ef3..e3f5680a564cf 100644 +--- a/kernel/resource.c ++++ b/kernel/resource.c +@@ -1847,8 +1847,8 @@ get_free_mem_region(struct device *dev, struct resource *base, + + write_lock(&resource_lock); + for (addr = gfr_start(base, size, align, flags); +- gfr_continue(base, addr, size, flags); +- addr = gfr_next(addr, size, flags)) { ++ gfr_continue(base, addr, align, flags); ++ addr = gfr_next(addr, align, flags)) { + if (__region_intersects(base, addr, size, 0, IORES_DESC_NONE) != + REGION_DISJOINT) + continue; +-- +2.43.0 + diff --git a/queue-6.6/kvm-s390-vsie-fix-wrong-vir-37-when-mso-is-used.patch b/queue-6.6/kvm-s390-vsie-fix-wrong-vir-37-when-mso-is-used.patch new file mode 100644 index 00000000000..798176c6b8c --- /dev/null +++ b/queue-6.6/kvm-s390-vsie-fix-wrong-vir-37-when-mso-is-used.patch @@ -0,0 +1,70 @@ +From ae8ee64e2f7a1243d08efeefd0f6f562c8afd9ce Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 2 Nov 2023 16:35:49 +0100 +Subject: KVM: s390: vsie: fix wrong VIR 37 when MSO is used + +From: Claudio Imbrenda + +[ Upstream commit 80aea01c48971a1fffc0252d036995572d84950d ] + +When the host invalidates a guest page, it will also check if the page +was used to map the prefix of any guest CPUs, in which case they are +stopped and marked as needing a prefix refresh. Upon starting the +affected CPUs again, their prefix pages are explicitly faulted in and +revalidated if they had been invalidated. A bit in the PGSTEs indicates +whether or not a page might contain a prefix. The bit is allowed to +overindicate. Pages above 2G are skipped, because they cannot be +prefixes, since KVM runs all guests with MSO = 0. + +The same applies for nested guests (VSIE). When the host invalidates a +guest page that maps the prefix of the nested guest, it has to stop the +affected nested guest CPUs and mark them as needing a prefix refresh. +The same PGSTE bit used for the guest prefix is also used for the +nested guest. Pages above 2G are skipped like for normal guests, which +is the source of the bug. + +The nested guest runs is the guest primary address space. The guest +could be running the nested guest using MSO != 0. If the MSO + prefix +for the nested guest is above 2G, the check for nested prefix will skip +it. This will cause the invalidation notifier to not stop the CPUs of +the nested guest and not mark them as needing refresh. When the nested +guest is run again, its prefix will not be refreshed, since it has not +been marked for refresh. This will cause a fatal validity intercept +with VIR code 37. + +Fix this by removing the check for 2G for nested guests. Now all +invalidations of pages with the notify bit set will always scan the +existing VSIE shadow state descriptors. + +This allows to catch invalidations of nested guest prefix mappings even +when the prefix is above 2G in the guest virtual address space. 
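A quick worked example with invented numbers makes the address arithmetic
concrete: the nested guest's prefix pages live at MSO + prefix inside the guest's
own address space, so unlike a non-nested prefix they can sit far above the 2G
line the removed check tested against.

#include <stdio.h>

int main(void)
{
        unsigned long long mso    = 8ULL << 30;  /* guest places the nested guest at 8G */
        unsigned long long prefix = 0x20000;     /* nested guest's prefix register */
        unsigned long long in_guest = mso + prefix;

        printf("nested prefix pages at %#llx (%s 2G)\n", in_guest,
               in_guest >= (1ULL << 31) ? "above" : "below");
        return 0;
}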
+ +Fixes: a3508fbe9dc6 ("KVM: s390: vsie: initial support for nested virtualization") +Tested-by: Nico Boehr +Reviewed-by: Nico Boehr +Reviewed-by: David Hildenbrand +Message-ID: <20231102153549.53984-1-imbrenda@linux.ibm.com> +Signed-off-by: Claudio Imbrenda +Signed-off-by: Sasha Levin +--- + arch/s390/kvm/vsie.c | 4 ---- + 1 file changed, 4 deletions(-) + +diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c +index 61499293c2ac3..e55f489e1fb79 100644 +--- a/arch/s390/kvm/vsie.c ++++ b/arch/s390/kvm/vsie.c +@@ -587,10 +587,6 @@ void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start, + + if (!gmap_is_shadow(gmap)) + return; +- if (start >= 1UL << 31) +- /* We are only interested in prefix pages */ +- return; +- + /* + * Only new shadow blocks are added to the list during runtime, + * therefore we can safely reference them all the time. +-- +2.43.0 + diff --git a/queue-6.6/media-qcom-camss-fix-genpd-cleanup.patch b/queue-6.6/media-qcom-camss-fix-genpd-cleanup.patch new file mode 100644 index 00000000000..0228546abde --- /dev/null +++ b/queue-6.6/media-qcom-camss-fix-genpd-cleanup.patch @@ -0,0 +1,113 @@ +From bb5de28ea5ab4dae47b1401322c2a0bded2c4e9e Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 30 Aug 2023 16:16:08 +0100 +Subject: media: qcom: camss: Fix genpd cleanup + +From: Bryan O'Donoghue + +[ Upstream commit f69791c39745e64621216fe8919cb73c0065002b ] + +Right now we never release the power-domains properly on the error path. +Add a routine to be reused for this purpose and appropriate jumps in +probe() to run that routine where necessary. + +Fixes: 2f6f8af67203 ("media: camss: Refactor VFE power domain toggling") +Cc: stable@vger.kernel.org +Signed-off-by: Bryan O'Donoghue +Signed-off-by: Hans Verkuil +Signed-off-by: Sasha Levin +--- + drivers/media/platform/qcom/camss/camss.c | 35 ++++++++++++++--------- + 1 file changed, 21 insertions(+), 14 deletions(-) + +diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c +index a925b2bfd8989..c6df862c79e39 100644 +--- a/drivers/media/platform/qcom/camss/camss.c ++++ b/drivers/media/platform/qcom/camss/camss.c +@@ -1538,6 +1538,20 @@ static int camss_icc_get(struct camss *camss) + return 0; + } + ++static void camss_genpd_cleanup(struct camss *camss) ++{ ++ int i; ++ ++ if (camss->genpd_num == 1) ++ return; ++ ++ if (camss->genpd_num > camss->vfe_num) ++ device_link_del(camss->genpd_link[camss->genpd_num - 1]); ++ ++ for (i = 0; i < camss->genpd_num; i++) ++ dev_pm_domain_detach(camss->genpd[i], true); ++} ++ + /* + * camss_probe - Probe CAMSS platform device + * @pdev: Pointer to CAMSS platform device +@@ -1627,11 +1641,11 @@ static int camss_probe(struct platform_device *pdev) + + ret = camss_init_subdevices(camss); + if (ret < 0) +- return ret; ++ goto err_genpd_cleanup; + + ret = dma_set_mask_and_coherent(dev, 0xffffffff); + if (ret) +- return ret; ++ goto err_genpd_cleanup; + + camss->media_dev.dev = camss->dev; + strscpy(camss->media_dev.model, "Qualcomm Camera Subsystem", +@@ -1643,7 +1657,7 @@ static int camss_probe(struct platform_device *pdev) + ret = v4l2_device_register(camss->dev, &camss->v4l2_dev); + if (ret < 0) { + dev_err(dev, "Failed to register V4L2 device: %d\n", ret); +- return ret; ++ goto err_genpd_cleanup; + } + + v4l2_async_nf_init(&camss->notifier, &camss->v4l2_dev); +@@ -1693,28 +1707,19 @@ static int camss_probe(struct platform_device *pdev) + err_v4l2_device_unregister: + v4l2_device_unregister(&camss->v4l2_dev); + 
v4l2_async_nf_cleanup(&camss->notifier); ++err_genpd_cleanup: ++ camss_genpd_cleanup(camss); + + return ret; + } + + void camss_delete(struct camss *camss) + { +- int i; +- + v4l2_device_unregister(&camss->v4l2_dev); + media_device_unregister(&camss->media_dev); + media_device_cleanup(&camss->media_dev); + + pm_runtime_disable(camss->dev); +- +- if (camss->genpd_num == 1) +- return; +- +- if (camss->genpd_num > camss->vfe_num) +- device_link_del(camss->genpd_link[camss->genpd_num - 1]); +- +- for (i = 0; i < camss->genpd_num; i++) +- dev_pm_domain_detach(camss->genpd[i], true); + } + + /* +@@ -1733,6 +1738,8 @@ static void camss_remove(struct platform_device *pdev) + + if (atomic_read(&camss->ref_count) == 0) + camss_delete(camss); ++ ++ camss_genpd_cleanup(camss); + } + + static const struct of_device_id camss_dt_match[] = { +-- +2.43.0 + diff --git a/queue-6.6/media-qcom-camss-fix-v4l2-async-notifier-error-path.patch b/queue-6.6/media-qcom-camss-fix-v4l2-async-notifier-error-path.patch new file mode 100644 index 00000000000..5b7c84330df --- /dev/null +++ b/queue-6.6/media-qcom-camss-fix-v4l2-async-notifier-error-path.patch @@ -0,0 +1,92 @@ +From 857864a875e721f09193fd3002901e6d112d5dc7 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 30 Aug 2023 16:16:07 +0100 +Subject: media: qcom: camss: Fix V4L2 async notifier error path + +From: Bryan O'Donoghue + +[ Upstream commit b278080a89f452063915beda0ade6b3ed5ee4271 ] + +Previously the jump label err_cleanup was used higher in the probe() +function to release the async notifier however the async notifier +registration was moved later in the code rendering the previous four jumps +redundant. + +Rename the label from err_cleanup to err_v4l2_device_unregister to capture +what the jump does. + +Fixes: 51397a4ec75d ("media: qcom: Initialise V4L2 async notifier later") +Signed-off-by: Bryan O'Donoghue +Signed-off-by: Hans Verkuil +[hverkuil: fix old name in commit log: err_v4l2_device_register -> err_v4l2_device_unregister] +Signed-off-by: Sasha Levin +--- + drivers/media/platform/qcom/camss/camss.c | 16 ++++++++-------- + 1 file changed, 8 insertions(+), 8 deletions(-) + +diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c +index 75991d849b571..a925b2bfd8989 100644 +--- a/drivers/media/platform/qcom/camss/camss.c ++++ b/drivers/media/platform/qcom/camss/camss.c +@@ -1617,21 +1617,21 @@ static int camss_probe(struct platform_device *pdev) + + ret = camss_icc_get(camss); + if (ret < 0) +- goto err_cleanup; ++ return ret; + + ret = camss_configure_pd(camss); + if (ret < 0) { + dev_err(dev, "Failed to configure power domains: %d\n", ret); +- goto err_cleanup; ++ return ret; + } + + ret = camss_init_subdevices(camss); + if (ret < 0) +- goto err_cleanup; ++ return ret; + + ret = dma_set_mask_and_coherent(dev, 0xffffffff); + if (ret) +- goto err_cleanup; ++ return ret; + + camss->media_dev.dev = camss->dev; + strscpy(camss->media_dev.model, "Qualcomm Camera Subsystem", +@@ -1643,7 +1643,7 @@ static int camss_probe(struct platform_device *pdev) + ret = v4l2_device_register(camss->dev, &camss->v4l2_dev); + if (ret < 0) { + dev_err(dev, "Failed to register V4L2 device: %d\n", ret); +- goto err_cleanup; ++ return ret; + } + + v4l2_async_nf_init(&camss->notifier, &camss->v4l2_dev); +@@ -1651,12 +1651,12 @@ static int camss_probe(struct platform_device *pdev) + num_subdevs = camss_of_parse_ports(camss); + if (num_subdevs < 0) { + ret = num_subdevs; +- goto err_cleanup; ++ goto err_v4l2_device_unregister; + } 
+ + ret = camss_register_entities(camss); + if (ret < 0) +- goto err_cleanup; ++ goto err_v4l2_device_unregister; + + if (num_subdevs) { + camss->notifier.ops = &camss_subdev_notifier_ops; +@@ -1690,7 +1690,7 @@ static int camss_probe(struct platform_device *pdev) + + err_register_subdevs: + camss_unregister_entities(camss); +-err_cleanup: ++err_v4l2_device_unregister: + v4l2_device_unregister(&camss->v4l2_dev); + v4l2_async_nf_cleanup(&camss->notifier); + +-- +2.43.0 + diff --git a/queue-6.6/mlxbf_gige-fix-receive-packet-race-condition.patch b/queue-6.6/mlxbf_gige-fix-receive-packet-race-condition.patch new file mode 100644 index 00000000000..901b9b1e4b1 --- /dev/null +++ b/queue-6.6/mlxbf_gige-fix-receive-packet-race-condition.patch @@ -0,0 +1,63 @@ +From 60e829865f09970201419c8a2cb2ba5d3418770b Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 20 Dec 2023 18:47:39 -0500 +Subject: mlxbf_gige: fix receive packet race condition + +From: David Thompson + +[ Upstream commit dcea1bd45e6d111cc8fc1aaefa7e31694089bda3 ] + +Under heavy traffic, the BlueField Gigabit interface can +become unresponsive. This is due to a possible race condition +in the mlxbf_gige_rx_packet function, where the function exits +with producer and consumer indices equal but there are remaining +packet(s) to be processed. In order to prevent this situation, +read receive consumer index *before* the HW replenish so that +the mlxbf_gige_rx_packet function returns an accurate return +value even if a packet is received into just-replenished buffer +prior to exiting this routine. If the just-replenished buffer +is received and occupies the last RX ring entry, the interface +would not recover and instead would encounter RX packet drops +related to internal buffer shortages since the driver RX logic +is not being triggered to drain the RX ring. This patch will +address and prevent this "ring full" condition. + +Fixes: f92e1869d74e ("Add Mellanox BlueField Gigabit Ethernet driver") +Reviewed-by: Asmaa Mnebhi +Signed-off-by: David Thompson +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c | 9 +++++++-- + 1 file changed, 7 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c +index 0d5a41a2ae010..227d01cace3f0 100644 +--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c ++++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c +@@ -267,6 +267,13 @@ static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts) + priv->stats.rx_truncate_errors++; + } + ++ /* Read receive consumer index before replenish so that this routine ++ * returns accurate return value even if packet is received into ++ * just-replenished buffer prior to exiting this routine. 
++ */ ++ rx_ci = readq(priv->base + MLXBF_GIGE_RX_CQE_PACKET_CI); ++ rx_ci_rem = rx_ci % priv->rx_q_entries; ++ + /* Let hardware know we've replenished one buffer */ + rx_pi++; + +@@ -279,8 +286,6 @@ static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts) + rx_pi_rem = rx_pi % priv->rx_q_entries; + if (rx_pi_rem == 0) + priv->valid_polarity ^= 1; +- rx_ci = readq(priv->base + MLXBF_GIGE_RX_CQE_PACKET_CI); +- rx_ci_rem = rx_ci % priv->rx_q_entries; + + if (skb) + netif_receive_skb(skb); +-- +2.43.0 + diff --git a/queue-6.6/mm-convert-dax-lock-unlock-page-to-lock-unlock-folio.patch b/queue-6.6/mm-convert-dax-lock-unlock-page-to-lock-unlock-folio.patch new file mode 100644 index 00000000000..3beec4a34d9 --- /dev/null +++ b/queue-6.6/mm-convert-dax-lock-unlock-page-to-lock-unlock-folio.patch @@ -0,0 +1,202 @@ +From 3597e7ca7400f585506aa6a6f7593d15a82b9873 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 23 Aug 2023 00:13:14 +0100 +Subject: mm: convert DAX lock/unlock page to lock/unlock folio + +From: Matthew Wilcox (Oracle) + +[ Upstream commit 91e79d22be75fec88ae58d274a7c9e49d6215099 ] + +The one caller of DAX lock/unlock page already calls compound_head(), so +use page_folio() instead, then use a folio throughout the DAX code to +remove uses of page->mapping and page->index. + +[jane.chu@oracle.com: add comment to mf_generic_kill_procss(), simplify mf_generic_kill_procs:folio initialization] + Link: https://lkml.kernel.org/r/20230908222336.186313-1-jane.chu@oracle.com +Link: https://lkml.kernel.org/r/20230822231314.349200-1-willy@infradead.org +Signed-off-by: Matthew Wilcox (Oracle) +Signed-off-by: Jane Chu +Acked-by: Naoya Horiguchi +Cc: Dan Williams +Cc: Jane Chu +Signed-off-by: Andrew Morton +Stable-dep-of: 376907f3a0b3 ("mm/memory-failure: pass the folio and the page to collect_procs()") +Signed-off-by: Sasha Levin +--- + fs/dax.c | 24 ++++++++++++------------ + include/linux/dax.h | 10 +++++----- + mm/memory-failure.c | 29 ++++++++++++++++------------- + 3 files changed, 33 insertions(+), 30 deletions(-) + +diff --git a/fs/dax.c b/fs/dax.c +index 8fafecbe42b15..3380b43cb6bbb 100644 +--- a/fs/dax.c ++++ b/fs/dax.c +@@ -412,23 +412,23 @@ static struct page *dax_busy_page(void *entry) + return NULL; + } + +-/* +- * dax_lock_page - Lock the DAX entry corresponding to a page +- * @page: The page whose entry we want to lock ++/** ++ * dax_lock_folio - Lock the DAX entry corresponding to a folio ++ * @folio: The folio whose entry we want to lock + * + * Context: Process context. +- * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could ++ * Return: A cookie to pass to dax_unlock_folio() or 0 if the entry could + * not be locked. 
+ */ +-dax_entry_t dax_lock_page(struct page *page) ++dax_entry_t dax_lock_folio(struct folio *folio) + { + XA_STATE(xas, NULL, 0); + void *entry; + +- /* Ensure page->mapping isn't freed while we look at it */ ++ /* Ensure folio->mapping isn't freed while we look at it */ + rcu_read_lock(); + for (;;) { +- struct address_space *mapping = READ_ONCE(page->mapping); ++ struct address_space *mapping = READ_ONCE(folio->mapping); + + entry = NULL; + if (!mapping || !dax_mapping(mapping)) +@@ -447,11 +447,11 @@ dax_entry_t dax_lock_page(struct page *page) + + xas.xa = &mapping->i_pages; + xas_lock_irq(&xas); +- if (mapping != page->mapping) { ++ if (mapping != folio->mapping) { + xas_unlock_irq(&xas); + continue; + } +- xas_set(&xas, page->index); ++ xas_set(&xas, folio->index); + entry = xas_load(&xas); + if (dax_is_locked(entry)) { + rcu_read_unlock(); +@@ -467,10 +467,10 @@ dax_entry_t dax_lock_page(struct page *page) + return (dax_entry_t)entry; + } + +-void dax_unlock_page(struct page *page, dax_entry_t cookie) ++void dax_unlock_folio(struct folio *folio, dax_entry_t cookie) + { +- struct address_space *mapping = page->mapping; +- XA_STATE(xas, &mapping->i_pages, page->index); ++ struct address_space *mapping = folio->mapping; ++ XA_STATE(xas, &mapping->i_pages, folio->index); + + if (S_ISCHR(mapping->host->i_mode)) + return; +diff --git a/include/linux/dax.h b/include/linux/dax.h +index 22cd9902345d7..b463502b16e17 100644 +--- a/include/linux/dax.h ++++ b/include/linux/dax.h +@@ -159,8 +159,8 @@ int dax_writeback_mapping_range(struct address_space *mapping, + + struct page *dax_layout_busy_page(struct address_space *mapping); + struct page *dax_layout_busy_page_range(struct address_space *mapping, loff_t start, loff_t end); +-dax_entry_t dax_lock_page(struct page *page); +-void dax_unlock_page(struct page *page, dax_entry_t cookie); ++dax_entry_t dax_lock_folio(struct folio *folio); ++void dax_unlock_folio(struct folio *folio, dax_entry_t cookie); + dax_entry_t dax_lock_mapping_entry(struct address_space *mapping, + unsigned long index, struct page **page); + void dax_unlock_mapping_entry(struct address_space *mapping, +@@ -182,14 +182,14 @@ static inline int dax_writeback_mapping_range(struct address_space *mapping, + return -EOPNOTSUPP; + } + +-static inline dax_entry_t dax_lock_page(struct page *page) ++static inline dax_entry_t dax_lock_folio(struct folio *folio) + { +- if (IS_DAX(page->mapping->host)) ++ if (IS_DAX(folio->mapping->host)) + return ~0UL; + return 0; + } + +-static inline void dax_unlock_page(struct page *page, dax_entry_t cookie) ++static inline void dax_unlock_folio(struct folio *folio, dax_entry_t cookie) + { + } + +diff --git a/mm/memory-failure.c b/mm/memory-failure.c +index 16e002e08cf8f..75eb1d6857e48 100644 +--- a/mm/memory-failure.c ++++ b/mm/memory-failure.c +@@ -1713,20 +1713,23 @@ static void unmap_and_kill(struct list_head *to_kill, unsigned long pfn, + kill_procs(to_kill, flags & MF_MUST_KILL, false, pfn, flags); + } + ++/* ++ * Only dev_pagemap pages get here, such as fsdax when the filesystem ++ * either do not claim or fails to claim a hwpoison event, or devdax. ++ * The fsdax pages are initialized per base page, and the devdax pages ++ * could be initialized either as base pages, or as compound pages with ++ * vmemmap optimization enabled. Devdax is simplistic in its dealing with ++ * hwpoison, such that, if a subpage of a compound page is poisoned, ++ * simply mark the compound head page is by far sufficient. 
++ */ + static int mf_generic_kill_procs(unsigned long long pfn, int flags, + struct dev_pagemap *pgmap) + { +- struct page *page = pfn_to_page(pfn); ++ struct folio *folio = pfn_folio(pfn); + LIST_HEAD(to_kill); + dax_entry_t cookie; + int rc = 0; + +- /* +- * Pages instantiated by device-dax (not filesystem-dax) +- * may be compound pages. +- */ +- page = compound_head(page); +- + /* + * Prevent the inode from being freed while we are interrogating + * the address_space, typically this would be handled by +@@ -1734,11 +1737,11 @@ static int mf_generic_kill_procs(unsigned long long pfn, int flags, + * also prevents changes to the mapping of this pfn until + * poison signaling is complete. + */ +- cookie = dax_lock_page(page); ++ cookie = dax_lock_folio(folio); + if (!cookie) + return -EBUSY; + +- if (hwpoison_filter(page)) { ++ if (hwpoison_filter(&folio->page)) { + rc = -EOPNOTSUPP; + goto unlock; + } +@@ -1760,7 +1763,7 @@ static int mf_generic_kill_procs(unsigned long long pfn, int flags, + * Use this flag as an indication that the dax page has been + * remapped UC to prevent speculative consumption of poison. + */ +- SetPageHWPoison(page); ++ SetPageHWPoison(&folio->page); + + /* + * Unlike System-RAM there is no possibility to swap in a +@@ -1769,11 +1772,11 @@ static int mf_generic_kill_procs(unsigned long long pfn, int flags, + * SIGBUS (i.e. MF_MUST_KILL) + */ + flags |= MF_ACTION_REQUIRED | MF_MUST_KILL; +- collect_procs(page, &to_kill, true); ++ collect_procs(&folio->page, &to_kill, true); + +- unmap_and_kill(&to_kill, pfn, page->mapping, page->index, flags); ++ unmap_and_kill(&to_kill, pfn, folio->mapping, folio->index, flags); + unlock: +- dax_unlock_page(page, cookie); ++ dax_unlock_folio(folio, cookie); + return rc; + } + +-- +2.43.0 + diff --git a/queue-6.6/mm-memory-failure-pass-the-folio-and-the-page-to-col.patch b/queue-6.6/mm-memory-failure-pass-the-folio-and-the-page-to-col.patch new file mode 100644 index 00000000000..f30994f2f1f --- /dev/null +++ b/queue-6.6/mm-memory-failure-pass-the-folio-and-the-page-to-col.patch @@ -0,0 +1,115 @@ +From 033f535e55332406b1d6dd5cc2e8b6c39c96e622 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 18 Dec 2023 13:58:35 +0000 +Subject: mm/memory-failure: pass the folio and the page to collect_procs() + +From: Matthew Wilcox (Oracle) + +[ Upstream commit 376907f3a0b34a17e80417825f8cc1c40fcba81b ] + +Patch series "Three memory-failure fixes". + +I've been looking at the memory-failure code and I believe I have found +three bugs that need fixing -- one going all the way back to 2010! I'll +have more patches later to use folios more extensively but didn't want +these bugfixes to get caught up in that. + +This patch (of 3): + +Both collect_procs_anon() and collect_procs_file() iterate over the VMA +interval trees looking for a single pgoff, so it is wrong to look for the +pgoff of the head page as is currently done. However, it is also wrong to +look at page->mapping of the precise page as this is invalid for tail +pages. Clear up the confusion by passing both the folio and the precise +page to collect_procs(). 
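For intuition, a tiny stand-alone illustration with invented values: the interval
trees are walked for one page offset, and for a tail page of a large folio that
offset differs from the head's by the subpage index, so querying with the head's
pgoff points the lookup at the wrong file offset.

#include <stdio.h>

int main(void)
{
        unsigned long head_index = 512; /* head page's offset in the file, in pages */
        unsigned long subpage    = 37;  /* which page of the folio took the error */

        printf("pgoff used before the fix:  %lu\n", head_index);
        printf("pgoff of the poisoned page: %lu\n", head_index + subpage);
        return 0;
}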
+ +Link: https://lkml.kernel.org/r/20231218135837.3310403-1-willy@infradead.org +Link: https://lkml.kernel.org/r/20231218135837.3310403-2-willy@infradead.org +Fixes: 415c64c1453a ("mm/memory-failure: split thp earlier in memory error handling") +Signed-off-by: Matthew Wilcox (Oracle) +Cc: Dan Williams +Cc: Naoya Horiguchi +Cc: +Signed-off-by: Andrew Morton +Signed-off-by: Sasha Levin +--- + mm/memory-failure.c | 25 ++++++++++++------------- + 1 file changed, 12 insertions(+), 13 deletions(-) + +diff --git a/mm/memory-failure.c b/mm/memory-failure.c +index 75eb1d6857e48..455093f73a70c 100644 +--- a/mm/memory-failure.c ++++ b/mm/memory-failure.c +@@ -595,10 +595,9 @@ struct task_struct *task_early_kill(struct task_struct *tsk, int force_early) + /* + * Collect processes when the error hit an anonymous page. + */ +-static void collect_procs_anon(struct page *page, struct list_head *to_kill, +- int force_early) ++static void collect_procs_anon(struct folio *folio, struct page *page, ++ struct list_head *to_kill, int force_early) + { +- struct folio *folio = page_folio(page); + struct vm_area_struct *vma; + struct task_struct *tsk; + struct anon_vma *av; +@@ -633,12 +632,12 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill, + /* + * Collect processes when the error hit a file mapped page. + */ +-static void collect_procs_file(struct page *page, struct list_head *to_kill, +- int force_early) ++static void collect_procs_file(struct folio *folio, struct page *page, ++ struct list_head *to_kill, int force_early) + { + struct vm_area_struct *vma; + struct task_struct *tsk; +- struct address_space *mapping = page->mapping; ++ struct address_space *mapping = folio->mapping; + pgoff_t pgoff; + + i_mmap_lock_read(mapping); +@@ -704,17 +703,17 @@ static void collect_procs_fsdax(struct page *page, + /* + * Collect the processes who have the corrupted page mapped to kill. + */ +-static void collect_procs(struct page *page, struct list_head *tokill, +- int force_early) ++static void collect_procs(struct folio *folio, struct page *page, ++ struct list_head *tokill, int force_early) + { +- if (!page->mapping) ++ if (!folio->mapping) + return; + if (unlikely(PageKsm(page))) + collect_procs_ksm(page, tokill, force_early); + else if (PageAnon(page)) +- collect_procs_anon(page, tokill, force_early); ++ collect_procs_anon(folio, page, tokill, force_early); + else +- collect_procs_file(page, tokill, force_early); ++ collect_procs_file(folio, page, tokill, force_early); + } + + struct hwpoison_walk { +@@ -1602,7 +1601,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn, + * mapped in dirty form. This has to be done before try_to_unmap, + * because ttu takes the rmap data structures down. + */ +- collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED); ++ collect_procs(folio, p, &tokill, flags & MF_ACTION_REQUIRED); + + if (PageHuge(hpage) && !PageAnon(hpage)) { + /* +@@ -1772,7 +1771,7 @@ static int mf_generic_kill_procs(unsigned long long pfn, int flags, + * SIGBUS (i.e. 
MF_MUST_KILL) + */ + flags |= MF_ACTION_REQUIRED | MF_MUST_KILL; +- collect_procs(&folio->page, &to_kill, true); ++ collect_procs(folio, &folio->page, &to_kill, true); + + unmap_and_kill(&to_kill, pfn, folio->mapping, folio->index, flags); + unlock: +-- +2.43.0 + diff --git a/queue-6.6/net-bcmgenet-fix-fcs-generation-for-fragmented-skbuf.patch b/queue-6.6/net-bcmgenet-fix-fcs-generation-for-fragmented-skbuf.patch new file mode 100644 index 00000000000..295edad15e0 --- /dev/null +++ b/queue-6.6/net-bcmgenet-fix-fcs-generation-for-fragmented-skbuf.patch @@ -0,0 +1,46 @@ +From c666641a229982b371ed674781b92658f4b82226 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 28 Dec 2023 14:56:38 +0100 +Subject: net: bcmgenet: Fix FCS generation for fragmented skbuffs + +From: Adrian Cinal + +[ Upstream commit e584f2ff1e6cc9b1d99e8a6b0f3415940d1b3eb3 ] + +The flag DMA_TX_APPEND_CRC was only written to the first DMA descriptor +in the TX path, where each descriptor corresponds to a single skbuff +fragment (or the skbuff head). This led to packets with no FCS appearing +on the wire if the kernel allocated the packet in fragments, which would +always happen when using PACKET_MMAP/TPACKET (cf. tpacket_fill_skb() in +net/af_packet.c). + +Fixes: 1c1008c793fa ("net: bcmgenet: add main driver file") +Signed-off-by: Adrian Cinal +Acked-by: Doug Berger +Acked-by: Florian Fainelli +Link: https://lore.kernel.org/r/20231228135638.1339245-1-adriancinal1@gmail.com +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/broadcom/genet/bcmgenet.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c +index 24bade875ca6a..89c8ddc6565ae 100644 +--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c ++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c +@@ -2132,8 +2132,10 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) + /* Note: if we ever change from DMA_TX_APPEND_CRC below we + * will need to restore software padding of "runt" packets + */ ++ len_stat |= DMA_TX_APPEND_CRC; ++ + if (!i) { +- len_stat |= DMA_TX_APPEND_CRC | DMA_SOP; ++ len_stat |= DMA_SOP; + if (skb->ip_summed == CHECKSUM_PARTIAL) + len_stat |= DMA_TX_DO_CSUM; + } +-- +2.43.0 + diff --git a/queue-6.6/net-constify-sk_dst_get-and-__sk_dst_get-argument.patch b/queue-6.6/net-constify-sk_dst_get-and-__sk_dst_get-argument.patch new file mode 100644 index 00000000000..7a2b9f61a70 --- /dev/null +++ b/queue-6.6/net-constify-sk_dst_get-and-__sk_dst_get-argument.patch @@ -0,0 +1,42 @@ +From 75cd14e6c588a3190b87f716063b9ad389aa8c10 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 20 Sep 2023 17:29:41 +0000 +Subject: net: constify sk_dst_get() and __sk_dst_get() argument + +From: Eric Dumazet + +[ Upstream commit 5033f58d5feed1040eebeadb0c5efc95b8bf5720 ] + +Both helpers only read fields from their socket argument. + +Signed-off-by: Eric Dumazet +Signed-off-by: David S. 
Miller +Signed-off-by: Sasha Levin +--- + include/net/sock.h | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/include/net/sock.h b/include/net/sock.h +index 1b7ca8f35dd60..70a771d964676 100644 +--- a/include/net/sock.h ++++ b/include/net/sock.h +@@ -2152,14 +2152,14 @@ static inline bool sk_rethink_txhash(struct sock *sk) + } + + static inline struct dst_entry * +-__sk_dst_get(struct sock *sk) ++__sk_dst_get(const struct sock *sk) + { + return rcu_dereference_check(sk->sk_dst_cache, + lockdep_sock_is_held(sk)); + } + + static inline struct dst_entry * +-sk_dst_get(struct sock *sk) ++sk_dst_get(const struct sock *sk) + { + struct dst_entry *dst; + +-- +2.43.0 + diff --git a/queue-6.6/net-implement-missing-getsockopt-so_timestamping_new.patch b/queue-6.6/net-implement-missing-getsockopt-so_timestamping_new.patch new file mode 100644 index 00000000000..61e05b96da9 --- /dev/null +++ b/queue-6.6/net-implement-missing-getsockopt-so_timestamping_new.patch @@ -0,0 +1,60 @@ +From 25a23006e5aa8f2a209151f20a1895aaa6b1d8e5 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 22 Dec 2023 00:19:01 +0100 +Subject: net: Implement missing getsockopt(SO_TIMESTAMPING_NEW) +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Jörn-Thorben Hinz + +[ Upstream commit 7f6ca95d16b96567ce4cf458a2790ff17fa620c3 ] + +Commit 9718475e6908 ("socket: Add SO_TIMESTAMPING_NEW") added the new +socket option SO_TIMESTAMPING_NEW. Setting the option is handled in +sk_setsockopt(), querying it was not handled in sk_getsockopt(), though. + +Following remarks on an earlier submission of this patch, keep the old +behavior of getsockopt(SO_TIMESTAMPING_OLD) which returns the active +flags even if they actually have been set through SO_TIMESTAMPING_NEW. + +The new getsockopt(SO_TIMESTAMPING_NEW) is stricter, returning flags +only if they have been set through the same option. + +Fixes: 9718475e6908 ("socket: Add SO_TIMESTAMPING_NEW") +Link: https://lore.kernel.org/lkml/20230703175048.151683-1-jthinz@mailbox.tu-berlin.de/ +Link: https://lore.kernel.org/netdev/0d7cddc9-03fa-43db-a579-14f3e822615b@app.fastmail.com/ +Signed-off-by: Jörn-Thorben Hinz +Reviewed-by: Willem de Bruijn +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + net/core/sock.c | 11 +++++++++-- + 1 file changed, 9 insertions(+), 2 deletions(-) + +diff --git a/net/core/sock.c b/net/core/sock.c +index bfaf47b3f3c7c..fe687e6170c9a 100644 +--- a/net/core/sock.c ++++ b/net/core/sock.c +@@ -1718,9 +1718,16 @@ int sk_getsockopt(struct sock *sk, int level, int optname, + break; + + case SO_TIMESTAMPING_OLD: ++ case SO_TIMESTAMPING_NEW: + lv = sizeof(v.timestamping); +- v.timestamping.flags = READ_ONCE(sk->sk_tsflags); +- v.timestamping.bind_phc = READ_ONCE(sk->sk_bind_phc); ++ /* For the later-added case SO_TIMESTAMPING_NEW: Be strict about only ++ * returning the flags when they were set through the same option. ++ * Don't change the beviour for the old case SO_TIMESTAMPING_OLD. 
++ */ ++ if (optname == SO_TIMESTAMPING_OLD || sock_flag(sk, SOCK_TSTAMP_NEW)) { ++ v.timestamping.flags = READ_ONCE(sk->sk_tsflags); ++ v.timestamping.bind_phc = READ_ONCE(sk->sk_bind_phc); ++ } + break; + + case SO_RCVTIMEO_OLD: +-- +2.43.0 + diff --git a/queue-6.6/net-implement-missing-so_timestamping_new-cmsg-suppo.patch b/queue-6.6/net-implement-missing-so_timestamping_new-cmsg-suppo.patch new file mode 100644 index 00000000000..eae61862324 --- /dev/null +++ b/queue-6.6/net-implement-missing-so_timestamping_new-cmsg-suppo.patch @@ -0,0 +1,40 @@ +From 03034829ff41f4afd88e3c7ab157970c98274577 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 4 Jan 2024 09:57:44 +0100 +Subject: net: Implement missing SO_TIMESTAMPING_NEW cmsg support + +From: Thomas Lange + +[ Upstream commit 382a32018b74f407008615e0e831d05ed28e81cd ] + +Commit 9718475e6908 ("socket: Add SO_TIMESTAMPING_NEW") added the new +socket option SO_TIMESTAMPING_NEW. However, it was never implemented in +__sock_cmsg_send thus breaking SO_TIMESTAMPING cmsg for platforms using +SO_TIMESTAMPING_NEW. + +Fixes: 9718475e6908 ("socket: Add SO_TIMESTAMPING_NEW") +Link: https://lore.kernel.org/netdev/6a7281bf-bc4a-4f75-bb88-7011908ae471@app.fastmail.com/ +Signed-off-by: Thomas Lange +Reviewed-by: Willem de Bruijn +Link: https://lore.kernel.org/r/20240104085744.49164-1-thomas@corelatus.se +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + net/core/sock.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/net/core/sock.c b/net/core/sock.c +index fe687e6170c9a..5cd21e699f2d6 100644 +--- a/net/core/sock.c ++++ b/net/core/sock.c +@@ -2828,6 +2828,7 @@ int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg, + sockc->mark = *(u32 *)CMSG_DATA(cmsg); + break; + case SO_TIMESTAMPING_OLD: ++ case SO_TIMESTAMPING_NEW: + if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32))) + return -EINVAL; + +-- +2.43.0 + diff --git a/queue-6.6/net-libwx-fix-memory-leak-on-free-page.patch b/queue-6.6/net-libwx-fix-memory-leak-on-free-page.patch new file mode 100644 index 00000000000..51839ef181a --- /dev/null +++ b/queue-6.6/net-libwx-fix-memory-leak-on-free-page.patch @@ -0,0 +1,176 @@ +From 6b2fd0f8ab1ee9869685d408db55a5e8149ce11c Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 14 Dec 2023 10:33:37 +0800 +Subject: net: libwx: fix memory leak on free page + +From: duanqiangwen + +[ Upstream commit 738b54b9b6236f573eed2453c4cbfa77326793e2 ] + +ifconfig ethx up, will set page->refcount larger than 1, +and then ifconfig ethx down, calling __page_frag_cache_drain() +to free pages, it is not compatible with page pool. +So deleting codes which changing page->refcount. + +Fixes: 3c47e8ae113a ("net: libwx: Support to receive packets in NAPI") +Signed-off-by: duanqiangwen +Signed-off-by: David S. 
Miller +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/wangxun/libwx/wx_lib.c | 82 ++------------------ + drivers/net/ethernet/wangxun/libwx/wx_type.h | 1 - + 2 files changed, 6 insertions(+), 77 deletions(-) + +diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c +index 21505920136c6..e078f4071dc23 100644 +--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c ++++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c +@@ -160,60 +160,6 @@ static __le32 wx_test_staterr(union wx_rx_desc *rx_desc, + return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); + } + +-static bool wx_can_reuse_rx_page(struct wx_rx_buffer *rx_buffer, +- int rx_buffer_pgcnt) +-{ +- unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; +- struct page *page = rx_buffer->page; +- +- /* avoid re-using remote and pfmemalloc pages */ +- if (!dev_page_is_reusable(page)) +- return false; +- +-#if (PAGE_SIZE < 8192) +- /* if we are only owner of page we can reuse it */ +- if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) +- return false; +-#endif +- +- /* If we have drained the page fragment pool we need to update +- * the pagecnt_bias and page count so that we fully restock the +- * number of references the driver holds. +- */ +- if (unlikely(pagecnt_bias == 1)) { +- page_ref_add(page, USHRT_MAX - 1); +- rx_buffer->pagecnt_bias = USHRT_MAX; +- } +- +- return true; +-} +- +-/** +- * wx_reuse_rx_page - page flip buffer and store it back on the ring +- * @rx_ring: rx descriptor ring to store buffers on +- * @old_buff: donor buffer to have page reused +- * +- * Synchronizes page for reuse by the adapter +- **/ +-static void wx_reuse_rx_page(struct wx_ring *rx_ring, +- struct wx_rx_buffer *old_buff) +-{ +- u16 nta = rx_ring->next_to_alloc; +- struct wx_rx_buffer *new_buff; +- +- new_buff = &rx_ring->rx_buffer_info[nta]; +- +- /* update, and store next to alloc */ +- nta++; +- rx_ring->next_to_alloc = (nta < rx_ring->count) ? 
nta : 0; +- +- /* transfer page from old buffer to new buffer */ +- new_buff->page = old_buff->page; +- new_buff->page_dma = old_buff->page_dma; +- new_buff->page_offset = old_buff->page_offset; +- new_buff->pagecnt_bias = old_buff->pagecnt_bias; +-} +- + static void wx_dma_sync_frag(struct wx_ring *rx_ring, + struct wx_rx_buffer *rx_buffer) + { +@@ -270,8 +216,6 @@ static struct wx_rx_buffer *wx_get_rx_buffer(struct wx_ring *rx_ring, + size, + DMA_FROM_DEVICE); + skip_sync: +- rx_buffer->pagecnt_bias--; +- + return rx_buffer; + } + +@@ -280,19 +224,9 @@ static void wx_put_rx_buffer(struct wx_ring *rx_ring, + struct sk_buff *skb, + int rx_buffer_pgcnt) + { +- if (wx_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) { +- /* hand second half of page back to the ring */ +- wx_reuse_rx_page(rx_ring, rx_buffer); +- } else { +- if (!IS_ERR(skb) && WX_CB(skb)->dma == rx_buffer->dma) +- /* the page has been released from the ring */ +- WX_CB(skb)->page_released = true; +- else +- page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false); +- +- __page_frag_cache_drain(rx_buffer->page, +- rx_buffer->pagecnt_bias); +- } ++ if (!IS_ERR(skb) && WX_CB(skb)->dma == rx_buffer->dma) ++ /* the page has been released from the ring */ ++ WX_CB(skb)->page_released = true; + + /* clear contents of rx_buffer */ + rx_buffer->page = NULL; +@@ -335,11 +269,12 @@ static struct sk_buff *wx_build_skb(struct wx_ring *rx_ring, + if (size <= WX_RXBUFFER_256) { + memcpy(__skb_put(skb, size), page_addr, + ALIGN(size, sizeof(long))); +- rx_buffer->pagecnt_bias++; +- ++ page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, true); + return skb; + } + ++ skb_mark_for_recycle(skb); ++ + if (!wx_test_staterr(rx_desc, WX_RXD_STAT_EOP)) + WX_CB(skb)->dma = rx_buffer->dma; + +@@ -382,8 +317,6 @@ static bool wx_alloc_mapped_page(struct wx_ring *rx_ring, + bi->page_dma = dma; + bi->page = page; + bi->page_offset = 0; +- page_ref_add(page, USHRT_MAX - 1); +- bi->pagecnt_bias = USHRT_MAX; + + return true; + } +@@ -721,7 +654,6 @@ static int wx_clean_rx_irq(struct wx_q_vector *q_vector, + + /* exit if we failed to retrieve a buffer */ + if (!skb) { +- rx_buffer->pagecnt_bias++; + break; + } + +@@ -2241,8 +2173,6 @@ static void wx_clean_rx_ring(struct wx_ring *rx_ring) + + /* free resources associated with mapping */ + page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false); +- __page_frag_cache_drain(rx_buffer->page, +- rx_buffer->pagecnt_bias); + + i++; + rx_buffer++; +diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h +index c5cbd177ef627..c555af9ed51b2 100644 +--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h ++++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h +@@ -759,7 +759,6 @@ struct wx_rx_buffer { + dma_addr_t page_dma; + struct page *page; + unsigned int page_offset; +- u16 pagecnt_bias; + }; + + struct wx_queue_stats { +-- +2.43.0 + diff --git a/queue-6.6/net-prevent-mss-overflow-in-skb_segment.patch b/queue-6.6/net-prevent-mss-overflow-in-skb_segment.patch new file mode 100644 index 00000000000..7a247fc456f --- /dev/null +++ b/queue-6.6/net-prevent-mss-overflow-in-skb_segment.patch @@ -0,0 +1,117 @@ +From 2a2cf32c320b383e25a5c9a3ce57d5a7e7d97038 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 12 Dec 2023 16:46:21 +0000 +Subject: net: prevent mss overflow in skb_segment() + +From: Eric Dumazet + +[ Upstream commit 23d05d563b7e7b0314e65c8e882bc27eac2da8e7 ] + +Once again syzbot is able to crash the kernel in skb_segment() [1] + 
+GSO_BY_FRAGS is a forbidden value, but unfortunately the following +computation in skb_segment() can reach it quite easily : + + mss = mss * partial_segs; + +65535 = 3 * 5 * 17 * 257, so many initial values of mss can lead to +a bad final result. + +Make sure to limit segmentation so that the new mss value is smaller +than GSO_BY_FRAGS. + +[1] + +general protection fault, probably for non-canonical address 0xdffffc000000000e: 0000 [#1] PREEMPT SMP KASAN +KASAN: null-ptr-deref in range [0x0000000000000070-0x0000000000000077] +CPU: 1 PID: 5079 Comm: syz-executor993 Not tainted 6.7.0-rc4-syzkaller-00141-g1ae4cd3cbdd0 #0 +Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 11/10/2023 +RIP: 0010:skb_segment+0x181d/0x3f30 net/core/skbuff.c:4551 +Code: 83 e3 02 e9 fb ed ff ff e8 90 68 1c f9 48 8b 84 24 f8 00 00 00 48 8d 78 70 48 b8 00 00 00 00 00 fc ff df 48 89 fa 48 c1 ea 03 <0f> b6 04 02 84 c0 74 08 3c 03 0f 8e 8a 21 00 00 48 8b 84 24 f8 00 +RSP: 0018:ffffc900043473d0 EFLAGS: 00010202 +RAX: dffffc0000000000 RBX: 0000000000010046 RCX: ffffffff886b1597 +RDX: 000000000000000e RSI: ffffffff886b2520 RDI: 0000000000000070 +RBP: ffffc90004347578 R08: 0000000000000005 R09: 000000000000ffff +R10: 000000000000ffff R11: 0000000000000002 R12: ffff888063202ac0 +R13: 0000000000010000 R14: 000000000000ffff R15: 0000000000000046 +FS: 0000555556e7e380(0000) GS:ffff8880b9900000(0000) knlGS:0000000000000000 +CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 +CR2: 0000000020010000 CR3: 0000000027ee2000 CR4: 00000000003506f0 +DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 +DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 +Call Trace: + +udp6_ufo_fragment+0xa0e/0xd00 net/ipv6/udp_offload.c:109 +ipv6_gso_segment+0x534/0x17e0 net/ipv6/ip6_offload.c:120 +skb_mac_gso_segment+0x290/0x610 net/core/gso.c:53 +__skb_gso_segment+0x339/0x710 net/core/gso.c:124 +skb_gso_segment include/net/gso.h:83 [inline] +validate_xmit_skb+0x36c/0xeb0 net/core/dev.c:3626 +__dev_queue_xmit+0x6f3/0x3d60 net/core/dev.c:4338 +dev_queue_xmit include/linux/netdevice.h:3134 [inline] +packet_xmit+0x257/0x380 net/packet/af_packet.c:276 +packet_snd net/packet/af_packet.c:3087 [inline] +packet_sendmsg+0x24c6/0x5220 net/packet/af_packet.c:3119 +sock_sendmsg_nosec net/socket.c:730 [inline] +__sock_sendmsg+0xd5/0x180 net/socket.c:745 +__sys_sendto+0x255/0x340 net/socket.c:2190 +__do_sys_sendto net/socket.c:2202 [inline] +__se_sys_sendto net/socket.c:2198 [inline] +__x64_sys_sendto+0xe0/0x1b0 net/socket.c:2198 +do_syscall_x64 arch/x86/entry/common.c:52 [inline] +do_syscall_64+0x40/0x110 arch/x86/entry/common.c:83 +entry_SYSCALL_64_after_hwframe+0x63/0x6b +RIP: 0033:0x7f8692032aa9 +Code: 28 00 00 00 75 05 48 83 c4 28 c3 e8 d1 19 00 00 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b8 ff ff ff f7 d8 64 89 01 48 +RSP: 002b:00007fff8d685418 EFLAGS: 00000246 ORIG_RAX: 000000000000002c +RAX: ffffffffffffffda RBX: 0000000000000003 RCX: 00007f8692032aa9 +RDX: 0000000000010048 RSI: 00000000200000c0 RDI: 0000000000000003 +RBP: 00000000000f4240 R08: 0000000020000540 R09: 0000000000000014 +R10: 0000000000000000 R11: 0000000000000246 R12: 00007fff8d685480 +R13: 0000000000000001 R14: 00007fff8d685480 R15: 0000000000000003 + +Modules linked in: +---[ end trace 0000000000000000 ]--- +RIP: 0010:skb_segment+0x181d/0x3f30 net/core/skbuff.c:4551 +Code: 83 e3 02 e9 fb ed ff ff e8 90 68 1c f9 48 8b 84 24 f8 00 00 00 48 8d 78 70 48 b8 00 00 00 00 00 
fc ff df 48 89 fa 48 c1 ea 03 <0f> b6 04 02 84 c0 74 08 3c 03 0f 8e 8a 21 00 00 48 8b 84 24 f8 00 +RSP: 0018:ffffc900043473d0 EFLAGS: 00010202 +RAX: dffffc0000000000 RBX: 0000000000010046 RCX: ffffffff886b1597 +RDX: 000000000000000e RSI: ffffffff886b2520 RDI: 0000000000000070 +RBP: ffffc90004347578 R08: 0000000000000005 R09: 000000000000ffff +R10: 000000000000ffff R11: 0000000000000002 R12: ffff888063202ac0 +R13: 0000000000010000 R14: 000000000000ffff R15: 0000000000000046 +FS: 0000555556e7e380(0000) GS:ffff8880b9900000(0000) knlGS:0000000000000000 +CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 +CR2: 0000000020010000 CR3: 0000000027ee2000 CR4: 00000000003506f0 +DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 +DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 + +Fixes: 3953c46c3ac7 ("sk_buff: allow segmenting based on frag sizes") +Signed-off-by: Eric Dumazet +Cc: Marcelo Ricardo Leitner +Reviewed-by: Willem de Bruijn +Link: https://lore.kernel.org/r/20231212164621.4131800-1-edumazet@google.com +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + net/core/skbuff.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/net/core/skbuff.c b/net/core/skbuff.c +index 6d204cf54c574..011d69029112a 100644 +--- a/net/core/skbuff.c ++++ b/net/core/skbuff.c +@@ -4508,8 +4508,9 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb, + /* GSO partial only requires that we trim off any excess that + * doesn't fit into an MSS sized block, so take care of that + * now. ++ * Cap len to not accidentally hit GSO_BY_FRAGS. + */ +- partial_segs = len / mss; ++ partial_segs = min(len, GSO_BY_FRAGS - 1U) / mss; + if (partial_segs > 1) + mss *= partial_segs; + else +-- +2.43.0 + diff --git a/queue-6.6/net-qla3xxx-fix-potential-memleak-in-ql_alloc_buffer.patch b/queue-6.6/net-qla3xxx-fix-potential-memleak-in-ql_alloc_buffer.patch new file mode 100644 index 00000000000..78fbb661252 --- /dev/null +++ b/queue-6.6/net-qla3xxx-fix-potential-memleak-in-ql_alloc_buffer.patch @@ -0,0 +1,44 @@ +From 239d1768ac039de14d2ce408b87fd5986e3833dc Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 27 Dec 2023 15:02:27 +0800 +Subject: net/qla3xxx: fix potential memleak in ql_alloc_buffer_queues + +From: Dinghao Liu + +[ Upstream commit 89f45c30172c80e55c887f32f1af8e184124577b ] + +When dma_alloc_coherent() fails, we should free qdev->lrg_buf +to prevent potential memleak. 
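+
+As an illustration of the unwind pattern the fix restores, a minimal
+sketch with hypothetical names (not the qla3xxx structures themselves):
+
+	buf = kmalloc(num_bufs * sizeof(*buf), GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	virt = dma_alloc_coherent(&pdev->dev, q_size, &phys, GFP_KERNEL);
+	if (!virt) {
+		kfree(buf);	/* undo the earlier allocation on failure */
+		return -ENOMEM;
+	}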
+ +Fixes: 1357bfcf7106 ("qla3xxx: Dynamically size the rx buffer queue based on the MTU.") +Signed-off-by: Dinghao Liu +Link: https://lore.kernel.org/r/20231227070227.10527-1-dinghao.liu@zju.edu.cn +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/qlogic/qla3xxx.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c +index 0d57ffcedf0c6..fc78bc959ded8 100644 +--- a/drivers/net/ethernet/qlogic/qla3xxx.c ++++ b/drivers/net/ethernet/qlogic/qla3xxx.c +@@ -2591,6 +2591,7 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) + + if (qdev->lrg_buf_q_alloc_virt_addr == NULL) { + netdev_err(qdev->ndev, "lBufQ failed\n"); ++ kfree(qdev->lrg_buf); + return -ENOMEM; + } + qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr; +@@ -2615,6 +2616,7 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) + qdev->lrg_buf_q_alloc_size, + qdev->lrg_buf_q_alloc_virt_addr, + qdev->lrg_buf_q_alloc_phy_addr); ++ kfree(qdev->lrg_buf); + return -ENOMEM; + } + +-- +2.43.0 + diff --git a/queue-6.6/net-ravb-wait-for-operating-mode-to-be-applied.patch b/queue-6.6/net-ravb-wait-for-operating-mode-to-be-applied.patch new file mode 100644 index 00000000000..ec1bc8df876 --- /dev/null +++ b/queue-6.6/net-ravb-wait-for-operating-mode-to-be-applied.patch @@ -0,0 +1,181 @@ +From 7a27e573c2e2fc52be2db89e89467244f3918c05 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 3 Jan 2024 10:13:53 +0200 +Subject: net: ravb: Wait for operating mode to be applied + +From: Claudiu Beznea + +[ Upstream commit 9039cd4c61635b2d541009a7cd5e2cc052402f28 ] + +CSR.OPS bits specify the current operating mode and (according to +documentation) they are updated by HW when the operating mode change +request is processed. To comply with this check CSR.OPS before proceeding. + +Commit introduces ravb_set_opmode() that does all the necessities for +setting the operating mode (set CCC.OPC (and CCC.GAC, CCC.CSEL, if any) and +wait for CSR.OPS) and call it where needed. This should comply with all the +HW manuals requirements as different manual variants specify that different +modes need to be checked in CSR.OPS when setting CCC.OPC. + +If gPTP active in config mode is supported and it needs to be enabled, the +CCC.GAC and CCC.CSEL needs to be configured along with CCC.OPC in the same +write access. For this, ravb_set_opmode() allows passing GAC and CSEL as +part of opmode and the function updates accordingly CCC register. + +Fixes: c156633f1353 ("Renesas Ethernet AVB driver proper") +Signed-off-by: Claudiu Beznea +Reviewed-by: Sergey Shtylyov +Signed-off-by: David S. 
Miller +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/renesas/ravb_main.c | 65 +++++++++++++++--------- + 1 file changed, 42 insertions(+), 23 deletions(-) + +diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c +index bb56cf4090423..3c2a6b23c202a 100644 +--- a/drivers/net/ethernet/renesas/ravb_main.c ++++ b/drivers/net/ethernet/renesas/ravb_main.c +@@ -66,16 +66,27 @@ int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value) + return -ETIMEDOUT; + } + +-static int ravb_config(struct net_device *ndev) ++static int ravb_set_opmode(struct net_device *ndev, u32 opmode) + { ++ u32 csr_ops = 1U << (opmode & CCC_OPC); ++ u32 ccc_mask = CCC_OPC; + int error; + +- /* Set config mode */ +- ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG); +- /* Check if the operating mode is changed to the config mode */ +- error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG); +- if (error) +- netdev_err(ndev, "failed to switch device to config mode\n"); ++ /* If gPTP active in config mode is supported it needs to be configured ++ * along with CSEL and operating mode in the same access. This is a ++ * hardware limitation. ++ */ ++ if (opmode & CCC_GAC) ++ ccc_mask |= CCC_GAC | CCC_CSEL; ++ ++ /* Set operating mode */ ++ ravb_modify(ndev, CCC, ccc_mask, opmode); ++ /* Check if the operating mode is changed to the requested one */ ++ error = ravb_wait(ndev, CSR, CSR_OPS, csr_ops); ++ if (error) { ++ netdev_err(ndev, "failed to switch device to requested mode (%u)\n", ++ opmode & CCC_OPC); ++ } + + return error; + } +@@ -673,7 +684,7 @@ static int ravb_dmac_init(struct net_device *ndev) + int error; + + /* Set CONFIG mode */ +- error = ravb_config(ndev); ++ error = ravb_set_opmode(ndev, CCC_OPC_CONFIG); + if (error) + return error; + +@@ -682,9 +693,7 @@ static int ravb_dmac_init(struct net_device *ndev) + return error; + + /* Setting the control will start the AVB-DMAC process. 
*/ +- ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_OPERATION); +- +- return 0; ++ return ravb_set_opmode(ndev, CCC_OPC_OPERATION); + } + + static void ravb_get_tx_tstamp(struct net_device *ndev) +@@ -1046,7 +1055,7 @@ static int ravb_stop_dma(struct net_device *ndev) + return error; + + /* Stop AVB-DMAC process */ +- return ravb_config(ndev); ++ return ravb_set_opmode(ndev, CCC_OPC_CONFIG); + } + + /* E-MAC interrupt handler */ +@@ -2560,21 +2569,25 @@ static int ravb_set_gti(struct net_device *ndev) + return 0; + } + +-static void ravb_set_config_mode(struct net_device *ndev) ++static int ravb_set_config_mode(struct net_device *ndev) + { + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; ++ int error; + + if (info->gptp) { +- ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG); ++ error = ravb_set_opmode(ndev, CCC_OPC_CONFIG); ++ if (error) ++ return error; + /* Set CSEL value */ + ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB); + } else if (info->ccc_gac) { +- ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG | +- CCC_GAC | CCC_CSEL_HPB); ++ error = ravb_set_opmode(ndev, CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB); + } else { +- ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG); ++ error = ravb_set_opmode(ndev, CCC_OPC_CONFIG); + } ++ ++ return error; + } + + /* Set tx and rx clock internal delay modes */ +@@ -2794,7 +2807,9 @@ static int ravb_probe(struct platform_device *pdev) + ndev->ethtool_ops = &ravb_ethtool_ops; + + /* Set AVB config mode */ +- ravb_set_config_mode(ndev); ++ error = ravb_set_config_mode(ndev); ++ if (error) ++ goto out_disable_gptp_clk; + + if (info->gptp || info->ccc_gac) { + /* Set GTI value */ +@@ -2917,8 +2932,7 @@ static int ravb_remove(struct platform_device *pdev) + dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, + priv->desc_bat_dma); + +- /* Set reset mode */ +- ravb_write(ndev, CCC_OPC_RESET, CCC); ++ ravb_set_opmode(ndev, CCC_OPC_RESET); + + clk_disable_unprepare(priv->gptp_clk); + clk_disable_unprepare(priv->refclk); +@@ -3002,8 +3016,11 @@ static int __maybe_unused ravb_resume(struct device *dev) + int ret = 0; + + /* If WoL is enabled set reset mode to rearm the WoL logic */ +- if (priv->wol_enabled) +- ravb_write(ndev, CCC_OPC_RESET, CCC); ++ if (priv->wol_enabled) { ++ ret = ravb_set_opmode(ndev, CCC_OPC_RESET); ++ if (ret) ++ return ret; ++ } + + /* All register have been reset to default values. 
+ * Restore all registers which where setup at probe time and +@@ -3011,7 +3028,9 @@ static int __maybe_unused ravb_resume(struct device *dev) + */ + + /* Set AVB config mode */ +- ravb_set_config_mode(ndev); ++ ret = ravb_set_config_mode(ndev); ++ if (ret) ++ return ret; + + if (info->gptp || info->ccc_gac) { + /* Set GTI value */ +-- +2.43.0 + diff --git a/queue-6.6/net-save-and-restore-msg_namelen-in-sock_sendmsg.patch b/queue-6.6/net-save-and-restore-msg_namelen-in-sock_sendmsg.patch new file mode 100644 index 00000000000..6275791df9d --- /dev/null +++ b/queue-6.6/net-save-and-restore-msg_namelen-in-sock_sendmsg.patch @@ -0,0 +1,55 @@ +From 2331ded635a94a4d4b90106a3784a011039f8620 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 21 Dec 2023 09:12:30 -0400 +Subject: net: Save and restore msg_namelen in sock_sendmsg + +From: Marc Dionne + +[ Upstream commit 01b2885d9415152bcb12ff1f7788f500a74ea0ed ] + +Commit 86a7e0b69bd5 ("net: prevent rewrite of msg_name in +sock_sendmsg()") made sock_sendmsg save the incoming msg_name pointer +and restore it before returning, to insulate the caller against +msg_name being changed by the called code. If the address length +was also changed however, we may return with an inconsistent structure +where the length doesn't match the address, and attempts to reuse it may +lead to lost packets. + +For example, a kernel that doesn't have commit 1c5950fc6fe9 ("udp6: fix +potential access to stale information") will replace a v4 mapped address +with its ipv4 equivalent, and shorten namelen accordingly from 28 to 16. +If the caller attempts to reuse the resulting msg structure, it will have +the original ipv6 (v4 mapped) address but an incorrect v4 length. + +Fixes: 86a7e0b69bd5 ("net: prevent rewrite of msg_name in sock_sendmsg()") +Signed-off-by: Marc Dionne +Reviewed-by: Willem de Bruijn +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + net/socket.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/net/socket.c b/net/socket.c +index c4a6f55329552..8d83c4bb163b4 100644 +--- a/net/socket.c ++++ b/net/socket.c +@@ -757,6 +757,7 @@ int sock_sendmsg(struct socket *sock, struct msghdr *msg) + { + struct sockaddr_storage *save_addr = (struct sockaddr_storage *)msg->msg_name; + struct sockaddr_storage address; ++ int save_len = msg->msg_namelen; + int ret; + + if (msg->msg_name) { +@@ -766,6 +767,7 @@ int sock_sendmsg(struct socket *sock, struct msghdr *msg) + + ret = __sock_sendmsg(sock, msg); + msg->msg_name = save_addr; ++ msg->msg_namelen = save_len; + + return ret; + } +-- +2.43.0 + diff --git a/queue-6.6/net-sched-em_text-fix-possible-memory-leak-in-em_tex.patch b/queue-6.6/net-sched-em_text-fix-possible-memory-leak-in-em_tex.patch new file mode 100644 index 00000000000..b4c93d1f194 --- /dev/null +++ b/queue-6.6/net-sched-em_text-fix-possible-memory-leak-in-em_tex.patch @@ -0,0 +1,40 @@ +From 18306f71fa022710811fa67197ed5f80c289ca2d Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 21 Dec 2023 10:25:31 +0800 +Subject: net: sched: em_text: fix possible memory leak in em_text_destroy() + +From: Hangyu Hua + +[ Upstream commit 8fcb0382af6f1ef50936f1be05b8149eb2f88496 ] + +m->data needs to be freed when em_text_destroy is called. + +Fixes: d675c989ed2d ("[PKT_SCHED]: Packet classification based on textsearch (ematch)") +Acked-by: Jamal Hadi Salim +Signed-off-by: Hangyu Hua +Reviewed-by: Simon Horman +Signed-off-by: David S. 
Miller +Signed-off-by: Sasha Levin +--- + net/sched/em_text.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/net/sched/em_text.c b/net/sched/em_text.c +index 6f3c1fb2fb44c..f176afb70559e 100644 +--- a/net/sched/em_text.c ++++ b/net/sched/em_text.c +@@ -97,8 +97,10 @@ static int em_text_change(struct net *net, void *data, int len, + + static void em_text_destroy(struct tcf_ematch *m) + { +- if (EM_TEXT_PRIV(m) && EM_TEXT_PRIV(m)->config) ++ if (EM_TEXT_PRIV(m) && EM_TEXT_PRIV(m)->config) { + textsearch_destroy(EM_TEXT_PRIV(m)->config); ++ kfree(EM_TEXT_PRIV(m)); ++ } + } + + static int em_text_dump(struct sk_buff *skb, struct tcf_ematch *m) +-- +2.43.0 + diff --git a/queue-6.6/net-smc-fix-invalid-link-access-in-dumping-smc-r-con.patch b/queue-6.6/net-smc-fix-invalid-link-access-in-dumping-smc-r-con.patch new file mode 100644 index 00000000000..8e0e43ea7aa --- /dev/null +++ b/queue-6.6/net-smc-fix-invalid-link-access-in-dumping-smc-r-con.patch @@ -0,0 +1,91 @@ +From a3dc1903ca2f9e8ac7bb34feae81f71c9d2acfc7 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 27 Dec 2023 15:40:35 +0800 +Subject: net/smc: fix invalid link access in dumping SMC-R connections + +From: Wen Gu + +[ Upstream commit 9dbe086c69b8902c85cece394760ac212e9e4ccc ] + +A crash was found when dumping SMC-R connections. It can be reproduced +by following steps: + +- environment: two RNICs on both sides. +- run SMC-R between two sides, now a SMC_LGR_SYMMETRIC type link group + will be created. +- set the first RNIC down on either side and link group will turn to + SMC_LGR_ASYMMETRIC_LOCAL then. +- run 'smcss -R' and the crash will be triggered. + + BUG: kernel NULL pointer dereference, address: 0000000000000010 + #PF: supervisor read access in kernel mode + #PF: error_code(0x0000) - not-present page + PGD 8000000101fdd067 P4D 8000000101fdd067 PUD 10ce46067 PMD 0 + Oops: 0000 [#1] PREEMPT SMP PTI + CPU: 3 PID: 1810 Comm: smcss Kdump: loaded Tainted: G W E 6.7.0-rc6+ #51 + RIP: 0010:__smc_diag_dump.constprop.0+0x36e/0x620 [smc_diag] + Call Trace: + + ? __die+0x24/0x70 + ? page_fault_oops+0x66/0x150 + ? exc_page_fault+0x69/0x140 + ? asm_exc_page_fault+0x26/0x30 + ? __smc_diag_dump.constprop.0+0x36e/0x620 [smc_diag] + smc_diag_dump_proto+0xd0/0xf0 [smc_diag] + smc_diag_dump+0x26/0x60 [smc_diag] + netlink_dump+0x19f/0x320 + __netlink_dump_start+0x1dc/0x300 + smc_diag_handler_dump+0x6a/0x80 [smc_diag] + ? __pfx_smc_diag_dump+0x10/0x10 [smc_diag] + sock_diag_rcv_msg+0x121/0x140 + ? __pfx_sock_diag_rcv_msg+0x10/0x10 + netlink_rcv_skb+0x5a/0x110 + sock_diag_rcv+0x28/0x40 + netlink_unicast+0x22a/0x330 + netlink_sendmsg+0x240/0x4a0 + __sock_sendmsg+0xb0/0xc0 + ____sys_sendmsg+0x24e/0x300 + ? copy_msghdr_from_user+0x62/0x80 + ___sys_sendmsg+0x7c/0xd0 + ? __do_fault+0x34/0x1a0 + ? do_read_fault+0x5f/0x100 + ? do_fault+0xb0/0x110 + __sys_sendmsg+0x4d/0x80 + do_syscall_64+0x45/0xf0 + entry_SYSCALL_64_after_hwframe+0x6e/0x76 + +When the first RNIC is set down, the lgr->lnk[0] will be cleared and an +asymmetric link will be allocated in lgr->link[SMC_LINKS_PER_LGR_MAX - 1] +by smc_llc_alloc_alt_link(). Then when we try to dump SMC-R connections +in __smc_diag_dump(), the invalid lgr->lnk[0] will be accessed, resulting +in this issue. So fix it by accessing the right link. 
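+
+In short, the dump code must follow the link the connection actually
+uses instead of blindly indexing slot 0 (simplified, surrounding types
+omitted):
+
+	/* fragile: lnk[0] may have been cleared when the first RNIC
+	 * went down, so dereferencing it can fault
+	 */
+	name = lgr->lnk[0].smcibdev->ibdev->name;
+
+	/* robust: 'link' is the link this connection currently uses */
+	name = link->smcibdev->ibdev->name;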
+ +Fixes: f16a7dd5cf27 ("smc: netlink interface for SMC sockets") +Reported-by: henaumars +Closes: https://bugzilla.openanolis.cn/show_bug.cgi?id=7616 +Signed-off-by: Wen Gu +Reviewed-by: Tony Lu +Link: https://lore.kernel.org/r/1703662835-53416-1-git-send-email-guwen@linux.alibaba.com +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + net/smc/smc_diag.c | 3 +-- + 1 file changed, 1 insertion(+), 2 deletions(-) + +diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c +index 7ff2152971a5b..2c464d76b06ce 100644 +--- a/net/smc/smc_diag.c ++++ b/net/smc/smc_diag.c +@@ -153,8 +153,7 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb, + .lnk[0].link_id = link->link_id, + }; + +- memcpy(linfo.lnk[0].ibname, +- smc->conn.lgr->lnk[0].smcibdev->ibdev->name, ++ memcpy(linfo.lnk[0].ibname, link->smcibdev->ibdev->name, + sizeof(link->smcibdev->ibdev->name)); + smc_gid_be16_convert(linfo.lnk[0].gid, link->gid); + smc_gid_be16_convert(linfo.lnk[0].peer_gid, link->peer_gid); +-- +2.43.0 + diff --git a/queue-6.6/netfilter-nf_nat-fix-action-not-being-set-for-all-ct.patch b/queue-6.6/netfilter-nf_nat-fix-action-not-being-set-for-all-ct.patch new file mode 100644 index 00000000000..59757bfd993 --- /dev/null +++ b/queue-6.6/netfilter-nf_nat-fix-action-not-being-set-for-all-ct.patch @@ -0,0 +1,54 @@ +From 01c7d3a6293fcabe7b9d90e9db00770d6d436790 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 22 Dec 2023 11:43:11 +1300 +Subject: netfilter: nf_nat: fix action not being set for all ct states + +From: Brad Cowie + +[ Upstream commit e6345d2824a3f58aab82428d11645e0da861ac13 ] + +This fixes openvswitch's handling of nat packets in the related state. + +In nf_ct_nat_execute(), which is called from nf_ct_nat(), ICMP/ICMPv6 +packets in the IP_CT_RELATED or IP_CT_RELATED_REPLY state, which have +not been dropped, will follow the goto, however the placement of the +goto label means that updating the action bit field will be bypassed. + +This causes ovs_nat_update_key() to not be called from ovs_ct_nat() +which means the openvswitch match key for the ICMP/ICMPv6 packet is not +updated and the pre-nat value will be retained for the key, which will +result in the wrong openflow rule being matched for that packet. + +Move the goto label above where the action bit field is being set so +that it is updated in all cases where the packet is accepted. 
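+
+Condensed from the hunk below, the change is simply where the label sits
+relative to the action update:
+
+	err = nf_nat_packet(ct, ctinfo, hooknum, skb);
+out:					/* related ICMP packets jump here */
+	if (err == NF_ACCEPT)
+		*action |= BIT(maniptype);
+
+	return err;
+
+so every accepted packet, including those in the related state that take
+the goto, now records the manip type and the openvswitch key is updated.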
+ +Fixes: ebddb1404900 ("net: move the nat function to nf_nat_ovs for ovs and tc") +Signed-off-by: Brad Cowie +Reviewed-by: Simon Horman +Acked-by: Xin Long +Acked-by: Aaron Conole +Signed-off-by: Pablo Neira Ayuso +Signed-off-by: Sasha Levin +--- + net/netfilter/nf_nat_ovs.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/net/netfilter/nf_nat_ovs.c b/net/netfilter/nf_nat_ovs.c +index 551abd2da6143..0f9a559f62079 100644 +--- a/net/netfilter/nf_nat_ovs.c ++++ b/net/netfilter/nf_nat_ovs.c +@@ -75,9 +75,10 @@ static int nf_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct, + } + + err = nf_nat_packet(ct, ctinfo, hooknum, skb); ++out: + if (err == NF_ACCEPT) + *action |= BIT(maniptype); +-out: ++ + return err; + } + +-- +2.43.0 + diff --git a/queue-6.6/netfilter-nf_tables-set-transport-offset-from-mac-he.patch b/queue-6.6/netfilter-nf_tables-set-transport-offset-from-mac-he.patch new file mode 100644 index 00000000000..b55f96e42a1 --- /dev/null +++ b/queue-6.6/netfilter-nf_tables-set-transport-offset-from-mac-he.patch @@ -0,0 +1,75 @@ +From 45b2123062603350a97462499dc4ab1f0a90e160 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 14 Dec 2023 11:50:12 +0100 +Subject: netfilter: nf_tables: set transport offset from mac header for + netdev/egress + +From: Pablo Neira Ayuso + +[ Upstream commit 0ae8e4cca78781401b17721bfb72718fdf7b4912 ] + +Before this patch, transport offset (pkt->thoff) provides an offset +relative to the network header. This is fine for the inet families +because skb->data points to the network header in such case. However, +from netdev/egress, skb->data points to the mac header (if available), +thus, pkt->thoff is missing the mac header length. + +Add skb_network_offset() to the transport offset (pkt->thoff) for +netdev, so transport header mangling works as expected. Adjust payload +fast eval function to use skb->data now that pkt->thoff provides an +absolute offset. This explains why users report that matching on +egress/netdev works but payload mangling does not. + +This patch implicitly fixes payload mangling for IPv4 packets in +netdev/egress given skb_store_bits() requires an offset from skb->data +to reach the transport header. + +I suspect that nft_exthdr and the trace infra were also broken from +netdev/egress because they also take skb->data as start, and pkt->thoff +was not correct. + +Note that IPv6 is fine because ipv6_find_hdr() already provides a +transport offset starting from skb->data, which includes +skb_network_offset(). + +The bridge family also uses nft_set_pktinfo_ipv4_validate(), but there +skb_network_offset() is zero, so the update in this patch does not alter +the existing behaviour. 
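+
+A worked example, assuming a plain Ethernet frame with no VLAN tag and an
+IPv4 header without options: from the netdev/egress hook skb->data points
+at the MAC header, so
+
+	thoff = skb_network_offset(skb) + iph->ihl * 4;	/* 14 + 20 = 34 */
+	ptr   = skb->data + thoff;			/* transport header */
+
+while for the inet families skb_network_offset(skb) is 0 and the old and
+new computations give the same result.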
+ +Fixes: 42df6e1d221d ("netfilter: Introduce egress hook") +Signed-off-by: Pablo Neira Ayuso +Signed-off-by: Sasha Levin +--- + include/net/netfilter/nf_tables_ipv4.h | 2 +- + net/netfilter/nf_tables_core.c | 2 +- + 2 files changed, 2 insertions(+), 2 deletions(-) + +diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h +index 947973623dc77..60a7d0ce30804 100644 +--- a/include/net/netfilter/nf_tables_ipv4.h ++++ b/include/net/netfilter/nf_tables_ipv4.h +@@ -30,7 +30,7 @@ static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt) + return -1; + + len = iph_totlen(pkt->skb, iph); +- thoff = iph->ihl * 4; ++ thoff = skb_network_offset(pkt->skb) + (iph->ihl * 4); + if (pkt->skb->len < len) + return -1; + else if (len < thoff) +diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c +index 4d0ce12221f66..711c22ab701dd 100644 +--- a/net/netfilter/nf_tables_core.c ++++ b/net/netfilter/nf_tables_core.c +@@ -158,7 +158,7 @@ static bool nft_payload_fast_eval(const struct nft_expr *expr, + else { + if (!(pkt->flags & NFT_PKTINFO_L4PROTO)) + return false; +- ptr = skb_network_header(skb) + nft_thoff(pkt); ++ ptr = skb->data + nft_thoff(pkt); + } + + ptr += priv->offset; +-- +2.43.0 + diff --git a/queue-6.6/netfilter-nft_immediate-drop-chain-reference-counter.patch b/queue-6.6/netfilter-nft_immediate-drop-chain-reference-counter.patch new file mode 100644 index 00000000000..cd723c40b25 --- /dev/null +++ b/queue-6.6/netfilter-nft_immediate-drop-chain-reference-counter.patch @@ -0,0 +1,36 @@ +From 7f994bcedfa454e18698672ba81af865715c7a31 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 1 Jan 2024 20:15:33 +0100 +Subject: netfilter: nft_immediate: drop chain reference counter on error + +From: Pablo Neira Ayuso + +[ Upstream commit b29be0ca8e816119ccdf95cc7d7c7be9bde005f1 ] + +In the init path, nft_data_init() bumps the chain reference counter, +decrement it on error by following the error path which calls +nft_data_release() to restore it. + +Fixes: 4bedf9eee016 ("netfilter: nf_tables: fix chain binding transaction logic") +Signed-off-by: Pablo Neira Ayuso +Signed-off-by: Sasha Levin +--- + net/netfilter/nft_immediate.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c +index fccb3cf7749c1..6475c7abc1fe3 100644 +--- a/net/netfilter/nft_immediate.c ++++ b/net/netfilter/nft_immediate.c +@@ -78,7 +78,7 @@ static int nft_immediate_init(const struct nft_ctx *ctx, + case NFT_GOTO: + err = nf_tables_bind_chain(ctx, chain); + if (err < 0) +- return err; ++ goto err1; + break; + default: + break; +-- +2.43.0 + diff --git a/queue-6.6/nfc-llcp_core-hold-a-ref-to-llcp_local-dev-when-hold.patch b/queue-6.6/nfc-llcp_core-hold-a-ref-to-llcp_local-dev-when-hold.patch new file mode 100644 index 00000000000..1d1c423e953 --- /dev/null +++ b/queue-6.6/nfc-llcp_core-hold-a-ref-to-llcp_local-dev-when-hold.patch @@ -0,0 +1,128 @@ +From 890929ed6c898362d8437fc95233920f684c4046 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 19 Dec 2023 23:19:43 +0530 +Subject: nfc: llcp_core: Hold a ref to llcp_local->dev when holding a ref to + llcp_local + +From: Siddh Raman Pant + +[ Upstream commit c95f919567d6f1914f13350af61a1b044ac85014 ] + +llcp_sock_sendmsg() calls nfc_llcp_send_ui_frame() which in turn calls +nfc_alloc_send_skb(), which accesses the nfc_dev from the llcp_sock for +getting the headroom and tailroom needed for skb allocation. 
+ +Parallelly the nfc_dev can be freed, as the refcount is decreased via +nfc_free_device(), leading to a UAF reported by Syzkaller, which can +be summarized as follows: + +(1) llcp_sock_sendmsg() -> nfc_llcp_send_ui_frame() + -> nfc_alloc_send_skb() -> Dereference *nfc_dev +(2) virtual_ncidev_close() -> nci_free_device() -> nfc_free_device() + -> put_device() -> nfc_release() -> Free *nfc_dev + +When a reference to llcp_local is acquired, we do not acquire the same +for the nfc_dev. This leads to freeing even when the llcp_local is in +use, and this is the case with the UAF described above too. + +Thus, when we acquire a reference to llcp_local, we should acquire a +reference to nfc_dev, and release the references appropriately later. + +References for llcp_local is initialized in nfc_llcp_register_device() +(which is called by nfc_register_device()). Thus, we should acquire a +reference to nfc_dev there. + +nfc_unregister_device() calls nfc_llcp_unregister_device() which in +turn calls nfc_llcp_local_put(). Thus, the reference to nfc_dev is +appropriately released later. + +Reported-and-tested-by: syzbot+bbe84a4010eeea00982d@syzkaller.appspotmail.com +Closes: https://syzkaller.appspot.com/bug?extid=bbe84a4010eeea00982d +Fixes: c7aa12252f51 ("NFC: Take a reference on the LLCP local pointer when creating a socket") +Reviewed-by: Suman Ghosh +Signed-off-by: Siddh Raman Pant +Reviewed-by: Krzysztof Kozlowski +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + net/nfc/llcp_core.c | 39 ++++++++++++++++++++++++++++++++++++--- + 1 file changed, 36 insertions(+), 3 deletions(-) + +diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c +index 1dac28136e6a3..18be13fb9b75a 100644 +--- a/net/nfc/llcp_core.c ++++ b/net/nfc/llcp_core.c +@@ -145,6 +145,13 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool device, + + static struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local) + { ++ /* Since using nfc_llcp_local may result in usage of nfc_dev, whenever ++ * we hold a reference to local, we also need to hold a reference to ++ * the device to avoid UAF. ++ */ ++ if (!nfc_get_device(local->dev->idx)) ++ return NULL; ++ + kref_get(&local->ref); + + return local; +@@ -177,10 +184,18 @@ static void local_release(struct kref *ref) + + int nfc_llcp_local_put(struct nfc_llcp_local *local) + { ++ struct nfc_dev *dev; ++ int ret; ++ + if (local == NULL) + return 0; + +- return kref_put(&local->ref, local_release); ++ dev = local->dev; ++ ++ ret = kref_put(&local->ref, local_release); ++ nfc_put_device(dev); ++ ++ return ret; + } + + static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local, +@@ -959,8 +974,17 @@ static void nfc_llcp_recv_connect(struct nfc_llcp_local *local, + } + + new_sock = nfc_llcp_sock(new_sk); +- new_sock->dev = local->dev; ++ + new_sock->local = nfc_llcp_local_get(local); ++ if (!new_sock->local) { ++ reason = LLCP_DM_REJ; ++ sock_put(&new_sock->sk); ++ release_sock(&sock->sk); ++ sock_put(&sock->sk); ++ goto fail; ++ } ++ ++ new_sock->dev = local->dev; + new_sock->rw = sock->rw; + new_sock->miux = sock->miux; + new_sock->nfc_protocol = sock->nfc_protocol; +@@ -1597,7 +1621,16 @@ int nfc_llcp_register_device(struct nfc_dev *ndev) + if (local == NULL) + return -ENOMEM; + +- local->dev = ndev; ++ /* As we are going to initialize local's refcount, we need to get the ++ * nfc_dev to avoid UAF, otherwise there is no point in continuing. ++ * See nfc_llcp_local_get(). 
++ */ ++ local->dev = nfc_get_device(ndev->idx); ++ if (!local->dev) { ++ kfree(local); ++ return -ENODEV; ++ } ++ + INIT_LIST_HEAD(&local->list); + kref_init(&local->ref); + mutex_init(&local->sdp_lock); +-- +2.43.0 + diff --git a/queue-6.6/octeontx2-af-always-configure-nix-tx-link-credits-ba.patch b/queue-6.6/octeontx2-af-always-configure-nix-tx-link-credits-ba.patch new file mode 100644 index 00000000000..7df5cde967e --- /dev/null +++ b/queue-6.6/octeontx2-af-always-configure-nix-tx-link-credits-ba.patch @@ -0,0 +1,184 @@ +From 75e1f6093c66ac84253c9e40a3569df242b88859 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 2 Jan 2024 15:26:43 +0530 +Subject: octeontx2-af: Always configure NIX TX link credits based on max frame + size + +From: Naveen Mamindlapalli + +[ Upstream commit a0d9528f6daf7fe8de217fa80a94d2989d2a57a7 ] + +Currently the NIX TX link credits are initialized based on the max frame +size that can be transmitted on a link but when the MTU is changed, the +NIX TX link credits are reprogrammed by the SW based on the new MTU value. +Since SMQ max packet length is programmed to max frame size by default, +there is a chance that NIX TX may stall while sending a max frame sized +packet on the link with insufficient credits to send the packet all at +once. This patch avoids stall issue by not changing the link credits +dynamically when the MTU is changed. + +Fixes: 1c74b89171c3 ("octeontx2-af: Wait for TX link idle for credits change") +Signed-off-by: Naveen Mamindlapalli +Signed-off-by: Sunil Kovvuri Goutham +Signed-off-by: Nithin Kumar Dabilpuram +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + .../ethernet/marvell/octeontx2/af/rvu_nix.c | 110 +----------------- + 1 file changed, 3 insertions(+), 107 deletions(-) + +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +index 4227ebb4a758d..2b6ab748ce25a 100644 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +@@ -4143,90 +4143,18 @@ static void nix_find_link_frs(struct rvu *rvu, + req->minlen = minlen; + } + +-static int +-nix_config_link_credits(struct rvu *rvu, int blkaddr, int link, +- u16 pcifunc, u64 tx_credits) +-{ +- struct rvu_hwinfo *hw = rvu->hw; +- int pf = rvu_get_pf(pcifunc); +- u8 cgx_id = 0, lmac_id = 0; +- unsigned long poll_tmo; +- bool restore_tx_en = 0; +- struct nix_hw *nix_hw; +- u64 cfg, sw_xoff = 0; +- u32 schq = 0; +- u32 credits; +- int rc; +- +- nix_hw = get_nix_hw(rvu->hw, blkaddr); +- if (!nix_hw) +- return NIX_AF_ERR_INVALID_NIXBLK; +- +- if (tx_credits == nix_hw->tx_credits[link]) +- return 0; +- +- /* Enable cgx tx if disabled for credits to be back */ +- if (is_pf_cgxmapped(rvu, pf)) { +- rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); +- restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), +- lmac_id, true); +- } +- +- mutex_lock(&rvu->rsrc_lock); +- /* Disable new traffic to link */ +- if (hw->cap.nix_shaping) { +- schq = nix_get_tx_link(rvu, pcifunc); +- sw_xoff = rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq)); +- rvu_write64(rvu, blkaddr, +- NIX_AF_TL1X_SW_XOFF(schq), BIT_ULL(0)); +- } +- +- rc = NIX_AF_ERR_LINK_CREDITS; +- poll_tmo = jiffies + usecs_to_jiffies(200000); +- /* Wait for credits to return */ +- do { +- if (time_after(jiffies, poll_tmo)) +- goto exit; +- usleep_range(100, 200); +- +- cfg = rvu_read64(rvu, blkaddr, +- NIX_AF_TX_LINKX_NORM_CREDIT(link)); +- credits = (cfg >> 12) & 0xFFFFFULL; +- 
} while (credits != nix_hw->tx_credits[link]); +- +- cfg &= ~(0xFFFFFULL << 12); +- cfg |= (tx_credits << 12); +- rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg); +- rc = 0; +- +- nix_hw->tx_credits[link] = tx_credits; +- +-exit: +- /* Enable traffic back */ +- if (hw->cap.nix_shaping && !sw_xoff) +- rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq), 0); +- +- /* Restore state of cgx tx */ +- if (restore_tx_en) +- rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false); +- +- mutex_unlock(&rvu->rsrc_lock); +- return rc; +-} +- + int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, + struct msg_rsp *rsp) + { + struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc = req->hdr.pcifunc; + int pf = rvu_get_pf(pcifunc); +- int blkaddr, schq, link = -1; +- struct nix_txsch *txsch; +- u64 cfg, lmac_fifo_len; ++ int blkaddr, link = -1; + struct nix_hw *nix_hw; + struct rvu_pfvf *pfvf; + u8 cgx = 0, lmac = 0; + u16 max_mtu; ++ u64 cfg; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (blkaddr < 0) +@@ -4247,25 +4175,6 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, + if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS) + return NIX_AF_ERR_FRS_INVALID; + +- /* Check if requester wants to update SMQ's */ +- if (!req->update_smq) +- goto rx_frscfg; +- +- /* Update min/maxlen in each of the SMQ attached to this PF/VF */ +- txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; +- mutex_lock(&rvu->rsrc_lock); +- for (schq = 0; schq < txsch->schq.max; schq++) { +- if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) +- continue; +- cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq)); +- cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8); +- if (req->update_minlen) +- cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F); +- rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg); +- } +- mutex_unlock(&rvu->rsrc_lock); +- +-rx_frscfg: + /* Check if config is for SDP link */ + if (req->sdp_link) { + if (!hw->sdp_links) +@@ -4288,7 +4197,6 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, + if (link < 0) + return NIX_AF_ERR_RX_LINK_INVALID; + +- + linkcfg: + nix_find_link_frs(rvu, req, pcifunc); + +@@ -4298,19 +4206,7 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, + cfg = (cfg & ~0xFFFFULL) | req->minlen; + rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg); + +- if (req->sdp_link || pf == 0) +- return 0; +- +- /* Update transmit credits for CGX links */ +- lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, lmac); +- if (!lmac_fifo_len) { +- dev_err(rvu->dev, +- "%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n", +- __func__, cgx, lmac); +- return 0; +- } +- return nix_config_link_credits(rvu, blkaddr, link, pcifunc, +- (lmac_fifo_len - req->maxlen) / 16); ++ return 0; + } + + int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req, +-- +2.43.0 + diff --git a/queue-6.6/octeontx2-af-fix-marking-couple-of-structure-as-__pa.patch b/queue-6.6/octeontx2-af-fix-marking-couple-of-structure-as-__pa.patch new file mode 100644 index 00000000000..5c639e9bd95 --- /dev/null +++ b/queue-6.6/octeontx2-af-fix-marking-couple-of-structure-as-__pa.patch @@ -0,0 +1,46 @@ +From 46554b439ec88abaca47a25d8be0d9e7564da53d Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 19 Dec 2023 19:56:33 +0530 +Subject: octeontx2-af: Fix marking couple of structure as __packed + +From: Suman Ghosh + +[ Upstream commit 0ee2384a5a0f3b4eeac8d10bb01a0609d245a4d1 ] + 
+Couple of structures was not marked as __packed. This patch +fixes the same and mark them as __packed. + +Fixes: 42006910b5ea ("octeontx2-af: cleanup KPU config data") +Signed-off-by: Suman Ghosh +Reviewed-by: Jacob Keller +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/marvell/octeontx2/af/npc.h | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h +index de9fbd98dfb76..c92c3f4631d54 100644 +--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h ++++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h +@@ -520,7 +520,7 @@ struct npc_lt_def { + u8 ltype_mask; + u8 ltype_match; + u8 lid; +-}; ++} __packed; + + struct npc_lt_def_ipsec { + u8 ltype_mask; +@@ -528,7 +528,7 @@ struct npc_lt_def_ipsec { + u8 lid; + u8 spi_offset; + u8 spi_nz; +-}; ++} __packed; + + struct npc_lt_def_apad { + u8 ltype_mask; +-- +2.43.0 + diff --git a/queue-6.6/octeontx2-af-re-enable-mac-tx-in-otx2_stop-processin.patch b/queue-6.6/octeontx2-af-re-enable-mac-tx-in-otx2_stop-processin.patch new file mode 100644 index 00000000000..bdbc84e50cb --- /dev/null +++ b/queue-6.6/octeontx2-af-re-enable-mac-tx-in-otx2_stop-processin.patch @@ -0,0 +1,93 @@ +From 698f45153c4341c504fb98b4a6a24a6fee848168 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 2 Jan 2024 19:44:00 +0530 +Subject: octeontx2-af: Re-enable MAC TX in otx2_stop processing + +From: Naveen Mamindlapalli + +[ Upstream commit 818ed8933bd17bc91a9fa8b94a898189c546fc1a ] + +During QoS scheduling testing with multiple strict priority flows, the +netdev tx watchdog timeout routine is invoked when a low priority QoS +queue doesn't get a chance to transmit the packets because other high +priority flows are completely subscribing the transmit link. The netdev +tx watchdog timeout routine will stop MAC RX and TX functionality in +otx2_stop() routine before cleanup of HW TX queues which results in SMQ +flush errors because the packets belonging to low priority queues will +never gets flushed since MAC TX is disabled. This patch fixes the issue +by re-enabling MAC TX to ensure the packets in HW pipeline gets flushed +properly. + +Fixes: a7faa68b4e7f ("octeontx2-af: Start/Stop traffic in CGX along with NPC") +Signed-off-by: Naveen Mamindlapalli +Signed-off-by: Sunil Kovvuri Goutham +Signed-off-by: David S. 
Miller +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 1 + + .../net/ethernet/marvell/octeontx2/af/rvu_cgx.c | 17 +++++++++++++++++ + .../net/ethernet/marvell/octeontx2/af/rvu_nix.c | 8 +++++++- + 3 files changed, 25 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +index cce2806aaa50c..8802961b8889f 100644 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +@@ -905,6 +905,7 @@ u32 rvu_cgx_get_fifolen(struct rvu *rvu); + void *rvu_first_cgx_pdata(struct rvu *rvu); + int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id); + int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable); ++int rvu_cgx_tx_enable(struct rvu *rvu, u16 pcifunc, bool enable); + int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause, + u16 pfc_en); + int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause); +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +index f2b1edf1bb43c..ce987ccd43e29 100644 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +@@ -465,6 +465,23 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start) + return mac_ops->mac_rx_tx_enable(cgxd, lmac_id, start); + } + ++int rvu_cgx_tx_enable(struct rvu *rvu, u16 pcifunc, bool enable) ++{ ++ int pf = rvu_get_pf(pcifunc); ++ struct mac_ops *mac_ops; ++ u8 cgx_id, lmac_id; ++ void *cgxd; ++ ++ if (!is_cgx_config_permitted(rvu, pcifunc)) ++ return LMAC_AF_ERR_PERM_DENIED; ++ ++ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); ++ cgxd = rvu_cgx_pdata(cgx_id, rvu); ++ mac_ops = get_mac_ops(cgxd); ++ ++ return mac_ops->mac_tx_enable(cgxd, lmac_id, enable); ++} ++ + int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable) + { + struct mac_ops *mac_ops; +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +index 2b6ab748ce25a..58744313f0eb6 100644 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +@@ -4737,7 +4737,13 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, + pfvf = rvu_get_pfvf(rvu, pcifunc); + clear_bit(NIXLF_INITIALIZED, &pfvf->flags); + +- return rvu_cgx_start_stop_io(rvu, pcifunc, false); ++ err = rvu_cgx_start_stop_io(rvu, pcifunc, false); ++ if (err) ++ return err; ++ ++ rvu_cgx_tx_enable(rvu, pcifunc, true); ++ ++ return 0; + } + + #define RX_SA_BASE GENMASK_ULL(52, 7) +-- +2.43.0 + diff --git a/queue-6.6/phy-mediatek-mipi-mt8183-fix-minimal-supported-frequ.patch b/queue-6.6/phy-mediatek-mipi-mt8183-fix-minimal-supported-frequ.patch new file mode 100644 index 00000000000..90daff13634 --- /dev/null +++ b/queue-6.6/phy-mediatek-mipi-mt8183-fix-minimal-supported-frequ.patch @@ -0,0 +1,40 @@ +From a18cee098582ad84337fb6948de7b24874fcad9e Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 23 Nov 2023 12:02:02 +0100 +Subject: phy: mediatek: mipi: mt8183: fix minimal supported frequency + +From: Michael Walle + +[ Upstream commit 06f76e464ac81c6915430b7155769ea4ef16efe4 ] + +The lowest supported clock frequency of the PHY is 125MHz (see also +mtk_mipi_tx_pll_enable()), but the clamping in .round_rate() has the +wrong minimal value, which will make the .enable() op return 
-EINVAL on +low frequencies. Fix the minimal clamping value. + +Fixes: efda51a58b4a ("drm/mediatek: add mipi_tx driver for mt8183") +Signed-off-by: Michael Walle +Reviewed-by: AngeloGioacchino Del Regno +Link: https://lore.kernel.org/r/20231123110202.2025585-1-mwalle@kernel.org +Signed-off-by: Vinod Koul +Signed-off-by: Sasha Levin +--- + drivers/phy/mediatek/phy-mtk-mipi-dsi-mt8183.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/phy/mediatek/phy-mtk-mipi-dsi-mt8183.c b/drivers/phy/mediatek/phy-mtk-mipi-dsi-mt8183.c +index f021ec5a70e5c..553725e1269c9 100644 +--- a/drivers/phy/mediatek/phy-mtk-mipi-dsi-mt8183.c ++++ b/drivers/phy/mediatek/phy-mtk-mipi-dsi-mt8183.c +@@ -100,7 +100,7 @@ static void mtk_mipi_tx_pll_disable(struct clk_hw *hw) + static long mtk_mipi_tx_pll_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) + { +- return clamp_val(rate, 50000000, 1600000000); ++ return clamp_val(rate, 125000000, 1600000000); + } + + static const struct clk_ops mtk_mipi_tx_pll_ops = { +-- +2.43.0 + diff --git a/queue-6.6/phy-sunplus-return-negative-error-code-in-sp_usb_phy.patch b/queue-6.6/phy-sunplus-return-negative-error-code-in-sp_usb_phy.patch new file mode 100644 index 00000000000..315ccb19880 --- /dev/null +++ b/queue-6.6/phy-sunplus-return-negative-error-code-in-sp_usb_phy.patch @@ -0,0 +1,37 @@ +From d4255a21e9a25d03d7ca4d3e7c60af5e7368a316 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 20 Nov 2023 17:10:47 +0800 +Subject: phy: sunplus: return negative error code in sp_usb_phy_probe + +From: Su Hui + +[ Upstream commit 2a9c713825b3127ece11984abf973672c9779518 ] + +devm_phy_create() return negative error code, 'ret' should be +'PTR_ERR(phy)' rather than '-PTR_ERR(phy)'. + +Fixes: 99d9ccd97385 ("phy: usb: Add USB2.0 phy driver for Sunplus SP7021") +Signed-off-by: Su Hui +Link: https://lore.kernel.org/r/20231120091046.163781-1-suhui@nfschina.com +Signed-off-by: Vinod Koul +Signed-off-by: Sasha Levin +--- + drivers/phy/sunplus/phy-sunplus-usb2.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/phy/sunplus/phy-sunplus-usb2.c b/drivers/phy/sunplus/phy-sunplus-usb2.c +index 0efe74ac9c6af..637a5fbae6d9a 100644 +--- a/drivers/phy/sunplus/phy-sunplus-usb2.c ++++ b/drivers/phy/sunplus/phy-sunplus-usb2.c +@@ -275,7 +275,7 @@ static int sp_usb_phy_probe(struct platform_device *pdev) + + phy = devm_phy_create(&pdev->dev, NULL, &sp_uphy_ops); + if (IS_ERR(phy)) { +- ret = -PTR_ERR(phy); ++ ret = PTR_ERR(phy); + return ret; + } + +-- +2.43.0 + diff --git a/queue-6.6/phy-ti-gmii-sel-fix-register-offset-when-parent-is-n.patch b/queue-6.6/phy-ti-gmii-sel-fix-register-offset-when-parent-is-n.patch new file mode 100644 index 00000000000..1f8e343a5c2 --- /dev/null +++ b/queue-6.6/phy-ti-gmii-sel-fix-register-offset-when-parent-is-n.patch @@ -0,0 +1,59 @@ +From db461092ee78f9629b78a214dedcf6589956f4d2 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 25 Oct 2023 09:33:02 -0500 +Subject: phy: ti: gmii-sel: Fix register offset when parent is not a syscon + node + +From: Andrew Davis + +[ Upstream commit 0f40d5099cd6d828fd7de6227d3eabe86016724c ] + +When the node for this phy selector is a child node of a syscon node then the +property 'reg' is used as an offset into the parent regmap. When the node +is standalone and gets its own regmap this offset is pre-applied. So we need +to track which method was used to get the regmap and not apply the offset +in the standalone case. 
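+
+A rough sketch of the two cases using plain regmap calls (the driver's
+actual accessors differ, this only illustrates the offset handling):
+
+	/* child of a syscon node: the regmap spans the whole syscon,
+	 * so 'reg' from DT must be added to every access
+	 */
+	regmap_update_bits(priv->regmap, priv->reg_offset + reg, mask, val);
+
+	/* standalone node: the regmap already starts at the PHY registers,
+	 * so the offset must stay zero or writes land at the wrong address
+	 */
+	regmap_update_bits(priv->regmap, reg, mask, val);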
+ +Fixes: 1fdfa7cccd35 ("phy: ti: gmii-sel: Allow parent to not be syscon node") +Signed-off-by: Andrew Davis +Reviewed-by: Roger Quadros +Link: https://lore.kernel.org/r/20231025143302.1265633-1-afd@ti.com +Signed-off-by: Vinod Koul +Signed-off-by: Sasha Levin +--- + drivers/phy/ti/phy-gmii-sel.c | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c +index 555b323f45da1..bc847d3879f79 100644 +--- a/drivers/phy/ti/phy-gmii-sel.c ++++ b/drivers/phy/ti/phy-gmii-sel.c +@@ -64,6 +64,7 @@ struct phy_gmii_sel_priv { + u32 num_ports; + u32 reg_offset; + u32 qsgmii_main_ports; ++ bool no_offset; + }; + + static int phy_gmii_sel_mode(struct phy *phy, enum phy_mode mode, int submode) +@@ -402,7 +403,8 @@ static int phy_gmii_sel_init_ports(struct phy_gmii_sel_priv *priv) + priv->num_ports = size / sizeof(u32); + if (!priv->num_ports) + return -EINVAL; +- priv->reg_offset = __be32_to_cpu(*offset); ++ if (!priv->no_offset) ++ priv->reg_offset = __be32_to_cpu(*offset); + } + + if_phys = devm_kcalloc(dev, priv->num_ports, +@@ -471,6 +473,7 @@ static int phy_gmii_sel_probe(struct platform_device *pdev) + dev_err(dev, "Failed to get syscon %d\n", ret); + return ret; + } ++ priv->no_offset = true; + } + + ret = phy_gmii_sel_init_ports(priv); +-- +2.43.0 + diff --git a/queue-6.6/powerpc-pseries-vas-migration-suspend-waits-for-no-i.patch b/queue-6.6/powerpc-pseries-vas-migration-suspend-waits-for-no-i.patch new file mode 100644 index 00000000000..c1bb6b9c827 --- /dev/null +++ b/queue-6.6/powerpc-pseries-vas-migration-suspend-waits-for-no-i.patch @@ -0,0 +1,240 @@ +From 7ebae968cc47f7c6c8f6579c811eefd19b2be158 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sat, 25 Nov 2023 15:51:04 -0800 +Subject: powerpc/pseries/vas: Migration suspend waits for no in-progress open + windows + +From: Haren Myneni + +[ Upstream commit 0cf72f7f14d12cb065c3d01954cf42fc5638aa69 ] + +The hypervisor returns migration failure if all VAS windows are not +closed. During pre-migration stage, vas_migration_handler() sets +migration_in_progress flag and closes all windows from the list. +The allocate VAS window routine checks the migration flag, setup +the window and then add it to the list. So there is possibility of +the migration handler missing the window that is still in the +process of setup. + +t1: Allocate and open VAS t2: Migration event + window + +lock vas_pseries_mutex +If migration_in_progress set + unlock vas_pseries_mutex + return +open window HCALL +unlock vas_pseries_mutex +Modify window HCALL lock vas_pseries_mutex +setup window migration_in_progress=true + Closes all windows from the list + // May miss windows that are + // not in the list + unlock vas_pseries_mutex +lock vas_pseries_mutex return +if nr_closed_windows == 0 + // No DLPAR CPU or migration + add window to the list + // Window will be added to the + // list after the setup is completed + unlock vas_pseries_mutex + return +unlock vas_pseries_mutex +Close VAS window +// due to DLPAR CPU or migration +return -EBUSY + +This patch resolves the issue with the following steps: +- Set the migration_in_progress flag without holding mutex. +- Introduce nr_open_wins_progress counter in VAS capabilities + struct +- This counter tracks the number of open windows are still in + progress +- The allocate setup window thread closes windows if the migration + is set and decrements nr_open_window_progress counter +- The migration handler waits for no in-progress open windows. 
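+
+In C terms, the suspend side boils down to (sketch only; the hunks
+below also handle the resume path and error cases):
+
+    mutex_lock(&vas_pseries_mutex);
+    rc = reconfig_close_windows(vcaps, vcaps->nr_open_windows, true);
+    while (vcaps->nr_open_wins_progress) {
+        /* an open is past the HCALL but not on the list yet */
+        mutex_unlock(&vas_pseries_mutex);
+        msleep(10);
+        mutex_lock(&vas_pseries_mutex);
+    }
+    mutex_unlock(&vas_pseries_mutex);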
+ +The code flow with the fix is as follows: + +t1: Allocate and open VAS t2: Migration event + window + +lock vas_pseries_mutex +If migration_in_progress set + unlock vas_pseries_mutex + return +open window HCALL +nr_open_wins_progress++ +// Window opened, but not +// added to the list yet +unlock vas_pseries_mutex +Modify window HCALL migration_in_progress=true +setup window lock vas_pseries_mutex + Closes all windows from the list + While nr_open_wins_progress { + unlock vas_pseries_mutex +lock vas_pseries_mutex sleep +if nr_closed_windows == 0 // Wait if any open window in +or migration is not started // progress. The open window + // No DLPAR CPU or migration // thread closes the window without + add window to the list // adding to the list and return if + nr_open_wins_progress-- // the migration is in progress. + unlock vas_pseries_mutex + return +Close VAS window +nr_open_wins_progress-- +unlock vas_pseries_mutex +return -EBUSY lock vas_pseries_mutex + } + unlock vas_pseries_mutex + return + +Fixes: 37e6764895ef ("powerpc/pseries/vas: Add VAS migration handler") +Signed-off-by: Haren Myneni +Signed-off-by: Michael Ellerman +Link: https://msgid.link/20231125235104.3405008-1-haren@linux.ibm.com +Signed-off-by: Sasha Levin +--- + arch/powerpc/platforms/pseries/vas.c | 51 ++++++++++++++++++++++++---- + arch/powerpc/platforms/pseries/vas.h | 2 ++ + 2 files changed, 46 insertions(+), 7 deletions(-) + +diff --git a/arch/powerpc/platforms/pseries/vas.c b/arch/powerpc/platforms/pseries/vas.c +index b1f25bac280b4..71d52a670d951 100644 +--- a/arch/powerpc/platforms/pseries/vas.c ++++ b/arch/powerpc/platforms/pseries/vas.c +@@ -385,11 +385,15 @@ static struct vas_window *vas_allocate_window(int vas_id, u64 flags, + * same fault IRQ is not freed by the OS before. + */ + mutex_lock(&vas_pseries_mutex); +- if (migration_in_progress) ++ if (migration_in_progress) { + rc = -EBUSY; +- else ++ } else { + rc = allocate_setup_window(txwin, (u64 *)&domain[0], + cop_feat_caps->win_type); ++ if (!rc) ++ caps->nr_open_wins_progress++; ++ } ++ + mutex_unlock(&vas_pseries_mutex); + if (rc) + goto out; +@@ -404,8 +408,17 @@ static struct vas_window *vas_allocate_window(int vas_id, u64 flags, + goto out_free; + + txwin->win_type = cop_feat_caps->win_type; +- mutex_lock(&vas_pseries_mutex); ++ + /* ++ * The migration SUSPEND thread sets migration_in_progress and ++ * closes all open windows from the list. But the window is ++ * added to the list after open and modify HCALLs. So possible ++ * that migration_in_progress is set before modify HCALL which ++ * may cause some windows are still open when the hypervisor ++ * initiates the migration. ++ * So checks the migration_in_progress flag again and close all ++ * open windows. ++ * + * Possible to lose the acquired credit with DLPAR core + * removal after the window is opened. So if there are any + * closed windows (means with lost credits), do not give new +@@ -413,9 +426,11 @@ static struct vas_window *vas_allocate_window(int vas_id, u64 flags, + * after the existing windows are reopened when credits are + * available. 
+ */ +- if (!caps->nr_close_wins) { ++ mutex_lock(&vas_pseries_mutex); ++ if (!caps->nr_close_wins && !migration_in_progress) { + list_add(&txwin->win_list, &caps->list); + caps->nr_open_windows++; ++ caps->nr_open_wins_progress--; + mutex_unlock(&vas_pseries_mutex); + vas_user_win_add_mm_context(&txwin->vas_win.task_ref); + return &txwin->vas_win; +@@ -433,6 +448,12 @@ static struct vas_window *vas_allocate_window(int vas_id, u64 flags, + */ + free_irq_setup(txwin); + h_deallocate_vas_window(txwin->vas_win.winid); ++ /* ++ * Hold mutex and reduce nr_open_wins_progress counter. ++ */ ++ mutex_lock(&vas_pseries_mutex); ++ caps->nr_open_wins_progress--; ++ mutex_unlock(&vas_pseries_mutex); + out: + atomic_dec(&cop_feat_caps->nr_used_credits); + kfree(txwin); +@@ -937,14 +958,14 @@ int vas_migration_handler(int action) + struct vas_caps *vcaps; + int i, rc = 0; + ++ pr_info("VAS migration event %d\n", action); ++ + /* + * NX-GZIP is not enabled. Nothing to do for migration. + */ + if (!copypaste_feat) + return rc; + +- mutex_lock(&vas_pseries_mutex); +- + if (action == VAS_SUSPEND) + migration_in_progress = true; + else +@@ -990,12 +1011,27 @@ int vas_migration_handler(int action) + + switch (action) { + case VAS_SUSPEND: ++ mutex_lock(&vas_pseries_mutex); + rc = reconfig_close_windows(vcaps, vcaps->nr_open_windows, + true); ++ /* ++ * Windows are included in the list after successful ++ * open. So wait for closing these in-progress open ++ * windows in vas_allocate_window() which will be ++ * done if the migration_in_progress is set. ++ */ ++ while (vcaps->nr_open_wins_progress) { ++ mutex_unlock(&vas_pseries_mutex); ++ msleep(10); ++ mutex_lock(&vas_pseries_mutex); ++ } ++ mutex_unlock(&vas_pseries_mutex); + break; + case VAS_RESUME: ++ mutex_lock(&vas_pseries_mutex); + atomic_set(&caps->nr_total_credits, new_nr_creds); + rc = reconfig_open_windows(vcaps, new_nr_creds, true); ++ mutex_unlock(&vas_pseries_mutex); + break; + default: + /* should not happen */ +@@ -1011,8 +1047,9 @@ int vas_migration_handler(int action) + goto out; + } + ++ pr_info("VAS migration event (%d) successful\n", action); ++ + out: +- mutex_unlock(&vas_pseries_mutex); + return rc; + } + +diff --git a/arch/powerpc/platforms/pseries/vas.h b/arch/powerpc/platforms/pseries/vas.h +index 7115043ec4883..45567cd131783 100644 +--- a/arch/powerpc/platforms/pseries/vas.h ++++ b/arch/powerpc/platforms/pseries/vas.h +@@ -91,6 +91,8 @@ struct vas_cop_feat_caps { + struct vas_caps { + struct vas_cop_feat_caps caps; + struct list_head list; /* List of open windows */ ++ int nr_open_wins_progress; /* Number of open windows in */ ++ /* progress. Used in migration */ + int nr_close_wins; /* closed windows in the hypervisor for DLPAR */ + int nr_open_windows; /* Number of successful open windows */ + u8 feat; /* Feature type */ +-- +2.43.0 + diff --git a/queue-6.6/r8169-fix-pci-error-on-system-resume.patch b/queue-6.6/r8169-fix-pci-error-on-system-resume.patch new file mode 100644 index 00000000000..d25d815489b --- /dev/null +++ b/queue-6.6/r8169-fix-pci-error-on-system-resume.patch @@ -0,0 +1,49 @@ +From f7af030b679e789719ea5de43cea833c87ae27f2 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 22 Dec 2023 12:34:09 +0800 +Subject: r8169: Fix PCI error on system resume + +From: Kai-Heng Feng + +[ Upstream commit 9c476269bff2908a20930c58085bf0b05ebd569a ] + +Some r8168 NICs stop working upon system resume: + +[ 688.051096] r8169 0000:02:00.1 enp2s0f1: rtl_ep_ocp_read_cond == 0 (loop: 10, delay: 10000). 
+[ 688.175131] r8169 0000:02:00.1 enp2s0f1: Link is Down +... +[ 691.534611] r8169 0000:02:00.1 enp2s0f1: PCI error (cmd = 0x0407, status_errs = 0x0000) + +Not sure if it's related, but those NICs have a BMC device at function +0: +02:00.0 Unassigned class [ff00]: Realtek Semiconductor Co., Ltd. Realtek RealManage BMC [10ec:816e] (rev 1a) + +Trial and error shows that increase the loop wait on +rtl_ep_ocp_read_cond to 30 can eliminate the issue, so let +rtl8168ep_driver_start() to wait a bit longer. + +Fixes: e6d6ca6e1204 ("r8169: Add support for another RTL8168FP") +Signed-off-by: Kai-Heng Feng +Reviewed-by: Heiner Kallweit +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/realtek/r8169_main.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c +index bb787a52bc754..81fd31f6fac46 100644 +--- a/drivers/net/ethernet/realtek/r8169_main.c ++++ b/drivers/net/ethernet/realtek/r8169_main.c +@@ -1211,7 +1211,7 @@ static void rtl8168ep_driver_start(struct rtl8169_private *tp) + { + r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START); + r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01); +- rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 10); ++ rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 30); + } + + static void rtl8168_driver_start(struct rtl8169_private *tp) +-- +2.43.0 + diff --git a/queue-6.6/rcu-break-rcu_node_0-rq-__lock-order.patch b/queue-6.6/rcu-break-rcu_node_0-rq-__lock-order.patch new file mode 100644 index 00000000000..59e99141ef2 --- /dev/null +++ b/queue-6.6/rcu-break-rcu_node_0-rq-__lock-order.patch @@ -0,0 +1,148 @@ +From 0e1e7911cf400c7f6ba161b4d3b87e9170ce450d Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 31 Oct 2023 09:53:08 +0100 +Subject: rcu: Break rcu_node_0 --> &rq->__lock order + +From: Peter Zijlstra + +[ Upstream commit 85d68222ddc5f4522e456d97d201166acb50f716 ] + +Commit 851a723e45d1 ("sched: Always clear user_cpus_ptr in +do_set_cpus_allowed()") added a kfree() call to free any user +provided affinity mask, if present. It was changed later to use +kfree_rcu() in commit 9a5418bc48ba ("sched/core: Use kfree_rcu() +in do_set_cpus_allowed()") to avoid a circular locking dependency +problem. + +It turns out that even kfree_rcu() isn't safe for avoiding +circular locking problem. As reported by kernel test robot, +the following circular locking dependency now exists: + + &rdp->nocb_lock --> rcu_node_0 --> &rq->__lock + +Solve this by breaking the rcu_node_0 --> &rq->__lock chain by moving +the resched_cpu() out from under rcu_node lock. + +[peterz: heavily borrowed from Waiman's Changelog] +[paulmck: applied Z qiang feedback] + +Fixes: 851a723e45d1 ("sched: Always clear user_cpus_ptr in do_set_cpus_allowed()") +Reported-by: kernel test robot +Acked-by: Waiman Long +Signed-off-by: Peter Zijlstra (Intel) +Link: https://lore.kernel.org/oe-lkp/202310302207.a25f1a30-oliver.sang@intel.com +Signed-off-by: Paul E. 
McKenney +Signed-off-by: Frederic Weisbecker +Signed-off-by: Sasha Levin +--- + kernel/rcu/tree.c | 36 +++++++++++++++++++++++++----------- + 1 file changed, 25 insertions(+), 11 deletions(-) + +diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c +index 7b4517dc46579..92a090e161865 100644 +--- a/kernel/rcu/tree.c ++++ b/kernel/rcu/tree.c +@@ -755,14 +755,19 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp) + } + + /* +- * Return true if the specified CPU has passed through a quiescent +- * state by virtue of being in or having passed through an dynticks +- * idle state since the last call to dyntick_save_progress_counter() +- * for this same CPU, or by virtue of having been offline. ++ * Returns positive if the specified CPU has passed through a quiescent state ++ * by virtue of being in or having passed through an dynticks idle state since ++ * the last call to dyntick_save_progress_counter() for this same CPU, or by ++ * virtue of having been offline. ++ * ++ * Returns negative if the specified CPU needs a force resched. ++ * ++ * Returns zero otherwise. + */ + static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) + { + unsigned long jtsq; ++ int ret = 0; + struct rcu_node *rnp = rdp->mynode; + + /* +@@ -848,8 +853,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) + (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) || + rcu_state.cbovld)) { + WRITE_ONCE(rdp->rcu_urgent_qs, true); +- resched_cpu(rdp->cpu); + WRITE_ONCE(rdp->last_fqs_resched, jiffies); ++ ret = -1; + } + + /* +@@ -862,8 +867,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) + if (time_after(jiffies, rcu_state.jiffies_resched)) { + if (time_after(jiffies, + READ_ONCE(rdp->last_fqs_resched) + jtsq)) { +- resched_cpu(rdp->cpu); + WRITE_ONCE(rdp->last_fqs_resched, jiffies); ++ ret = -1; + } + if (IS_ENABLED(CONFIG_IRQ_WORK) && + !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq && +@@ -892,7 +897,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) + } + } + +- return 0; ++ return ret; + } + + /* Trace-event wrapper function for trace_rcu_future_grace_period. */ +@@ -2270,15 +2275,15 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rdp)) + { + int cpu; + unsigned long flags; +- unsigned long mask; +- struct rcu_data *rdp; + struct rcu_node *rnp; + + rcu_state.cbovld = rcu_state.cbovldnext; + rcu_state.cbovldnext = false; + rcu_for_each_leaf_node(rnp) { ++ unsigned long mask = 0; ++ unsigned long rsmask = 0; ++ + cond_resched_tasks_rcu_qs(); +- mask = 0; + raw_spin_lock_irqsave_rcu_node(rnp, flags); + rcu_state.cbovldnext |= !!rnp->cbovldmask; + if (rnp->qsmask == 0) { +@@ -2296,11 +2301,17 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rdp)) + continue; + } + for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) { ++ struct rcu_data *rdp; ++ int ret; ++ + rdp = per_cpu_ptr(&rcu_data, cpu); +- if (f(rdp)) { ++ ret = f(rdp); ++ if (ret > 0) { + mask |= rdp->grpmask; + rcu_disable_urgency_upon_qs(rdp); + } ++ if (ret < 0) ++ rsmask |= rdp->grpmask; + } + if (mask != 0) { + /* Idle/offline CPUs, report (releases rnp->lock). */ +@@ -2309,6 +2320,9 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rdp)) + /* Nothing to do here, so just drop the lock. 
*/ + raw_spin_unlock_irqrestore_rcu_node(rnp, flags); + } ++ ++ for_each_leaf_node_cpu_mask(rnp, cpu, rsmask) ++ resched_cpu(cpu); + } + } + +-- +2.43.0 + diff --git a/queue-6.6/rcu-introduce-rcu_cpu_online.patch b/queue-6.6/rcu-introduce-rcu_cpu_online.patch new file mode 100644 index 00000000000..20c3fe49e3c --- /dev/null +++ b/queue-6.6/rcu-introduce-rcu_cpu_online.patch @@ -0,0 +1,66 @@ +From 4ab8ebcd72181f7baa6aba161c5c4366ae1c5b11 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 27 Oct 2023 16:40:47 +0200 +Subject: rcu: Introduce rcu_cpu_online() + +From: Frederic Weisbecker + +[ Upstream commit 2be4686d866ad5896f2bb94d82fe892197aea9c7 ] + +Export the RCU point of view as to when a CPU is considered offline +(ie: when does RCU consider that a CPU is sufficiently down in the +hotplug process to not feature any possible read side). + +This will be used by RCU-tasks whose vision of an offline CPU should +reasonably match the one of RCU core. + +Fixes: cff9b2332ab7 ("kernel/sched: Modify initial boot task idle setup") +Acked-by: Peter Zijlstra (Intel) +Signed-off-by: Frederic Weisbecker +Signed-off-by: Sasha Levin +--- + kernel/rcu/rcu.h | 2 ++ + kernel/rcu/tree.c | 7 +++++++ + 2 files changed, 9 insertions(+) + +diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h +index 98e13be411afd..3d1851f82dbb6 100644 +--- a/kernel/rcu/rcu.h ++++ b/kernel/rcu/rcu.h +@@ -493,6 +493,7 @@ static inline void rcu_expedite_gp(void) { } + static inline void rcu_unexpedite_gp(void) { } + static inline void rcu_async_hurry(void) { } + static inline void rcu_async_relax(void) { } ++static inline bool rcu_cpu_online(int cpu) { return true; } + #else /* #ifdef CONFIG_TINY_RCU */ + bool rcu_gp_is_normal(void); /* Internal RCU use. */ + bool rcu_gp_is_expedited(void); /* Internal RCU use. */ +@@ -502,6 +503,7 @@ void rcu_unexpedite_gp(void); + void rcu_async_hurry(void); + void rcu_async_relax(void); + void rcupdate_announce_bootup_oddness(void); ++bool rcu_cpu_online(int cpu); + #ifdef CONFIG_TASKS_RCU_GENERIC + void show_rcu_tasks_gp_kthreads(void); + #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */ +diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c +index 92a090e161865..9af42eae1ba38 100644 +--- a/kernel/rcu/tree.c ++++ b/kernel/rcu/tree.c +@@ -4139,6 +4139,13 @@ static bool rcu_rdp_cpu_online(struct rcu_data *rdp) + return !!(rdp->grpmask & rcu_rnp_online_cpus(rdp->mynode)); + } + ++bool rcu_cpu_online(int cpu) ++{ ++ struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); ++ ++ return rcu_rdp_cpu_online(rdp); ++} ++ + #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) + + /* +-- +2.43.0 + diff --git a/queue-6.6/rcu-tasks-handle-new-pf_idle-semantics.patch b/queue-6.6/rcu-tasks-handle-new-pf_idle-semantics.patch new file mode 100644 index 00000000000..4d52993c795 --- /dev/null +++ b/queue-6.6/rcu-tasks-handle-new-pf_idle-semantics.patch @@ -0,0 +1,103 @@ +From e96491a39a50bcd1320f955f0bb8160c5702379d Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 27 Oct 2023 16:40:48 +0200 +Subject: rcu/tasks: Handle new PF_IDLE semantics + +From: Frederic Weisbecker + +[ Upstream commit 9715ed501b585d47444865071674c961c0cc0020 ] + +The commit: + + cff9b2332ab7 ("kernel/sched: Modify initial boot task idle setup") + +has changed the semantics of what is to be considered an idle task in +such a way that CPU boot code preceding the actual idle loop is excluded +from it. 
+ +This has however introduced new potential RCU-tasks stalls when either: + +1) Grace period is started before init/0 had a chance to set PF_IDLE, + keeping it stuck in the holdout list until idle ever schedules. + +2) Grace period is started when some possible CPUs have never been + online, keeping their idle tasks stuck in the holdout list until the + CPU ever boots up. + +3) Similar to 1) but with secondary CPUs: Grace period is started + concurrently with secondary CPU booting, putting its idle task in + the holdout list because PF_IDLE isn't yet observed on it. It stays + then stuck in the holdout list until that CPU ever schedules. The + effect is mitigated here by the hotplug AP thread that must run to + bring the CPU up. + +Fix this with handling the new semantics of PF_IDLE, keeping in mind +that it may or may not be set on an idle task. Take advantage of that to +strengthen the coverage of an RCU-tasks quiescent state within an idle +task, excluding the CPU boot code from it. Only the code running within +the idle loop is now a quiescent state, along with offline CPUs. + +Fixes: cff9b2332ab7 ("kernel/sched: Modify initial boot task idle setup") +Suggested-by: Joel Fernandes +Suggested-by: Paul E . McKenney" +Acked-by: Peter Zijlstra (Intel) +Signed-off-by: Frederic Weisbecker +Signed-off-by: Sasha Levin +--- + kernel/rcu/tasks.h | 30 ++++++++++++++++++++++++++++-- + 1 file changed, 28 insertions(+), 2 deletions(-) + +diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h +index 8d65f7d576a34..b5dc1e5d78c83 100644 +--- a/kernel/rcu/tasks.h ++++ b/kernel/rcu/tasks.h +@@ -892,10 +892,36 @@ static void rcu_tasks_pregp_step(struct list_head *hop) + synchronize_rcu(); + } + ++/* Check for quiescent states since the pregp's synchronize_rcu() */ ++static bool rcu_tasks_is_holdout(struct task_struct *t) ++{ ++ int cpu; ++ ++ /* Has the task been seen voluntarily sleeping? */ ++ if (!READ_ONCE(t->on_rq)) ++ return false; ++ ++ /* ++ * Idle tasks (or idle injection) within the idle loop are RCU-tasks ++ * quiescent states. But CPU boot code performed by the idle task ++ * isn't a quiescent state. ++ */ ++ if (is_idle_task(t)) ++ return false; ++ ++ cpu = task_cpu(t); ++ ++ /* Idle tasks on offline CPUs are RCU-tasks quiescent states. */ ++ if (t == idle_task(cpu) && !rcu_cpu_online(cpu)) ++ return false; ++ ++ return true; ++} ++ + /* Per-task initial processing. 
*/ + static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop) + { +- if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) { ++ if (t != current && rcu_tasks_is_holdout(t)) { + get_task_struct(t); + t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw); + WRITE_ONCE(t->rcu_tasks_holdout, true); +@@ -944,7 +970,7 @@ static void check_holdout_task(struct task_struct *t, + + if (!READ_ONCE(t->rcu_tasks_holdout) || + t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) || +- !READ_ONCE(t->on_rq) || ++ !rcu_tasks_is_holdout(t) || + (IS_ENABLED(CONFIG_NO_HZ_FULL) && + !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) { + WRITE_ONCE(t->rcu_tasks_holdout, false); +-- +2.43.0 + diff --git a/queue-6.6/rcu-tasks-trace-handle-new-pf_idle-semantics.patch b/queue-6.6/rcu-tasks-trace-handle-new-pf_idle-semantics.patch new file mode 100644 index 00000000000..c686bf5d235 --- /dev/null +++ b/queue-6.6/rcu-tasks-trace-handle-new-pf_idle-semantics.patch @@ -0,0 +1,51 @@ +From 37bac3338e531e5b238010f691ed5a9a7f436938 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 27 Oct 2023 16:40:49 +0200 +Subject: rcu/tasks-trace: Handle new PF_IDLE semantics + +From: Frederic Weisbecker + +[ Upstream commit a80712b9cc7e57830260ec5e1feb9cdb59e1da2f ] + +The commit: + + cff9b2332ab7 ("kernel/sched: Modify initial boot task idle setup") + +has changed the semantics of what is to be considered an idle task in +such a way that the idle task of an offline CPU may not carry the +PF_IDLE flag anymore. + +However RCU-tasks-trace tests the opposite assertion, still assuming +that idle tasks carry the PF_IDLE flag during their whole lifecycle. + +Remove this assumption to avoid spurious warnings but keep the initial +test verifying that the idle task is the current task on any offline +CPU. + +Reported-by: Naresh Kamboju +Fixes: cff9b2332ab7 ("kernel/sched: Modify initial boot task idle setup") +Suggested-by: Joel Fernandes +Suggested-by: Paul E . McKenney" +Acked-by: Peter Zijlstra (Intel) +Signed-off-by: Frederic Weisbecker +Signed-off-by: Sasha Levin +--- + kernel/rcu/tasks.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h +index b5dc1e5d78c83..65e000ca332cc 100644 +--- a/kernel/rcu/tasks.h ++++ b/kernel/rcu/tasks.h +@@ -1548,7 +1548,7 @@ static int trc_inspect_reader(struct task_struct *t, void *bhp_in) + } else { + // The task is not running, so C-language access is safe. + nesting = t->trc_reader_nesting; +- WARN_ON_ONCE(ofl && task_curr(t) && !is_idle_task(t)); ++ WARN_ON_ONCE(ofl && task_curr(t) && (t != idle_task(task_cpu(t)))); + if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && ofl) + n_heavy_reader_ofl_updates++; + } +-- +2.43.0 + diff --git a/queue-6.6/rdma-mlx5-fix-mkey-cache-wq-flush.patch b/queue-6.6/rdma-mlx5-fix-mkey-cache-wq-flush.patch new file mode 100644 index 00000000000..2ceeddd2409 --- /dev/null +++ b/queue-6.6/rdma-mlx5-fix-mkey-cache-wq-flush.patch @@ -0,0 +1,49 @@ +From 142cc4da7da41a4f9c73bbe2ebfdf9a2d9fabbde Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 25 Oct 2023 20:49:59 +0300 +Subject: RDMA/mlx5: Fix mkey cache WQ flush + +From: Moshe Shemesh + +[ Upstream commit a53e215f90079f617360439b1b6284820731e34c ] + +The cited patch tries to ensure no pending works on the mkey cache +workqueue by disabling adding new works and call flush_workqueue(). +But this workqueue also has delayed works which might still be pending +the delay time to be queued. 
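+
+As a generic illustration of the ordering described below (hypothetical
+names, not the mlx5 code):
+
+    /*
+     * A delayed work still sitting on its timer is not yet on the
+     * workqueue, so flush_workqueue() alone does not wait for it.
+     */
+    cancel_delayed_work(&ent->dwork);  /* drop works waiting on the timer */
+    flush_workqueue(cache->wq);        /* wait for queued/running works */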
+ +Add cancel_delayed_work() for the delayed works which waits to be queued +and then the flush_workqueue() will flush all works which are already +queued and running. + +Fixes: 374012b00457 ("RDMA/mlx5: Fix mkey cache possible deadlock on cleanup") +Link: https://lore.kernel.org/r/b8722f14e7ed81452f791764a26d2ed4cfa11478.1698256179.git.leon@kernel.org +Signed-off-by: Moshe Shemesh +Signed-off-by: Leon Romanovsky +Signed-off-by: Jason Gunthorpe +Signed-off-by: Sasha Levin +--- + drivers/infiniband/hw/mlx5/mr.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c +index 8a3762d9ff58c..e0629898c3c06 100644 +--- a/drivers/infiniband/hw/mlx5/mr.c ++++ b/drivers/infiniband/hw/mlx5/mr.c +@@ -1026,11 +1026,13 @@ void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev) + return; + + mutex_lock(&dev->cache.rb_lock); ++ cancel_delayed_work(&dev->cache.remove_ent_dwork); + for (node = rb_first(root); node; node = rb_next(node)) { + ent = rb_entry(node, struct mlx5_cache_ent, node); + xa_lock_irq(&ent->mkeys); + ent->disabled = true; + xa_unlock_irq(&ent->mkeys); ++ cancel_delayed_work(&ent->dwork); + } + mutex_unlock(&dev->cache.rb_lock); + +-- +2.43.0 + diff --git a/queue-6.6/riscv-don-t-probe-unaligned-access-speed-if-already-.patch b/queue-6.6/riscv-don-t-probe-unaligned-access-speed-if-already-.patch new file mode 100644 index 00000000000..0b479b84a3e --- /dev/null +++ b/queue-6.6/riscv-don-t-probe-unaligned-access-speed-if-already-.patch @@ -0,0 +1,42 @@ +From e02202f779f44fdd468f1995c9c8e21625a49f84 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 12 Sep 2023 23:40:40 +0800 +Subject: riscv: don't probe unaligned access speed if already done + +From: Jisheng Zhang + +[ Upstream commit c20d36cc2a2073d4cdcda92bd7a1bb9b3b3b7c79 ] + +If misaligned_access_speed percpu var isn't so called "HWPROBE +MISALIGNED UNKNOWN", it means the probe has happened(this is possible +for example, hotplug off then hotplug on one cpu), and the percpu var +has been set, don't probe again in this case. 
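+
+That is, check_unaligned_access() simply bails out early when the
+per-CPU value is already known (this is the whole hunk below):
+
+    if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_UNKNOWN)
+        return;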
+ +Signed-off-by: Jisheng Zhang +Fixes: 584ea6564bca ("RISC-V: Probe for unaligned access speed") +Reviewed-by: Conor Dooley +Link: https://lore.kernel.org/r/20230912154040.3306-1-jszhang@kernel.org +Signed-off-by: Palmer Dabbelt +Signed-off-by: Sasha Levin +--- + arch/riscv/kernel/cpufeature.c | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c +index 1cfbba65d11ae..e12cd22755c78 100644 +--- a/arch/riscv/kernel/cpufeature.c ++++ b/arch/riscv/kernel/cpufeature.c +@@ -568,6 +568,10 @@ void check_unaligned_access(int cpu) + void *src; + long speed = RISCV_HWPROBE_MISALIGNED_SLOW; + ++ /* We are already set since the last check */ ++ if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_UNKNOWN) ++ return; ++ + page = alloc_pages(GFP_NOWAIT, get_order(MISALIGNED_BUFFER_SIZE)); + if (!page) { + pr_warn("Can't alloc pages to measure memcpy performance"); +-- +2.43.0 + diff --git a/queue-6.6/riscv-kvm-update-external-interrupt-atomically-for-i.patch b/queue-6.6/riscv-kvm-update-external-interrupt-atomically-for-i.patch new file mode 100644 index 00000000000..ec6ff1c832f --- /dev/null +++ b/queue-6.6/riscv-kvm-update-external-interrupt-atomically-for-i.patch @@ -0,0 +1,108 @@ +From f93d15794db13e526a5e9d1b23ad81bd7dcb2c4e Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 13 Dec 2023 06:16:09 +0000 +Subject: RISCV: KVM: update external interrupt atomically for IMSIC swfile + +From: Yong-Xuan Wang + +[ Upstream commit 4ad9843e1ea088bd2529290234c6c4c6374836a7 ] + +The emulated IMSIC update the external interrupt pending depending on +the value of eidelivery and topei. It might lose an interrupt when it +is interrupted before setting the new value to the pending status. + +For example, when VCPU0 sends an IPI to VCPU1 via IMSIC: + +VCPU0 VCPU1 + + CSRSWAP topei = 0 + The VCPU1 has claimed all the external + interrupt in its interrupt handler. + + topei of VCPU1's IMSIC = 0 + +set pending in VCPU1's IMSIC + +topei of VCPU1' IMSIC = 1 + +set the external interrupt +pending of VCPU1 + + clear the external interrupt pending + of VCPU1 + +When the VCPU1 switches back to VS mode, it exits the interrupt handler +because the result of CSRSWAP topei is 0. If there are no other external +interrupts injected into the VCPU1's IMSIC, VCPU1 will never know this +pending interrupt unless it initiative read the topei. + +If the interruption occurs between updating interrupt pending in IMSIC +and updating external interrupt pending of VCPU, it will not cause a +problem. Suppose that the VCPU1 clears the IPI pending in IMSIC right +after VCPU0 sets the pending, the external interrupt pending of VCPU1 +will not be set because the topei is 0. But when the VCPU1 goes back to +VS mode, the pending IPI will be reported by the CSRSWAP topei, it will +not lose this interrupt. + +So we only need to make the external interrupt updating procedure as a +critical section to avoid the problem. 
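+
+The resulting update path is a short spinlock-protected read-then-update
+(condensed from the hunk below):
+
+    spin_lock_irqsave(&imsic->swfile_extirq_lock, flags);
+    if (imsic_mrif_atomic_read(mrif, &mrif->eidelivery) &&
+        imsic_mrif_topei(mrif, imsic->nr_eix, imsic->nr_msis))
+        kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_EXT);
+    else
+        kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT);
+    spin_unlock_irqrestore(&imsic->swfile_extirq_lock, flags);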
+ +Fixes: db8b7e97d613 ("RISC-V: KVM: Add in-kernel virtualization of AIA IMSIC") +Tested-by: Roy Lin +Tested-by: Wayling Chen +Co-developed-by: Vincent Chen +Signed-off-by: Vincent Chen +Signed-off-by: Yong-Xuan Wang +Signed-off-by: Anup Patel +Signed-off-by: Sasha Levin +--- + arch/riscv/kvm/aia_imsic.c | 13 +++++++++++++ + 1 file changed, 13 insertions(+) + +diff --git a/arch/riscv/kvm/aia_imsic.c b/arch/riscv/kvm/aia_imsic.c +index 6cf23b8adb712..e808723a85f1b 100644 +--- a/arch/riscv/kvm/aia_imsic.c ++++ b/arch/riscv/kvm/aia_imsic.c +@@ -55,6 +55,7 @@ struct imsic { + /* IMSIC SW-file */ + struct imsic_mrif *swfile; + phys_addr_t swfile_pa; ++ spinlock_t swfile_extirq_lock; + }; + + #define imsic_vs_csr_read(__c) \ +@@ -613,12 +614,23 @@ static void imsic_swfile_extirq_update(struct kvm_vcpu *vcpu) + { + struct imsic *imsic = vcpu->arch.aia_context.imsic_state; + struct imsic_mrif *mrif = imsic->swfile; ++ unsigned long flags; ++ ++ /* ++ * The critical section is necessary during external interrupt ++ * updates to avoid the risk of losing interrupts due to potential ++ * interruptions between reading topei and updating pending status. ++ */ ++ ++ spin_lock_irqsave(&imsic->swfile_extirq_lock, flags); + + if (imsic_mrif_atomic_read(mrif, &mrif->eidelivery) && + imsic_mrif_topei(mrif, imsic->nr_eix, imsic->nr_msis)) + kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_EXT); + else + kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT); ++ ++ spin_unlock_irqrestore(&imsic->swfile_extirq_lock, flags); + } + + static void imsic_swfile_read(struct kvm_vcpu *vcpu, bool clear, +@@ -1039,6 +1051,7 @@ int kvm_riscv_vcpu_aia_imsic_init(struct kvm_vcpu *vcpu) + } + imsic->swfile = page_to_virt(swfile_page); + imsic->swfile_pa = page_to_phys(swfile_page); ++ spin_lock_init(&imsic->swfile_extirq_lock); + + /* Setup IO device */ + kvm_iodevice_init(&imsic->iodev, &imsic_iodoev_ops); +-- +2.43.0 + diff --git a/queue-6.6/selftests-bonding-do-not-set-port-down-when-adding-t.patch b/queue-6.6/selftests-bonding-do-not-set-port-down-when-adding-t.patch new file mode 100644 index 00000000000..22fcb021d0d --- /dev/null +++ b/queue-6.6/selftests-bonding-do-not-set-port-down-when-adding-t.patch @@ -0,0 +1,53 @@ +From b235a63fcc3defc43bdd3ffcdf64b222e4ba3c6b Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sat, 23 Dec 2023 20:59:22 +0800 +Subject: selftests: bonding: do not set port down when adding to bond + +From: Hangbin Liu + +[ Upstream commit 61fa2493ca76fd7bb74e13f0205274f4ab0aa696 ] + +Similar to commit be809424659c ("selftests: bonding: do not set port down +before adding to bond"). The bond-arp-interval-causes-panic test failed +after commit a4abfa627c38 ("net: rtnetlink: Enslave device before bringing +it up") as the kernel will set the port down _after_ adding to bond if setting +port down specifically. + +Fix it by removing the link down operation when adding to bond. + +Fixes: 2ffd57327ff1 ("selftests: bonding: cause oops in bond_rr_gen_slave_id") +Signed-off-by: Hangbin Liu +Tested-by: Benjamin Poirier +Signed-off-by: David S. 
Miller +Signed-off-by: Sasha Levin +--- + .../drivers/net/bonding/bond-arp-interval-causes-panic.sh | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/tools/testing/selftests/drivers/net/bonding/bond-arp-interval-causes-panic.sh b/tools/testing/selftests/drivers/net/bonding/bond-arp-interval-causes-panic.sh +index 4917dbb35a44d..5667febee3286 100755 +--- a/tools/testing/selftests/drivers/net/bonding/bond-arp-interval-causes-panic.sh ++++ b/tools/testing/selftests/drivers/net/bonding/bond-arp-interval-causes-panic.sh +@@ -30,16 +30,16 @@ ip netns exec server ip addr add ${server_ip4}/24 dev eth0 + + ip netns exec client ip link add dev bond0 down type bond mode 1 \ + miimon 100 all_slaves_active 1 +-ip netns exec client ip link set dev eth0 down master bond0 ++ip netns exec client ip link set dev eth0 master bond0 + ip netns exec client ip link set dev bond0 up + ip netns exec client ip addr add ${client_ip4}/24 dev bond0 + ip netns exec client ping -c 5 $server_ip4 >/dev/null + +-ip netns exec client ip link set dev eth0 down nomaster ++ip netns exec client ip link set dev eth0 nomaster + ip netns exec client ip link set dev bond0 down + ip netns exec client ip link set dev bond0 type bond mode 0 \ + arp_interval 1000 arp_ip_target "+${server_ip4}" +-ip netns exec client ip link set dev eth0 down master bond0 ++ip netns exec client ip link set dev eth0 master bond0 + ip netns exec client ip link set dev bond0 up + ip netns exec client ping -c 5 $server_ip4 >/dev/null + +-- +2.43.0 + diff --git a/queue-6.6/series b/queue-6.6/series index 78e00459032..513ba449a4a 100644 --- a/queue-6.6/series +++ b/queue-6.6/series @@ -1 +1,95 @@ keys-dns-fix-missing-size-check-of-v1-server-list-header.patch +accel-qaic-fix-gem-import-path-code.patch +accel-qaic-implement-quirk-for-soc_hw_version.patch +wifi-iwlwifi-pcie-don-t-synchronize-irqs-from-irq.patch +drm-bridge-parade-ps8640-never-store-more-than-msg-s.patch +drm-bridge-ti-sn65dsi86-never-store-more-than-msg-si.patch +drm-bridge-ps8640-fix-size-mismatch-warning-w-len.patch +netfilter-nf_tables-set-transport-offset-from-mac-he.patch +nfc-llcp_core-hold-a-ref-to-llcp_local-dev-when-hold.patch +octeontx2-af-fix-marking-couple-of-structure-as-__pa.patch +drm-i915-dp-fix-passing-the-correct-dpcd_rev-for-drm.patch +drm-i915-perf-update-handling-of-mmio-triggered-repo.patch +ice-fix-link_down_on_close-message.patch +ice-shut-down-vsi-with-link-down-on-close-enabled.patch +i40e-fix-filter-input-checks-to-prevent-config-with-.patch +igc-report-vlan-ethertype-matching-back-to-user.patch +igc-check-vlan-tci-mask.patch +igc-check-vlan-ethertype-mask.patch +asoc-fsl_rpmsg-fix-error-handler-with-pm_runtime_ena.patch +asoc-mediatek-mt8186-fix-aud_pad_top-register-and-of.patch +mlxbf_gige-fix-receive-packet-race-condition.patch +net-sched-em_text-fix-possible-memory-leak-in-em_tex.patch +r8169-fix-pci-error-on-system-resume.patch +net-implement-missing-getsockopt-so_timestamping_new.patch +selftests-bonding-do-not-set-port-down-when-adding-t.patch +arm-sun9i-smp-fix-array-index-out-of-bounds-read-in-.patch +sfc-fix-a-double-free-bug-in-efx_probe_filters.patch +net-bcmgenet-fix-fcs-generation-for-fragmented-skbuf.patch +netfilter-nf_nat-fix-action-not-being-set-for-all-ct.patch +netfilter-nft_immediate-drop-chain-reference-counter.patch +net-save-and-restore-msg_namelen-in-sock_sendmsg.patch +i40e-fix-use-after-free-in-i40e_aqc_add_filters.patch +asoc-meson-g12a-toacodec-validate-written-enum-value.patch 
+asoc-meson-g12a-tohdmitx-validate-written-enum-value.patch +asoc-meson-g12a-toacodec-fix-event-generation.patch +asoc-meson-g12a-tohdmitx-fix-event-generation-for-s-.patch +i40e-restore-vf-msi-x-state-during-pci-reset.patch +igc-fix-hicredit-calculation.patch +apparmor-fix-move_mount-mediation-by-detecting-if-so.patch +virtio_net-avoid-data-races-on-dev-stats-fields.patch +virtio_net-fix-missing-dma-unmap-for-resize.patch +net-qla3xxx-fix-potential-memleak-in-ql_alloc_buffer.patch +net-smc-fix-invalid-link-access-in-dumping-smc-r-con.patch +octeontx2-af-always-configure-nix-tx-link-credits-ba.patch +octeontx2-af-re-enable-mac-tx-in-otx2_stop-processin.patch +asix-add-check-for-usbnet_get_endpoints.patch +net-ravb-wait-for-operating-mode-to-be-applied.patch +bnxt_en-remove-mis-applied-code-from-bnxt_cfg_ntp_fi.patch +net-implement-missing-so_timestamping_new-cmsg-suppo.patch +mm-convert-dax-lock-unlock-page-to-lock-unlock-folio.patch +mm-memory-failure-pass-the-folio-and-the-page-to-col.patch +xsk-add-multi-buffer-support-for-sockets-sharing-ume.patch +media-qcom-camss-fix-v4l2-async-notifier-error-path.patch +media-qcom-camss-fix-genpd-cleanup.patch +tcp-derive-delack_max-from-rto_min.patch +bpftool-fix-wcast-qual-warning.patch +bpftool-align-output-skeleton-elf-code.patch +crypto-xts-use-spawn-for-underlying-single-block-cip.patch +crypto-qat-fix-double-free-during-reset.patch +crypto-hisilicon-qm-fix-eq-aeq-interrupt-issue.patch +vfio-mtty-overhaul-mtty-interrupt-handling.patch +clk-si521xx-increase-stack-based-print-buffer-size-i.patch +rdma-mlx5-fix-mkey-cache-wq-flush.patch +acpi-thermal-fix-acpi_thermal_unregister_thermal_zon.patch +rcu-break-rcu_node_0-rq-__lock-order.patch +rcu-introduce-rcu_cpu_online.patch +rcu-tasks-handle-new-pf_idle-semantics.patch +rcu-tasks-trace-handle-new-pf_idle-semantics.patch +riscv-don-t-probe-unaligned-access-speed-if-already-.patch +kvm-s390-vsie-fix-wrong-vir-37-when-mso-is-used.patch +phy-ti-gmii-sel-fix-register-offset-when-parent-is-n.patch +dmaengine-ti-k3-psil-am62-fix-spi-pdma-data.patch +dmaengine-ti-k3-psil-am62a-fix-spi-pdma-data.patch +dmaengine-fsl-edma-do-not-suspend-and-resume-the-mas.patch +dmaengine-fsl-edma-add-judgment-on-enabling-round-ro.patch +iio-imu-adis16475-use-bit-numbers-in-assign_bit.patch +iommu-vt-d-support-enforce_cache_coherency-only-for-.patch +phy-mediatek-mipi-mt8183-fix-minimal-supported-frequ.patch +phy-sunplus-return-negative-error-code-in-sp_usb_phy.patch +clk-rockchip-rk3128-fix-aclk_peri_src-s-parent.patch +clk-rockchip-rk3128-fix-sclk_sdmmc-s-clock-name.patch +drm-i915-call-intel_pre_plane_updates-also-for-pipes.patch +drm-amd-display-increase-num-voltage-states-to-40.patch +cxl-add-cxl_decoders_committed-helper.patch +cxl-core-always-hold-region_rwsem-while-reading-pois.patch +kernel-resource-increment-by-align-value-in-get_free.patch +drm-amd-display-increase-frame-warning-limit-with-ka.patch +dmaengine-idxd-protect-int_handle-field-in-hw-descri.patch +dmaengine-fsl-edma-fix-wrong-pointer-check-in-fsl_ed.patch +riscv-kvm-update-external-interrupt-atomically-for-i.patch +powerpc-pseries-vas-migration-suspend-waits-for-no-i.patch +net-prevent-mss-overflow-in-skb_segment.patch +cxl-pmu-ensure-put_device-on-pmu-devices.patch +net-libwx-fix-memory-leak-on-free-page.patch +net-constify-sk_dst_get-and-__sk_dst_get-argument.patch diff --git a/queue-6.6/sfc-fix-a-double-free-bug-in-efx_probe_filters.patch b/queue-6.6/sfc-fix-a-double-free-bug-in-efx_probe_filters.patch new file mode 100644 index 
00000000000..5d70511ce6a --- /dev/null +++ b/queue-6.6/sfc-fix-a-double-free-bug-in-efx_probe_filters.patch @@ -0,0 +1,51 @@ +From 4536b3722007c628fac71350b7e75525babc4cf9 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 25 Dec 2023 19:29:14 +0800 +Subject: sfc: fix a double-free bug in efx_probe_filters + +From: Zhipeng Lu + +[ Upstream commit d5a306aedba34e640b11d7026dbbafb78ee3a5f6 ] + +In efx_probe_filters, the channel->rps_flow_id is freed in a +efx_for_each_channel marco when success equals to 0. +However, after the following call chain: + +ef100_net_open + |-> efx_probe_filters + |-> ef100_net_stop + |-> efx_remove_filters + +The channel->rps_flow_id is freed again in the efx_for_each_channel of +efx_remove_filters, triggering a double-free bug. + +Fixes: a9dc3d5612ce ("sfc_ef100: RX filter table management and related gubbins") +Reviewed-by: Simon Horman +Reviewed-by: Edward Cree +Signed-off-by: Zhipeng Lu +Link: https://lore.kernel.org/r/20231225112915.3544581-1-alexious@zju.edu.cn +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/sfc/rx_common.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c +index d2f35ee15effe..fac227d372db4 100644 +--- a/drivers/net/ethernet/sfc/rx_common.c ++++ b/drivers/net/ethernet/sfc/rx_common.c +@@ -823,8 +823,10 @@ int efx_probe_filters(struct efx_nic *efx) + } + + if (!success) { +- efx_for_each_channel(channel, efx) ++ efx_for_each_channel(channel, efx) { + kfree(channel->rps_flow_id); ++ channel->rps_flow_id = NULL; ++ } + efx->type->filter_table_remove(efx); + rc = -ENOMEM; + goto out_unlock; +-- +2.43.0 + diff --git a/queue-6.6/tcp-derive-delack_max-from-rto_min.patch b/queue-6.6/tcp-derive-delack_max-from-rto_min.patch new file mode 100644 index 00000000000..16b10b3f47e --- /dev/null +++ b/queue-6.6/tcp-derive-delack_max-from-rto_min.patch @@ -0,0 +1,119 @@ +From 6bf958a473c640a661d24e4ffcdd1adb26bd6a41 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 20 Sep 2023 17:29:43 +0000 +Subject: tcp: derive delack_max from rto_min + +From: Eric Dumazet + +[ Upstream commit bbf80d713fe75cfbecda26e7c03a9a8d22af2f4f ] + +While BPF allows to set icsk->->icsk_delack_max +and/or icsk->icsk_rto_min, we have an ip route +attribute (RTAX_RTO_MIN) to be able to tune rto_min, +but nothing to consequently adjust max delayed ack, +which vary from 40ms to 200 ms (TCP_DELACK_{MIN|MAX}). + +This makes RTAX_RTO_MIN of almost no practical use, +unless customers are in big trouble. + +Modern days datacenter communications want to set +rto_min to ~5 ms, and the max delayed ack one jiffie +smaller to avoid spurious retransmits. + +After this patch, an "rto_min 5" route attribute will +effectively lower max delayed ack timers to 4 ms. + +Note in the following ss output, "rto:6 ... 
ato:4" + +$ ss -temoi dst XXXXXX +State Recv-Q Send-Q Local Address:Port Peer Address:Port Process +ESTAB 0 0 [2002:a05:6608:295::]:52950 [2002:a05:6608:297::]:41597 + ino:255134 sk:1001 <-> + skmem:(r0,rb1707063,t872,tb262144,f0,w0,o0,bl0,d0) ts sack + cubic wscale:8,8 rto:6 rtt:0.02/0.002 ato:4 mss:4096 pmtu:4500 + rcvmss:536 advmss:4096 cwnd:10 bytes_sent:54823160 bytes_acked:54823121 + bytes_received:54823120 segs_out:1370582 segs_in:1370580 + data_segs_out:1370579 data_segs_in:1370578 send 16.4Gbps + pacing_rate 32.6Gbps delivery_rate 1.72Gbps delivered:1370579 + busy:26920ms unacked:1 rcv_rtt:34.615 rcv_space:65920 + rcv_ssthresh:65535 minrtt:0.015 snd_wnd:65536 + +While we could argue this patch fixes a bug with RTAX_RTO_MIN, +I do not add a Fixes: tag, so that we can soak it a bit before +asking backports to stable branches. + +Signed-off-by: Eric Dumazet +Acked-by: Soheil Hassas Yeganeh +Acked-by: Neal Cardwell +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + include/net/tcp.h | 2 ++ + net/ipv4/tcp.c | 3 ++- + net/ipv4/tcp_output.c | 16 +++++++++++++++- + 3 files changed, 19 insertions(+), 2 deletions(-) + +diff --git a/include/net/tcp.h b/include/net/tcp.h +index a88bf8f6db235..9cbcfb3c95dac 100644 +--- a/include/net/tcp.h ++++ b/include/net/tcp.h +@@ -723,6 +723,8 @@ static inline void tcp_fast_path_check(struct sock *sk) + tcp_fast_path_on(tp); + } + ++u32 tcp_delack_max(const struct sock *sk); ++ + /* Compute the actual rto_min value */ + static inline u32 tcp_rto_min(struct sock *sk) + { +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c +index ec46d74c20938..f124f6c639157 100644 +--- a/net/ipv4/tcp.c ++++ b/net/ipv4/tcp.c +@@ -3774,7 +3774,8 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) + info->tcpi_options |= TCPI_OPT_SYN_DATA; + + info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto); +- info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato); ++ info->tcpi_ato = jiffies_to_usecs(min(icsk->icsk_ack.ato, ++ tcp_delack_max(sk))); + info->tcpi_snd_mss = tp->mss_cache; + info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss; + +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c +index cab3c1162c3a6..ab3b7b4b4429b 100644 +--- a/net/ipv4/tcp_output.c ++++ b/net/ipv4/tcp_output.c +@@ -4003,6 +4003,20 @@ int tcp_connect(struct sock *sk) + } + EXPORT_SYMBOL(tcp_connect); + ++u32 tcp_delack_max(const struct sock *sk) ++{ ++ const struct dst_entry *dst = __sk_dst_get(sk); ++ u32 delack_max = inet_csk(sk)->icsk_delack_max; ++ ++ if (dst && dst_metric_locked(dst, RTAX_RTO_MIN)) { ++ u32 rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN); ++ u32 delack_from_rto_min = max_t(int, 1, rto_min - 1); ++ ++ delack_max = min_t(u32, delack_max, delack_from_rto_min); ++ } ++ return delack_max; ++} ++ + /* Send out a delayed ack, the caller does the policy checking + * to see if we should even be here. See tcp_input.c:tcp_ack_snd_check() + * for details. 
+@@ -4038,7 +4052,7 @@ void tcp_send_delayed_ack(struct sock *sk) + ato = min(ato, max_ato); + } + +- ato = min_t(u32, ato, inet_csk(sk)->icsk_delack_max); ++ ato = min_t(u32, ato, tcp_delack_max(sk)); + + /* Stay within the limit we were given */ + timeout = jiffies + ato; +-- +2.43.0 + diff --git a/queue-6.6/vfio-mtty-overhaul-mtty-interrupt-handling.patch b/queue-6.6/vfio-mtty-overhaul-mtty-interrupt-handling.patch new file mode 100644 index 00000000000..acb8dcc3237 --- /dev/null +++ b/queue-6.6/vfio-mtty-overhaul-mtty-interrupt-handling.patch @@ -0,0 +1,406 @@ +From 6c21c85f6895e8d681ea1cca2172fcc1d70b8f88 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 16 Oct 2023 16:47:35 -0600 +Subject: vfio/mtty: Overhaul mtty interrupt handling +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Alex Williamson + +[ Upstream commit 293fbc28818135743f54d46c418ede3e4a20a742 ] + +The mtty driver does not currently conform to the vfio SET_IRQS uAPI. +For example, it claims to support mask and unmask of INTx, but actually +does nothing. It claims to support AUTOMASK for INTx, but doesn't. It +fails to teardown eventfds under the full semantics specified by the +SET_IRQS ioctl. It also fails to teardown eventfds when the device is +closed, leading to memory leaks. It claims to support the request IRQ, +but doesn't. + +Fix all these. + +A side effect of this is that QEMU will now report a warning: + +vfio : Failed to set up UNMASK eventfd signaling for interrupt \ +INTX-0: VFIO_DEVICE_SET_IRQS failure: Inappropriate ioctl for device + +The fact is that the unmask eventfd was never supported but quietly +failed. mtty never honored the AUTOMASK behavior, therefore there +was nothing to unmask. QEMU is verbose about the failure, but +properly falls back to userspace unmasking. 
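+
+For reference, the userspace unmask QEMU falls back to is an ordinary
+SET_IRQS call (userspace-side sketch, using the uAPI constants that
+also appear in the driver below; device_fd stands for an open VFIO
+device file descriptor):
+
+    struct vfio_irq_set irq_set = {
+        .argsz = sizeof(irq_set),
+        .flags = VFIO_IRQ_SET_ACTION_UNMASK | VFIO_IRQ_SET_DATA_NONE,
+        .index = VFIO_PCI_INTX_IRQ_INDEX,
+        .start = 0,
+        .count = 1,
+    };
+
+    ioctl(device_fd, VFIO_DEVICE_SET_IRQS, &irq_set);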
+ +Fixes: 9d1a546c53b4 ("docs: Sample driver to demonstrate how to use Mediated device framework.") +Reviewed-by: Cédric Le Goater +Link: https://lore.kernel.org/r/20231016224736.2575718-2-alex.williamson@redhat.com +Signed-off-by: Alex Williamson +Signed-off-by: Sasha Levin +--- + samples/vfio-mdev/mtty.c | 239 +++++++++++++++++++++++++++------------ + 1 file changed, 166 insertions(+), 73 deletions(-) + +diff --git a/samples/vfio-mdev/mtty.c b/samples/vfio-mdev/mtty.c +index 5af00387c519e..245db52bedf29 100644 +--- a/samples/vfio-mdev/mtty.c ++++ b/samples/vfio-mdev/mtty.c +@@ -127,7 +127,6 @@ struct serial_port { + /* State of each mdev device */ + struct mdev_state { + struct vfio_device vdev; +- int irq_fd; + struct eventfd_ctx *intx_evtfd; + struct eventfd_ctx *msi_evtfd; + int irq_index; +@@ -141,6 +140,7 @@ struct mdev_state { + struct mutex rxtx_lock; + struct vfio_device_info dev_info; + int nr_ports; ++ u8 intx_mask:1; + }; + + static struct mtty_type { +@@ -166,10 +166,6 @@ static const struct file_operations vd_fops = { + + static const struct vfio_device_ops mtty_dev_ops; + +-/* function prototypes */ +- +-static int mtty_trigger_interrupt(struct mdev_state *mdev_state); +- + /* Helper functions */ + + static void dump_buffer(u8 *buf, uint32_t count) +@@ -186,6 +182,36 @@ static void dump_buffer(u8 *buf, uint32_t count) + #endif + } + ++static bool is_intx(struct mdev_state *mdev_state) ++{ ++ return mdev_state->irq_index == VFIO_PCI_INTX_IRQ_INDEX; ++} ++ ++static bool is_msi(struct mdev_state *mdev_state) ++{ ++ return mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX; ++} ++ ++static bool is_noirq(struct mdev_state *mdev_state) ++{ ++ return !is_intx(mdev_state) && !is_msi(mdev_state); ++} ++ ++static void mtty_trigger_interrupt(struct mdev_state *mdev_state) ++{ ++ lockdep_assert_held(&mdev_state->ops_lock); ++ ++ if (is_msi(mdev_state)) { ++ if (mdev_state->msi_evtfd) ++ eventfd_signal(mdev_state->msi_evtfd, 1); ++ } else if (is_intx(mdev_state)) { ++ if (mdev_state->intx_evtfd && !mdev_state->intx_mask) { ++ eventfd_signal(mdev_state->intx_evtfd, 1); ++ mdev_state->intx_mask = true; ++ } ++ } ++} ++ + static void mtty_create_config_space(struct mdev_state *mdev_state) + { + /* PCI dev ID */ +@@ -921,6 +947,25 @@ static ssize_t mtty_write(struct vfio_device *vdev, const char __user *buf, + return -EFAULT; + } + ++static void mtty_disable_intx(struct mdev_state *mdev_state) ++{ ++ if (mdev_state->intx_evtfd) { ++ eventfd_ctx_put(mdev_state->intx_evtfd); ++ mdev_state->intx_evtfd = NULL; ++ mdev_state->intx_mask = false; ++ mdev_state->irq_index = -1; ++ } ++} ++ ++static void mtty_disable_msi(struct mdev_state *mdev_state) ++{ ++ if (mdev_state->msi_evtfd) { ++ eventfd_ctx_put(mdev_state->msi_evtfd); ++ mdev_state->msi_evtfd = NULL; ++ mdev_state->irq_index = -1; ++ } ++} ++ + static int mtty_set_irqs(struct mdev_state *mdev_state, uint32_t flags, + unsigned int index, unsigned int start, + unsigned int count, void *data) +@@ -932,59 +977,113 @@ static int mtty_set_irqs(struct mdev_state *mdev_state, uint32_t flags, + case VFIO_PCI_INTX_IRQ_INDEX: + switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) { + case VFIO_IRQ_SET_ACTION_MASK: ++ if (!is_intx(mdev_state) || start != 0 || count != 1) { ++ ret = -EINVAL; ++ break; ++ } ++ ++ if (flags & VFIO_IRQ_SET_DATA_NONE) { ++ mdev_state->intx_mask = true; ++ } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { ++ uint8_t mask = *(uint8_t *)data; ++ ++ if (mask) ++ mdev_state->intx_mask = true; ++ } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) 
{ ++ ret = -ENOTTY; /* No support for mask fd */ ++ } ++ break; + case VFIO_IRQ_SET_ACTION_UNMASK: ++ if (!is_intx(mdev_state) || start != 0 || count != 1) { ++ ret = -EINVAL; ++ break; ++ } ++ ++ if (flags & VFIO_IRQ_SET_DATA_NONE) { ++ mdev_state->intx_mask = false; ++ } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { ++ uint8_t mask = *(uint8_t *)data; ++ ++ if (mask) ++ mdev_state->intx_mask = false; ++ } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { ++ ret = -ENOTTY; /* No support for unmask fd */ ++ } + break; + case VFIO_IRQ_SET_ACTION_TRIGGER: +- { +- if (flags & VFIO_IRQ_SET_DATA_NONE) { +- pr_info("%s: disable INTx\n", __func__); +- if (mdev_state->intx_evtfd) +- eventfd_ctx_put(mdev_state->intx_evtfd); ++ if (is_intx(mdev_state) && !count && ++ (flags & VFIO_IRQ_SET_DATA_NONE)) { ++ mtty_disable_intx(mdev_state); ++ break; ++ } ++ ++ if (!(is_intx(mdev_state) || is_noirq(mdev_state)) || ++ start != 0 || count != 1) { ++ ret = -EINVAL; + break; + } + + if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { + int fd = *(int *)data; ++ struct eventfd_ctx *evt; ++ ++ mtty_disable_intx(mdev_state); ++ ++ if (fd < 0) ++ break; + +- if (fd > 0) { +- struct eventfd_ctx *evt; +- +- evt = eventfd_ctx_fdget(fd); +- if (IS_ERR(evt)) { +- ret = PTR_ERR(evt); +- break; +- } +- mdev_state->intx_evtfd = evt; +- mdev_state->irq_fd = fd; +- mdev_state->irq_index = index; ++ evt = eventfd_ctx_fdget(fd); ++ if (IS_ERR(evt)) { ++ ret = PTR_ERR(evt); + break; + } ++ mdev_state->intx_evtfd = evt; ++ mdev_state->irq_index = index; ++ break; ++ } ++ ++ if (!is_intx(mdev_state)) { ++ ret = -EINVAL; ++ break; ++ } ++ ++ if (flags & VFIO_IRQ_SET_DATA_NONE) { ++ mtty_trigger_interrupt(mdev_state); ++ } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { ++ uint8_t trigger = *(uint8_t *)data; ++ ++ if (trigger) ++ mtty_trigger_interrupt(mdev_state); + } + break; + } +- } + break; + case VFIO_PCI_MSI_IRQ_INDEX: + switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) { + case VFIO_IRQ_SET_ACTION_MASK: + case VFIO_IRQ_SET_ACTION_UNMASK: ++ ret = -ENOTTY; + break; + case VFIO_IRQ_SET_ACTION_TRIGGER: +- if (flags & VFIO_IRQ_SET_DATA_NONE) { +- if (mdev_state->msi_evtfd) +- eventfd_ctx_put(mdev_state->msi_evtfd); +- pr_info("%s: disable MSI\n", __func__); +- mdev_state->irq_index = VFIO_PCI_INTX_IRQ_INDEX; ++ if (is_msi(mdev_state) && !count && ++ (flags & VFIO_IRQ_SET_DATA_NONE)) { ++ mtty_disable_msi(mdev_state); + break; + } ++ ++ if (!(is_msi(mdev_state) || is_noirq(mdev_state)) || ++ start != 0 || count != 1) { ++ ret = -EINVAL; ++ break; ++ } ++ + if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { + int fd = *(int *)data; + struct eventfd_ctx *evt; + +- if (fd <= 0) +- break; ++ mtty_disable_msi(mdev_state); + +- if (mdev_state->msi_evtfd) ++ if (fd < 0) + break; + + evt = eventfd_ctx_fdget(fd); +@@ -993,20 +1092,37 @@ static int mtty_set_irqs(struct mdev_state *mdev_state, uint32_t flags, + break; + } + mdev_state->msi_evtfd = evt; +- mdev_state->irq_fd = fd; + mdev_state->irq_index = index; ++ break; ++ } ++ ++ if (!is_msi(mdev_state)) { ++ ret = -EINVAL; ++ break; ++ } ++ ++ if (flags & VFIO_IRQ_SET_DATA_NONE) { ++ mtty_trigger_interrupt(mdev_state); ++ } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { ++ uint8_t trigger = *(uint8_t *)data; ++ ++ if (trigger) ++ mtty_trigger_interrupt(mdev_state); + } + break; +- } +- break; ++ } ++ break; + case VFIO_PCI_MSIX_IRQ_INDEX: +- pr_info("%s: MSIX_IRQ\n", __func__); ++ dev_dbg(mdev_state->vdev.dev, "%s: MSIX_IRQ\n", __func__); ++ ret = -ENOTTY; + break; + case VFIO_PCI_ERR_IRQ_INDEX: +- pr_info("%s: 
ERR_IRQ\n", __func__); ++ dev_dbg(mdev_state->vdev.dev, "%s: ERR_IRQ\n", __func__); ++ ret = -ENOTTY; + break; + case VFIO_PCI_REQ_IRQ_INDEX: +- pr_info("%s: REQ_IRQ\n", __func__); ++ dev_dbg(mdev_state->vdev.dev, "%s: REQ_IRQ\n", __func__); ++ ret = -ENOTTY; + break; + } + +@@ -1014,33 +1130,6 @@ static int mtty_set_irqs(struct mdev_state *mdev_state, uint32_t flags, + return ret; + } + +-static int mtty_trigger_interrupt(struct mdev_state *mdev_state) +-{ +- int ret = -1; +- +- if ((mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX) && +- (!mdev_state->msi_evtfd)) +- return -EINVAL; +- else if ((mdev_state->irq_index == VFIO_PCI_INTX_IRQ_INDEX) && +- (!mdev_state->intx_evtfd)) { +- pr_info("%s: Intr eventfd not found\n", __func__); +- return -EINVAL; +- } +- +- if (mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX) +- ret = eventfd_signal(mdev_state->msi_evtfd, 1); +- else +- ret = eventfd_signal(mdev_state->intx_evtfd, 1); +- +-#if defined(DEBUG_INTR) +- pr_info("Intx triggered\n"); +-#endif +- if (ret != 1) +- pr_err("%s: eventfd signal failed (%d)\n", __func__, ret); +- +- return ret; +-} +- + static int mtty_get_region_info(struct mdev_state *mdev_state, + struct vfio_region_info *region_info, + u16 *cap_type_id, void **cap_type) +@@ -1084,22 +1173,16 @@ static int mtty_get_region_info(struct mdev_state *mdev_state, + + static int mtty_get_irq_info(struct vfio_irq_info *irq_info) + { +- switch (irq_info->index) { +- case VFIO_PCI_INTX_IRQ_INDEX: +- case VFIO_PCI_MSI_IRQ_INDEX: +- case VFIO_PCI_REQ_IRQ_INDEX: +- break; +- +- default: ++ if (irq_info->index != VFIO_PCI_INTX_IRQ_INDEX && ++ irq_info->index != VFIO_PCI_MSI_IRQ_INDEX) + return -EINVAL; +- } + + irq_info->flags = VFIO_IRQ_INFO_EVENTFD; + irq_info->count = 1; + + if (irq_info->index == VFIO_PCI_INTX_IRQ_INDEX) +- irq_info->flags |= (VFIO_IRQ_INFO_MASKABLE | +- VFIO_IRQ_INFO_AUTOMASKED); ++ irq_info->flags |= VFIO_IRQ_INFO_MASKABLE | ++ VFIO_IRQ_INFO_AUTOMASKED; + else + irq_info->flags |= VFIO_IRQ_INFO_NORESIZE; + +@@ -1262,6 +1345,15 @@ static unsigned int mtty_get_available(struct mdev_type *mtype) + return atomic_read(&mdev_avail_ports) / type->nr_ports; + } + ++static void mtty_close(struct vfio_device *vdev) ++{ ++ struct mdev_state *mdev_state = ++ container_of(vdev, struct mdev_state, vdev); ++ ++ mtty_disable_intx(mdev_state); ++ mtty_disable_msi(mdev_state); ++} ++ + static const struct vfio_device_ops mtty_dev_ops = { + .name = "vfio-mtty", + .init = mtty_init_dev, +@@ -1273,6 +1365,7 @@ static const struct vfio_device_ops mtty_dev_ops = { + .unbind_iommufd = vfio_iommufd_emulated_unbind, + .attach_ioas = vfio_iommufd_emulated_attach_ioas, + .detach_ioas = vfio_iommufd_emulated_detach_ioas, ++ .close_device = mtty_close, + }; + + static struct mdev_driver mtty_driver = { +-- +2.43.0 + diff --git a/queue-6.6/virtio_net-avoid-data-races-on-dev-stats-fields.patch b/queue-6.6/virtio_net-avoid-data-races-on-dev-stats-fields.patch new file mode 100644 index 00000000000..860dceab3a8 --- /dev/null +++ b/queue-6.6/virtio_net-avoid-data-races-on-dev-stats-fields.patch @@ -0,0 +1,142 @@ +From 5aecfecc759f6a4469592770e1f5296d29ca8670 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 21 Sep 2023 08:52:17 +0000 +Subject: virtio_net: avoid data-races on dev->stats fields + +From: Eric Dumazet + +[ Upstream commit d12a26b74fb77434b73fe39022266c4b00907219 ] + +Use DEV_STATS_INC() and DEV_STATS_READ() which provide +atomicity on paths that can be used concurrently. 
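+
+The conversion is mechanical, e.g. for rx_length_errors (sketch of the
+before/after, lifted from the hunks below):
+
+    /* before: plain, non-atomic read-modify-write on dev->stats */
+    dev->stats.rx_length_errors++;
+
+    /* after: atomic helper, safe on concurrently used paths */
+    DEV_STATS_INC(dev, rx_length_errors);
+
+    /* and on the read side (virtnet_stats) */
+    tot->rx_length_errors = DEV_STATS_READ(dev, rx_length_errors);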
+ +Reported-by: syzbot +Signed-off-by: Eric Dumazet +Reviewed-by: Xuan Zhuo +Cc: "Michael S. Tsirkin" +Cc: Jason Wang +Signed-off-by: David S. Miller +Stable-dep-of: 2311e06b9bf3 ("virtio_net: fix missing dma unmap for resize") +Signed-off-by: Sasha Levin +--- + drivers/net/virtio_net.c | 30 +++++++++++++++--------------- + 1 file changed, 15 insertions(+), 15 deletions(-) + +diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c +index 0c0be6b872c6a..c1c634782672f 100644 +--- a/drivers/net/virtio_net.c ++++ b/drivers/net/virtio_net.c +@@ -1258,7 +1258,7 @@ static struct sk_buff *receive_small(struct net_device *dev, + if (unlikely(len > GOOD_PACKET_LEN)) { + pr_debug("%s: rx error: len %u exceeds max size %d\n", + dev->name, len, GOOD_PACKET_LEN); +- dev->stats.rx_length_errors++; ++ DEV_STATS_INC(dev, rx_length_errors); + goto err; + } + +@@ -1323,7 +1323,7 @@ static void mergeable_buf_free(struct receive_queue *rq, int num_buf, + if (unlikely(!buf)) { + pr_debug("%s: rx error: %d buffers missing\n", + dev->name, num_buf); +- dev->stats.rx_length_errors++; ++ DEV_STATS_INC(dev, rx_length_errors); + break; + } + u64_stats_add(&stats->bytes, len); +@@ -1432,7 +1432,7 @@ static int virtnet_build_xdp_buff_mrg(struct net_device *dev, + pr_debug("%s: rx error: %d buffers out of %d missing\n", + dev->name, *num_buf, + virtio16_to_cpu(vi->vdev, hdr->num_buffers)); +- dev->stats.rx_length_errors++; ++ DEV_STATS_INC(dev, rx_length_errors); + goto err; + } + +@@ -1451,7 +1451,7 @@ static int virtnet_build_xdp_buff_mrg(struct net_device *dev, + put_page(page); + pr_debug("%s: rx error: len %u exceeds truesize %lu\n", + dev->name, len, (unsigned long)(truesize - room)); +- dev->stats.rx_length_errors++; ++ DEV_STATS_INC(dev, rx_length_errors); + goto err; + } + +@@ -1630,7 +1630,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, + if (unlikely(len > truesize - room)) { + pr_debug("%s: rx error: len %u exceeds truesize %lu\n", + dev->name, len, (unsigned long)(truesize - room)); +- dev->stats.rx_length_errors++; ++ DEV_STATS_INC(dev, rx_length_errors); + goto err_skb; + } + +@@ -1662,7 +1662,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, + dev->name, num_buf, + virtio16_to_cpu(vi->vdev, + hdr->num_buffers)); +- dev->stats.rx_length_errors++; ++ DEV_STATS_INC(dev, rx_length_errors); + goto err_buf; + } + +@@ -1676,7 +1676,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, + if (unlikely(len > truesize - room)) { + pr_debug("%s: rx error: len %u exceeds truesize %lu\n", + dev->name, len, (unsigned long)(truesize - room)); +- dev->stats.rx_length_errors++; ++ DEV_STATS_INC(dev, rx_length_errors); + goto err_skb; + } + +@@ -1763,7 +1763,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, + + if (unlikely(len < vi->hdr_len + ETH_HLEN)) { + pr_debug("%s: short packet %i\n", dev->name, len); +- dev->stats.rx_length_errors++; ++ DEV_STATS_INC(dev, rx_length_errors); + virtnet_rq_free_unused_buf(rq->vq, buf); + return; + } +@@ -1803,7 +1803,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, + return; + + frame_err: +- dev->stats.rx_frame_errors++; ++ DEV_STATS_INC(dev, rx_frame_errors); + dev_kfree_skb(skb); + } + +@@ -2352,12 +2352,12 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) + + /* This should not happen! 
*/ + if (unlikely(err)) { +- dev->stats.tx_fifo_errors++; ++ DEV_STATS_INC(dev, tx_fifo_errors); + if (net_ratelimit()) + dev_warn(&dev->dev, + "Unexpected TXQ (%d) queue failure: %d\n", + qnum, err); +- dev->stats.tx_dropped++; ++ DEV_STATS_INC(dev, tx_dropped); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } +@@ -2576,10 +2576,10 @@ static void virtnet_stats(struct net_device *dev, + tot->tx_errors += terrors; + } + +- tot->tx_dropped = dev->stats.tx_dropped; +- tot->tx_fifo_errors = dev->stats.tx_fifo_errors; +- tot->rx_length_errors = dev->stats.rx_length_errors; +- tot->rx_frame_errors = dev->stats.rx_frame_errors; ++ tot->tx_dropped = DEV_STATS_READ(dev, tx_dropped); ++ tot->tx_fifo_errors = DEV_STATS_READ(dev, tx_fifo_errors); ++ tot->rx_length_errors = DEV_STATS_READ(dev, rx_length_errors); ++ tot->rx_frame_errors = DEV_STATS_READ(dev, rx_frame_errors); + } + + static void virtnet_ack_link_announce(struct virtnet_info *vi) +-- +2.43.0 + diff --git a/queue-6.6/virtio_net-fix-missing-dma-unmap-for-resize.patch b/queue-6.6/virtio_net-fix-missing-dma-unmap-for-resize.patch new file mode 100644 index 00000000000..4ac0eb32305 --- /dev/null +++ b/queue-6.6/virtio_net-fix-missing-dma-unmap-for-resize.patch @@ -0,0 +1,164 @@ +From ffea31c355009d1d4cbc6bdc34fd2f33b7a0ec4c Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 26 Dec 2023 17:43:33 +0800 +Subject: virtio_net: fix missing dma unmap for resize + +From: Xuan Zhuo + +[ Upstream commit 2311e06b9bf3d44e15f9175af177a782806f688f ] + +For rq, we have three cases getting buffers from virtio core: + +1. virtqueue_get_buf{,_ctx} +2. virtqueue_detach_unused_buf +3. callback for virtqueue_resize + +But in commit 295525e29a5b("virtio_net: merge dma operations when +filling mergeable buffers"), I missed the dma unmap for the #3 case. + +That will leak some memory, because I did not release the pages referred +by the unused buffers. + +If we do such script, we will make the system OOM. + + while true + do + ethtool -G ens4 rx 128 + ethtool -G ens4 rx 256 + free -m + done + +Fixes: 295525e29a5b ("virtio_net: merge dma operations when filling mergeable buffers") +Signed-off-by: Xuan Zhuo +Acked-by: Michael S. 
Tsirkin +Link: https://lore.kernel.org/r/20231226094333.47740-1-xuanzhuo@linux.alibaba.com +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + drivers/net/virtio_net.c | 60 ++++++++++++++++++++-------------------- + 1 file changed, 30 insertions(+), 30 deletions(-) + +diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c +index c1c634782672f..deb2229ab4d82 100644 +--- a/drivers/net/virtio_net.c ++++ b/drivers/net/virtio_net.c +@@ -334,7 +334,6 @@ struct virtio_net_common_hdr { + }; + }; + +-static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf); + static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf); + + static bool is_xdp_frame(void *ptr) +@@ -408,6 +407,17 @@ static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask) + return p; + } + ++static void virtnet_rq_free_buf(struct virtnet_info *vi, ++ struct receive_queue *rq, void *buf) ++{ ++ if (vi->mergeable_rx_bufs) ++ put_page(virt_to_head_page(buf)); ++ else if (vi->big_packets) ++ give_pages(rq, buf); ++ else ++ put_page(virt_to_head_page(buf)); ++} ++ + static void enable_delayed_refill(struct virtnet_info *vi) + { + spin_lock_bh(&vi->refill_lock); +@@ -634,17 +644,6 @@ static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx) + return buf; + } + +-static void *virtnet_rq_detach_unused_buf(struct receive_queue *rq) +-{ +- void *buf; +- +- buf = virtqueue_detach_unused_buf(rq->vq); +- if (buf && rq->do_dma) +- virtnet_rq_unmap(rq, buf, 0); +- +- return buf; +-} +- + static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len) + { + struct virtnet_rq_dma *dma; +@@ -744,6 +743,20 @@ static void virtnet_rq_set_premapped(struct virtnet_info *vi) + } + } + ++static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf) ++{ ++ struct virtnet_info *vi = vq->vdev->priv; ++ struct receive_queue *rq; ++ int i = vq2rxq(vq); ++ ++ rq = &vi->rq[i]; ++ ++ if (rq->do_dma) ++ virtnet_rq_unmap(rq, buf, 0); ++ ++ virtnet_rq_free_buf(vi, rq, buf); ++} ++ + static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) + { + unsigned int len; +@@ -1764,7 +1777,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, + if (unlikely(len < vi->hdr_len + ETH_HLEN)) { + pr_debug("%s: short packet %i\n", dev->name, len); + DEV_STATS_INC(dev, rx_length_errors); +- virtnet_rq_free_unused_buf(rq->vq, buf); ++ virtnet_rq_free_buf(vi, rq, buf); + return; + } + +@@ -2392,7 +2405,7 @@ static int virtnet_rx_resize(struct virtnet_info *vi, + if (running) + napi_disable(&rq->napi); + +- err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_free_unused_buf); ++ err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf); + if (err) + netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err); + +@@ -4031,19 +4044,6 @@ static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf) + xdp_return_frame(ptr_to_xdp(buf)); + } + +-static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf) +-{ +- struct virtnet_info *vi = vq->vdev->priv; +- int i = vq2rxq(vq); +- +- if (vi->mergeable_rx_bufs) +- put_page(virt_to_head_page(buf)); +- else if (vi->big_packets) +- give_pages(&vi->rq[i], buf); +- else +- put_page(virt_to_head_page(buf)); +-} +- + static void free_unused_bufs(struct virtnet_info *vi) + { + void *buf; +@@ -4057,10 +4057,10 @@ static void free_unused_bufs(struct virtnet_info *vi) + } + + for (i = 0; i < vi->max_queue_pairs; i++) { +- struct receive_queue *rq = 
&vi->rq[i]; ++ struct virtqueue *vq = vi->rq[i].vq; + +- while ((buf = virtnet_rq_detach_unused_buf(rq)) != NULL) +- virtnet_rq_free_unused_buf(rq->vq, buf); ++ while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) ++ virtnet_rq_unmap_free_buf(vq, buf); + cond_resched(); + } + } +-- +2.43.0 + diff --git a/queue-6.6/wifi-iwlwifi-pcie-don-t-synchronize-irqs-from-irq.patch b/queue-6.6/wifi-iwlwifi-pcie-don-t-synchronize-irqs-from-irq.patch new file mode 100644 index 00000000000..79d9783dfa3 --- /dev/null +++ b/queue-6.6/wifi-iwlwifi-pcie-don-t-synchronize-irqs-from-irq.patch @@ -0,0 +1,170 @@ +From 480a0f4def2cc2f87619db3e3a42f3bda5e1bf28 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 15 Dec 2023 11:13:34 +0100 +Subject: wifi: iwlwifi: pcie: don't synchronize IRQs from IRQ + +From: Johannes Berg + +[ Upstream commit 400f6ebbc175286576c7f7fddf3c347d09d12310 ] + +On older devices (before unified image!) we can end up calling +stop_device from an rfkill interrupt. However, in stop_device +we attempt to synchronize IRQs, which then of course deadlocks. + +Avoid this by checking the context, if running from the IRQ +thread then don't synchronize. This wouldn't be correct on a +new device since RSS is supported, but older devices only have +a single interrupt/queue. + +Fixes: 37fb29bd1f90 ("wifi: iwlwifi: pcie: synchronize IRQs before NAPI") +Reviewed-by: Miri Korenblit +Reviewed-by: Emmanuel Grumbach +Signed-off-by: Johannes Berg +Signed-off-by: Kalle Valo +Link: https://msgid.link/20231215111335.59aab00baed7.Iadfe154d6248e7f9dfd69522e5429dbbd72925d7@changeid +Signed-off-by: Sasha Levin +--- + .../net/wireless/intel/iwlwifi/pcie/internal.h | 4 ++-- + drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 8 ++++---- + drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 17 +++++++++-------- + 3 files changed, 15 insertions(+), 14 deletions(-) + +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +index 0f6493dab8cbd..5602441df2b7e 100644 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +@@ -749,7 +749,7 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans) + } + } + +-void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans); ++void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans, bool from_irq); + + static inline bool iwl_is_rfkill_set(struct iwl_trans *trans) + { +@@ -796,7 +796,7 @@ static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans) + return (trans->dbg.dest_tlv || iwl_trans_dbg_ini_valid(trans)); + } + +-void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state); ++void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq); + void iwl_trans_pcie_dump_regs(struct iwl_trans *trans); + + #ifdef CONFIG_IWLWIFI_DEBUGFS +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +index 4614acee9f7ba..a9415d333490c 100644 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +@@ -1785,7 +1785,7 @@ static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans) + return inta; + } + +-void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans) ++void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans, bool from_irq) + { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct isr_statistics *isr_stats = &trans_pcie->isr_stats; +@@ -1809,7 +1809,7 @@ void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans) + 
isr_stats->rfkill++; + + if (prev != report) +- iwl_trans_pcie_rf_kill(trans, report); ++ iwl_trans_pcie_rf_kill(trans, report, from_irq); + mutex_unlock(&trans_pcie->mutex); + + if (hw_rfkill) { +@@ -1949,7 +1949,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) + + /* HW RF KILL switch toggled */ + if (inta & CSR_INT_BIT_RF_KILL) { +- iwl_pcie_handle_rfkill_irq(trans); ++ iwl_pcie_handle_rfkill_irq(trans, true); + handled |= CSR_INT_BIT_RF_KILL; + } + +@@ -2366,7 +2366,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) + + /* HW RF KILL switch toggled */ + if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL) +- iwl_pcie_handle_rfkill_irq(trans); ++ iwl_pcie_handle_rfkill_irq(trans, true); + + if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) { + IWL_ERR(trans, +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +index 2e23ccd7d7938..1bc4a0089c6ff 100644 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +@@ -1082,7 +1082,7 @@ bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans) + report = test_bit(STATUS_RFKILL_OPMODE, &trans->status); + + if (prev != report) +- iwl_trans_pcie_rf_kill(trans, report); ++ iwl_trans_pcie_rf_kill(trans, report, false); + + return hw_rfkill; + } +@@ -1236,7 +1236,7 @@ static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie) + trans_pcie->hw_mask = trans_pcie->hw_init_mask; + } + +-static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans) ++static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool from_irq) + { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + +@@ -1263,7 +1263,8 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans) + if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) { + IWL_DEBUG_INFO(trans, + "DEVICE_ENABLED bit was set and is now cleared\n"); +- iwl_pcie_synchronize_irqs(trans); ++ if (!from_irq) ++ iwl_pcie_synchronize_irqs(trans); + iwl_pcie_rx_napi_sync(trans); + iwl_pcie_tx_stop(trans); + iwl_pcie_rx_stop(trans); +@@ -1453,7 +1454,7 @@ void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans, + clear_bit(STATUS_RFKILL_OPMODE, &trans->status); + } + if (hw_rfkill != was_in_rfkill) +- iwl_trans_pcie_rf_kill(trans, hw_rfkill); ++ iwl_trans_pcie_rf_kill(trans, hw_rfkill, false); + } + + static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) +@@ -1468,12 +1469,12 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) + mutex_lock(&trans_pcie->mutex); + trans_pcie->opmode_down = true; + was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status); +- _iwl_trans_pcie_stop_device(trans); ++ _iwl_trans_pcie_stop_device(trans, false); + iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill); + mutex_unlock(&trans_pcie->mutex); + } + +-void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state) ++void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq) + { + struct iwl_trans_pcie __maybe_unused *trans_pcie = + IWL_TRANS_GET_PCIE_TRANS(trans); +@@ -1486,7 +1487,7 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state) + if (trans->trans_cfg->gen2) + _iwl_trans_pcie_gen2_stop_device(trans); + else +- _iwl_trans_pcie_stop_device(trans); ++ _iwl_trans_pcie_stop_device(trans, from_irq); + } + } + +@@ -2869,7 +2870,7 @@ static ssize_t iwl_dbgfs_rfkill_write(struct file *file, + IWL_WARN(trans, "changing debug rfkill %d->%d\n", + trans_pcie->debug_rfkill, new_value); + 
trans_pcie->debug_rfkill = new_value; +- iwl_pcie_handle_rfkill_irq(trans); ++ iwl_pcie_handle_rfkill_irq(trans, false); + + return count; + } +-- +2.43.0 + diff --git a/queue-6.6/xsk-add-multi-buffer-support-for-sockets-sharing-ume.patch b/queue-6.6/xsk-add-multi-buffer-support-for-sockets-sharing-ume.patch new file mode 100644 index 00000000000..1413dcf5b40 --- /dev/null +++ b/queue-6.6/xsk-add-multi-buffer-support-for-sockets-sharing-ume.patch @@ -0,0 +1,74 @@ +From 6436ee6684af950128d3fd91775b23bcbe2ae6c7 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 7 Sep 2023 09:20:32 +0530 +Subject: xsk: add multi-buffer support for sockets sharing umem + +From: Tirthendu Sarkar + +[ Upstream commit d609f3d228a8efe991f44f11f24146e2a5209755 ] + +Userspace applications indicate their multi-buffer capability to xsk +using XSK_USE_SG socket bind flag. For sockets using shared umem the +bind flag may contain XSK_USE_SG only for the first socket. For any +subsequent socket the only option supported is XDP_SHARED_UMEM. + +Add option XDP_UMEM_SG_FLAG in umem config flags to store the +multi-buffer handling capability when indicated by XSK_USE_SG option in +bing flag by the first socket. Use this to derive multi-buffer capability +for subsequent sockets in xsk core. + +Signed-off-by: Tirthendu Sarkar +Fixes: 81470b5c3c66 ("xsk: introduce XSK_USE_SG bind flag for xsk socket") +Acked-by: Maciej Fijalkowski +Link: https://lore.kernel.org/r/20230907035032.2627879-1-tirthendu.sarkar@intel.com +Signed-off-by: Alexei Starovoitov +Signed-off-by: Sasha Levin +--- + include/net/xdp_sock.h | 2 ++ + net/xdp/xsk.c | 2 +- + net/xdp/xsk_buff_pool.c | 3 +++ + 3 files changed, 6 insertions(+), 1 deletion(-) + +diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h +index 1617af3801620..69b472604b86f 100644 +--- a/include/net/xdp_sock.h ++++ b/include/net/xdp_sock.h +@@ -14,6 +14,8 @@ + #include + #include + ++#define XDP_UMEM_SG_FLAG (1 << 1) ++ + struct net_device; + struct xsk_queue; + struct xdp_buff; +diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c +index 3515e19852d88..774a6d1916e40 100644 +--- a/net/xdp/xsk.c ++++ b/net/xdp/xsk.c +@@ -1227,7 +1227,7 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len) + + xs->dev = dev; + xs->zc = xs->umem->zc; +- xs->sg = !!(flags & XDP_USE_SG); ++ xs->sg = !!(xs->umem->flags & XDP_UMEM_SG_FLAG); + xs->queue_id = qid; + xp_add_xsk(xs->pool, xs); + +diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c +index b3f7b310811ed..49cb9f9a09bee 100644 +--- a/net/xdp/xsk_buff_pool.c ++++ b/net/xdp/xsk_buff_pool.c +@@ -170,6 +170,9 @@ int xp_assign_dev(struct xsk_buff_pool *pool, + if (err) + return err; + ++ if (flags & XDP_USE_SG) ++ pool->umem->flags |= XDP_UMEM_SG_FLAG; ++ + if (flags & XDP_USE_NEED_WAKEUP) + pool->uses_need_wakeup = true; + /* Tx needs to be explicitly woken up the first time. Also +-- +2.43.0 + -- 2.47.3
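
An illustrative note on the final xsk patch above: after the fix, a socket that shares a umem derives its multi-buffer capability from the umem's flags (recorded via XDP_UMEM_SG_FLAG when the first socket binds with XDP_USE_SG) rather than from its own bind flags. The minimal userspace sketch below shows the expected bind sequence; it is not part of the queued patches, and the fd/ifindex/queue variables are placeholders.

/* Build against a 6.6-era linux/if_xdp.h that defines XDP_USE_SG. */
#include <linux/if_xdp.h>
#include <sys/socket.h>
#include <string.h>

static int bind_xsk(int fd, __u32 ifindex, __u32 queue_id, __u16 flags,
		    __u32 shared_umem_fd)
{
	struct sockaddr_xdp sxdp;

	memset(&sxdp, 0, sizeof(sxdp));
	sxdp.sxdp_family = AF_XDP;
	sxdp.sxdp_ifindex = ifindex;
	sxdp.sxdp_queue_id = queue_id;
	sxdp.sxdp_flags = flags;
	sxdp.sxdp_shared_umem_fd = shared_umem_fd;

	return bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
}

/*
 * First socket: owns the umem and may request multi-buffer with XDP_USE_SG.
 * The kernel now records that capability on the umem itself:
 *
 *	bind_xsk(fd0, ifindex, qid0, XDP_USE_SG | XDP_USE_NEED_WAKEUP, 0);
 *
 * Subsequent sockets sharing the umem pass only XDP_SHARED_UMEM; with the
 * fix, their multi-buffer support comes from the umem flags, not from their
 * own (necessarily SG-less) bind flags:
 *
 *	bind_xsk(fd1, ifindex, qid1, XDP_SHARED_UMEM, fd0);
 */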