6.6-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 8 Apr 2025 07:09:30 +0000 (09:09 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 8 Apr 2025 07:09:30 +0000 (09:09 +0200)
added patches:
acpi-x86-extend-lenovo-yoga-tab-3-quirk-with-skip-gpio-event-handlers.patch
loongarch-bpf-don-t-override-subprog-s-return-value.patch
loongarch-bpf-fix-off-by-one-error-in-build_prologue.patch
loongarch-bpf-use-move_addr-for-bpf_pseudo_func.patch
loongarch-increase-arch_dma_minalign-up-to-16.patch
ntb_perf-delete-duplicate-dmaengine_unmap_put-call-in-perf_copy_chunk.patch
perf-x86-intel-apply-static-call-for-drain_pebs.patch
perf-x86-intel-avoid-disable-pmu-if-cpuc-enabled-in-sample-read.patch
platform-x86-isst-correct-command-storage-data-length.patch
usbnet-fix-npe-during-rx_complete.patch
x86-hyperv-fix-check-of-return-value-from-snp_set_vmsa.patch
x86-microcode-amd-fix-__apply_microcode_amd-s-return-value.patch

13 files changed:
queue-6.6/acpi-x86-extend-lenovo-yoga-tab-3-quirk-with-skip-gpio-event-handlers.patch [new file with mode: 0644]
queue-6.6/loongarch-bpf-don-t-override-subprog-s-return-value.patch [new file with mode: 0644]
queue-6.6/loongarch-bpf-fix-off-by-one-error-in-build_prologue.patch [new file with mode: 0644]
queue-6.6/loongarch-bpf-use-move_addr-for-bpf_pseudo_func.patch [new file with mode: 0644]
queue-6.6/loongarch-increase-arch_dma_minalign-up-to-16.patch [new file with mode: 0644]
queue-6.6/ntb_perf-delete-duplicate-dmaengine_unmap_put-call-in-perf_copy_chunk.patch [new file with mode: 0644]
queue-6.6/perf-x86-intel-apply-static-call-for-drain_pebs.patch [new file with mode: 0644]
queue-6.6/perf-x86-intel-avoid-disable-pmu-if-cpuc-enabled-in-sample-read.patch [new file with mode: 0644]
queue-6.6/platform-x86-isst-correct-command-storage-data-length.patch [new file with mode: 0644]
queue-6.6/series
queue-6.6/usbnet-fix-npe-during-rx_complete.patch [new file with mode: 0644]
queue-6.6/x86-hyperv-fix-check-of-return-value-from-snp_set_vmsa.patch [new file with mode: 0644]
queue-6.6/x86-microcode-amd-fix-__apply_microcode_amd-s-return-value.patch [new file with mode: 0644]

diff --git a/queue-6.6/acpi-x86-extend-lenovo-yoga-tab-3-quirk-with-skip-gpio-event-handlers.patch b/queue-6.6/acpi-x86-extend-lenovo-yoga-tab-3-quirk-with-skip-gpio-event-handlers.patch
new file mode 100644 (file)
index 0000000..97b40ee
--- /dev/null
@@ -0,0 +1,50 @@
+From 2fa87c71d2adb4b82c105f9191e6120340feff00 Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Tue, 25 Mar 2025 22:04:50 +0100
+Subject: ACPI: x86: Extend Lenovo Yoga Tab 3 quirk with skip GPIO event-handlers
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+commit 2fa87c71d2adb4b82c105f9191e6120340feff00 upstream.
+
+Depending on the secureboot signature on EFI\BOOT\BOOTX86.EFI the
+Lenovo Yoga Tab 3 UEFI will switch its OSID ACPI variable between
+1 (Windows) and 4 (Android(GMIN)).
+
+In Windows mode a GPIO event handler gets installed for GPO1 pin 5,
+causing Linux' x86-android-tablets code, which deals with the general
+brokenness of this device's ACPI tables, to fail to probe with:
+
+[   17.853705] x86_android_tablets: error -16 getting GPIO INT33FF:01 5
+[   17.859623] x86_android_tablets x86_android_tablets: probe with driver
+
+which renders sound, the touchscreen, charging-management,
+battery-monitoring and more non-functional.
+
+Add ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS to the existing quirks for this
+device to fix this.
+
+Reported-by: Agoston Lorincz <pipacsba@gmail.com>
+Closes: https://lore.kernel.org/platform-driver-x86/CAMEzqD+DNXrAvUOHviB2O2bjtcbmo3xH=kunKr4nubuMLbb_0A@mail.gmail.com/
+Cc: All applicable <stable@kernel.org>
+Fixes: fe820db35275 ("ACPI: x86: Add skip i2c clients quirk for Lenovo Yoga Tab 3 Pro (YT3-X90F)")
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Link: https://patch.msgid.link/20250325210450.358506-1-hdegoede@redhat.com
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/acpi/x86/utils.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/acpi/x86/utils.c
++++ b/drivers/acpi/x86/utils.c
+@@ -367,7 +367,8 @@ static const struct dmi_system_id acpi_q
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
+               },
+               .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+-                                      ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
++                                      ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
++                                      ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
+       },
+       {
+               /* Medion Lifetab S10346 */
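
Illustrative aside (not part of the queued patch): the fix simply ORs one
more flag into an existing dmi_system_id quirk entry. A hedged sketch of
what such an entry looks like, with placeholder DMI strings rather than
the Yoga Tab 3's real identifiers:

    /* Sketch of a dmi_system_id quirk entry; the match strings are
     * hypothetical, only the flag combination mirrors the patch. */
    static const struct dmi_system_id example_quirk_ids[] = {
            {
                    .matches = {
                            DMI_MATCH(DMI_SYS_VENDOR, "EXAMPLE Corp"),
                            DMI_MATCH(DMI_PRODUCT_NAME, "Example Tablet"),
                    },
                    .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
                                            ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
                                            ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
            },
            { }
    };
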
diff --git a/queue-6.6/loongarch-bpf-don-t-override-subprog-s-return-value.patch b/queue-6.6/loongarch-bpf-don-t-override-subprog-s-return-value.patch
new file mode 100644 (file)
index 0000000..d9be681
--- /dev/null
@@ -0,0 +1,43 @@
+From 60f3caff1492e5b8616b9578c4bedb5c0a88ed14 Mon Sep 17 00:00:00 2001
+From: Hengqi Chen <hengqi.chen@gmail.com>
+Date: Sun, 30 Mar 2025 16:31:09 +0800
+Subject: LoongArch: BPF: Don't override subprog's return value
+
+From: Hengqi Chen <hengqi.chen@gmail.com>
+
+commit 60f3caff1492e5b8616b9578c4bedb5c0a88ed14 upstream.
+
+The verifier test `calls: div by 0 in subprog` triggers a panic at the
+ld.bu instruction. The ld.bu insn is trying to load a byte from the
+memory address returned by the subprog. The subprog actually sets the
+correct address in the a5 register (the dedicated register for BPF
+return values). But since commit 73c359d1d356 ("LoongArch: BPF:
+Sign-extend return values") we also sign-extend a5 into the a0 register
+(the native return-value register on LoongArch). For function call
+insns, we later propagate the a0 register back to the a5 register. This
+is right for native calls but wrong for bpf2bpf calls, which expect a
+zero-extended return value in the a5 register. So only move a0 to a5
+for native calls (i.e. non-BPF_PSEUDO_CALL).
+
+Cc: stable@vger.kernel.org
+Fixes: 73c359d1d356 ("LoongArch: BPF: Sign-extend return values")
+Signed-off-by: Hengqi Chen <hengqi.chen@gmail.com>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/net/bpf_jit.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/arch/loongarch/net/bpf_jit.c
++++ b/arch/loongarch/net/bpf_jit.c
+@@ -844,7 +844,10 @@ static int build_insn(const struct bpf_i
+               move_addr(ctx, t1, func_addr);
+               emit_insn(ctx, jirl, LOONGARCH_GPR_RA, t1, 0);
+-              move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0);
++
++              if (insn->src_reg != BPF_PSEUDO_CALL)
++                      move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0);
++
+               break;
+       /* tail call */
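
Illustrative aside (not part of the queued patch): a standalone C sketch
of why a 32-bit sign-extending move corrupts a 64-bit address, which is
what happened when the sign-extended a0 was copied back into a5 after a
bpf2bpf call. The address value is arbitrary:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t a5 = 0x90000000aabbccddULL; /* hypothetical address */
            int64_t a0 = (int32_t)a5; /* addi.w-style 32-bit sign extension */

            printf("a5 = %#llx\n", (unsigned long long)a5);
            printf("a0 = %#llx\n", (unsigned long long)a0);
            /* prints a0 = 0xffffffffaabbccdd: the upper half is gone */
            return 0;
    }
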
diff --git a/queue-6.6/loongarch-bpf-fix-off-by-one-error-in-build_prologue.patch b/queue-6.6/loongarch-bpf-fix-off-by-one-error-in-build_prologue.patch
new file mode 100644 (file)
index 0000000..ce8938f
--- /dev/null
@@ -0,0 +1,60 @@
+From 7e2586991e36663c9bc48c828b83eab180ad30a9 Mon Sep 17 00:00:00 2001
+From: Hengqi Chen <hengqi.chen@gmail.com>
+Date: Sun, 30 Mar 2025 16:31:09 +0800
+Subject: LoongArch: BPF: Fix off-by-one error in build_prologue()
+
+From: Hengqi Chen <hengqi.chen@gmail.com>
+
+commit 7e2586991e36663c9bc48c828b83eab180ad30a9 upstream.
+
+Vincent reported that running BPF progs with tailcalls on LoongArch
+causes a kernel hard lockup. Debugging the issue shows that the JITed
+image is missing a jirl instruction at the end of the epilogue.
+
+There are two passes in JIT compiling: the first pass sets the flags
+and the second pass generates JIT code based on those flags. With BPF
+progs mixing bpf2bpf and tailcalls, build_prologue() generates N insns
+in the first pass but N+1 insns in the second pass. This makes
+epilogue_offset off by one, so we jump to some unexpected insn and
+cause the lockup. Fix this by inserting a nop insn so that both passes
+emit the same number of instructions.
+
+Cc: stable@vger.kernel.org
+Fixes: 5dc615520c4d ("LoongArch: Add BPF JIT support")
+Fixes: bb035ef0cc91 ("LoongArch: BPF: Support mixing bpf2bpf and tailcalls")
+Reported-by: Vincent Li <vincent.mc.li@gmail.com>
+Tested-by: Vincent Li <vincent.mc.li@gmail.com>
+Closes: https://lore.kernel.org/loongarch/CAK3+h2w6WESdBN3UCr3WKHByD7D6Q_Ve1EDAjotVrnx6Or_c8g@mail.gmail.com/
+Closes: https://lore.kernel.org/bpf/CAK3+h2woEjG_N=-XzqEGaAeCmgu2eTCUc7p6bP4u8Q+DFHm-7g@mail.gmail.com/
+Signed-off-by: Hengqi Chen <hengqi.chen@gmail.com>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/net/bpf_jit.c |    2 ++
+ arch/loongarch/net/bpf_jit.h |    5 +++++
+ 2 files changed, 7 insertions(+)
+
+--- a/arch/loongarch/net/bpf_jit.c
++++ b/arch/loongarch/net/bpf_jit.c
+@@ -142,6 +142,8 @@ static void build_prologue(struct jit_ct
+        */
+       if (seen_tail_call(ctx) && seen_call(ctx))
+               move_reg(ctx, TCC_SAVED, REG_TCC);
++      else
++              emit_insn(ctx, nop);
+       ctx->stack_size = stack_adjust;
+ }
+--- a/arch/loongarch/net/bpf_jit.h
++++ b/arch/loongarch/net/bpf_jit.h
+@@ -27,6 +27,11 @@ struct jit_data {
+       struct jit_ctx ctx;
+ };
++static inline void emit_nop(union loongarch_instruction *insn)
++{
++      insn->word = INSN_NOP;
++}
++
+ #define emit_insn(ctx, func, ...)                                             \
+ do {                                                                          \
+       if (ctx->image != NULL) {                                               \
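
For illustration (not part of the queued patch): a minimal sketch of the
two-pass JIT invariant the nop preserves. Pass 1 runs with image == NULL
and merely counts instructions; pass 2 writes them, so recorded offsets
such as epilogue_offset stay valid only if every path emits the same
count in both passes:

    /* Miniature of the emit_insn() pattern above; names are
     * illustrative, not the kernel's. */
    struct jit_ctx_sketch {
            unsigned int idx;
            unsigned int *image;    /* NULL during the counting pass */
    };

    static void emit_sketch(struct jit_ctx_sketch *ctx, unsigned int insn)
    {
            if (ctx->image)
                    ctx->image[ctx->idx] = insn;
            ctx->idx++;             /* counted identically in both passes */
    }
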
diff --git a/queue-6.6/loongarch-bpf-use-move_addr-for-bpf_pseudo_func.patch b/queue-6.6/loongarch-bpf-use-move_addr-for-bpf_pseudo_func.patch
new file mode 100644 (file)
index 0000000..abf7c0e
--- /dev/null
@@ -0,0 +1,54 @@
+From 52266f1015a8b5aabec7d127f83d105f702b388e Mon Sep 17 00:00:00 2001
+From: Hengqi Chen <hengqi.chen@gmail.com>
+Date: Sun, 30 Mar 2025 16:31:09 +0800
+Subject: LoongArch: BPF: Use move_addr() for BPF_PSEUDO_FUNC
+
+From: Hengqi Chen <hengqi.chen@gmail.com>
+
+commit 52266f1015a8b5aabec7d127f83d105f702b388e upstream.
+
+Vincent reported that running XDP synproxy program on LoongArch results
+in the following error:
+
+    JIT doesn't support bpf-to-bpf calls
+
+With dmesg:
+
+    multi-func JIT bug 1391 != 1390
+
+The root cause is that the verifier refills the imm with the correct
+addresses of bpf_calls for BPF_PSEUDO_FUNC instructions and then runs
+the last pass of the JIT. So we generate different JIT code for the
+same instruction in the two passes (one for the placeholder and the
+other for the real address). Let's use move_addr() instead, whose
+instruction count does not depend on the address value.
+
+See commit 64f50f6575721ef0 ("LoongArch, bpf: Use 4 instructions for
+function address in JIT") for a similar fix.
+
+Cc: stable@vger.kernel.org
+Fixes: 69c087ba6225 ("bpf: Add bpf_for_each_map_elem() helper")
+Fixes: bb035ef0cc91 ("LoongArch: BPF: Support mixing bpf2bpf and tailcalls")
+Reported-by: Vincent Li <vincent.mc.li@gmail.com>
+Tested-by: Vincent Li <vincent.mc.li@gmail.com>
+Closes: https://lore.kernel.org/loongarch/CAK3+h2yfM9FTNiXvEQBkvtuoJrvzmN4c_NZsFXqEk4Cj1tsBNA@mail.gmail.com/T/#u
+Signed-off-by: Hengqi Chen <hengqi.chen@gmail.com>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/net/bpf_jit.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/arch/loongarch/net/bpf_jit.c
++++ b/arch/loongarch/net/bpf_jit.c
+@@ -872,7 +872,10 @@ static int build_insn(const struct bpf_i
+       {
+               const u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm;
+-              move_imm(ctx, dst, imm64, is32);
++              if (bpf_pseudo_func(insn))
++                      move_addr(ctx, dst, imm64);
++              else
++                      move_imm(ctx, dst, imm64, is32);
+               return 1;
+       }
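
For illustration (not part of the queued patch): the property assumed
here, per the referenced commit 64f50f6575721ef0, is that move_addr()
always emits the same number of instructions while move_imm() picks
shorter encodings for smaller values. A sketch of the contrast, reusing
the hypothetical emitter from the note after the previous patch:

    /* Not the real LoongArch JIT helpers -- length illustration only. */
    static void move_imm_sketch(struct jit_ctx_sketch *ctx, long imm)
    {
            int i, n = (imm >= -2048 && imm < 2048) ? 1 : 4;

            /* insn count depends on the value, which the verifier may
             * rewrite between passes */
            for (i = 0; i < n; i++)
                    emit_sketch(ctx, 0);
    }

    static void move_addr_sketch(struct jit_ctx_sketch *ctx, long addr)
    {
            int i;

            (void)addr;
            /* always four insns, so both passes stay the same length */
            for (i = 0; i < 4; i++)
                    emit_sketch(ctx, 0);
    }
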
diff --git a/queue-6.6/loongarch-increase-arch_dma_minalign-up-to-16.patch b/queue-6.6/loongarch-increase-arch_dma_minalign-up-to-16.patch
new file mode 100644 (file)
index 0000000..69150f5
--- /dev/null
@@ -0,0 +1,35 @@
+From 4103cfe9dcb88010ae4911d3ff417457d1b6a720 Mon Sep 17 00:00:00 2001
+From: Huacai Chen <chenhuacai@loongson.cn>
+Date: Sun, 30 Mar 2025 16:31:09 +0800
+Subject: LoongArch: Increase ARCH_DMA_MINALIGN up to 16
+
+From: Huacai Chen <chenhuacai@loongson.cn>
+
+commit 4103cfe9dcb88010ae4911d3ff417457d1b6a720 upstream.
+
+ARCH_DMA_MINALIGN is 1 by default, but some LoongArch-specific devices
+(such as APBDMA) require 16-byte alignment. When the data buffer length
+is too small, the hardware may make an error writing the cacheline.
+Thus, it is dangerous to allocate a small memory buffer for DMA. It is
+always safe to define ARCH_DMA_MINALIGN as L1_CACHE_BYTES, but that is
+unnecessary (kmalloc() needs small memory objects). Therefore, just
+increase it to 16.
+
+Cc: stable@vger.kernel.org
+Tested-by: Binbin Zhou <zhoubinbin@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/include/asm/cache.h |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/loongarch/include/asm/cache.h
++++ b/arch/loongarch/include/asm/cache.h
+@@ -8,6 +8,8 @@
+ #define L1_CACHE_SHIFT                CONFIG_L1_CACHE_SHIFT
+ #define L1_CACHE_BYTES                (1 << L1_CACHE_SHIFT)
++#define ARCH_DMA_MINALIGN     (16)
++
+ #define __read_mostly __section(".data..read_mostly")
+ #endif /* _ASM_CACHE_H */
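
Aside (an assumption about generic allocator behavior, not something
this patch shows): ARCH_DMA_MINALIGN feeds into the minimum alignment
that kmalloc() guarantees on architectures with non-coherent DMA. The
practical effect, as a hedged driver-style sketch:

    /* With ARCH_DMA_MINALIGN == 16, even a tiny buffer is 16-byte
     * aligned and does not share a 16-byte unit with unrelated data,
     * which is the APBDMA requirement described above. */
    u8 *dma_buf = kmalloc(4, GFP_KERNEL);
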
diff --git a/queue-6.6/ntb_perf-delete-duplicate-dmaengine_unmap_put-call-in-perf_copy_chunk.patch b/queue-6.6/ntb_perf-delete-duplicate-dmaengine_unmap_put-call-in-perf_copy_chunk.patch
new file mode 100644 (file)
index 0000000..f664c69
--- /dev/null
@@ -0,0 +1,41 @@
+From 4279e72cab31dd3eb8c89591eb9d2affa90ab6aa Mon Sep 17 00:00:00 2001
+From: Markus Elfring <elfring@users.sourceforge.net>
+Date: Mon, 23 Sep 2024 10:38:11 +0200
+Subject: ntb_perf: Delete duplicate dmaengine_unmap_put() call in perf_copy_chunk()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Markus Elfring <elfring@users.sourceforge.net>
+
+commit 4279e72cab31dd3eb8c89591eb9d2affa90ab6aa upstream.
+
+The function call “dmaengine_unmap_put(unmap)” was used in an if branch,
+and the same call was then triggered again by the subsequent goto
+statement, so the reference was dropped twice on the error path. Avoid
+this call repetition.
+
+This issue was detected by using the Coccinelle software.
+
+Fixes: 5648e56d03fa ("NTB: ntb_perf: Add full multi-port NTB API support")
+Cc: stable@vger.kernel.org
+Signed-off-by: Markus Elfring <elfring@users.sourceforge.net>
+Signed-off-by: Jon Mason <jdmason@kudzu.us>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/ntb/test/ntb_perf.c |    4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/drivers/ntb/test/ntb_perf.c
++++ b/drivers/ntb/test/ntb_perf.c
+@@ -839,10 +839,8 @@ static int perf_copy_chunk(struct perf_t
+       dma_set_unmap(tx, unmap);
+       ret = dma_submit_error(dmaengine_submit(tx));
+-      if (ret) {
+-              dmaengine_unmap_put(unmap);
++      if (ret)
+               goto err_free_resource;
+-      }
+       dmaengine_unmap_put(unmap);
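
For illustration (not part of the queued patch): the shape of the fixed
error handling, with hypothetical submit()/put() stubs standing in for
the dmaengine calls. Each path now drops the unmap reference exactly
once:

    struct tx_sketch;
    struct unmap_sketch;

    int submit(struct tx_sketch *tx);       /* hypothetical stub */
    void put(struct unmap_sketch *unmap);   /* hypothetical stub */

    static int copy_chunk_sketch(struct tx_sketch *tx,
                                 struct unmap_sketch *unmap)
    {
            int ret = submit(tx);

            if (ret)
                    goto err_free_resource; /* put at the label only */

            put(unmap);                     /* success path: one put */
            return 0;

    err_free_resource:
            put(unmap);                     /* error path: one put, not two */
            return ret;
    }
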
diff --git a/queue-6.6/perf-x86-intel-apply-static-call-for-drain_pebs.patch b/queue-6.6/perf-x86-intel-apply-static-call-for-drain_pebs.patch
new file mode 100644 (file)
index 0000000..d0a2642
--- /dev/null
@@ -0,0 +1,60 @@
+From 314dfe10576912e1d786b13c5d4eee8c51b63caa Mon Sep 17 00:00:00 2001
+From: "Peter Zijlstra (Intel)" <peterz@infradead.org>
+Date: Tue, 21 Jan 2025 07:23:00 -0800
+Subject: perf/x86/intel: Apply static call for drain_pebs
+
+From: Peter Zijlstra (Intel) <peterz@infradead.org>
+
+commit 314dfe10576912e1d786b13c5d4eee8c51b63caa upstream.
+
+The x86_pmu_drain_pebs static call was introduced in commit 7c9903c9bf71
+("x86/perf, static_call: Optimize x86_pmu methods"), but it was never
+actually used: the call sites still invoke the x86_pmu.drain_pebs
+function pointer directly.
+
+Apply the static call for drain_pebs.
+
+Fixes: 7c9903c9bf71 ("x86/perf, static_call: Optimize x86_pmu methods")
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20250121152303.3128733-1-kan.liang@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/events/intel/core.c |    2 +-
+ arch/x86/events/intel/ds.c   |    2 +-
+ arch/x86/events/perf_event.h |    1 +
+ 3 files changed, 3 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -3006,7 +3006,7 @@ static int handle_pmi_common(struct pt_r
+               handled++;
+               x86_pmu_handle_guest_pebs(regs, &data);
+-              x86_pmu.drain_pebs(regs, &data);
++              static_call(x86_pmu_drain_pebs)(regs, &data);
+               status &= intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
+               /*
+--- a/arch/x86/events/intel/ds.c
++++ b/arch/x86/events/intel/ds.c
+@@ -847,7 +847,7 @@ static inline void intel_pmu_drain_pebs_
+ {
+       struct perf_sample_data data;
+-      x86_pmu.drain_pebs(NULL, &data);
++      static_call(x86_pmu_drain_pebs)(NULL, &data);
+ }
+ /*
+--- a/arch/x86/events/perf_event.h
++++ b/arch/x86/events/perf_event.h
+@@ -1052,6 +1052,7 @@ extern struct x86_pmu x86_pmu __read_mos
+ DECLARE_STATIC_CALL(x86_pmu_set_period, *x86_pmu.set_period);
+ DECLARE_STATIC_CALL(x86_pmu_update,     *x86_pmu.update);
++DECLARE_STATIC_CALL(x86_pmu_drain_pebs,       *x86_pmu.drain_pebs);
+ static __always_inline struct x86_perf_task_context_opt *task_context_opt(void *ctx)
+ {
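
For illustration (not part of the queued patch): the static-call pattern
in use here. DECLARE_STATIC_CALL, DEFINE_STATIC_CALL_NULL and
static_call_update() are the real <linux/static_call.h> API; per the
commit message, the define/update side already exists from commit
7c9903c9bf71, so this patch only has to declare it and convert the call
sites:

    /* in a header: */
    DECLARE_STATIC_CALL(x86_pmu_drain_pebs, *x86_pmu.drain_pebs);

    /* in one translation unit (already present per 7c9903c9bf71): */
    DEFINE_STATIC_CALL_NULL(x86_pmu_drain_pebs, *x86_pmu.drain_pebs);

    /* at init, bind the call to the chosen implementation: */
    static_call_update(x86_pmu_drain_pebs, x86_pmu.drain_pebs);

    /* call sites get a patched direct call instead of an indirect
     * call through the x86_pmu method table: */
    static_call(x86_pmu_drain_pebs)(regs, &data);
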
diff --git a/queue-6.6/perf-x86-intel-avoid-disable-pmu-if-cpuc-enabled-in-sample-read.patch b/queue-6.6/perf-x86-intel-avoid-disable-pmu-if-cpuc-enabled-in-sample-read.patch
new file mode 100644 (file)
index 0000000..0a0d4be
--- /dev/null
@@ -0,0 +1,128 @@
+From f9bdf1f953392c9edd69a7f884f78c0390127029 Mon Sep 17 00:00:00 2001
+From: Kan Liang <kan.liang@linux.intel.com>
+Date: Tue, 21 Jan 2025 07:23:01 -0800
+Subject: perf/x86/intel: Avoid disable PMU if !cpuc->enabled in sample read
+
+From: Kan Liang <kan.liang@linux.intel.com>
+
+commit f9bdf1f953392c9edd69a7f884f78c0390127029 upstream.
+
+The WARN_ON(this_cpu_read(cpu_hw_events.enabled)) in
+intel_pmu_save_and_restart_reload() is triggered when sample-reading
+topdown events.
+
+In an NMI handler, cpu_hw_events.enabled is set and used to indicate
+the status of the core PMU. The generic pmu->pmu_disable_count, which
+is updated by the perf_pmu_disable/enable pair, is not touched.
+However, the perf_pmu_disable/enable pair is invoked on a sampling
+read in an NMI handler, so cpuc->enabled is mistakenly set by
+perf_pmu_enable().
+
+Avoid disabling the PMU if the core PMU is already disabled, and
+merge the logic together.
+
+Fixes: 7b2c05a15d29 ("perf/x86/intel: Generic support for hardware TopDown metrics")
+Suggested-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20250121152303.3128733-2-kan.liang@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/events/intel/core.c |   41 +++++++++++++++++++++++------------------
+ arch/x86/events/intel/ds.c   |   11 +----------
+ arch/x86/events/perf_event.h |    2 +-
+ 3 files changed, 25 insertions(+), 29 deletions(-)
+
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -2720,28 +2720,33 @@ static u64 adl_update_topdown_event(stru
+ DEFINE_STATIC_CALL(intel_pmu_update_topdown_event, x86_perf_event_update);
+-static void intel_pmu_read_topdown_event(struct perf_event *event)
++static void intel_pmu_read_event(struct perf_event *event)
+ {
+-      struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
++      if (event->hw.flags & (PERF_X86_EVENT_AUTO_RELOAD | PERF_X86_EVENT_TOPDOWN)) {
++              struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
++              bool pmu_enabled = cpuc->enabled;
++
++              /* Only need to call update_topdown_event() once for group read. */
++              if (is_metric_event(event) && (cpuc->txn_flags & PERF_PMU_TXN_READ))
++                      return;
++
++              cpuc->enabled = 0;
++              if (pmu_enabled)
++                      intel_pmu_disable_all();
++
++              if (is_topdown_event(event))
++                      static_call(intel_pmu_update_topdown_event)(event);
++              else
++                      intel_pmu_drain_pebs_buffer();
++
++              cpuc->enabled = pmu_enabled;
++              if (pmu_enabled)
++                      intel_pmu_enable_all(0);
+-      /* Only need to call update_topdown_event() once for group read. */
+-      if ((cpuc->txn_flags & PERF_PMU_TXN_READ) &&
+-          !is_slots_event(event))
+               return;
++      }
+-      perf_pmu_disable(event->pmu);
+-      static_call(intel_pmu_update_topdown_event)(event);
+-      perf_pmu_enable(event->pmu);
+-}
+-
+-static void intel_pmu_read_event(struct perf_event *event)
+-{
+-      if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
+-              intel_pmu_auto_reload_read(event);
+-      else if (is_topdown_count(event))
+-              intel_pmu_read_topdown_event(event);
+-      else
+-              x86_perf_event_update(event);
++      x86_perf_event_update(event);
+ }
+ static void intel_pmu_enable_fixed(struct perf_event *event)
+--- a/arch/x86/events/intel/ds.c
++++ b/arch/x86/events/intel/ds.c
+@@ -843,7 +843,7 @@ unlock:
+       return 1;
+ }
+-static inline void intel_pmu_drain_pebs_buffer(void)
++void intel_pmu_drain_pebs_buffer(void)
+ {
+       struct perf_sample_data data;
+@@ -1965,15 +1965,6 @@ get_next_pebs_record_by_bit(void *base,
+       return NULL;
+ }
+-void intel_pmu_auto_reload_read(struct perf_event *event)
+-{
+-      WARN_ON(!(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD));
+-
+-      perf_pmu_disable(event->pmu);
+-      intel_pmu_drain_pebs_buffer();
+-      perf_pmu_enable(event->pmu);
+-}
+-
+ /*
+  * Special variant of intel_pmu_save_and_restart() for auto-reload.
+  */
+--- a/arch/x86/events/perf_event.h
++++ b/arch/x86/events/perf_event.h
+@@ -1540,7 +1540,7 @@ void intel_pmu_pebs_disable_all(void);
+ void intel_pmu_pebs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in);
+-void intel_pmu_auto_reload_read(struct perf_event *event);
++void intel_pmu_drain_pebs_buffer(void);
+ void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr);
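
Condensed recap (not part of the queued patch): the idiom the core.c
hunk adopts is to save and restore the core-PMU state directly rather
than going through the perf_pmu_disable()/perf_pmu_enable() refcount,
which NMI-time code does not maintain:

    bool pmu_enabled = cpuc->enabled;

    cpuc->enabled = 0;
    if (pmu_enabled)
            intel_pmu_disable_all();

    /* ... drain PEBS or update the topdown counters ... */

    cpuc->enabled = pmu_enabled;
    if (pmu_enabled)
            intel_pmu_enable_all(0);
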
diff --git a/queue-6.6/platform-x86-isst-correct-command-storage-data-length.patch b/queue-6.6/platform-x86-isst-correct-command-storage-data-length.patch
new file mode 100644 (file)
index 0000000..8cd041f
--- /dev/null
@@ -0,0 +1,45 @@
+From 9462e74c5c983cce34019bfb27f734552bebe59f Mon Sep 17 00:00:00 2001
+From: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+Date: Fri, 28 Mar 2025 15:47:49 -0700
+Subject: platform/x86: ISST: Correct command storage data length
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+
+commit 9462e74c5c983cce34019bfb27f734552bebe59f upstream.
+
+After resume/online, the turbo limit ratio (TRL) is only partially
+restored if the admin explicitly changed the TRL from user space.
+
+A hash table is used to store SST mailbox and MSR settings when they
+are modified, so that those settings can be restored after resume or
+online. The settings are stored in the "data" field of struct isst_cmd,
+which is a 64-bit field, but isst_store_new_cmd() only assigns it from
+a u32 argument. This truncates the upper 32 bits.
+
+Change the argument from u32 to u64.
+
+Fixes: f607874f35cb ("platform/x86: ISST: Restore state on resume")
+Signed-off-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20250328224749.2691272-1-srinivas.pandruvada@linux.intel.com
+Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/platform/x86/intel/speed_select_if/isst_if_common.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
++++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
+@@ -84,7 +84,7 @@ static DECLARE_HASHTABLE(isst_hash, 8);
+ static DEFINE_MUTEX(isst_hash_lock);
+ static int isst_store_new_cmd(int cmd, u32 cpu, int mbox_cmd_type, u32 param,
+-                            u32 data)
++                            u64 data)
+ {
+       struct isst_cmd *sst_cmd;
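
For illustration (not part of the queued patch): a standalone C
demonstration of the truncation the u32 parameter caused. The TRL value
is arbitrary:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t stored;

    static void store_u32(uint32_t data) { stored = data; } /* old */
    static void store_u64(uint64_t data) { stored = data; } /* fixed */

    int main(void)
    {
            uint64_t trl = 0x2122232425262728ULL; /* hypothetical TRL */

            store_u32(trl); /* implicit conversion drops the top half */
            printf("via u32: %#llx\n", (unsigned long long)stored);
            store_u64(trl);
            printf("via u64: %#llx\n", (unsigned long long)stored);
            return 0;
    }
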
diff --git a/queue-6.6/series b/queue-6.6/series
index ebe1d51687cd5909b8adfbf9d1cabe8752fada3a..bbff02ed1056f48f34397f28a42e73a2e7bf6236 100644 (file)
--- a/queue-6.6/series
@@ -227,3 +227,15 @@ tracing-hist-add-poll-pollin-support-on-hist-file.patch
 tracing-hist-support-pollpri-event-for-poll-on-histo.patch
 tracing-correct-the-refcount-if-the-hist-hist_debug-.patch
 drm-amd-display-check-link_index-before-accessing-dc-links.patch
+usbnet-fix-npe-during-rx_complete.patch
+loongarch-increase-arch_dma_minalign-up-to-16.patch
+loongarch-bpf-fix-off-by-one-error-in-build_prologue.patch
+loongarch-bpf-don-t-override-subprog-s-return-value.patch
+loongarch-bpf-use-move_addr-for-bpf_pseudo_func.patch
+x86-hyperv-fix-check-of-return-value-from-snp_set_vmsa.patch
+x86-microcode-amd-fix-__apply_microcode_amd-s-return-value.patch
+acpi-x86-extend-lenovo-yoga-tab-3-quirk-with-skip-gpio-event-handlers.patch
+platform-x86-isst-correct-command-storage-data-length.patch
+ntb_perf-delete-duplicate-dmaengine_unmap_put-call-in-perf_copy_chunk.patch
+perf-x86-intel-apply-static-call-for-drain_pebs.patch
+perf-x86-intel-avoid-disable-pmu-if-cpuc-enabled-in-sample-read.patch
diff --git a/queue-6.6/usbnet-fix-npe-during-rx_complete.patch b/queue-6.6/usbnet-fix-npe-during-rx_complete.patch
new file mode 100644 (file)
index 0000000..b9cfdea
--- /dev/null
@@ -0,0 +1,57 @@
+From 51de3600093429e3b712e5f091d767babc5dd6df Mon Sep 17 00:00:00 2001
+From: Ying Lu <luying1@xiaomi.com>
+Date: Wed, 2 Apr 2025 16:58:59 +0800
+Subject: usbnet: fix NPE during rx_complete
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ying Lu <luying1@xiaomi.com>
+
+commit 51de3600093429e3b712e5f091d767babc5dd6df upstream.
+
+There is a missing usbnet_going_away check in a critical path: the
+usb_submit_urb() call lacks a usbnet_going_away check, whereas
+__usbnet_queue_skb() includes one.
+
+This inconsistency creates a race condition where a URB request may
+succeed, but the corresponding SKB is never queued.
+
+Subsequent processing (e.g., rx_complete → defer_bh →
+__skb_unlink(skb, list)) then attempts to access skb->next, triggering
+a NULL pointer dereference (kernel panic).
+
+Fixes: 04e906839a05 ("usbnet: fix cyclical race on disconnect with work queue")
+Cc: stable@vger.kernel.org
+Signed-off-by: Ying Lu <luying1@xiaomi.com>
+Link: https://patch.msgid.link/4c9ef2efaa07eb7f9a5042b74348a67e5a3a7aea.1743584159.git.luying1@xiaomi.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/usbnet.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -530,7 +530,8 @@ static int rx_submit (struct usbnet *dev
+           netif_device_present (dev->net) &&
+           test_bit(EVENT_DEV_OPEN, &dev->flags) &&
+           !test_bit (EVENT_RX_HALT, &dev->flags) &&
+-          !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) {
++          !test_bit (EVENT_DEV_ASLEEP, &dev->flags) &&
++          !usbnet_going_away(dev)) {
+               switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) {
+               case -EPIPE:
+                       usbnet_defer_kevent (dev, EVENT_RX_HALT);
+@@ -551,8 +552,7 @@ static int rx_submit (struct usbnet *dev
+                       tasklet_schedule (&dev->bh);
+                       break;
+               case 0:
+-                      if (!usbnet_going_away(dev))
+-                              __usbnet_queue_skb(&dev->rxq, skb, rx_start);
++                      __usbnet_queue_skb(&dev->rxq, skb, rx_start);
+               }
+       } else {
+               netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
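
For illustration (not part of the queued patch): the crash site,
simplified from usbnet's defer_bh() path. __skb_unlink() assumes the skb
is actually on the list; if rx_submit() raced with disconnect so that
the URB was submitted but the skb never queued, the unlink dereferences
NULL list pointers:

    /* simplified sketch, not the exact driver code */
    spin_lock_irqsave(&list->lock, flags);
    __skb_unlink(skb, list);   /* skb->next/prev are NULL if the skb
                                  was never queued -> NULL deref */
    spin_unlock_irqrestore(&list->lock, flags);
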
diff --git a/queue-6.6/x86-hyperv-fix-check-of-return-value-from-snp_set_vmsa.patch b/queue-6.6/x86-hyperv-fix-check-of-return-value-from-snp_set_vmsa.patch
new file mode 100644 (file)
index 0000000..7ead8ba
--- /dev/null
@@ -0,0 +1,34 @@
+From e792d843aa3c9d039074cdce728d5803262e57a7 Mon Sep 17 00:00:00 2001
+From: Tianyu Lan <tiala@microsoft.com>
+Date: Thu, 13 Mar 2025 04:52:17 -0400
+Subject: x86/hyperv: Fix check of return value from snp_set_vmsa()
+
+From: Tianyu Lan <tiala@microsoft.com>
+
+commit e792d843aa3c9d039074cdce728d5803262e57a7 upstream.
+
+snp_set_vmsa() returns 0 on success, so fix the check to treat a
+non-zero return value as failure.
+
+Cc: stable@vger.kernel.org
+Fixes: 44676bb9d566 ("x86/hyperv: Add smp support for SEV-SNP guest")
+Signed-off-by: Tianyu Lan <tiala@microsoft.com>
+Reviewed-by: Michael Kelley <mhklinux@outlook.com>
+Link: https://lore.kernel.org/r/20250313085217.45483-1-ltykernel@gmail.com
+Signed-off-by: Wei Liu <wei.liu@kernel.org>
+Message-ID: <20250313085217.45483-1-ltykernel@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/hyperv/ivm.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/hyperv/ivm.c
++++ b/arch/x86/hyperv/ivm.c
+@@ -338,7 +338,7 @@ int hv_snp_boot_ap(int cpu, unsigned lon
+       vmsa->sev_features = sev_status >> 2;
+       ret = snp_set_vmsa(vmsa, true);
+-      if (!ret) {
++      if (ret) {
+               pr_err("RMPADJUST(%llx) failed: %llx\n", (u64)vmsa, ret);
+               free_page((u64)vmsa);
+               return ret;
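
For illustration (not part of the queued patch): a standalone C sketch
of the inverted-check bug class, with a stub standing in for
snp_set_vmsa():

    #include <stdio.h>

    /* stub: 0 on success, negative on failure, like snp_set_vmsa() */
    static long set_vmsa_stub(int succeed) { return succeed ? 0 : -22; }

    int main(void)
    {
            long ret = set_vmsa_stub(1);

            if (!ret)  /* the old, inverted check */
                    printf("success (ret=%ld) handled as an error\n", ret);

            ret = set_vmsa_stub(0);
            if (ret)   /* the fixed check */
                    printf("failure (ret=%ld) correctly detected\n", ret);
            return 0;
    }
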
diff --git a/queue-6.6/x86-microcode-amd-fix-__apply_microcode_amd-s-return-value.patch b/queue-6.6/x86-microcode-amd-fix-__apply_microcode_amd-s-return-value.patch
new file mode 100644 (file)
index 0000000..cb44be8
--- /dev/null
@@ -0,0 +1,33 @@
+From 31ab12df723543047c3fc19cb8f8c4498ec6267f Mon Sep 17 00:00:00 2001
+From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Date: Thu, 27 Mar 2025 19:05:02 -0400
+Subject: x86/microcode/AMD: Fix __apply_microcode_amd()'s return value
+
+From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+
+commit 31ab12df723543047c3fc19cb8f8c4498ec6267f upstream.
+
+When verify_sha256_digest() fails, __apply_microcode_amd() should propagate
+the failure by returning false (and not -1, which is converted to true).
+
+Fixes: 50cef76d5cb0 ("x86/microcode/AMD: Load only SHA256-checksummed patches")
+Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20250327230503.1850368-2-boris.ostrovsky@oracle.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/microcode/amd.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/cpu/microcode/amd.c
++++ b/arch/x86/kernel/cpu/microcode/amd.c
+@@ -603,7 +603,7 @@ static bool __apply_microcode_amd(struct
+       unsigned long p_addr = (unsigned long)&mc->hdr.data_code;
+       if (!verify_sha256_digest(mc->hdr.patch_id, *cur_rev, (const u8 *)p_addr, psize))
+-              return -1;
++              return false;
+       native_wrmsrl(MSR_AMD64_PATCH_LOADER, p_addr);
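
For illustration (not part of the queued patch): a standalone C
demonstration of why returning -1 from a bool function signals success:

    #include <stdbool.h>
    #include <stdio.h>

    static bool verify_ok(void) { return false; } /* digest check fails */

    static bool apply_sketch(void)
    {
            if (!verify_ok())
                    return -1; /* converted to bool: (bool)-1 == true */
            return true;
    }

    int main(void)
    {
            /* prints 1, i.e. the failure reads as success */
            printf("apply_sketch() = %d\n", apply_sketch());
            return 0;
    }
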