--- /dev/null
+From 2fa87c71d2adb4b82c105f9191e6120340feff00 Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Tue, 25 Mar 2025 22:04:50 +0100
+Subject: ACPI: x86: Extend Lenovo Yoga Tab 3 quirk with skip GPIO event-handlers
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+commit 2fa87c71d2adb4b82c105f9191e6120340feff00 upstream.
+
+Depending on the secureboot signature on EFI\BOOT\BOOTX86.EFI, the
+Lenovo Yoga Tab 3 UEFI will switch its OSID ACPI variable between
+1 (Windows) and 4 (Android(GMIN)).
+
+In Windows mode a GPIO event handler gets installed for GPO1 pin 5,
+causing Linux' x86-android-tablets code, which deals with the general
+brokenness of this device's ACPI tables, to fail to probe with:
+
+[ 17.853705] x86_android_tablets: error -16 getting GPIO INT33FF:01 5
+[ 17.859623] x86_android_tablets x86_android_tablets: probe with driver x86_android_tablets failed with error -16
+
+which renders sound, the touchscreen, charging-management,
+battery-monitoring and more non-functional.
+
+Add ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS to the existing quirks for this
+device to fix this.
+
+Reported-by: Agoston Lorincz <pipacsba@gmail.com>
+Closes: https://lore.kernel.org/platform-driver-x86/CAMEzqD+DNXrAvUOHviB2O2bjtcbmo3xH=kunKr4nubuMLbb_0A@mail.gmail.com/
+Cc: All applicable <stable@kernel.org>
+Fixes: fe820db35275 ("ACPI: x86: Add skip i2c clients quirk for Lenovo Yoga Tab 3 Pro (YT3-X90F)")
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Link: https://patch.msgid.link/20250325210450.358506-1-hdegoede@redhat.com
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/acpi/x86/utils.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/acpi/x86/utils.c
++++ b/drivers/acpi/x86/utils.c
+@@ -313,7 +313,8 @@ static const struct dmi_system_id acpi_q
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
+ },
+ .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+- ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
++ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
++ ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
+ },
+ {
+ /* Medion Lifetab S10346 */
--- /dev/null
+From 7e2586991e36663c9bc48c828b83eab180ad30a9 Mon Sep 17 00:00:00 2001
+From: Hengqi Chen <hengqi.chen@gmail.com>
+Date: Sun, 30 Mar 2025 16:31:09 +0800
+Subject: LoongArch: BPF: Fix off-by-one error in build_prologue()
+
+From: Hengqi Chen <hengqi.chen@gmail.com>
+
+commit 7e2586991e36663c9bc48c828b83eab180ad30a9 upstream.
+
+Vincent reported that running BPF progs with tailcalls on LoongArch
+causes a kernel hard lockup. Debugging the issue shows that the JITed
+image is missing a jirl instruction at the end of the epilogue.
+
+There are two passes in JIT compiling: the first pass sets the flags and
+the second pass generates JIT code based on those flags. With BPF progs
+mixing bpf2bpf and tailcalls, build_prologue() generates N insns in the
+first pass and then generates N+1 insns in the second pass. This makes
+epilogue_offset off by one, so we jump to an unexpected insn and cause
+the lockup. Fix this by inserting a nop insn.
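+
+A hedged sketch of the mechanism (comments only approximate the real
+control flow): emit_insn() merely bumps ctx->idx while ctx->image is
+NULL, so offsets recorded in the first pass go stale whenever the
+second pass emits a different number of insns:
+
+    /* Pass 1: the flags are not set yet, so the else branch runs;
+     * pass 2: seen_tail_call() && seen_call() is true, so move_reg()
+     * runs instead. Emitting a nop in the else branch keeps both
+     * passes the same size, so epilogue_offset stays valid.
+     */
+    if (seen_tail_call(ctx) && seen_call(ctx))
+        move_reg(ctx, TCC_SAVED, REG_TCC);
+    else
+        emit_insn(ctx, nop);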
+
+Cc: stable@vger.kernel.org
+Fixes: 5dc615520c4d ("LoongArch: Add BPF JIT support")
+Fixes: bb035ef0cc91 ("LoongArch: BPF: Support mixing bpf2bpf and tailcalls")
+Reported-by: Vincent Li <vincent.mc.li@gmail.com>
+Tested-by: Vincent Li <vincent.mc.li@gmail.com>
+Closes: https://lore.kernel.org/loongarch/CAK3+h2w6WESdBN3UCr3WKHByD7D6Q_Ve1EDAjotVrnx6Or_c8g@mail.gmail.com/
+Closes: https://lore.kernel.org/bpf/CAK3+h2woEjG_N=-XzqEGaAeCmgu2eTCUc7p6bP4u8Q+DFHm-7g@mail.gmail.com/
+Signed-off-by: Hengqi Chen <hengqi.chen@gmail.com>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/net/bpf_jit.c | 2 ++
+ arch/loongarch/net/bpf_jit.h | 5 +++++
+ 2 files changed, 7 insertions(+)
+
+--- a/arch/loongarch/net/bpf_jit.c
++++ b/arch/loongarch/net/bpf_jit.c
+@@ -142,6 +142,8 @@ static void build_prologue(struct jit_ct
+ */
+ if (seen_tail_call(ctx) && seen_call(ctx))
+ move_reg(ctx, TCC_SAVED, REG_TCC);
++ else
++ emit_insn(ctx, nop);
+
+ ctx->stack_size = stack_adjust;
+ }
+--- a/arch/loongarch/net/bpf_jit.h
++++ b/arch/loongarch/net/bpf_jit.h
+@@ -25,6 +25,11 @@ struct jit_data {
+ struct jit_ctx ctx;
+ };
+
++static inline void emit_nop(union loongarch_instruction *insn)
++{
++ insn->word = INSN_NOP;
++}
++
+ #define emit_insn(ctx, func, ...) \
+ do { \
+ if (ctx->image != NULL) { \
--- /dev/null
+From 52266f1015a8b5aabec7d127f83d105f702b388e Mon Sep 17 00:00:00 2001
+From: Hengqi Chen <hengqi.chen@gmail.com>
+Date: Sun, 30 Mar 2025 16:31:09 +0800
+Subject: LoongArch: BPF: Use move_addr() for BPF_PSEUDO_FUNC
+
+From: Hengqi Chen <hengqi.chen@gmail.com>
+
+commit 52266f1015a8b5aabec7d127f83d105f702b388e upstream.
+
+Vincent reported that running the XDP synproxy program on LoongArch
+results in the following error:
+
+ JIT doesn't support bpf-to-bpf calls
+
+With dmesg:
+
+ multi-func JIT bug 1391 != 1390
+
+The root cause is that the verifier refills the imm with the correct
+addresses of bpf_calls for BPF_PSEUDO_FUNC instructions and then runs
+the last pass of JIT. So we generate different JIT code for the same
+instruction in the two passes (one with a placeholder and the other
+with the real address). Let's use move_addr() instead.
+
+See commit 64f50f6575721ef0 ("LoongArch, bpf: Use 4 instructions for
+function address in JIT") for a similar fix.
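+
+A hedged illustration of the size instability (the fixed 4-insn pattern
+is from commit 64f50f657572; the placeholder value is illustrative):
+
+    /* move_imm() picks the shortest sequence for its value, so the
+     * verifier's placeholder and the final address can JIT to
+     * different lengths across passes:
+     */
+    move_imm(ctx, dst, 0, is32);            /* may be 1 insn */
+    move_imm(ctx, dst, real_addr, is32);    /* may be 4 insns */
+
+    /* move_addr() always emits the same fixed-length sequence
+     * (lu12i.w + ori + lu32i.d + lu52i.d), so both passes agree:
+     */
+    move_addr(ctx, dst, imm64);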
+
+Cc: stable@vger.kernel.org
+Fixes: 69c087ba6225 ("bpf: Add bpf_for_each_map_elem() helper")
+Fixes: bb035ef0cc91 ("LoongArch: BPF: Support mixing bpf2bpf and tailcalls")
+Reported-by: Vincent Li <vincent.mc.li@gmail.com>
+Tested-by: Vincent Li <vincent.mc.li@gmail.com>
+Closes: https://lore.kernel.org/loongarch/CAK3+h2yfM9FTNiXvEQBkvtuoJrvzmN4c_NZsFXqEk4Cj1tsBNA@mail.gmail.com/T/#u
+Signed-off-by: Hengqi Chen <hengqi.chen@gmail.com>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/net/bpf_jit.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/arch/loongarch/net/bpf_jit.c
++++ b/arch/loongarch/net/bpf_jit.c
+@@ -810,7 +810,10 @@ static int build_insn(const struct bpf_i
+ {
+ const u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm;
+
+- move_imm(ctx, dst, imm64, is32);
++ if (bpf_pseudo_func(insn))
++ move_addr(ctx, dst, imm64);
++ else
++ move_imm(ctx, dst, imm64, is32);
+ return 1;
+ }
+
--- /dev/null
+From 4103cfe9dcb88010ae4911d3ff417457d1b6a720 Mon Sep 17 00:00:00 2001
+From: Huacai Chen <chenhuacai@loongson.cn>
+Date: Sun, 30 Mar 2025 16:31:09 +0800
+Subject: LoongArch: Increase ARCH_DMA_MINALIGN up to 16
+
+From: Huacai Chen <chenhuacai@loongson.cn>
+
+commit 4103cfe9dcb88010ae4911d3ff417457d1b6a720 upstream.
+
+ARCH_DMA_MINALIGN is 1 by default, but some LoongArch-specific devices
+(such as APBDMA) require 16-byte alignment. When the data buffer length
+is too small, the hardware may write the cacheline incorrectly. Thus, it
+is dangerous to allocate a small memory buffer for DMA. It's always safe
+to define ARCH_DMA_MINALIGN as L1_CACHE_BYTES, but that is unnecessary
+(kmalloc() needs small memory objects). Therefore, just increase it to 16.
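+
+A hedged sketch of the hazard (device use and sizes illustrative only):
+
+    /* With ARCH_DMA_MINALIGN == 1, kmalloc() may pack this 8-byte
+     * buffer next to an unrelated object inside one 16-byte DMA
+     * burst, which an APBDMA write can then clobber. With the
+     * minimum raised to 16, each DMA buffer owns its burst window.
+     */
+    buf = kmalloc(8, GFP_KERNEL);
+    dma = dma_map_single(dev, buf, 8, DMA_FROM_DEVICE);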
+
+Cc: stable@vger.kernel.org
+Tested-by: Binbin Zhou <zhoubinbin@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/include/asm/cache.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/loongarch/include/asm/cache.h
++++ b/arch/loongarch/include/asm/cache.h
+@@ -8,6 +8,8 @@
+ #define L1_CACHE_SHIFT CONFIG_L1_CACHE_SHIFT
+ #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
++#define ARCH_DMA_MINALIGN (16)
++
+ #define __read_mostly __section(".data..read_mostly")
+
+ #endif /* _ASM_CACHE_H */
--- /dev/null
+From 4279e72cab31dd3eb8c89591eb9d2affa90ab6aa Mon Sep 17 00:00:00 2001
+From: Markus Elfring <elfring@users.sourceforge.net>
+Date: Mon, 23 Sep 2024 10:38:11 +0200
+Subject: ntb_perf: Delete duplicate dmaengine_unmap_put() call in perf_copy_chunk()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Markus Elfring <elfring@users.sourceforge.net>
+
+commit 4279e72cab31dd3eb8c89591eb9d2affa90ab6aa upstream.
+
+The function call “dmaengine_unmap_put(unmap)” was used in an if branch,
+and the goto statement in that branch jumps to a label which performs
+the same call again, putting the unmap object twice on this error path.
+Drop the redundant call.
+
+This issue was detected by using the Coccinelle software.
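+
+Sketched control flow before the fix (the label body is inferred from
+the description above, abbreviated):
+
+    ret = dma_submit_error(dmaengine_submit(tx));
+    if (ret) {
+        dmaengine_unmap_put(unmap);    /* put #1 */
+        goto err_free_resource;
+    }
+    ...
+    err_free_resource:
+        dmaengine_unmap_put(unmap);    /* put #2: duplicate */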
+
+Fixes: 5648e56d03fa ("NTB: ntb_perf: Add full multi-port NTB API support")
+Cc: stable@vger.kernel.org
+Signed-off-by: Markus Elfring <elfring@users.sourceforge.net>
+Signed-off-by: Jon Mason <jdmason@kudzu.us>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/ntb/test/ntb_perf.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/drivers/ntb/test/ntb_perf.c
++++ b/drivers/ntb/test/ntb_perf.c
+@@ -839,10 +839,8 @@ static int perf_copy_chunk(struct perf_t
+ dma_set_unmap(tx, unmap);
+
+ ret = dma_submit_error(dmaengine_submit(tx));
+- if (ret) {
+- dmaengine_unmap_put(unmap);
++ if (ret)
+ goto err_free_resource;
+- }
+
+ dmaengine_unmap_put(unmap);
+
--- /dev/null
+From 314dfe10576912e1d786b13c5d4eee8c51b63caa Mon Sep 17 00:00:00 2001
+From: "Peter Zijlstra (Intel)" <peterz@infradead.org>
+Date: Tue, 21 Jan 2025 07:23:00 -0800
+Subject: perf/x86/intel: Apply static call for drain_pebs
+
+From: Peter Zijlstra (Intel) <peterz@infradead.org>
+
+commit 314dfe10576912e1d786b13c5d4eee8c51b63caa upstream.
+
+The x86_pmu_drain_pebs static call was introduced in commit 7c9903c9bf71
+("x86/perf, static_call: Optimize x86_pmu methods"), but it is never
+actually used to replace the old indirect x86_pmu.drain_pebs() calls.
+
+Apply the static call for drain_pebs.
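+
+For context, a hedged sketch of the static-call pattern being completed
+(generic shape; the declare and update halves already exist in the x86
+events code):
+
+    DECLARE_STATIC_CALL(x86_pmu_drain_pebs, *x86_pmu.drain_pebs);
+
+    /* filled in once the active x86_pmu is known: */
+    static_call_update(x86_pmu_drain_pebs, x86_pmu.drain_pebs);
+
+    /* call sites then use a patched direct call instead of the
+     * indirect x86_pmu.drain_pebs function pointer:
+     */
+    static_call(x86_pmu_drain_pebs)(regs, &data);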
+
+Fixes: 7c9903c9bf71 ("x86/perf, static_call: Optimize x86_pmu methods")
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20250121152303.3128733-1-kan.liang@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/events/intel/core.c | 2 +-
+ arch/x86/events/intel/ds.c | 2 +-
+ arch/x86/events/perf_event.h | 1 +
+ 3 files changed, 3 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -2975,7 +2975,7 @@ static int handle_pmi_common(struct pt_r
+
+ handled++;
+ x86_pmu_handle_guest_pebs(regs, &data);
+- x86_pmu.drain_pebs(regs, &data);
++ static_call(x86_pmu_drain_pebs)(regs, &data);
+ status &= intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
+
+ /*
+--- a/arch/x86/events/intel/ds.c
++++ b/arch/x86/events/intel/ds.c
+@@ -793,7 +793,7 @@ static inline void intel_pmu_drain_pebs_
+ {
+ struct perf_sample_data data;
+
+- x86_pmu.drain_pebs(NULL, &data);
++ static_call(x86_pmu_drain_pebs)(NULL, &data);
+ }
+
+ /*
+--- a/arch/x86/events/perf_event.h
++++ b/arch/x86/events/perf_event.h
+@@ -1047,6 +1047,7 @@ extern struct x86_pmu x86_pmu __read_mos
+
+ DECLARE_STATIC_CALL(x86_pmu_set_period, *x86_pmu.set_period);
+ DECLARE_STATIC_CALL(x86_pmu_update, *x86_pmu.update);
++DECLARE_STATIC_CALL(x86_pmu_drain_pebs, *x86_pmu.drain_pebs);
+
+ static __always_inline struct x86_perf_task_context_opt *task_context_opt(void *ctx)
+ {
--- /dev/null
+From f9bdf1f953392c9edd69a7f884f78c0390127029 Mon Sep 17 00:00:00 2001
+From: Kan Liang <kan.liang@linux.intel.com>
+Date: Tue, 21 Jan 2025 07:23:01 -0800
+Subject: perf/x86/intel: Avoid disable PMU if !cpuc->enabled in sample read
+
+From: Kan Liang <kan.liang@linux.intel.com>
+
+commit f9bdf1f953392c9edd69a7f884f78c0390127029 upstream.
+
+The WARN_ON(this_cpu_read(cpu_hw_events.enabled)) in
+intel_pmu_save_and_restart_reload() is triggered when sampling reads
+topdown events.
+
+In an NMI handler, cpu_hw_events.enabled is set and used to indicate
+the status of the core PMU. The generic pmu->pmu_disable_count, updated
+in the perf_pmu_disable/enable pair, is not touched.
+However, the perf_pmu_disable/enable pair is invoked on a sampling read
+in an NMI handler, so cpuc->enabled is mistakenly set by
+perf_pmu_enable().
+
+Avoid disabling the PMU if the core PMU is already disabled, and merge
+the logic together.
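+
+A hedged sketch of the mismatch (simplified; the NMI handler details
+are approximate):
+
+    /* NMI entry: the handler disables the core PMU by hand and
+     * records that in cpuc->enabled, while pmu_disable_count
+     * stays 0.
+     */
+    cpuc->enabled = 0;
+    intel_pmu_disable_all();
+
+    /* a sampling read inside the NMI then runs the generic pair: */
+    perf_pmu_disable(event->pmu);    /* count 0 -> 1 */
+    /* ... read ... */
+    perf_pmu_enable(event->pmu);     /* count 1 -> 0: re-enables and
+                                      * sets cpuc->enabled inside the
+                                      * NMI, tripping the WARN_ON */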
+
+Fixes: 7b2c05a15d29 ("perf/x86/intel: Generic support for hardware TopDown metrics")
+Suggested-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20250121152303.3128733-2-kan.liang@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/events/intel/core.c | 41 +++++++++++++++++++++++------------------
+ arch/x86/events/intel/ds.c | 11 +----------
+ arch/x86/events/perf_event.h | 2 +-
+ 3 files changed, 25 insertions(+), 29 deletions(-)
+
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -2689,28 +2689,33 @@ static u64 adl_update_topdown_event(stru
+
+ DEFINE_STATIC_CALL(intel_pmu_update_topdown_event, x86_perf_event_update);
+
+-static void intel_pmu_read_topdown_event(struct perf_event *event)
++static void intel_pmu_read_event(struct perf_event *event)
+ {
+- struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
++ if (event->hw.flags & (PERF_X86_EVENT_AUTO_RELOAD | PERF_X86_EVENT_TOPDOWN)) {
++ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
++ bool pmu_enabled = cpuc->enabled;
++
++ /* Only need to call update_topdown_event() once for group read. */
++ if (is_metric_event(event) && (cpuc->txn_flags & PERF_PMU_TXN_READ))
++ return;
++
++ cpuc->enabled = 0;
++ if (pmu_enabled)
++ intel_pmu_disable_all();
++
++ if (is_topdown_event(event))
++ static_call(intel_pmu_update_topdown_event)(event);
++ else
++ intel_pmu_drain_pebs_buffer();
++
++ cpuc->enabled = pmu_enabled;
++ if (pmu_enabled)
++ intel_pmu_enable_all(0);
+
+- /* Only need to call update_topdown_event() once for group read. */
+- if ((cpuc->txn_flags & PERF_PMU_TXN_READ) &&
+- !is_slots_event(event))
+ return;
++ }
+
+- perf_pmu_disable(event->pmu);
+- static_call(intel_pmu_update_topdown_event)(event);
+- perf_pmu_enable(event->pmu);
+-}
+-
+-static void intel_pmu_read_event(struct perf_event *event)
+-{
+- if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
+- intel_pmu_auto_reload_read(event);
+- else if (is_topdown_count(event))
+- intel_pmu_read_topdown_event(event);
+- else
+- x86_perf_event_update(event);
++ x86_perf_event_update(event);
+ }
+
+ static void intel_pmu_enable_fixed(struct perf_event *event)
+--- a/arch/x86/events/intel/ds.c
++++ b/arch/x86/events/intel/ds.c
+@@ -789,7 +789,7 @@ unlock:
+ return 1;
+ }
+
+-static inline void intel_pmu_drain_pebs_buffer(void)
++void intel_pmu_drain_pebs_buffer(void)
+ {
+ struct perf_sample_data data;
+
+@@ -1902,15 +1902,6 @@ get_next_pebs_record_by_bit(void *base,
+ return NULL;
+ }
+
+-void intel_pmu_auto_reload_read(struct perf_event *event)
+-{
+- WARN_ON(!(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD));
+-
+- perf_pmu_disable(event->pmu);
+- intel_pmu_drain_pebs_buffer();
+- perf_pmu_enable(event->pmu);
+-}
+-
+ /*
+ * Special variant of intel_pmu_save_and_restart() for auto-reload.
+ */
+--- a/arch/x86/events/perf_event.h
++++ b/arch/x86/events/perf_event.h
+@@ -1536,7 +1536,7 @@ void intel_pmu_pebs_disable_all(void);
+
+ void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in);
+
+-void intel_pmu_auto_reload_read(struct perf_event *event);
++void intel_pmu_drain_pebs_buffer(void);
+
+ void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr);
+
--- /dev/null
+From 9462e74c5c983cce34019bfb27f734552bebe59f Mon Sep 17 00:00:00 2001
+From: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+Date: Fri, 28 Mar 2025 15:47:49 -0700
+Subject: platform/x86: ISST: Correct command storage data length
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+
+commit 9462e74c5c983cce34019bfb27f734552bebe59f upstream.
+
+After resume/online, the turbo limit ratio (TRL) is only partially
+restored if the admin explicitly changed TRL from user space.
+
+A hash table is used to store SST mailbox and MSR settings when they are
+modified, so that those settings can be restored after resume or online.
+The settings are stored in the "data" field of struct isst_cmd, which is
+a 64-bit field. But isst_store_new_cmd() takes the data argument as a
+u32, which truncates the upper 32 bits.
+
+Change the argument to u64 from u32.
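+
+The truncation in miniature (values illustrative):
+
+    static int store(u32 data);              /* old prototype */
+
+    u64 trl = 0x00aa00bb00cc00ddULL;         /* full 64-bit setting */
+    store(trl);    /* implicit conversion keeps 0x00cc00dd and
+                    * silently drops bits 63:32 */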
+
+Fixes: f607874f35cb ("platform/x86: ISST: Restore state on resume")
+Signed-off-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20250328224749.2691272-1-srinivas.pandruvada@linux.intel.com
+Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/platform/x86/intel/speed_select_if/isst_if_common.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
++++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
+@@ -77,7 +77,7 @@ static DECLARE_HASHTABLE(isst_hash, 8);
+ static DEFINE_MUTEX(isst_hash_lock);
+
+ static int isst_store_new_cmd(int cmd, u32 cpu, int mbox_cmd_type, u32 param,
+- u32 data)
++ u64 data)
+ {
+ struct isst_cmd *sst_cmd;
+
drm-amdgpu-gfx11-fix-num_mec.patch
tty-serial-fsl_lpuart-use-uartmodir-register-bits-fo.patch
tty-serial-fsl_lpuart-disable-transmitter-before-cha.patch
+usbnet-fix-npe-during-rx_complete.patch
+loongarch-increase-arch_dma_minalign-up-to-16.patch
+loongarch-bpf-fix-off-by-one-error-in-build_prologue.patch
+loongarch-bpf-use-move_addr-for-bpf_pseudo_func.patch
+acpi-x86-extend-lenovo-yoga-tab-3-quirk-with-skip-gpio-event-handlers.patch
+platform-x86-isst-correct-command-storage-data-length.patch
+ntb_perf-delete-duplicate-dmaengine_unmap_put-call-in-perf_copy_chunk.patch
+perf-x86-intel-apply-static-call-for-drain_pebs.patch
+perf-x86-intel-avoid-disable-pmu-if-cpuc-enabled-in-sample-read.patch
--- /dev/null
+From 51de3600093429e3b712e5f091d767babc5dd6df Mon Sep 17 00:00:00 2001
+From: Ying Lu <luying1@xiaomi.com>
+Date: Wed, 2 Apr 2025 16:58:59 +0800
+Subject: usbnet: fix NPE during rx_complete
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ying Lu <luying1@xiaomi.com>
+
+commit 51de3600093429e3b712e5f091d767babc5dd6df upstream.
+
+There is a missing usbnet_going_away check on the critical path:
+the usb_submit_urb() call lacks a usbnet_going_away validation,
+whereas __usbnet_queue_skb() includes this check.
+
+This inconsistency creates a race condition in which a URB request
+may succeed while the corresponding SKB is never queued.
+
+Subsequent processing (e.g., rx_complete → defer_bh →
+__skb_unlink(skb, list)) then accesses skb->next, triggering a NULL
+pointer dereference (kernel panic).
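+
+Why the unlink faults, abridged from __skb_unlink() in
+include/linux/skbuff.h (pointer bookkeeping only):
+
+    next = skb->next;     /* NULL: this skb was never queued */
+    prev = skb->prev;
+    skb->next = skb->prev = NULL;
+    next->prev = prev;    /* NULL pointer dereference */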
+
+Fixes: 04e906839a05 ("usbnet: fix cyclical race on disconnect with work queue")
+Cc: stable@vger.kernel.org
+Signed-off-by: Ying Lu <luying1@xiaomi.com>
+Link: https://patch.msgid.link/4c9ef2efaa07eb7f9a5042b74348a67e5a3a7aea.1743584159.git.luying1@xiaomi.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/usbnet.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -530,7 +530,8 @@ static int rx_submit (struct usbnet *dev
+ netif_device_present (dev->net) &&
+ test_bit(EVENT_DEV_OPEN, &dev->flags) &&
+ !test_bit (EVENT_RX_HALT, &dev->flags) &&
+- !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) {
++ !test_bit (EVENT_DEV_ASLEEP, &dev->flags) &&
++ !usbnet_going_away(dev)) {
+ switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) {
+ case -EPIPE:
+ usbnet_defer_kevent (dev, EVENT_RX_HALT);
+@@ -551,8 +552,7 @@ static int rx_submit (struct usbnet *dev
+ tasklet_schedule (&dev->bh);
+ break;
+ case 0:
+- if (!usbnet_going_away(dev))
+- __usbnet_queue_skb(&dev->rxq, skb, rx_start);
++ __usbnet_queue_skb(&dev->rxq, skb, rx_start);
+ }
+ } else {
+ netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");