--- /dev/null
+From 3aa385a9c75c09b59dcab2ff76423439d23673ab Mon Sep 17 00:00:00 2001
+From: Linus Walleij <linus.walleij@linaro.org>
+Date: Mon, 3 Nov 2025 10:36:18 +0100
+Subject: iio: accel: bmc150: Fix irq assumption regression
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Linus Walleij <linus.walleij@linaro.org>
+
+commit 3aa385a9c75c09b59dcab2ff76423439d23673ab upstream.
+
+The code in bmc150-accel-core.c unconditionally calls
+bmc150_accel_set_interrupt() in the iio_buffer_setup_ops,
+such as on the runtime PM resume path, giving a kernel
+splat like this if the device has no interrupts:
+
+Unable to handle kernel NULL pointer dereference at virtual
+ address 00000001 when read
+
+PC is at bmc150_accel_set_interrupt+0x98/0x194
+LR is at __pm_runtime_resume+0x5c/0x64
+(...)
+Call trace:
+bmc150_accel_set_interrupt from bmc150_accel_buffer_postenable+0x40/0x108
+bmc150_accel_buffer_postenable from __iio_update_buffers+0xbe0/0xcbc
+__iio_update_buffers from enable_store+0x84/0xc8
+enable_store from kernfs_fop_write_iter+0x154/0x1b4
+
+This bug seems to have been in the driver since the beginning,
+but it only manifests recently; I do not know why.
+
+Store the IRQ number in the state struct, as this is a common
+pattern in other drivers, then use this to determine if we have
+IRQ support or not.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@intel.com>
+Reviewed-by: Nuno Sá <nuno.sa@analog.com>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/accel/bmc150-accel-core.c | 5 +++++
+ drivers/iio/accel/bmc150-accel.h | 1 +
+ 2 files changed, 6 insertions(+)
+
+--- a/drivers/iio/accel/bmc150-accel-core.c
++++ b/drivers/iio/accel/bmc150-accel-core.c
+@@ -569,6 +569,10 @@ static int bmc150_accel_set_interrupt(st
+ const struct bmc150_accel_interrupt_info *info = intr->info;
+ int ret;
+
++ /* We do not always have an IRQ */
++ if (data->irq <= 0)
++ return 0;
++
+ if (state) {
+ if (atomic_inc_return(&intr->users) > 1)
+ return 0;
+@@ -1742,6 +1746,7 @@ int bmc150_accel_core_probe(struct devic
+ }
+
+ if (irq > 0) {
++ data->irq = irq;
+ ret = devm_request_threaded_irq(dev, irq,
+ bmc150_accel_irq_handler,
+ bmc150_accel_irq_thread_handler,
+--- a/drivers/iio/accel/bmc150-accel.h
++++ b/drivers/iio/accel/bmc150-accel.h
+@@ -57,6 +57,7 @@ enum bmc150_accel_trigger_id {
+
+ struct bmc150_accel_data {
+ struct regmap *regmap;
++ int irq;
+ struct regulator_bulk_data regulators[2];
+ struct bmc150_accel_interrupt interrupts[BMC150_ACCEL_INTERRUPTS];
+ struct bmc150_accel_trigger triggers[BMC150_ACCEL_TRIGGERS];
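
For context, the fix follows the common "cache the optional IRQ in the
state struct, bail out early when it is absent" pattern. Below is a
minimal userspace sketch of that pattern; the names (fake_accel_state,
fake_set_interrupt) are invented for illustration and are not the
driver's actual code.

#include <stdio.h>

/* Illustrative stand-in for the driver state struct. */
struct fake_accel_state {
	int irq;	/* <= 0 means "no interrupt line available" */
	int users;	/* reference count for the interrupt */
};

/* Mirrors the guarded helper: silently succeed when there is no IRQ. */
static int fake_set_interrupt(struct fake_accel_state *st, int enable)
{
	if (st->irq <= 0)
		return 0;	/* nothing to program, nothing to dereference */

	st->users += enable ? 1 : -1;
	printf("programmed interrupt, users=%d\n", st->users);
	return 0;
}

int main(void)
{
	struct fake_accel_state no_irq = { .irq = 0 };
	struct fake_accel_state with_irq = { .irq = 42 };

	fake_set_interrupt(&no_irq, 1);		/* safe no-op */
	fake_set_interrupt(&with_irq, 1);	/* actually configures */
	return 0;
}

Callers on the buffer-enable and runtime-PM paths then never need to
know whether an interrupt line exists.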
--- /dev/null
+From 21553258b94861a73d7f2cf15469d69240e1170d Mon Sep 17 00:00:00 2001
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Date: Fri, 10 Oct 2025 20:58:48 +0200
+Subject: iio:common:ssp_sensors: Fix an error handling path in ssp_probe()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+commit 21553258b94861a73d7f2cf15469d69240e1170d upstream.
+
+If an error occurs after a successful mfd_add_devices() call, it should be
+undone by a corresponding mfd_remove_devices() call, as already done in the
+remove function.
+
+Fixes: 50dd64d57eee ("iio: common: ssp_sensors: Add sensorhub driver")
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Reviewed-by: Nuno Sá <nuno.sa@analog.com>
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/common/ssp_sensors/ssp_dev.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/iio/common/ssp_sensors/ssp_dev.c
++++ b/drivers/iio/common/ssp_sensors/ssp_dev.c
+@@ -515,7 +515,7 @@ static int ssp_probe(struct spi_device *
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ dev_err(&spi->dev, "Failed to setup spi\n");
+- return ret;
++ goto err_setup_spi;
+ }
+
+ data->fw_dl_state = SSP_FW_DL_STATE_NONE;
+@@ -580,6 +580,8 @@ err_read_reg:
+ err_setup_irq:
+ mutex_destroy(&data->pending_lock);
+ mutex_destroy(&data->comm_lock);
++err_setup_spi:
++ mfd_remove_devices(&spi->dev);
+
+ dev_err(&spi->dev, "Probe failed!\n");
+
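
The fix itself is the standard probe-time goto-unwind ladder: every
failure after a successful mfd_add_devices() must jump to a label that,
among the other cleanups, removes the MFD children again. A hedged
userspace sketch of that ladder, with made-up resource names standing
in for the SSP-specific calls:

#include <stdio.h>

static int acquire(const char *what) { printf("acquire %s\n", what); return 0; }
static void release(const char *what) { printf("release %s\n", what); }

/* Sketch of a probe() that must undo earlier successes on a late failure. */
static int fake_probe(int fail_at_spi_setup)
{
	int ret;

	ret = acquire("mfd children");		/* analogue of mfd_add_devices() */
	if (ret)
		return ret;

	if (fail_at_spi_setup) {		/* analogue of spi_setup() failing */
		ret = -1;
		goto err_setup_spi;
	}

	ret = acquire("irq");
	if (ret)
		goto err_setup_irq;

	return 0;

err_setup_irq:
	release("locks");			/* analogue of mutex_destroy() */
err_setup_spi:
	release("mfd children");		/* analogue of mfd_remove_devices() */
	return ret;
}

int main(void)
{
	fake_probe(1);	/* exercise the failure path */
	return 0;
}

Cleanup labels run in reverse order of acquisition, so a new label only
has to be threaded in at the right rung, as the patch does.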
--- /dev/null
+From 3af0c1fb1cdc351b64ff1a4bc06d491490c1f10a Mon Sep 17 00:00:00 2001
+From: Francesco Lavra <flavra@baylibre.com>
+Date: Fri, 17 Oct 2025 19:32:08 +0200
+Subject: iio: imu: st_lsm6dsx: fix array size for st_lsm6dsx_settings fields
+
+From: Francesco Lavra <flavra@baylibre.com>
+
+commit 3af0c1fb1cdc351b64ff1a4bc06d491490c1f10a upstream.
+
+The `decimator` and `batch` fields of struct st_lsm6dsx_settings
+are arrays indexed by sensor type, not by sensor hardware
+identifier; moreover, the `batch` field is only used for the
+accelerometer and gyroscope.
+Change the array size for `decimator` from ST_LSM6DSX_MAX_ID to
+ST_LSM6DSX_ID_MAX, and change the array size for `batch` from
+ST_LSM6DSX_MAX_ID to 2; move the enum st_lsm6dsx_sensor_id
+definition so that the ST_LSM6DSX_ID_MAX value is usable within
+the struct st_lsm6dsx_settings definition.
+
+Fixes: 801a6e0af0c6c ("iio: imu: st_lsm6dsx: add support to LSM6DSO")
+Signed-off-by: Francesco Lavra <flavra@baylibre.com>
+Acked-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h | 22 +++++++++++-----------
+ 1 file changed, 11 insertions(+), 11 deletions(-)
+
+--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
++++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
+@@ -226,6 +226,15 @@ struct st_lsm6dsx_event_settings {
+ u8 wakeup_src_x_mask;
+ };
+
++enum st_lsm6dsx_sensor_id {
++ ST_LSM6DSX_ID_GYRO,
++ ST_LSM6DSX_ID_ACC,
++ ST_LSM6DSX_ID_EXT0,
++ ST_LSM6DSX_ID_EXT1,
++ ST_LSM6DSX_ID_EXT2,
++ ST_LSM6DSX_ID_MAX
++};
++
+ enum st_lsm6dsx_ext_sensor_id {
+ ST_LSM6DSX_ID_MAGN,
+ };
+@@ -311,23 +320,14 @@ struct st_lsm6dsx_settings {
+ struct st_lsm6dsx_reg drdy_mask;
+ struct st_lsm6dsx_odr_table_entry odr_table[2];
+ struct st_lsm6dsx_fs_table_entry fs_table[2];
+- struct st_lsm6dsx_reg decimator[ST_LSM6DSX_MAX_ID];
+- struct st_lsm6dsx_reg batch[ST_LSM6DSX_MAX_ID];
++ struct st_lsm6dsx_reg decimator[ST_LSM6DSX_ID_MAX];
++ struct st_lsm6dsx_reg batch[2];
+ struct st_lsm6dsx_fifo_ops fifo_ops;
+ struct st_lsm6dsx_hw_ts_settings ts_settings;
+ struct st_lsm6dsx_shub_settings shub_settings;
+ struct st_lsm6dsx_event_settings event_settings;
+ };
+
+-enum st_lsm6dsx_sensor_id {
+- ST_LSM6DSX_ID_GYRO,
+- ST_LSM6DSX_ID_ACC,
+- ST_LSM6DSX_ID_EXT0,
+- ST_LSM6DSX_ID_EXT1,
+- ST_LSM6DSX_ID_EXT2,
+- ST_LSM6DSX_ID_MAX,
+-};
+-
+ enum st_lsm6dsx_fifo_mode {
+ ST_LSM6DSX_FIFO_BYPASS = 0x0,
+ ST_LSM6DSX_FIFO_CONT = 0x6,
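
The underlying idiom is to size enum-indexed arrays with the enum's
terminating _MAX value, which in turn requires the enum to be declared
before the struct that embeds the arrays. A self-contained sketch of
the idiom with invented names (enum sensor_id, struct sensor_settings),
not the st_lsm6dsx definitions themselves:

#include <stdio.h>

/* Enum must precede the struct so SENSOR_ID_MAX can size its arrays. */
enum sensor_id {
	SENSOR_ID_GYRO,
	SENSOR_ID_ACC,
	SENSOR_ID_EXT0,
	SENSOR_ID_MAX
};

struct sensor_reg {
	unsigned char addr;
	unsigned char mask;
};

struct sensor_settings {
	struct sensor_reg decimator[SENSOR_ID_MAX];	/* indexed by enum sensor_id */
	struct sensor_reg batch[2];			/* only gyro + accel batch */
};

int main(void)
{
	struct sensor_settings s = { 0 };

	s.decimator[SENSOR_ID_EXT0].addr = 0x08;	/* always in bounds */
	printf("decimator entries: %zu\n",
	       sizeof(s.decimator) / sizeof(s.decimator[0]));
	return 0;
}

Sizing the arrays by an unrelated constant (as the old code did with
ST_LSM6DSX_MAX_ID) compiles fine but silently over- or under-allocates.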
--- /dev/null
+From 841ecc979b18d3227fad5e2d6a1e6f92688776b5 Mon Sep 17 00:00:00 2001
+From: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Date: Fri, 28 Nov 2025 16:53:46 +0000
+Subject: MIPS: mm: kmalloc tlb_vpn array to avoid stack overflow
+
+From: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+
+commit 841ecc979b18d3227fad5e2d6a1e6f92688776b5 upstream.
+
+Owing to Config4.MMUSizeExt and VTLB/FTLB MMU features, later MIPSr2+
+cores can have more than 64 TLB entries. Therefore allocate an array
+for uniquification instead of placing too small an array on the stack.
+
+Fixes: 35ad7e181541 ("MIPS: mm: tlb-r4k: Uniquify TLB entries on init")
+Co-developed-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Cc: stable@vger.kernel.org # v6.17+: 9f048fa48740: MIPS: mm: Prevent a TLB shutdown on initial uniquification
+Cc: stable@vger.kernel.org # v6.17+
+Tested-by: Gregory CLEMENT <gregory.clement@bootlin.com>
+Tested-by: Klara Modin <klarasmodin@gmail.com>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/mips/mm/tlb-r4k.c | 18 ++++++++++++++++--
+ 1 file changed, 16 insertions(+), 2 deletions(-)
+
+--- a/arch/mips/mm/tlb-r4k.c
++++ b/arch/mips/mm/tlb-r4k.c
+@@ -12,6 +12,7 @@
+ #include <linux/init.h>
+ #include <linux/sched.h>
+ #include <linux/smp.h>
++#include <linux/memblock.h>
+ #include <linux/mm.h>
+ #include <linux/hugetlb.h>
+ #include <linux/export.h>
+@@ -512,17 +513,26 @@ static int r4k_vpn_cmp(const void *a, co
+ * Initialise all TLB entries with unique values that do not clash with
+ * what we have been handed over and what we'll be using ourselves.
+ */
+-static void r4k_tlb_uniquify(void)
++static void __ref r4k_tlb_uniquify(void)
+ {
+- unsigned long tlb_vpns[1 << MIPS_CONF1_TLBS_SIZE];
+ int tlbsize = current_cpu_data.tlbsize;
++ bool use_slab = slab_is_available();
+ int start = num_wired_entries();
++ phys_addr_t tlb_vpn_size;
++ unsigned long *tlb_vpns;
+ unsigned long vpn_mask;
+ int cnt, ent, idx, i;
+
+ vpn_mask = GENMASK(cpu_vmbits - 1, 13);
+ vpn_mask |= IS_ENABLED(CONFIG_64BIT) ? 3ULL << 62 : 1 << 31;
+
++ tlb_vpn_size = tlbsize * sizeof(*tlb_vpns);
++ tlb_vpns = (use_slab ?
++ kmalloc(tlb_vpn_size, GFP_KERNEL) :
++ memblock_alloc_raw(tlb_vpn_size, sizeof(*tlb_vpns)));
++ if (WARN_ON(!tlb_vpns))
++ return; /* Pray local_flush_tlb_all() is good enough. */
++
+ htw_stop();
+
+ for (i = start, cnt = 0; i < tlbsize; i++, cnt++) {
+@@ -575,6 +585,10 @@ static void r4k_tlb_uniquify(void)
+ tlbw_use_hazard();
+ htw_start();
+ flush_micro_tlb();
++ if (use_slab)
++ kfree(tlb_vpns);
++ else
++ memblock_free(tlb_vpns, tlb_vpn_size);
+ }
+
+ /*
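
Structurally the patch picks an allocator once, based on
slab_is_available(), and frees through the matching path afterwards. A
rough userspace analogue of that shape, with malloc() standing in for
kmalloc() and a static pool standing in for memblock; all names here
are invented:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-ins for the two kernel allocators: a fixed early-boot pool
 * (memblock) and the normal heap (slab). The choice is made once and
 * remembered so the matching free path runs at the end. */
static unsigned long early_pool[64];
static size_t early_used;

static unsigned long *early_alloc(size_t nr)
{
	unsigned long *p = &early_pool[early_used];

	if (early_used + nr > sizeof(early_pool) / sizeof(early_pool[0]))
		return NULL;
	early_used += nr;
	return p;
}

static void fill_unique(int have_heap, size_t nr_entries)
{
	unsigned long *vpns;

	vpns = have_heap ? malloc(nr_entries * sizeof(*vpns))
			 : early_alloc(nr_entries);
	if (!vpns) {
		fprintf(stderr, "allocation failed, bailing out\n");
		return;		/* the kernel code WARNs and returns too */
	}

	memset(vpns, 0, nr_entries * sizeof(*vpns));
	/* ... gather, sort and use the entries here ... */

	if (have_heap)
		free(vpns);	/* kernel: kfree() vs. memblock_free() */
}

int main(void)
{
	fill_unique(0, 16);	/* early-boot path, pool-backed */
	fill_unique(1, 512);	/* post-slab path, heap-backed */
	return 0;
}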
--- /dev/null
+From 9f048fa487409e364cf866c957cf0b0d782ca5a3 Mon Sep 17 00:00:00 2001
+From: "Maciej W. Rozycki" <macro@orcam.me.uk>
+Date: Thu, 13 Nov 2025 05:21:10 +0000
+Subject: MIPS: mm: Prevent a TLB shutdown on initial uniquification
+
+From: Maciej W. Rozycki <macro@orcam.me.uk>
+
+commit 9f048fa487409e364cf866c957cf0b0d782ca5a3 upstream.
+
+Depending on the particular CPU implementation, a TLB shutdown may occur
+if multiple matching entries are detected upon the execution of a TLBP
+or the TLBWI/TLBWR instructions. Given that we don't know what entries
+we have been handed, we need to be very careful with the initial TLB
+setup and avoid all these instructions.
+
+Therefore read all the TLB entries one by one with the TLBR instruction,
+bypassing the content addressing logic, and truncate any large pages in
+place so as to avoid a case in the second step where an incoming entry
+for a large page at a lower address overlaps with a replacement entry
+chosen at another index. Then preinitialize the TLB using addresses
+outside our usual unique range and avoiding clashes with any entries
+received, before making the usual call to local_flush_tlb_all().
+
+This fixes (at least) R4x00 cores if TLBP hits multiple matching TLB
+entries (the SGI IP22 PROM, for example, sets up all TLBs to the same
+virtual address).
+
+Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Fixes: 35ad7e181541 ("MIPS: mm: tlb-r4k: Uniquify TLB entries on init")
+Cc: stable@vger.kernel.org
+Reviewed-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
+Tested-by: Jiaxun Yang <jiaxun.yang@flygoat.com> # Boston I6400, M5150 sim
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/mips/mm/tlb-r4k.c | 102 ++++++++++++++++++++++++++++++-------------------
+ 1 file changed, 64 insertions(+), 38 deletions(-)
+
+--- a/arch/mips/mm/tlb-r4k.c
++++ b/arch/mips/mm/tlb-r4k.c
+@@ -15,6 +15,7 @@
+ #include <linux/mm.h>
+ #include <linux/hugetlb.h>
+ #include <linux/export.h>
++#include <linux/sort.h>
+
+ #include <asm/cpu.h>
+ #include <asm/cpu-type.h>
+@@ -498,55 +499,79 @@ static int __init set_ntlb(char *str)
+
+ __setup("ntlb=", set_ntlb);
+
+-/* Initialise all TLB entries with unique values */
++
++/* Comparison function for EntryHi VPN fields. */
++static int r4k_vpn_cmp(const void *a, const void *b)
++{
++ long v = *(unsigned long *)a - *(unsigned long *)b;
++ int s = sizeof(long) > sizeof(int) ? sizeof(long) * 8 - 1: 0;
++ return s ? (v != 0) | v >> s : v;
++}
++
++/*
++ * Initialise all TLB entries with unique values that do not clash with
++ * what we have been handed over and what we'll be using ourselves.
++ */
+ static void r4k_tlb_uniquify(void)
+ {
+- int entry = num_wired_entries();
++ unsigned long tlb_vpns[1 << MIPS_CONF1_TLBS_SIZE];
++ int tlbsize = current_cpu_data.tlbsize;
++ int start = num_wired_entries();
++ unsigned long vpn_mask;
++ int cnt, ent, idx, i;
++
++ vpn_mask = GENMASK(cpu_vmbits - 1, 13);
++ vpn_mask |= IS_ENABLED(CONFIG_64BIT) ? 3ULL << 62 : 1 << 31;
+
+ htw_stop();
+- write_c0_entrylo0(0);
+- write_c0_entrylo1(0);
+
+- while (entry < current_cpu_data.tlbsize) {
+- unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
+- unsigned long asid = 0;
+- int idx;
++ for (i = start, cnt = 0; i < tlbsize; i++, cnt++) {
++ unsigned long vpn;
+
+- /* Skip wired MMID to make ginvt_mmid work */
+- if (cpu_has_mmid)
+- asid = MMID_KERNEL_WIRED + 1;
++ write_c0_index(i);
++ mtc0_tlbr_hazard();
++ tlb_read();
++ tlb_read_hazard();
++ vpn = read_c0_entryhi();
++ vpn &= vpn_mask & PAGE_MASK;
++ tlb_vpns[cnt] = vpn;
+
+- /* Check for match before using UNIQUE_ENTRYHI */
+- do {
+- if (cpu_has_mmid) {
+- write_c0_memorymapid(asid);
+- write_c0_entryhi(UNIQUE_ENTRYHI(entry));
+- } else {
+- write_c0_entryhi(UNIQUE_ENTRYHI(entry) | asid);
+- }
+- mtc0_tlbw_hazard();
+- tlb_probe();
+- tlb_probe_hazard();
+- idx = read_c0_index();
+- /* No match or match is on current entry */
+- if (idx < 0 || idx == entry)
+- break;
+- /*
+- * If we hit a match, we need to try again with
+- * a different ASID.
+- */
+- asid++;
+- } while (asid < asid_mask);
+-
+- if (idx >= 0 && idx != entry)
+- panic("Unable to uniquify TLB entry %d", idx);
+-
+- write_c0_index(entry);
++ /* Prevent any large pages from overlapping regular ones. */
++ write_c0_pagemask(read_c0_pagemask() & PM_DEFAULT_MASK);
+ mtc0_tlbw_hazard();
+ tlb_write_indexed();
+- entry++;
++ tlbw_use_hazard();
+ }
+
++ sort(tlb_vpns, cnt, sizeof(tlb_vpns[0]), r4k_vpn_cmp, NULL);
++
++ write_c0_pagemask(PM_DEFAULT_MASK);
++ write_c0_entrylo0(0);
++ write_c0_entrylo1(0);
++
++ idx = 0;
++ ent = tlbsize;
++ for (i = start; i < tlbsize; i++)
++ while (1) {
++ unsigned long entryhi, vpn;
++
++ entryhi = UNIQUE_ENTRYHI(ent);
++ vpn = entryhi & vpn_mask & PAGE_MASK;
++
++ if (idx >= cnt || vpn < tlb_vpns[idx]) {
++ write_c0_entryhi(entryhi);
++ write_c0_index(i);
++ mtc0_tlbw_hazard();
++ tlb_write_indexed();
++ ent++;
++ break;
++ } else if (vpn == tlb_vpns[idx]) {
++ ent++;
++ } else {
++ idx++;
++ }
++ }
++
+ tlbw_use_hazard();
+ htw_start();
+ flush_micro_tlb();
+@@ -592,6 +617,7 @@ static void r4k_tlb_configure(void)
+
+ /* From this point on the ARC firmware is dead. */
+ r4k_tlb_uniquify();
++ local_flush_tlb_all();
+
+ /* Did I tell you that ARC SUCKS? */
+ }
--- /dev/null
+From 6d08340d1e354787d6c65a8c3cdd4d41ffb8a5ed Mon Sep 17 00:00:00 2001
+From: Jiri Olsa <jolsa@kernel.org>
+Date: Tue, 4 Nov 2025 22:54:02 +0100
+Subject: Revert "perf/x86: Always store regs->ip in perf_callchain_kernel()"
+
+From: Jiri Olsa <jolsa@kernel.org>
+
+commit 6d08340d1e354787d6c65a8c3cdd4d41ffb8a5ed upstream.
+
+This reverts commit 83f44ae0f8afcc9da659799db8693f74847e66b3.
+
+Currently we store the initial stacktrace entry twice for non-HW pt_regs,
+that is for callers that fail the perf_hw_regs(regs) condition in
+perf_callchain_kernel().
+
+It's easy to reproduce this with bpftrace:
+
+ # bpftrace -e 'tracepoint:sched:sched_process_exec { print(kstack()); }'
+ Attaching 1 probe...
+
+ bprm_execve+1767
+ bprm_execve+1767
+ do_execveat_common.isra.0+425
+ __x64_sys_execve+56
+ do_syscall_64+133
+ entry_SYSCALL_64_after_hwframe+118
+
+When perf_callchain_kernel() calls unwind_start() with first_frame, AFAICS
+we do not skip regs->ip; it is added as part of the unwind process.
+Hence revert the extra perf_callchain_store() call for the non-HW regs leg.
+
+I was not able to bisect this, so I'm not really sure why this was needed
+in v5.2 and why it's not working anymore, but I could see double entries
+as far back as v5.10.
+
+I tested both ORC and frame-pointer unwinding with and without this fix,
+and except for the initial entry the stacktraces are the same.
+
+Acked-by: Song Liu <song@kernel.org>
+Signed-off-by: Jiri Olsa <jolsa@kernel.org>
+Link: https://lore.kernel.org/r/20251104215405.168643-2-jolsa@kernel.org
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/events/core.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -2772,13 +2772,13 @@ perf_callchain_kernel(struct perf_callch
+ return;
+ }
+
+- if (perf_callchain_store(entry, regs->ip))
+- return;
+-
+- if (perf_hw_regs(regs))
++ if (perf_hw_regs(regs)) {
++ if (perf_callchain_store(entry, regs->ip))
++ return;
+ unwind_start(&state, current, regs, NULL);
+- else
++ } else {
+ unwind_start(&state, current, NULL, (void *)regs->sp);
++ }
+
+ for (; !unwind_done(&state); unwind_next_frame(&state)) {
+ addr = unwind_get_return_address(&state);
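
The revert hinges on one observation: when the unwinder is started from
regs->sp (the non-HW-regs leg) its first reported frame is already
regs->ip, so storing regs->ip up front duplicates it. A toy sketch of
the corrected branching; the unwinder and all names here are invented
stand-ins, not the x86 unwind API:

#include <stdbool.h>
#include <stdio.h>

#define MAX_ENTRIES 8

struct callchain {
	unsigned long ip[MAX_ENTRIES];
	unsigned int nr;
};

static bool store(struct callchain *c, unsigned long ip)
{
	if (c->nr >= MAX_ENTRIES)
		return true;		/* full */
	c->ip[c->nr++] = ip;
	return false;
}

/* Toy stack unwinder: started from the stack pointer, its first reported
 * frame is already the interrupted ip, mirroring the commit's observation. */
static void unwind_from_sp(struct callchain *c, const unsigned long *frames,
			   unsigned int nr)
{
	for (unsigned int i = 0; i < nr; i++)
		if (store(c, frames[i]))
			return;
}

int main(void)
{
	/* frames[0] is the ip the unwinder recovers on its own */
	const unsigned long frames[] = { 0xbeef, 0xcafe, 0xf00d };
	struct callchain c = { .nr = 0 };
	unsigned long regs_ip = 0xbeef;
	bool hw_regs = false;

	if (hw_regs)
		store(&c, regs_ip);	/* only needed when the unwinder won't */
	unwind_from_sp(&c, frames, 3);

	for (unsigned int i = 0; i < c.nr; i++)
		printf("0x%lx\n", c.ip[i]);	/* 0xbeef appears once */
	return 0;
}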
net-atlantic-fix-fragment-overflow-handling-in-rx-pa.patch
mailbox-mailbox-test-fix-debugfs_create_dir-error-ch.patch
spi-bcm63xx-fix-premature-cs-deassertion-on-rx-only-.patch
+revert-perf-x86-always-store-regs-ip-in-perf_callchain_kernel.patch
+iio-imu-st_lsm6dsx-fix-array-size-for-st_lsm6dsx_settings-fields.patch
+iio-common-ssp_sensors-fix-an-error-handling-path-ssp_probe.patch
+iio-accel-bmc150-fix-irq-assumption-regression.patch
+mips-mm-prevent-a-tlb-shutdown-on-initial-uniquification.patch
+mips-mm-kmalloc-tlb_vpn-array-to-avoid-stack-overflow.patch