--- /dev/null
+From 54add64c9da03b0fc23323c1ff8e7ba2521e92e1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 1 Oct 2024 23:28:34 +0200
+Subject: ACPI: battery: Fix possible crash when unregistering a battery hook
+
+From: Armin Wolf <W_Armin@gmx.de>
+
+[ Upstream commit 76959aff14a0012ad6b984ec7686d163deccdc16 ]
+
+When a battery hook returns an error while a new battery is being
+added, the battery hook is automatically unregistered.
+However, the battery hook provider cannot know that, so it will later
+call battery_hook_unregister() on the already unregistered battery
+hook, resulting in a crash.
+
+Fix this by using the list head to mark battery hooks that have
+already been unregistered, so that battery_hook_unregister() can
+ignore them.
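+
+A minimal sketch of the resulting pattern (condensed from the diff
+below):
+
+  /*
+   * list_del_init() leaves the list head self-pointing, so an
+   * unregistered hook satisfies list_empty(&hook->list).
+   */
+  list_del_init(&hook->list);
+
+  /* A later, second unregister call then becomes a no-op: */
+  if (!list_empty(&hook->list))
+      battery_hook_unregister_unlocked(hook);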
+
+Fixes: fa93854f7a7e ("battery: Add the battery hooking API")
+Signed-off-by: Armin Wolf <W_Armin@gmx.de>
+Link: https://patch.msgid.link/20241001212835.341788-3-W_Armin@gmx.de
+Cc: All applicable <stable@vger.kernel.org>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/acpi/battery.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
+index 04610036e5dc5..916cdf44be893 100644
+--- a/drivers/acpi/battery.c
++++ b/drivers/acpi/battery.c
+@@ -715,7 +715,7 @@ static void battery_hook_unregister_unlocked(struct acpi_battery_hook *hook)
+ if (!hook->remove_battery(battery->bat, hook))
+ power_supply_changed(battery->bat);
+ }
+- list_del(&hook->list);
++ list_del_init(&hook->list);
+
+ pr_info("extension unregistered: %s\n", hook->name);
+ }
+@@ -723,7 +723,14 @@ static void battery_hook_unregister_unlocked(struct acpi_battery_hook *hook)
+ void battery_hook_unregister(struct acpi_battery_hook *hook)
+ {
+ mutex_lock(&hook_mutex);
+- battery_hook_unregister_unlocked(hook);
++ /*
++ * Ignore already unregistered battery hooks. This might happen
++ * if a battery hook was previously unloaded due to an error when
++ * adding a new battery.
++ */
++ if (!list_empty(&hook->list))
++ battery_hook_unregister_unlocked(hook);
++
+ mutex_unlock(&hook_mutex);
+ }
+ EXPORT_SYMBOL_GPL(battery_hook_unregister);
+@@ -733,7 +740,6 @@ void battery_hook_register(struct acpi_battery_hook *hook)
+ struct acpi_battery *battery;
+
+ mutex_lock(&hook_mutex);
+- INIT_LIST_HEAD(&hook->list);
+ list_add(&hook->list, &battery_hook_list);
+ /*
+ * Now that the driver is registered, we need
+--
+2.43.0
+
--- /dev/null
+From c751e2054fc881001f090ea2067d9304aee37eaf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 1 Oct 2024 23:28:33 +0200
+Subject: ACPI: battery: Simplify battery hook locking
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Armin Wolf <W_Armin@gmx.de>
+
+[ Upstream commit 86309cbed26139e1caae7629dcca1027d9a28e75 ]
+
+Move the conditional locking from __battery_hook_unregister()
+into battery_hook_unregister() and rename the low-level function
+to simplify the locking during battery hook removal.
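+
+The resulting convention, taken from the diff below: callers that
+already hold hook_mutex use the _unlocked variant directly, while the
+exported function takes the mutex itself:
+
+  void battery_hook_unregister(struct acpi_battery_hook *hook)
+  {
+      mutex_lock(&hook_mutex);
+      battery_hook_unregister_unlocked(hook);
+      mutex_unlock(&hook_mutex);
+  }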
+
+Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Reviewed-by: Pali Rohár <pali@kernel.org>
+Signed-off-by: Armin Wolf <W_Armin@gmx.de>
+Link: https://patch.msgid.link/20241001212835.341788-2-W_Armin@gmx.de
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Stable-dep-of: 76959aff14a0 ("ACPI: battery: Fix possible crash when unregistering a battery hook")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/acpi/battery.c | 18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
+index 44ca989f16466..04610036e5dc5 100644
+--- a/drivers/acpi/battery.c
++++ b/drivers/acpi/battery.c
+@@ -703,28 +703,28 @@ static LIST_HEAD(acpi_battery_list);
+ static LIST_HEAD(battery_hook_list);
+ static DEFINE_MUTEX(hook_mutex);
+
+-static void __battery_hook_unregister(struct acpi_battery_hook *hook, int lock)
++static void battery_hook_unregister_unlocked(struct acpi_battery_hook *hook)
+ {
+ struct acpi_battery *battery;
++
+ /*
+ * In order to remove a hook, we first need to
+ * de-register all the batteries that are registered.
+ */
+- if (lock)
+- mutex_lock(&hook_mutex);
+ list_for_each_entry(battery, &acpi_battery_list, list) {
+ if (!hook->remove_battery(battery->bat, hook))
+ power_supply_changed(battery->bat);
+ }
+ list_del(&hook->list);
+- if (lock)
+- mutex_unlock(&hook_mutex);
++
+ pr_info("extension unregistered: %s\n", hook->name);
+ }
+
+ void battery_hook_unregister(struct acpi_battery_hook *hook)
+ {
+- __battery_hook_unregister(hook, 1);
++ mutex_lock(&hook_mutex);
++ battery_hook_unregister_unlocked(hook);
++ mutex_unlock(&hook_mutex);
+ }
+ EXPORT_SYMBOL_GPL(battery_hook_unregister);
+
+@@ -750,7 +750,7 @@ void battery_hook_register(struct acpi_battery_hook *hook)
+ * hooks.
+ */
+ pr_err("extension failed to load: %s", hook->name);
+- __battery_hook_unregister(hook, 0);
++ battery_hook_unregister_unlocked(hook);
+ goto end;
+ }
+
+@@ -789,7 +789,7 @@ static void battery_hook_add_battery(struct acpi_battery *battery)
+ */
+ pr_err("error in extension, unloading: %s",
+ hook_node->name);
+- __battery_hook_unregister(hook_node, 0);
++ battery_hook_unregister_unlocked(hook_node);
+ }
+ }
+ mutex_unlock(&hook_mutex);
+@@ -822,7 +822,7 @@ static void __exit battery_hook_exit(void)
+ * need to remove the hooks.
+ */
+ list_for_each_entry_safe(hook, ptr, &battery_hook_list, list) {
+- __battery_hook_unregister(hook, 1);
++ battery_hook_unregister(hook);
+ }
+ mutex_destroy(&hook_mutex);
+ }
+--
+2.43.0
+
--- /dev/null
+From 0270e700c6721659527004719f95d29faba729b5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Oct 2024 13:06:37 +0100
+Subject: arm64: cputype: Add Neoverse-N3 definitions
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+[ Upstream commit 924725707d80bc2588cefafef76ff3f164d299bc ]
+
+Add cputype definitions for Neoverse-N3. These will be used for errata
+detection in subsequent patches.
+
+These values can be found in Table A-261 ("MIDR_EL1 bit descriptions")
+in issue 02 of the Neoverse-N3 TRM, which can be found at:
+
+ https://developer.arm.com/documentation/107997/0000/?lang=en
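+
+For context, MIDR_EL1 encodes Implementer[31:24], Variant[23:20],
+Architecture[19:16], PartNum[15:4] and Revision[3:0], so the new part
+number composes into a model value exactly like the existing Arm Ltd
+parts (sketch of the pattern used in the diff below):
+
+  #define ARM_CPU_PART_NEOVERSE_N3 0xD8E
+  #define MIDR_NEOVERSE_N3 \
+          MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N3)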
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Cc: James Morse <james.morse@arm.com>
+Cc: Will Deacon <will@kernel.org>
+Link: https://lore.kernel.org/r/20240930111705.3352047-2-mark.rutland@arm.com
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+[ Mark: trivial backport ]
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/include/asm/cputype.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
+index 5a7dfeb8e8eb5..488f8e7513495 100644
+--- a/arch/arm64/include/asm/cputype.h
++++ b/arch/arm64/include/asm/cputype.h
+@@ -94,6 +94,7 @@
+ #define ARM_CPU_PART_NEOVERSE_V3 0xD84
+ #define ARM_CPU_PART_CORTEX_X925 0xD85
+ #define ARM_CPU_PART_CORTEX_A725 0xD87
++#define ARM_CPU_PART_NEOVERSE_N3 0xD8E
+
+ #define APM_CPU_PART_XGENE 0x000
+ #define APM_CPU_VAR_POTENZA 0x00
+@@ -176,6 +177,7 @@
+ #define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3)
+ #define MIDR_CORTEX_X925 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X925)
+ #define MIDR_CORTEX_A725 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A725)
++#define MIDR_NEOVERSE_N3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N3)
+ #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
+ #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
+ #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
+--
+2.43.0
+
--- /dev/null
+From 0db83741d094f6f72ce17665560fab7e77e43943 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Oct 2024 13:06:38 +0100
+Subject: arm64: errata: Expand speculative SSBS workaround once more
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+[ Upstream commit 081eb7932c2b244f63317a982c5e3990e2c7fbdd ]
+
+A number of Arm Ltd CPUs suffer from errata whereby an MSR to the SSBS
+special-purpose register does not affect subsequent speculative
+instructions, permitting speculative store bypassing for a window of
+time.
+
+We worked around this for a number of CPUs in commits:
+
+* 7187bb7d0b5c7dfa ("arm64: errata: Add workaround for Arm errata 3194386 and 3312417")
+* 75b3c43eab594bfb ("arm64: errata: Expand speculative SSBS workaround")
+* 145502cac7ea70b5 ("arm64: errata: Expand speculative SSBS workaround (again)")
+
+Since then, a (hopefully final) batch of updates has been published,
+with two more affected CPUs. For the affected CPUs the existing
+mitigation is sufficient, as described in their respective Software
+Developer Errata Notice (SDEN) documents:
+
+* Cortex-A715 (MP148) SDEN v15.0, erratum 3456084
+ https://developer.arm.com/documentation/SDEN-2148827/1500/
+
+* Neoverse-N3 (MP195) SDEN v5.0, erratum 3456111
+ https://developer.arm.com/documentation/SDEN-3050973/0500/
+
+Enable the existing mitigation by adding the relevant MIDRs to
+erratum_spec_ssbs_list, and update silicon-errata.rst and the
+Kconfig text accordingly.
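+
+At runtime the new entries are matched like the existing ones; roughly
+(a sketch, not the literal cpu_errata.c code):
+
+  /* cpu_errata.c walks the midr_range list for the erratum */
+  bool affected = is_midr_in_range_list(read_cpuid_id(),
+                                        erratum_spec_ssbs_list);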
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Cc: James Morse <james.morse@arm.com>
+Cc: Will Deacon <will@kernel.org>
+Link: https://lore.kernel.org/r/20240930111705.3352047-3-mark.rutland@arm.com
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+[ Mark: trivial backport ]
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ Documentation/arch/arm64/silicon-errata.rst | 4 ++++
+ arch/arm64/Kconfig | 2 ++
+ arch/arm64/kernel/cpu_errata.c | 2 ++
+ 3 files changed, 8 insertions(+)
+
+diff --git a/Documentation/arch/arm64/silicon-errata.rst b/Documentation/arch/arm64/silicon-errata.rst
+index 3bc51669ead7d..8cd4f365044b6 100644
+--- a/Documentation/arch/arm64/silicon-errata.rst
++++ b/Documentation/arch/arm64/silicon-errata.rst
+@@ -146,6 +146,8 @@ stable kernels.
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM | Cortex-A715 | #2645198 | ARM64_ERRATUM_2645198 |
+ +----------------+-----------------+-----------------+-----------------------------+
++| ARM | Cortex-A715 | #3456084 | ARM64_ERRATUM_3194386 |
+++----------------+-----------------+-----------------+-----------------------------+
+ | ARM | Cortex-A720 | #3456091 | ARM64_ERRATUM_3194386 |
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM | Cortex-A725 | #3456106 | ARM64_ERRATUM_3194386 |
+@@ -186,6 +188,8 @@ stable kernels.
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM | Neoverse-N2 | #3324339 | ARM64_ERRATUM_3194386 |
+ +----------------+-----------------+-----------------+-----------------------------+
++| ARM | Neoverse-N3 | #3456111 | ARM64_ERRATUM_3194386 |
+++----------------+-----------------+-----------------+-----------------------------+
+ | ARM | Neoverse-V1 | #1619801 | N/A |
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM | Neoverse-V1 | #3324341 | ARM64_ERRATUM_3194386 |
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 03faaecfa2444..43d79f87fa180 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -1078,6 +1078,7 @@ config ARM64_ERRATUM_3194386
+ * ARM Cortex-A78C erratum 3324346
+ * ARM Cortex-A78C erratum 3324347
+ * ARM Cortex-A710 erratam 3324338
++ * ARM Cortex-A715 errartum 3456084
+ * ARM Cortex-A720 erratum 3456091
+ * ARM Cortex-A725 erratum 3456106
+ * ARM Cortex-X1 erratum 3324344
+@@ -1088,6 +1089,7 @@ config ARM64_ERRATUM_3194386
+ * ARM Cortex-X925 erratum 3324334
+ * ARM Neoverse-N1 erratum 3324349
+ * ARM Neoverse N2 erratum 3324339
++ * ARM Neoverse-N3 erratum 3456111
+ * ARM Neoverse-V1 erratum 3324341
+ * ARM Neoverse V2 erratum 3324336
+ * ARM Neoverse-V3 erratum 3312417
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index aec2867daadc2..a78f247029aec 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -439,6 +439,7 @@ static const struct midr_range erratum_spec_ssbs_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A715),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A725),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
+@@ -450,6 +451,7 @@ static const struct midr_range erratum_spec_ssbs_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
++ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N3),
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3),
+--
+2.43.0
+
--- /dev/null
+From f4fc6103b1524ce271403e39e61e4ba38c678c5f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Jun 2024 21:39:33 +0300
+Subject: build-id: require program headers to be right after ELF header
+
+From: Alexey Dobriyan <adobriyan@gmail.com>
+
+[ Upstream commit 961a2851324561caed579764ffbee3db82b32829 ]
+
+Neither the ELF spec nor the ELF loader requires program headers to be
+placed right after the ELF header, but the build-id code very much
+assumes such placement:
+
+See the
+
+ find_get_page(vma->vm_file->f_mapping, 0);
+
+call and the checks against PAGE_SIZE.
+
+Return an error for now, until someone rewrites the build-id parser to
+be more in line with load_elf_binary().
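+
+For illustration, the check being added boils down to rejecting any
+layout the single-page parser cannot see (sketch of the 64-bit case,
+condensed from the diff below):
+
+  Elf64_Ehdr *ehdr = page_addr;
+
+  /* phdrs must start right after the ELF header ... */
+  if (ehdr->e_phoff != sizeof(Elf64_Ehdr))
+      return -EINVAL;
+  /* ... and fit within the first page. */
+  if (ehdr->e_phnum >
+      (PAGE_SIZE - sizeof(Elf64_Ehdr)) / sizeof(Elf64_Phdr))
+      return -EINVAL;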
+
+Link: https://lkml.kernel.org/r/d58bc281-6ca7-467a-9a64-40fa214bd63e@p183
+Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
+Reviewed-by: Jiri Olsa <jolsa@kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Stable-dep-of: 905415ff3ffb ("lib/buildid: harden build ID parsing logic")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ lib/buildid.c | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+diff --git a/lib/buildid.c b/lib/buildid.c
+index 7954dd92e36c0..e02b5507418b4 100644
+--- a/lib/buildid.c
++++ b/lib/buildid.c
+@@ -73,6 +73,13 @@ static int get_build_id_32(const void *page_addr, unsigned char *build_id,
+ Elf32_Phdr *phdr;
+ int i;
+
++ /*
++ * FIXME
++ * Neither ELF spec nor ELF loader require that program headers
++ * start immediately after ELF header.
++ */
++ if (ehdr->e_phoff != sizeof(Elf32_Ehdr))
++ return -EINVAL;
+ /* only supports phdr that fits in one page */
+ if (ehdr->e_phnum >
+ (PAGE_SIZE - sizeof(Elf32_Ehdr)) / sizeof(Elf32_Phdr))
+@@ -98,6 +105,13 @@ static int get_build_id_64(const void *page_addr, unsigned char *build_id,
+ Elf64_Phdr *phdr;
+ int i;
+
++ /*
++ * FIXME
++ * Neither ELF spec nor ELF loader require that program headers
++ * start immediately after ELF header.
++ */
++ if (ehdr->e_phoff != sizeof(Elf64_Ehdr))
++ return -EINVAL;
+ /* only supports phdr that fits in one page */
+ if (ehdr->e_phnum >
+ (PAGE_SIZE - sizeof(Elf64_Ehdr)) / sizeof(Elf64_Phdr))
+--
+2.43.0
+
--- /dev/null
+From 27b6fa43c0d345efbe9ebab58afbf397c9eaa20a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 6 Oct 2024 22:51:06 +0200
+Subject: cpufreq: intel_pstate: Make hwp_notify_lock a raw spinlock
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Uwe Kleine-König <ukleinek@debian.org>
+
+commit 8b4865cd904650cbed7f2407e653934c621b8127 upstream.
+
+notify_hwp_interrupt() is called via sysvec_thermal() ->
+smp_thermal_vector() -> intel_thermal_interrupt() in hard irq context.
+For this reason it must not use a simple spin_lock, which becomes a
+sleeping lock with PREEMPT_RT enabled. So convert it to a raw
+spinlock.
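+
+On PREEMPT_RT a spinlock_t is substituted by a sleeping rtmutex-based
+lock, while a raw_spinlock_t keeps the classic busy-waiting behaviour
+that hard irq context requires. The resulting shape (condensed from
+the diff below):
+
+  static DEFINE_RAW_SPINLOCK(hwp_notify_lock);
+
+  void notify_hwp_interrupt(void)         /* hard irq context */
+  {
+      unsigned long flags;
+
+      raw_spin_lock_irqsave(&hwp_notify_lock, flags);
+      /* ... schedule deferred work, or ack the interrupt ... */
+      raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);
+  }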
+
+Reported-by: xiao sheng wen <atzlinux@sina.com>
+Link: https://bugs.debian.org/1076483
+Signed-off-by: Uwe Kleine-König <ukleinek@debian.org>
+Acked-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+Acked-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Tested-by: xiao sheng wen <atzlinux@sina.com>
+Link: https://patch.msgid.link/20240919081121.10784-2-ukleinek@debian.org
+Cc: All applicable <stable@vger.kernel.org>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+[ukleinek: Backport to v6.10.y]
+Signed-off-by: Uwe Kleine-König <ukleinek@debian.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/cpufreq/intel_pstate.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index c31914a9876fa..b694e474acece 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -1622,7 +1622,7 @@ static void intel_pstate_notify_work(struct work_struct *work)
+ wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
+ }
+
+-static DEFINE_SPINLOCK(hwp_notify_lock);
++static DEFINE_RAW_SPINLOCK(hwp_notify_lock);
+ static cpumask_t hwp_intr_enable_mask;
+
+ void notify_hwp_interrupt(void)
+@@ -1638,7 +1638,7 @@ void notify_hwp_interrupt(void)
+ if (!(value & 0x01))
+ return;
+
+- spin_lock_irqsave(&hwp_notify_lock, flags);
++ raw_spin_lock_irqsave(&hwp_notify_lock, flags);
+
+ if (!cpumask_test_cpu(this_cpu, &hwp_intr_enable_mask))
+ goto ack_intr;
+@@ -1646,13 +1646,13 @@ void notify_hwp_interrupt(void)
+ schedule_delayed_work(&all_cpu_data[this_cpu]->hwp_notify_work,
+ msecs_to_jiffies(10));
+
+- spin_unlock_irqrestore(&hwp_notify_lock, flags);
++ raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);
+
+ return;
+
+ ack_intr:
+ wrmsrl_safe(MSR_HWP_STATUS, 0);
+- spin_unlock_irqrestore(&hwp_notify_lock, flags);
++ raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);
+ }
+
+ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
+@@ -1665,9 +1665,9 @@ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
+ /* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
+ wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
+
+- spin_lock_irq(&hwp_notify_lock);
++ raw_spin_lock_irq(&hwp_notify_lock);
+ cancel_work = cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask);
+- spin_unlock_irq(&hwp_notify_lock);
++ raw_spin_unlock_irq(&hwp_notify_lock);
+
+ if (cancel_work)
+ cancel_delayed_work_sync(&cpudata->hwp_notify_work);
+@@ -1677,10 +1677,10 @@ static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
+ {
+ /* Enable HWP notification interrupt for guaranteed performance change */
+ if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) {
+- spin_lock_irq(&hwp_notify_lock);
++ raw_spin_lock_irq(&hwp_notify_lock);
+ INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work);
+ cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask);
+- spin_unlock_irq(&hwp_notify_lock);
++ raw_spin_unlock_irq(&hwp_notify_lock);
+
+ /* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
+ wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x01);
+--
+2.43.0
+
--- /dev/null
+From 77c7f85ecfa551cfef7241253336169092c3c195 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 15 Sep 2024 14:28:37 -0500
+Subject: drm/amd/display: Allow backlight to go below
+ `AMDGPU_DM_DEFAULT_MIN_BACKLIGHT`
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Mario Limonciello <mario.limonciello@amd.com>
+
+[ Upstream commit 87d749a6aab73d8069d0345afaa98297816cb220 ]
+
+The panel power savings compatibility issue that shows up below
+`AMDGPU_DM_DEFAULT_MIN_BACKLIGHT` occurs at
+`AMDGPU_DM_DEFAULT_MIN_BACKLIGHT` as well.
+
+That issue will be fixed separately, so don't prevent the backlight
+brightness from going that low.
+
+Cc: Harry Wentland <harry.wentland@amd.com>
+Cc: Thomas Weißschuh <linux@weissschuh.net>
+Link: https://lore.kernel.org/amd-gfx/be04226a-a9e3-4a45-a83b-6d263c6557d8@t-8ch.de/T/#m400dee4e2fc61fe9470334d20a7c8c89c9aef44f
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index d2554b4a14fe9..83f4ff9e848d7 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -4176,7 +4176,7 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
+ int spread = caps.max_input_signal - caps.min_input_signal;
+
+ if (caps.max_input_signal > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT ||
+- caps.min_input_signal < AMDGPU_DM_DEFAULT_MIN_BACKLIGHT ||
++ caps.min_input_signal < 0 ||
+ spread > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT ||
+ spread < AMDGPU_DM_MIN_SPREAD) {
+ DRM_DEBUG_KMS("DM: Invalid backlight caps: min=%d, max=%d\n",
+--
+2.43.0
+
--- /dev/null
+From da4c5ae6bdcc00706e2427d28bd0415de5c65fae Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 Apr 2024 22:47:47 -0700
+Subject: drm/xe: Delete unused GuC submission_state.suspend
+
+From: Matthew Brost <matthew.brost@intel.com>
+
+[ Upstream commit 3f371a98deada9aee53d908c9aa53f6cdcb1300b ]
+
+GuC submission_state.suspend is unused, delete it.
+
+Signed-off-by: Matthew Brost <matthew.brost@intel.com>
+Reviewed-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240425054747.1918811-1-matthew.brost@intel.com
+Stable-dep-of: 2d2be279f1ca ("drm/xe: fix UAF around queue destruction")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_guc_submit.c | 4 ----
+ drivers/gpu/drm/xe/xe_guc_types.h | 9 ---------
+ 2 files changed, 13 deletions(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
+index 958dde8422d7e..a40287a7c3de8 100644
+--- a/drivers/gpu/drm/xe/xe_guc_submit.c
++++ b/drivers/gpu/drm/xe/xe_guc_submit.c
+@@ -251,7 +251,6 @@ static void primelockdep(struct xe_guc *guc)
+ fs_reclaim_acquire(GFP_KERNEL);
+
+ mutex_lock(&guc->submission_state.lock);
+- might_lock(&guc->submission_state.suspend.lock);
+ mutex_unlock(&guc->submission_state.lock);
+
+ fs_reclaim_release(GFP_KERNEL);
+@@ -279,9 +278,6 @@ int xe_guc_submit_init(struct xe_guc *guc)
+
+ xa_init(&guc->submission_state.exec_queue_lookup);
+
+- spin_lock_init(&guc->submission_state.suspend.lock);
+- guc->submission_state.suspend.context = dma_fence_context_alloc(1);
+-
+ primelockdep(guc);
+
+ return drmm_add_action_or_reset(&xe->drm, guc_submit_fini, guc);
+diff --git a/drivers/gpu/drm/xe/xe_guc_types.h b/drivers/gpu/drm/xe/xe_guc_types.h
+index 82bd93f7867d1..546ac6350a31f 100644
+--- a/drivers/gpu/drm/xe/xe_guc_types.h
++++ b/drivers/gpu/drm/xe/xe_guc_types.h
+@@ -72,15 +72,6 @@ struct xe_guc {
+ atomic_t stopped;
+ /** @submission_state.lock: protects submission state */
+ struct mutex lock;
+- /** @submission_state.suspend: suspend fence state */
+- struct {
+- /** @submission_state.suspend.lock: suspend fences lock */
+- spinlock_t lock;
+- /** @submission_state.suspend.context: suspend fences context */
+- u64 context;
+- /** @submission_state.suspend.seqno: suspend fences seqno */
+- u32 seqno;
+- } suspend;
+ #ifdef CONFIG_PROVE_LOCKING
+ #define NUM_SUBMIT_WQ 256
+ /** @submission_state.submit_wq_pool: submission ordered workqueues pool */
+--
+2.43.0
+
--- /dev/null
+From 671d0dcf2db7037f2c00f262a699860c3609d664 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Sep 2024 15:56:48 +0100
+Subject: drm/xe: fix UAF around queue destruction
+
+From: Matthew Auld <matthew.auld@intel.com>
+
+[ Upstream commit 2d2be279f1ca9e7288282d4214f16eea8a727cdb ]
+
+We currently do things like queuing the final destruction step on a
+random system wq, which will outlive the driver instance. With bad
+timing we can tear down the driver while one or more queued work items
+are still alive, leading to various UAF splats. Add a fini step to
+ensure user queues are properly torn down. At this point the GuC
+should already be nuked, so the queue itself should no longer be
+referenced from the HW point of view.
+
+v2 (Matt B)
+ - Looks much safer to use a waitqueue and then just wait for the
+ xa_array to become empty before triggering the drain.
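+
+Condensed from the diff below, the teardown ordering becomes:
+
+  /* destruction work now runs on a driver-owned workqueue */
+  queue_work(xe->destroy_wq, &q->guc->fini_async);
+
+  /*
+   * On fini: wait for the queue lookup xarray to empty out,
+   * then drain whatever destruction work is still in flight.
+   */
+  wait_event_timeout(guc->submission_state.fini_wq,
+                     xa_empty(&guc->submission_state.exec_queue_lookup),
+                     HZ * 5);
+  drain_workqueue(xe->destroy_wq);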
+
+Closes: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/2317
+Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
+Signed-off-by: Matthew Auld <matthew.auld@intel.com>
+Cc: Matthew Brost <matthew.brost@intel.com>
+Cc: <stable@vger.kernel.org> # v6.8+
+Reviewed-by: Matthew Brost <matthew.brost@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240923145647.77707-2-matthew.auld@intel.com
+(cherry picked from commit 861108666cc0e999cffeab6aff17b662e68774e3)
+Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_device.c | 6 +++++-
+ drivers/gpu/drm/xe/xe_device_types.h | 3 +++
+ drivers/gpu/drm/xe/xe_guc_submit.c | 26 +++++++++++++++++++++++++-
+ drivers/gpu/drm/xe/xe_guc_types.h | 2 ++
+ 4 files changed, 35 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
+index a1cbdafbff75e..599bf7f9e8c5c 100644
+--- a/drivers/gpu/drm/xe/xe_device.c
++++ b/drivers/gpu/drm/xe/xe_device.c
+@@ -231,6 +231,9 @@ static void xe_device_destroy(struct drm_device *dev, void *dummy)
+ if (xe->unordered_wq)
+ destroy_workqueue(xe->unordered_wq);
+
++ if (xe->destroy_wq)
++ destroy_workqueue(xe->destroy_wq);
++
+ ttm_device_fini(&xe->ttm);
+ }
+
+@@ -293,8 +296,9 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
+ xe->preempt_fence_wq = alloc_ordered_workqueue("xe-preempt-fence-wq", 0);
+ xe->ordered_wq = alloc_ordered_workqueue("xe-ordered-wq", 0);
+ xe->unordered_wq = alloc_workqueue("xe-unordered-wq", 0, 0);
++ xe->destroy_wq = alloc_workqueue("xe-destroy-wq", 0, 0);
+ if (!xe->ordered_wq || !xe->unordered_wq ||
+- !xe->preempt_fence_wq) {
++ !xe->preempt_fence_wq || !xe->destroy_wq) {
+ /*
+ * Cleanup done in xe_device_destroy via
+ * drmm_add_action_or_reset register above
+diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
+index 2e62450d86e18..f671300e0c9bd 100644
+--- a/drivers/gpu/drm/xe/xe_device_types.h
++++ b/drivers/gpu/drm/xe/xe_device_types.h
+@@ -376,6 +376,9 @@ struct xe_device {
+ /** @unordered_wq: used to serialize unordered work, mostly display */
+ struct workqueue_struct *unordered_wq;
+
++ /** @destroy_wq: used to serialize user destroy work, like queue */
++ struct workqueue_struct *destroy_wq;
++
+ /** @tiles: device tiles */
+ struct xe_tile tiles[XE_MAX_TILES_PER_DEVICE];
+
+diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
+index a40287a7c3de8..a0f8299488030 100644
+--- a/drivers/gpu/drm/xe/xe_guc_submit.c
++++ b/drivers/gpu/drm/xe/xe_guc_submit.c
+@@ -233,10 +233,26 @@ static struct workqueue_struct *get_submit_wq(struct xe_guc *guc)
+ }
+ #endif
+
++static void xe_guc_submit_fini(struct xe_guc *guc)
++{
++ struct xe_device *xe = guc_to_xe(guc);
++ struct xe_gt *gt = guc_to_gt(guc);
++ int ret;
++
++ ret = wait_event_timeout(guc->submission_state.fini_wq,
++ xa_empty(&guc->submission_state.exec_queue_lookup),
++ HZ * 5);
++
++ drain_workqueue(xe->destroy_wq);
++
++ xe_gt_assert(gt, ret);
++}
++
+ static void guc_submit_fini(struct drm_device *drm, void *arg)
+ {
+ struct xe_guc *guc = arg;
+
++ xe_guc_submit_fini(guc);
+ xa_destroy(&guc->submission_state.exec_queue_lookup);
+ free_submit_wq(guc);
+ }
+@@ -278,6 +294,8 @@ int xe_guc_submit_init(struct xe_guc *guc)
+
+ xa_init(&guc->submission_state.exec_queue_lookup);
+
++ init_waitqueue_head(&guc->submission_state.fini_wq);
++
+ primelockdep(guc);
+
+ return drmm_add_action_or_reset(&xe->drm, guc_submit_fini, guc);
+@@ -294,6 +312,9 @@ static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa
+
+ xe_guc_id_mgr_release_locked(&guc->submission_state.idm,
+ q->guc->id, q->width);
++
++ if (xa_empty(&guc->submission_state.exec_queue_lookup))
++ wake_up(&guc->submission_state.fini_wq);
+ }
+
+ static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
+@@ -1025,13 +1046,16 @@ static void __guc_exec_queue_fini_async(struct work_struct *w)
+
+ static void guc_exec_queue_fini_async(struct xe_exec_queue *q)
+ {
++ struct xe_guc *guc = exec_queue_to_guc(q);
++ struct xe_device *xe = guc_to_xe(guc);
++
+ INIT_WORK(&q->guc->fini_async, __guc_exec_queue_fini_async);
+
+ /* We must block on kernel engines so slabs are empty on driver unload */
+ if (q->flags & EXEC_QUEUE_FLAG_PERMANENT)
+ __guc_exec_queue_fini_async(&q->guc->fini_async);
+ else
+- queue_work(system_wq, &q->guc->fini_async);
++ queue_work(xe->destroy_wq, &q->guc->fini_async);
+ }
+
+ static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q)
+diff --git a/drivers/gpu/drm/xe/xe_guc_types.h b/drivers/gpu/drm/xe/xe_guc_types.h
+index 546ac6350a31f..69046f6982717 100644
+--- a/drivers/gpu/drm/xe/xe_guc_types.h
++++ b/drivers/gpu/drm/xe/xe_guc_types.h
+@@ -81,6 +81,8 @@ struct xe_guc {
+ #endif
+ /** @submission_state.enabled: submission is enabled */
+ bool enabled;
++ /** @submission_state.fini_wq: submit fini wait queue */
++ wait_queue_head_t fini_wq;
+ } submission_state;
+ /** @hwconfig: Hardware config state */
+ struct {
+--
+2.43.0
+
--- /dev/null
+From 338e6a9f555aed0dced894f2e865c335d15a71d2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 11 Jul 2024 23:15:49 +0200
+Subject: iio: pressure: bmp280: Fix regmap for BMP280 device
+
+From: Vasileios Amoiridis <vassilisamir@gmail.com>
+
+[ Upstream commit b9065b0250e1705935445ede0a18c1850afe7b75 ]
+
+Up to now, the BMP280 device has been using the regmap of the BME280,
+which has registers that exist only in the BME280 device.
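+
+The fix gives the BME280 its own regmap_config, so the BMP280 config
+can stop at the last BMP280 register and only the BME280 config
+describes the humidity registers. The shape of the new config
+(condensed from the diff below):
+
+  const struct regmap_config bme280_regmap_config = {
+      .reg_bits = 8,
+      .val_bits = 8,
+      .max_register = BME280_REG_HUMIDITY_LSB,
+      .cache_type = REGCACHE_RBTREE,
+      .writeable_reg = bme280_is_writeable_reg,
+      .volatile_reg = bme280_is_volatile_reg,
+  };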
+
+Fixes: 14e8015f8569 ("iio: pressure: bmp280: split driver in logical parts")
+Signed-off-by: Vasileios Amoiridis <vassilisamir@gmail.com>
+Link: https://patch.msgid.link/20240711211558.106327-2-vassilisamir@gmail.com
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iio/pressure/bmp280-core.c | 2 +-
+ drivers/iio/pressure/bmp280-regmap.c | 45 ++++++++++++++++++++++++++--
+ drivers/iio/pressure/bmp280.h | 1 +
+ 3 files changed, 44 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
+index 51413ab86e66e..55ea708489b66 100644
+--- a/drivers/iio/pressure/bmp280-core.c
++++ b/drivers/iio/pressure/bmp280-core.c
+@@ -853,7 +853,7 @@ const struct bmp280_chip_info bme280_chip_info = {
+ .id_reg = BMP280_REG_ID,
+ .chip_id = bme280_chip_ids,
+ .num_chip_id = ARRAY_SIZE(bme280_chip_ids),
+- .regmap_config = &bmp280_regmap_config,
++ .regmap_config = &bme280_regmap_config,
+ .start_up_time = 2000,
+ .channels = bmp280_channels,
+ .num_channels = 3,
+diff --git a/drivers/iio/pressure/bmp280-regmap.c b/drivers/iio/pressure/bmp280-regmap.c
+index fa52839474b18..d27d68edd9065 100644
+--- a/drivers/iio/pressure/bmp280-regmap.c
++++ b/drivers/iio/pressure/bmp280-regmap.c
+@@ -41,7 +41,7 @@ const struct regmap_config bmp180_regmap_config = {
+ };
+ EXPORT_SYMBOL_NS(bmp180_regmap_config, IIO_BMP280);
+
+-static bool bmp280_is_writeable_reg(struct device *dev, unsigned int reg)
++static bool bme280_is_writeable_reg(struct device *dev, unsigned int reg)
+ {
+ switch (reg) {
+ case BMP280_REG_CONFIG:
+@@ -54,7 +54,35 @@ static bool bmp280_is_writeable_reg(struct device *dev, unsigned int reg)
+ }
+ }
+
++static bool bmp280_is_writeable_reg(struct device *dev, unsigned int reg)
++{
++ switch (reg) {
++ case BMP280_REG_CONFIG:
++ case BMP280_REG_CTRL_MEAS:
++ case BMP280_REG_RESET:
++ return true;
++ default:
++ return false;
++ }
++}
++
+ static bool bmp280_is_volatile_reg(struct device *dev, unsigned int reg)
++{
++ switch (reg) {
++ case BMP280_REG_TEMP_XLSB:
++ case BMP280_REG_TEMP_LSB:
++ case BMP280_REG_TEMP_MSB:
++ case BMP280_REG_PRESS_XLSB:
++ case BMP280_REG_PRESS_LSB:
++ case BMP280_REG_PRESS_MSB:
++ case BMP280_REG_STATUS:
++ return true;
++ default:
++ return false;
++ }
++}
++
++static bool bme280_is_volatile_reg(struct device *dev, unsigned int reg)
+ {
+ switch (reg) {
+ case BME280_REG_HUMIDITY_LSB:
+@@ -71,7 +99,6 @@ static bool bmp280_is_volatile_reg(struct device *dev, unsigned int reg)
+ return false;
+ }
+ }
+-
+ static bool bmp380_is_writeable_reg(struct device *dev, unsigned int reg)
+ {
+ switch (reg) {
+@@ -167,7 +194,7 @@ const struct regmap_config bmp280_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+- .max_register = BME280_REG_HUMIDITY_LSB,
++ .max_register = BMP280_REG_TEMP_XLSB,
+ .cache_type = REGCACHE_RBTREE,
+
+ .writeable_reg = bmp280_is_writeable_reg,
+@@ -175,6 +202,18 @@ const struct regmap_config bmp280_regmap_config = {
+ };
+ EXPORT_SYMBOL_NS(bmp280_regmap_config, IIO_BMP280);
+
++const struct regmap_config bme280_regmap_config = {
++ .reg_bits = 8,
++ .val_bits = 8,
++
++ .max_register = BME280_REG_HUMIDITY_LSB,
++ .cache_type = REGCACHE_RBTREE,
++
++ .writeable_reg = bme280_is_writeable_reg,
++ .volatile_reg = bme280_is_volatile_reg,
++};
++EXPORT_SYMBOL_NS(bme280_regmap_config, IIO_BMP280);
++
+ const struct regmap_config bmp380_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+diff --git a/drivers/iio/pressure/bmp280.h b/drivers/iio/pressure/bmp280.h
+index 91d4457a92301..a651cb8009931 100644
+--- a/drivers/iio/pressure/bmp280.h
++++ b/drivers/iio/pressure/bmp280.h
+@@ -470,6 +470,7 @@ extern const struct bmp280_chip_info bmp580_chip_info;
+ /* Regmap configurations */
+ extern const struct regmap_config bmp180_regmap_config;
+ extern const struct regmap_config bmp280_regmap_config;
++extern const struct regmap_config bme280_regmap_config;
+ extern const struct regmap_config bmp380_regmap_config;
+ extern const struct regmap_config bmp580_regmap_config;
+
+--
+2.43.0
+
--- /dev/null
+From b5392254600c54ba6fb9b2ac11d293d98caae452 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 11 Jul 2024 23:15:50 +0200
+Subject: iio: pressure: bmp280: Fix waiting time for BMP3xx configuration
+
+From: Vasileios Amoiridis <vassilisamir@gmail.com>
+
+[ Upstream commit 262a6634bcc4f0c1c53d13aa89882909f281a6aa ]
+
+According to the datasheet, both pressure and temperature can be
+oversampled up to x32. With that setting, the maximum measurement time
+is not 80ms (which covers pressure x32 with temperature x2) but 130ms
+nominal (calculated from table 3.9.2), and since most of the worst-case
+values run about +15% above nominal, the wait is set to 150ms.
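+
+(Worked out: 130ms nominal + ~15% margin = 149.5ms, rounded up to
+150ms.)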
+
+Fixes: 8d329309184d ("iio: pressure: bmp280: Add support for BMP380 sensor family")
+Signed-off-by: Vasileios Amoiridis <vassilisamir@gmail.com>
+Link: https://patch.msgid.link/20240711211558.106327-3-vassilisamir@gmail.com
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iio/pressure/bmp280-core.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
+index 55ea708489b66..1549f361a473f 100644
+--- a/drivers/iio/pressure/bmp280-core.c
++++ b/drivers/iio/pressure/bmp280-core.c
+@@ -1204,10 +1204,11 @@ static int bmp380_chip_config(struct bmp280_data *data)
+ }
+ /*
+ * Waits for measurement before checking configuration error
+- * flag. Selected longest measure time indicated in
+- * section 3.9.1 in the datasheet.
++ * flag. Selected longest measurement time, calculated from
++ * formula in datasheet section 3.9.2 with an offset of ~+15%
++ * as it seen as well in table 3.9.1.
+ */
+- msleep(80);
++ msleep(150);
+
+ /* Check config error flag */
+ ret = regmap_read(data->regmap, BMP380_REG_ERROR, &tmp);
+--
+2.43.0
+
--- /dev/null
+From 23e42d7be67558d7399789d2cb294e4c51871f13 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 29 Apr 2024 21:00:37 +0200
+Subject: iio: pressure: bmp280: Improve indentation and line wrapping
+
+From: Vasileios Amoiridis <vassilisamir@gmail.com>
+
+[ Upstream commit 439ce8961bdd2e925c1f6adc82ce9fe3931e2c08 ]
+
+Fix indentation that does not follow the coding style, remove extra
+blank lines and add missing ones.
+
+Signed-off-by: Vasileios Amoiridis <vassilisamir@gmail.com>
+Link: https://lore.kernel.org/r/20240429190046.24252-2-vassilisamir@gmail.com
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Stable-dep-of: b9065b0250e1 ("iio: pressure: bmp280: Fix regmap for BMP280 device")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iio/pressure/bmp280-core.c | 108 ++++++++++++++++-------------
+ drivers/iio/pressure/bmp280-spi.c | 4 +-
+ 2 files changed, 61 insertions(+), 51 deletions(-)
+
+diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
+index 221fa2c552ae2..82c177e0ff933 100644
+--- a/drivers/iio/pressure/bmp280-core.c
++++ b/drivers/iio/pressure/bmp280-core.c
+@@ -52,7 +52,6 @@
+ */
+ enum { AC1, AC2, AC3, AC4, AC5, AC6, B1, B2, MB, MC, MD };
+
+-
+ enum bmp380_odr {
+ BMP380_ODR_200HZ,
+ BMP380_ODR_100HZ,
+@@ -181,18 +180,19 @@ static int bmp280_read_calib(struct bmp280_data *data)
+ struct bmp280_calib *calib = &data->calib.bmp280;
+ int ret;
+
+-
+ /* Read temperature and pressure calibration values. */
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_TEMP_START,
+- data->bmp280_cal_buf, sizeof(data->bmp280_cal_buf));
++ data->bmp280_cal_buf,
++ sizeof(data->bmp280_cal_buf));
+ if (ret < 0) {
+ dev_err(data->dev,
+- "failed to read temperature and pressure calibration parameters\n");
++ "failed to read calibration parameters\n");
+ return ret;
+ }
+
+- /* Toss the temperature and pressure calibration data into the entropy pool */
+- add_device_randomness(data->bmp280_cal_buf, sizeof(data->bmp280_cal_buf));
++ /* Toss calibration data into the entropy pool */
++ add_device_randomness(data->bmp280_cal_buf,
++ sizeof(data->bmp280_cal_buf));
+
+ /* Parse temperature calibration values. */
+ calib->T1 = le16_to_cpu(data->bmp280_cal_buf[T1]);
+@@ -223,7 +223,7 @@ static int bme280_read_calib(struct bmp280_data *data)
+ /* Load shared calibration params with bmp280 first */
+ ret = bmp280_read_calib(data);
+ if (ret < 0) {
+- dev_err(dev, "failed to read common bmp280 calibration parameters\n");
++ dev_err(dev, "failed to read calibration parameters\n");
+ return ret;
+ }
+
+@@ -283,6 +283,7 @@ static int bme280_read_calib(struct bmp280_data *data)
+
+ return 0;
+ }
++
+ /*
+ * Returns humidity in percent, resolution is 0.01 percent. Output value of
+ * "47445" represents 47445/1024 = 46.333 %RH.
+@@ -305,7 +306,7 @@ static u32 bmp280_compensate_humidity(struct bmp280_data *data,
+ var = clamp_val(var, 0, 419430400);
+
+ return var >> 12;
+-};
++}
+
+ /*
+ * Returns temperature in DegC, resolution is 0.01 DegC. Output value of
+@@ -538,7 +539,7 @@ static int bmp280_read_raw(struct iio_dev *indio_dev,
+ }
+
+ static int bmp280_write_oversampling_ratio_humid(struct bmp280_data *data,
+- int val)
++ int val)
+ {
+ const int *avail = data->chip_info->oversampling_humid_avail;
+ const int n = data->chip_info->num_oversampling_humid_avail;
+@@ -563,7 +564,7 @@ static int bmp280_write_oversampling_ratio_humid(struct bmp280_data *data,
+ }
+
+ static int bmp280_write_oversampling_ratio_temp(struct bmp280_data *data,
+- int val)
++ int val)
+ {
+ const int *avail = data->chip_info->oversampling_temp_avail;
+ const int n = data->chip_info->num_oversampling_temp_avail;
+@@ -588,7 +589,7 @@ static int bmp280_write_oversampling_ratio_temp(struct bmp280_data *data,
+ }
+
+ static int bmp280_write_oversampling_ratio_press(struct bmp280_data *data,
+- int val)
++ int val)
+ {
+ const int *avail = data->chip_info->oversampling_press_avail;
+ const int n = data->chip_info->num_oversampling_press_avail;
+@@ -772,13 +773,12 @@ static int bmp280_chip_config(struct bmp280_data *data)
+ int ret;
+
+ ret = regmap_write_bits(data->regmap, BMP280_REG_CTRL_MEAS,
+- BMP280_OSRS_TEMP_MASK |
+- BMP280_OSRS_PRESS_MASK |
+- BMP280_MODE_MASK,
+- osrs | BMP280_MODE_NORMAL);
++ BMP280_OSRS_TEMP_MASK |
++ BMP280_OSRS_PRESS_MASK |
++ BMP280_MODE_MASK,
++ osrs | BMP280_MODE_NORMAL);
+ if (ret < 0) {
+- dev_err(data->dev,
+- "failed to write ctrl_meas register\n");
++ dev_err(data->dev, "failed to write ctrl_meas register\n");
+ return ret;
+ }
+
+@@ -786,8 +786,7 @@ static int bmp280_chip_config(struct bmp280_data *data)
+ BMP280_FILTER_MASK,
+ BMP280_FILTER_4X);
+ if (ret < 0) {
+- dev_err(data->dev,
+- "failed to write config register\n");
++ dev_err(data->dev, "failed to write config register\n");
+ return ret;
+ }
+
+@@ -926,8 +925,8 @@ static int bmp380_cmd(struct bmp280_data *data, u8 cmd)
+ }
+
+ /*
+- * Returns temperature in Celsius degrees, resolution is 0.01º C. Output value of
+- * "5123" equals 51.2º C. t_fine carries fine temperature as global value.
++ * Returns temperature in Celsius degrees, resolution is 0.01º C. Output value
++ * of "5123" equals 51.2º C. t_fine carries fine temperature as global value.
+ *
+ * Taken from datasheet, Section Appendix 9, "Compensation formula" and repo
+ * https://github.com/BoschSensortec/BMP3-Sensor-API.
+@@ -1069,7 +1068,8 @@ static int bmp380_read_calib(struct bmp280_data *data)
+
+ /* Read temperature and pressure calibration data */
+ ret = regmap_bulk_read(data->regmap, BMP380_REG_CALIB_TEMP_START,
+- data->bmp380_cal_buf, sizeof(data->bmp380_cal_buf));
++ data->bmp380_cal_buf,
++ sizeof(data->bmp380_cal_buf));
+ if (ret) {
+ dev_err(data->dev,
+ "failed to read temperature calibration parameters\n");
+@@ -1077,7 +1077,8 @@ static int bmp380_read_calib(struct bmp280_data *data)
+ }
+
+ /* Toss the temperature calibration data into the entropy pool */
+- add_device_randomness(data->bmp380_cal_buf, sizeof(data->bmp380_cal_buf));
++ add_device_randomness(data->bmp380_cal_buf,
++ sizeof(data->bmp380_cal_buf));
+
+ /* Parse calibration values */
+ calib->T1 = get_unaligned_le16(&data->bmp380_cal_buf[BMP380_T1]);
+@@ -1159,7 +1160,8 @@ static int bmp380_chip_config(struct bmp280_data *data)
+
+ /* Configure output data rate */
+ ret = regmap_update_bits_check(data->regmap, BMP380_REG_ODR,
+- BMP380_ODRS_MASK, data->sampling_freq, &aux);
++ BMP380_ODRS_MASK, data->sampling_freq,
++ &aux);
+ if (ret) {
+ dev_err(data->dev, "failed to write ODR selection register\n");
+ return ret;
+@@ -1178,12 +1180,13 @@ static int bmp380_chip_config(struct bmp280_data *data)
+
+ if (change) {
+ /*
+- * The configurations errors are detected on the fly during a measurement
+- * cycle. If the sampling frequency is too low, it's faster to reset
+- * the measurement loop than wait until the next measurement is due.
++ * The configurations errors are detected on the fly during a
++ * measurement cycle. If the sampling frequency is too low, it's
++ * faster to reset the measurement loop than wait until the next
++ * measurement is due.
+ *
+- * Resets sensor measurement loop toggling between sleep and normal
+- * operating modes.
++ * Resets sensor measurement loop toggling between sleep and
++ * normal operating modes.
+ */
+ ret = regmap_write_bits(data->regmap, BMP380_REG_POWER_CONTROL,
+ BMP380_MODE_MASK,
+@@ -1201,22 +1204,21 @@ static int bmp380_chip_config(struct bmp280_data *data)
+ return ret;
+ }
+ /*
+- * Waits for measurement before checking configuration error flag.
+- * Selected longest measure time indicated in section 3.9.1
+- * in the datasheet.
++ * Waits for measurement before checking configuration error
++ * flag. Selected longest measure time indicated in
++ * section 3.9.1 in the datasheet.
+ */
+ msleep(80);
+
+ /* Check config error flag */
+ ret = regmap_read(data->regmap, BMP380_REG_ERROR, &tmp);
+ if (ret) {
+- dev_err(data->dev,
+- "failed to read error register\n");
++ dev_err(data->dev, "failed to read error register\n");
+ return ret;
+ }
+ if (tmp & BMP380_ERR_CONF_MASK) {
+ dev_warn(data->dev,
+- "sensor flagged configuration as incompatible\n");
++ "sensor flagged configuration as incompatible\n");
+ return -EINVAL;
+ }
+ }
+@@ -1317,9 +1319,11 @@ static int bmp580_nvm_operation(struct bmp280_data *data, bool is_write)
+ }
+
+ /* Start NVM operation sequence */
+- ret = regmap_write(data->regmap, BMP580_REG_CMD, BMP580_CMD_NVM_OP_SEQ_0);
++ ret = regmap_write(data->regmap, BMP580_REG_CMD,
++ BMP580_CMD_NVM_OP_SEQ_0);
+ if (ret) {
+- dev_err(data->dev, "failed to send nvm operation's first sequence\n");
++ dev_err(data->dev,
++ "failed to send nvm operation's first sequence\n");
+ return ret;
+ }
+ if (is_write) {
+@@ -1327,7 +1331,8 @@ static int bmp580_nvm_operation(struct bmp280_data *data, bool is_write)
+ ret = regmap_write(data->regmap, BMP580_REG_CMD,
+ BMP580_CMD_NVM_WRITE_SEQ_1);
+ if (ret) {
+- dev_err(data->dev, "failed to send nvm write sequence\n");
++ dev_err(data->dev,
++ "failed to send nvm write sequence\n");
+ return ret;
+ }
+ /* Datasheet says on 4.8.1.2 it takes approximately 10ms */
+@@ -1338,7 +1343,8 @@ static int bmp580_nvm_operation(struct bmp280_data *data, bool is_write)
+ ret = regmap_write(data->regmap, BMP580_REG_CMD,
+ BMP580_CMD_NVM_READ_SEQ_1);
+ if (ret) {
+- dev_err(data->dev, "failed to send nvm read sequence\n");
++ dev_err(data->dev,
++ "failed to send nvm read sequence\n");
+ return ret;
+ }
+ /* Datasheet says on 4.8.1.1 it takes approximately 200us */
+@@ -1501,8 +1507,8 @@ static int bmp580_nvmem_read(void *priv, unsigned int offset, void *val,
+ if (ret)
+ goto exit;
+
+- ret = regmap_bulk_read(data->regmap, BMP580_REG_NVM_DATA_LSB, &data->le16,
+- sizeof(data->le16));
++ ret = regmap_bulk_read(data->regmap, BMP580_REG_NVM_DATA_LSB,
++ &data->le16, sizeof(data->le16));
+ if (ret) {
+ dev_err(data->dev, "error reading nvm data regs\n");
+ goto exit;
+@@ -1546,7 +1552,8 @@ static int bmp580_nvmem_write(void *priv, unsigned int offset, void *val,
+ while (bytes >= sizeof(*buf)) {
+ addr = bmp580_nvmem_addrs[offset / sizeof(*buf)];
+
+- ret = regmap_write(data->regmap, BMP580_REG_NVM_ADDR, BMP580_NVM_PROG_EN |
++ ret = regmap_write(data->regmap, BMP580_REG_NVM_ADDR,
++ BMP580_NVM_PROG_EN |
+ FIELD_PREP(BMP580_NVM_ROW_ADDR_MASK, addr));
+ if (ret) {
+ dev_err(data->dev, "error writing nvm address\n");
+@@ -1554,8 +1561,8 @@ static int bmp580_nvmem_write(void *priv, unsigned int offset, void *val,
+ }
+ data->le16 = cpu_to_le16(*buf++);
+
+- ret = regmap_bulk_write(data->regmap, BMP580_REG_NVM_DATA_LSB, &data->le16,
+- sizeof(data->le16));
++ ret = regmap_bulk_write(data->regmap, BMP580_REG_NVM_DATA_LSB,
++ &data->le16, sizeof(data->le16));
+ if (ret) {
+ dev_err(data->dev, "error writing LSB NVM data regs\n");
+ goto exit;
+@@ -1662,7 +1669,8 @@ static int bmp580_chip_config(struct bmp280_data *data)
+ BMP580_OSR_PRESS_EN;
+
+ ret = regmap_update_bits_check(data->regmap, BMP580_REG_OSR_CONFIG,
+- BMP580_OSR_TEMP_MASK | BMP580_OSR_PRESS_MASK |
++ BMP580_OSR_TEMP_MASK |
++ BMP580_OSR_PRESS_MASK |
+ BMP580_OSR_PRESS_EN,
+ reg_val, &aux);
+ if (ret) {
+@@ -1713,7 +1721,8 @@ static int bmp580_chip_config(struct bmp280_data *data)
+ */
+ ret = regmap_read(data->regmap, BMP580_REG_EFF_OSR, &tmp);
+ if (ret) {
+- dev_err(data->dev, "error reading effective OSR register\n");
++ dev_err(data->dev,
++ "error reading effective OSR register\n");
+ return ret;
+ }
+ if (!(tmp & BMP580_EFF_OSR_VALID_ODR)) {
+@@ -1848,7 +1857,8 @@ static int bmp180_read_calib(struct bmp280_data *data)
+ }
+
+ /* Toss the calibration data into the entropy pool */
+- add_device_randomness(data->bmp180_cal_buf, sizeof(data->bmp180_cal_buf));
++ add_device_randomness(data->bmp180_cal_buf,
++ sizeof(data->bmp180_cal_buf));
+
+ calib->AC1 = be16_to_cpu(data->bmp180_cal_buf[AC1]);
+ calib->AC2 = be16_to_cpu(data->bmp180_cal_buf[AC2]);
+@@ -1963,8 +1973,7 @@ static u32 bmp180_compensate_press(struct bmp280_data *data, s32 adc_press)
+ return p + ((x1 + x2 + 3791) >> 4);
+ }
+
+-static int bmp180_read_press(struct bmp280_data *data,
+- int *val, int *val2)
++static int bmp180_read_press(struct bmp280_data *data, int *val, int *val2)
+ {
+ u32 comp_press;
+ s32 adc_press;
+@@ -2241,6 +2250,7 @@ static int bmp280_runtime_resume(struct device *dev)
+ ret = regulator_bulk_enable(BMP280_NUM_SUPPLIES, data->supplies);
+ if (ret)
+ return ret;
++
+ usleep_range(data->start_up_time, data->start_up_time + 100);
+ return data->chip_info->chip_config(data);
+ }
+diff --git a/drivers/iio/pressure/bmp280-spi.c b/drivers/iio/pressure/bmp280-spi.c
+index 4e19ea0b4d398..62b4e58104cf9 100644
+--- a/drivers/iio/pressure/bmp280-spi.c
++++ b/drivers/iio/pressure/bmp280-spi.c
+@@ -13,7 +13,7 @@
+ #include "bmp280.h"
+
+ static int bmp280_regmap_spi_write(void *context, const void *data,
+- size_t count)
++ size_t count)
+ {
+ struct spi_device *spi = to_spi_device(context);
+ u8 buf[2];
+@@ -29,7 +29,7 @@ static int bmp280_regmap_spi_write(void *context, const void *data,
+ }
+
+ static int bmp280_regmap_spi_read(void *context, const void *reg,
+- size_t reg_size, void *val, size_t val_size)
++ size_t reg_size, void *val, size_t val_size)
+ {
+ struct spi_device *spi = to_spi_device(context);
+
+--
+2.43.0
+
--- /dev/null
+From 71f1c61e1ff7da54d7a8c776a40a47c58949024d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 29 Apr 2024 21:00:38 +0200
+Subject: iio: pressure: bmp280: Use BME prefix for BME280 specifics
+
+From: Vasileios Amoiridis <vassilisamir@gmail.com>
+
+[ Upstream commit b23be4cd99a6f1f46963b87952632268174e62c1 ]
+
+Change the rest of the defines and function names that are used
+specifically by the BME280 humidity sensor to use the BME280 prefix,
+as is done for the rest of the BMP{0,1,3,5}80 sensors.
+
+Signed-off-by: Vasileios Amoiridis <vassilisamir@gmail.com>
+Link: https://lore.kernel.org/r/20240429190046.24252-3-vassilisamir@gmail.com
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Stable-dep-of: b9065b0250e1 ("iio: pressure: bmp280: Fix regmap for BMP280 device")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iio/pressure/bmp280-core.c | 37 +++++++++++------------
+ drivers/iio/pressure/bmp280-regmap.c | 8 ++---
+ drivers/iio/pressure/bmp280.h | 45 +++++++++++++++-------------
+ 3 files changed, 46 insertions(+), 44 deletions(-)
+
+diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
+index 82c177e0ff933..51413ab86e66e 100644
+--- a/drivers/iio/pressure/bmp280-core.c
++++ b/drivers/iio/pressure/bmp280-core.c
+@@ -235,14 +235,14 @@ static int bme280_read_calib(struct bmp280_data *data)
+ * Humidity data is only available on BME280.
+ */
+
+- ret = regmap_read(data->regmap, BMP280_REG_COMP_H1, &tmp);
++ ret = regmap_read(data->regmap, BME280_REG_COMP_H1, &tmp);
+ if (ret < 0) {
+ dev_err(dev, "failed to read H1 comp value\n");
+ return ret;
+ }
+ calib->H1 = tmp;
+
+- ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H2,
++ ret = regmap_bulk_read(data->regmap, BME280_REG_COMP_H2,
+ &data->le16, sizeof(data->le16));
+ if (ret < 0) {
+ dev_err(dev, "failed to read H2 comp value\n");
+@@ -250,14 +250,14 @@ static int bme280_read_calib(struct bmp280_data *data)
+ }
+ calib->H2 = sign_extend32(le16_to_cpu(data->le16), 15);
+
+- ret = regmap_read(data->regmap, BMP280_REG_COMP_H3, &tmp);
++ ret = regmap_read(data->regmap, BME280_REG_COMP_H3, &tmp);
+ if (ret < 0) {
+ dev_err(dev, "failed to read H3 comp value\n");
+ return ret;
+ }
+ calib->H3 = tmp;
+
+- ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H4,
++ ret = regmap_bulk_read(data->regmap, BME280_REG_COMP_H4,
+ &data->be16, sizeof(data->be16));
+ if (ret < 0) {
+ dev_err(dev, "failed to read H4 comp value\n");
+@@ -266,15 +266,15 @@ static int bme280_read_calib(struct bmp280_data *data)
+ calib->H4 = sign_extend32(((be16_to_cpu(data->be16) >> 4) & 0xff0) |
+ (be16_to_cpu(data->be16) & 0xf), 11);
+
+- ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H5,
++ ret = regmap_bulk_read(data->regmap, BME280_REG_COMP_H5,
+ &data->le16, sizeof(data->le16));
+ if (ret < 0) {
+ dev_err(dev, "failed to read H5 comp value\n");
+ return ret;
+ }
+- calib->H5 = sign_extend32(FIELD_GET(BMP280_COMP_H5_MASK, le16_to_cpu(data->le16)), 11);
++ calib->H5 = sign_extend32(FIELD_GET(BME280_COMP_H5_MASK, le16_to_cpu(data->le16)), 11);
+
+- ret = regmap_read(data->regmap, BMP280_REG_COMP_H6, &tmp);
++ ret = regmap_read(data->regmap, BME280_REG_COMP_H6, &tmp);
+ if (ret < 0) {
+ dev_err(dev, "failed to read H6 comp value\n");
+ return ret;
+@@ -290,7 +290,7 @@ static int bme280_read_calib(struct bmp280_data *data)
+ *
+ * Taken from BME280 datasheet, Section 4.2.3, "Compensation formula".
+ */
+-static u32 bmp280_compensate_humidity(struct bmp280_data *data,
++static u32 bme280_compensate_humidity(struct bmp280_data *data,
+ s32 adc_humidity)
+ {
+ struct bmp280_calib *calib = &data->calib.bmp280;
+@@ -430,7 +430,7 @@ static int bmp280_read_press(struct bmp280_data *data,
+ return IIO_VAL_FRACTIONAL;
+ }
+
+-static int bmp280_read_humid(struct bmp280_data *data, int *val, int *val2)
++static int bme280_read_humid(struct bmp280_data *data, int *val, int *val2)
+ {
+ u32 comp_humidity;
+ s32 adc_humidity;
+@@ -441,7 +441,7 @@ static int bmp280_read_humid(struct bmp280_data *data, int *val, int *val2)
+ if (ret < 0)
+ return ret;
+
+- ret = regmap_bulk_read(data->regmap, BMP280_REG_HUMIDITY_MSB,
++ ret = regmap_bulk_read(data->regmap, BME280_REG_HUMIDITY_MSB,
+ &data->be16, sizeof(data->be16));
+ if (ret < 0) {
+ dev_err(data->dev, "failed to read humidity\n");
+@@ -454,7 +454,7 @@ static int bmp280_read_humid(struct bmp280_data *data, int *val, int *val2)
+ dev_err(data->dev, "reading humidity skipped\n");
+ return -EIO;
+ }
+- comp_humidity = bmp280_compensate_humidity(data, adc_humidity);
++ comp_humidity = bme280_compensate_humidity(data, adc_humidity);
+
+ *val = comp_humidity * 1000 / 1024;
+
+@@ -538,7 +538,7 @@ static int bmp280_read_raw(struct iio_dev *indio_dev,
+ return ret;
+ }
+
+-static int bmp280_write_oversampling_ratio_humid(struct bmp280_data *data,
++static int bme280_write_oversampling_ratio_humid(struct bmp280_data *data,
+ int val)
+ {
+ const int *avail = data->chip_info->oversampling_humid_avail;
+@@ -682,7 +682,7 @@ static int bmp280_write_raw(struct iio_dev *indio_dev,
+ mutex_lock(&data->lock);
+ switch (chan->type) {
+ case IIO_HUMIDITYRELATIVE:
+- ret = bmp280_write_oversampling_ratio_humid(data, val);
++ ret = bme280_write_oversampling_ratio_humid(data, val);
+ break;
+ case IIO_PRESSURE:
+ ret = bmp280_write_oversampling_ratio_press(data, val);
+@@ -832,16 +832,15 @@ EXPORT_SYMBOL_NS(bmp280_chip_info, IIO_BMP280);
+
+ static int bme280_chip_config(struct bmp280_data *data)
+ {
+- u8 osrs = FIELD_PREP(BMP280_OSRS_HUMIDITY_MASK, data->oversampling_humid + 1);
++ u8 osrs = FIELD_PREP(BME280_OSRS_HUMIDITY_MASK, data->oversampling_humid + 1);
+ int ret;
+
+ /*
+ * Oversampling of humidity must be set before oversampling of
+ * temperature/pressure is set to become effective.
+ */
+- ret = regmap_update_bits(data->regmap, BMP280_REG_CTRL_HUMIDITY,
+- BMP280_OSRS_HUMIDITY_MASK, osrs);
+-
++ ret = regmap_update_bits(data->regmap, BME280_REG_CTRL_HUMIDITY,
++ BME280_OSRS_HUMIDITY_MASK, osrs);
+ if (ret < 0)
+ return ret;
+
+@@ -869,12 +868,12 @@ const struct bmp280_chip_info bme280_chip_info = {
+
+ .oversampling_humid_avail = bmp280_oversampling_avail,
+ .num_oversampling_humid_avail = ARRAY_SIZE(bmp280_oversampling_avail),
+- .oversampling_humid_default = BMP280_OSRS_HUMIDITY_16X - 1,
++ .oversampling_humid_default = BME280_OSRS_HUMIDITY_16X - 1,
+
+ .chip_config = bme280_chip_config,
+ .read_temp = bmp280_read_temp,
+ .read_press = bmp280_read_press,
+- .read_humid = bmp280_read_humid,
++ .read_humid = bme280_read_humid,
+ .read_calib = bme280_read_calib,
+ };
+ EXPORT_SYMBOL_NS(bme280_chip_info, IIO_BMP280);
+diff --git a/drivers/iio/pressure/bmp280-regmap.c b/drivers/iio/pressure/bmp280-regmap.c
+index 3ee56720428c5..fa52839474b18 100644
+--- a/drivers/iio/pressure/bmp280-regmap.c
++++ b/drivers/iio/pressure/bmp280-regmap.c
+@@ -45,7 +45,7 @@ static bool bmp280_is_writeable_reg(struct device *dev, unsigned int reg)
+ {
+ switch (reg) {
+ case BMP280_REG_CONFIG:
+- case BMP280_REG_CTRL_HUMIDITY:
++ case BME280_REG_CTRL_HUMIDITY:
+ case BMP280_REG_CTRL_MEAS:
+ case BMP280_REG_RESET:
+ return true;
+@@ -57,8 +57,8 @@ static bool bmp280_is_writeable_reg(struct device *dev, unsigned int reg)
+ static bool bmp280_is_volatile_reg(struct device *dev, unsigned int reg)
+ {
+ switch (reg) {
+- case BMP280_REG_HUMIDITY_LSB:
+- case BMP280_REG_HUMIDITY_MSB:
++ case BME280_REG_HUMIDITY_LSB:
++ case BME280_REG_HUMIDITY_MSB:
+ case BMP280_REG_TEMP_XLSB:
+ case BMP280_REG_TEMP_LSB:
+ case BMP280_REG_TEMP_MSB:
+@@ -167,7 +167,7 @@ const struct regmap_config bmp280_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+- .max_register = BMP280_REG_HUMIDITY_LSB,
++ .max_register = BME280_REG_HUMIDITY_LSB,
+ .cache_type = REGCACHE_RBTREE,
+
+ .writeable_reg = bmp280_is_writeable_reg,
+diff --git a/drivers/iio/pressure/bmp280.h b/drivers/iio/pressure/bmp280.h
+index 5812a344ed8e8..91d4457a92301 100644
+--- a/drivers/iio/pressure/bmp280.h
++++ b/drivers/iio/pressure/bmp280.h
+@@ -192,8 +192,6 @@
+ #define BMP380_PRESS_SKIPPED 0x800000
+
+ /* BMP280 specific registers */
+-#define BMP280_REG_HUMIDITY_LSB 0xFE
+-#define BMP280_REG_HUMIDITY_MSB 0xFD
+ #define BMP280_REG_TEMP_XLSB 0xFC
+ #define BMP280_REG_TEMP_LSB 0xFB
+ #define BMP280_REG_TEMP_MSB 0xFA
+@@ -207,15 +205,6 @@
+ #define BMP280_REG_CONFIG 0xF5
+ #define BMP280_REG_CTRL_MEAS 0xF4
+ #define BMP280_REG_STATUS 0xF3
+-#define BMP280_REG_CTRL_HUMIDITY 0xF2
+-
+-/* Due to non linear mapping, and data sizes we can't do a bulk read */
+-#define BMP280_REG_COMP_H1 0xA1
+-#define BMP280_REG_COMP_H2 0xE1
+-#define BMP280_REG_COMP_H3 0xE3
+-#define BMP280_REG_COMP_H4 0xE4
+-#define BMP280_REG_COMP_H5 0xE5
+-#define BMP280_REG_COMP_H6 0xE7
+
+ #define BMP280_REG_COMP_TEMP_START 0x88
+ #define BMP280_COMP_TEMP_REG_COUNT 6
+@@ -223,8 +212,6 @@
+ #define BMP280_REG_COMP_PRESS_START 0x8E
+ #define BMP280_COMP_PRESS_REG_COUNT 18
+
+-#define BMP280_COMP_H5_MASK GENMASK(15, 4)
+-
+ #define BMP280_CONTIGUOUS_CALIB_REGS (BMP280_COMP_TEMP_REG_COUNT + \
+ BMP280_COMP_PRESS_REG_COUNT)
+
+@@ -235,14 +222,6 @@
+ #define BMP280_FILTER_8X 3
+ #define BMP280_FILTER_16X 4
+
+-#define BMP280_OSRS_HUMIDITY_MASK GENMASK(2, 0)
+-#define BMP280_OSRS_HUMIDITY_SKIP 0
+-#define BMP280_OSRS_HUMIDITY_1X 1
+-#define BMP280_OSRS_HUMIDITY_2X 2
+-#define BMP280_OSRS_HUMIDITY_4X 3
+-#define BMP280_OSRS_HUMIDITY_8X 4
+-#define BMP280_OSRS_HUMIDITY_16X 5
+-
+ #define BMP280_OSRS_TEMP_MASK GENMASK(7, 5)
+ #define BMP280_OSRS_TEMP_SKIP 0
+ #define BMP280_OSRS_TEMP_1X 1
+@@ -264,6 +243,30 @@
+ #define BMP280_MODE_FORCED 1
+ #define BMP280_MODE_NORMAL 3
+
++/* BME280 specific registers */
++#define BME280_REG_HUMIDITY_LSB 0xFE
++#define BME280_REG_HUMIDITY_MSB 0xFD
++
++#define BME280_REG_CTRL_HUMIDITY 0xF2
++
++/* Due to non linear mapping, and data sizes we can't do a bulk read */
++#define BME280_REG_COMP_H1 0xA1
++#define BME280_REG_COMP_H2 0xE1
++#define BME280_REG_COMP_H3 0xE3
++#define BME280_REG_COMP_H4 0xE4
++#define BME280_REG_COMP_H5 0xE5
++#define BME280_REG_COMP_H6 0xE7
++
++#define BME280_COMP_H5_MASK GENMASK(15, 4)
++
++#define BME280_OSRS_HUMIDITY_MASK GENMASK(2, 0)
++#define BME280_OSRS_HUMIDITY_SKIP 0
++#define BME280_OSRS_HUMIDITY_1X 1
++#define BME280_OSRS_HUMIDITY_2X 2
++#define BME280_OSRS_HUMIDITY_4X 3
++#define BME280_OSRS_HUMIDITY_8X 4
++#define BME280_OSRS_HUMIDITY_16X 5
++
+ /* BMP180 specific registers */
+ #define BMP180_REG_OUT_XLSB 0xF8
+ #define BMP180_REG_OUT_LSB 0xF7
+--
+2.43.0
+
--- /dev/null
+From df05227340ee0e8b55828e70ebddb7849fb3c40c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 1 Oct 2024 18:02:22 +0900
+Subject: kconfig: qconf: fix buffer overflow in debug links
+
+From: Masahiro Yamada <masahiroy@kernel.org>
+
+[ Upstream commit 984ed20ece1c6c20789ece040cbff3eb1a388fa9 ]
+
+If you enable "Option -> Show Debug Info" and click a link, the program
+terminates with the following error:
+
+ *** buffer overflow detected ***: terminated
+
+The buffer overflow is caused by the following line:
+
+ strcat(data, "$");
+
+The buffer needs one more byte to accommodate the additional character.
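+
+A minimal sketch of the corrected sizing (illustrative only, reusing
+the variable names from the diff below): appending one character to a
+C string needs room for both the new character and the terminating NUL.
+
+	char *data = new char[count + 2];	/* count bytes + '$' + '\0' */
+	memcpy(data, str.constData(), count);
+	data[count] = '\0';
+	strcat(data, "$");	/* writes data[count] and data[count + 1] */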
+
+Fixes: c4f7398bee9c ("kconfig: qconf: make debug links work again")
+Signed-off-by: Masahiro Yamada <masahiroy@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ scripts/kconfig/qconf.cc | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc
+index c6c42c0f4e5d5..b7fc5aeb78cc0 100644
+--- a/scripts/kconfig/qconf.cc
++++ b/scripts/kconfig/qconf.cc
+@@ -1174,7 +1174,7 @@ void ConfigInfoView::clicked(const QUrl &url)
+ {
+ QByteArray str = url.toEncoded();
+ const std::size_t count = str.size();
+- char *data = new char[count + 1];
++ char *data = new char[count + 2]; // '$' + '\0'
+ struct symbol **result;
+ struct menu *m = NULL;
+
+--
+2.43.0
+
--- /dev/null
+From 887baf3b8e2f6df76ad2fb0c3b8bd0d47429446b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 29 Aug 2024 10:42:23 -0700
+Subject: lib/buildid: harden build ID parsing logic
+
+From: Andrii Nakryiko <andrii@kernel.org>
+
+[ Upstream commit 905415ff3ffb1d7e5afa62bacabd79776bd24606 ]
+
+Harden build ID parsing logic, adding explicit READ_ONCE() where it's
+important to have a consistent value read and validated just once.
+
+Also, as pointed out by Andi Kleen, we need to make sure that the
+entire ELF note is within page bounds, so move the overflow check up
+and add an extra note_size bounds validation.
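+
+A sketch of the hardened pattern (hypothetical fragment; the real code
+is in the diff below): read each untrusted field exactly once into a
+local variable, then bounds-check it with explicit overflow detection
+before any use.
+
+	u64 name_sz = READ_ONCE(nhdr->n_namesz);	/* one racy read */
+	u64 new_off;
+
+	if (check_add_overflow(note_off + sizeof(Elf32_Nhdr),
+			       ALIGN(name_sz, 4), &new_off) ||
+	    new_off > note_size)	/* note must fit in the mapped page */
+		return -EINVAL;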
+
+The Fixes tag below points to the commit that moved this code into
+lib/buildid.c; it was subsequently used in the perf subsystem, exposing
+this code to perf_event_open() users in v5.12+.
+
+Cc: stable@vger.kernel.org
+Reviewed-by: Eduard Zingerman <eddyz87@gmail.com>
+Reviewed-by: Jann Horn <jannh@google.com>
+Suggested-by: Andi Kleen <ak@linux.intel.com>
+Fixes: bd7525dacd7e ("bpf: Move stack_map_get_build_id into lib")
+Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
+Link: https://lore.kernel.org/r/20240829174232.3133883-2-andrii@kernel.org
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ lib/buildid.c | 76 +++++++++++++++++++++++++++++----------------------
+ 1 file changed, 44 insertions(+), 32 deletions(-)
+
+diff --git a/lib/buildid.c b/lib/buildid.c
+index e02b5507418b4..26007cc99a38f 100644
+--- a/lib/buildid.c
++++ b/lib/buildid.c
+@@ -18,31 +18,37 @@ static int parse_build_id_buf(unsigned char *build_id,
+ const void *note_start,
+ Elf32_Word note_size)
+ {
+- Elf32_Word note_offs = 0, new_offs;
+-
+- while (note_offs + sizeof(Elf32_Nhdr) < note_size) {
+- Elf32_Nhdr *nhdr = (Elf32_Nhdr *)(note_start + note_offs);
++ const char note_name[] = "GNU";
++ const size_t note_name_sz = sizeof(note_name);
++ u64 note_off = 0, new_off, name_sz, desc_sz;
++ const char *data;
++
++ while (note_off + sizeof(Elf32_Nhdr) < note_size &&
++ note_off + sizeof(Elf32_Nhdr) > note_off /* overflow */) {
++ Elf32_Nhdr *nhdr = (Elf32_Nhdr *)(note_start + note_off);
++
++ name_sz = READ_ONCE(nhdr->n_namesz);
++ desc_sz = READ_ONCE(nhdr->n_descsz);
++
++ new_off = note_off + sizeof(Elf32_Nhdr);
++ if (check_add_overflow(new_off, ALIGN(name_sz, 4), &new_off) ||
++ check_add_overflow(new_off, ALIGN(desc_sz, 4), &new_off) ||
++ new_off > note_size)
++ break;
+
+ if (nhdr->n_type == BUILD_ID &&
+- nhdr->n_namesz == sizeof("GNU") &&
+- !strcmp((char *)(nhdr + 1), "GNU") &&
+- nhdr->n_descsz > 0 &&
+- nhdr->n_descsz <= BUILD_ID_SIZE_MAX) {
+- memcpy(build_id,
+- note_start + note_offs +
+- ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr),
+- nhdr->n_descsz);
+- memset(build_id + nhdr->n_descsz, 0,
+- BUILD_ID_SIZE_MAX - nhdr->n_descsz);
++ name_sz == note_name_sz &&
++ memcmp(nhdr + 1, note_name, note_name_sz) == 0 &&
++ desc_sz > 0 && desc_sz <= BUILD_ID_SIZE_MAX) {
++ data = note_start + note_off + ALIGN(note_name_sz, 4);
++ memcpy(build_id, data, desc_sz);
++ memset(build_id + desc_sz, 0, BUILD_ID_SIZE_MAX - desc_sz);
+ if (size)
+- *size = nhdr->n_descsz;
++ *size = desc_sz;
+ return 0;
+ }
+- new_offs = note_offs + sizeof(Elf32_Nhdr) +
+- ALIGN(nhdr->n_namesz, 4) + ALIGN(nhdr->n_descsz, 4);
+- if (new_offs <= note_offs) /* overflow */
+- break;
+- note_offs = new_offs;
++
++ note_off = new_off;
+ }
+
+ return -EINVAL;
+@@ -71,7 +77,7 @@ static int get_build_id_32(const void *page_addr, unsigned char *build_id,
+ {
+ Elf32_Ehdr *ehdr = (Elf32_Ehdr *)page_addr;
+ Elf32_Phdr *phdr;
+- int i;
++ __u32 i, phnum;
+
+ /*
+ * FIXME
+@@ -80,18 +86,19 @@ static int get_build_id_32(const void *page_addr, unsigned char *build_id,
+ */
+ if (ehdr->e_phoff != sizeof(Elf32_Ehdr))
+ return -EINVAL;
++
++ phnum = READ_ONCE(ehdr->e_phnum);
+ /* only supports phdr that fits in one page */
+- if (ehdr->e_phnum >
+- (PAGE_SIZE - sizeof(Elf32_Ehdr)) / sizeof(Elf32_Phdr))
++ if (phnum > (PAGE_SIZE - sizeof(Elf32_Ehdr)) / sizeof(Elf32_Phdr))
+ return -EINVAL;
+
+ phdr = (Elf32_Phdr *)(page_addr + sizeof(Elf32_Ehdr));
+
+- for (i = 0; i < ehdr->e_phnum; ++i) {
++ for (i = 0; i < phnum; ++i) {
+ if (phdr[i].p_type == PT_NOTE &&
+ !parse_build_id(page_addr, build_id, size,
+- page_addr + phdr[i].p_offset,
+- phdr[i].p_filesz))
++ page_addr + READ_ONCE(phdr[i].p_offset),
++ READ_ONCE(phdr[i].p_filesz)))
+ return 0;
+ }
+ return -EINVAL;
+@@ -103,7 +110,7 @@ static int get_build_id_64(const void *page_addr, unsigned char *build_id,
+ {
+ Elf64_Ehdr *ehdr = (Elf64_Ehdr *)page_addr;
+ Elf64_Phdr *phdr;
+- int i;
++ __u32 i, phnum;
+
+ /*
+ * FIXME
+@@ -112,18 +119,19 @@ static int get_build_id_64(const void *page_addr, unsigned char *build_id,
+ */
+ if (ehdr->e_phoff != sizeof(Elf64_Ehdr))
+ return -EINVAL;
++
++ phnum = READ_ONCE(ehdr->e_phnum);
+ /* only supports phdr that fits in one page */
+- if (ehdr->e_phnum >
+- (PAGE_SIZE - sizeof(Elf64_Ehdr)) / sizeof(Elf64_Phdr))
++ if (phnum > (PAGE_SIZE - sizeof(Elf64_Ehdr)) / sizeof(Elf64_Phdr))
+ return -EINVAL;
+
+ phdr = (Elf64_Phdr *)(page_addr + sizeof(Elf64_Ehdr));
+
+- for (i = 0; i < ehdr->e_phnum; ++i) {
++ for (i = 0; i < phnum; ++i) {
+ if (phdr[i].p_type == PT_NOTE &&
+ !parse_build_id(page_addr, build_id, size,
+- page_addr + phdr[i].p_offset,
+- phdr[i].p_filesz))
++ page_addr + READ_ONCE(phdr[i].p_offset),
++ READ_ONCE(phdr[i].p_filesz)))
+ return 0;
+ }
+ return -EINVAL;
+@@ -152,6 +160,10 @@ int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
+ page = find_get_page(vma->vm_file->f_mapping, 0);
+ if (!page)
+ return -EFAULT; /* page not mapped */
++ if (!PageUptodate(page)) {
++ put_page(page);
++ return -EFAULT;
++ }
+
+ ret = -EINVAL;
+ page_addr = kmap_local_page(page);
+--
+2.43.0
+
--- /dev/null
+From 98fa309a628522136f37f541e0fef03c0d9b55b8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Oct 2024 19:23:51 +0000
+Subject: mm: z3fold: deprecate CONFIG_Z3FOLD
+
+From: Yosry Ahmed <yosryahmed@google.com>
+
+The z3fold compressed pages allocator is rarely used; most users use
+zsmalloc. The only disadvantage of zsmalloc in comparison is the
+dependency on MMU, and zbud is a more common option for !MMU as it was the
+default zswap allocator for a long time.
+
+Historically, zsmalloc had worse latency than zbud and z3fold but offered
+better memory savings. This is no longer the case as shown by a simple
+recent analysis [1]. That analysis showed that z3fold does not have any
+advantage over zsmalloc or zbud considering both performance and memory
+usage. In a kernel build test on tmpfs in a limited cgroup, z3fold took
+3% more time and used 1.8% more memory. The latency of zswap_load() was
+7% higher, and that of zswap_store() was 10% higher. Zsmalloc is better
+in all metrics.
+
+Moreover, z3fold apparently has latent bugs, which were made noticeable by
+a recent soft lockup bug report with z3fold [2]. Switching to zsmalloc
+not only fixed the problem, but also reduced the swap usage from 6~8G to
+1~2G. Other users have also reported being bitten by mistakenly enabling
+z3fold.
+
+Other than hurting users, z3fold is repeatedly causing wasted engineering
+effort. Apart from investigating the above bug, it came up in multiple
+development discussions (e.g. [3]) as something we need to handle, when
+there aren't any legit users (at least not intentionally).
+
+The natural course of action is to deprecate z3fold, and remove it in a
+few cycles if no objections are raised from active users. Next on the list
+should be zbud, as it offers marginal latency gains at the cost of huge
+memory waste when compared to zsmalloc. That one will need to wait until
+zsmalloc does not depend on MMU.
+
+Rename the user-visible config option from CONFIG_Z3FOLD to
+CONFIG_Z3FOLD_DEPRECATED so that users with CONFIG_Z3FOLD=y get a new
+prompt with explanation during make oldconfig. Also, remove
+CONFIG_Z3FOLD=y from defconfigs.
+
+[1]https://lore.kernel.org/lkml/CAJD7tkbRF6od-2x_L8-A1QL3=2Ww13sCj4S3i4bNndqF+3+_Vg@mail.gmail.com/
+[2]https://lore.kernel.org/lkml/EF0ABD3E-A239-4111-A8AB-5C442E759CF3@gmail.com/
+[3]https://lore.kernel.org/lkml/CAJD7tkbnmeVugfunffSovJf9FAgy9rhBVt_tx=nxUveLUfqVsA@mail.gmail.com/
+
+[arnd@arndb.de: deprecate ZSWAP_ZPOOL_DEFAULT_Z3FOLD as well]
+ Link: https://lkml.kernel.org/r/20240909202625.1054880-1-arnd@kernel.org
+Link: https://lkml.kernel.org/r/20240904233343.933462-1-yosryahmed@google.com
+Signed-off-by: Yosry Ahmed <yosryahmed@google.com>
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Acked-by: Chris Down <chris@chrisdown.name>
+Acked-by: Nhat Pham <nphamcs@gmail.com>
+Acked-by: Johannes Weiner <hannes@cmpxchg.org>
+Acked-by: Vitaly Wool <vitaly.wool@konsulko.com>
+Acked-by: Christoph Hellwig <hch@lst.de>
+Cc: Aneesh Kumar K.V <aneesh.kumar@kernel.org>
+Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
+Cc: Huacai Chen <chenhuacai@kernel.org>
+Cc: Miaohe Lin <linmiaohe@huawei.com>
+Cc: Michael Ellerman <mpe@ellerman.id.au>
+Cc: Naveen N. Rao <naveen.n.rao@linux.ibm.com>
+Cc: Nicholas Piggin <npiggin@gmail.com>
+Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
+Cc: WANG Xuerui <kernel@xen0n.name>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+(cherry picked from commit 7a2369b74abf76cd3e54c45b30f6addb497f831b)
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/loongarch/configs/loongson3_defconfig | 1 -
+ arch/powerpc/configs/ppc64_defconfig | 1 -
+ mm/Kconfig | 25 ++++++++++++++++------
+ 3 files changed, 19 insertions(+), 8 deletions(-)
+
+diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig
+index b4252c357c8e2..75b366407a60a 100644
+--- a/arch/loongarch/configs/loongson3_defconfig
++++ b/arch/loongarch/configs/loongson3_defconfig
+@@ -96,7 +96,6 @@ CONFIG_ZPOOL=y
+ CONFIG_ZSWAP=y
+ CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y
+ CONFIG_ZBUD=y
+-CONFIG_Z3FOLD=y
+ CONFIG_ZSMALLOC=m
+ # CONFIG_COMPAT_BRK is not set
+ CONFIG_MEMORY_HOTPLUG=y
+diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
+index 544a65fda77bc..d39284489aa26 100644
+--- a/arch/powerpc/configs/ppc64_defconfig
++++ b/arch/powerpc/configs/ppc64_defconfig
+@@ -81,7 +81,6 @@ CONFIG_MODULE_SIG_SHA512=y
+ CONFIG_PARTITION_ADVANCED=y
+ CONFIG_BINFMT_MISC=m
+ CONFIG_ZSWAP=y
+-CONFIG_Z3FOLD=y
+ CONFIG_ZSMALLOC=y
+ # CONFIG_SLAB_MERGE_DEFAULT is not set
+ CONFIG_SLAB_FREELIST_RANDOM=y
+diff --git a/mm/Kconfig b/mm/Kconfig
+index b4cb45255a541..baf7ce6a888c0 100644
+--- a/mm/Kconfig
++++ b/mm/Kconfig
+@@ -146,12 +146,15 @@ config ZSWAP_ZPOOL_DEFAULT_ZBUD
+ help
+ Use the zbud allocator as the default allocator.
+
+-config ZSWAP_ZPOOL_DEFAULT_Z3FOLD
+- bool "z3fold"
+- select Z3FOLD
++config ZSWAP_ZPOOL_DEFAULT_Z3FOLD_DEPRECATED
++ bool "z3foldi (DEPRECATED)"
++ select Z3FOLD_DEPRECATED
+ help
+ Use the z3fold allocator as the default allocator.
+
++ Deprecated and scheduled for removal in a few cycles,
++ see CONFIG_Z3FOLD_DEPRECATED.
++
+ config ZSWAP_ZPOOL_DEFAULT_ZSMALLOC
+ bool "zsmalloc"
+ select ZSMALLOC
+@@ -163,7 +166,7 @@ config ZSWAP_ZPOOL_DEFAULT
+ string
+ depends on ZSWAP
+ default "zbud" if ZSWAP_ZPOOL_DEFAULT_ZBUD
+- default "z3fold" if ZSWAP_ZPOOL_DEFAULT_Z3FOLD
++ default "z3fold" if ZSWAP_ZPOOL_DEFAULT_Z3FOLD_DEPRECATED
+ default "zsmalloc" if ZSWAP_ZPOOL_DEFAULT_ZSMALLOC
+ default ""
+
+@@ -177,15 +180,25 @@ config ZBUD
+ deterministic reclaim properties that make it preferable to a higher
+ density approach when reclaim will be used.
+
+-config Z3FOLD
+- tristate "3:1 compression allocator (z3fold)"
++config Z3FOLD_DEPRECATED
++ tristate "3:1 compression allocator (z3fold) (DEPRECATED)"
+ depends on ZSWAP
+ help
++ Deprecated and scheduled for removal in a few cycles. If you have
++ a good reason for using Z3FOLD over ZSMALLOC, please contact
++ linux-mm@kvack.org and the zswap maintainers.
++
+ A special purpose allocator for storing compressed pages.
+ It is designed to store up to three compressed pages per physical
+ page. It is a ZBUD derivative so the simplicity and determinism are
+ still there.
+
++config Z3FOLD
++ tristate
++ default y if Z3FOLD_DEPRECATED=y
++ default m if Z3FOLD_DEPRECATED=m
++ depends on Z3FOLD_DEPRECATED
++
+ config ZSMALLOC
+ tristate
+ prompt "N:1 compression allocator (zsmalloc)" if ZSWAP
+--
+2.43.0
+
--- /dev/null
+From a004a5f92a2218bfaaca54e0706fdb27595aba22 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 17 Jun 2024 13:17:26 -0700
+Subject: net: mana: Add support for page sizes other than 4KB on ARM64
+
+From: Haiyang Zhang <haiyangz@microsoft.com>
+
+[ Upstream commit 382d1741b5b2feffef7942dd074206372afe1a96 ]
+
+As defined by the MANA hardware spec, the minimum queue size for DMA is
+4KB, and queue sizes must be a power of 2. In addition, the HWC queue
+size has to be exactly 4KB.
+
+To support page sizes other than 4KB on ARM64, define the minimum
+queue size as a macro separate from PAGE_SIZE, which was always
+assumed to be 4KB before ARM64 support.
+
+Also, add MANA-specific macros and update code related to size
+alignment, DMA region calculations, etc.
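+
+The core of the conversion (lifted from the diff below): describe DMA
+regions in fixed 4KB hardware pages rather than in PAGE_SIZE units.
+On a 64KB-page ARM64 kernel, for example, a single system page covers
+64KB / 4KB = 16 hardware pages:
+
+	unsigned int num_page = gmi->length / MANA_PAGE_SIZE;
+
+	for (i = 0; i < num_page; i++)
+		req->page_addr_list[i] = gmi->dma_handle + i * MANA_PAGE_SIZE;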
+
+Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
+Reviewed-by: Michael Kelley <mhklinux@outlook.com>
+Link: https://lore.kernel.org/r/1718655446-6576-1-git-send-email-haiyangz@microsoft.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: 9e517a8e9d9a ("RDMA/mana_ib: use the correct page table index based on hardware page size")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/microsoft/Kconfig | 2 +-
+ drivers/net/ethernet/microsoft/mana/gdma_main.c | 10 +++++-----
+ drivers/net/ethernet/microsoft/mana/hw_channel.c | 14 +++++++-------
+ drivers/net/ethernet/microsoft/mana/mana_en.c | 8 ++++----
+ drivers/net/ethernet/microsoft/mana/shm_channel.c | 13 +++++++------
+ include/net/mana/gdma.h | 10 +++++++++-
+ include/net/mana/mana.h | 3 ++-
+ 7 files changed, 35 insertions(+), 25 deletions(-)
+
+diff --git a/drivers/net/ethernet/microsoft/Kconfig b/drivers/net/ethernet/microsoft/Kconfig
+index 286f0d5697a16..901fbffbf718e 100644
+--- a/drivers/net/ethernet/microsoft/Kconfig
++++ b/drivers/net/ethernet/microsoft/Kconfig
+@@ -18,7 +18,7 @@ if NET_VENDOR_MICROSOFT
+ config MICROSOFT_MANA
+ tristate "Microsoft Azure Network Adapter (MANA) support"
+ depends on PCI_MSI
+- depends on X86_64 || (ARM64 && !CPU_BIG_ENDIAN && ARM64_4K_PAGES)
++ depends on X86_64 || (ARM64 && !CPU_BIG_ENDIAN)
+ depends on PCI_HYPERV
+ select AUXILIARY_BUS
+ select PAGE_POOL
+diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
+index 1332db9a08eb9..e1d70d21e207f 100644
+--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
++++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
+@@ -182,7 +182,7 @@ int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
+ dma_addr_t dma_handle;
+ void *buf;
+
+- if (length < PAGE_SIZE || !is_power_of_2(length))
++ if (length < MANA_PAGE_SIZE || !is_power_of_2(length))
+ return -EINVAL;
+
+ gmi->dev = gc->dev;
+@@ -717,7 +717,7 @@ EXPORT_SYMBOL_NS(mana_gd_destroy_dma_region, NET_MANA);
+ static int mana_gd_create_dma_region(struct gdma_dev *gd,
+ struct gdma_mem_info *gmi)
+ {
+- unsigned int num_page = gmi->length / PAGE_SIZE;
++ unsigned int num_page = gmi->length / MANA_PAGE_SIZE;
+ struct gdma_create_dma_region_req *req = NULL;
+ struct gdma_create_dma_region_resp resp = {};
+ struct gdma_context *gc = gd->gdma_context;
+@@ -727,10 +727,10 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd,
+ int err;
+ int i;
+
+- if (length < PAGE_SIZE || !is_power_of_2(length))
++ if (length < MANA_PAGE_SIZE || !is_power_of_2(length))
+ return -EINVAL;
+
+- if (offset_in_page(gmi->virt_addr) != 0)
++ if (!MANA_PAGE_ALIGNED(gmi->virt_addr))
+ return -EINVAL;
+
+ hwc = gc->hwc.driver_data;
+@@ -751,7 +751,7 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd,
+ req->page_addr_list_len = num_page;
+
+ for (i = 0; i < num_page; i++)
+- req->page_addr_list[i] = gmi->dma_handle + i * PAGE_SIZE;
++ req->page_addr_list[i] = gmi->dma_handle + i * MANA_PAGE_SIZE;
+
+ err = mana_gd_send_request(gc, req_msg_size, req, sizeof(resp), &resp);
+ if (err)
+diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
+index 0a868679d342e..a00f915c51881 100644
+--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
++++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
+@@ -368,12 +368,12 @@ static int mana_hwc_create_cq(struct hw_channel_context *hwc, u16 q_depth,
+ int err;
+
+ eq_size = roundup_pow_of_two(GDMA_EQE_SIZE * q_depth);
+- if (eq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
+- eq_size = MINIMUM_SUPPORTED_PAGE_SIZE;
++ if (eq_size < MANA_MIN_QSIZE)
++ eq_size = MANA_MIN_QSIZE;
+
+ cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth);
+- if (cq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
+- cq_size = MINIMUM_SUPPORTED_PAGE_SIZE;
++ if (cq_size < MANA_MIN_QSIZE)
++ cq_size = MANA_MIN_QSIZE;
+
+ hwc_cq = kzalloc(sizeof(*hwc_cq), GFP_KERNEL);
+ if (!hwc_cq)
+@@ -435,7 +435,7 @@ static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth,
+
+ dma_buf->num_reqs = q_depth;
+
+- buf_size = PAGE_ALIGN(q_depth * max_msg_size);
++ buf_size = MANA_PAGE_ALIGN(q_depth * max_msg_size);
+
+ gmi = &dma_buf->mem_info;
+ err = mana_gd_alloc_memory(gc, buf_size, gmi);
+@@ -503,8 +503,8 @@ static int mana_hwc_create_wq(struct hw_channel_context *hwc,
+ else
+ queue_size = roundup_pow_of_two(GDMA_MAX_SQE_SIZE * q_depth);
+
+- if (queue_size < MINIMUM_SUPPORTED_PAGE_SIZE)
+- queue_size = MINIMUM_SUPPORTED_PAGE_SIZE;
++ if (queue_size < MANA_MIN_QSIZE)
++ queue_size = MANA_MIN_QSIZE;
+
+ hwc_wq = kzalloc(sizeof(*hwc_wq), GFP_KERNEL);
+ if (!hwc_wq)
+diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
+index bb77327bfa815..a637556dcfae8 100644
+--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
++++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
+@@ -1901,10 +1901,10 @@ static int mana_create_txq(struct mana_port_context *apc,
+ * to prevent overflow.
+ */
+ txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32;
+- BUILD_BUG_ON(!PAGE_ALIGNED(txq_size));
++ BUILD_BUG_ON(!MANA_PAGE_ALIGNED(txq_size));
+
+ cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE;
+- cq_size = PAGE_ALIGN(cq_size);
++ cq_size = MANA_PAGE_ALIGN(cq_size);
+
+ gc = gd->gdma_context;
+
+@@ -2203,8 +2203,8 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
+ if (err)
+ goto out;
+
+- rq_size = PAGE_ALIGN(rq_size);
+- cq_size = PAGE_ALIGN(cq_size);
++ rq_size = MANA_PAGE_ALIGN(rq_size);
++ cq_size = MANA_PAGE_ALIGN(cq_size);
+
+ /* Create RQ */
+ memset(&spec, 0, sizeof(spec));
+diff --git a/drivers/net/ethernet/microsoft/mana/shm_channel.c b/drivers/net/ethernet/microsoft/mana/shm_channel.c
+index 5553af9c8085a..0f1679ebad96b 100644
+--- a/drivers/net/ethernet/microsoft/mana/shm_channel.c
++++ b/drivers/net/ethernet/microsoft/mana/shm_channel.c
+@@ -6,6 +6,7 @@
+ #include <linux/io.h>
+ #include <linux/mm.h>
+
++#include <net/mana/gdma.h>
+ #include <net/mana/shm_channel.h>
+
+ #define PAGE_FRAME_L48_WIDTH_BYTES 6
+@@ -155,8 +156,8 @@ int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
+ return err;
+ }
+
+- if (!PAGE_ALIGNED(eq_addr) || !PAGE_ALIGNED(cq_addr) ||
+- !PAGE_ALIGNED(rq_addr) || !PAGE_ALIGNED(sq_addr))
++ if (!MANA_PAGE_ALIGNED(eq_addr) || !MANA_PAGE_ALIGNED(cq_addr) ||
++ !MANA_PAGE_ALIGNED(rq_addr) || !MANA_PAGE_ALIGNED(sq_addr))
+ return -EINVAL;
+
+ if ((eq_msix_index & VECTOR_MASK) != eq_msix_index)
+@@ -183,7 +184,7 @@ int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
+
+ /* EQ addr: low 48 bits of frame address */
+ shmem = (u64 *)ptr;
+- frame_addr = PHYS_PFN(eq_addr);
++ frame_addr = MANA_PFN(eq_addr);
+ *shmem = frame_addr & PAGE_FRAME_L48_MASK;
+ all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
+ (frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
+@@ -191,7 +192,7 @@ int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
+
+ /* CQ addr: low 48 bits of frame address */
+ shmem = (u64 *)ptr;
+- frame_addr = PHYS_PFN(cq_addr);
++ frame_addr = MANA_PFN(cq_addr);
+ *shmem = frame_addr & PAGE_FRAME_L48_MASK;
+ all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
+ (frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
+@@ -199,7 +200,7 @@ int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
+
+ /* RQ addr: low 48 bits of frame address */
+ shmem = (u64 *)ptr;
+- frame_addr = PHYS_PFN(rq_addr);
++ frame_addr = MANA_PFN(rq_addr);
+ *shmem = frame_addr & PAGE_FRAME_L48_MASK;
+ all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
+ (frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
+@@ -207,7 +208,7 @@ int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
+
+ /* SQ addr: low 48 bits of frame address */
+ shmem = (u64 *)ptr;
+- frame_addr = PHYS_PFN(sq_addr);
++ frame_addr = MANA_PFN(sq_addr);
+ *shmem = frame_addr & PAGE_FRAME_L48_MASK;
+ all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
+ (frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
+diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h
+index 27684135bb4d1..35507588a14d5 100644
+--- a/include/net/mana/gdma.h
++++ b/include/net/mana/gdma.h
+@@ -224,7 +224,15 @@ struct gdma_dev {
+ struct auxiliary_device *adev;
+ };
+
+-#define MINIMUM_SUPPORTED_PAGE_SIZE PAGE_SIZE
++/* MANA_PAGE_SIZE is the DMA unit */
++#define MANA_PAGE_SHIFT 12
++#define MANA_PAGE_SIZE BIT(MANA_PAGE_SHIFT)
++#define MANA_PAGE_ALIGN(x) ALIGN((x), MANA_PAGE_SIZE)
++#define MANA_PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), MANA_PAGE_SIZE)
++#define MANA_PFN(a) ((a) >> MANA_PAGE_SHIFT)
++
++/* Required by HW */
++#define MANA_MIN_QSIZE MANA_PAGE_SIZE
+
+ #define GDMA_CQE_SIZE 64
+ #define GDMA_EQE_SIZE 16
+diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h
+index 5927bd9d46bef..f384d3aaac741 100644
+--- a/include/net/mana/mana.h
++++ b/include/net/mana/mana.h
+@@ -42,7 +42,8 @@ enum TRI_STATE {
+
+ #define MAX_SEND_BUFFERS_PER_QUEUE 256
+
+-#define EQ_SIZE (8 * PAGE_SIZE)
++#define EQ_SIZE (8 * MANA_PAGE_SIZE)
++
+ #define LOG2_EQ_THROTTLE 3
+
+ #define MAX_PORTS_IN_MANA_DEV 256
+--
+2.43.0
+
--- /dev/null
+From 13ee95868836e89032a91cd386be3a1bbf80a80c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 Aug 2024 13:40:03 -0400
+Subject: NFSD: Async COPY result needs to return a write verifier
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+[ Upstream commit 9ed666eba4e0a2bb8ffaa3739d830b64d4f2aaad ]
+
+Currently, when NFSD handles an asynchronous COPY, it returns a
+zero write verifier, relying on the subsequent CB_OFFLOAD callback
+to pass the write verifier and a stable_how4 value to the client.
+
+However, if the CB_OFFLOAD never arrives at the client (for example,
+if a network partition occurs just as the server sends the
+CB_OFFLOAD operation), the client will never receive this verifier.
+Thus, if the client sends a follow-up COMMIT, there is no way for
+the client to assess the COMMIT result.
+
+The usual recovery for a missing CB_OFFLOAD is for the client to
+send an OFFLOAD_STATUS operation, but that operation does not carry
+a write verifier in its result. Neither does it carry a stable_how4
+value, so the client /must/ send a COMMIT in this case -- which will
+always fail because currently there's still no write verifier in the
+COPY result.
+
+Thus the server needs to return a normal write verifier in its COPY
+result even if the COPY operation is to be performed asynchronously.
+
+If the server recognizes the callback stateid in subsequent
+OFFLOAD_STATUS operations, then obviously it has not restarted, and
+the write verifier the client received in the COPY result is still
+valid and can be used to assess a COMMIT of the copied data, if one
+is needed.
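+
+The shape of the fix, as seen in the diff below: fill in the verifier
+in the COPY result unconditionally, before the sync/async split.
+
+	result = &copy->cp_res;
+	nfsd_copy_write_verifier((__be32 *)&result->wr_verifier.data, nn);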
+
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Stable-dep-of: aadc3bbea163 ("NFSD: Limit the number of concurrent async COPY operations")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfsd/nfs4proc.c | 23 ++++++++---------------
+ 1 file changed, 8 insertions(+), 15 deletions(-)
+
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index 2e39cf2e502a3..60c526adc27c6 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -751,15 +751,6 @@ nfsd4_access(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ &access->ac_supported);
+ }
+
+-static void gen_boot_verifier(nfs4_verifier *verifier, struct net *net)
+-{
+- __be32 *verf = (__be32 *)verifier->data;
+-
+- BUILD_BUG_ON(2*sizeof(*verf) != sizeof(verifier->data));
+-
+- nfsd_copy_write_verifier(verf, net_generic(net, nfsd_net_id));
+-}
+-
+ static __be32
+ nfsd4_commit(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ union nfsd4_op_u *u)
+@@ -1630,7 +1621,6 @@ static void nfsd4_init_copy_res(struct nfsd4_copy *copy, bool sync)
+ test_bit(NFSD4_COPY_F_COMMITTED, ©->cp_flags) ?
+ NFS_FILE_SYNC : NFS_UNSTABLE;
+ nfsd4_copy_set_sync(copy, sync);
+- gen_boot_verifier(©->cp_res.wr_verifier, copy->cp_clp->net);
+ }
+
+ static ssize_t _nfsd_copy_file_range(struct nfsd4_copy *copy,
+@@ -1803,9 +1793,11 @@ static __be32
+ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ union nfsd4_op_u *u)
+ {
++ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
++ struct nfsd4_copy *async_copy = NULL;
+ struct nfsd4_copy *copy = &u->copy;
++ struct nfsd42_write_res *result;
+ __be32 status;
+- struct nfsd4_copy *async_copy = NULL;
+
+ /*
+ * Currently, async COPY is not reliable. Force all COPY
+@@ -1814,6 +1806,9 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ */
+ nfsd4_copy_set_sync(copy, true);
+
++ result = ©->cp_res;
++ nfsd_copy_write_verifier((__be32 *)&result->wr_verifier.data, nn);
++
+ copy->cp_clp = cstate->clp;
+ if (nfsd4_ssc_is_inter(copy)) {
+ trace_nfsd_copy_inter(copy);
+@@ -1838,8 +1833,6 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ memcpy(©->fh, &cstate->current_fh.fh_handle,
+ sizeof(struct knfsd_fh));
+ if (nfsd4_copy_is_async(copy)) {
+- struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+-
+ status = nfserrno(-ENOMEM);
+ async_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL);
+ if (!async_copy)
+@@ -1851,8 +1844,8 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ goto out_err;
+ if (!nfs4_init_copy_state(nn, copy))
+ goto out_err;
+- memcpy(©->cp_res.cb_stateid, ©->cp_stateid.cs_stid,
+- sizeof(copy->cp_res.cb_stateid));
++ memcpy(&result->cb_stateid, ©->cp_stateid.cs_stid,
++ sizeof(result->cb_stateid));
+ dup_copy_fields(copy, async_copy);
+ async_copy->copy_task = kthread_create(nfsd4_do_async_copy,
+ async_copy, "%s", "copy thread");
+--
+2.43.0
+
--- /dev/null
+From 1cac163ac58d07633292dbbc082546b510ff536a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 Aug 2024 13:40:04 -0400
+Subject: NFSD: Limit the number of concurrent async COPY operations
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+[ Upstream commit aadc3bbea163b6caaaebfdd2b6c4667fbc726752 ]
+
+Nothing appears to limit the number of concurrent async COPY
+operations that clients can start. In addition, AFAICT each async
+COPY can copy an unlimited number of 4MB chunks, so can run for a
+long time. Thus IMO async COPY can become a DoS vector.
+
+Add a restriction mechanism that bounds the number of concurrent
+background COPY operations. Start simple and try to be fair -- this
+patch implements a per-namespace limit.
+
+An async COPY request that occurs while this limit is exceeded gets
+NFS4ERR_DELAY. The requesting client can choose to send the request
+again after a delay or fall back to a traditional read/write style
+copy.
+
+If there is need to make the mechanism more sophisticated, we can
+visit that in future patches.
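+
+The admission check itself is a plain atomic counter cap (sketch;
+"limit" stands in for the per-pool thread count used in the diff
+below):
+
+	if (atomic_inc_return(&nn->pending_async_copies) > limit) {
+		atomic_dec(&nn->pending_async_copies);
+		goto out_err;	/* surfaces as NFS4ERR_DELAY to the client */
+	}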
+
+Cc: stable@vger.kernel.org
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfsd/netns.h | 1 +
+ fs/nfsd/nfs4proc.c | 11 +++++++++--
+ fs/nfsd/nfs4state.c | 1 +
+ fs/nfsd/xdr4.h | 1 +
+ 4 files changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
+index 14ec156563209..5cae26917436c 100644
+--- a/fs/nfsd/netns.h
++++ b/fs/nfsd/netns.h
+@@ -148,6 +148,7 @@ struct nfsd_net {
+ u32 s2s_cp_cl_id;
+ struct idr s2s_cp_stateids;
+ spinlock_t s2s_cp_lock;
++ atomic_t pending_async_copies;
+
+ /*
+ * Version information
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index 60c526adc27c6..5768b2ff1d1d1 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -1279,6 +1279,7 @@ static void nfs4_put_copy(struct nfsd4_copy *copy)
+ {
+ if (!refcount_dec_and_test(©->refcount))
+ return;
++ atomic_dec(©->cp_nn->pending_async_copies);
+ kfree(copy->cp_src);
+ kfree(copy);
+ }
+@@ -1833,10 +1834,16 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ memcpy(©->fh, &cstate->current_fh.fh_handle,
+ sizeof(struct knfsd_fh));
+ if (nfsd4_copy_is_async(copy)) {
+- status = nfserrno(-ENOMEM);
+ async_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL);
+ if (!async_copy)
+ goto out_err;
++ async_copy->cp_nn = nn;
++ /* Arbitrary cap on number of pending async copy operations */
++ if (atomic_inc_return(&nn->pending_async_copies) >
++ (int)rqstp->rq_pool->sp_nrthreads) {
++ atomic_dec(&nn->pending_async_copies);
++ goto out_err;
++ }
+ INIT_LIST_HEAD(&async_copy->copies);
+ refcount_set(&async_copy->refcount, 1);
+ async_copy->cp_src = kmalloc(sizeof(*async_copy->cp_src), GFP_KERNEL);
+@@ -1876,7 +1883,7 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ }
+ if (async_copy)
+ cleanup_async_copy(async_copy);
+- status = nfserrno(-ENOMEM);
++ status = nfserr_jukebox;
+ goto out;
+ }
+
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index f4eae4b65572a..3837f4e417247 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -8575,6 +8575,7 @@ static int nfs4_state_create_net(struct net *net)
+ spin_lock_init(&nn->client_lock);
+ spin_lock_init(&nn->s2s_cp_lock);
+ idr_init(&nn->s2s_cp_stateids);
++ atomic_set(&nn->pending_async_copies, 0);
+
+ spin_lock_init(&nn->blocked_locks_lock);
+ INIT_LIST_HEAD(&nn->blocked_locks_lru);
+diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
+index fbdd42cde1fa5..2a21a7662e030 100644
+--- a/fs/nfsd/xdr4.h
++++ b/fs/nfsd/xdr4.h
+@@ -713,6 +713,7 @@ struct nfsd4_copy {
+ struct nfsd4_ssc_umount_item *ss_nsui;
+ struct nfs_fh c_fh;
+ nfs4_stateid stateid;
++ struct nfsd_net *cp_nn;
+ };
+
+ static inline void nfsd4_copy_set_sync(struct nfsd4_copy *copy, bool sync)
+--
+2.43.0
+
--- /dev/null
+From e45a4b57058714c5c6b3c82e363527c3cab737d9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Sep 2024 23:04:46 +0200
+Subject: r8169: add tally counter fields added with RTL8125
+
+From: Heiner Kallweit <hkallweit1@gmail.com>
+
+[ Upstream commit ced8e8b8f40accfcce4a2bbd8b150aa76d5eff9a ]
+
+RTL8125 added fields to the tally counter, which may result in the chip
+DMA'ing these new fields to unallocated memory. Therefore make sure
+that the allocated memory area is big enough to hold all of the
+tally counter values, even if we use only part of it.
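+
+The counters buffer is allocated once and sized from the struct before
+being handed to the chip as a DMA target; a rough sketch (hypothetical
+field names):
+
+	/* the chip may write the full struct, so size the allocation from it */
+	tp->counters = dmam_alloc_coherent(&pdev->dev, sizeof(*tp->counters),
+					   &tp->counters_phys_addr, GFP_KERNEL);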
+
+Fixes: f1bce4ad2f1c ("r8169: add support for RTL8125")
+Cc: stable@vger.kernel.org
+Signed-off-by: Heiner Kallweit <hkallweit1@gmail.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/741d26a9-2b2b-485d-91d9-ecb302e345b5@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/realtek/r8169_main.c | 27 +++++++++++++++++++++++
+ 1 file changed, 27 insertions(+)
+
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index aa6a73882f914..f5396aafe9ab6 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -577,6 +577,33 @@ struct rtl8169_counters {
+ __le32 rx_multicast;
+ __le16 tx_aborted;
+ __le16 tx_underrun;
++ /* new since RTL8125 */
++ __le64 tx_octets;
++ __le64 rx_octets;
++ __le64 rx_multicast64;
++ __le64 tx_unicast64;
++ __le64 tx_broadcast64;
++ __le64 tx_multicast64;
++ __le32 tx_pause_on;
++ __le32 tx_pause_off;
++ __le32 tx_pause_all;
++ __le32 tx_deferred;
++ __le32 tx_late_collision;
++ __le32 tx_all_collision;
++ __le32 tx_aborted32;
++ __le32 align_errors32;
++ __le32 rx_frame_too_long;
++ __le32 rx_runt;
++ __le32 rx_pause_on;
++ __le32 rx_pause_off;
++ __le32 rx_pause_all;
++ __le32 rx_unknown_opcode;
++ __le32 rx_mac_error;
++ __le32 tx_underrun32;
++ __le32 rx_mac_missed;
++ __le32 rx_tcam_dropped;
++ __le32 tdu;
++ __le32 rdu;
+ };
+
+ struct rtl8169_tc_offsets {
+--
+2.43.0
+
--- /dev/null
+From 9e05ecf1b045ae1494e92bfa9d453d3a9a2148ec Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Sep 2024 15:00:21 +0100
+Subject: r8169: Fix spelling mistake: "tx_underun" -> "tx_underrun"
+
+From: Colin Ian King <colin.i.king@gmail.com>
+
+[ Upstream commit 8df9439389a44fb2cc4ef695e08d6a8870b1616c ]
+
+There is a spelling mistake in the struct field tx_underun; rename
+it to tx_underrun.
+
+Signed-off-by: Colin Ian King <colin.i.king@gmail.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Reviewed-by: Heiner Kallweit <hkallweit1@gmail.com>
+Link: https://patch.msgid.link/20240909140021.64884-1-colin.i.king@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: ced8e8b8f40a ("r8169: add tally counter fields added with RTL8125")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/realtek/r8169_main.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index b6e89fc5a4ae7..aa6a73882f914 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -576,7 +576,7 @@ struct rtl8169_counters {
+ __le64 rx_broadcast;
+ __le32 rx_multicast;
+ __le16 tx_aborted;
+- __le16 tx_underun;
++ __le16 tx_underrun;
+ };
+
+ struct rtl8169_tc_offsets {
+@@ -1841,7 +1841,7 @@ static void rtl8169_get_ethtool_stats(struct net_device *dev,
+ data[9] = le64_to_cpu(counters->rx_broadcast);
+ data[10] = le32_to_cpu(counters->rx_multicast);
+ data[11] = le16_to_cpu(counters->tx_aborted);
+- data[12] = le16_to_cpu(counters->tx_underun);
++ data[12] = le16_to_cpu(counters->tx_underrun);
+ }
+
+ static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+--
+2.43.0
+
--- /dev/null
+From e3d9ba6f5def7ece80a155d75555ce949429538e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 30 Aug 2024 08:16:32 -0700
+Subject: RDMA/mana_ib: use the correct page table index based on hardware page
+ size
+
+From: Long Li <longli@microsoft.com>
+
+[ Upstream commit 9e517a8e9d9a303bf9bde35e5c5374795544c152 ]
+
+MANA hardware uses a 4K page size. When calculating the page table
+index, the driver should use the hardware page size, not the system
+page size.
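+
+Worked example (illustrative): with a hardware page size page_sz of 8KB,
+order_base_2(8192) = 13 and the index is 13 - MANA_PAGE_SHIFT = 1;
+subtracting PAGE_SHIFT instead on a 64KB-page kernel (PAGE_SHIFT = 16)
+would yield a bogus negative value.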
+
+Cc: stable@vger.kernel.org
+Fixes: 0266a177631d ("RDMA/mana_ib: Add a driver for Microsoft Azure Network Adapter")
+Signed-off-by: Long Li <longli@microsoft.com>
+Link: https://patch.msgid.link/1725030993-16213-1-git-send-email-longli@linuxonhyperv.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/mana/main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
+index 1543b436ddc68..f2a2ce800443a 100644
+--- a/drivers/infiniband/hw/mana/main.c
++++ b/drivers/infiniband/hw/mana/main.c
+@@ -383,7 +383,7 @@ static int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem
+
+ create_req->length = umem->length;
+ create_req->offset_in_page = ib_umem_dma_offset(umem, page_sz);
+- create_req->gdma_page_type = order_base_2(page_sz) - PAGE_SHIFT;
++ create_req->gdma_page_type = order_base_2(page_sz) - MANA_PAGE_SHIFT;
+ create_req->page_count = num_pages_total;
+
+ ibdev_dbg(&dev->ib_dev, "size_dma_region %lu num_pages_total %lu\n",
+--
+2.43.0
+
--- /dev/null
+From 67f3dcb9f3b385a548709c2c6d4042d843989945 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Aug 2024 13:11:26 +0530
+Subject: remoteproc: k3-r5: Acquire mailbox handle during probe routine
+
+From: Beleswar Padhi <b-padhi@ti.com>
+
+[ Upstream commit f3f11cfe890733373ddbb1ce8991ccd4ee5e79e1 ]
+
+Acquire the mailbox handle during device probe and do not release the
+handle in the stop/detach routine or error paths. This removes the
+redundant requests for the mbox handle later during rproc start/attach.
+It also allows the remoteproc driver's probe to be deferred if the
+mailbox is not probed yet.
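+
+The deferral falls out of the standard dev_err_probe() pattern (taken
+from the diff below): when mbox_request_channel() returns -EPROBE_DEFER,
+the error is propagated silently and the driver core retries the probe
+later.
+
+	kproc->mbox = mbox_request_channel(client, 0);
+	if (IS_ERR(kproc->mbox))
+		return dev_err_probe(dev, PTR_ERR(kproc->mbox),
+				     "mbox_request_channel failed\n");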
+
+Signed-off-by: Beleswar Padhi <b-padhi@ti.com>
+Link: https://lore.kernel.org/r/20240808074127.2688131-3-b-padhi@ti.com
+Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
+Stable-dep-of: 8fa052c29e50 ("remoteproc: k3-r5: Delay notification of wakeup event")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/remoteproc/ti_k3_r5_remoteproc.c | 78 +++++++++---------------
+ 1 file changed, 30 insertions(+), 48 deletions(-)
+
+diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
+index eb09d2e9b32a4..6424b347aa4f2 100644
+--- a/drivers/remoteproc/ti_k3_r5_remoteproc.c
++++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
+@@ -194,6 +194,10 @@ static void k3_r5_rproc_mbox_callback(struct mbox_client *client, void *data)
+ const char *name = kproc->rproc->name;
+ u32 msg = omap_mbox_message(data);
+
++ /* Do not forward message from a detached core */
++ if (kproc->rproc->state == RPROC_DETACHED)
++ return;
++
+ dev_dbg(dev, "mbox msg: 0x%x\n", msg);
+
+ switch (msg) {
+@@ -229,6 +233,10 @@ static void k3_r5_rproc_kick(struct rproc *rproc, int vqid)
+ mbox_msg_t msg = (mbox_msg_t)vqid;
+ int ret;
+
++ /* Do not forward message to a detached core */
++ if (kproc->rproc->state == RPROC_DETACHED)
++ return;
++
+ /* send the index of the triggered virtqueue in the mailbox payload */
+ ret = mbox_send_message(kproc->mbox, (void *)msg);
+ if (ret < 0)
+@@ -399,12 +407,9 @@ static int k3_r5_rproc_request_mbox(struct rproc *rproc)
+ client->knows_txdone = false;
+
+ kproc->mbox = mbox_request_channel(client, 0);
+- if (IS_ERR(kproc->mbox)) {
+- ret = -EBUSY;
+- dev_err(dev, "mbox_request_channel failed: %ld\n",
+- PTR_ERR(kproc->mbox));
+- return ret;
+- }
++ if (IS_ERR(kproc->mbox))
++ return dev_err_probe(dev, PTR_ERR(kproc->mbox),
++ "mbox_request_channel failed\n");
+
+ /*
+ * Ping the remote processor, this is only for sanity-sake for now;
+@@ -552,10 +557,6 @@ static int k3_r5_rproc_start(struct rproc *rproc)
+ u32 boot_addr;
+ int ret;
+
+- ret = k3_r5_rproc_request_mbox(rproc);
+- if (ret)
+- return ret;
+-
+ boot_addr = rproc->bootaddr;
+ /* TODO: add boot_addr sanity checking */
+ dev_dbg(dev, "booting R5F core using boot addr = 0x%x\n", boot_addr);
+@@ -564,7 +565,7 @@ static int k3_r5_rproc_start(struct rproc *rproc)
+ core = kproc->core;
+ ret = ti_sci_proc_set_config(core->tsp, boot_addr, 0, 0);
+ if (ret)
+- goto put_mbox;
++ return ret;
+
+ /* unhalt/run all applicable cores */
+ if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
+@@ -580,13 +581,12 @@ static int k3_r5_rproc_start(struct rproc *rproc)
+ if (core != core0 && core0->rproc->state == RPROC_OFFLINE) {
+ dev_err(dev, "%s: can not start core 1 before core 0\n",
+ __func__);
+- ret = -EPERM;
+- goto put_mbox;
++ return -EPERM;
+ }
+
+ ret = k3_r5_core_run(core);
+ if (ret)
+- goto put_mbox;
++ return ret;
+ }
+
+ return 0;
+@@ -596,8 +596,6 @@ static int k3_r5_rproc_start(struct rproc *rproc)
+ if (k3_r5_core_halt(core))
+ dev_warn(core->dev, "core halt back failed\n");
+ }
+-put_mbox:
+- mbox_free_channel(kproc->mbox);
+ return ret;
+ }
+
+@@ -658,8 +656,6 @@ static int k3_r5_rproc_stop(struct rproc *rproc)
+ goto out;
+ }
+
+- mbox_free_channel(kproc->mbox);
+-
+ return 0;
+
+ unroll_core_halt:
+@@ -674,42 +670,22 @@ static int k3_r5_rproc_stop(struct rproc *rproc)
+ /*
+ * Attach to a running R5F remote processor (IPC-only mode)
+ *
+- * The R5F attach callback only needs to request the mailbox, the remote
+- * processor is already booted, so there is no need to issue any TI-SCI
+- * commands to boot the R5F cores in IPC-only mode. This callback is invoked
+- * only in IPC-only mode.
++ * The R5F attach callback is a NOP. The remote processor is already booted, and
++ * all required resources have been acquired during probe routine, so there is
++ * no need to issue any TI-SCI commands to boot the R5F cores in IPC-only mode.
++ * This callback is invoked only in IPC-only mode and exists because
++ * rproc_validate() checks for its existence.
+ */
+-static int k3_r5_rproc_attach(struct rproc *rproc)
+-{
+- struct k3_r5_rproc *kproc = rproc->priv;
+- struct device *dev = kproc->dev;
+- int ret;
+-
+- ret = k3_r5_rproc_request_mbox(rproc);
+- if (ret)
+- return ret;
+-
+- dev_info(dev, "R5F core initialized in IPC-only mode\n");
+- return 0;
+-}
++static int k3_r5_rproc_attach(struct rproc *rproc) { return 0; }
+
+ /*
+ * Detach from a running R5F remote processor (IPC-only mode)
+ *
+- * The R5F detach callback performs the opposite operation to attach callback
+- * and only needs to release the mailbox, the R5F cores are not stopped and
+- * will be left in booted state in IPC-only mode. This callback is invoked
+- * only in IPC-only mode.
++ * The R5F detach callback is a NOP. The R5F cores are not stopped and will be
++ * left in booted state in IPC-only mode. This callback is invoked only in
++ * IPC-only mode and exists for sanity sake.
+ */
+-static int k3_r5_rproc_detach(struct rproc *rproc)
+-{
+- struct k3_r5_rproc *kproc = rproc->priv;
+- struct device *dev = kproc->dev;
+-
+- mbox_free_channel(kproc->mbox);
+- dev_info(dev, "R5F core deinitialized in IPC-only mode\n");
+- return 0;
+-}
++static int k3_r5_rproc_detach(struct rproc *rproc) { return 0; }
+
+ /*
+ * This function implements the .get_loaded_rsc_table() callback and is used
+@@ -1278,6 +1254,10 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
+ kproc->rproc = rproc;
+ core->rproc = rproc;
+
++ ret = k3_r5_rproc_request_mbox(rproc);
++ if (ret)
++ return ret;
++
+ ret = k3_r5_rproc_configure_mode(kproc);
+ if (ret < 0)
+ goto err_config;
+@@ -1396,6 +1376,8 @@ static void k3_r5_cluster_rproc_exit(void *data)
+ }
+ }
+
++ mbox_free_channel(kproc->mbox);
++
+ rproc_del(rproc);
+
+ k3_r5_reserved_mem_exit(kproc);
+--
+2.43.0
+
--- /dev/null
+From 3947fa6bd5ad9bfe11a229ddd149f7469448db9c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 20 Aug 2024 16:20:04 +0530
+Subject: remoteproc: k3-r5: Delay notification of wakeup event
+
+From: Udit Kumar <u-kumar1@ti.com>
+
+[ Upstream commit 8fa052c29e509f3e47d56d7fc2ca28094d78c60a ]
+
+Occasionally, core1 was scheduled to boot before core0, which leads to
+the error:
+
+'k3_r5_rproc_start: can not start core 1 before core 0'.
+
+This was happening due to scheduling between the prepare and start
+callbacks. The probe function waits for an event, which is triggered by
+the prepare callback. To avoid the above condition, move the event
+trigger to the start callback instead of the prepare callback.
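+
+The resulting ordering (taken from the diff below): the wakeup is
+published only after the core has actually been started, so core1's
+waiter can no longer observe the event before core0 runs.
+
+	ret = k3_r5_core_run(core);
+	if (ret)
+		return ret;
+
+	core->released_from_reset = true;	/* publish after the run */
+	wake_up_interruptible(&cluster->core_transition);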
+
+Fixes: 61f6f68447ab ("remoteproc: k3-r5: Wait for core0 power-up before powering up core1")
+Signed-off-by: Udit Kumar <u-kumar1@ti.com>
+[ Applied wakeup event trigger only for Split-Mode booted rprocs ]
+Signed-off-by: Beleswar Padhi <b-padhi@ti.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20240820105004.2788327-1-b-padhi@ti.com
+Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/remoteproc/ti_k3_r5_remoteproc.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
+index 6424b347aa4f2..2992fd4eca648 100644
+--- a/drivers/remoteproc/ti_k3_r5_remoteproc.c
++++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
+@@ -469,8 +469,6 @@ static int k3_r5_rproc_prepare(struct rproc *rproc)
+ ret);
+ return ret;
+ }
+- core->released_from_reset = true;
+- wake_up_interruptible(&cluster->core_transition);
+
+ /*
+ * Newer IP revisions like on J7200 SoCs support h/w auto-initialization
+@@ -587,6 +585,9 @@ static int k3_r5_rproc_start(struct rproc *rproc)
+ ret = k3_r5_core_run(core);
+ if (ret)
+ return ret;
++
++ core->released_from_reset = true;
++ wake_up_interruptible(&cluster->core_transition);
+ }
+
+ return 0;
+--
+2.43.0
+
--- /dev/null
+From 5ec405f1ef7d68de99d26714cbd36db097f02de3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 3 Oct 2024 07:29:05 -0400
+Subject: sched: psi: fix bogus pressure spikes from aggregation race
+
+From: Johannes Weiner <hannes@cmpxchg.org>
+
+[ Upstream commit 3840cbe24cf060ea05a585ca497814609f5d47d1 ]
+
+Brandon reports sporadic, nonsensical spikes in cumulative pressure
+time (total=) when reading cpu.pressure at a high rate. This is due to
+a race condition between reader aggregation and tasks changing states.
+
+While it affects all states and all resources captured by PSI, in
+practice it most likely triggers with CPU pressure, since scheduling
+events are so frequent compared to other resource events.
+
+The race context is the live snooping of ongoing stalls during a
+pressure read. The read aggregates per-cpu records for stalls that
+have concluded, but will also incorporate ad-hoc the duration of any
+active state that hasn't been recorded yet. This is important to get
+timely measurements of ongoing stalls. Those ad-hoc samples are
+calculated on-the-fly up to the current time on that CPU; since the
+stall hasn't concluded, it's expected that this is the minimum amount
+of stall time that will enter the per-cpu records once it does.
+
+The problem is that the path that concludes the state uses a CPU clock
+read that is not synchronized against aggregators; the clock is read
+outside of the seqlock protection. This allows aggregators to race and
+snoop a stall with a longer duration than will actually be recorded.
+
+With the recorded stall time being less than the last snapshot
+remembered by the aggregator, a subsequent sample will underflow and
+observe a bogus delta value, resulting in an erratic jump in pressure.
+
+Fix this by moving the clock read of the state change into the seqlock
+protection. This ensures no aggregation can snoop live stalls past the
+time that's recorded when the state concludes.
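+
+The resulting invariant, as a sketch (the full change is in the diff
+below): the timestamp that closes out a state is taken inside the
+write-side seqcount section, so a racing reader either retries or sees
+a consistent snapshot, never a "live" stall longer than what gets
+recorded.
+
+	write_seqcount_begin(&groupc->seq);
+	now = cpu_clock(cpu);	/* clock read ordered against aggregators */
+	record_times(groupc, now);
+	...
+	write_seqcount_end(&groupc->seq);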
+
+Reported-by: Brandon Duffany <brandon@buildbuddy.io>
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=219194
+Link: https://lore.kernel.org/lkml/20240827121851.GB438928@cmpxchg.org/
+Fixes: df77430639c9 ("psi: Reduce calls to sched_clock() in psi")
+Cc: stable@vger.kernel.org
+Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
+Reviewed-by: Chengming Zhou <chengming.zhou@linux.dev>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/sched/psi.c | 26 ++++++++++++--------------
+ 1 file changed, 12 insertions(+), 14 deletions(-)
+
+diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
+index 507d7b8d79afa..8d4a3d9de4797 100644
+--- a/kernel/sched/psi.c
++++ b/kernel/sched/psi.c
+@@ -765,13 +765,14 @@ static void record_times(struct psi_group_cpu *groupc, u64 now)
+ }
+
+ static void psi_group_change(struct psi_group *group, int cpu,
+- unsigned int clear, unsigned int set, u64 now,
++ unsigned int clear, unsigned int set,
+ bool wake_clock)
+ {
+ struct psi_group_cpu *groupc;
+ unsigned int t, m;
+ enum psi_states s;
+ u32 state_mask;
++ u64 now;
+
+ lockdep_assert_rq_held(cpu_rq(cpu));
+ groupc = per_cpu_ptr(group->pcpu, cpu);
+@@ -786,6 +787,7 @@ static void psi_group_change(struct psi_group *group, int cpu,
+ * SOME and FULL time these may have resulted in.
+ */
+ write_seqcount_begin(&groupc->seq);
++ now = cpu_clock(cpu);
+
+ /*
+ * Start with TSK_ONCPU, which doesn't have a corresponding
+@@ -899,18 +901,15 @@ void psi_task_change(struct task_struct *task, int clear, int set)
+ {
+ int cpu = task_cpu(task);
+ struct psi_group *group;
+- u64 now;
+
+ if (!task->pid)
+ return;
+
+ psi_flags_change(task, clear, set);
+
+- now = cpu_clock(cpu);
+-
+ group = task_psi_group(task);
+ do {
+- psi_group_change(group, cpu, clear, set, now, true);
++ psi_group_change(group, cpu, clear, set, true);
+ } while ((group = group->parent));
+ }
+
+@@ -919,7 +918,6 @@ void psi_task_switch(struct task_struct *prev, struct task_struct *next,
+ {
+ struct psi_group *group, *common = NULL;
+ int cpu = task_cpu(prev);
+- u64 now = cpu_clock(cpu);
+
+ if (next->pid) {
+ psi_flags_change(next, 0, TSK_ONCPU);
+@@ -936,7 +934,7 @@ void psi_task_switch(struct task_struct *prev, struct task_struct *next,
+ break;
+ }
+
+- psi_group_change(group, cpu, 0, TSK_ONCPU, now, true);
++ psi_group_change(group, cpu, 0, TSK_ONCPU, true);
+ } while ((group = group->parent));
+ }
+
+@@ -974,7 +972,7 @@ void psi_task_switch(struct task_struct *prev, struct task_struct *next,
+ do {
+ if (group == common)
+ break;
+- psi_group_change(group, cpu, clear, set, now, wake_clock);
++ psi_group_change(group, cpu, clear, set, wake_clock);
+ } while ((group = group->parent));
+
+ /*
+@@ -986,7 +984,7 @@ void psi_task_switch(struct task_struct *prev, struct task_struct *next,
+ if ((prev->psi_flags ^ next->psi_flags) & ~TSK_ONCPU) {
+ clear &= ~TSK_ONCPU;
+ for (; group; group = group->parent)
+- psi_group_change(group, cpu, clear, set, now, wake_clock);
++ psi_group_change(group, cpu, clear, set, wake_clock);
+ }
+ }
+ }
+@@ -997,8 +995,8 @@ void psi_account_irqtime(struct rq *rq, struct task_struct *curr, struct task_st
+ int cpu = task_cpu(curr);
+ struct psi_group *group;
+ struct psi_group_cpu *groupc;
+- u64 now, irq;
+ s64 delta;
++ u64 irq;
+
+ if (static_branch_likely(&psi_disabled))
+ return;
+@@ -1011,7 +1009,6 @@ void psi_account_irqtime(struct rq *rq, struct task_struct *curr, struct task_st
+ if (prev && task_psi_group(prev) == group)
+ return;
+
+- now = cpu_clock(cpu);
+ irq = irq_time_read(cpu);
+ delta = (s64)(irq - rq->psi_irq_time);
+ if (delta < 0)
+@@ -1019,12 +1016,15 @@ void psi_account_irqtime(struct rq *rq, struct task_struct *curr, struct task_st
+ rq->psi_irq_time = irq;
+
+ do {
++ u64 now;
++
+ if (!group->enabled)
+ continue;
+
+ groupc = per_cpu_ptr(group->pcpu, cpu);
+
+ write_seqcount_begin(&groupc->seq);
++ now = cpu_clock(cpu);
+
+ record_times(groupc, now);
+ groupc->times[PSI_IRQ_FULL] += delta;
+@@ -1223,11 +1223,9 @@ void psi_cgroup_restart(struct psi_group *group)
+ for_each_possible_cpu(cpu) {
+ struct rq *rq = cpu_rq(cpu);
+ struct rq_flags rf;
+- u64 now;
+
+ rq_lock_irq(rq, &rf);
+- now = cpu_clock(cpu);
+- psi_group_change(group, cpu, 0, 0, now, true);
++ psi_group_change(group, cpu, 0, 0, true);
+ rq_unlock_irq(rq, &rf);
+ }
+ }
+--
+2.43.0
+
drm-amd-display-add-hdr-workaround-for-specific-edp.patch
drm-amd-display-update-dml2-policy-enhancedprefetchscheduleaccelerationfinal-dcn35.patch
drm-amd-display-fix-system-hang-while-resume-with-tbt-monitor.patch
+cpufreq-intel_pstate-make-hwp_notify_lock-a-raw-spin.patch
+kconfig-qconf-fix-buffer-overflow-in-debug-links.patch
+arm64-cputype-add-neoverse-n3-definitions.patch
+arm64-errata-expand-speculative-ssbs-workaround-once.patch
+uprobes-fix-kernel-info-leak-via-uprobes-vma.patch
+mm-z3fold-deprecate-config_z3fold.patch
+drm-amd-display-allow-backlight-to-go-below-amdgpu_d.patch
+build-id-require-program-headers-to-be-right-after-e.patch
+lib-buildid-harden-build-id-parsing-logic.patch
+drm-xe-delete-unused-guc-submission_state.suspend.patch
+drm-xe-fix-uaf-around-queue-destruction.patch
+sched-psi-fix-bogus-pressure-spikes-from-aggregation.patch
+sunrpc-change-sp_nrthreads-from-atomic_t-to-unsigned.patch
+nfsd-async-copy-result-needs-to-return-a-write-verif.patch
+nfsd-limit-the-number-of-concurrent-async-copy-opera.patch
+net-mana-add-support-for-page-sizes-other-than-4kb-o.patch
+rdma-mana_ib-use-the-correct-page-table-index-based-.patch
+remoteproc-k3-r5-acquire-mailbox-handle-during-probe.patch
+remoteproc-k3-r5-delay-notification-of-wakeup-event.patch
+iio-pressure-bmp280-improve-indentation-and-line-wra.patch
+iio-pressure-bmp280-use-bme-prefix-for-bme280-specif.patch
+iio-pressure-bmp280-fix-regmap-for-bmp280-device.patch
+iio-pressure-bmp280-fix-waiting-time-for-bmp3xx-conf.patch
+r8169-fix-spelling-mistake-tx_underun-tx_underrun.patch
+r8169-add-tally-counter-fields-added-with-rtl8125.patch
+acpi-battery-simplify-battery-hook-locking.patch
+acpi-battery-fix-possible-crash-when-unregistering-a.patch
--- /dev/null
+From 14040d95ed0caafff8191e068a46aa9fa951424a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Jul 2024 17:14:18 +1000
+Subject: sunrpc: change sp_nrthreads from atomic_t to unsigned int.
+
+From: NeilBrown <neilb@suse.de>
+
+[ Upstream commit 60749cbe3d8ae572a6c7dda675de3e8b25797a18 ]
+
+sp_nrthreads is only ever accessed under the service mutex:
+ nlmsvc_mutex nfs_callback_mutex nfsd_mutex
+so there is no need for it to be an atomic_t.
+
+The fact that all code using it is single-threaded means that we can
+simplify svc_pool_victim and remove the temporary elevation of
+sp_nrthreads.
+
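+As an aside, the conversion follows the standard pattern for retiring
+an atomic counter whose accesses are already serialized: keep a plain
+integer and rely on the existing lock. A minimal user-space sketch of
+that idea (illustrative names only, not the actual sunrpc code, with a
+pthread mutex standing in for the service mutex):
+
+	#include <pthread.h>
+
+	struct pool {
+		pthread_mutex_t lock;	/* stands in for nfsd_mutex */
+		unsigned int nrthreads;	/* plain int: the lock serializes it */
+	};
+
+	static struct pool pool = { .lock = PTHREAD_MUTEX_INITIALIZER };
+
+	static void pool_thread_started(void)
+	{
+		pthread_mutex_lock(&pool.lock);
+		pool.nrthreads += 1;	/* no atomic_inc() needed under the lock */
+		pthread_mutex_unlock(&pool.lock);
+	}
+
+	static void pool_thread_exited(void)
+	{
+		pthread_mutex_lock(&pool.lock);
+		pool.nrthreads -= 1;
+		pthread_mutex_unlock(&pool.lock);
+	}
+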
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Stable-dep-of: aadc3bbea163 ("NFSD: Limit the number of concurrent async COPY operations")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfsd/nfsctl.c | 2 +-
+ fs/nfsd/nfssvc.c | 2 +-
+ include/linux/sunrpc/svc.h | 4 ++--
+ net/sunrpc/svc.c | 31 +++++++++++--------------------
+ 4 files changed, 15 insertions(+), 24 deletions(-)
+
+diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
+index 0f9b4f7b56cd8..37f619ccafce0 100644
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -1746,7 +1746,7 @@ int nfsd_nl_threads_get_doit(struct sk_buff *skb, struct genl_info *info)
+ struct svc_pool *sp = &nn->nfsd_serv->sv_pools[i];
+
+ err = nla_put_u32(skb, NFSD_A_SERVER_THREADS,
+- atomic_read(&sp->sp_nrthreads));
++ sp->sp_nrthreads);
+ if (err)
+ goto err_unlock;
+ }
+diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
+index 89d7918de7b1a..877f926356549 100644
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -705,7 +705,7 @@ int nfsd_get_nrthreads(int n, int *nthreads, struct net *net)
+
+ if (serv)
+ for (i = 0; i < serv->sv_nrpools && i < n; i++)
+- nthreads[i] = atomic_read(&serv->sv_pools[i].sp_nrthreads);
++ nthreads[i] = serv->sv_pools[i].sp_nrthreads;
+ return 0;
+ }
+
+diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
+index 23617da0e565e..38a4fdf784e9a 100644
+--- a/include/linux/sunrpc/svc.h
++++ b/include/linux/sunrpc/svc.h
+@@ -33,9 +33,9 @@
+ * node traffic on multi-node NUMA NFS servers.
+ */
+ struct svc_pool {
+- unsigned int sp_id; /* pool id; also node id on NUMA */
++ unsigned int sp_id; /* pool id; also node id on NUMA */
+ struct lwq sp_xprts; /* pending transports */
+- atomic_t sp_nrthreads; /* # of threads in pool */
++ unsigned int sp_nrthreads; /* # of threads in pool */
+ struct list_head sp_all_threads; /* all server threads */
+ struct llist_head sp_idle_threads; /* idle server threads */
+
+diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
+index d9cda1e53a017..6a15b831589c0 100644
+--- a/net/sunrpc/svc.c
++++ b/net/sunrpc/svc.c
+@@ -682,7 +682,7 @@ svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
+ serv->sv_nrthreads += 1;
+ spin_unlock_bh(&serv->sv_lock);
+
+- atomic_inc(&pool->sp_nrthreads);
++ pool->sp_nrthreads += 1;
+
+ /* Protected by whatever lock the service uses when calling
+ * svc_set_num_threads()
+@@ -737,31 +737,22 @@ svc_pool_victim(struct svc_serv *serv, struct svc_pool *target_pool,
+ struct svc_pool *pool;
+ unsigned int i;
+
+-retry:
+ pool = target_pool;
+
+- if (pool != NULL) {
+- if (atomic_inc_not_zero(&pool->sp_nrthreads))
+- goto found_pool;
+- return NULL;
+- } else {
++ if (!pool) {
+ for (i = 0; i < serv->sv_nrpools; i++) {
+ pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
+- if (atomic_inc_not_zero(&pool->sp_nrthreads))
+- goto found_pool;
++ if (pool->sp_nrthreads)
++ break;
+ }
+- return NULL;
+ }
+
+-found_pool:
+- set_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
+- set_bit(SP_NEED_VICTIM, &pool->sp_flags);
+- if (!atomic_dec_and_test(&pool->sp_nrthreads))
++ if (pool && pool->sp_nrthreads) {
++ set_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
++ set_bit(SP_NEED_VICTIM, &pool->sp_flags);
+ return pool;
+- /* Nothing left in this pool any more */
+- clear_bit(SP_NEED_VICTIM, &pool->sp_flags);
+- clear_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
+- goto retry;
++ }
++ return NULL;
+ }
+
+ static int
+@@ -840,7 +831,7 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
+ if (!pool)
+ nrservs -= serv->sv_nrthreads;
+ else
+- nrservs -= atomic_read(&pool->sp_nrthreads);
++ nrservs -= pool->sp_nrthreads;
+
+ if (nrservs > 0)
+ return svc_start_kthreads(serv, pool, nrservs);
+@@ -928,7 +919,7 @@ svc_exit_thread(struct svc_rqst *rqstp)
+
+ list_del_rcu(&rqstp->rq_all);
+
+- atomic_dec(&pool->sp_nrthreads);
++ pool->sp_nrthreads -= 1;
+
+ spin_lock_bh(&serv->sv_lock);
+ serv->sv_nrthreads -= 1;
+--
+2.43.0
+
--- /dev/null
+From 8ba90f8388c7a414dc7ca56c1c121b32fff4e0fe Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Oct 2024 19:46:01 +0200
+Subject: uprobes: fix kernel info leak via "[uprobes]" vma
+
+From: Oleg Nesterov <oleg@redhat.com>
+
+commit 34820304cc2cd1804ee1f8f3504ec77813d29c8e upstream.
+
+xol_add_vma() maps the uninitialized page allocated by __create_xol_area()
+into userspace. On some architectures (x86), this memory is readable even
+without VM_READ: VM_EXEC results in the same pgprot_t as VM_EXEC|VM_READ.
+This doesn't really matter much, though, since a debugger can read this
+memory anyway.
+
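+The bug class is worth spelling out: mapping a page into userspace
+without initializing it leaks whatever the allocator left behind in
+that memory. A minimal user-space analogue (illustrative only, not the
+uprobes code) shows the same shape of fix -- request zeroed memory at
+allocation time rather than assuming nobody can read the stale bytes:
+
+	#include <stdlib.h>
+
+	/* Buggy shape: malloc() returns uninitialized bytes, so stale
+	 * heap contents leak to whoever gets to read the buffer. */
+	static char *make_slot_leaky(size_t len)
+	{
+		return malloc(len);
+	}
+
+	/* Fixed shape, mirroring GFP_HIGHUSER | __GFP_ZERO: the memory
+	 * is zeroed as part of the allocation itself, leaving no window
+	 * in which stale contents are visible. */
+	static char *make_slot_zeroed(size_t len)
+	{
+		return calloc(1, len);
+	}
+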
+Link: https://lore.kernel.org/all/20240929162047.GA12611@redhat.com/
+
+Reported-by: Will Deacon <will@kernel.org>
+Fixes: d4b3b6384f98 ("uprobes/core: Allocate XOL slots for uprobes use")
+Cc: stable@vger.kernel.org
+Acked-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Signed-off-by: Oleg Nesterov <oleg@redhat.com>
+Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/events/uprobes.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
+index 28c678c8daef3..3dd1f14643648 100644
+--- a/kernel/events/uprobes.c
++++ b/kernel/events/uprobes.c
+@@ -1491,7 +1491,7 @@ static struct xol_area *__create_xol_area(unsigned long vaddr)
+
+ area->xol_mapping.name = "[uprobes]";
+ area->xol_mapping.pages = area->pages;
+- area->pages[0] = alloc_page(GFP_HIGHUSER);
++ area->pages[0] = alloc_page(GFP_HIGHUSER | __GFP_ZERO);
+ if (!area->pages[0])
+ goto free_bitmap;
+ area->pages[1] = NULL;
+--
+2.43.0
+