]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 6.11
authorSasha Levin <sashal@kernel.org>
Tue, 8 Oct 2024 06:02:18 +0000 (02:02 -0400)
committerSasha Levin <sashal@kernel.org>
Tue, 8 Oct 2024 06:02:18 +0000 (02:02 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
20 files changed:
queue-6.11/acpi-battery-fix-possible-crash-when-unregistering-a.patch [new file with mode: 0644]
queue-6.11/acpi-battery-simplify-battery-hook-locking.patch [new file with mode: 0644]
queue-6.11/arm64-cputype-add-neoverse-n3-definitions.patch [new file with mode: 0644]
queue-6.11/arm64-errata-expand-speculative-ssbs-workaround-once.patch [new file with mode: 0644]
queue-6.11/drm-amd-display-allow-backlight-to-go-below-amdgpu_d.patch [new file with mode: 0644]
queue-6.11/drm-xe-clean-up-vm-exec-queue-file-lock-usage.patch [new file with mode: 0644]
queue-6.11/drm-xe-vm-move-xa_alloc-to-prevent-uaf.patch [new file with mode: 0644]
queue-6.11/kconfig-fix-infinite-loop-in-sym_calc_choice.patch [new file with mode: 0644]
queue-6.11/kconfig-qconf-fix-buffer-overflow-in-debug-links.patch [new file with mode: 0644]
queue-6.11/kconfig-qconf-move-conf_read-before-drawing-tree-pai.patch [new file with mode: 0644]
queue-6.11/mm-z3fold-deprecate-config_z3fold.patch [new file with mode: 0644]
queue-6.11/nfsd-async-copy-result-needs-to-return-a-write-verif.patch [new file with mode: 0644]
queue-6.11/nfsd-limit-the-number-of-concurrent-async-copy-opera.patch [new file with mode: 0644]
queue-6.11/r8169-add-tally-counter-fields-added-with-rtl8125.patch [new file with mode: 0644]
queue-6.11/r8169-fix-spelling-mistake-tx_underun-tx_underrun.patch [new file with mode: 0644]
queue-6.11/remoteproc-k3-r5-acquire-mailbox-handle-during-probe.patch [new file with mode: 0644]
queue-6.11/remoteproc-k3-r5-delay-notification-of-wakeup-event.patch [new file with mode: 0644]
queue-6.11/series
queue-6.11/sunrpc-change-sp_nrthreads-from-atomic_t-to-unsigned.patch [new file with mode: 0644]
queue-6.11/uprobes-fix-kernel-info-leak-via-uprobes-vma.patch [new file with mode: 0644]

diff --git a/queue-6.11/acpi-battery-fix-possible-crash-when-unregistering-a.patch b/queue-6.11/acpi-battery-fix-possible-crash-when-unregistering-a.patch
new file mode 100644 (file)
index 0000000..583dc26
--- /dev/null
@@ -0,0 +1,69 @@
+From 71fc0c8c6dba956e686fbf2d6c5657db6886e158 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 1 Oct 2024 23:28:34 +0200
+Subject: ACPI: battery: Fix possible crash when unregistering a battery hook
+
+From: Armin Wolf <W_Armin@gmx.de>
+
+[ Upstream commit 76959aff14a0012ad6b984ec7686d163deccdc16 ]
+
+When a battery hook returns an error when adding a new battery, then
+the battery hook is automatically unregistered.
+However the battery hook provider cannot know that, so it will later
+call battery_hook_unregister() on the already unregistered battery
+hook, resulting in a crash.
+
+Fix this by using the list head to mark already unregistered battery
+hooks as already being unregistered so that they can be ignored by
+battery_hook_unregister().
+
+Fixes: fa93854f7a7e ("battery: Add the battery hooking API")
+Signed-off-by: Armin Wolf <W_Armin@gmx.de>
+Link: https://patch.msgid.link/20241001212835.341788-3-W_Armin@gmx.de
+Cc: All applicable <stable@vger.kernel.org>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/acpi/battery.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
+index 10e9136897a75..4f1637ed76e5c 100644
+--- a/drivers/acpi/battery.c
++++ b/drivers/acpi/battery.c
+@@ -718,7 +718,7 @@ static void battery_hook_unregister_unlocked(struct acpi_battery_hook *hook)
+               if (!hook->remove_battery(battery->bat, hook))
+                       power_supply_changed(battery->bat);
+       }
+-      list_del(&hook->list);
++      list_del_init(&hook->list);
+       pr_info("extension unregistered: %s\n", hook->name);
+ }
+@@ -726,7 +726,14 @@ static void battery_hook_unregister_unlocked(struct acpi_battery_hook *hook)
+ void battery_hook_unregister(struct acpi_battery_hook *hook)
+ {
+       mutex_lock(&hook_mutex);
+-      battery_hook_unregister_unlocked(hook);
++      /*
++       * Ignore already unregistered battery hooks. This might happen
++       * if a battery hook was previously unloaded due to an error when
++       * adding a new battery.
++       */
++      if (!list_empty(&hook->list))
++              battery_hook_unregister_unlocked(hook);
++
+       mutex_unlock(&hook_mutex);
+ }
+ EXPORT_SYMBOL_GPL(battery_hook_unregister);
+@@ -736,7 +743,6 @@ void battery_hook_register(struct acpi_battery_hook *hook)
+       struct acpi_battery *battery;
+       mutex_lock(&hook_mutex);
+-      INIT_LIST_HEAD(&hook->list);
+       list_add(&hook->list, &battery_hook_list);
+       /*
+        * Now that the driver is registered, we need
+-- 
+2.43.0
+
diff --git a/queue-6.11/acpi-battery-simplify-battery-hook-locking.patch b/queue-6.11/acpi-battery-simplify-battery-hook-locking.patch
new file mode 100644 (file)
index 0000000..3d3b9e5
--- /dev/null
@@ -0,0 +1,96 @@
+From adfaa7c20ca15e556e708386b2b5eb1fad10137d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 1 Oct 2024 23:28:33 +0200
+Subject: ACPI: battery: Simplify battery hook locking
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Armin Wolf <W_Armin@gmx.de>
+
+[ Upstream commit 86309cbed26139e1caae7629dcca1027d9a28e75 ]
+
+Move the conditional locking from __battery_hook_unregister()
+into battery_hook_unregister() and rename the low-level function
+to simplify the locking during battery hook removal.
+
+Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Reviewed-by: Pali Rohár <pali@kernel.org>
+Signed-off-by: Armin Wolf <W_Armin@gmx.de>
+Link: https://patch.msgid.link/20241001212835.341788-2-W_Armin@gmx.de
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Stable-dep-of: 76959aff14a0 ("ACPI: battery: Fix possible crash when unregistering a battery hook")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/acpi/battery.c | 18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
+index da3a879d638a8..10e9136897a75 100644
+--- a/drivers/acpi/battery.c
++++ b/drivers/acpi/battery.c
+@@ -706,28 +706,28 @@ static LIST_HEAD(acpi_battery_list);
+ static LIST_HEAD(battery_hook_list);
+ static DEFINE_MUTEX(hook_mutex);
+-static void __battery_hook_unregister(struct acpi_battery_hook *hook, int lock)
++static void battery_hook_unregister_unlocked(struct acpi_battery_hook *hook)
+ {
+       struct acpi_battery *battery;
++
+       /*
+        * In order to remove a hook, we first need to
+        * de-register all the batteries that are registered.
+        */
+-      if (lock)
+-              mutex_lock(&hook_mutex);
+       list_for_each_entry(battery, &acpi_battery_list, list) {
+               if (!hook->remove_battery(battery->bat, hook))
+                       power_supply_changed(battery->bat);
+       }
+       list_del(&hook->list);
+-      if (lock)
+-              mutex_unlock(&hook_mutex);
++
+       pr_info("extension unregistered: %s\n", hook->name);
+ }
+ void battery_hook_unregister(struct acpi_battery_hook *hook)
+ {
+-      __battery_hook_unregister(hook, 1);
++      mutex_lock(&hook_mutex);
++      battery_hook_unregister_unlocked(hook);
++      mutex_unlock(&hook_mutex);
+ }
+ EXPORT_SYMBOL_GPL(battery_hook_unregister);
+@@ -753,7 +753,7 @@ void battery_hook_register(struct acpi_battery_hook *hook)
+                        * hooks.
+                        */
+                       pr_err("extension failed to load: %s", hook->name);
+-                      __battery_hook_unregister(hook, 0);
++                      battery_hook_unregister_unlocked(hook);
+                       goto end;
+               }
+@@ -807,7 +807,7 @@ static void battery_hook_add_battery(struct acpi_battery *battery)
+                        */
+                       pr_err("error in extension, unloading: %s",
+                                       hook_node->name);
+-                      __battery_hook_unregister(hook_node, 0);
++                      battery_hook_unregister_unlocked(hook_node);
+               }
+       }
+       mutex_unlock(&hook_mutex);
+@@ -840,7 +840,7 @@ static void __exit battery_hook_exit(void)
+        * need to remove the hooks.
+        */
+       list_for_each_entry_safe(hook, ptr, &battery_hook_list, list) {
+-              __battery_hook_unregister(hook, 1);
++              battery_hook_unregister(hook);
+       }
+       mutex_destroy(&hook_mutex);
+ }
+-- 
+2.43.0
+
diff --git a/queue-6.11/arm64-cputype-add-neoverse-n3-definitions.patch b/queue-6.11/arm64-cputype-add-neoverse-n3-definitions.patch
new file mode 100644 (file)
index 0000000..0c1f81f
--- /dev/null
@@ -0,0 +1,52 @@
+From f8f10d26b9a85d01f64832511e949ff36260879b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Oct 2024 13:04:19 +0100
+Subject: arm64: cputype: Add Neoverse-N3 definitions
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+[ Upstream commit 924725707d80bc2588cefafef76ff3f164d299bc ]
+
+Add cputype definitions for Neoverse-N3. These will be used for errata
+detection in subsequent patches.
+
+These values can be found in Table A-261 ("MIDR_EL1 bit descriptions")
+in issue 02 of the Neoverse-N3 TRM, which can be found at:
+
+  https://developer.arm.com/documentation/107997/0000/?lang=en
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Cc: James Morse <james.morse@arm.com>
+Cc: Will Deacon <will@kernel.org>
+Link: https://lore.kernel.org/r/20240930111705.3352047-2-mark.rutland@arm.com
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+[ Mark: trivial backport ]
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/include/asm/cputype.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
+index 5a7dfeb8e8eb5..488f8e7513495 100644
+--- a/arch/arm64/include/asm/cputype.h
++++ b/arch/arm64/include/asm/cputype.h
+@@ -94,6 +94,7 @@
+ #define ARM_CPU_PART_NEOVERSE_V3      0xD84
+ #define ARM_CPU_PART_CORTEX_X925      0xD85
+ #define ARM_CPU_PART_CORTEX_A725      0xD87
++#define ARM_CPU_PART_NEOVERSE_N3      0xD8E
+ #define APM_CPU_PART_XGENE            0x000
+ #define APM_CPU_VAR_POTENZA           0x00
+@@ -176,6 +177,7 @@
+ #define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3)
+ #define MIDR_CORTEX_X925 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X925)
+ #define MIDR_CORTEX_A725 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A725)
++#define MIDR_NEOVERSE_N3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N3)
+ #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
+ #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
+ #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
+-- 
+2.43.0
+
diff --git a/queue-6.11/arm64-errata-expand-speculative-ssbs-workaround-once.patch b/queue-6.11/arm64-errata-expand-speculative-ssbs-workaround-once.patch
new file mode 100644 (file)
index 0000000..24ff05a
--- /dev/null
@@ -0,0 +1,114 @@
+From c1f69fd66fb14df0332bb35e943282b4622333ec Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Oct 2024 13:04:20 +0100
+Subject: arm64: errata: Expand speculative SSBS workaround once more
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+[ Upstream commit 081eb7932c2b244f63317a982c5e3990e2c7fbdd ]
+
+A number of Arm Ltd CPUs suffer from errata whereby an MSR to the SSBS
+special-purpose register does not affect subsequent speculative
+instructions, permitting speculative store bypassing for a window of
+time.
+
+We worked around this for a number of CPUs in commits:
+
+* 7187bb7d0b5c7dfa ("arm64: errata: Add workaround for Arm errata 3194386 and 3312417")
+* 75b3c43eab594bfb ("arm64: errata: Expand speculative SSBS workaround")
+* 145502cac7ea70b5 ("arm64: errata: Expand speculative SSBS workaround (again)")
+
+Since then, a (hopefully final) batch of updates have been published,
+with two more affected CPUs. For the affected CPUs the existing
+mitigation is sufficient, as described in their respective Software
+Developer Errata Notice (SDEN) documents:
+
+* Cortex-A715 (MP148) SDEN v15.0, erratum 3456084
+  https://developer.arm.com/documentation/SDEN-2148827/1500/
+
+* Neoverse-N3 (MP195) SDEN v5.0, erratum 3456111
+  https://developer.arm.com/documentation/SDEN-3050973/0500/
+
+Enable the existing mitigation by adding the relevant MIDRs to
+erratum_spec_ssbs_list, and update silicon-errata.rst and the
+Kconfig text accordingly.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Cc: James Morse <james.morse@arm.com>
+Cc: Will Deacon <will@kernel.org>
+Link: https://lore.kernel.org/r/20240930111705.3352047-3-mark.rutland@arm.com
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+[ Mark: trivial backport ]
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ Documentation/arch/arm64/silicon-errata.rst | 4 ++++
+ arch/arm64/Kconfig                          | 2 ++
+ arch/arm64/kernel/cpu_errata.c              | 2 ++
+ 3 files changed, 8 insertions(+)
+
+diff --git a/Documentation/arch/arm64/silicon-errata.rst b/Documentation/arch/arm64/silicon-errata.rst
+index 3bc51669ead7d..8cd4f365044b6 100644
+--- a/Documentation/arch/arm64/silicon-errata.rst
++++ b/Documentation/arch/arm64/silicon-errata.rst
+@@ -146,6 +146,8 @@ stable kernels.
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM            | Cortex-A715     | #2645198        | ARM64_ERRATUM_2645198       |
+ +----------------+-----------------+-----------------+-----------------------------+
++| ARM            | Cortex-A715     | #3456084        | ARM64_ERRATUM_3194386       |
+++----------------+-----------------+-----------------+-----------------------------+
+ | ARM            | Cortex-A720     | #3456091        | ARM64_ERRATUM_3194386       |
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM            | Cortex-A725     | #3456106        | ARM64_ERRATUM_3194386       |
+@@ -186,6 +188,8 @@ stable kernels.
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM            | Neoverse-N2     | #3324339        | ARM64_ERRATUM_3194386       |
+ +----------------+-----------------+-----------------+-----------------------------+
++| ARM            | Neoverse-N3     | #3456111        | ARM64_ERRATUM_3194386       |
+++----------------+-----------------+-----------------+-----------------------------+
+ | ARM            | Neoverse-V1     | #1619801        | N/A                         |
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM            | Neoverse-V1     | #3324341        | ARM64_ERRATUM_3194386       |
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 09ce28ff9e871..89b331575ed49 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -1079,6 +1079,7 @@ config ARM64_ERRATUM_3194386
+         * ARM Cortex-A78C erratum 3324346
+         * ARM Cortex-A78C erratum 3324347
+         * ARM Cortex-A710 erratam 3324338
++        * ARM Cortex-A715 errartum 3456084
+         * ARM Cortex-A720 erratum 3456091
+         * ARM Cortex-A725 erratum 3456106
+         * ARM Cortex-X1 erratum 3324344
+@@ -1089,6 +1090,7 @@ config ARM64_ERRATUM_3194386
+         * ARM Cortex-X925 erratum 3324334
+         * ARM Neoverse-N1 erratum 3324349
+         * ARM Neoverse N2 erratum 3324339
++        * ARM Neoverse-N3 erratum 3456111
+         * ARM Neoverse-V1 erratum 3324341
+         * ARM Neoverse V2 erratum 3324336
+         * ARM Neoverse-V3 erratum 3312417
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index aec2867daadc2..a78f247029aec 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -439,6 +439,7 @@ static const struct midr_range erratum_spec_ssbs_list[] = {
+       MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
+       MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
+       MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
++      MIDR_ALL_VERSIONS(MIDR_CORTEX_A715),
+       MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
+       MIDR_ALL_VERSIONS(MIDR_CORTEX_A725),
+       MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
+@@ -450,6 +451,7 @@ static const struct midr_range erratum_spec_ssbs_list[] = {
+       MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
+       MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
+       MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
++      MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N3),
+       MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
+       MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
+       MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3),
+-- 
+2.43.0
+
diff --git a/queue-6.11/drm-amd-display-allow-backlight-to-go-below-amdgpu_d.patch b/queue-6.11/drm-amd-display-allow-backlight-to-go-below-amdgpu_d.patch
new file mode 100644 (file)
index 0000000..83263ff
--- /dev/null
@@ -0,0 +1,47 @@
+From 698ddab127dd6fc3c2519b672ab399021593a97f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 15 Sep 2024 14:28:37 -0500
+Subject: drm/amd/display: Allow backlight to go below
+ `AMDGPU_DM_DEFAULT_MIN_BACKLIGHT`
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Mario Limonciello <mario.limonciello@amd.com>
+
+[ Upstream commit 87d749a6aab73d8069d0345afaa98297816cb220 ]
+
+The issue with panel power savings compatibility below
+`AMDGPU_DM_DEFAULT_MIN_BACKLIGHT` happens at
+`AMDGPU_DM_DEFAULT_MIN_BACKLIGHT` as well.
+
+That issue will be fixed separately, so don't prevent the backlight
+brightness from going that low.
+
+Cc: Harry Wentland <harry.wentland@amd.com>
+Cc: Thomas Weißschuh <linux@weissschuh.net>
+Link: https://lore.kernel.org/amd-gfx/be04226a-a9e3-4a45-a83b-6d263c6557d8@t-8ch.de/T/#m400dee4e2fc61fe9470334d20a7c8c89c9aef44f
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index ccb1883a67ff5..1ab7cd8a6b6ae 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -4485,7 +4485,7 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
+               int spread = caps.max_input_signal - caps.min_input_signal;
+               if (caps.max_input_signal > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT ||
+-                  caps.min_input_signal < AMDGPU_DM_DEFAULT_MIN_BACKLIGHT ||
++                  caps.min_input_signal < 0 ||
+                   spread > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT ||
+                   spread < AMDGPU_DM_MIN_SPREAD) {
+                       DRM_DEBUG_KMS("DM: Invalid backlight caps: min=%d, max=%d\n",
+-- 
+2.43.0
+
diff --git a/queue-6.11/drm-xe-clean-up-vm-exec-queue-file-lock-usage.patch b/queue-6.11/drm-xe-clean-up-vm-exec-queue-file-lock-usage.patch
new file mode 100644 (file)
index 0000000..a170509
--- /dev/null
@@ -0,0 +1,140 @@
+From abec1949ab466a290647f196d8f2692e3f092608 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Sep 2024 18:17:12 -0700
+Subject: drm/xe: Clean up VM / exec queue file lock usage.
+
+From: Matthew Brost <matthew.brost@intel.com>
+
+[ Upstream commit 9e3c85ddea7a473ed57b6cdfef2dfd468356fc91 ]
+
+Both the VM / exec queue file lock protect the lookup and reference to
+the object, nothing more. These locks are not intended anything else
+underneath them. XA have their own locking too, so no need to take the
+VM / exec queue file lock aside from when doing a lookup and reference
+get.
+
+Add some kernel doc to make this clear and cleanup a few typos too.
+
+Signed-off-by: Matthew Brost <matthew.brost@intel.com>
+Reviewed-by: Matthew Auld <matthew.auld@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240921011712.2681510-1-matthew.brost@intel.com
+(cherry picked from commit fe4f5d4b661666a45b48fe7f95443f8fefc09c8c)
+Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
+Stable-dep-of: 74231870cf49 ("drm/xe/vm: move xa_alloc to prevent UAF")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_device.c       |  2 --
+ drivers/gpu/drm/xe/xe_device_types.h | 14 +++++++++++---
+ drivers/gpu/drm/xe/xe_drm_client.c   |  9 ++++++++-
+ drivers/gpu/drm/xe/xe_exec_queue.c   |  2 --
+ drivers/gpu/drm/xe/xe_vm.c           |  4 ----
+ 5 files changed, 19 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
+index 83f603a1ab122..8a44a2b6dcbb6 100644
+--- a/drivers/gpu/drm/xe/xe_device.c
++++ b/drivers/gpu/drm/xe/xe_device.c
+@@ -159,10 +159,8 @@ static void xe_file_close(struct drm_device *dev, struct drm_file *file)
+               xe_exec_queue_kill(q);
+               xe_exec_queue_put(q);
+       }
+-      mutex_lock(&xef->vm.lock);
+       xa_for_each(&xef->vm.xa, idx, vm)
+               xe_vm_close_and_put(vm);
+-      mutex_unlock(&xef->vm.lock);
+       xe_file_put(xef);
+diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
+index fbc05188263d9..a7c7812d57915 100644
+--- a/drivers/gpu/drm/xe/xe_device_types.h
++++ b/drivers/gpu/drm/xe/xe_device_types.h
+@@ -558,15 +558,23 @@ struct xe_file {
+       struct {
+               /** @vm.xe: xarray to store VMs */
+               struct xarray xa;
+-              /** @vm.lock: protects file VM state */
++              /**
++               * @vm.lock: Protects VM lookup + reference and removal a from
++               * file xarray. Not an intended to be an outer lock which does
++               * thing while being held.
++               */
+               struct mutex lock;
+       } vm;
+       /** @exec_queue: Submission exec queue state for file */
+       struct {
+-              /** @exec_queue.xe: xarray to store engines */
++              /** @exec_queue.xa: xarray to store exece queues */
+               struct xarray xa;
+-              /** @exec_queue.lock: protects file engine state */
++              /**
++               * @exec_queue.lock: Protects exec queue lookup + reference and
++               * removal a frommfile xarray. Not an intended to be an outer
++               * lock which does thing while being held.
++               */
+               struct mutex lock;
+       } exec_queue;
+diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c
+index 1af95b9b91715..c237ced421833 100644
+--- a/drivers/gpu/drm/xe/xe_drm_client.c
++++ b/drivers/gpu/drm/xe/xe_drm_client.c
+@@ -288,8 +288,15 @@ static void show_run_ticks(struct drm_printer *p, struct drm_file *file)
+       /* Accumulate all the exec queues from this client */
+       mutex_lock(&xef->exec_queue.lock);
+-      xa_for_each(&xef->exec_queue.xa, i, q)
++      xa_for_each(&xef->exec_queue.xa, i, q) {
++              xe_exec_queue_get(q);
++              mutex_unlock(&xef->exec_queue.lock);
++
+               xe_exec_queue_update_run_ticks(q);
++
++              mutex_lock(&xef->exec_queue.lock);
++              xe_exec_queue_put(q);
++      }
+       mutex_unlock(&xef->exec_queue.lock);
+       /* Get the total GPU cycles */
+diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
+index d0bbb1d9b1ac1..2179c65dc60ab 100644
+--- a/drivers/gpu/drm/xe/xe_exec_queue.c
++++ b/drivers/gpu/drm/xe/xe_exec_queue.c
+@@ -627,9 +627,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
+               }
+       }
+-      mutex_lock(&xef->exec_queue.lock);
+       err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
+-      mutex_unlock(&xef->exec_queue.lock);
+       if (err)
+               goto kill_exec_queue;
+diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
+index 743c8d79d79d2..8fb425ad9e4a4 100644
+--- a/drivers/gpu/drm/xe/xe_vm.c
++++ b/drivers/gpu/drm/xe/xe_vm.c
+@@ -1905,9 +1905,7 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
+       if (IS_ERR(vm))
+               return PTR_ERR(vm);
+-      mutex_lock(&xef->vm.lock);
+       err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
+-      mutex_unlock(&xef->vm.lock);
+       if (err)
+               goto err_close_and_put;
+@@ -1939,9 +1937,7 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
+       return 0;
+ err_free_id:
+-      mutex_lock(&xef->vm.lock);
+       xa_erase(&xef->vm.xa, id);
+-      mutex_unlock(&xef->vm.lock);
+ err_close_and_put:
+       xe_vm_close_and_put(vm);
+-- 
+2.43.0
+
diff --git a/queue-6.11/drm-xe-vm-move-xa_alloc-to-prevent-uaf.patch b/queue-6.11/drm-xe-vm-move-xa_alloc-to-prevent-uaf.patch
new file mode 100644 (file)
index 0000000..59eec25
--- /dev/null
@@ -0,0 +1,81 @@
+From be973ca7b78419c56e2dc42fecd0d1f432c8527e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Sep 2024 08:14:27 +0100
+Subject: drm/xe/vm: move xa_alloc to prevent UAF
+
+From: Matthew Auld <matthew.auld@intel.com>
+
+[ Upstream commit 74231870cf4976f69e83aa24f48edb16619f652f ]
+
+Evil user can guess the next id of the vm before the ioctl completes and
+then call vm destroy ioctl to trigger UAF since create ioctl is still
+referencing the same vm. Move the xa_alloc all the way to the end to
+prevent this.
+
+v2:
+ - Rebase
+
+Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
+Signed-off-by: Matthew Auld <matthew.auld@intel.com>
+Cc: Matthew Brost <matthew.brost@intel.com>
+Cc: <stable@vger.kernel.org> # v6.8+
+Reviewed-by: Nirmoy Das <nirmoy.das@intel.com>
+Reviewed-by: Matthew Brost <matthew.brost@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240925071426.144015-3-matthew.auld@intel.com
+(cherry picked from commit dcfd3971327f3ee92765154baebbaece833d3ca9)
+Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_vm.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
+index 8fb425ad9e4a4..49ba9a1e375f4 100644
+--- a/drivers/gpu/drm/xe/xe_vm.c
++++ b/drivers/gpu/drm/xe/xe_vm.c
+@@ -1905,10 +1905,6 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
+       if (IS_ERR(vm))
+               return PTR_ERR(vm);
+-      err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
+-      if (err)
+-              goto err_close_and_put;
+-
+       if (xe->info.has_asid) {
+               mutex_lock(&xe->usm.lock);
+               err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
+@@ -1916,12 +1912,11 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
+                                     &xe->usm.next_asid, GFP_KERNEL);
+               mutex_unlock(&xe->usm.lock);
+               if (err < 0)
+-                      goto err_free_id;
++                      goto err_close_and_put;
+               vm->usm.asid = asid;
+       }
+-      args->vm_id = id;
+       vm->xef = xe_file_get(xef);
+       /* Record BO memory for VM pagetable created against client */
+@@ -1934,10 +1929,15 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
+       args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
+ #endif
++      /* user id alloc must always be last in ioctl to prevent UAF */
++      err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
++      if (err)
++              goto err_close_and_put;
++
++      args->vm_id = id;
++
+       return 0;
+-err_free_id:
+-      xa_erase(&xef->vm.xa, id);
+ err_close_and_put:
+       xe_vm_close_and_put(vm);
+-- 
+2.43.0
+
diff --git a/queue-6.11/kconfig-fix-infinite-loop-in-sym_calc_choice.patch b/queue-6.11/kconfig-fix-infinite-loop-in-sym_calc_choice.patch
new file mode 100644 (file)
index 0000000..bbbff61
--- /dev/null
@@ -0,0 +1,97 @@
+From 9bca48b40ff15b84e2e69519eea98030ba60b5ff Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Sep 2024 20:25:31 +0900
+Subject: kconfig: fix infinite loop in sym_calc_choice()
+
+From: Masahiro Yamada <masahiroy@kernel.org>
+
+[ Upstream commit 4d46b5b623e0adee1153b1d80689211e5094ae44 ]
+
+Since commit f79dc03fe68c ("kconfig: refactor choice value calculation"),
+Kconfig for ARCH=powerpc may result in an infinite loop. This occurs
+because there are two entries for POWERPC64_CPU in a choice block.
+
+If the same symbol appears twice in a choice block, the ->choice_link
+node is added twice to ->choice_members, resulting a corrupted linked
+list.
+
+A simple test case is:
+
+    choice
+            prompt "choice"
+
+    config A
+            bool "A"
+
+    config B
+            bool "B 1"
+
+    config B
+            bool "B 2"
+
+    endchoice
+
+Running 'make defconfig' results in an infinite loop.
+
+One solution is to replace the current two entries:
+
+    config POWERPC64_CPU
+            bool "Generic (POWER5 and PowerPC 970 and above)"
+            depends on PPC_BOOK3S_64 && !CPU_LITTLE_ENDIAN
+            select PPC_64S_HASH_MMU
+
+    config POWERPC64_CPU
+            bool "Generic (POWER8 and above)"
+            depends on PPC_BOOK3S_64 && CPU_LITTLE_ENDIAN
+            select ARCH_HAS_FAST_MULTIPLIER
+            select PPC_64S_HASH_MMU
+            select PPC_HAS_LBARX_LHARX
+
+with the following single entry:
+
+    config POWERPC64_CPU
+            bool "Generic 64 bit powerpc"
+            depends on PPC_BOOK3S_64
+            select ARCH_HAS_FAST_MULTIPLIER if CPU_LITTLE_ENDIAN
+            select PPC_64S_HASH_MMU
+            select PPC_HAS_LBARX_LHARX if CPU_LITTLE_ENDIAN
+
+In my opinion, the latter looks cleaner, but PowerPC maintainers may
+prefer to display different prompts depending on CPU_LITTLE_ENDIAN.
+
+For now, this commit fixes the issue in Kconfig, restoring the original
+behavior. I will reconsider whether such a use case is worth supporting.
+
+Fixes: f79dc03fe68c ("kconfig: refactor choice value calculation")
+Reported-by: Marco Bonelli <marco@mebeim.net>
+Closes: https://lore.kernel.org/all/1763151587.3581913.1727224126288@privateemail.com/
+Signed-off-by: Masahiro Yamada <masahiroy@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ scripts/kconfig/parser.y | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/scripts/kconfig/parser.y b/scripts/kconfig/parser.y
+index 61900feb4254a..add1ce4b5091d 100644
+--- a/scripts/kconfig/parser.y
++++ b/scripts/kconfig/parser.y
+@@ -158,8 +158,14 @@ config_stmt: config_entry_start config_option_list
+                       yynerrs++;
+               }
+-              list_add_tail(&current_entry->sym->choice_link,
+-                            &current_choice->choice_members);
++              /*
++               * If the same symbol appears twice in a choice block, the list
++               * node would be added twice, leading to a broken linked list.
++               * list_empty() ensures that this symbol has not yet added.
++               */
++              if (list_empty(&current_entry->sym->choice_link))
++                      list_add_tail(&current_entry->sym->choice_link,
++                                    &current_choice->choice_members);
+       }
+       printd(DEBUG_PARSE, "%s:%d:endconfig\n", cur_filename, cur_lineno);
+-- 
+2.43.0
+
diff --git a/queue-6.11/kconfig-qconf-fix-buffer-overflow-in-debug-links.patch b/queue-6.11/kconfig-qconf-fix-buffer-overflow-in-debug-links.patch
new file mode 100644 (file)
index 0000000..3f0baad
--- /dev/null
@@ -0,0 +1,43 @@
+From cd1cad4b50fbf5ae70eda3ee057442b9cd8c63c9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 1 Oct 2024 18:02:22 +0900
+Subject: kconfig: qconf: fix buffer overflow in debug links
+
+From: Masahiro Yamada <masahiroy@kernel.org>
+
+[ Upstream commit 984ed20ece1c6c20789ece040cbff3eb1a388fa9 ]
+
+If you enable "Option -> Show Debug Info" and click a link, the program
+terminates with the following error:
+
+    *** buffer overflow detected ***: terminated
+
+The buffer overflow is caused by the following line:
+
+    strcat(data, "$");
+
+The buffer needs one more byte to accommodate the additional character.
+
+Fixes: c4f7398bee9c ("kconfig: qconf: make debug links work again")
+Signed-off-by: Masahiro Yamada <masahiroy@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ scripts/kconfig/qconf.cc | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc
+index 959c2c78e1ef9..5e9f810b9e7f7 100644
+--- a/scripts/kconfig/qconf.cc
++++ b/scripts/kconfig/qconf.cc
+@@ -1166,7 +1166,7 @@ void ConfigInfoView::clicked(const QUrl &url)
+ {
+       QByteArray str = url.toEncoded();
+       const std::size_t count = str.size();
+-      char *data = new char[count + 1];
++      char *data = new char[count + 2];  // '$' + '\0'
+       struct symbol **result;
+       struct menu *m = NULL;
+-- 
+2.43.0
+
diff --git a/queue-6.11/kconfig-qconf-move-conf_read-before-drawing-tree-pai.patch b/queue-6.11/kconfig-qconf-move-conf_read-before-drawing-tree-pai.patch
new file mode 100644 (file)
index 0000000..0d4fffc
--- /dev/null
@@ -0,0 +1,44 @@
+From 908647d69c8233afad20f6fd61d7b7a4d9933ffe Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 1 Oct 2024 02:02:23 +0900
+Subject: kconfig: qconf: move conf_read() before drawing tree pain
+
+From: Masahiro Yamada <masahiroy@kernel.org>
+
+[ Upstream commit da724c33b685463720b1c625ac440e894dc57ec0 ]
+
+The constructor of ConfigMainWindow() calls show*View(), which needs
+to calculate symbol values. conf_read() must be called before that.
+
+Fixes: 060e05c3b422 ("kconfig: qconf: remove initial call to conf_changed()")
+Signed-off-by: Masahiro Yamada <masahiroy@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ scripts/kconfig/qconf.cc | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc
+index 7d239c032b3d6..959c2c78e1ef9 100644
+--- a/scripts/kconfig/qconf.cc
++++ b/scripts/kconfig/qconf.cc
+@@ -1505,6 +1505,8 @@ ConfigMainWindow::ConfigMainWindow(void)
+       connect(helpText, &ConfigInfoView::menuSelected,
+               this, &ConfigMainWindow::setMenuLink);
++      conf_read(NULL);
++
+       QString listMode = configSettings->value("/listMode", "symbol").toString();
+       if (listMode == "single")
+               showSingleView();
+@@ -1906,8 +1908,6 @@ int main(int ac, char** av)
+       configApp->connect(configApp, SIGNAL(lastWindowClosed()), SLOT(quit()));
+       configApp->connect(configApp, SIGNAL(aboutToQuit()), v, SLOT(saveSettings()));
+-      conf_read(NULL);
+-
+       v->show();
+       configApp->exec();
+-- 
+2.43.0
+
diff --git a/queue-6.11/mm-z3fold-deprecate-config_z3fold.patch b/queue-6.11/mm-z3fold-deprecate-config_z3fold.patch
new file mode 100644 (file)
index 0000000..065efb7
--- /dev/null
@@ -0,0 +1,164 @@
+From 14f0bd3b32487a3bf4a9f461a1ad9fddc2d95af4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Oct 2024 19:21:16 +0000
+Subject: mm: z3fold: deprecate CONFIG_Z3FOLD
+
+From: Yosry Ahmed <yosryahmed@google.com>
+
+The z3fold compressed pages allocator is rarely used, most users use
+zsmalloc.  The only disadvantage of zsmalloc in comparison is the
+dependency on MMU, and zbud is a more common option for !MMU as it was the
+default zswap allocator for a long time.
+
+Historically, zsmalloc had worse latency than zbud and z3fold but offered
+better memory savings.  This is no longer the case as shown by a simple
+recent analysis [1].  That analysis showed that z3fold does not have any
+advantage over zsmalloc or zbud considering both performance and memory
+usage.  In a kernel build test on tmpfs in a limited cgroup, z3fold took
+3% more time and used 1.8% more memory.  The latency of zswap_load() was
+7% higher, and that of zswap_store() was 10% higher.  Zsmalloc is better
+in all metrics.
+
+Moreover, z3fold apparently has latent bugs, which was made noticeable by
+a recent soft lockup bug report with z3fold [2].  Switching to zsmalloc
+not only fixed the problem, but also reduced the swap usage from 6~8G to
+1~2G.  Other users have also reported being bitten by mistakenly enabling
+z3fold.
+
+Other than hurting users, z3fold is repeatedly causing wasted engineering
+effort.  Apart from investigating the above bug, it came up in multiple
+development discussions (e.g.  [3]) as something we need to handle, when
+there aren't any legit users (at least not intentionally).
+
+The natural course of action is to deprecate z3fold, and remove in a few
+cycles if no objections are raised from active users.  Next on the list
+should be zbud, as it offers marginal latency gains at the cost of huge
+memory waste when compared to zsmalloc.  That one will need to wait until
+zsmalloc does not depend on MMU.
+
+Rename the user-visible config option from CONFIG_Z3FOLD to
+CONFIG_Z3FOLD_DEPRECATED so that users with CONFIG_Z3FOLD=y get a new
+prompt with explanation during make oldconfig.  Also, remove
+CONFIG_Z3FOLD=y from defconfigs.
+
+[1]https://lore.kernel.org/lkml/CAJD7tkbRF6od-2x_L8-A1QL3=2Ww13sCj4S3i4bNndqF+3+_Vg@mail.gmail.com/
+[2]https://lore.kernel.org/lkml/EF0ABD3E-A239-4111-A8AB-5C442E759CF3@gmail.com/
+[3]https://lore.kernel.org/lkml/CAJD7tkbnmeVugfunffSovJf9FAgy9rhBVt_tx=nxUveLUfqVsA@mail.gmail.com/
+
+[arnd@arndb.de: deprecate ZSWAP_ZPOOL_DEFAULT_Z3FOLD as well]
+  Link: https://lkml.kernel.org/r/20240909202625.1054880-1-arnd@kernel.org
+Link: https://lkml.kernel.org/r/20240904233343.933462-1-yosryahmed@google.com
+Signed-off-by: Yosry Ahmed <yosryahmed@google.com>
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Acked-by: Chris Down <chris@chrisdown.name>
+Acked-by: Nhat Pham <nphamcs@gmail.com>
+Acked-by: Johannes Weiner <hannes@cmpxchg.org>
+Acked-by: Vitaly Wool <vitaly.wool@konsulko.com>
+Acked-by: Christoph Hellwig <hch@lst.de>
+Cc: Aneesh Kumar K.V <aneesh.kumar@kernel.org>
+Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
+Cc: Huacai Chen <chenhuacai@kernel.org>
+Cc: Miaohe Lin <linmiaohe@huawei.com>
+Cc: Michael Ellerman <mpe@ellerman.id.au>
+Cc: Naveen N. Rao <naveen.n.rao@linux.ibm.com>
+Cc: Nicholas Piggin <npiggin@gmail.com>
+Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
+Cc: WANG Xuerui <kernel@xen0n.name>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+(cherry picked from commit 7a2369b74abf76cd3e54c45b30f6addb497f831b)
+Signed-off-by: Yosry Ahmed <yosryahmed@google.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/loongarch/configs/loongson3_defconfig |  1 -
+ arch/powerpc/configs/ppc64_defconfig       |  1 -
+ mm/Kconfig                                 | 25 ++++++++++++++++------
+ 3 files changed, 19 insertions(+), 8 deletions(-)
+
+diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig
+index b4252c357c8e2..75b366407a60a 100644
+--- a/arch/loongarch/configs/loongson3_defconfig
++++ b/arch/loongarch/configs/loongson3_defconfig
+@@ -96,7 +96,6 @@ CONFIG_ZPOOL=y
+ CONFIG_ZSWAP=y
+ CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y
+ CONFIG_ZBUD=y
+-CONFIG_Z3FOLD=y
+ CONFIG_ZSMALLOC=m
+ # CONFIG_COMPAT_BRK is not set
+ CONFIG_MEMORY_HOTPLUG=y
+diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
+index 544a65fda77bc..d39284489aa26 100644
+--- a/arch/powerpc/configs/ppc64_defconfig
++++ b/arch/powerpc/configs/ppc64_defconfig
+@@ -81,7 +81,6 @@ CONFIG_MODULE_SIG_SHA512=y
+ CONFIG_PARTITION_ADVANCED=y
+ CONFIG_BINFMT_MISC=m
+ CONFIG_ZSWAP=y
+-CONFIG_Z3FOLD=y
+ CONFIG_ZSMALLOC=y
+ # CONFIG_SLAB_MERGE_DEFAULT is not set
+ CONFIG_SLAB_FREELIST_RANDOM=y
+diff --git a/mm/Kconfig b/mm/Kconfig
+index b72e7d040f789..03395624bc709 100644
+--- a/mm/Kconfig
++++ b/mm/Kconfig
+@@ -146,12 +146,15 @@ config ZSWAP_ZPOOL_DEFAULT_ZBUD
+       help
+         Use the zbud allocator as the default allocator.
+-config ZSWAP_ZPOOL_DEFAULT_Z3FOLD
+-      bool "z3fold"
+-      select Z3FOLD
++config ZSWAP_ZPOOL_DEFAULT_Z3FOLD_DEPRECATED
++      bool "z3fold (DEPRECATED)"
++      select Z3FOLD_DEPRECATED
+       help
+         Use the z3fold allocator as the default allocator.
++        Deprecated and scheduled for removal in a few cycles,
++        see CONFIG_Z3FOLD_DEPRECATED.
++
+ config ZSWAP_ZPOOL_DEFAULT_ZSMALLOC
+       bool "zsmalloc"
+       depends on HAVE_ZSMALLOC
+@@ -164,7 +167,7 @@ config ZSWAP_ZPOOL_DEFAULT
+        string
+        depends on ZSWAP
+        default "zbud" if ZSWAP_ZPOOL_DEFAULT_ZBUD
+-       default "z3fold" if ZSWAP_ZPOOL_DEFAULT_Z3FOLD
++       default "z3fold" if ZSWAP_ZPOOL_DEFAULT_Z3FOLD_DEPRECATED
+        default "zsmalloc" if ZSWAP_ZPOOL_DEFAULT_ZSMALLOC
+        default ""
+@@ -178,15 +181,25 @@ config ZBUD
+         deterministic reclaim properties that make it preferable to a higher
+         density approach when reclaim will be used.
+-config Z3FOLD
+-      tristate "3:1 compression allocator (z3fold)"
++config Z3FOLD_DEPRECATED
++      tristate "3:1 compression allocator (z3fold) (DEPRECATED)"
+       depends on ZSWAP
+       help
++        Deprecated and scheduled for removal in a few cycles. If you have
++        a good reason for using Z3FOLD over ZSMALLOC, please contact
++        linux-mm@kvack.org and the zswap maintainers.
++
+         A special purpose allocator for storing compressed pages.
+         It is designed to store up to three compressed pages per physical
+         page. It is a ZBUD derivative so the simplicity and determinism are
+         still there.
++config Z3FOLD
++      tristate
++      default y if Z3FOLD_DEPRECATED=y
++      default m if Z3FOLD_DEPRECATED=m
++      depends on Z3FOLD_DEPRECATED
++
+ config HAVE_ZSMALLOC
+       def_bool y
+       depends on MMU
+-- 
+2.43.0
+
diff --git a/queue-6.11/nfsd-async-copy-result-needs-to-return-a-write-verif.patch b/queue-6.11/nfsd-async-copy-result-needs-to-return-a-write-verif.patch
new file mode 100644 (file)
index 0000000..c93a591
--- /dev/null
@@ -0,0 +1,117 @@
+From 3640d3313139b1c8883c29eea3c17b0e331a3dcd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 Aug 2024 13:40:03 -0400
+Subject: NFSD: Async COPY result needs to return a write verifier
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+[ Upstream commit 9ed666eba4e0a2bb8ffaa3739d830b64d4f2aaad ]
+
+Currently, when NFSD handles an asynchronous COPY, it returns a
+zero write verifier, relying on the subsequent CB_OFFLOAD callback
+to pass the write verifier and a stable_how4 value to the client.
+
+However, if the CB_OFFLOAD never arrives at the client (for example,
+if a network partition occurs just as the server sends the
+CB_OFFLOAD operation), the client will never receive this verifier.
+Thus, if the client sends a follow-up COMMIT, there is no way for
+the client to assess the COMMIT result.
+
+The usual recovery for a missing CB_OFFLOAD is for the client to
+send an OFFLOAD_STATUS operation, but that operation does not carry
+a write verifier in its result. Neither does it carry a stable_how4
+value, so the client /must/ send a COMMIT in this case -- which will
+always fail because currently there's still no write verifier in the
+COPY result.
+
+Thus the server needs to return a normal write verifier in its COPY
+result even if the COPY operation is to be performed asynchronously.
+
+If the server recognizes the callback stateid in subsequent
+OFFLOAD_STATUS operations, then obviously it has not restarted, and
+the write verifier the client received in the COPY result is still
+valid and can be used to assess a COMMIT of the copied data, if one
+is needed.
+
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Stable-dep-of: aadc3bbea163 ("NFSD: Limit the number of concurrent async COPY operations")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfsd/nfs4proc.c | 23 ++++++++---------------
+ 1 file changed, 8 insertions(+), 15 deletions(-)
+
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index 2e39cf2e502a3..60c526adc27c6 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -751,15 +751,6 @@ nfsd4_access(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+                          &access->ac_supported);
+ }
+-static void gen_boot_verifier(nfs4_verifier *verifier, struct net *net)
+-{
+-      __be32 *verf = (__be32 *)verifier->data;
+-
+-      BUILD_BUG_ON(2*sizeof(*verf) != sizeof(verifier->data));
+-
+-      nfsd_copy_write_verifier(verf, net_generic(net, nfsd_net_id));
+-}
+-
+ static __be32
+ nfsd4_commit(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+            union nfsd4_op_u *u)
+@@ -1630,7 +1621,6 @@ static void nfsd4_init_copy_res(struct nfsd4_copy *copy, bool sync)
+               test_bit(NFSD4_COPY_F_COMMITTED, &copy->cp_flags) ?
+                       NFS_FILE_SYNC : NFS_UNSTABLE;
+       nfsd4_copy_set_sync(copy, sync);
+-      gen_boot_verifier(&copy->cp_res.wr_verifier, copy->cp_clp->net);
+ }
+ static ssize_t _nfsd_copy_file_range(struct nfsd4_copy *copy,
+@@ -1803,9 +1793,11 @@ static __be32
+ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+               union nfsd4_op_u *u)
+ {
++      struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
++      struct nfsd4_copy *async_copy = NULL;
+       struct nfsd4_copy *copy = &u->copy;
++      struct nfsd42_write_res *result;
+       __be32 status;
+-      struct nfsd4_copy *async_copy = NULL;
+       /*
+        * Currently, async COPY is not reliable. Force all COPY
+@@ -1814,6 +1806,9 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+        */
+       nfsd4_copy_set_sync(copy, true);
++      result = &copy->cp_res;
++      nfsd_copy_write_verifier((__be32 *)&result->wr_verifier.data, nn);
++
+       copy->cp_clp = cstate->clp;
+       if (nfsd4_ssc_is_inter(copy)) {
+               trace_nfsd_copy_inter(copy);
+@@ -1838,8 +1833,6 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+       memcpy(&copy->fh, &cstate->current_fh.fh_handle,
+               sizeof(struct knfsd_fh));
+       if (nfsd4_copy_is_async(copy)) {
+-              struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+-
+               status = nfserrno(-ENOMEM);
+               async_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL);
+               if (!async_copy)
+@@ -1851,8 +1844,8 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+                       goto out_err;
+               if (!nfs4_init_copy_state(nn, copy))
+                       goto out_err;
+-              memcpy(&copy->cp_res.cb_stateid, &copy->cp_stateid.cs_stid,
+-                      sizeof(copy->cp_res.cb_stateid));
++              memcpy(&result->cb_stateid, &copy->cp_stateid.cs_stid,
++                      sizeof(result->cb_stateid));
+               dup_copy_fields(copy, async_copy);
+               async_copy->copy_task = kthread_create(nfsd4_do_async_copy,
+                               async_copy, "%s", "copy thread");
+-- 
+2.43.0
+
diff --git a/queue-6.11/nfsd-limit-the-number-of-concurrent-async-copy-opera.patch b/queue-6.11/nfsd-limit-the-number-of-concurrent-async-copy-opera.patch
new file mode 100644 (file)
index 0000000..09fae68
--- /dev/null
@@ -0,0 +1,115 @@
+From 616f16bf68cf75b449a2432f034abfded43f5979 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 Aug 2024 13:40:04 -0400
+Subject: NFSD: Limit the number of concurrent async COPY operations
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+[ Upstream commit aadc3bbea163b6caaaebfdd2b6c4667fbc726752 ]
+
+Nothing appears to limit the number of concurrent async COPY
+operations that clients can start. In addition, AFAICT each async
+COPY can copy an unlimited number of 4MB chunks, so can run for a
+long time. Thus IMO async COPY can become a DoS vector.
+
+Add a restriction mechanism that bounds the number of concurrent
+background COPY operations. Start simple and try to be fair -- this
+patch implements a per-namespace limit.
+
+An async COPY request that occurs while this limit is exceeded gets
+NFS4ERR_DELAY. The requesting client can choose to send the request
+again after a delay or fall back to a traditional read/write style
+copy.
+
+If there is need to make the mechanism more sophisticated, we can
+visit that in future patches.
+
+Cc: stable@vger.kernel.org
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfsd/netns.h     |  1 +
+ fs/nfsd/nfs4proc.c  | 11 +++++++++--
+ fs/nfsd/nfs4state.c |  1 +
+ fs/nfsd/xdr4.h      |  1 +
+ 4 files changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
+index 14ec156563209..5cae26917436c 100644
+--- a/fs/nfsd/netns.h
++++ b/fs/nfsd/netns.h
+@@ -148,6 +148,7 @@ struct nfsd_net {
+       u32             s2s_cp_cl_id;
+       struct idr      s2s_cp_stateids;
+       spinlock_t      s2s_cp_lock;
++      atomic_t        pending_async_copies;
+       /*
+        * Version information
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index 60c526adc27c6..5768b2ff1d1d1 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -1279,6 +1279,7 @@ static void nfs4_put_copy(struct nfsd4_copy *copy)
+ {
+       if (!refcount_dec_and_test(&copy->refcount))
+               return;
++      atomic_dec(&copy->cp_nn->pending_async_copies);
+       kfree(copy->cp_src);
+       kfree(copy);
+ }
+@@ -1833,10 +1834,16 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+       memcpy(&copy->fh, &cstate->current_fh.fh_handle,
+               sizeof(struct knfsd_fh));
+       if (nfsd4_copy_is_async(copy)) {
+-              status = nfserrno(-ENOMEM);
+               async_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL);
+               if (!async_copy)
+                       goto out_err;
++              async_copy->cp_nn = nn;
++              /* Arbitrary cap on number of pending async copy operations */
++              if (atomic_inc_return(&nn->pending_async_copies) >
++                              (int)rqstp->rq_pool->sp_nrthreads) {
++                      atomic_dec(&nn->pending_async_copies);
++                      goto out_err;
++              }
+               INIT_LIST_HEAD(&async_copy->copies);
+               refcount_set(&async_copy->refcount, 1);
+               async_copy->cp_src = kmalloc(sizeof(*async_copy->cp_src), GFP_KERNEL);
+@@ -1876,7 +1883,7 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+       }
+       if (async_copy)
+               cleanup_async_copy(async_copy);
+-      status = nfserrno(-ENOMEM);
++      status = nfserr_jukebox;
+       goto out;
+ }
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index f4eae4b65572a..3837f4e417247 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -8575,6 +8575,7 @@ static int nfs4_state_create_net(struct net *net)
+       spin_lock_init(&nn->client_lock);
+       spin_lock_init(&nn->s2s_cp_lock);
+       idr_init(&nn->s2s_cp_stateids);
++      atomic_set(&nn->pending_async_copies, 0);
+       spin_lock_init(&nn->blocked_locks_lock);
+       INIT_LIST_HEAD(&nn->blocked_locks_lru);
+diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
+index fbdd42cde1fa5..2a21a7662e030 100644
+--- a/fs/nfsd/xdr4.h
++++ b/fs/nfsd/xdr4.h
+@@ -713,6 +713,7 @@ struct nfsd4_copy {
+       struct nfsd4_ssc_umount_item *ss_nsui;
+       struct nfs_fh           c_fh;
+       nfs4_stateid            stateid;
++      struct nfsd_net         *cp_nn;
+ };
+ static inline void nfsd4_copy_set_sync(struct nfsd4_copy *copy, bool sync)
+-- 
+2.43.0
+
diff --git a/queue-6.11/r8169-add-tally-counter-fields-added-with-rtl8125.patch b/queue-6.11/r8169-add-tally-counter-fields-added-with-rtl8125.patch
new file mode 100644 (file)
index 0000000..c9714eb
--- /dev/null
@@ -0,0 +1,66 @@
+From b1592e298c405fcdb16eeff1b90c1253c5293875 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Sep 2024 23:04:46 +0200
+Subject: r8169: add tally counter fields added with RTL8125
+
+From: Heiner Kallweit <hkallweit1@gmail.com>
+
+[ Upstream commit ced8e8b8f40accfcce4a2bbd8b150aa76d5eff9a ]
+
+RTL8125 added fields to the tally counter, what may result in the chip
+dma'ing these new fields to unallocated memory. Therefore make sure
+that the allocated memory area is big enough to hold all of the
+tally counter values, even if we use only parts of it.
+
+Fixes: f1bce4ad2f1c ("r8169: add support for RTL8125")
+Cc: stable@vger.kernel.org
+Signed-off-by: Heiner Kallweit <hkallweit1@gmail.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/741d26a9-2b2b-485d-91d9-ecb302e345b5@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/realtek/r8169_main.c | 27 +++++++++++++++++++++++
+ 1 file changed, 27 insertions(+)
+
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index 4c22e6f602702..01e18f645c0ed 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -577,6 +577,33 @@ struct rtl8169_counters {
+       __le32  rx_multicast;
+       __le16  tx_aborted;
+       __le16  tx_underrun;
++      /* new since RTL8125 */
++      __le64 tx_octets;
++      __le64 rx_octets;
++      __le64 rx_multicast64;
++      __le64 tx_unicast64;
++      __le64 tx_broadcast64;
++      __le64 tx_multicast64;
++      __le32 tx_pause_on;
++      __le32 tx_pause_off;
++      __le32 tx_pause_all;
++      __le32 tx_deferred;
++      __le32 tx_late_collision;
++      __le32 tx_all_collision;
++      __le32 tx_aborted32;
++      __le32 align_errors32;
++      __le32 rx_frame_too_long;
++      __le32 rx_runt;
++      __le32 rx_pause_on;
++      __le32 rx_pause_off;
++      __le32 rx_pause_all;
++      __le32 rx_unknown_opcode;
++      __le32 rx_mac_error;
++      __le32 tx_underrun32;
++      __le32 rx_mac_missed;
++      __le32 rx_tcam_dropped;
++      __le32 tdu;
++      __le32 rdu;
+ };
+ struct rtl8169_tc_offsets {
+-- 
+2.43.0
+
diff --git a/queue-6.11/r8169-fix-spelling-mistake-tx_underun-tx_underrun.patch b/queue-6.11/r8169-fix-spelling-mistake-tx_underun-tx_underrun.patch
new file mode 100644 (file)
index 0000000..bd726a4
--- /dev/null
@@ -0,0 +1,48 @@
+From bae99a4a204ed9ca9d52855b758087497b0680c0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Sep 2024 15:00:21 +0100
+Subject: r8169: Fix spelling mistake: "tx_underun" -> "tx_underrun"
+
+From: Colin Ian King <colin.i.king@gmail.com>
+
+[ Upstream commit 8df9439389a44fb2cc4ef695e08d6a8870b1616c ]
+
+There is a spelling mistake in the struct field tx_underun, rename
+it to tx_underrun.
+
+Signed-off-by: Colin Ian King <colin.i.king@gmail.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Reviewed-by: Heiner Kallweit <hkallweit1@gmail.com>
+Link: https://patch.msgid.link/20240909140021.64884-1-colin.i.king@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: ced8e8b8f40a ("r8169: add tally counter fields added with RTL8125")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/realtek/r8169_main.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index 3507c2e28110d..4c22e6f602702 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -576,7 +576,7 @@ struct rtl8169_counters {
+       __le64  rx_broadcast;
+       __le32  rx_multicast;
+       __le16  tx_aborted;
+-      __le16  tx_underun;
++      __le16  tx_underrun;
+ };
+ struct rtl8169_tc_offsets {
+@@ -1841,7 +1841,7 @@ static void rtl8169_get_ethtool_stats(struct net_device *dev,
+       data[9] = le64_to_cpu(counters->rx_broadcast);
+       data[10] = le32_to_cpu(counters->rx_multicast);
+       data[11] = le16_to_cpu(counters->tx_aborted);
+-      data[12] = le16_to_cpu(counters->tx_underun);
++      data[12] = le16_to_cpu(counters->tx_underrun);
+ }
+ static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+-- 
+2.43.0
+
diff --git a/queue-6.11/remoteproc-k3-r5-acquire-mailbox-handle-during-probe.patch b/queue-6.11/remoteproc-k3-r5-acquire-mailbox-handle-during-probe.patch
new file mode 100644 (file)
index 0000000..d3dbca7
--- /dev/null
@@ -0,0 +1,195 @@
+From e1990338860075febe4b3bebaf6b6f7f1158a6fa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Aug 2024 13:11:26 +0530
+Subject: remoteproc: k3-r5: Acquire mailbox handle during probe routine
+
+From: Beleswar Padhi <b-padhi@ti.com>
+
+[ Upstream commit f3f11cfe890733373ddbb1ce8991ccd4ee5e79e1 ]
+
+Acquire the mailbox handle during device probe and do not release handle
+in stop/detach routine or error paths. This removes the redundant
+requests for the mbox handle later during rproc start/attach. It also
+allows deferring the remoteproc probe if the mailbox is not probed yet.
+
+Signed-off-by: Beleswar Padhi <b-padhi@ti.com>
+Link: https://lore.kernel.org/r/20240808074127.2688131-3-b-padhi@ti.com
+Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
+Stable-dep-of: 8fa052c29e50 ("remoteproc: k3-r5: Delay notification of wakeup event")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/remoteproc/ti_k3_r5_remoteproc.c | 78 +++++++++---------------
+ 1 file changed, 30 insertions(+), 48 deletions(-)
+
+diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
+index eb09d2e9b32a4..6424b347aa4f2 100644
+--- a/drivers/remoteproc/ti_k3_r5_remoteproc.c
++++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
+@@ -194,6 +194,10 @@ static void k3_r5_rproc_mbox_callback(struct mbox_client *client, void *data)
+       const char *name = kproc->rproc->name;
+       u32 msg = omap_mbox_message(data);
++      /* Do not forward message from a detached core */
++      if (kproc->rproc->state == RPROC_DETACHED)
++              return;
++
+       dev_dbg(dev, "mbox msg: 0x%x\n", msg);
+       switch (msg) {
+@@ -229,6 +233,10 @@ static void k3_r5_rproc_kick(struct rproc *rproc, int vqid)
+       mbox_msg_t msg = (mbox_msg_t)vqid;
+       int ret;
++      /* Do not forward message to a detached core */
++      if (kproc->rproc->state == RPROC_DETACHED)
++              return;
++
+       /* send the index of the triggered virtqueue in the mailbox payload */
+       ret = mbox_send_message(kproc->mbox, (void *)msg);
+       if (ret < 0)
+@@ -399,12 +407,9 @@ static int k3_r5_rproc_request_mbox(struct rproc *rproc)
+       client->knows_txdone = false;
+       kproc->mbox = mbox_request_channel(client, 0);
+-      if (IS_ERR(kproc->mbox)) {
+-              ret = -EBUSY;
+-              dev_err(dev, "mbox_request_channel failed: %ld\n",
+-                      PTR_ERR(kproc->mbox));
+-              return ret;
+-      }
++      if (IS_ERR(kproc->mbox))
++              return dev_err_probe(dev, PTR_ERR(kproc->mbox),
++                                   "mbox_request_channel failed\n");
+       /*
+        * Ping the remote processor, this is only for sanity-sake for now;
+@@ -552,10 +557,6 @@ static int k3_r5_rproc_start(struct rproc *rproc)
+       u32 boot_addr;
+       int ret;
+-      ret = k3_r5_rproc_request_mbox(rproc);
+-      if (ret)
+-              return ret;
+-
+       boot_addr = rproc->bootaddr;
+       /* TODO: add boot_addr sanity checking */
+       dev_dbg(dev, "booting R5F core using boot addr = 0x%x\n", boot_addr);
+@@ -564,7 +565,7 @@ static int k3_r5_rproc_start(struct rproc *rproc)
+       core = kproc->core;
+       ret = ti_sci_proc_set_config(core->tsp, boot_addr, 0, 0);
+       if (ret)
+-              goto put_mbox;
++              return ret;
+       /* unhalt/run all applicable cores */
+       if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
+@@ -580,13 +581,12 @@ static int k3_r5_rproc_start(struct rproc *rproc)
+               if (core != core0 && core0->rproc->state == RPROC_OFFLINE) {
+                       dev_err(dev, "%s: can not start core 1 before core 0\n",
+                               __func__);
+-                      ret = -EPERM;
+-                      goto put_mbox;
++                      return -EPERM;
+               }
+               ret = k3_r5_core_run(core);
+               if (ret)
+-                      goto put_mbox;
++                      return ret;
+       }
+       return 0;
+@@ -596,8 +596,6 @@ static int k3_r5_rproc_start(struct rproc *rproc)
+               if (k3_r5_core_halt(core))
+                       dev_warn(core->dev, "core halt back failed\n");
+       }
+-put_mbox:
+-      mbox_free_channel(kproc->mbox);
+       return ret;
+ }
+@@ -658,8 +656,6 @@ static int k3_r5_rproc_stop(struct rproc *rproc)
+                       goto out;
+       }
+-      mbox_free_channel(kproc->mbox);
+-
+       return 0;
+ unroll_core_halt:
+@@ -674,42 +670,22 @@ static int k3_r5_rproc_stop(struct rproc *rproc)
+ /*
+  * Attach to a running R5F remote processor (IPC-only mode)
+  *
+- * The R5F attach callback only needs to request the mailbox, the remote
+- * processor is already booted, so there is no need to issue any TI-SCI
+- * commands to boot the R5F cores in IPC-only mode. This callback is invoked
+- * only in IPC-only mode.
++ * The R5F attach callback is a NOP. The remote processor is already booted, and
++ * all required resources have been acquired during probe routine, so there is
++ * no need to issue any TI-SCI commands to boot the R5F cores in IPC-only mode.
++ * This callback is invoked only in IPC-only mode and exists because
++ * rproc_validate() checks for its existence.
+  */
+-static int k3_r5_rproc_attach(struct rproc *rproc)
+-{
+-      struct k3_r5_rproc *kproc = rproc->priv;
+-      struct device *dev = kproc->dev;
+-      int ret;
+-
+-      ret = k3_r5_rproc_request_mbox(rproc);
+-      if (ret)
+-              return ret;
+-
+-      dev_info(dev, "R5F core initialized in IPC-only mode\n");
+-      return 0;
+-}
++static int k3_r5_rproc_attach(struct rproc *rproc) { return 0; }
+ /*
+  * Detach from a running R5F remote processor (IPC-only mode)
+  *
+- * The R5F detach callback performs the opposite operation to attach callback
+- * and only needs to release the mailbox, the R5F cores are not stopped and
+- * will be left in booted state in IPC-only mode. This callback is invoked
+- * only in IPC-only mode.
++ * The R5F detach callback is a NOP. The R5F cores are not stopped and will be
++ * left in booted state in IPC-only mode. This callback is invoked only in
++ * IPC-only mode and exists for sanity's sake.
+  */
+-static int k3_r5_rproc_detach(struct rproc *rproc)
+-{
+-      struct k3_r5_rproc *kproc = rproc->priv;
+-      struct device *dev = kproc->dev;
+-
+-      mbox_free_channel(kproc->mbox);
+-      dev_info(dev, "R5F core deinitialized in IPC-only mode\n");
+-      return 0;
+-}
++static int k3_r5_rproc_detach(struct rproc *rproc) { return 0; }
+ /*
+  * This function implements the .get_loaded_rsc_table() callback and is used
+@@ -1278,6 +1254,10 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
+               kproc->rproc = rproc;
+               core->rproc = rproc;
++              ret = k3_r5_rproc_request_mbox(rproc);
++              if (ret)
++                      return ret;
++
+               ret = k3_r5_rproc_configure_mode(kproc);
+               if (ret < 0)
+                       goto err_config;
+@@ -1396,6 +1376,8 @@ static void k3_r5_cluster_rproc_exit(void *data)
+                       }
+               }
++              mbox_free_channel(kproc->mbox);
++
+               rproc_del(rproc);
+               k3_r5_reserved_mem_exit(kproc);
+-- 
+2.43.0
+
diff --git a/queue-6.11/remoteproc-k3-r5-delay-notification-of-wakeup-event.patch b/queue-6.11/remoteproc-k3-r5-delay-notification-of-wakeup-event.patch
new file mode 100644 (file)
index 0000000..e8c153c
--- /dev/null
@@ -0,0 +1,57 @@
+From 2b9d4207ad6ae57275d9189fba60803ac86f764e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 20 Aug 2024 16:20:04 +0530
+Subject: remoteproc: k3-r5: Delay notification of wakeup event
+
+From: Udit Kumar <u-kumar1@ti.com>
+
+[ Upstream commit 8fa052c29e509f3e47d56d7fc2ca28094d78c60a ]
+
+Few times, core1 was scheduled to boot first before core0, which leads
+to error:
+
+'k3_r5_rproc_start: can not start core 1 before core 0'.
+
+This was happening due to some scheduling between prepare and start
+callback. The probe function waits for event, which is getting
+triggered by prepare callback. To avoid above condition move event
+trigger to start instead of prepare callback.
+
+Fixes: 61f6f68447ab ("remoteproc: k3-r5: Wait for core0 power-up before powering up core1")
+Signed-off-by: Udit Kumar <u-kumar1@ti.com>
+[ Applied wakeup event trigger only for Split-Mode booted rprocs ]
+Signed-off-by: Beleswar Padhi <b-padhi@ti.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20240820105004.2788327-1-b-padhi@ti.com
+Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/remoteproc/ti_k3_r5_remoteproc.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
+index 6424b347aa4f2..2992fd4eca648 100644
+--- a/drivers/remoteproc/ti_k3_r5_remoteproc.c
++++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
+@@ -469,8 +469,6 @@ static int k3_r5_rproc_prepare(struct rproc *rproc)
+                       ret);
+               return ret;
+       }
+-      core->released_from_reset = true;
+-      wake_up_interruptible(&cluster->core_transition);
+       /*
+        * Newer IP revisions like on J7200 SoCs support h/w auto-initialization
+@@ -587,6 +585,9 @@ static int k3_r5_rproc_start(struct rproc *rproc)
+               ret = k3_r5_core_run(core);
+               if (ret)
+                       return ret;
++
++              core->released_from_reset = true;
++              wake_up_interruptible(&cluster->core_transition);
+       }
+       return 0;
+-- 
+2.43.0
+
index 19194419f91f8f3c374b270c400c5af50a7fffac..7ddf070729e72cf22ab737d899affc0303559290 100644 (file)
@@ -526,3 +526,22 @@ drm-amd-display-add-hdr-workaround-for-specific-edp.patch
 drm-amd-display-enable-idle-workqueue-for-more-ips-modes.patch
 drm-amd-display-update-dml2-policy-enhancedprefetchscheduleaccelerationfinal-dcn35.patch
 drm-amd-display-fix-system-hang-while-resume-with-tbt-monitor.patch
+kconfig-fix-infinite-loop-in-sym_calc_choice.patch
+kconfig-qconf-move-conf_read-before-drawing-tree-pai.patch
+kconfig-qconf-fix-buffer-overflow-in-debug-links.patch
+arm64-cputype-add-neoverse-n3-definitions.patch
+arm64-errata-expand-speculative-ssbs-workaround-once.patch
+uprobes-fix-kernel-info-leak-via-uprobes-vma.patch
+mm-z3fold-deprecate-config_z3fold.patch
+drm-amd-display-allow-backlight-to-go-below-amdgpu_d.patch
+sunrpc-change-sp_nrthreads-from-atomic_t-to-unsigned.patch
+nfsd-async-copy-result-needs-to-return-a-write-verif.patch
+nfsd-limit-the-number-of-concurrent-async-copy-opera.patch
+remoteproc-k3-r5-acquire-mailbox-handle-during-probe.patch
+remoteproc-k3-r5-delay-notification-of-wakeup-event.patch
+r8169-fix-spelling-mistake-tx_underun-tx_underrun.patch
+r8169-add-tally-counter-fields-added-with-rtl8125.patch
+acpi-battery-simplify-battery-hook-locking.patch
+acpi-battery-fix-possible-crash-when-unregistering-a.patch
+drm-xe-clean-up-vm-exec-queue-file-lock-usage.patch
+drm-xe-vm-move-xa_alloc-to-prevent-uaf.patch
diff --git a/queue-6.11/sunrpc-change-sp_nrthreads-from-atomic_t-to-unsigned.patch b/queue-6.11/sunrpc-change-sp_nrthreads-from-atomic_t-to-unsigned.patch
new file mode 100644 (file)
index 0000000..016a2bb
--- /dev/null
@@ -0,0 +1,144 @@
+From 579d91b428b25d1e306802a287383bffb723bd16 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Jul 2024 17:14:18 +1000
+Subject: sunrpc: change sp_nrthreads from atomic_t to unsigned int.
+
+From: NeilBrown <neilb@suse.de>
+
+[ Upstream commit 60749cbe3d8ae572a6c7dda675de3e8b25797a18 ]
+
+sp_nrthreads is only ever accessed under the service mutex
+  nlmsvc_mutex nfs_callback_mutex nfsd_mutex
+so these is no need for it to be an atomic_t.
+
+The fact that all code using it is single-threaded means that we can
+simplify svc_pool_victim and remove the temporary elevation of
+sp_nrthreads.
+
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Stable-dep-of: aadc3bbea163 ("NFSD: Limit the number of concurrent async COPY operations")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfsd/nfsctl.c           |  2 +-
+ fs/nfsd/nfssvc.c           |  2 +-
+ include/linux/sunrpc/svc.h |  4 ++--
+ net/sunrpc/svc.c           | 31 +++++++++++--------------------
+ 4 files changed, 15 insertions(+), 24 deletions(-)
+
+diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
+index 34eb2c2cbcde3..e8704a4e848ca 100644
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -1762,7 +1762,7 @@ int nfsd_nl_threads_get_doit(struct sk_buff *skb, struct genl_info *info)
+                       struct svc_pool *sp = &nn->nfsd_serv->sv_pools[i];
+                       err = nla_put_u32(skb, NFSD_A_SERVER_THREADS,
+-                                        atomic_read(&sp->sp_nrthreads));
++                                        sp->sp_nrthreads);
+                       if (err)
+                               goto err_unlock;
+               }
+diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
+index 0bc8eaa5e0098..8103c3c90cd11 100644
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -705,7 +705,7 @@ int nfsd_get_nrthreads(int n, int *nthreads, struct net *net)
+       if (serv)
+               for (i = 0; i < serv->sv_nrpools && i < n; i++)
+-                      nthreads[i] = atomic_read(&serv->sv_pools[i].sp_nrthreads);
++                      nthreads[i] = serv->sv_pools[i].sp_nrthreads;
+       return 0;
+ }
+diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
+index a7d0406b9ef59..6811681033c0f 100644
+--- a/include/linux/sunrpc/svc.h
++++ b/include/linux/sunrpc/svc.h
+@@ -33,9 +33,9 @@
+  * node traffic on multi-node NUMA NFS servers.
+  */
+ struct svc_pool {
+-      unsigned int            sp_id;          /* pool id; also node id on NUMA */
++      unsigned int            sp_id;          /* pool id; also node id on NUMA */
+       struct lwq              sp_xprts;       /* pending transports */
+-      atomic_t                sp_nrthreads;   /* # of threads in pool */
++      unsigned int            sp_nrthreads;   /* # of threads in pool */
+       struct list_head        sp_all_threads; /* all server threads */
+       struct llist_head       sp_idle_threads; /* idle server threads */
+diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
+index 88a59cfa5583c..df06b152ed94e 100644
+--- a/net/sunrpc/svc.c
++++ b/net/sunrpc/svc.c
+@@ -713,7 +713,7 @@ svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
+       serv->sv_nrthreads += 1;
+       spin_unlock_bh(&serv->sv_lock);
+-      atomic_inc(&pool->sp_nrthreads);
++      pool->sp_nrthreads += 1;
+       /* Protected by whatever lock the service uses when calling
+        * svc_set_num_threads()
+@@ -768,31 +768,22 @@ svc_pool_victim(struct svc_serv *serv, struct svc_pool *target_pool,
+       struct svc_pool *pool;
+       unsigned int i;
+-retry:
+       pool = target_pool;
+-      if (pool != NULL) {
+-              if (atomic_inc_not_zero(&pool->sp_nrthreads))
+-                      goto found_pool;
+-              return NULL;
+-      } else {
++      if (!pool) {
+               for (i = 0; i < serv->sv_nrpools; i++) {
+                       pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
+-                      if (atomic_inc_not_zero(&pool->sp_nrthreads))
+-                              goto found_pool;
++                      if (pool->sp_nrthreads)
++                              break;
+               }
+-              return NULL;
+       }
+-found_pool:
+-      set_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
+-      set_bit(SP_NEED_VICTIM, &pool->sp_flags);
+-      if (!atomic_dec_and_test(&pool->sp_nrthreads))
++      if (pool && pool->sp_nrthreads) {
++              set_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
++              set_bit(SP_NEED_VICTIM, &pool->sp_flags);
+               return pool;
+-      /* Nothing left in this pool any more */
+-      clear_bit(SP_NEED_VICTIM, &pool->sp_flags);
+-      clear_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
+-      goto retry;
++      }
++      return NULL;
+ }
+ static int
+@@ -871,7 +862,7 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
+       if (!pool)
+               nrservs -= serv->sv_nrthreads;
+       else
+-              nrservs -= atomic_read(&pool->sp_nrthreads);
++              nrservs -= pool->sp_nrthreads;
+       if (nrservs > 0)
+               return svc_start_kthreads(serv, pool, nrservs);
+@@ -959,7 +950,7 @@ svc_exit_thread(struct svc_rqst *rqstp)
+       list_del_rcu(&rqstp->rq_all);
+-      atomic_dec(&pool->sp_nrthreads);
++      pool->sp_nrthreads -= 1;
+       spin_lock_bh(&serv->sv_lock);
+       serv->sv_nrthreads -= 1;
+-- 
+2.43.0
+
diff --git a/queue-6.11/uprobes-fix-kernel-info-leak-via-uprobes-vma.patch b/queue-6.11/uprobes-fix-kernel-info-leak-via-uprobes-vma.patch
new file mode 100644 (file)
index 0000000..a3f8173
--- /dev/null
@@ -0,0 +1,43 @@
+From 8237517c9ee3f83b67c3aa4f716ac6dccc00cd37 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Oct 2024 19:46:01 +0200
+Subject: uprobes: fix kernel info leak via "[uprobes]" vma
+
+From: Oleg Nesterov <oleg@redhat.com>
+
+commit 34820304cc2cd1804ee1f8f3504ec77813d29c8e upstream.
+
+xol_add_vma() maps the uninitialized page allocated by __create_xol_area()
+into userspace. On some architectures (x86) this memory is readable even
+without VM_READ, VM_EXEC results in the same pgprot_t as VM_EXEC|VM_READ,
+although this doesn't really matter, debugger can read this memory anyway.
+
+Link: https://lore.kernel.org/all/20240929162047.GA12611@redhat.com/
+
+Reported-by: Will Deacon <will@kernel.org>
+Fixes: d4b3b6384f98 ("uprobes/core: Allocate XOL slots for uprobes use")
+Cc: stable@vger.kernel.org
+Acked-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Signed-off-by: Oleg Nesterov <oleg@redhat.com>
+Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/events/uprobes.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
+index 333c44f2ce55d..56cd0c7f516d3 100644
+--- a/kernel/events/uprobes.c
++++ b/kernel/events/uprobes.c
+@@ -1500,7 +1500,7 @@ static struct xol_area *__create_xol_area(unsigned long vaddr)
+       area->xol_mapping.name = "[uprobes]";
+       area->xol_mapping.pages = area->pages;
+-      area->pages[0] = alloc_page(GFP_HIGHUSER);
++      area->pages[0] = alloc_page(GFP_HIGHUSER | __GFP_ZERO);
+       if (!area->pages[0])
+               goto free_bitmap;
+       area->pages[1] = NULL;
+-- 
+2.43.0
+