git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.12-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 17 Oct 2025 07:13:19 +0000 (09:13 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 17 Oct 2025 07:13:19 +0000 (09:13 +0200)
added patches:
arm64-cputype-add-neoverse-v3ae-definitions.patch
arm64-errata-apply-workarounds-for-neoverse-v3ae.patch
arm64-mte-do-not-flag-the-zero-page-as-pg_mte_tagged.patch
asoc-sof-ipc4-pcm-enable-delay-reporting-for-chaindma-streams.patch
asoc-sof-ipc4-pcm-fix-delay-calculation-when-dsp-resamples.patch
btrfs-fix-the-incorrect-max_bytes-value-for-find_lock_delalloc_range.patch
cpufreq-make-drivers-using-cpufreq_eternal-specify-transition-latency.patch
kvm-x86-advertise-srso_user_kernel_no-to-userspace.patch
lib-crypto-curve25519-hacl64-disable-kasan-with-clang-17-and-older.patch
media-mc-clear-minor-number-before-put-device.patch
mfd-intel_soc_pmic_chtdc_ti-drop-unneeded-assignment-for-cache_type.patch
mfd-intel_soc_pmic_chtdc_ti-fix-invalid-regmap-config-max_register-value.patch
mfd-intel_soc_pmic_chtdc_ti-set-use_single_read-regmap_config-flag.patch
mm-ksm-fix-incorrect-ksm-counter-handling-in-mm_struct-during-fork.patch
pci-endpoint-pci-epf-test-add-null-check-for-dma-channels-before-release.patch
pci-endpoint-remove-surplus-return-statement-from-pci_epf_test_clean_dma_chan.patch
selftests-mm-skip-soft-dirty-tests-when-config_mem_soft_dirty-is-disabled.patch
squashfs-add-additional-inode-sanity-checking.patch
squashfs-reject-negative-file-sizes-in-squashfs_read_inode.patch
statmount-don-t-call-path_put-under-namespace-semaphore.patch
tracing-fix-race-condition-in-kprobe-initialization-causing-null-pointer-dereference.patch
x86-kvm-force-legacy-pci-hole-to-uc-when-overriding-mtrrs-for-tdx-snp.patch
x86-mtrr-rename-mtrr_overwrite_state-to-guest_force_mtrr_state.patch

24 files changed:
queue-6.12/arm64-cputype-add-neoverse-v3ae-definitions.patch [new file with mode: 0644]
queue-6.12/arm64-errata-apply-workarounds-for-neoverse-v3ae.patch [new file with mode: 0644]
queue-6.12/arm64-mte-do-not-flag-the-zero-page-as-pg_mte_tagged.patch [new file with mode: 0644]
queue-6.12/asoc-sof-ipc4-pcm-enable-delay-reporting-for-chaindma-streams.patch [new file with mode: 0644]
queue-6.12/asoc-sof-ipc4-pcm-fix-delay-calculation-when-dsp-resamples.patch [new file with mode: 0644]
queue-6.12/btrfs-fix-the-incorrect-max_bytes-value-for-find_lock_delalloc_range.patch [new file with mode: 0644]
queue-6.12/cpufreq-make-drivers-using-cpufreq_eternal-specify-transition-latency.patch [new file with mode: 0644]
queue-6.12/kvm-x86-advertise-srso_user_kernel_no-to-userspace.patch [new file with mode: 0644]
queue-6.12/lib-crypto-curve25519-hacl64-disable-kasan-with-clang-17-and-older.patch [new file with mode: 0644]
queue-6.12/media-mc-clear-minor-number-before-put-device.patch [new file with mode: 0644]
queue-6.12/mfd-intel_soc_pmic_chtdc_ti-drop-unneeded-assignment-for-cache_type.patch [new file with mode: 0644]
queue-6.12/mfd-intel_soc_pmic_chtdc_ti-fix-invalid-regmap-config-max_register-value.patch [new file with mode: 0644]
queue-6.12/mfd-intel_soc_pmic_chtdc_ti-set-use_single_read-regmap_config-flag.patch [new file with mode: 0644]
queue-6.12/mm-ksm-fix-incorrect-ksm-counter-handling-in-mm_struct-during-fork.patch [new file with mode: 0644]
queue-6.12/pci-endpoint-pci-epf-test-add-null-check-for-dma-channels-before-release.patch [new file with mode: 0644]
queue-6.12/pci-endpoint-remove-surplus-return-statement-from-pci_epf_test_clean_dma_chan.patch [new file with mode: 0644]
queue-6.12/selftests-mm-skip-soft-dirty-tests-when-config_mem_soft_dirty-is-disabled.patch [new file with mode: 0644]
queue-6.12/series
queue-6.12/squashfs-add-additional-inode-sanity-checking.patch [new file with mode: 0644]
queue-6.12/squashfs-reject-negative-file-sizes-in-squashfs_read_inode.patch [new file with mode: 0644]
queue-6.12/statmount-don-t-call-path_put-under-namespace-semaphore.patch [new file with mode: 0644]
queue-6.12/tracing-fix-race-condition-in-kprobe-initialization-causing-null-pointer-dereference.patch [new file with mode: 0644]
queue-6.12/x86-kvm-force-legacy-pci-hole-to-uc-when-overriding-mtrrs-for-tdx-snp.patch [new file with mode: 0644]
queue-6.12/x86-mtrr-rename-mtrr_overwrite_state-to-guest_force_mtrr_state.patch [new file with mode: 0644]

diff --git a/queue-6.12/arm64-cputype-add-neoverse-v3ae-definitions.patch b/queue-6.12/arm64-cputype-add-neoverse-v3ae-definitions.patch
new file mode 100644 (file)
index 0000000..201a7f1
--- /dev/null
@@ -0,0 +1,52 @@
+From stable+bounces-186003-greg=kroah.com@vger.kernel.org Thu Oct 16 13:12:49 2025
+From: Ryan Roberts <ryan.roberts@arm.com>
+Date: Thu, 16 Oct 2025 12:12:05 +0100
+Subject: arm64: cputype: Add Neoverse-V3AE definitions
+To: stable@vger.kernel.org
+Cc: Ryan Roberts <ryan.roberts@arm.com>, catalin.marinas@arm.com, will@kernel.org, mark.rutland@arm.com, linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org, James Morse <james.morse@arm.com>
+Message-ID: <20251016111208.3983300-2-ryan.roberts@arm.com>
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+[ Upstream commit 3bbf004c4808e2c3241e5c1ad6cc102f38a03c39 ]
+
+Add cputype definitions for Neoverse-V3AE. These will be used for errata
+detection in subsequent patches.
+
+These values can be found in the Neoverse-V3AE TRM:
+
+  https://developer.arm.com/documentation/SDEN-2615521/9-0/
+
+... in section A.6.1 ("MIDR_EL1, Main ID Register").
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Cc: James Morse <james.morse@arm.com>
+Cc: Will Deacon <will@kernel.org>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
+Signed-off-by: Will Deacon <will@kernel.org>
+[ Ryan: Trivial backport ]
+Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/cputype.h |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/arm64/include/asm/cputype.h
++++ b/arch/arm64/include/asm/cputype.h
+@@ -93,6 +93,7 @@
+ #define ARM_CPU_PART_NEOVERSE_V2      0xD4F
+ #define ARM_CPU_PART_CORTEX_A720      0xD81
+ #define ARM_CPU_PART_CORTEX_X4                0xD82
++#define ARM_CPU_PART_NEOVERSE_V3AE    0xD83
+ #define ARM_CPU_PART_NEOVERSE_V3      0xD84
+ #define ARM_CPU_PART_CORTEX_X925      0xD85
+ #define ARM_CPU_PART_CORTEX_A725      0xD87
+@@ -180,6 +181,7 @@
+ #define MIDR_NEOVERSE_V2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V2)
+ #define MIDR_CORTEX_A720 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A720)
+ #define MIDR_CORTEX_X4 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X4)
++#define MIDR_NEOVERSE_V3AE    MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3AE)
+ #define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3)
+ #define MIDR_CORTEX_X925 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X925)
+ #define MIDR_CORTEX_A725 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A725)
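
A minimal stand-alone sketch of how the MIDR value added above decomposes (bit layout per the Arm ARM; the sample value is assembled here from the Arm implementer code 0x41 and the new 0xD83 part number, it is not printed anywhere by the patch):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* MIDR_EL1 layout: implementer [31:24], variant [23:20],
             * architecture [19:16], part number [15:4], revision [3:0] */
            uint32_t midr = (0x41u << 24) | (0xfu << 16) | (0xd83u << 4);

            printf("implementer=0x%02x part=0x%03x\n",
                   (midr >> 24) & 0xff, (midr >> 4) & 0xfff);
            /* prints: implementer=0x41 part=0xd83, i.e. Arm, Neoverse-V3AE */
            return 0;
    }
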
diff --git a/queue-6.12/arm64-errata-apply-workarounds-for-neoverse-v3ae.patch b/queue-6.12/arm64-errata-apply-workarounds-for-neoverse-v3ae.patch
new file mode 100644 (file)
index 0000000..105c4ca
--- /dev/null
@@ -0,0 +1,66 @@
+From stable+bounces-186004-greg=kroah.com@vger.kernel.org Thu Oct 16 13:12:53 2025
+From: Ryan Roberts <ryan.roberts@arm.com>
+Date: Thu, 16 Oct 2025 12:12:06 +0100
+Subject: arm64: errata: Apply workarounds for Neoverse-V3AE
+To: stable@vger.kernel.org
+Cc: Ryan Roberts <ryan.roberts@arm.com>, catalin.marinas@arm.com, will@kernel.org, mark.rutland@arm.com, linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org, James Morse <james.morse@arm.com>
+Message-ID: <20251016111208.3983300-3-ryan.roberts@arm.com>
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+[ Upstream commit 0c33aa1804d101c11ba1992504f17a42233f0e11 ]
+
+Neoverse-V3AE is also affected by erratum #3312417, as described in its
+Software Developer Errata Notice (SDEN) document:
+
+  Neoverse V3AE (MP172) SDEN v9.0, erratum 3312417
+  https://developer.arm.com/documentation/SDEN-2615521/9-0/
+
+Enable the workaround for Neoverse-V3AE, and document this.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Cc: James Morse <james.morse@arm.com>
+Cc: Will Deacon <will@kernel.org>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
+Signed-off-by: Will Deacon <will@kernel.org>
+[ Ryan: Trivial backport ]
+Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/arch/arm64/silicon-errata.rst |    2 ++
+ arch/arm64/Kconfig                          |    1 +
+ arch/arm64/kernel/cpu_errata.c              |    1 +
+ 3 files changed, 4 insertions(+)
+
+--- a/Documentation/arch/arm64/silicon-errata.rst
++++ b/Documentation/arch/arm64/silicon-errata.rst
+@@ -198,6 +198,8 @@ stable kernels.
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM            | Neoverse-V3     | #3312417        | ARM64_ERRATUM_3194386       |
+ +----------------+-----------------+-----------------+-----------------------------+
++| ARM            | Neoverse-V3AE   | #3312417        | ARM64_ERRATUM_3194386       |
+++----------------+-----------------+-----------------+-----------------------------+
+ | ARM            | MMU-500         | #841119,826419  | N/A                         |
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM            | MMU-600         | #1076982,1209401| N/A                         |
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -1111,6 +1111,7 @@ config ARM64_ERRATUM_3194386
+         * ARM Neoverse-V1 erratum 3324341
+         * ARM Neoverse V2 erratum 3324336
+         * ARM Neoverse-V3 erratum 3312417
++        * ARM Neoverse-V3AE erratum 3312417
+         On affected cores "MSR SSBS, #0" instructions may not affect
+         subsequent speculative instructions, which may permit unexepected
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -455,6 +455,7 @@ static const struct midr_range erratum_s
+       MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
+       MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
+       MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3),
++      MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3AE),
+       {}
+ };
+ #endif
diff --git a/queue-6.12/arm64-mte-do-not-flag-the-zero-page-as-pg_mte_tagged.patch b/queue-6.12/arm64-mte-do-not-flag-the-zero-page-as-pg_mte_tagged.patch
new file mode 100644 (file)
index 0000000..b31878d
--- /dev/null
@@ -0,0 +1,91 @@
+From stable+bounces-186230-greg=kroah.com@vger.kernel.org Fri Oct 17 03:14:23 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Oct 2025 21:14:15 -0400
+Subject: arm64: mte: Do not flag the zero page as PG_mte_tagged
+To: stable@vger.kernel.org
+Cc: Catalin Marinas <catalin.marinas@arm.com>, Gergely Kovacs <Gergely.Kovacs2@arm.com>, Will Deacon <will@kernel.org>, David Hildenbrand <david@redhat.com>, Lance Yang <lance.yang@linux.dev>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251017011415.3502373-1-sashal@kernel.org>
+
+From: Catalin Marinas <catalin.marinas@arm.com>
+
+[ Upstream commit f620d66af3165838bfa845dcf9f5f9b4089bf508 ]
+
+Commit 68d54ceeec0e ("arm64: mte: Allow PTRACE_PEEKMTETAGS access to the
+zero page") attempted to fix ptrace() reading of tags from the zero page
+by marking it as PG_mte_tagged during cpu_enable_mte(). The same commit
+also changed the ptrace() tag access permission check to the VM_MTE vma
+flag while turning the page flag test into a WARN_ON_ONCE().
+
+Attempting to set the PG_mte_tagged flag early with
+CONFIG_DEFERRED_STRUCT_PAGE_INIT enabled may either hang (after commit
+d77e59a8fccd "arm64: mte: Lock a page for MTE tag initialisation") or
+have the flags cleared later during page_alloc_init_late(). In addition,
+pages_identical() -> memcmp_pages() will reject any comparison with the
+zero page as it is marked as tagged.
+
+Partially revert the above commit to avoid setting PG_mte_tagged on the
+zero page. Update the __access_remote_tags() warning on untagged pages
+to ignore the zero page since it is known to have the tags initialised.
+
+Note that all user mappings of the zero page are marked as pte_special().
+The arm64 set_pte_at() will not call mte_sync_tags() on such pages, so
+PG_mte_tagged will remain cleared.
+
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Fixes: 68d54ceeec0e ("arm64: mte: Allow PTRACE_PEEKMTETAGS access to the zero page")
+Reported-by: Gergely Kovacs <Gergely.Kovacs2@arm.com>
+Cc: stable@vger.kernel.org # 5.10.x
+Cc: Will Deacon <will@kernel.org>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Lance Yang <lance.yang@linux.dev>
+Acked-by: Lance Yang <lance.yang@linux.dev>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Tested-by: Lance Yang <lance.yang@linux.dev>
+Signed-off-by: Will Deacon <will@kernel.org>
+[ Adjust context ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpufeature.c |   10 +++++++---
+ arch/arm64/kernel/mte.c        |    3 ++-
+ 2 files changed, 9 insertions(+), 4 deletions(-)
+
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -2279,17 +2279,21 @@ static void bti_enable(const struct arm6
+ #ifdef CONFIG_ARM64_MTE
+ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
+ {
++      static bool cleared_zero_page = false;
++
+       sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_ATA | SCTLR_EL1_ATA0);
+       mte_cpu_setup();
+       /*
+        * Clear the tags in the zero page. This needs to be done via the
+-       * linear map which has the Tagged attribute.
++       * linear map which has the Tagged attribute. Since this page is
++       * always mapped as pte_special(), set_pte_at() will not attempt to
++       * clear the tags or set PG_mte_tagged.
+        */
+-      if (try_page_mte_tagging(ZERO_PAGE(0))) {
++      if (!cleared_zero_page) {
++              cleared_zero_page = true;
+               mte_clear_page_tags(lm_alias(empty_zero_page));
+-              set_page_mte_tagged(ZERO_PAGE(0));
+       }
+       kasan_init_hw_tags_cpu();
+--- a/arch/arm64/kernel/mte.c
++++ b/arch/arm64/kernel/mte.c
+@@ -428,7 +428,8 @@ static int __access_remote_tags(struct m
+                       put_page(page);
+                       break;
+               }
+-              WARN_ON_ONCE(!page_mte_tagged(page));
++
++              WARN_ON_ONCE(!page_mte_tagged(page) && !is_zero_page(page));
+               /* limit access to the end of the page */
+               offset = offset_in_page(addr);
diff --git a/queue-6.12/asoc-sof-ipc4-pcm-enable-delay-reporting-for-chaindma-streams.patch b/queue-6.12/asoc-sof-ipc4-pcm-enable-delay-reporting-for-chaindma-streams.patch
new file mode 100644 (file)
index 0000000..8c14cb4
--- /dev/null
@@ -0,0 +1,137 @@
+From stable+bounces-185507-greg=kroah.com@vger.kernel.org Mon Oct 13 22:05:29 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Oct 2025 16:05:18 -0400
+Subject: ASoC: SOF: ipc4-pcm: Enable delay reporting for ChainDMA streams
+To: stable@vger.kernel.org
+Cc: Peter Ujfalusi <peter.ujfalusi@linux.intel.com>, Bard Liao <yung-chuan.liao@linux.intel.com>, Liam Girdwood <liam.r.girdwood@intel.com>, Ranjani Sridharan <ranjani.sridharan@linux.intel.com>, Mark Brown <broonie@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251013200519.3580966-1-sashal@kernel.org>
+
+From: Peter Ujfalusi <peter.ujfalusi@linux.intel.com>
+
+[ Upstream commit a1d203d390e04798ccc1c3c06019cd4411885d6d ]
+
+All streams (currently) which are configured to use ChainDMA can only work
+on Link/host DMA pairs where the link side position can be accessed via host
+registers (like HDA on CAVS 2.5 platforms).
+
+Since the firmware does not provide time_info for ChainDMA, unlike for HDA
+streams, the kernel should calculate the start and end offsets that are
+needed for the delay calculation.
+
+With this small change we can report accurate delays when the stream is
+configured to use ChainDMA.
+
+Signed-off-by: Peter Ujfalusi <peter.ujfalusi@linux.intel.com>
+Reviewed-by: Bard Liao <yung-chuan.liao@linux.intel.com>
+Reviewed-by: Liam Girdwood <liam.r.girdwood@intel.com>
+Reviewed-by: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+Link: https://patch.msgid.link/20250619102848.12389-1-peter.ujfalusi@linux.intel.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Stable-dep-of: bcd1383516bb ("ASoC: SOF: ipc4-pcm: fix delay calculation when DSP resamples")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/soc/sof/ipc4-pcm.c      |   49 ++++++++++++++++++++++++++++++++++++++----
+ sound/soc/sof/ipc4-topology.c |    6 ++---
+ sound/soc/sof/ipc4-topology.h |    1 
+ 3 files changed, 49 insertions(+), 7 deletions(-)
+
+--- a/sound/soc/sof/ipc4-pcm.c
++++ b/sound/soc/sof/ipc4-pcm.c
+@@ -409,9 +409,33 @@ static int sof_ipc4_trigger_pipelines(st
+        * If use_chain_dma attribute is set we proceed to chained DMA
+        * trigger function that handles the rest for the substream.
+        */
+-      if (pipeline->use_chain_dma)
+-              return sof_ipc4_chain_dma_trigger(sdev, spcm, substream->stream,
+-                                                pipeline_list, state, cmd);
++      if (pipeline->use_chain_dma) {
++              struct sof_ipc4_timestamp_info *time_info;
++
++              time_info = sof_ipc4_sps_to_time_info(&spcm->stream[substream->stream]);
++
++              ret = sof_ipc4_chain_dma_trigger(sdev, spcm, substream->stream,
++                                               pipeline_list, state, cmd);
++              if (ret || !time_info)
++                      return ret;
++
++              if (state == SOF_IPC4_PIPE_PAUSED) {
++                      /*
++                       * Record the DAI position for delay reporting
++                       * To handle multiple pause/resume/xrun we need to add
++                       * the positions to simulate how the firmware behaves
++                       */
++                      u64 pos = snd_sof_pcm_get_dai_frame_counter(sdev, component,
++                                                                  substream);
++
++                      time_info->stream_end_offset += pos;
++              } else if (state == SOF_IPC4_PIPE_RESET) {
++                      /* Reset the end offset as the stream is stopped */
++                      time_info->stream_end_offset = 0;
++              }
++
++              return 0;
++      }
+       /* allocate memory for the pipeline data */
+       trigger_list = kzalloc(struct_size(trigger_list, pipeline_instance_ids,
+@@ -924,8 +948,24 @@ static int sof_ipc4_get_stream_start_off
+       if (!host_copier || !dai_copier)
+               return -EINVAL;
+-      if (host_copier->data.gtw_cfg.node_id == SOF_IPC4_INVALID_NODE_ID)
++      if (host_copier->data.gtw_cfg.node_id == SOF_IPC4_INVALID_NODE_ID) {
+               return -EINVAL;
++      } else if (host_copier->data.gtw_cfg.node_id == SOF_IPC4_CHAIN_DMA_NODE_ID) {
++              /*
++               * While the firmware does not supports time_info reporting for
++               * streams using ChainDMA, it is granted that ChainDMA can only
++               * be used on Host+Link pairs where the link position is
++               * accessible from the host side.
++               *
++               * Enable delay calculation in case of ChainDMA via host
++               * accessible registers.
++               *
++               * The ChainDMA uses 2x 1ms ping-pong buffer, dai side starts
++               * when 1ms data is available
++               */
++              time_info->stream_start_offset = substream->runtime->rate / MSEC_PER_SEC;
++              goto out;
++      }
+       node_index = SOF_IPC4_NODE_INDEX(host_copier->data.gtw_cfg.node_id);
+       offset = offsetof(struct sof_ipc4_fw_registers, pipeline_regs) + node_index * sizeof(ppl_reg);
+@@ -943,6 +983,7 @@ static int sof_ipc4_get_stream_start_off
+       time_info->stream_end_offset = ppl_reg.stream_end_offset;
+       do_div(time_info->stream_end_offset, dai_sample_size);
++out:
+       /*
+        * Calculate the wrap boundary need to be used for delay calculation
+        * The host counter is in bytes, it will wrap earlier than the frames
+--- a/sound/soc/sof/ipc4-topology.c
++++ b/sound/soc/sof/ipc4-topology.c
+@@ -1782,10 +1782,10 @@ sof_ipc4_prepare_copier_module(struct sn
+                       pipeline->msg.extension |= SOF_IPC4_GLB_EXT_CHAIN_DMA_FIFO_SIZE(fifo_size);
+                       /*
+-                       * Chain DMA does not support stream timestamping, set node_id to invalid
+-                       * to skip the code in sof_ipc4_get_stream_start_offset().
++                       * Chain DMA does not support stream timestamping, but it
++                       * can use the host side registers for delay calculation.
+                        */
+-                      copier_data->gtw_cfg.node_id = SOF_IPC4_INVALID_NODE_ID;
++                      copier_data->gtw_cfg.node_id = SOF_IPC4_CHAIN_DMA_NODE_ID;
+                       return 0;
+               }
+--- a/sound/soc/sof/ipc4-topology.h
++++ b/sound/soc/sof/ipc4-topology.h
+@@ -58,6 +58,7 @@
+ #define SOF_IPC4_DMA_DEVICE_MAX_COUNT 16
++#define SOF_IPC4_CHAIN_DMA_NODE_ID    0x7fffffff
+ #define SOF_IPC4_INVALID_NODE_ID      0xffffffff
+ /* FW requires minimum 4ms DMA buffer size */
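
A quick worked example of the ChainDMA start-offset arithmetic introduced above (the 48 kHz rate is an assumed example): because the DAI side is known to start once 1 ms of data is buffered, the start offset is simply the stream rate divided by MSEC_PER_SEC.

    #include <stdio.h>

    int main(void)
    {
            unsigned int rate = 48000;                /* assumed stream rate in Hz */
            unsigned long long start = rate / 1000;   /* frames in 1 ms of audio   */

            printf("stream_start_offset = %llu frames\n", start);   /* 48 */
            return 0;
    }
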
diff --git a/queue-6.12/asoc-sof-ipc4-pcm-fix-delay-calculation-when-dsp-resamples.patch b/queue-6.12/asoc-sof-ipc4-pcm-fix-delay-calculation-when-dsp-resamples.patch
new file mode 100644 (file)
index 0000000..04182be
--- /dev/null
@@ -0,0 +1,183 @@
+From stable+bounces-185508-greg=kroah.com@vger.kernel.org Mon Oct 13 22:05:29 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Oct 2025 16:05:19 -0400
+Subject: ASoC: SOF: ipc4-pcm: fix delay calculation when DSP resamples
+To: stable@vger.kernel.org
+Cc: "Kai Vehmanen" <kai.vehmanen@linux.intel.com>, "Péter Ujfalusi" <peter.ujfalusi@linux.intel.com>, "Bard Liao" <yung-chuan.liao@linux.intel.com>, "Mark Brown" <broonie@kernel.org>, "Sasha Levin" <sashal@kernel.org>
+Message-ID: <20251013200519.3580966-2-sashal@kernel.org>
+
+From: Kai Vehmanen <kai.vehmanen@linux.intel.com>
+
+[ Upstream commit bcd1383516bb5a6f72b2d1e7f7ad42c4a14837d1 ]
+
+When the sampling rates going in (host) and out (dai) from the DSP
+are different, the IPC4 delay reporting does not work correctly.
+Add support for this case by scaling the all raw position values to
+a common timebase before calculating real-time delay for the PCM.
+
+Cc: stable@vger.kernel.org
+Fixes: 0ea06680dfcb ("ASoC: SOF: ipc4-pcm: Correct the delay calculation")
+Signed-off-by: Kai Vehmanen <kai.vehmanen@linux.intel.com>
+Reviewed-by: Péter Ujfalusi <peter.ujfalusi@linux.intel.com>
+Reviewed-by: Bard Liao <yung-chuan.liao@linux.intel.com>
+Signed-off-by: Peter Ujfalusi <peter.ujfalusi@linux.intel.com>
+Link: https://patch.msgid.link/20251002074719.2084-2-peter.ujfalusi@linux.intel.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/soc/sof/ipc4-pcm.c |   83 +++++++++++++++++++++++++++++++++++------------
+ 1 file changed, 62 insertions(+), 21 deletions(-)
+
+--- a/sound/soc/sof/ipc4-pcm.c
++++ b/sound/soc/sof/ipc4-pcm.c
+@@ -19,12 +19,14 @@
+  * struct sof_ipc4_timestamp_info - IPC4 timestamp info
+  * @host_copier: the host copier of the pcm stream
+  * @dai_copier: the dai copier of the pcm stream
+- * @stream_start_offset: reported by fw in memory window (converted to frames)
+- * @stream_end_offset: reported by fw in memory window (converted to frames)
++ * @stream_start_offset: reported by fw in memory window (converted to
++ *                       frames at host_copier sampling rate)
++ * @stream_end_offset: reported by fw in memory window (converted to
++ *                     frames at host_copier sampling rate)
+  * @llp_offset: llp offset in memory window
+- * @boundary: wrap boundary should be used for the LLP frame counter
+  * @delay: Calculated and stored in pointer callback. The stored value is
+- *       returned in the delay callback.
++ *         returned in the delay callback. Expressed in frames at host copier
++ *         sampling rate.
+  */
+ struct sof_ipc4_timestamp_info {
+       struct sof_ipc4_copier *host_copier;
+@@ -33,7 +35,6 @@ struct sof_ipc4_timestamp_info {
+       u64 stream_end_offset;
+       u32 llp_offset;
+-      u64 boundary;
+       snd_pcm_sframes_t delay;
+ };
+@@ -48,6 +49,16 @@ struct sof_ipc4_pcm_stream_priv {
+       bool chain_dma_allocated;
+ };
++/*
++ * Modulus to use to compare host and link position counters. The sampling
++ * rates may be different, so the raw hardware counters will wrap
++ * around at different times. To calculate differences, use
++ * DELAY_BOUNDARY as a common modulus. This value must be smaller than
++ * the wrap-around point of any hardware counter, and larger than any
++ * valid delay measurement.
++ */
++#define DELAY_BOUNDARY                U32_MAX
++
+ static inline struct sof_ipc4_timestamp_info *
+ sof_ipc4_sps_to_time_info(struct snd_sof_pcm_stream *sps)
+ {
+@@ -933,6 +944,35 @@ static int sof_ipc4_pcm_hw_params(struct
+       return 0;
+ }
++static u64 sof_ipc4_frames_dai_to_host(struct sof_ipc4_timestamp_info *time_info, u64 value)
++{
++      u64 dai_rate, host_rate;
++
++      if (!time_info->dai_copier || !time_info->host_copier)
++              return value;
++
++      /*
++       * copiers do not change sampling rate, so we can use the
++       * out_format independently of stream direction
++       */
++      dai_rate = time_info->dai_copier->data.out_format.sampling_frequency;
++      host_rate = time_info->host_copier->data.out_format.sampling_frequency;
++
++      if (!dai_rate || !host_rate || dai_rate == host_rate)
++              return value;
++
++      /* take care not to overflow u64, rates can be up to 768000 */
++      if (value > U32_MAX) {
++              value = div64_u64(value, dai_rate);
++              value *= host_rate;
++      } else {
++              value *= host_rate;
++              value = div64_u64(value, dai_rate);
++      }
++
++      return value;
++}
++
+ static int sof_ipc4_get_stream_start_offset(struct snd_sof_dev *sdev,
+                                           struct snd_pcm_substream *substream,
+                                           struct snd_sof_pcm_stream *sps,
+@@ -983,14 +1023,13 @@ static int sof_ipc4_get_stream_start_off
+       time_info->stream_end_offset = ppl_reg.stream_end_offset;
+       do_div(time_info->stream_end_offset, dai_sample_size);
++      /* convert to host frame time */
++      time_info->stream_start_offset =
++              sof_ipc4_frames_dai_to_host(time_info, time_info->stream_start_offset);
++      time_info->stream_end_offset =
++              sof_ipc4_frames_dai_to_host(time_info, time_info->stream_end_offset);
++
+ out:
+-      /*
+-       * Calculate the wrap boundary need to be used for delay calculation
+-       * The host counter is in bytes, it will wrap earlier than the frames
+-       * based link counter.
+-       */
+-      time_info->boundary = div64_u64(~((u64)0),
+-                                      frames_to_bytes(substream->runtime, 1));
+       /* Initialize the delay value to 0 (no delay) */
+       time_info->delay = 0;
+@@ -1033,6 +1072,8 @@ static int sof_ipc4_pcm_pointer(struct s
+       /* For delay calculation we need the host counter */
+       host_cnt = snd_sof_pcm_get_host_byte_counter(sdev, component, substream);
++
++      /* Store the original value to host_ptr */
+       host_ptr = host_cnt;
+       /* convert the host_cnt to frames */
+@@ -1051,6 +1092,8 @@ static int sof_ipc4_pcm_pointer(struct s
+               sof_mailbox_read(sdev, time_info->llp_offset, &llp, sizeof(llp));
+               dai_cnt = ((u64)llp.reading.llp_u << 32) | llp.reading.llp_l;
+       }
++
++      dai_cnt = sof_ipc4_frames_dai_to_host(time_info, dai_cnt);
+       dai_cnt += time_info->stream_end_offset;
+       /* In two cases dai dma counter is not accurate
+@@ -1084,8 +1127,9 @@ static int sof_ipc4_pcm_pointer(struct s
+               dai_cnt -= time_info->stream_start_offset;
+       }
+-      /* Wrap the dai counter at the boundary where the host counter wraps */
+-      div64_u64_rem(dai_cnt, time_info->boundary, &dai_cnt);
++      /* Convert to a common base before comparisons */
++      dai_cnt &= DELAY_BOUNDARY;
++      host_cnt &= DELAY_BOUNDARY;
+       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+               head_cnt = host_cnt;
+@@ -1095,14 +1139,11 @@ static int sof_ipc4_pcm_pointer(struct s
+               tail_cnt = host_cnt;
+       }
+-      if (head_cnt < tail_cnt) {
+-              time_info->delay = time_info->boundary - tail_cnt + head_cnt;
+-              goto out;
+-      }
+-
+-      time_info->delay =  head_cnt - tail_cnt;
++      if (unlikely(head_cnt < tail_cnt))
++              time_info->delay = DELAY_BOUNDARY - tail_cnt + head_cnt;
++      else
++              time_info->delay = head_cnt - tail_cnt;
+-out:
+       /*
+        * Convert the host byte counter to PCM pointer which wraps in buffer
+        * and it is in frames
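
The frame-rate conversion added above in sof_ipc4_frames_dai_to_host() can be pictured with this small stand-alone sketch (the 16 kHz/48 kHz pair is an assumed example): a counter measured at the DAI rate is rescaled to frames at the host rate, multiplying before dividing for small counts to keep precision and dividing first for very large counts to avoid 64-bit overflow.

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t dai_to_host(uint64_t frames, uint64_t dai_rate, uint64_t host_rate)
    {
            if (!dai_rate || !host_rate || dai_rate == host_rate)
                    return frames;
            if (frames > UINT32_MAX)                /* avoid overflowing u64 */
                    return frames / dai_rate * host_rate;
            return frames * host_rate / dai_rate;
    }

    int main(void)
    {
            /* e.g. the DSP resamples a 16 kHz DAI stream for a 48 kHz host stream */
            printf("%llu\n", (unsigned long long)dai_to_host(16000, 16000, 48000));  /* 48000 */
            return 0;
    }
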
diff --git a/queue-6.12/btrfs-fix-the-incorrect-max_bytes-value-for-find_lock_delalloc_range.patch b/queue-6.12/btrfs-fix-the-incorrect-max_bytes-value-for-find_lock_delalloc_range.patch
new file mode 100644 (file)
index 0000000..bfdeea8
--- /dev/null
@@ -0,0 +1,146 @@
+From stable+bounces-185812-greg=kroah.com@vger.kernel.org Wed Oct 15 14:48:25 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 Oct 2025 08:48:17 -0400
+Subject: btrfs: fix the incorrect max_bytes value for find_lock_delalloc_range()
+To: stable@vger.kernel.org
+Cc: Qu Wenruo <wqu@suse.com>, David Sterba <dsterba@suse.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251015124817.1385251-1-sashal@kernel.org>
+
+From: Qu Wenruo <wqu@suse.com>
+
+[ Upstream commit 7b26da407420e5054e3f06c5d13271697add9423 ]
+
+[BUG]
+With my local branch to enable bs > ps support for btrfs, sometimes I
+hit the following ASSERT() inside submit_one_sector():
+
+       ASSERT(block_start != EXTENT_MAP_HOLE);
+
+Please note that it's not yet possible to hit this ASSERT() in the wild,
+as it requires btrfs bs > ps support, which is not even in the
+development branch.
+
+But on the other hand, there is also a very low chance to hit the above
+ASSERT() with bs < ps cases, so this is an existing bug affecting not only
+the incoming bs > ps support but also the existing bs < ps support.
+
+[CAUSE]
+Firstly that ASSERT() means we're trying to submit a dirty block but
+without a real extent map nor ordered extent map backing it.
+
+Furthermore with extra debugging, the folio triggering such ASSERT() is
+always larger than the fs block size in my bs > ps case.
+(8K block size, 4K page size)
+
+After some more debugging, the ASSERT() is trigger by the following
+sequence:
+
+ extent_writepage()
+ |  We got a 32K folio (4 fs blocks) at file offset 0, and the fs block
+ |  size is 8K, page size is 4K.
+ |  And there is another 8K folio at file offset 32K, which is also
+ |  dirty.
+ |  So the filemap layout looks like the following:
+ |
+ |  "||" is the filio boundary in the filemap.
+ |  "//| is the dirty range.
+ |
+ |  0        8K       16K        24K         32K       40K
+ |  |////////|        |//////////////////////||////////|
+ |
+ |- writepage_delalloc()
+ |  |- find_lock_delalloc_range() for [0, 8K)
+ |  |  Now range [0, 8K) is properly locked.
+ |  |
+ |  |- find_lock_delalloc_range() for [16K, 40K)
+ |  |  |- btrfs_find_delalloc_range() returned range [16K, 40K)
+ |  |  |- lock_delalloc_folios() locked folio 0 successfully
+ |  |  |
+ |  |  |  The filemap range [32K, 40K) got dropped from filemap.
+ |  |  |
+ |  |  |- lock_delalloc_folios() failed with -EAGAIN on folio 32K
+ |  |  |  As the folio at 32K is dropped.
+ |  |  |
+ |  |  |- loops = 1;
+ |  |  |- max_bytes = PAGE_SIZE;
+ |  |  |- goto again;
+ |  |  |  This will re-do the lookup for dirty delalloc ranges.
+ |  |  |
+ |  |  |- btrfs_find_delalloc_range() called with @max_bytes == 4K
+ |  |  |  This is smaller than block size, so
+ |  |  |  btrfs_find_delalloc_range() is unable to return any range.
+ |  |  \- return false;
+ |  |
+ |  \- Now only range [0, 8K) has an OE for it, but for dirty range
+ |     [16K, 32K) it's dirty without an OE.
+ |     This breaks the assumption that writepage_delalloc() will find
+ |     and lock all dirty ranges inside the folio.
+ |
+ |- extent_writepage_io()
+    |- submit_one_sector() for [0, 8K)
+    |  Succeeded
+    |
+    |- submit_one_sector() for [16K, 24K)
+       Triggering the ASSERT(), as there is no OE, and the original
+       extent map is a hole.
+
+Please note that, this also exposed the same problem for bs < ps
+support. E.g. with 64K page size and 4K block size.
+
+If we fail to lock a folio and fall back into the "loops = 1;"
+branch, we will re-do the search using 64K as max_bytes,
+which may fail again to lock the next folio, and exit early without
+handling all dirty blocks inside the folio.
+
+[FIX]
+Instead of using the fixed size PAGE_SIZE as @max_bytes, use
+@sectorsize, so that we are guaranteed to find and lock any remaining
+blocks inside the folio.
+
+And since we're here, add an extra ASSERT()
+before calling btrfs_find_delalloc_range() to make sure the @max_bytes is
+no smaller than a block to avoid a false negative.
+
+Cc: stable@vger.kernel.org # 5.15+
+Signed-off-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/extent_io.c |   14 +++++++++++---
+ 1 file changed, 11 insertions(+), 3 deletions(-)
+
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -355,6 +355,13 @@ again:
+       /* step one, find a bunch of delalloc bytes starting at start */
+       delalloc_start = *start;
+       delalloc_end = 0;
++
++      /*
++       * If @max_bytes is smaller than a block, btrfs_find_delalloc_range() can
++       * return early without handling any dirty ranges.
++       */
++      ASSERT(max_bytes >= fs_info->sectorsize);
++
+       found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
+                                         max_bytes, &cached_state);
+       if (!found || delalloc_end <= *start || delalloc_start > orig_end) {
+@@ -385,13 +392,14 @@ again:
+                                  delalloc_end);
+       ASSERT(!ret || ret == -EAGAIN);
+       if (ret == -EAGAIN) {
+-              /* some of the folios are gone, lets avoid looping by
+-               * shortening the size of the delalloc range we're searching
++              /*
++               * Some of the folios are gone, lets avoid looping by
++               * shortening the size of the delalloc range we're searching.
+                */
+               free_extent_state(cached_state);
+               cached_state = NULL;
+               if (!loops) {
+-                      max_bytes = PAGE_SIZE;
++                      max_bytes = fs_info->sectorsize;
+                       loops = 1;
+                       goto again;
+               } else {
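
The failure mode described in the [CAUSE] section above boils down to one comparison; a small sketch with the commit's example numbers (8K block size, 4K page size; the variable names are illustrative only):

    #include <stdio.h>

    int main(void)
    {
            unsigned long page_size  = 4096;    /* PAGE_SIZE in the example     */
            unsigned long sectorsize = 8192;    /* fs block size in the example */

            /* old retry path: max_bytes = PAGE_SIZE is smaller than one block,
             * so btrfs_find_delalloc_range() can return no range at all */
            printf("old retry: max_bytes=%lu -> %s\n", page_size,
                   page_size < sectorsize ? "lookup can return nothing" : "ok");

            /* fixed retry path: max_bytes = sectorsize always covers one block */
            printf("new retry: max_bytes=%lu -> ok\n", sectorsize);
            return 0;
    }
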
diff --git a/queue-6.12/cpufreq-make-drivers-using-cpufreq_eternal-specify-transition-latency.patch b/queue-6.12/cpufreq-make-drivers-using-cpufreq_eternal-specify-transition-latency.patch
new file mode 100644 (file)
index 0000000..ca2f9ea
--- /dev/null
@@ -0,0 +1,143 @@
+From stable+bounces-185852-greg=kroah.com@vger.kernel.org Wed Oct 15 20:45:22 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 Oct 2025 14:45:16 -0400
+Subject: cpufreq: Make drivers using CPUFREQ_ETERNAL specify transition latency
+To: stable@vger.kernel.org
+Cc: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>, Shawn Guo <shawnguo@kernel.org>, "Mario Limonciello (AMD)" <superm1@kernel.org>, Jie Zhan <zhanjie9@hisilicon.com>, Viresh Kumar <viresh.kumar@linaro.org>, Qais Yousef <qyousef@layalina.io>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251015184516.1496577-1-sashal@kernel.org>
+
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+
+[ Upstream commit f97aef092e199c10a3da96ae79b571edd5362faa ]
+
+Commit a755d0e2d41b ("cpufreq: Honour transition_latency over
+transition_delay_us") caused platforms where cpuinfo.transition_latency
+is CPUFREQ_ETERNAL to get a very large transition latency whereas
+previously it had been capped at 10 ms (and later at 2 ms).
+
+This led to a user-observable regression between 6.6 and 6.12 as
+described by Shawn:
+
+"The dbs sampling_rate was 10000 us on 6.6 and suddently becomes
+ 6442450 us (4294967295 / 1000 * 1.5) on 6.12 for these platforms
+ because the default transition delay was dropped [...].
+
+ It slows down dbs governor's reacting to CPU loading change
+ dramatically.  Also, as transition_delay_us is used by schedutil
+ governor as rate_limit_us, it shows a negative impact on device
+ idle power consumption, because the device gets slightly less time
+ in the lowest OPP."
+
+Evidently, the expectation of the drivers using CPUFREQ_ETERNAL as
+cpuinfo.transition_latency was that it would be capped by the core,
+but they may as well return a default transition latency value instead
+of CPUFREQ_ETERNAL and the core need not do anything with it.
+
+Accordingly, introduce CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS and make
+all of the drivers in question use it instead of CPUFREQ_ETERNAL.  Also
+update the related Rust binding.
+
+Fixes: a755d0e2d41b ("cpufreq: Honour transition_latency over transition_delay_us")
+Closes: https://lore.kernel.org/linux-pm/20250922125929.453444-1-shawnguo2@yeah.net/
+Reported-by: Shawn Guo <shawnguo@kernel.org>
+Reviewed-by: Mario Limonciello (AMD) <superm1@kernel.org>
+Reviewed-by: Jie Zhan <zhanjie9@hisilicon.com>
+Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
+Cc: 6.6+ <stable@vger.kernel.org> # 6.6+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Link: https://patch.msgid.link/2264949.irdbgypaU6@rafael.j.wysocki
+[ rjw: Fix typo in new symbol name, drop redundant type cast from Rust binding ]
+Tested-by: Shawn Guo <shawnguo@kernel.org> # with cpufreq-dt driver
+Reviewed-by: Qais Yousef <qyousef@layalina.io>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+[ omitted Rust changes ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/cpufreq/cpufreq-dt.c          |    2 +-
+ drivers/cpufreq/imx6q-cpufreq.c       |    2 +-
+ drivers/cpufreq/mediatek-cpufreq-hw.c |    2 +-
+ drivers/cpufreq/scmi-cpufreq.c        |    2 +-
+ drivers/cpufreq/scpi-cpufreq.c        |    2 +-
+ drivers/cpufreq/spear-cpufreq.c       |    2 +-
+ include/linux/cpufreq.h               |    3 +++
+ 7 files changed, 9 insertions(+), 6 deletions(-)
+
+--- a/drivers/cpufreq/cpufreq-dt.c
++++ b/drivers/cpufreq/cpufreq-dt.c
+@@ -110,7 +110,7 @@ static int cpufreq_init(struct cpufreq_p
+       transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
+       if (!transition_latency)
+-              transition_latency = CPUFREQ_ETERNAL;
++              transition_latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
+       cpumask_copy(policy->cpus, priv->cpus);
+       policy->driver_data = priv;
+--- a/drivers/cpufreq/imx6q-cpufreq.c
++++ b/drivers/cpufreq/imx6q-cpufreq.c
+@@ -443,7 +443,7 @@ soc_opp_out:
+       }
+       if (of_property_read_u32(np, "clock-latency", &transition_latency))
+-              transition_latency = CPUFREQ_ETERNAL;
++              transition_latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
+       /*
+        * Calculate the ramp time for max voltage change in the
+--- a/drivers/cpufreq/mediatek-cpufreq-hw.c
++++ b/drivers/cpufreq/mediatek-cpufreq-hw.c
+@@ -238,7 +238,7 @@ static int mtk_cpufreq_hw_cpu_init(struc
+       latency = readl_relaxed(data->reg_bases[REG_FREQ_LATENCY]) * 1000;
+       if (!latency)
+-              latency = CPUFREQ_ETERNAL;
++              latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
+       policy->cpuinfo.transition_latency = latency;
+       policy->fast_switch_possible = true;
+--- a/drivers/cpufreq/scmi-cpufreq.c
++++ b/drivers/cpufreq/scmi-cpufreq.c
+@@ -280,7 +280,7 @@ static int scmi_cpufreq_init(struct cpuf
+       latency = perf_ops->transition_latency_get(ph, domain);
+       if (!latency)
+-              latency = CPUFREQ_ETERNAL;
++              latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
+       policy->cpuinfo.transition_latency = latency;
+--- a/drivers/cpufreq/scpi-cpufreq.c
++++ b/drivers/cpufreq/scpi-cpufreq.c
+@@ -157,7 +157,7 @@ static int scpi_cpufreq_init(struct cpuf
+       latency = scpi_ops->get_transition_latency(cpu_dev);
+       if (!latency)
+-              latency = CPUFREQ_ETERNAL;
++              latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
+       policy->cpuinfo.transition_latency = latency;
+--- a/drivers/cpufreq/spear-cpufreq.c
++++ b/drivers/cpufreq/spear-cpufreq.c
+@@ -183,7 +183,7 @@ static int spear_cpufreq_probe(struct pl
+       if (of_property_read_u32(np, "clock-latency",
+                               &spear_cpufreq.transition_latency))
+-              spear_cpufreq.transition_latency = CPUFREQ_ETERNAL;
++              spear_cpufreq.transition_latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
+       cnt = of_property_count_u32_elems(np, "cpufreq_tbl");
+       if (cnt <= 0) {
+--- a/include/linux/cpufreq.h
++++ b/include/linux/cpufreq.h
+@@ -32,6 +32,9 @@
+  */
+ #define CPUFREQ_ETERNAL                       (-1)
++
++#define CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS NSEC_PER_MSEC
++
+ #define CPUFREQ_NAME_LEN              16
+ /* Print length for names. Extra 1 space for accommodating '\n' in prints */
+ #define CPUFREQ_NAME_PLEN             (CPUFREQ_NAME_LEN + 1)
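
The regression arithmetic quoted from Shawn's report, and the effect of the new default, can be checked with this small sketch (the 1.5 factor comes from the report's own calculation and the 1 ms value from the new CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS; this is an illustration of the numbers, not the core's exact code path):

    #include <stdio.h>

    int main(void)
    {
            unsigned long long eternal_ns = 4294967295ULL;  /* CPUFREQ_ETERNAL seen as u32 */
            unsigned long long default_ns = 1000000ULL;     /* NSEC_PER_MSEC               */

            /* delay in us = latency_ns / 1000 * 1.5, per the report */
            printf("old: %llu us\n", eternal_ns / 1000 * 3 / 2);   /* 6442450 us */
            printf("new: %llu us\n", default_ns / 1000 * 3 / 2);   /*    1500 us */
            return 0;
    }
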
diff --git a/queue-6.12/kvm-x86-advertise-srso_user_kernel_no-to-userspace.patch b/queue-6.12/kvm-x86-advertise-srso_user_kernel_no-to-userspace.patch
new file mode 100644 (file)
index 0000000..966afbc
--- /dev/null
@@ -0,0 +1,41 @@
+From stable+bounces-185886-greg=kroah.com@vger.kernel.org Thu Oct 16 09:52:03 2025
+From: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Date: Thu, 16 Oct 2025 00:46:27 -0700
+Subject: KVM: x86: Advertise SRSO_USER_KERNEL_NO to userspace
+To: bp@alien8.de, stable@vger.kernel.org
+Cc: boris.ostrovsky@oracle.com, Nikolay Borisov <nik.borisov@suse.com>, Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Message-ID: <20251016074627.3417836-1-harshit.m.mogalapalli@oracle.com>
+
+From: "Borislav Petkov (AMD)" <bp@alien8.de>
+
+[ Upstream commit 716f86b523d8ec3c17015ee0b03135c7aa6f2f08 ]
+
+SRSO_USER_KERNEL_NO denotes whether the CPU is affected by SRSO across
+user/kernel boundaries. Advertise it to guest userspace.
+
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Reviewed-by: Nikolay Borisov <nik.borisov@suse.com>
+Link: https://lore.kernel.org/r/20241202120416.6054-3-bp@kernel.org
+Signed-off-by: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+Boris Ostrovsky suggested that we backport this commit to 6.12.y as we
+have commit: 6f0f23ef76be ("KVM: x86: Add IBPB_BRTYPE support") in 6.12.y
+
+Hi Borislav: Can you please ACK before the stable maintainers pick this?
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/cpuid.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -816,7 +816,7 @@ void kvm_set_cpu_caps(void)
+               F(NO_NESTED_DATA_BP) | F(LFENCE_RDTSC) | 0 /* SmmPgCfgLock */ |
+               F(VERW_CLEAR) |
+               F(NULL_SEL_CLR_BASE) | F(AUTOIBRS) | 0 /* PrefetchCtlMsr */ |
+-              F(WRMSR_XX_BASE_NS)
++              F(WRMSR_XX_BASE_NS) | F(SRSO_USER_KERNEL_NO)
+       );
+       kvm_cpu_cap_check_and_set(X86_FEATURE_SBPB);
diff --git a/queue-6.12/lib-crypto-curve25519-hacl64-disable-kasan-with-clang-17-and-older.patch b/queue-6.12/lib-crypto-curve25519-hacl64-disable-kasan-with-clang-17-and-older.patch
new file mode 100644 (file)
index 0000000..5957668
--- /dev/null
@@ -0,0 +1,67 @@
+From 2f13daee2a72bb962f5fd356c3a263a6f16da965 Mon Sep 17 00:00:00 2001
+From: Nathan Chancellor <nathan@kernel.org>
+Date: Mon, 9 Jun 2025 15:45:20 -0700
+Subject: lib/crypto/curve25519-hacl64: Disable KASAN with clang-17 and older
+
+From: Nathan Chancellor <nathan@kernel.org>
+
+commit 2f13daee2a72bb962f5fd356c3a263a6f16da965 upstream.
+
+After commit 6f110a5e4f99 ("Disable SLUB_TINY for build testing"), which
+causes CONFIG_KASAN to be enabled in allmodconfig again, arm64
+allmodconfig builds with clang-17 and older show an instance of
+-Wframe-larger-than (which breaks the build with CONFIG_WERROR=y):
+
+  lib/crypto/curve25519-hacl64.c:757:6: error: stack frame size (2336) exceeds limit (2048) in 'curve25519_generic' [-Werror,-Wframe-larger-than]
+    757 | void curve25519_generic(u8 mypublic[CURVE25519_KEY_SIZE],
+        |      ^
+
+When KASAN is disabled, the stack usage is roughly quartered:
+
+  lib/crypto/curve25519-hacl64.c:757:6: error: stack frame size (608) exceeds limit (128) in 'curve25519_generic' [-Werror,-Wframe-larger-than]
+    757 | void curve25519_generic(u8 mypublic[CURVE25519_KEY_SIZE],
+        |      ^
+
+Using '-Rpass-analysis=stack-frame-layout' shows the following variables
+and many, many 8-byte spills when KASAN is enabled:
+
+  Offset: [SP-144], Type: Variable, Align: 8, Size: 40
+  Offset: [SP-464], Type: Variable, Align: 8, Size: 320
+  Offset: [SP-784], Type: Variable, Align: 8, Size: 320
+  Offset: [SP-864], Type: Variable, Align: 32, Size: 80
+  Offset: [SP-896], Type: Variable, Align: 32, Size: 32
+  Offset: [SP-1016], Type: Variable, Align: 8, Size: 120
+
+When KASAN is disabled, there are still spills but not as many, and the
+variables list is smaller:
+
+  Offset: [SP-192], Type: Variable, Align: 32, Size: 80
+  Offset: [SP-224], Type: Variable, Align: 32, Size: 32
+  Offset: [SP-344], Type: Variable, Align: 8, Size: 120
+
+Disable KASAN for this file when using clang-17 or older to avoid
+blowing out the stack, clearing up the warning.
+
+Signed-off-by: Nathan Chancellor <nathan@kernel.org>
+Acked-by: "Jason A. Donenfeld" <Jason@zx2c4.com>
+Acked-by: Ard Biesheuvel <ardb@kernel.org>
+Link: https://lore.kernel.org/r/20250609-curve25519-hacl64-disable-kasan-clang-v1-1-08ea0ac5ccff@kernel.org
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ lib/crypto/Makefile |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/lib/crypto/Makefile
++++ b/lib/crypto/Makefile
+@@ -33,6 +33,10 @@ obj-$(CONFIG_CRYPTO_LIB_CURVE25519_GENER
+ libcurve25519-generic-y                               := curve25519-fiat32.o
+ libcurve25519-generic-$(CONFIG_ARCH_SUPPORTS_INT128)  := curve25519-hacl64.o
+ libcurve25519-generic-y                               += curve25519-generic.o
++# clang versions prior to 18 may blow out the stack with KASAN
++ifeq ($(call clang-min-version, 180000),)
++KASAN_SANITIZE_curve25519-hacl64.o := n
++endif
+ obj-$(CONFIG_CRYPTO_LIB_CURVE25519)           += libcurve25519.o
+ libcurve25519-y                                       += curve25519.o
diff --git a/queue-6.12/media-mc-clear-minor-number-before-put-device.patch b/queue-6.12/media-mc-clear-minor-number-before-put-device.patch
new file mode 100644 (file)
index 0000000..9df4754
--- /dev/null
@@ -0,0 +1,51 @@
+From stable+bounces-185512-greg=kroah.com@vger.kernel.org Mon Oct 13 22:45:48 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Oct 2025 16:41:15 -0400
+Subject: media: mc: Clear minor number before put device
+To: stable@vger.kernel.org
+Cc: Edward Adam Davis <eadavis@qq.com>, syzbot+031d0cfd7c362817963f@syzkaller.appspotmail.com, Sakari Ailus <sakari.ailus@linux.intel.com>, Hans Verkuil <hverkuil+cisco@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251013204115.3599451-1-sashal@kernel.org>
+
+From: Edward Adam Davis <eadavis@qq.com>
+
+[ Upstream commit 8cfc8cec1b4da88a47c243a11f384baefd092a50 ]
+
+The device minor should not be cleared after the device is released.
+
+Fixes: 9e14868dc952 ("media: mc: Clear minor number reservation at unregistration time")
+Cc: stable@vger.kernel.org
+Reported-by: syzbot+031d0cfd7c362817963f@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=031d0cfd7c362817963f
+Tested-by: syzbot+031d0cfd7c362817963f@syzkaller.appspotmail.com
+Signed-off-by: Edward Adam Davis <eadavis@qq.com>
+Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Hans Verkuil <hverkuil+cisco@kernel.org>
+[ moved clear_bit from media_devnode_release callback to media_devnode_unregister before put_device ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/mc/mc-devnode.c |    6 +-----
+ 1 file changed, 1 insertion(+), 5 deletions(-)
+
+--- a/drivers/media/mc/mc-devnode.c
++++ b/drivers/media/mc/mc-devnode.c
+@@ -50,11 +50,6 @@ static void media_devnode_release(struct
+ {
+       struct media_devnode *devnode = to_media_devnode(cd);
+-      mutex_lock(&media_devnode_lock);
+-      /* Mark device node number as free */
+-      clear_bit(devnode->minor, media_devnode_nums);
+-      mutex_unlock(&media_devnode_lock);
+-
+       /* Release media_devnode and perform other cleanups as needed. */
+       if (devnode->release)
+               devnode->release(devnode);
+@@ -281,6 +276,7 @@ void media_devnode_unregister(struct med
+       /* Delete the cdev on this minor as well */
+       cdev_device_del(&devnode->cdev, &devnode->dev);
+       devnode->media_dev = NULL;
++      clear_bit(devnode->minor, media_devnode_nums);
+       mutex_unlock(&media_devnode_lock);
+       put_device(&devnode->dev);
diff --git a/queue-6.12/mfd-intel_soc_pmic_chtdc_ti-drop-unneeded-assignment-for-cache_type.patch b/queue-6.12/mfd-intel_soc_pmic_chtdc_ti-drop-unneeded-assignment-for-cache_type.patch
new file mode 100644 (file)
index 0000000..2a3be7c
--- /dev/null
@@ -0,0 +1,39 @@
+From stable+bounces-185531-greg=kroah.com@vger.kernel.org Tue Oct 14 00:40:22 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Oct 2025 18:36:55 -0400
+Subject: mfd: intel_soc_pmic_chtdc_ti: Drop unneeded assignment for cache_type
+To: stable@vger.kernel.org
+Cc: Andy Shevchenko <andriy.shevchenko@linux.intel.com>, Hans de Goede <hdegoede@redhat.com>, Lee Jones <lee@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251013223656.3673902-2-sashal@kernel.org>
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+[ Upstream commit 9eb99c08508714906db078b5efbe075329a3fb06 ]
+
+REGCACHE_NONE is the default type of the cache when not provided.
+Drop unneeded explicit assignment to it.
+
+Note, it's defined to 0, and if it were ever redefined, it would break
+a lot of the drivers, so it is very unlikely to happen.
+
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+Link: https://lore.kernel.org/r/20250129152823.1802273-1-andriy.shevchenko@linux.intel.com
+Signed-off-by: Lee Jones <lee@kernel.org>
+Stable-dep-of: 64e0d839c589 ("mfd: intel_soc_pmic_chtdc_ti: Set use_single_read regmap_config flag")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mfd/intel_soc_pmic_chtdc_ti.c |    1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/drivers/mfd/intel_soc_pmic_chtdc_ti.c
++++ b/drivers/mfd/intel_soc_pmic_chtdc_ti.c
+@@ -82,7 +82,6 @@ static const struct regmap_config chtdc_
+       .reg_bits = 8,
+       .val_bits = 8,
+       .max_register = 0xff,
+-      .cache_type = REGCACHE_NONE,
+ };
+ static const struct regmap_irq chtdc_ti_irqs[] = {
diff --git a/queue-6.12/mfd-intel_soc_pmic_chtdc_ti-fix-invalid-regmap-config-max_register-value.patch b/queue-6.12/mfd-intel_soc_pmic_chtdc_ti-fix-invalid-regmap-config-max_register-value.patch
new file mode 100644 (file)
index 0000000..418ce57
--- /dev/null
@@ -0,0 +1,40 @@
+From stable+bounces-185530-greg=kroah.com@vger.kernel.org Tue Oct 14 00:40:21 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Oct 2025 18:36:54 -0400
+Subject: mfd: intel_soc_pmic_chtdc_ti: Fix invalid regmap-config max_register value
+To: stable@vger.kernel.org
+Cc: Hans de Goede <hdegoede@redhat.com>, Andy Shevchenko <andy@kernel.org>, Lee Jones <lee@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251013223656.3673902-1-sashal@kernel.org>
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+[ Upstream commit 70e997e0107e5ed85c1a3ef2adfccbe351c29d71 ]
+
+The max_register = 128 setting in the regmap config is not valid.
+
+The Intel Dollar Cove TI PMIC has an eeprom unlock register at address 0x88
+and a number of EEPROM registers at 0xF?. Increase max_register to 0xff so
+that these registers can be accessed.
+
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Reviewed-by: Andy Shevchenko <andy@kernel.org>
+Link: https://lore.kernel.org/r/20241208150028.325349-1-hdegoede@redhat.com
+Signed-off-by: Lee Jones <lee@kernel.org>
+Stable-dep-of: 64e0d839c589 ("mfd: intel_soc_pmic_chtdc_ti: Set use_single_read regmap_config flag")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mfd/intel_soc_pmic_chtdc_ti.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/mfd/intel_soc_pmic_chtdc_ti.c
++++ b/drivers/mfd/intel_soc_pmic_chtdc_ti.c
+@@ -81,7 +81,7 @@ static struct mfd_cell chtdc_ti_dev[] =
+ static const struct regmap_config chtdc_ti_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+-      .max_register = 128,
++      .max_register = 0xff,
+       .cache_type = REGCACHE_NONE,
+ };
diff --git a/queue-6.12/mfd-intel_soc_pmic_chtdc_ti-set-use_single_read-regmap_config-flag.patch b/queue-6.12/mfd-intel_soc_pmic_chtdc_ti-set-use_single_read-regmap_config-flag.patch
new file mode 100644 (file)
index 0000000..33dfcfa
--- /dev/null
@@ -0,0 +1,43 @@
+From stable+bounces-185532-greg=kroah.com@vger.kernel.org Tue Oct 14 00:40:48 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Oct 2025 18:36:56 -0400
+Subject: mfd: intel_soc_pmic_chtdc_ti: Set use_single_read regmap_config flag
+To: stable@vger.kernel.org
+Cc: Hans de Goede <hansg@kernel.org>, Andy Shevchenko <andy@kernel.org>, Lee Jones <lee@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251013223656.3673902-3-sashal@kernel.org>
+
+From: Hans de Goede <hansg@kernel.org>
+
+[ Upstream commit 64e0d839c589f4f2ecd2e3e5bdb5cee6ba6bade9 ]
+
+Testing has shown that reading multiple registers at once (for 10-bit
+ADC values) does not work. Set the use_single_read regmap_config flag
+to make regmap split these for us.
+
+This should fix temperature opregion accesses done by
+drivers/acpi/pmic/intel_pmic_chtdc_ti.c and is also necessary for
+the upcoming drivers for the ADC and battery MFD cells.
+
+Fixes: 6bac0606fdba ("mfd: Add support for Cherry Trail Dollar Cove TI PMIC")
+Cc: stable@vger.kernel.org
+Reviewed-by: Andy Shevchenko <andy@kernel.org>
+Signed-off-by: Hans de Goede <hansg@kernel.org>
+Link: https://lore.kernel.org/r/20250804133240.312383-1-hansg@kernel.org
+Signed-off-by: Lee Jones <lee@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mfd/intel_soc_pmic_chtdc_ti.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/mfd/intel_soc_pmic_chtdc_ti.c
++++ b/drivers/mfd/intel_soc_pmic_chtdc_ti.c
+@@ -82,6 +82,8 @@ static const struct regmap_config chtdc_
+       .reg_bits = 8,
+       .val_bits = 8,
+       .max_register = 0xff,
++      /* The hardware does not support reading multiple registers at once */
++      .use_single_read = true,
+ };
+ static const struct regmap_irq chtdc_ti_irqs[] = {
diff --git a/queue-6.12/mm-ksm-fix-incorrect-ksm-counter-handling-in-mm_struct-during-fork.patch b/queue-6.12/mm-ksm-fix-incorrect-ksm-counter-handling-in-mm_struct-during-fork.patch
new file mode 100644 (file)
index 0000000..eaa7706
--- /dev/null
@@ -0,0 +1,104 @@
+From stable+bounces-185643-greg=kroah.com@vger.kernel.org Tue Oct 14 13:56:50 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Oct 2025 07:55:13 -0400
+Subject: mm/ksm: fix incorrect KSM counter handling in mm_struct during fork
+To: stable@vger.kernel.org
+Cc: Donet Tom <donettom@linux.ibm.com>, Chengming Zhou <chengming.zhou@linux.dev>, David Hildenbrand <david@redhat.com>, Aboorva Devarajan <aboorvad@linux.ibm.com>, "Ritesh Harjani (IBM)" <ritesh.list@gmail.com>, Wei Yang <richard.weiyang@gmail.com>, xu xin <xu.xin16@zte.com.cn>, Andrew Morton <akpm@linux-foundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251014115513.4165766-1-sashal@kernel.org>
+
+From: Donet Tom <donettom@linux.ibm.com>
+
+[ Upstream commit 4d6fc29f36341d7795db1d1819b4c15fe9be7b23 ]
+
+Patch series "mm/ksm: Fix incorrect accounting of KSM counters during
+fork", v3.
+
+The first patch in this series fixes the incorrect accounting of KSM
+counters such as ksm_merging_pages, ksm_rmap_items, and the global
+ksm_zero_pages during fork.
+
+The following patch adds a selftest to verify that the ksm_merging_pages
+counter is updated correctly during fork.
+
+Test Results
+============
+Without the first patch
+-----------------------
+ # [RUN] test_fork_ksm_merging_page_count
+ not ok 10 ksm_merging_page in child: 32
+
+With the first patch
+--------------------
+ # [RUN] test_fork_ksm_merging_page_count
+ ok 10 ksm_merging_pages is not inherited after fork
+
+This patch (of 2):
+
+Currently, the KSM-related counters in `mm_struct`, such as
+`ksm_merging_pages`, `ksm_rmap_items`, and `ksm_zero_pages`, are inherited
+by the child process during fork.  This results in inconsistent
+accounting.
+
+When a process uses KSM, identical pages are merged and an rmap item is
+created for each merged page.  The `ksm_merging_pages` and
+`ksm_rmap_items` counters are updated accordingly.  However, after a fork,
+these counters are copied to the child while the corresponding rmap items
+are not.  As a result, when the child later triggers an unmerge, there are
+no rmap items present in the child, so the counters remain stale, leading
+to incorrect accounting.
+
+A similar issue exists with `ksm_zero_pages`, which maintains both a
+global counter and a per-process counter.  During fork, the per-process
+counter is inherited by the child, but the global counter is not
+incremented.  Since the child also references zero pages, the global
+counter should be updated as well.  Otherwise, during zero-page unmerge,
+both the global and per-process counters are decremented, causing the
+global counter to become inconsistent.
+
+To fix this, ksm_merging_pages and ksm_rmap_items are reset to 0 during
+fork, and the global ksm_zero_pages counter is updated with the
+per-process ksm_zero_pages value inherited by the child.  This ensures
+that KSM statistics remain accurate and reflect the activity of each
+process correctly.
+
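+As a worked example (numbers assumed): if the parent's ksm_zero_pages count
+is 5 at fork time, the child inherits the per-mm value of 5, so the global
+ksm_zero_pages counter must also be increased by 5; otherwise a later
+zero-page unmerge in the child drives the global counter below the real
+number of KSM-placed zero pages.
+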
+Link: https://lkml.kernel.org/r/cover.1758648700.git.donettom@linux.ibm.com
+Link: https://lkml.kernel.org/r/7b9870eb67ccc0d79593940d9dbd4a0b39b5d396.1758648700.git.donettom@linux.ibm.com
+Fixes: 7609385337a4 ("ksm: count ksm merging pages for each process")
+Fixes: cb4df4cae4f2 ("ksm: count allocated ksm rmap_items for each process")
+Fixes: e2942062e01d ("ksm: count all zero pages placed by KSM")
+Signed-off-by: Donet Tom <donettom@linux.ibm.com>
+Reviewed-by: Chengming Zhou <chengming.zhou@linux.dev>
+Acked-by: David Hildenbrand <david@redhat.com>
+Cc: Aboorva Devarajan <aboorvad@linux.ibm.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Donet Tom <donettom@linux.ibm.com>
+Cc: "Ritesh Harjani (IBM)" <ritesh.list@gmail.com>
+Cc: Wei Yang <richard.weiyang@gmail.com>
+Cc: xu xin <xu.xin16@zte.com.cn>
+Cc: <stable@vger.kernel.org>   [6.6+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+[ replaced mm_flags_test() calls with test_bit() ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/ksm.h |    8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/include/linux/ksm.h
++++ b/include/linux/ksm.h
+@@ -57,8 +57,14 @@ static inline long mm_ksm_zero_pages(str
+ static inline void ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+ {
+       /* Adding mm to ksm is best effort on fork. */
+-      if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
++      if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags)) {
++              long nr_ksm_zero_pages = atomic_long_read(&mm->ksm_zero_pages);
++
++              mm->ksm_merging_pages = 0;
++              mm->ksm_rmap_items = 0;
++              atomic_long_add(nr_ksm_zero_pages, &ksm_zero_pages);
+               __ksm_enter(mm);
++      }
+ }
+ static inline int ksm_execve(struct mm_struct *mm)
diff --git a/queue-6.12/pci-endpoint-pci-epf-test-add-null-check-for-dma-channels-before-release.patch b/queue-6.12/pci-endpoint-pci-epf-test-add-null-check-for-dma-channels-before-release.patch
new file mode 100644 (file)
index 0000000..38435c6
--- /dev/null
@@ -0,0 +1,75 @@
+From stable+bounces-185697-greg=kroah.com@vger.kernel.org Tue Oct 14 18:01:13 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Oct 2025 11:58:54 -0400
+Subject: PCI: endpoint: pci-epf-test: Add NULL check for DMA channels before release
+To: stable@vger.kernel.org
+Cc: "Shin'ichiro Kawasaki" <shinichiro.kawasaki@wdc.com>, "Manivannan Sadhasivam" <mani@kernel.org>, "Damien Le Moal" <dlemoal@kernel.org>, "Krzysztof Wilczyński" <kwilczynski@kernel.org>, "Sasha Levin" <sashal@kernel.org>
+Message-ID: <20251014155854.154310-2-sashal@kernel.org>
+
+From: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
+
+[ Upstream commit 85afa9ea122dd9d4a2ead104a951d318975dcd25 ]
+
+The fields dma_chan_tx and dma_chan_rx of the struct pci_epf_test can be
+NULL even after EPF initialization. Then it is prudent to check that
+they have non-NULL values before releasing the channels. Add the checks
+in pci_epf_test_clean_dma_chan().
+
+Without the checks, NULL pointer dereferences happen and they can lead
+to a kernel panic in some cases:
+
+  Unable to handle kernel NULL pointer dereference at virtual address 0000000000000050
+  Call trace:
+   dma_release_channel+0x2c/0x120 (P)
+   pci_epf_test_epc_deinit+0x94/0xc0 [pci_epf_test]
+   pci_epc_deinit_notify+0x74/0xc0
+   tegra_pcie_ep_pex_rst_irq+0x250/0x5d8
+   irq_thread_fn+0x34/0xb8
+   irq_thread+0x18c/0x2e8
+   kthread+0x14c/0x210
+   ret_from_fork+0x10/0x20
+
+Fixes: 8353813c88ef ("PCI: endpoint: Enable DMA tests for endpoints with DMA capabilities")
+Fixes: 5ebf3fc59bd2 ("PCI: endpoint: functions/pci-epf-test: Add DMA support to transfer data")
+Signed-off-by: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
+[mani: trimmed the stack trace]
+Signed-off-by: Manivannan Sadhasivam <mani@kernel.org>
+Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
+Reviewed-by: Krzysztof Wilczyński <kwilczynski@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://patch.msgid.link/20250916025756.34807-1-shinichiro.kawasaki@wdc.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pci/endpoint/functions/pci-epf-test.c |   17 +++++++++++------
+ 1 file changed, 11 insertions(+), 6 deletions(-)
+
+--- a/drivers/pci/endpoint/functions/pci-epf-test.c
++++ b/drivers/pci/endpoint/functions/pci-epf-test.c
+@@ -282,15 +282,20 @@ static void pci_epf_test_clean_dma_chan(
+       if (!epf_test->dma_supported)
+               return;
+-      dma_release_channel(epf_test->dma_chan_tx);
+-      if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) {
++      if (epf_test->dma_chan_tx) {
++              dma_release_channel(epf_test->dma_chan_tx);
++              if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) {
++                      epf_test->dma_chan_tx = NULL;
++                      epf_test->dma_chan_rx = NULL;
++                      return;
++              }
+               epf_test->dma_chan_tx = NULL;
+-              epf_test->dma_chan_rx = NULL;
+-              return;
+       }
+-      dma_release_channel(epf_test->dma_chan_rx);
+-      epf_test->dma_chan_rx = NULL;
++      if (epf_test->dma_chan_rx) {
++              dma_release_channel(epf_test->dma_chan_rx);
++              epf_test->dma_chan_rx = NULL;
++      }
+ }
+ static void pci_epf_test_print_rate(struct pci_epf_test *epf_test,
diff --git a/queue-6.12/pci-endpoint-remove-surplus-return-statement-from-pci_epf_test_clean_dma_chan.patch b/queue-6.12/pci-endpoint-remove-surplus-return-statement-from-pci_epf_test_clean_dma_chan.patch
new file mode 100644 (file)
index 0000000..feb45ff
--- /dev/null
@@ -0,0 +1,48 @@
+From stable+bounces-185696-greg=kroah.com@vger.kernel.org Tue Oct 14 17:59:28 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Oct 2025 11:58:53 -0400
+Subject: PCI: endpoint: Remove surplus return statement from pci_epf_test_clean_dma_chan()
+To: stable@vger.kernel.org
+Cc: "Wang Jiang" <jiangwang@kylinos.cn>, "Krzysztof Wilczyński" <kwilczynski@kernel.org>, "Sasha Levin" <sashal@kernel.org>
+Message-ID: <20251014155854.154310-1-sashal@kernel.org>
+
+From: Wang Jiang <jiangwang@kylinos.cn>
+
+[ Upstream commit 9b80bdb10aee04ce7289896e6bdad13e33972636 ]
+
+Remove a surplus return statement from the void function; the statement
+was added in commit 8353813c88ef ("PCI: endpoint: Enable DMA
+tests for endpoints with DMA capabilities").
+
+An empty return statement at the end of a void function serves
+little purpose.
+
+This fixes the following checkpatch.pl script warning:
+
+  WARNING: void function return statements are not generally useful
+  #296: FILE: drivers/pci/endpoint/functions/pci-epf-test.c:296:
+  +     return;
+  +}
+
+Link: https://lore.kernel.org/r/tencent_F250BEE2A65745A524E2EFE70CF615CA8F06@qq.com
+Signed-off-by: Wang Jiang <jiangwang@kylinos.cn>
+[kwilczynski: commit log]
+Signed-off-by: Krzysztof Wilczyński <kwilczynski@kernel.org>
+Stable-dep-of: 85afa9ea122d ("PCI: endpoint: pci-epf-test: Add NULL check for DMA channels before release")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pci/endpoint/functions/pci-epf-test.c |    2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/drivers/pci/endpoint/functions/pci-epf-test.c
++++ b/drivers/pci/endpoint/functions/pci-epf-test.c
+@@ -291,8 +291,6 @@ static void pci_epf_test_clean_dma_chan(
+       dma_release_channel(epf_test->dma_chan_rx);
+       epf_test->dma_chan_rx = NULL;
+-
+-      return;
+ }
+ static void pci_epf_test_print_rate(struct pci_epf_test *epf_test,
diff --git a/queue-6.12/selftests-mm-skip-soft-dirty-tests-when-config_mem_soft_dirty-is-disabled.patch b/queue-6.12/selftests-mm-skip-soft-dirty-tests-when-config_mem_soft_dirty-is-disabled.patch
new file mode 100644 (file)
index 0000000..381aab2
--- /dev/null
@@ -0,0 +1,192 @@
+From 0389c305ef56cbadca4cbef44affc0ec3213ed30 Mon Sep 17 00:00:00 2001
+From: Lance Yang <lance.yang@linux.dev>
+Date: Wed, 17 Sep 2025 21:31:37 +0800
+Subject: selftests/mm: skip soft-dirty tests when CONFIG_MEM_SOFT_DIRTY is disabled
+
+From: Lance Yang <lance.yang@linux.dev>
+
+commit 0389c305ef56cbadca4cbef44affc0ec3213ed30 upstream.
+
+The madv_populate and soft-dirty kselftests currently fail on systems
+where CONFIG_MEM_SOFT_DIRTY is disabled.
+
+Introduce a new helper softdirty_supported() into vm_util.c/h to ensure
+tests are properly skipped when the feature is not enabled.
+
+Link: https://lkml.kernel.org/r/20250917133137.62802-1-lance.yang@linux.dev
+Fixes: 9f3265db6ae8 ("selftests: vm: add test for Soft-Dirty PTE bit")
+Signed-off-by: Lance Yang <lance.yang@linux.dev>
+Acked-by: David Hildenbrand <david@redhat.com>
+Suggested-by: David Hildenbrand <david@redhat.com>
+Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Cc: Shuah Khan <shuah@kernel.org>
+Cc: Gabriel Krisman Bertazi <krisman@collabora.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/mm/madv_populate.c |   21 -------
+ tools/testing/selftests/mm/soft-dirty.c    |    5 +
+ tools/testing/selftests/mm/vm_util.c       |   77 +++++++++++++++++++++++++++++
+ tools/testing/selftests/mm/vm_util.h       |    1 
+ 4 files changed, 84 insertions(+), 20 deletions(-)
+
+--- a/tools/testing/selftests/mm/madv_populate.c
++++ b/tools/testing/selftests/mm/madv_populate.c
+@@ -264,23 +264,6 @@ static void test_softdirty(void)
+       munmap(addr, SIZE);
+ }
+-static int system_has_softdirty(void)
+-{
+-      /*
+-       * There is no way to check if the kernel supports soft-dirty, other
+-       * than by writing to a page and seeing if the bit was set. But the
+-       * tests are intended to check that the bit gets set when it should, so
+-       * doing that check would turn a potentially legitimate fail into a
+-       * skip. Fortunately, we know for sure that arm64 does not support
+-       * soft-dirty. So for now, let's just use the arch as a corse guide.
+-       */
+-#if defined(__aarch64__)
+-      return 0;
+-#else
+-      return 1;
+-#endif
+-}
+-
+ int main(int argc, char **argv)
+ {
+       int nr_tests = 16;
+@@ -288,7 +271,7 @@ int main(int argc, char **argv)
+       pagesize = getpagesize();
+-      if (system_has_softdirty())
++      if (softdirty_supported())
+               nr_tests += 5;
+       ksft_print_header();
+@@ -300,7 +283,7 @@ int main(int argc, char **argv)
+       test_holes();
+       test_populate_read();
+       test_populate_write();
+-      if (system_has_softdirty())
++      if (softdirty_supported())
+               test_softdirty();
+       err = ksft_get_fail_cnt();
+--- a/tools/testing/selftests/mm/soft-dirty.c
++++ b/tools/testing/selftests/mm/soft-dirty.c
+@@ -193,8 +193,11 @@ int main(int argc, char **argv)
+       int pagesize;
+       ksft_print_header();
+-      ksft_set_plan(15);
++      if (!softdirty_supported())
++              ksft_exit_skip("soft-dirty is not support\n");
++
++      ksft_set_plan(15);
+       pagemap_fd = open(PAGEMAP_FILE_PATH, O_RDONLY);
+       if (pagemap_fd < 0)
+               ksft_exit_fail_msg("Failed to open %s\n", PAGEMAP_FILE_PATH);
+--- a/tools/testing/selftests/mm/vm_util.c
++++ b/tools/testing/selftests/mm/vm_util.c
+@@ -193,6 +193,42 @@ err_out:
+       return rss_anon;
+ }
++char *__get_smap_entry(void *addr, const char *pattern, char *buf, size_t len)
++{
++      int ret;
++      FILE *fp;
++      char *entry = NULL;
++      char addr_pattern[MAX_LINE_LENGTH];
++
++      ret = snprintf(addr_pattern, MAX_LINE_LENGTH, "%08lx-",
++                     (unsigned long)addr);
++      if (ret >= MAX_LINE_LENGTH)
++              ksft_exit_fail_msg("%s: Pattern is too long\n", __func__);
++
++      fp = fopen(SMAP_FILE_PATH, "r");
++      if (!fp)
++              ksft_exit_fail_msg("%s: Failed to open file %s\n", __func__,
++                                 SMAP_FILE_PATH);
++
++      if (!check_for_pattern(fp, addr_pattern, buf, len))
++              goto err_out;
++
++      /* Fetch the pattern in the same block */
++      if (!check_for_pattern(fp, pattern, buf, len))
++              goto err_out;
++
++      /* Trim trailing newline */
++      entry = strchr(buf, '\n');
++      if (entry)
++              *entry = '\0';
++
++      entry = buf + strlen(pattern);
++
++err_out:
++      fclose(fp);
++      return entry;
++}
++
+ bool __check_huge(void *addr, char *pattern, int nr_hpages,
+                 uint64_t hpage_size)
+ {
+@@ -384,3 +420,44 @@ unsigned long get_free_hugepages(void)
+       fclose(f);
+       return fhp;
+ }
++
++static bool check_vmflag(void *addr, const char *flag)
++{
++      char buffer[MAX_LINE_LENGTH];
++      const char *flags;
++      size_t flaglen;
++
++      flags = __get_smap_entry(addr, "VmFlags:", buffer, sizeof(buffer));
++      if (!flags)
++              ksft_exit_fail_msg("%s: No VmFlags for %p\n", __func__, addr);
++
++      while (true) {
++              flags += strspn(flags, " ");
++
++              flaglen = strcspn(flags, " ");
++              if (!flaglen)
++                      return false;
++
++              if (flaglen == strlen(flag) && !memcmp(flags, flag, flaglen))
++                      return true;
++
++              flags += flaglen;
++      }
++}
++
++bool softdirty_supported(void)
++{
++      char *addr;
++      bool supported = false;
++      const size_t pagesize = getpagesize();
++
++      /* New mappings are expected to be marked with VM_SOFTDIRTY (sd). */
++      addr = mmap(0, pagesize, PROT_READ | PROT_WRITE,
++                  MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
++      if (!addr)
++              ksft_exit_fail_msg("mmap failed\n");
++
++      supported = check_vmflag(addr, "sd");
++      munmap(addr, pagesize);
++      return supported;
++}
+--- a/tools/testing/selftests/mm/vm_util.h
++++ b/tools/testing/selftests/mm/vm_util.h
+@@ -53,6 +53,7 @@ int uffd_unregister(int uffd, void *addr
+ int uffd_register_with_ioctls(int uffd, void *addr, uint64_t len,
+                             bool miss, bool wp, bool minor, uint64_t *ioctls);
+ unsigned long get_free_hugepages(void);
++bool softdirty_supported(void);
+ /*
+  * On ppc64 this will only work with radix 2M hugepage size
index b7013f797ccc4ee8ca42ffbedb2984a29306df1f..c69f987edeed402fed3c2fab09b15eba52fcc657 100644 (file)
@@ -219,3 +219,26 @@ ext4-guard-against-ea-inode-refcount-underflow-in-xattr-update.patch
 ext4-validate-ea_ino-and-size-in-check_xattrs.patch
 acpica-allow-to-skip-global-lock-initialization.patch
 ext4-free-orphan-info-with-kvfree.patch
+lib-crypto-curve25519-hacl64-disable-kasan-with-clang-17-and-older.patch
+selftests-mm-skip-soft-dirty-tests-when-config_mem_soft_dirty-is-disabled.patch
+arm64-cputype-add-neoverse-v3ae-definitions.patch
+arm64-errata-apply-workarounds-for-neoverse-v3ae.patch
+media-mc-clear-minor-number-before-put-device.patch
+squashfs-add-additional-inode-sanity-checking.patch
+squashfs-reject-negative-file-sizes-in-squashfs_read_inode.patch
+tracing-fix-race-condition-in-kprobe-initialization-causing-null-pointer-dereference.patch
+mm-ksm-fix-incorrect-ksm-counter-handling-in-mm_struct-during-fork.patch
+pci-endpoint-remove-surplus-return-statement-from-pci_epf_test_clean_dma_chan.patch
+pci-endpoint-pci-epf-test-add-null-check-for-dma-channels-before-release.patch
+asoc-sof-ipc4-pcm-enable-delay-reporting-for-chaindma-streams.patch
+asoc-sof-ipc4-pcm-fix-delay-calculation-when-dsp-resamples.patch
+mfd-intel_soc_pmic_chtdc_ti-fix-invalid-regmap-config-max_register-value.patch
+mfd-intel_soc_pmic_chtdc_ti-drop-unneeded-assignment-for-cache_type.patch
+mfd-intel_soc_pmic_chtdc_ti-set-use_single_read-regmap_config-flag.patch
+btrfs-fix-the-incorrect-max_bytes-value-for-find_lock_delalloc_range.patch
+cpufreq-make-drivers-using-cpufreq_eternal-specify-transition-latency.patch
+kvm-x86-advertise-srso_user_kernel_no-to-userspace.patch
+statmount-don-t-call-path_put-under-namespace-semaphore.patch
+arm64-mte-do-not-flag-the-zero-page-as-pg_mte_tagged.patch
+x86-mtrr-rename-mtrr_overwrite_state-to-guest_force_mtrr_state.patch
+x86-kvm-force-legacy-pci-hole-to-uc-when-overriding-mtrrs-for-tdx-snp.patch
diff --git a/queue-6.12/squashfs-add-additional-inode-sanity-checking.patch b/queue-6.12/squashfs-add-additional-inode-sanity-checking.patch
new file mode 100644 (file)
index 0000000..d73ff19
--- /dev/null
@@ -0,0 +1,90 @@
+From stable+bounces-185500-greg=kroah.com@vger.kernel.org Mon Oct 13 21:28:43 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Oct 2025 15:28:34 -0400
+Subject: Squashfs: add additional inode sanity checking
+To: stable@vger.kernel.org
+Cc: Phillip Lougher <phillip@squashfs.org.uk>, Andrew Morton <akpm@linux-foundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251013192835.3566456-1-sashal@kernel.org>
+
+From: Phillip Lougher <phillip@squashfs.org.uk>
+
+[ Upstream commit 9ee94bfbe930a1b39df53fa2d7b31141b780eb5a ]
+
+Patch series "Squashfs: performance improvement and a sanity check".
+
+This patchset adds an additional sanity check when reading regular file
+inodes, and adds support for SEEK_DATA/SEEK_HOLE lseek() whence values.
+
+This patch (of 2):
+
+Add an additional sanity check when reading regular file inodes.
+
+A regular file whose size is an exact multiple of the filesystem
+block size cannot have a fragment.  This is because by definition a
+fragment block stores tailends which are not a whole block in size.
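+
+For example, with a 128 KiB block size a 256 KiB file fills exactly two
+blocks and has no tail-end, so a fragment reference in its inode can only
+be the result of corruption; a 260 KiB file, by contrast, may legitimately
+keep its final 4 KiB in a fragment block.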
+
+Link: https://lkml.kernel.org/r/20250923220652.568416-1-phillip@squashfs.org.uk
+Link: https://lkml.kernel.org/r/20250923220652.568416-2-phillip@squashfs.org.uk
+Signed-off-by: Phillip Lougher <phillip@squashfs.org.uk>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Stable-dep-of: 9f1c14c1de1b ("Squashfs: reject negative file sizes in squashfs_read_inode()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/squashfs/inode.c |   20 ++++++++++++++++++--
+ 1 file changed, 18 insertions(+), 2 deletions(-)
+
+--- a/fs/squashfs/inode.c
++++ b/fs/squashfs/inode.c
+@@ -140,8 +140,17 @@ int squashfs_read_inode(struct inode *in
+               if (err < 0)
+                       goto failed_read;
++              inode->i_size = le32_to_cpu(sqsh_ino->file_size);
+               frag = le32_to_cpu(sqsh_ino->fragment);
+               if (frag != SQUASHFS_INVALID_FRAG) {
++                      /*
++                       * the file cannot have a fragment (tailend) and have a
++                       * file size a multiple of the block size
++                       */
++                      if ((inode->i_size & (msblk->block_size - 1)) == 0) {
++                              err = -EINVAL;
++                              goto failed_read;
++                      }
+                       frag_offset = le32_to_cpu(sqsh_ino->offset);
+                       frag_size = squashfs_frag_lookup(sb, frag, &frag_blk);
+                       if (frag_size < 0) {
+@@ -155,7 +164,6 @@ int squashfs_read_inode(struct inode *in
+               }
+               set_nlink(inode, 1);
+-              inode->i_size = le32_to_cpu(sqsh_ino->file_size);
+               inode->i_fop = &generic_ro_fops;
+               inode->i_mode |= S_IFREG;
+               inode->i_blocks = ((inode->i_size - 1) >> 9) + 1;
+@@ -184,8 +192,17 @@ int squashfs_read_inode(struct inode *in
+               if (err < 0)
+                       goto failed_read;
++              inode->i_size = le64_to_cpu(sqsh_ino->file_size);
+               frag = le32_to_cpu(sqsh_ino->fragment);
+               if (frag != SQUASHFS_INVALID_FRAG) {
++                      /*
++                       * the file cannot have a fragment (tailend) and have a
++                       * file size a multiple of the block size
++                       */
++                      if ((inode->i_size & (msblk->block_size - 1)) == 0) {
++                              err = -EINVAL;
++                              goto failed_read;
++                      }
+                       frag_offset = le32_to_cpu(sqsh_ino->offset);
+                       frag_size = squashfs_frag_lookup(sb, frag, &frag_blk);
+                       if (frag_size < 0) {
+@@ -200,7 +217,6 @@ int squashfs_read_inode(struct inode *in
+               xattr_id = le32_to_cpu(sqsh_ino->xattr);
+               set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
+-              inode->i_size = le64_to_cpu(sqsh_ino->file_size);
+               inode->i_op = &squashfs_inode_ops;
+               inode->i_fop = &generic_ro_fops;
+               inode->i_mode |= S_IFREG;
diff --git a/queue-6.12/squashfs-reject-negative-file-sizes-in-squashfs_read_inode.patch b/queue-6.12/squashfs-reject-negative-file-sizes-in-squashfs_read_inode.patch
new file mode 100644 (file)
index 0000000..74cee85
--- /dev/null
@@ -0,0 +1,48 @@
+From stable+bounces-185501-greg=kroah.com@vger.kernel.org Mon Oct 13 21:28:44 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Oct 2025 15:28:35 -0400
+Subject: Squashfs: reject negative file sizes in squashfs_read_inode()
+To: stable@vger.kernel.org
+Cc: Phillip Lougher <phillip@squashfs.org.uk>, syzbot+f754e01116421e9754b9@syzkaller.appspotmail.com, Amir Goldstein <amir73il@gmail.com>, Andrew Morton <akpm@linux-foundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251013192835.3566456-2-sashal@kernel.org>
+
+From: Phillip Lougher <phillip@squashfs.org.uk>
+
+[ Upstream commit 9f1c14c1de1bdde395f6cc893efa4f80a2ae3b2b ]
+
+Syzkaller reports a "WARNING in ovl_copy_up_file" in overlayfs.
+
+This warning is ultimately caused by the underlying Squashfs file
+system returning a file with a negative file size.
+
+This commit checks for a negative file size and returns EINVAL.
+
+[phillip@squashfs.org.uk: only need to check 64 bit quantity]
+  Link: https://lkml.kernel.org/r/20250926222305.110103-1-phillip@squashfs.org.uk
+Link: https://lkml.kernel.org/r/20250926215935.107233-1-phillip@squashfs.org.uk
+Fixes: 6545b246a2c8 ("Squashfs: inode operations")
+Signed-off-by: Phillip Lougher <phillip@squashfs.org.uk>
+Reported-by: syzbot+f754e01116421e9754b9@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/68d580e5.a00a0220.303701.0019.GAE@google.com/
+Cc: Amir Goldstein <amir73il@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/squashfs/inode.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/fs/squashfs/inode.c
++++ b/fs/squashfs/inode.c
+@@ -193,6 +193,10 @@ int squashfs_read_inode(struct inode *in
+                       goto failed_read;
+               inode->i_size = le64_to_cpu(sqsh_ino->file_size);
++              if (inode->i_size < 0) {
++                      err = -EINVAL;
++                      goto failed_read;
++              }
+               frag = le32_to_cpu(sqsh_ino->fragment);
+               if (frag != SQUASHFS_INVALID_FRAG) {
+                       /*
diff --git a/queue-6.12/statmount-don-t-call-path_put-under-namespace-semaphore.patch b/queue-6.12/statmount-don-t-call-path_put-under-namespace-semaphore.patch
new file mode 100644 (file)
index 0000000..1e6ab79
--- /dev/null
@@ -0,0 +1,68 @@
+From stable+bounces-186008-greg=kroah.com@vger.kernel.org Thu Oct 16 13:59:24 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Oct 2025 07:59:15 -0400
+Subject: statmount: don't call path_put() under namespace semaphore
+To: stable@vger.kernel.org
+Cc: Christian Brauner <brauner@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251016115915.3270405-1-sashal@kernel.org>
+
+From: Christian Brauner <brauner@kernel.org>
+
+[ Upstream commit e8c84e2082e69335f66c8ade4895e80ec270d7c4 ]
+
+Massage statmount() and make sure we don't call path_put() under the
+namespace semaphore. If we put the last reference we're fscked.
+
+Fixes: 46eae99ef733 ("add statmount(2) syscall")
+Cc: stable@vger.kernel.org # v6.8+
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/namespace.c |    8 +++-----
+ 1 file changed, 3 insertions(+), 5 deletions(-)
+
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -5200,7 +5200,6 @@ static int grab_requested_root(struct mn
+ static int do_statmount(struct kstatmount *s, u64 mnt_id, u64 mnt_ns_id,
+                       struct mnt_namespace *ns)
+ {
+-      struct path root __free(path_put) = {};
+       struct mount *m;
+       int err;
+@@ -5212,7 +5211,7 @@ static int do_statmount(struct kstatmoun
+       if (!s->mnt)
+               return -ENOENT;
+-      err = grab_requested_root(ns, &root);
++      err = grab_requested_root(ns, &s->root);
+       if (err)
+               return err;
+@@ -5221,15 +5220,13 @@ static int do_statmount(struct kstatmoun
+        * mounts to show users.
+        */
+       m = real_mount(s->mnt);
+-      if (!is_path_reachable(m, m->mnt.mnt_root, &root) &&
++      if (!is_path_reachable(m, m->mnt.mnt_root, &s->root) &&
+           !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN))
+               return -EPERM;
+       err = security_sb_statfs(s->mnt->mnt_root);
+       if (err)
+               return err;
+-
+-      s->root = root;
+       if (s->mask & STATMOUNT_SB_BASIC)
+               statmount_sb_basic(s);
+@@ -5406,6 +5403,7 @@ retry:
+       if (!ret)
+               ret = copy_statmount_to_user(ks);
+       kvfree(ks->seq.buf);
++      path_put(&ks->root);
+       if (retry_statmount(ret, &seq_size))
+               goto retry;
+       return ret;
diff --git a/queue-6.12/tracing-fix-race-condition-in-kprobe-initialization-causing-null-pointer-dereference.patch b/queue-6.12/tracing-fix-race-condition-in-kprobe-initialization-causing-null-pointer-dereference.patch
new file mode 100644 (file)
index 0000000..980c39a
--- /dev/null
@@ -0,0 +1,274 @@
+From stable+bounces-185558-greg=kroah.com@vger.kernel.org Tue Oct 14 02:19:22 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Oct 2025 20:19:15 -0400
+Subject: tracing: Fix race condition in kprobe initialization causing NULL pointer dereference
+To: stable@vger.kernel.org
+Cc: Yuan Chen <chenyuan@kylinos.cn>, "Masami Hiramatsu (Google)" <mhiramat@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251014001915.3749537-1-sashal@kernel.org>
+
+From: Yuan Chen <chenyuan@kylinos.cn>
+
+[ Upstream commit 9cf9aa7b0acfde7545c1a1d912576e9bab28dc6f ]
+
+There is a critical race condition in kprobe initialization that can lead to
+NULL pointer dereference and kernel crash.
+
+[1135630.084782] Unable to handle kernel paging request at virtual address 0000710a04630000
+...
+[1135630.260314] pstate: 404003c9 (nZcv DAIF +PAN -UAO)
+[1135630.269239] pc : kprobe_perf_func+0x30/0x260
+[1135630.277643] lr : kprobe_dispatcher+0x44/0x60
+[1135630.286041] sp : ffffaeff4977fa40
+[1135630.293441] x29: ffffaeff4977fa40 x28: ffffaf015340e400
+[1135630.302837] x27: 0000000000000000 x26: 0000000000000000
+[1135630.312257] x25: ffffaf029ed108a8 x24: ffffaf015340e528
+[1135630.321705] x23: ffffaeff4977fc50 x22: ffffaeff4977fc50
+[1135630.331154] x21: 0000000000000000 x20: ffffaeff4977fc50
+[1135630.340586] x19: ffffaf015340e400 x18: 0000000000000000
+[1135630.349985] x17: 0000000000000000 x16: 0000000000000000
+[1135630.359285] x15: 0000000000000000 x14: 0000000000000000
+[1135630.368445] x13: 0000000000000000 x12: 0000000000000000
+[1135630.377473] x11: 0000000000000000 x10: 0000000000000000
+[1135630.386411] x9 : 0000000000000000 x8 : 0000000000000000
+[1135630.395252] x7 : 0000000000000000 x6 : 0000000000000000
+[1135630.403963] x5 : 0000000000000000 x4 : 0000000000000000
+[1135630.412545] x3 : 0000710a04630000 x2 : 0000000000000006
+[1135630.421021] x1 : ffffaeff4977fc50 x0 : 0000710a04630000
+[1135630.429410] Call trace:
+[1135630.434828]  kprobe_perf_func+0x30/0x260
+[1135630.441661]  kprobe_dispatcher+0x44/0x60
+[1135630.448396]  aggr_pre_handler+0x70/0xc8
+[1135630.454959]  kprobe_breakpoint_handler+0x140/0x1e0
+[1135630.462435]  brk_handler+0xbc/0xd8
+[1135630.468437]  do_debug_exception+0x84/0x138
+[1135630.475074]  el1_dbg+0x18/0x8c
+[1135630.480582]  security_file_permission+0x0/0xd0
+[1135630.487426]  vfs_write+0x70/0x1c0
+[1135630.493059]  ksys_write+0x5c/0xc8
+[1135630.498638]  __arm64_sys_write+0x24/0x30
+[1135630.504821]  el0_svc_common+0x78/0x130
+[1135630.510838]  el0_svc_handler+0x38/0x78
+[1135630.516834]  el0_svc+0x8/0x1b0
+
+kernel/trace/trace_kprobe.c: 1308
+0xffff3df8995039ec <kprobe_perf_func+0x2c>:     ldr     x21, [x24,#120]
+include/linux/compiler.h: 294
+0xffff3df8995039f0 <kprobe_perf_func+0x30>:     ldr     x1, [x21,x0]
+
+kernel/trace/trace_kprobe.c
+1308: head = this_cpu_ptr(call->perf_events);
+1309: if (hlist_empty(head))
+1310:  return 0;
+
+crash> struct trace_event_call -o
+struct trace_event_call {
+  ...
+  [120] struct hlist_head *perf_events;  //(call->perf_event)
+  ...
+}
+
+crash> struct trace_event_call ffffaf015340e528
+struct trace_event_call {
+  ...
+  perf_events = 0xffff0ad5fa89f088, //this value is correct, but x21 = 0
+  ...
+}
+
+Race Condition Analysis:
+
+The race occurs between kprobe activation and perf_events initialization:
+
+  CPU0                                    CPU1
+  ====                                    ====
+  perf_kprobe_init
+    perf_trace_event_init
+      tp_event->perf_events = list;(1)
+      tp_event->class->reg (2)← KPROBE ACTIVE
+                                          Debug exception triggers
+                                          ...
+                                          kprobe_dispatcher
+                                            kprobe_perf_func (tk->tp.flags & TP_FLAG_PROFILE)
+                                              head = this_cpu_ptr(call->perf_events)(3)
+                                              (perf_events is still NULL)
+
+Problem:
+1. CPU0 executes (1) assigning tp_event->perf_events = list
+2. CPU0 executes (2) enabling kprobe functionality via class->reg()
+3. CPU1 triggers and reaches kprobe_dispatcher
+4. CPU1 checks TP_FLAG_PROFILE - condition passes (step 2 completed)
+5. CPU1 calls kprobe_perf_func() and crashes at (3) because
+   call->perf_events is still NULL
+
+CPU1 sees that kprobe functionality is enabled but does not see that
+perf_events has been assigned.
+
+Add pairing read and write memory barriers to guarantee that if CPU1
+sees that kprobe functionality is enabled, it must also see that
+perf_events has been assigned.
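+
+A simplified sketch of the resulting publish/consume pairing (structure
+flattened for illustration; in the code the flag word and the perf_events
+pointer live on separate objects):
+
+  /* enable path (CPU0): publish perf_events before setting the flag */
+  tp_event->perf_events = list;
+  smp_store_release(&tp_event->flags, tp_event->flags | TP_FLAG_PROFILE);
+
+  /* probe hit (CPU1): the acquire load orders the flag check
+   * before any use of perf_events */
+  if (smp_load_acquire(&tp_event->flags) & TP_FLAG_PROFILE)
+          head = this_cpu_ptr(tp_event->perf_events);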
+
+Link: https://lore.kernel.org/all/20251001022025.44626-1-chenyuan_fl@163.com/
+
+Fixes: 50d780560785 ("tracing/kprobes: Add probe handler dispatcher to support perf and ftrace concurrent use")
+Cc: stable@vger.kernel.org
+Signed-off-by: Yuan Chen <chenyuan@kylinos.cn>
+Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+[ Adjust context ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace_fprobe.c |   11 +++++++----
+ kernel/trace/trace_kprobe.c |   11 +++++++----
+ kernel/trace/trace_probe.h  |    9 +++++++--
+ kernel/trace/trace_uprobe.c |   12 ++++++++----
+ 4 files changed, 29 insertions(+), 14 deletions(-)
+
+--- a/kernel/trace/trace_fprobe.c
++++ b/kernel/trace/trace_fprobe.c
+@@ -343,12 +343,14 @@ static int fentry_dispatcher(struct fpro
+                            void *entry_data)
+ {
+       struct trace_fprobe *tf = container_of(fp, struct trace_fprobe, fp);
++      unsigned int flags = trace_probe_load_flag(&tf->tp);
+       int ret = 0;
+-      if (trace_probe_test_flag(&tf->tp, TP_FLAG_TRACE))
++      if (flags & TP_FLAG_TRACE)
+               fentry_trace_func(tf, entry_ip, regs);
++
+ #ifdef CONFIG_PERF_EVENTS
+-      if (trace_probe_test_flag(&tf->tp, TP_FLAG_PROFILE))
++      if (flags & TP_FLAG_PROFILE)
+               ret = fentry_perf_func(tf, entry_ip, regs);
+ #endif
+       return ret;
+@@ -360,11 +362,12 @@ static void fexit_dispatcher(struct fpro
+                            void *entry_data)
+ {
+       struct trace_fprobe *tf = container_of(fp, struct trace_fprobe, fp);
++      unsigned int flags = trace_probe_load_flag(&tf->tp);
+-      if (trace_probe_test_flag(&tf->tp, TP_FLAG_TRACE))
++      if (flags & TP_FLAG_TRACE)
+               fexit_trace_func(tf, entry_ip, ret_ip, regs, entry_data);
+ #ifdef CONFIG_PERF_EVENTS
+-      if (trace_probe_test_flag(&tf->tp, TP_FLAG_PROFILE))
++      if (flags & TP_FLAG_PROFILE)
+               fexit_perf_func(tf, entry_ip, ret_ip, regs, entry_data);
+ #endif
+ }
+--- a/kernel/trace/trace_kprobe.c
++++ b/kernel/trace/trace_kprobe.c
+@@ -1799,14 +1799,15 @@ static int kprobe_register(struct trace_
+ static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
+ {
+       struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
++      unsigned int flags = trace_probe_load_flag(&tk->tp);
+       int ret = 0;
+       raw_cpu_inc(*tk->nhit);
+-      if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
++      if (flags & TP_FLAG_TRACE)
+               kprobe_trace_func(tk, regs);
+ #ifdef CONFIG_PERF_EVENTS
+-      if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
++      if (flags & TP_FLAG_PROFILE)
+               ret = kprobe_perf_func(tk, regs);
+ #endif
+       return ret;
+@@ -1818,6 +1819,7 @@ kretprobe_dispatcher(struct kretprobe_in
+ {
+       struct kretprobe *rp = get_kretprobe(ri);
+       struct trace_kprobe *tk;
++      unsigned int flags;
+       /*
+        * There is a small chance that get_kretprobe(ri) returns NULL when
+@@ -1830,10 +1832,11 @@ kretprobe_dispatcher(struct kretprobe_in
+       tk = container_of(rp, struct trace_kprobe, rp);
+       raw_cpu_inc(*tk->nhit);
+-      if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
++      flags = trace_probe_load_flag(&tk->tp);
++      if (flags & TP_FLAG_TRACE)
+               kretprobe_trace_func(tk, ri, regs);
+ #ifdef CONFIG_PERF_EVENTS
+-      if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
++      if (flags & TP_FLAG_PROFILE)
+               kretprobe_perf_func(tk, ri, regs);
+ #endif
+       return 0;       /* We don't tweak kernel, so just return 0 */
+--- a/kernel/trace/trace_probe.h
++++ b/kernel/trace/trace_probe.h
+@@ -269,16 +269,21 @@ struct event_file_link {
+       struct list_head                list;
+ };
++static inline unsigned int trace_probe_load_flag(struct trace_probe *tp)
++{
++      return smp_load_acquire(&tp->event->flags);
++}
++
+ static inline bool trace_probe_test_flag(struct trace_probe *tp,
+                                        unsigned int flag)
+ {
+-      return !!(tp->event->flags & flag);
++      return !!(trace_probe_load_flag(tp) & flag);
+ }
+ static inline void trace_probe_set_flag(struct trace_probe *tp,
+                                       unsigned int flag)
+ {
+-      tp->event->flags |= flag;
++      smp_store_release(&tp->event->flags, tp->event->flags | flag);
+ }
+ static inline void trace_probe_clear_flag(struct trace_probe *tp,
+--- a/kernel/trace/trace_uprobe.c
++++ b/kernel/trace/trace_uprobe.c
+@@ -1531,6 +1531,7 @@ static int uprobe_dispatcher(struct upro
+       struct trace_uprobe *tu;
+       struct uprobe_dispatch_data udd;
+       struct uprobe_cpu_buffer *ucb = NULL;
++      unsigned int flags;
+       int ret = 0;
+       tu = container_of(con, struct trace_uprobe, consumer);
+@@ -1545,11 +1546,12 @@ static int uprobe_dispatcher(struct upro
+       if (WARN_ON_ONCE(!uprobe_cpu_buffer))
+               return 0;
+-      if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
++      flags = trace_probe_load_flag(&tu->tp);
++      if (flags & TP_FLAG_TRACE)
+               ret |= uprobe_trace_func(tu, regs, &ucb);
+ #ifdef CONFIG_PERF_EVENTS
+-      if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
++      if (flags & TP_FLAG_PROFILE)
+               ret |= uprobe_perf_func(tu, regs, &ucb);
+ #endif
+       uprobe_buffer_put(ucb);
+@@ -1562,6 +1564,7 @@ static int uretprobe_dispatcher(struct u
+       struct trace_uprobe *tu;
+       struct uprobe_dispatch_data udd;
+       struct uprobe_cpu_buffer *ucb = NULL;
++      unsigned int flags;
+       tu = container_of(con, struct trace_uprobe, consumer);
+@@ -1573,11 +1576,12 @@ static int uretprobe_dispatcher(struct u
+       if (WARN_ON_ONCE(!uprobe_cpu_buffer))
+               return 0;
+-      if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
++      flags = trace_probe_load_flag(&tu->tp);
++      if (flags & TP_FLAG_TRACE)
+               uretprobe_trace_func(tu, func, regs, &ucb);
+ #ifdef CONFIG_PERF_EVENTS
+-      if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
++      if (flags & TP_FLAG_PROFILE)
+               uretprobe_perf_func(tu, func, regs, &ucb);
+ #endif
+       uprobe_buffer_put(ucb);
diff --git a/queue-6.12/x86-kvm-force-legacy-pci-hole-to-uc-when-overriding-mtrrs-for-tdx-snp.patch b/queue-6.12/x86-kvm-force-legacy-pci-hole-to-uc-when-overriding-mtrrs-for-tdx-snp.patch
new file mode 100644 (file)
index 0000000..5a472e0
--- /dev/null
@@ -0,0 +1,199 @@
+From stable+bounces-186229-greg=kroah.com@vger.kernel.org Fri Oct 17 03:13:19 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Oct 2025 21:13:06 -0400
+Subject: x86/kvm: Force legacy PCI hole to UC when overriding MTRRs for TDX/SNP
+To: stable@vger.kernel.org
+Cc: "Sean Christopherson" <seanjc@google.com>, "Peter Gonda" <pgonda@google.com>, "Vitaly Kuznetsov" <vkuznets@redhat.com>, "Tom Lendacky" <thomas.lendacky@amd.com>, "Jürgen Groß" <jgross@suse.com>, "Korakit Seemakhupt" <korakit@google.com>, "Jianxiong Gao" <jxgao@google.com>, "Nikolay Borisov" <nik.borisov@suse.com>, "Binbin Wu" <binbin.wu@linux.intel.com>, "Sasha Levin" <sashal@kernel.org>
+Message-ID: <20251017011306.3501479-2-sashal@kernel.org>
+
+From: Sean Christopherson <seanjc@google.com>
+
+[ Upstream commit 0dccbc75e18df85399a71933d60b97494110f559 ]
+
+When running as an SNP or TDX guest under KVM, force the legacy PCI hole,
+i.e. memory between Top of Lower Usable DRAM and 4GiB, to be mapped as UC
+via a forced variable MTRR range.
+
+In most KVM-based setups, legacy devices such as the HPET and TPM are
+enumerated via ACPI.  ACPI enumeration includes a Memory32Fixed entry, and
+optionally a SystemMemory descriptor for an OperationRegion, e.g. if the
+device needs to be accessed via a Control Method.
+
+If a SystemMemory entry is present, then the kernel's ACPI driver will
+auto-ioremap the region so that it can be accessed at will.  However, the
+ACPI spec doesn't provide a way to enumerate the memory type of
+SystemMemory regions, i.e. there's no way to tell software that a region
+must be mapped as UC vs. WB, etc.  As a result, Linux's ACPI driver always
+maps SystemMemory regions using ioremap_cache(), i.e. as WB on x86.
+
+The dedicated device drivers however, e.g. the HPET driver and TPM driver,
+want to map their associated memory as UC or WC, as accessing PCI devices
+using WB is unsupported.
+
+On bare metal and in non-CoCo guests, the conflicting requirements "work"
+because firmware configures the PCI hole (and other device memory) to be UC
+in the MTRRs.
+So even though the ACPI mappings request WB, they are forced to UC- in the
+kernel's tracking due to the kernel properly handling the MTRR overrides,
+and thus are compatible with the drivers' requested WC/UC-.
+
+With force WB MTRRs on SNP and TDX guests, the ACPI mappings get their
+requested WB if the ACPI mappings are established before the dedicated
+driver code attempts to initialize the device.  E.g. if acpi_init()
+runs before the corresponding device driver is probed, ACPI's WB mapping
+will "win", and result in the driver's ioremap() failing because the
+existing WB mapping isn't compatible with the requested WC/UC-.
+
+E.g. when a TPM is emulated by the hypervisor (ignoring the security
+implications of relying on what is allegedly an untrusted entity to store
+measurements), the TPM driver will request UC and fail:
+
+  [  1.730459] ioremap error for 0xfed40000-0xfed45000, requested 0x2, got 0x0
+  [  1.732780] tpm_tis MSFT0101:00: probe with driver tpm_tis failed with error -12
+
+Note, the '0x2' and '0x0' values refer to "enum page_cache_mode", not x86's
+memtypes (which frustratingly are an almost pure inversion; 2 == WB, 0 == UC).
+E.g. tracing mapping requests for TPM TIS yields:
+
+ Mapping TPM TIS with req_type = 0
+ WARNING: CPU: 22 PID: 1 at arch/x86/mm/pat/memtype.c:530 memtype_reserve+0x2ab/0x460
+ Modules linked in:
+ CPU: 22 UID: 0 PID: 1 Comm: swapper/0 Tainted: G        W           6.16.0-rc7+ #2 VOLUNTARY
+ Tainted: [W]=WARN
+ Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 05/29/2025
+ RIP: 0010:memtype_reserve+0x2ab/0x460
+  __ioremap_caller+0x16d/0x3d0
+  ioremap_cache+0x17/0x30
+  x86_acpi_os_ioremap+0xe/0x20
+  acpi_os_map_iomem+0x1f3/0x240
+  acpi_os_map_memory+0xe/0x20
+  acpi_ex_system_memory_space_handler+0x273/0x440
+  acpi_ev_address_space_dispatch+0x176/0x4c0
+  acpi_ex_access_region+0x2ad/0x530
+  acpi_ex_field_datum_io+0xa2/0x4f0
+  acpi_ex_extract_from_field+0x296/0x3e0
+  acpi_ex_read_data_from_field+0xd1/0x460
+  acpi_ex_resolve_node_to_value+0x2ee/0x530
+  acpi_ex_resolve_to_value+0x1f2/0x540
+  acpi_ds_evaluate_name_path+0x11b/0x190
+  acpi_ds_exec_end_op+0x456/0x960
+  acpi_ps_parse_loop+0x27a/0xa50
+  acpi_ps_parse_aml+0x226/0x600
+  acpi_ps_execute_method+0x172/0x3e0
+  acpi_ns_evaluate+0x175/0x5f0
+  acpi_evaluate_object+0x213/0x490
+  acpi_evaluate_integer+0x6d/0x140
+  acpi_bus_get_status+0x93/0x150
+  acpi_add_single_object+0x43a/0x7c0
+  acpi_bus_check_add+0x149/0x3a0
+  acpi_bus_check_add_1+0x16/0x30
+  acpi_ns_walk_namespace+0x22c/0x360
+  acpi_walk_namespace+0x15c/0x170
+  acpi_bus_scan+0x1dd/0x200
+  acpi_scan_init+0xe5/0x2b0
+  acpi_init+0x264/0x5b0
+  do_one_initcall+0x5a/0x310
+  kernel_init_freeable+0x34f/0x4f0
+  kernel_init+0x1b/0x200
+  ret_from_fork+0x186/0x1b0
+  ret_from_fork_asm+0x1a/0x30
+  </TASK>
+
+The above traces are from a Google-VMM based VM, but the same behavior
+happens with a QEMU based VM that is modified to add a SystemMemory range
+for the TPM TIS address space.
+
+The only reason this doesn't cause problems for HPET, which appears to
+require a SystemMemory region, is because HPET gets special treatment via
+x86_init.timers.timer_init(), and so gets a chance to create its UC-
+mapping before acpi_init() clobbers things.  Disabling the early call to
+hpet_time_init() yields the same behavior for HPET:
+
+  [  0.318264] ioremap error for 0xfed00000-0xfed01000, requested 0x2, got 0x0
+
+Hack around the ACPI gap by forcing the legacy PCI hole to UC when
+overriding the (virtual) MTRRs for CoCo guests, so that ioremap handling
+of MTRRs naturally kicks in and forces the ACPI mappings to be UC.
+
+Note, the requested/mapped memtype doesn't actually matter in terms of
+accessing the device.  In practically every setup, legacy PCI devices are
+emulated by the hypervisor, and accesses are intercepted and handled as
+emulated MMIO, i.e. never access physical memory and thus don't have an
+effective memtype.
+
+Even in a theoretical setup where such devices are passed through by the
+host, i.e. point at real MMIO memory, it is KVM's (as the hypervisor)
+responsibility to force the memory to be WC/UC, e.g. via EPT memtype
+under TDX or real hardware MTRRs under SNP.  Not doing so cannot work,
+and the hypervisor is highly motivated to do the right thing as letting
+the guest access hardware MMIO with WB would likely result in a variety
+of fatal #MCs.
+
+In other words, forcing the range to be UC is all about coercing the
+kernel's tracking into thinking that it has established UC mappings, so
+that the ioremap code doesn't reject mappings from e.g. the TPM driver and
+thus prevent the driver from loading and the device from functioning.
+
+Note #2, relying on guest firmware to handle this scenario, e.g. by setting
+virtual MTRRs and then consuming them in Linux, is not a viable option, as
+the virtual MTRR state is managed by the untrusted hypervisor, and because
+OVMF at least has stopped programming virtual MTRRs when running as a TDX
+guest.
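+
+As a worked example of the forced-range encoding (values assumed: TOLUD at
+2 GiB, 46 physical address bits, X86_MEMTYPE_UC == 0, MTRR_PHYSMASK_V ==
+BIT(11)):
+
+  tolud   = 0x80000000
+  base_lo = tolud | X86_MEMTYPE_UC                    = 0x80000000
+  mask_lo = (u32)~(SZ_4G - tolud - 1) | MTRR_PHYSMASK_V
+          = 0x80000000 | 0x800                        = 0x80000800
+  mask_hi = (BIT_ULL(46) - 1) >> 32                   = 0x3fff
+
+so a physical address matches the range exactly when bit 31 is set and all
+higher implemented bits are clear, i.e. the legacy PCI hole [2 GiB, 4 GiB).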
+
+Link: https://lore.kernel.org/all/8137d98e-8825-415b-9282-1d2a115bb51a@linux.intel.com
+Fixes: 8e690b817e38 ("x86/kvm: Override default caching mode for SEV-SNP and TDX")
+Cc: stable@vger.kernel.org
+Cc: Peter Gonda <pgonda@google.com>
+Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Jürgen Groß <jgross@suse.com>
+Cc: Korakit Seemakhupt <korakit@google.com>
+Cc: Jianxiong Gao <jxgao@google.com>
+Cc: Nikolay Borisov <nik.borisov@suse.com>
+Suggested-by: Binbin Wu <binbin.wu@linux.intel.com>
+Reviewed-by: Binbin Wu <binbin.wu@linux.intel.com>
+Tested-by: Korakit Seemakhupt <korakit@google.com>
+Link: https://lore.kernel.org/r/20250828005249.39339-1-seanjc@google.com
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/kvm.c |   21 +++++++++++++++++++--
+ 1 file changed, 19 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -933,6 +933,19 @@ static void kvm_sev_hc_page_enc_status(u
+ static void __init kvm_init_platform(void)
+ {
++      u64 tolud = PFN_PHYS(e820__end_of_low_ram_pfn());
++      /*
++       * Note, hardware requires variable MTRR ranges to be power-of-2 sized
++       * and naturally aligned.  But when forcing guest MTRR state, Linux
++       * doesn't program the forced ranges into hardware.  Don't bother doing
++       * the math to generate a technically-legal range.
++       */
++      struct mtrr_var_range pci_hole = {
++              .base_lo = tolud | X86_MEMTYPE_UC,
++              .mask_lo = (u32)(~(SZ_4G - tolud - 1)) | MTRR_PHYSMASK_V,
++              .mask_hi = (BIT_ULL(boot_cpu_data.x86_phys_bits) - 1) >> 32,
++      };
++
+       if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) &&
+           kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL)) {
+               unsigned long nr_pages;
+@@ -982,8 +995,12 @@ static void __init kvm_init_platform(voi
+       kvmclock_init();
+       x86_platform.apic_post_init = kvm_apic_init;
+-      /* Set WB as the default cache mode for SEV-SNP and TDX */
+-      guest_force_mtrr_state(NULL, 0, MTRR_TYPE_WRBACK);
++      /*
++       * Set WB as the default cache mode for SEV-SNP and TDX, with a single
++       * UC range for the legacy PCI hole, e.g. so that devices that expect
++       * to get UC/WC mappings don't get surprised with WB.
++       */
++      guest_force_mtrr_state(&pci_hole, 1, MTRR_TYPE_WRBACK);
+ }
+ #if defined(CONFIG_AMD_MEM_ENCRYPT)
diff --git a/queue-6.12/x86-mtrr-rename-mtrr_overwrite_state-to-guest_force_mtrr_state.patch b/queue-6.12/x86-mtrr-rename-mtrr_overwrite_state-to-guest_force_mtrr_state.patch
new file mode 100644 (file)
index 0000000..0649bd2
--- /dev/null
@@ -0,0 +1,132 @@
+From stable+bounces-186228-greg=kroah.com@vger.kernel.org Fri Oct 17 03:13:19 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Oct 2025 21:13:05 -0400
+Subject: x86/mtrr: Rename mtrr_overwrite_state() to guest_force_mtrr_state()
+To: stable@vger.kernel.org
+Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>, Dave Hansen <dave.hansen@intel.com>, Dave Hansen <dave.hansen@linux.intel.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251017011306.3501479-1-sashal@kernel.org>
+
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+
+[ Upstream commit 6a5abeea9c72e1d2c538622b4cf66c80cc816fd3 ]
+
+Rename the helper to better reflect its function.
+
+Suggested-by: Dave Hansen <dave.hansen@intel.com>
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Acked-by: Dave Hansen <dave.hansen@intel.com>
+Link: https://lore.kernel.org/all/20241202073139.448208-1-kirill.shutemov%40linux.intel.com
+Stable-dep-of: 0dccbc75e18d ("x86/kvm: Force legacy PCI hole to UC when overriding MTRRs for TDX/SNP")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/hyperv/ivm.c              |    2 +-
+ arch/x86/include/asm/mtrr.h        |   10 +++++-----
+ arch/x86/kernel/cpu/mtrr/generic.c |    6 +++---
+ arch/x86/kernel/cpu/mtrr/mtrr.c    |    2 +-
+ arch/x86/kernel/kvm.c              |    2 +-
+ arch/x86/xen/enlighten_pv.c        |    4 ++--
+ 6 files changed, 13 insertions(+), 13 deletions(-)
+
+--- a/arch/x86/hyperv/ivm.c
++++ b/arch/x86/hyperv/ivm.c
+@@ -681,7 +681,7 @@ void __init hv_vtom_init(void)
+       x86_platform.guest.enc_status_change_finish = hv_vtom_set_host_visibility;
+       /* Set WB as the default cache mode. */
+-      mtrr_overwrite_state(NULL, 0, MTRR_TYPE_WRBACK);
++      guest_force_mtrr_state(NULL, 0, MTRR_TYPE_WRBACK);
+ }
+ #endif /* defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST) */
+--- a/arch/x86/include/asm/mtrr.h
++++ b/arch/x86/include/asm/mtrr.h
+@@ -58,8 +58,8 @@ struct mtrr_state_type {
+  */
+ # ifdef CONFIG_MTRR
+ void mtrr_bp_init(void);
+-void mtrr_overwrite_state(struct mtrr_var_range *var, unsigned int num_var,
+-                        mtrr_type def_type);
++void guest_force_mtrr_state(struct mtrr_var_range *var, unsigned int num_var,
++                          mtrr_type def_type);
+ extern u8 mtrr_type_lookup(u64 addr, u64 end, u8 *uniform);
+ extern void mtrr_save_fixed_ranges(void *);
+ extern void mtrr_save_state(void);
+@@ -75,9 +75,9 @@ void mtrr_disable(void);
+ void mtrr_enable(void);
+ void mtrr_generic_set_state(void);
+ #  else
+-static inline void mtrr_overwrite_state(struct mtrr_var_range *var,
+-                                      unsigned int num_var,
+-                                      mtrr_type def_type)
++static inline void guest_force_mtrr_state(struct mtrr_var_range *var,
++                                        unsigned int num_var,
++                                        mtrr_type def_type)
+ {
+ }
+--- a/arch/x86/kernel/cpu/mtrr/generic.c
++++ b/arch/x86/kernel/cpu/mtrr/generic.c
+@@ -423,7 +423,7 @@ void __init mtrr_copy_map(void)
+ }
+ /**
+- * mtrr_overwrite_state - set static MTRR state
++ * guest_force_mtrr_state - set static MTRR state for a guest
+  *
+  * Used to set MTRR state via different means (e.g. with data obtained from
+  * a hypervisor).
+@@ -436,8 +436,8 @@ void __init mtrr_copy_map(void)
+  * @num_var: length of the @var array
+  * @def_type: default caching type
+  */
+-void mtrr_overwrite_state(struct mtrr_var_range *var, unsigned int num_var,
+-                        mtrr_type def_type)
++void guest_force_mtrr_state(struct mtrr_var_range *var, unsigned int num_var,
++                          mtrr_type def_type)
+ {
+       unsigned int i;
+--- a/arch/x86/kernel/cpu/mtrr/mtrr.c
++++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
+@@ -625,7 +625,7 @@ void mtrr_save_state(void)
+ static int __init mtrr_init_finalize(void)
+ {
+       /*
+-       * Map might exist if mtrr_overwrite_state() has been called or if
++       * Map might exist if guest_force_mtrr_state() has been called or if
+        * mtrr_enabled() returns true.
+        */
+       mtrr_copy_map();
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -983,7 +983,7 @@ static void __init kvm_init_platform(voi
+       x86_platform.apic_post_init = kvm_apic_init;
+       /* Set WB as the default cache mode for SEV-SNP and TDX */
+-      mtrr_overwrite_state(NULL, 0, MTRR_TYPE_WRBACK);
++      guest_force_mtrr_state(NULL, 0, MTRR_TYPE_WRBACK);
+ }
+ #if defined(CONFIG_AMD_MEM_ENCRYPT)
+--- a/arch/x86/xen/enlighten_pv.c
++++ b/arch/x86/xen/enlighten_pv.c
+@@ -171,7 +171,7 @@ static void __init xen_set_mtrr_data(voi
+       /* Only overwrite MTRR state if any MTRR could be got from Xen. */
+       if (reg)
+-              mtrr_overwrite_state(var, reg, MTRR_TYPE_UNCACHABLE);
++              guest_force_mtrr_state(var, reg, MTRR_TYPE_UNCACHABLE);
+ #endif
+ }
+@@ -195,7 +195,7 @@ static void __init xen_pv_init_platform(
+       if (xen_initial_domain())
+               xen_set_mtrr_data();
+       else
+-              mtrr_overwrite_state(NULL, 0, MTRR_TYPE_WRBACK);
++              guest_force_mtrr_state(NULL, 0, MTRR_TYPE_WRBACK);
+       /* Adjust nr_cpu_ids before "enumeration" happens */
+       xen_smp_count_cpus();