--- /dev/null
+From f04dd30f1bef1ed2e74a4050af6e5e5e3869bac3 Mon Sep 17 00:00:00 2001
+From: Vishal Badole <Vishal.Badole@amd.com>
+Date: Thu, 24 Apr 2025 18:32:48 +0530
+Subject: amd-xgbe: Fix to ensure dependent features are toggled with RX checksum offload
+
+From: Vishal Badole <Vishal.Badole@amd.com>
+
+commit f04dd30f1bef1ed2e74a4050af6e5e5e3869bac3 upstream.
+
+According to the XGMAC specification, enabling features such as Layer 3
+and Layer 4 Packet Filtering, Split Header and Virtualized Network support
+automatically selects the IPC Full Checksum Offload Engine on the receive
+side.
+
+When RX checksum offload is disabled, these dependent features must also
+be disabled to prevent abnormal behavior caused by mismatched feature
+dependencies.
+
+Ensure that toggling RX checksum offload (disabling or enabling) properly
+disables or enables all dependent features, maintaining consistent and
+expected behavior in the network device.
+
+Cc: stable@vger.kernel.org
+Fixes: 1a510ccf5869 ("amd-xgbe: Add support for VXLAN offload capabilities")
+Signed-off-by: Vishal Badole <Vishal.Badole@amd.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20250424130248.428865-1-Vishal.Badole@amd.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-desc.c | 9 +++++++--
+ drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 24 ++++++++++++++++++++++--
+ drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 11 +++++++++--
+ drivers/net/ethernet/amd/xgbe/xgbe.h | 4 ++++
+ 4 files changed, 42 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+@@ -373,8 +373,13 @@ static int xgbe_map_rx_buffer(struct xgb
+ }
+
+ /* Set up the header page info */
+- xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
+- XGBE_SKB_ALLOC_SIZE);
++ if (pdata->netdev->features & NETIF_F_RXCSUM) {
++ xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
++ XGBE_SKB_ALLOC_SIZE);
++ } else {
++ xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
++ pdata->rx_buf_size);
++ }
+
+ /* Set up the buffer page info */
+ xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa,
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+@@ -320,6 +320,18 @@ static void xgbe_config_sph_mode(struct
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
+ }
+
++static void xgbe_disable_sph_mode(struct xgbe_prv_data *pdata)
++{
++ unsigned int i;
++
++ for (i = 0; i < pdata->channel_count; i++) {
++ if (!pdata->channel[i]->rx_ring)
++ break;
++
++ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 0);
++ }
++}
++
+ static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
+ unsigned int index, unsigned int val)
+ {
+@@ -3545,8 +3557,12 @@ static int xgbe_init(struct xgbe_prv_dat
+ xgbe_config_tx_coalesce(pdata);
+ xgbe_config_rx_buffer_size(pdata);
+ xgbe_config_tso_mode(pdata);
+- xgbe_config_sph_mode(pdata);
+- xgbe_config_rss(pdata);
++
++ if (pdata->netdev->features & NETIF_F_RXCSUM) {
++ xgbe_config_sph_mode(pdata);
++ xgbe_config_rss(pdata);
++ }
++
+ desc_if->wrapper_tx_desc_init(pdata);
+ desc_if->wrapper_rx_desc_init(pdata);
+ xgbe_enable_dma_interrupts(pdata);
+@@ -3702,5 +3718,9 @@ void xgbe_init_function_ptrs_dev(struct
+ hw_if->disable_vxlan = xgbe_disable_vxlan;
+ hw_if->set_vxlan_id = xgbe_set_vxlan_id;
+
++ /* For Split Header*/
++ hw_if->enable_sph = xgbe_config_sph_mode;
++ hw_if->disable_sph = xgbe_disable_sph_mode;
++
+ DBGPR("<--xgbe_init_function_ptrs\n");
+ }
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -2257,10 +2257,17 @@ static int xgbe_set_features(struct net_
+ if (ret)
+ return ret;
+
+- if ((features & NETIF_F_RXCSUM) && !rxcsum)
++ if ((features & NETIF_F_RXCSUM) && !rxcsum) {
++ hw_if->enable_sph(pdata);
++ hw_if->enable_vxlan(pdata);
+ hw_if->enable_rx_csum(pdata);
+- else if (!(features & NETIF_F_RXCSUM) && rxcsum)
++ schedule_work(&pdata->restart_work);
++ } else if (!(features & NETIF_F_RXCSUM) && rxcsum) {
++ hw_if->disable_sph(pdata);
++ hw_if->disable_vxlan(pdata);
+ hw_if->disable_rx_csum(pdata);
++ schedule_work(&pdata->restart_work);
++ }
+
+ if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
+ hw_if->enable_rx_vlan_stripping(pdata);
+--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
+@@ -865,6 +865,10 @@ struct xgbe_hw_if {
+ void (*enable_vxlan)(struct xgbe_prv_data *);
+ void (*disable_vxlan)(struct xgbe_prv_data *);
+ void (*set_vxlan_id)(struct xgbe_prv_data *);
++
++ /* For Split Header */
++ void (*enable_sph)(struct xgbe_prv_data *pdata);
++ void (*disable_sph)(struct xgbe_prv_data *pdata);
+ };
+
+ /* This structure represents implementation specific routines for an
--- /dev/null
+From fee4d171451c1ad9e8aaf65fc0ab7d143a33bd72 Mon Sep 17 00:00:00 2001
+From: Will Deacon <will@kernel.org>
+Date: Thu, 1 May 2025 11:47:47 +0100
+Subject: arm64: errata: Add missing sentinels to Spectre-BHB MIDR arrays
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Will Deacon <will@kernel.org>
+
+commit fee4d171451c1ad9e8aaf65fc0ab7d143a33bd72 upstream.
+
+Commit a5951389e58d ("arm64: errata: Add newer ARM cores to the
+spectre_bhb_loop_affected() lists") added some additional CPUs to the
+Spectre-BHB workaround, including some new arrays for designs that
+require new 'k' values for the workaround to be effective.
+
+Unfortunately, the new arrays omitted the sentinel entry and so
+is_midr_in_range_list() will walk off the end when it doesn't find a
+match. With UBSAN enabled, this leads to a crash during boot when
+is_midr_in_range_list() is inlined (which was more common prior to
+c8c2647e69be ("arm64: Make _midr_in_range_list() an exported
+function")):
+
+ | Internal error: aarch64 BRK: 00000000f2000001 [#1] PREEMPT SMP
+ | pstate: 804000c5 (Nzcv daIF +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+ | pc : spectre_bhb_loop_affected+0x28/0x30
+ | lr : is_spectre_bhb_affected+0x170/0x190
+ | [...]
+ | Call trace:
+ | spectre_bhb_loop_affected+0x28/0x30
+ | update_cpu_capabilities+0xc0/0x184
+ | init_cpu_features+0x188/0x1a4
+ | cpuinfo_store_boot_cpu+0x4c/0x60
+ | smp_prepare_boot_cpu+0x38/0x54
+ | start_kernel+0x8c/0x478
+ | __primary_switched+0xc8/0xd4
+ | Code: 6b09011f 54000061 52801080 d65f03c0 (d4200020)
+ | ---[ end trace 0000000000000000 ]---
+ | Kernel panic - not syncing: aarch64 BRK: Fatal exception
+
+Add the missing sentinel entries.
+
+Cc: Lee Jones <lee@kernel.org>
+Cc: James Morse <james.morse@arm.com>
+Cc: Doug Anderson <dianders@chromium.org>
+Cc: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
+Cc: <stable@vger.kernel.org>
+Reported-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Fixes: a5951389e58d ("arm64: errata: Add newer ARM cores to the spectre_bhb_loop_affected() lists")
+Signed-off-by: Will Deacon <will@kernel.org>
+Reviewed-by: Lee Jones <lee@kernel.org>
+Reviewed-by: Douglas Anderson <dianders@chromium.org>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Link: https://lore.kernel.org/r/20250501104747.28431-1-will@kernel.org
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/proton-pack.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/arm64/kernel/proton-pack.c
++++ b/arch/arm64/kernel/proton-pack.c
+@@ -879,10 +879,12 @@ static u8 spectre_bhb_loop_affected(void
+ static const struct midr_range spectre_bhb_k132_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_X3),
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
++ {},
+ };
+ static const struct midr_range spectre_bhb_k38_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A715),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
++ {},
+ };
+ static const struct midr_range spectre_bhb_k32_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
--- /dev/null
+From 170d1a3738908eef6a0dbf378ea77fb4ae8e294d Mon Sep 17 00:00:00 2001
+From: Carlos Llamas <cmllamas@google.com>
+Date: Tue, 25 Mar 2025 18:49:00 +0000
+Subject: binder: fix offset calculation in debug log
+
+From: Carlos Llamas <cmllamas@google.com>
+
+commit 170d1a3738908eef6a0dbf378ea77fb4ae8e294d upstream.
+
+The vma start address should be subtracted from the buffer's user data
+address and not the other way around.
+
+Cc: Tiffany Y. Yang <ynaffit@google.com>
+Cc: stable <stable@kernel.org>
+Fixes: 162c79731448 ("binder: avoid user addresses in debug logs")
+Signed-off-by: Carlos Llamas <cmllamas@google.com>
+Reviewed-by: Tiffany Y. Yang <ynaffit@google.com>
+Link: https://lore.kernel.org/r/20250325184902.587138-1-cmllamas@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+[cmllamas: fix conflicts due to alloc->buffer renaming]
+Signed-off-by: Carlos Llamas <cmllamas@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/android/binder.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -6374,7 +6374,7 @@ static void print_binder_transaction_ilo
+ seq_printf(m, " node %d", buffer->target_node->debug_id);
+ seq_printf(m, " size %zd:%zd offset %lx\n",
+ buffer->data_size, buffer->offsets_size,
+- proc->alloc.buffer - buffer->user_data);
++ buffer->user_data - proc->alloc.buffer);
+ }
+
+ static void print_binder_work_ilocked(struct seq_file *m,
--- /dev/null
+From e08e49d986f82c30f42ad0ed43ebbede1e1e3739 Mon Sep 17 00:00:00 2001
+From: Josef Bacik <josef@toxicpanda.com>
+Date: Mon, 14 Apr 2025 14:51:58 -0400
+Subject: btrfs: adjust subpage bit start based on sectorsize
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+commit e08e49d986f82c30f42ad0ed43ebbede1e1e3739 upstream.
+
+When running machines with 64k page size and a 16k nodesize we started
+seeing tree log corruption in production. This turned out to be because
+we were not writing out dirty blocks sometimes, so this in fact affects
+all metadata writes.
+
+When writing out a subpage EB we scan the subpage bitmap for a dirty
+range. If the range isn't dirty we do
+
+ bit_start++;
+
+to move onto the next bit. The problem is the bitmap is based on the
+number of sectors that an EB has. So in this case, we have a 64k
+pagesize, 16k nodesize, but a 4k sectorsize. This means our bitmap is 4
+bits for every node. With a 64k page size we end up with 4 nodes per
+page.
+
+To make this easier this is how everything looks
+
+[0 16k 32k 48k ] logical address
+[0 4 8 12 ] radix tree offset
+[ 64k page ] folio
+[ 16k eb ][ 16k eb ][ 16k eb ][ 16k eb ] extent buffers
+[ | | | | | | | | | | | | | | | | ] bitmap
+
+Now we use all of our addressing based on fs_info->sectorsize_bits, so
+as you can see the above our 16k eb->start turns into radix entry 4.
+
+When we find a dirty range for our eb, we correctly do bit_start +=
+sectors_per_node, because if we start at bit 0, the next bit for the
+next eb is 4, to correspond to eb->start 16k.
+
+However if our range is clean, we will do bit_start++, which will now
+put us offset from our radix tree entries.
+
+In our case, assume that the first time we check the bitmap the block is
+not dirty, we increment bit_start so now it == 1, and then we loop
+around and check again. This time it is dirty, and we go to find that
+start using the following equation
+
+ start = folio_start + bit_start * fs_info->sectorsize;
+
+so in the case above, eb->start 0 is now dirty, and we calculate start
+as
+
+ 0 + 1 * fs_info->sectorsize = 4096
+ 4096 >> 12 = 1
+
+Now we're looking up the radix tree for 1, and we won't find an eb.
+What's worse is now we're using bit_start == 1, so we do bit_start +=
+sectors_per_node, which is now 5. If that eb is dirty we will run into
+the same thing, we will look at an offset that is not populated in the
+radix tree, and now we're skipping the writeout of dirty extent buffers.
+
+The best fix for this is to not use sectorsize_bits to address nodes,
+but that's a larger change. Since this is a fs corruption problem fix
+it simply by always using sectors_per_node to increment the start bit.
+
+Fixes: c4aec299fa8f ("btrfs: introduce submit_eb_subpage() to submit a subpage metadata page")
+CC: stable@vger.kernel.org # 5.15+
+Reviewed-by: Boris Burkov <boris@bur.io>
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/extent_io.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -1870,7 +1870,7 @@ static int submit_eb_subpage(struct foli
+ subpage->bitmaps)) {
+ spin_unlock_irqrestore(&subpage->lock, flags);
+ spin_unlock(&folio->mapping->i_private_lock);
+- bit_start++;
++ bit_start += sectors_per_node;
+ continue;
+ }
+
--- /dev/null
+From be3f1938d3e6ea8186f0de3dd95245dda4f22c1e Mon Sep 17 00:00:00 2001
+From: Dave Chen <davechen@synology.com>
+Date: Tue, 15 Apr 2025 14:33:42 +0800
+Subject: btrfs: fix COW handling in run_delalloc_nocow()
+
+From: Dave Chen <davechen@synology.com>
+
+commit be3f1938d3e6ea8186f0de3dd95245dda4f22c1e upstream.
+
+In run_delalloc_nocow(), when the found btrfs_key's offset > cur_offset,
+it indicates a gap between the current processing region and
+the next file extent. The original code would directly jump to
+the "must_cow" label, which increments the slot and forces a fallback
+to COW. This behavior might skip an extent item and result in an
+overestimated COW fallback range.
+
+This patch modifies the logic so that when a gap is detected:
+
+- If no COW range is already being recorded (cow_start is unset),
+ cow_start is set to cur_offset.
+
+- cur_offset is then advanced to the beginning of the next extent.
+
+- Instead of jumping to "must_cow", control flows directly to
+ "next_slot" so that the same extent item can be reexamined properly.
+
+The change ensures that we accurately account for the extent gap and
+avoid accidentally extending the range that needs to fallback to COW.
+
+CC: stable@vger.kernel.org # 6.6+
+Reviewed-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: Dave Chen <davechen@synology.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/inode.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -2155,12 +2155,13 @@ next_slot:
+
+ /*
+ * If the found extent starts after requested offset, then
+- * adjust extent_end to be right before this extent begins
++ * adjust cur_offset to be right before this extent begins.
+ */
+ if (found_key.offset > cur_offset) {
+- extent_end = found_key.offset;
+- extent_type = 0;
+- goto must_cow;
++ if (cow_start == (u64)-1)
++ cow_start = cur_offset;
++ cur_offset = found_key.offset;
++ goto next_slot;
+ }
+
+ /*
--- /dev/null
+From 7491cdf46b5cbdf123fc84fbe0a07e9e3d7b7620 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Date: Wed, 16 Apr 2025 16:12:37 +0200
+Subject: cpufreq: Avoid using inconsistent policy->min and policy->max
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+commit 7491cdf46b5cbdf123fc84fbe0a07e9e3d7b7620 upstream.
+
+Since cpufreq_driver_resolve_freq() can run in parallel with
+cpufreq_set_policy() and there is no synchronization between them,
+the former may access policy->min and policy->max while the latter
+is updating them and it may see intermediate values of them due
+to the way the update is carried out. Also the compiler is free
+to apply any optimizations it wants both to the stores in
+cpufreq_set_policy() and to the loads in cpufreq_driver_resolve_freq()
+which may result in additional inconsistencies.
+
+To address this, use WRITE_ONCE() when updating policy->min and
+policy->max in cpufreq_set_policy() and use READ_ONCE() for reading
+them in cpufreq_driver_resolve_freq(). Moreover, rearrange the update
+in cpufreq_set_policy() to avoid storing intermediate values in
+policy->min and policy->max with the help of the observation that
+their new values are expected to be properly ordered upfront.
+
+Also modify cpufreq_driver_resolve_freq() to take the possible reverse
+ordering of policy->min and policy->max, which may happen depending on
+the ordering of operations when this function and cpufreq_set_policy()
+run concurrently, into account by always honoring the max when it
+turns out to be less than the min (in case it comes from thermal
+throttling or similar).
+
+Fixes: 151717690694 ("cpufreq: Make policy min/max hard requirements")
+Cc: 5.16+ <stable@vger.kernel.org> # 5.16+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Reviewed-by: Christian Loehle <christian.loehle@arm.com>
+Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
+Link: https://patch.msgid.link/5907080.DvuYhMxLoT@rjwysocki.net
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/cpufreq/cpufreq.c | 32 +++++++++++++++++++++++++-------
+ 1 file changed, 25 insertions(+), 7 deletions(-)
+
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -538,8 +538,6 @@ static unsigned int __resolve_freq(struc
+ {
+ unsigned int idx;
+
+- target_freq = clamp_val(target_freq, policy->min, policy->max);
+-
+ if (!policy->freq_table)
+ return target_freq;
+
+@@ -563,7 +561,22 @@ static unsigned int __resolve_freq(struc
+ unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+ {
+- return __resolve_freq(policy, target_freq, CPUFREQ_RELATION_LE);
++ unsigned int min = READ_ONCE(policy->min);
++ unsigned int max = READ_ONCE(policy->max);
++
++ /*
++ * If this function runs in parallel with cpufreq_set_policy(), it may
++ * read policy->min before the update and policy->max after the update
++ * or the other way around, so there is no ordering guarantee.
++ *
++ * Resolve this by always honoring the max (in case it comes from
++ * thermal throttling or similar).
++ */
++ if (unlikely(min > max))
++ min = max;
++
++ return __resolve_freq(policy, clamp_val(target_freq, min, max),
++ CPUFREQ_RELATION_LE);
+ }
+ EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
+
+@@ -2323,6 +2336,7 @@ int __cpufreq_driver_target(struct cpufr
+ if (cpufreq_disabled())
+ return -ENODEV;
+
++ target_freq = clamp_val(target_freq, policy->min, policy->max);
+ target_freq = __resolve_freq(policy, target_freq, relation);
+
+ pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
+@@ -2647,11 +2661,15 @@ static int cpufreq_set_policy(struct cpu
+ * Resolve policy min/max to available frequencies. It ensures
+ * no frequency resolution will neither overshoot the requested maximum
+ * nor undershoot the requested minimum.
++ *
++ * Avoid storing intermediate values in policy->max or policy->min and
++ * compiler optimizations around them because they may be accessed
++ * concurrently by cpufreq_driver_resolve_freq() during the update.
+ */
+- policy->min = new_data.min;
+- policy->max = new_data.max;
+- policy->min = __resolve_freq(policy, policy->min, CPUFREQ_RELATION_L);
+- policy->max = __resolve_freq(policy, policy->max, CPUFREQ_RELATION_H);
++ WRITE_ONCE(policy->max, __resolve_freq(policy, new_data.max, CPUFREQ_RELATION_H));
++ new_data.min = __resolve_freq(policy, new_data.min, CPUFREQ_RELATION_L);
++ WRITE_ONCE(policy->min, new_data.min > policy->max ? policy->max : new_data.min);
++
+ trace_cpu_frequency_limits(policy);
+
+ cpufreq_update_pressure(policy);
--- /dev/null
+From b79028039f440e7d2c4df6ab243060c4e3803e84 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Date: Fri, 25 Apr 2025 13:36:21 +0200
+Subject: cpufreq: Fix setting policy limits when frequency tables are used
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+commit b79028039f440e7d2c4df6ab243060c4e3803e84 upstream.
+
+Commit 7491cdf46b5c ("cpufreq: Avoid using inconsistent policy->min and
+policy->max") overlooked the fact that policy->min and policy->max were
+accessed directly in cpufreq_frequency_table_target() and in the
+functions called by it. Consequently, the changes made by that commit
+led to problems with setting policy limits.
+
+Address this by passing the target frequency limits to __resolve_freq()
+and cpufreq_frequency_table_target() and propagating them to the
+functions called by the latter.
+
+Fixes: 7491cdf46b5c ("cpufreq: Avoid using inconsistent policy->min and policy->max")
+Cc: 5.16+ <stable@vger.kernel.org> # 5.16+
+Closes: https://lore.kernel.org/linux-pm/aAplED3IA_J0eZN0@linaro.org/
+Reported-by: Stephan Gerhold <stephan.gerhold@linaro.org>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Tested-by: Stephan Gerhold <stephan.gerhold@linaro.org>
+Reviewed-by: Lifeng Zheng <zhenglifeng1@huawei.com>
+Link: https://patch.msgid.link/5896780.DvuYhMxLoT@rjwysocki.net
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/cpufreq/cpufreq.c | 22 ++++++---
+ drivers/cpufreq/cpufreq_ondemand.c | 3 -
+ drivers/cpufreq/freq_table.c | 6 +-
+ include/linux/cpufreq.h | 83 ++++++++++++++++++++++++-------------
+ 4 files changed, 73 insertions(+), 41 deletions(-)
+
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -534,14 +534,18 @@ void cpufreq_disable_fast_switch(struct
+ EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
+
+ static unsigned int __resolve_freq(struct cpufreq_policy *policy,
+- unsigned int target_freq, unsigned int relation)
++ unsigned int target_freq,
++ unsigned int min, unsigned int max,
++ unsigned int relation)
+ {
+ unsigned int idx;
+
++ target_freq = clamp_val(target_freq, min, max);
++
+ if (!policy->freq_table)
+ return target_freq;
+
+- idx = cpufreq_frequency_table_target(policy, target_freq, relation);
++ idx = cpufreq_frequency_table_target(policy, target_freq, min, max, relation);
+ policy->cached_resolved_idx = idx;
+ policy->cached_target_freq = target_freq;
+ return policy->freq_table[idx].frequency;
+@@ -575,8 +579,7 @@ unsigned int cpufreq_driver_resolve_freq
+ if (unlikely(min > max))
+ min = max;
+
+- return __resolve_freq(policy, clamp_val(target_freq, min, max),
+- CPUFREQ_RELATION_LE);
++ return __resolve_freq(policy, target_freq, min, max, CPUFREQ_RELATION_LE);
+ }
+ EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
+
+@@ -2336,8 +2339,8 @@ int __cpufreq_driver_target(struct cpufr
+ if (cpufreq_disabled())
+ return -ENODEV;
+
+- target_freq = clamp_val(target_freq, policy->min, policy->max);
+- target_freq = __resolve_freq(policy, target_freq, relation);
++ target_freq = __resolve_freq(policy, target_freq, policy->min,
++ policy->max, relation);
+
+ pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
+ policy->cpu, target_freq, relation, old_target_freq);
+@@ -2666,8 +2669,11 @@ static int cpufreq_set_policy(struct cpu
+ * compiler optimizations around them because they may be accessed
+ * concurrently by cpufreq_driver_resolve_freq() during the update.
+ */
+- WRITE_ONCE(policy->max, __resolve_freq(policy, new_data.max, CPUFREQ_RELATION_H));
+- new_data.min = __resolve_freq(policy, new_data.min, CPUFREQ_RELATION_L);
++ WRITE_ONCE(policy->max, __resolve_freq(policy, new_data.max,
++ new_data.min, new_data.max,
++ CPUFREQ_RELATION_H));
++ new_data.min = __resolve_freq(policy, new_data.min, new_data.min,
++ new_data.max, CPUFREQ_RELATION_L);
+ WRITE_ONCE(policy->min, new_data.min > policy->max ? policy->max : new_data.min);
+
+ trace_cpu_frequency_limits(policy);
+--- a/drivers/cpufreq/cpufreq_ondemand.c
++++ b/drivers/cpufreq/cpufreq_ondemand.c
+@@ -76,7 +76,8 @@ static unsigned int generic_powersave_bi
+ return freq_next;
+ }
+
+- index = cpufreq_frequency_table_target(policy, freq_next, relation);
++ index = cpufreq_frequency_table_target(policy, freq_next, policy->min,
++ policy->max, relation);
+ freq_req = freq_table[index].frequency;
+ freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
+ freq_avg = freq_req - freq_reduc;
+--- a/drivers/cpufreq/freq_table.c
++++ b/drivers/cpufreq/freq_table.c
+@@ -116,8 +116,8 @@ int cpufreq_generic_frequency_table_veri
+ EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify);
+
+ int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
+- unsigned int target_freq,
+- unsigned int relation)
++ unsigned int target_freq, unsigned int min,
++ unsigned int max, unsigned int relation)
+ {
+ struct cpufreq_frequency_table optimal = {
+ .driver_data = ~0,
+@@ -148,7 +148,7 @@ int cpufreq_table_index_unsorted(struct
+ cpufreq_for_each_valid_entry_idx(pos, table, i) {
+ freq = pos->frequency;
+
+- if ((freq < policy->min) || (freq > policy->max))
++ if (freq < min || freq > max)
+ continue;
+ if (freq == target_freq) {
+ optimal.driver_data = i;
+--- a/include/linux/cpufreq.h
++++ b/include/linux/cpufreq.h
+@@ -770,8 +770,8 @@ int cpufreq_frequency_table_verify(struc
+ int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy);
+
+ int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
+- unsigned int target_freq,
+- unsigned int relation);
++ unsigned int target_freq, unsigned int min,
++ unsigned int max, unsigned int relation);
+ int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
+ unsigned int freq);
+
+@@ -836,12 +836,12 @@ static inline int cpufreq_table_find_ind
+ return best;
+ }
+
+-/* Works only on sorted freq-tables */
+-static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy,
+- unsigned int target_freq,
+- bool efficiencies)
++static inline int find_index_l(struct cpufreq_policy *policy,
++ unsigned int target_freq,
++ unsigned int min, unsigned int max,
++ bool efficiencies)
+ {
+- target_freq = clamp_val(target_freq, policy->min, policy->max);
++ target_freq = clamp_val(target_freq, min, max);
+
+ if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
+ return cpufreq_table_find_index_al(policy, target_freq,
+@@ -851,6 +851,14 @@ static inline int cpufreq_table_find_ind
+ efficiencies);
+ }
+
++/* Works only on sorted freq-tables */
++static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy,
++ unsigned int target_freq,
++ bool efficiencies)
++{
++ return find_index_l(policy, target_freq, policy->min, policy->max, efficiencies);
++}
++
+ /* Find highest freq at or below target in a table in ascending order */
+ static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+@@ -904,12 +912,12 @@ static inline int cpufreq_table_find_ind
+ return best;
+ }
+
+-/* Works only on sorted freq-tables */
+-static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy,
+- unsigned int target_freq,
+- bool efficiencies)
++static inline int find_index_h(struct cpufreq_policy *policy,
++ unsigned int target_freq,
++ unsigned int min, unsigned int max,
++ bool efficiencies)
+ {
+- target_freq = clamp_val(target_freq, policy->min, policy->max);
++ target_freq = clamp_val(target_freq, min, max);
+
+ if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
+ return cpufreq_table_find_index_ah(policy, target_freq,
+@@ -919,6 +927,14 @@ static inline int cpufreq_table_find_ind
+ efficiencies);
+ }
+
++/* Works only on sorted freq-tables */
++static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy,
++ unsigned int target_freq,
++ bool efficiencies)
++{
++ return find_index_h(policy, target_freq, policy->min, policy->max, efficiencies);
++}
++
+ /* Find closest freq to target in a table in ascending order */
+ static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+@@ -989,12 +1005,12 @@ static inline int cpufreq_table_find_ind
+ return best;
+ }
+
+-/* Works only on sorted freq-tables */
+-static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
+- unsigned int target_freq,
+- bool efficiencies)
++static inline int find_index_c(struct cpufreq_policy *policy,
++ unsigned int target_freq,
++ unsigned int min, unsigned int max,
++ bool efficiencies)
+ {
+- target_freq = clamp_val(target_freq, policy->min, policy->max);
++ target_freq = clamp_val(target_freq, min, max);
+
+ if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
+ return cpufreq_table_find_index_ac(policy, target_freq,
+@@ -1004,7 +1020,17 @@ static inline int cpufreq_table_find_ind
+ efficiencies);
+ }
+
+-static inline bool cpufreq_is_in_limits(struct cpufreq_policy *policy, int idx)
++/* Works only on sorted freq-tables */
++static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
++ unsigned int target_freq,
++ bool efficiencies)
++{
++ return find_index_c(policy, target_freq, policy->min, policy->max, efficiencies);
++}
++
++static inline bool cpufreq_is_in_limits(struct cpufreq_policy *policy,
++ unsigned int min, unsigned int max,
++ int idx)
+ {
+ unsigned int freq;
+
+@@ -1013,11 +1039,13 @@ static inline bool cpufreq_is_in_limits(
+
+ freq = policy->freq_table[idx].frequency;
+
+- return freq == clamp_val(freq, policy->min, policy->max);
++ return freq == clamp_val(freq, min, max);
+ }
+
+ static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
++ unsigned int min,
++ unsigned int max,
+ unsigned int relation)
+ {
+ bool efficiencies = policy->efficiencies_available &&
+@@ -1028,29 +1056,26 @@ static inline int cpufreq_frequency_tabl
+ relation &= ~CPUFREQ_RELATION_E;
+
+ if (unlikely(policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED))
+- return cpufreq_table_index_unsorted(policy, target_freq,
+- relation);
++ return cpufreq_table_index_unsorted(policy, target_freq, min,
++ max, relation);
+ retry:
+ switch (relation) {
+ case CPUFREQ_RELATION_L:
+- idx = cpufreq_table_find_index_l(policy, target_freq,
+- efficiencies);
++ idx = find_index_l(policy, target_freq, min, max, efficiencies);
+ break;
+ case CPUFREQ_RELATION_H:
+- idx = cpufreq_table_find_index_h(policy, target_freq,
+- efficiencies);
++ idx = find_index_h(policy, target_freq, min, max, efficiencies);
+ break;
+ case CPUFREQ_RELATION_C:
+- idx = cpufreq_table_find_index_c(policy, target_freq,
+- efficiencies);
++ idx = find_index_c(policy, target_freq, min, max, efficiencies);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+
+- /* Limit frequency index to honor policy->min/max */
+- if (!cpufreq_is_in_limits(policy, idx) && efficiencies) {
++ /* Limit frequency index to honor min and max */
++ if (!cpufreq_is_in_limits(policy, min, max, idx) && efficiencies) {
+ efficiencies = false;
+ goto retry;
+ }
--- /dev/null
+From ac4e04d9e378f5aa826c2406ad7871ae1b6a6fb9 Mon Sep 17 00:00:00 2001
+From: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+Date: Tue, 29 Apr 2025 14:07:11 -0700
+Subject: cpufreq: intel_pstate: Unchecked MSR access in legacy mode
+
+From: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+
+commit ac4e04d9e378f5aa826c2406ad7871ae1b6a6fb9 upstream.
+
+When turbo mode is unavailable on a Skylake-X system, executing the
+command:
+
+ # echo 1 > /sys/devices/system/cpu/intel_pstate/no_turbo
+
+results in an unchecked MSR access error:
+
+ WRMSR to 0x199 (attempted to write 0x0000000100001300).
+
+This issue was reproduced on an OEM (Original Equipment Manufacturer)
+system and is not a common problem across all Skylake-X systems.
+
+This error occurs because the MSR 0x199 Turbo Engage Bit (bit 32) is set
+when turbo mode is disabled. The issue arises when intel_pstate fails to
+detect that turbo mode is disabled. Here intel_pstate relies on
+MSR_IA32_MISC_ENABLE bit 38 to determine the status of turbo mode.
+However, on this system, bit 38 is not set even when turbo mode is
+disabled.
+
+According to the Intel Software Developer's Manual (SDM), the BIOS sets
+this bit during platform initialization to enable or disable
+opportunistic processor performance operations. Logically, this bit
+should be set in such cases. However, the SDM also specifies that "OS
+and applications must use CPUID leaf 06H to detect processors with
+opportunistic processor performance operations enabled."
+
+Therefore, in addition to checking MSR_IA32_MISC_ENABLE bit 38, verify
+that CPUID.06H:EAX[1] is 0 to accurately determine if turbo mode is
+disabled.
+
+Fixes: 4521e1a0ce17 ("cpufreq: intel_pstate: Reflect current no_turbo state correctly")
+Signed-off-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+Cc: All applicable <stable@vger.kernel.org>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/cpufreq/intel_pstate.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -600,6 +600,9 @@ static bool turbo_is_disabled(void)
+ {
+ u64 misc_en;
+
++ if (!cpu_feature_enabled(X86_FEATURE_IDA))
++ return true;
++
+ rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
+
+ return !!(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
--- /dev/null
+From 5a2a6c428190f945c5cbf5791f72dbea83e97f66 Mon Sep 17 00:00:00 2001
+From: Benjamin Marzinski <bmarzins@redhat.com>
+Date: Tue, 15 Apr 2025 00:17:16 -0400
+Subject: dm: always update the array size in realloc_argv on success
+
+From: Benjamin Marzinski <bmarzins@redhat.com>
+
+commit 5a2a6c428190f945c5cbf5791f72dbea83e97f66 upstream.
+
+realloc_argv() was only updating the array size if it was called with
+old_argv already allocated. The first time it was called to create an
+argv array, it would allocate the array but return the array size as
+zero. dm_split_args() would think that it couldn't store any arguments
+in the array and would call realloc_argv() again, causing it to
+reallocate the initial slots (this time using GFP_KERNEL) and finally
+return a size. Aside from being wasteful, this could cause deadlocks on
+targets that need to process messages without starting new IO. Instead,
+realloc_argv should always update the allocated array size on success.
+
+Fixes: a0651926553c ("dm table: don't copy from a NULL pointer in realloc_argv()")
+Cc: stable@vger.kernel.org
+Signed-off-by: Benjamin Marzinski <bmarzins@redhat.com>
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm-table.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -523,9 +523,10 @@ static char **realloc_argv(unsigned int
+ gfp = GFP_NOIO;
+ }
+ argv = kmalloc_array(new_size, sizeof(*argv), gfp);
+- if (argv && old_argv) {
+- memcpy(argv, old_argv, *size * sizeof(*argv));
++ if (argv) {
+ *size = new_size;
++ if (old_argv)
++ memcpy(argv, old_argv, *size * sizeof(*argv));
+ }
+
+ kfree(old_argv);
--- /dev/null
+From a3d8f0a7f5e8b193db509c7191fefeed3533fc44 Mon Sep 17 00:00:00 2001
+From: LongPing Wei <weilongping@oppo.com>
+Date: Thu, 17 Apr 2025 11:07:38 +0800
+Subject: dm-bufio: don't schedule in atomic context
+
+From: LongPing Wei <weilongping@oppo.com>
+
+commit a3d8f0a7f5e8b193db509c7191fefeed3533fc44 upstream.
+
+A BUG was reported as below when CONFIG_DEBUG_ATOMIC_SLEEP and
+try_verify_in_tasklet are enabled.
+[ 129.444685][ T934] BUG: sleeping function called from invalid context at drivers/md/dm-bufio.c:2421
+[ 129.444723][ T934] in_atomic(): 1, irqs_disabled(): 0, non_block: 0, pid: 934, name: kworker/1:4
+[ 129.444740][ T934] preempt_count: 201, expected: 0
+[ 129.444756][ T934] RCU nest depth: 0, expected: 0
+[ 129.444781][ T934] Preemption disabled at:
+[ 129.444789][ T934] [<ffffffd816231900>] shrink_work+0x21c/0x248
+[ 129.445167][ T934] kernel BUG at kernel/sched/walt/walt_debug.c:16!
+[ 129.445183][ T934] Internal error: Oops - BUG: 00000000f2000800 [#1] PREEMPT SMP
+[ 129.445204][ T934] Skip md ftrace buffer dump for: 0x1609e0
+[ 129.447348][ T934] CPU: 1 PID: 934 Comm: kworker/1:4 Tainted: G W OE 6.6.56-android15-8-o-g6f82312b30b9-debug #1 1400000003000000474e5500b3187743670464e8
+[ 129.447362][ T934] Hardware name: Qualcomm Technologies, Inc. Parrot QRD, Alpha-M (DT)
+[ 129.447373][ T934] Workqueue: dm_bufio_cache shrink_work
+[ 129.447394][ T934] pstate: 60400005 (nZCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+[ 129.447406][ T934] pc : android_rvh_schedule_bug+0x0/0x8 [sched_walt_debug]
+[ 129.447435][ T934] lr : __traceiter_android_rvh_schedule_bug+0x44/0x6c
+[ 129.447451][ T934] sp : ffffffc0843dbc90
+[ 129.447459][ T934] x29: ffffffc0843dbc90 x28: ffffffffffffffff x27: 0000000000000c8b
+[ 129.447479][ T934] x26: 0000000000000040 x25: ffffff804b3d6260 x24: ffffffd816232b68
+[ 129.447497][ T934] x23: ffffff805171c5b4 x22: 0000000000000000 x21: ffffffd816231900
+[ 129.447517][ T934] x20: ffffff80306ba898 x19: 0000000000000000 x18: ffffffc084159030
+[ 129.447535][ T934] x17: 00000000d2b5dd1f x16: 00000000d2b5dd1f x15: ffffffd816720358
+[ 129.447554][ T934] x14: 0000000000000004 x13: ffffff89ef978000 x12: 0000000000000003
+[ 129.447572][ T934] x11: ffffffd817a823c4 x10: 0000000000000202 x9 : 7e779c5735de9400
+[ 129.447591][ T934] x8 : ffffffd81560d004 x7 : 205b5d3938373434 x6 : ffffffd8167397c8
+[ 129.447610][ T934] x5 : 0000000000000000 x4 : 0000000000000001 x3 : ffffffc0843db9e0
+[ 129.447629][ T934] x2 : 0000000000002f15 x1 : 0000000000000000 x0 : 0000000000000000
+[ 129.447647][ T934] Call trace:
+[ 129.447655][ T934] android_rvh_schedule_bug+0x0/0x8 [sched_walt_debug 1400000003000000474e550080cce8a8a78606b6]
+[ 129.447681][ T934] __might_resched+0x190/0x1a8
+[ 129.447694][ T934] shrink_work+0x180/0x248
+[ 129.447706][ T934] process_one_work+0x260/0x624
+[ 129.447718][ T934] worker_thread+0x28c/0x454
+[ 129.447729][ T934] kthread+0x118/0x158
+[ 129.447742][ T934] ret_from_fork+0x10/0x20
+[ 129.447761][ T934] Code: ???????? ???????? ???????? d2b5dd1f (d4210000)
+[ 129.447772][ T934] ---[ end trace 0000000000000000 ]---
+
+dm_bufio_lock will call spin_lock_bh when try_verify_in_tasklet
+is enabled, and __scan will be called in atomic context.
+
+Fixes: 7cd326747f46 ("dm bufio: remove dm_bufio_cond_resched()")
+Signed-off-by: LongPing Wei <weilongping@oppo.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm-bufio.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -68,6 +68,8 @@
+ #define LIST_DIRTY 1
+ #define LIST_SIZE 2
+
++#define SCAN_RESCHED_CYCLE 16
++
+ /*--------------------------------------------------------------*/
+
+ /*
+@@ -2414,7 +2416,12 @@ static void __scan(struct dm_bufio_clien
+
+ atomic_long_dec(&c->need_shrink);
+ freed++;
+- cond_resched();
++
++ if (unlikely(freed % SCAN_RESCHED_CYCLE == 0)) {
++ dm_bufio_unlock(c);
++ cond_resched();
++ dm_bufio_lock(c);
++ }
+ }
+ }
+ }
--- /dev/null
+From 0a533c3e4246c29d502a7e0fba0e86d80a906b04 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Tue, 22 Apr 2025 21:18:33 +0200
+Subject: dm-integrity: fix a warning on invalid table line
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit 0a533c3e4246c29d502a7e0fba0e86d80a906b04 upstream.
+
+If we use the 'B' mode and we have an invalid table line,
+cancel_delayed_work_sync would trigger a warning. This commit avoids the
+warning.
+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm-integrity.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -5173,7 +5173,7 @@ static void dm_integrity_dtr(struct dm_t
+ BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
+ BUG_ON(!list_empty(&ic->wait_list));
+
+- if (ic->mode == 'B')
++ if (ic->mode == 'B' && ic->bitmap_flush_work.work.func)
+ cancel_delayed_work_sync(&ic->bitmap_flush_work);
+ if (ic->metadata_wq)
+ destroy_workqueue(ic->metadata_wq);
--- /dev/null
+From 79af0604eb80ca1f86a1f265a0b1f9d4fccbc18f Mon Sep 17 00:00:00 2001
+From: Lijo Lazar <lijo.lazar@amd.com>
+Date: Mon, 21 Apr 2025 13:25:51 +0530
+Subject: drm/amdgpu: Fix offset for HDP remap in nbio v7.11
+
+From: Lijo Lazar <lijo.lazar@amd.com>
+
+commit 79af0604eb80ca1f86a1f265a0b1f9d4fccbc18f upstream.
+
+APUs in passthrough mode use HDP flush. 0x7F000 offset used for
+remapping HDP flush is mapped to VPE space which could get power gated.
+Use another unused offset in BIF space.
+
+Signed-off-by: Lijo Lazar <lijo.lazar@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit d8116a32cdbe456c7f511183eb9ab187e3d590fb)
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
+@@ -361,7 +361,7 @@ static void nbio_v7_11_get_clockgating_s
+ *flags |= AMD_CG_SUPPORT_BIF_LS;
+ }
+
+-#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
++#define MMIO_REG_HOLE_OFFSET 0x44000
+
+ static void nbio_v7_11_set_reg_remap(struct amdgpu_device *adev)
+ {
--- /dev/null
+From 5b1834d6202f86180e451ad1a2a8a193a1da18fc Mon Sep 17 00:00:00 2001
+From: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+Date: Fri, 18 Apr 2025 17:25:12 +0100
+Subject: drm/fdinfo: Protect against driver unbind
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+
+commit 5b1834d6202f86180e451ad1a2a8a193a1da18fc upstream.
+
+If we unbind a driver from the PCI device with an active DRM client,
+subsequent read of the fdinfo data associated with the file descriptor in
+question will not end well.
+
+Protect the path with a drm_dev_enter/exit() pair.
+
+Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+Cc: Christian König <christian.koenig@amd.com>
+Cc: Lucas De Marchi <lucas.demarchi@intel.com>
+Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Cc: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Fixes: 3f09a0cd4ea3 ("drm: Add common fdinfo helper")
+Cc: <stable@vger.kernel.org> # v6.5+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Link: https://lore.kernel.org/r/20250418162512.72324-1-tvrtko.ursulin@igalia.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/drm_file.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/gpu/drm/drm_file.c
++++ b/drivers/gpu/drm/drm_file.c
+@@ -938,6 +938,10 @@ void drm_show_fdinfo(struct seq_file *m,
+ struct drm_file *file = f->private_data;
+ struct drm_device *dev = file->minor->dev;
+ struct drm_printer p = drm_seq_file_printer(m);
++ int idx;
++
++ if (!drm_dev_enter(dev, &idx))
++ return;
+
+ drm_printf(&p, "drm-driver:\t%s\n", dev->driver->name);
+ drm_printf(&p, "drm-client-id:\t%llu\n", file->client_id);
+@@ -952,6 +956,8 @@ void drm_show_fdinfo(struct seq_file *m,
+
+ if (dev->driver->show_fdinfo)
+ dev->driver->show_fdinfo(&p, file);
++
++ drm_dev_exit(idx);
+ }
+ EXPORT_SYMBOL(drm_show_fdinfo);
+
--- /dev/null
+From bbe5679f30d7690a9b6838a583b9690ea73fe0e9 Mon Sep 17 00:00:00 2001
+From: Philipp Stanner <phasta@kernel.org>
+Date: Tue, 15 Apr 2025 14:19:00 +0200
+Subject: drm/nouveau: Fix WARN_ON in nouveau_fence_context_kill()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Philipp Stanner <phasta@kernel.org>
+
+commit bbe5679f30d7690a9b6838a583b9690ea73fe0e9 upstream.
+
+Nouveau is mostly designed in a way that it's expected that fences only
+ever get signaled through nouveau_fence_signal(). However, in at least
+one other place, nouveau_fence_done(), can signal fences, too. If that
+happens (race) a signaled fence remains in the pending list for a while,
+until it gets removed by nouveau_fence_update().
+
+Should nouveau_fence_context_kill() run in the meantime, this would be
+a bug because the function would attempt to set an error code on an
+already signaled fence.
+
+Have nouveau_fence_context_kill() check for a fence being signaled.
+
+Cc: stable@vger.kernel.org # v5.10+
+Fixes: ea13e5abf807 ("drm/nouveau: signal pending fences when channel has been killed")
+Suggested-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Philipp Stanner <phasta@kernel.org>
+Link: https://lore.kernel.org/r/20250415121900.55719-3-phasta@kernel.org
+Signed-off-by: Danilo Krummrich <dakr@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/nouveau/nouveau_fence.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
++++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
+@@ -90,7 +90,7 @@ nouveau_fence_context_kill(struct nouvea
+ while (!list_empty(&fctx->pending)) {
+ fence = list_entry(fctx->pending.next, typeof(*fence), head);
+
+- if (error)
++ if (error && !dma_fence_is_signaled_locked(&fence->base))
+ dma_fence_set_error(&fence->base, error);
+
+ if (nouveau_fence_signal(fence))
--- /dev/null
+From 32dce6b1949a696dc7abddc04de8cbe35c260217 Mon Sep 17 00:00:00 2001
+From: Janne Grunau <j@jannau.net>
+Date: Tue, 4 Mar 2025 20:12:14 +0100
+Subject: drm: Select DRM_KMS_HELPER from DRM_DEBUG_DP_MST_TOPOLOGY_REFS
+
+From: Janne Grunau <j@jannau.net>
+
+commit 32dce6b1949a696dc7abddc04de8cbe35c260217 upstream.
+
+Using "depends on" and "select" for the same Kconfig symbol is known to
+cause circular dependencies (cmp. "Kconfig recursive dependency
+limitations" in Documentation/kbuild/kconfig-language.rst).
+DRM drivers are selecting drm helpers so do the same for
+DRM_DEBUG_DP_MST_TOPOLOGY_REFS.
+Fixes following circular dependency reported on x86 for the downstream
+Asahi Linux tree:
+
+error: recursive dependency detected!
+ symbol DRM_KMS_HELPER is selected by DRM_GEM_SHMEM_HELPER
+ symbol DRM_GEM_SHMEM_HELPER is selected by RUST_DRM_GEM_SHMEM_HELPER
+ symbol RUST_DRM_GEM_SHMEM_HELPER is selected by DRM_ASAHI
+ symbol DRM_ASAHI depends on RUST
+ symbol RUST depends on CALL_PADDING
+ symbol CALL_PADDING depends on OBJTOOL
+ symbol OBJTOOL is selected by STACK_VALIDATION
+ symbol STACK_VALIDATION depends on UNWINDER_FRAME_POINTER
+ symbol UNWINDER_FRAME_POINTER is part of choice block at arch/x86/Kconfig.debug:224
+ symbol <choice> unknown is visible depending on UNWINDER_GUESS
+ symbol UNWINDER_GUESS prompt is visible depending on STACKDEPOT
+ symbol STACKDEPOT is selected by DRM_DEBUG_DP_MST_TOPOLOGY_REFS
+ symbol DRM_DEBUG_DP_MST_TOPOLOGY_REFS depends on DRM_KMS_HELPER
+
+Fixes: 12a280c72868 ("drm/dp_mst: Add topology ref history tracking for debugging")
+Cc: stable@vger.kernel.org
+Signed-off-by: Janne Grunau <j@jannau.net>
+Acked-by: Thomas Zimmermann <tzimmermann@suse.de>
+Link: https://lore.kernel.org/r/20250304-drm_debug_dp_mst_topo_kconfig-v1-1-e16fd152f258@jannau.net
+Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/Kconfig
++++ b/drivers/gpu/drm/Kconfig
+@@ -185,7 +185,7 @@ config DRM_DEBUG_DP_MST_TOPOLOGY_REFS
+ bool "Enable refcount backtrace history in the DP MST helpers"
+ depends on STACKTRACE_SUPPORT
+ select STACKDEPOT
+- depends on DRM_KMS_HELPER
++ select DRM_KMS_HELPER
+ depends on DEBUG_KERNEL
+ depends on EXPERT
+ help
--- /dev/null
+From 6dbe3c5418c4368e824bff6ae4889257dd544892 Mon Sep 17 00:00:00 2001
+From: Niravkumar L Rabara <niravkumar.l.rabara@altera.com>
+Date: Fri, 25 Apr 2025 07:26:40 -0700
+Subject: EDAC/altera: Set DDR and SDMMC interrupt mask before registration
+
+From: Niravkumar L Rabara <niravkumar.l.rabara@altera.com>
+
+commit 6dbe3c5418c4368e824bff6ae4889257dd544892 upstream.
+
+Mask DDR and SDMMC in probe function to avoid spurious interrupts before
+registration. Removed invalid register write to system manager.
+
+Fixes: 1166fde93d5b ("EDAC, altera: Add Arria10 ECC memory init functions")
+Signed-off-by: Niravkumar L Rabara <niravkumar.l.rabara@altera.com>
+Signed-off-by: Matthew Gerlach <matthew.gerlach@altera.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Acked-by: Dinh Nguyen <dinguyen@kernel.org>
+Cc: stable@kernel.org
+Link: https://lore.kernel.org/20250425142640.33125-3-matthew.gerlach@altera.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/edac/altera_edac.c | 7 ++++---
+ drivers/edac/altera_edac.h | 2 ++
+ 2 files changed, 6 insertions(+), 3 deletions(-)
+
+--- a/drivers/edac/altera_edac.c
++++ b/drivers/edac/altera_edac.c
+@@ -1005,9 +1005,6 @@ altr_init_a10_ecc_block(struct device_no
+ }
+ }
+
+- /* Interrupt mode set to every SBERR */
+- regmap_write(ecc_mgr_map, ALTR_A10_ECC_INTMODE_OFST,
+- ALTR_A10_ECC_INTMODE);
+ /* Enable ECC */
+ ecc_set_bits(ecc_ctrl_en_mask, (ecc_block_base +
+ ALTR_A10_ECC_CTRL_OFST));
+@@ -2127,6 +2124,10 @@ static int altr_edac_a10_probe(struct pl
+ return PTR_ERR(edac->ecc_mgr_map);
+ }
+
++ /* Set irq mask for DDR SBE to avoid any pending irq before registration */
++ regmap_write(edac->ecc_mgr_map, A10_SYSMGR_ECC_INTMASK_SET_OFST,
++ (A10_SYSMGR_ECC_INTMASK_SDMMCB | A10_SYSMGR_ECC_INTMASK_DDR0));
++
+ edac->irq_chip.name = pdev->dev.of_node->name;
+ edac->irq_chip.irq_mask = a10_eccmgr_irq_mask;
+ edac->irq_chip.irq_unmask = a10_eccmgr_irq_unmask;
+--- a/drivers/edac/altera_edac.h
++++ b/drivers/edac/altera_edac.h
+@@ -249,6 +249,8 @@ struct altr_sdram_mc_data {
+ #define A10_SYSMGR_ECC_INTMASK_SET_OFST 0x94
+ #define A10_SYSMGR_ECC_INTMASK_CLR_OFST 0x98
+ #define A10_SYSMGR_ECC_INTMASK_OCRAM BIT(1)
++#define A10_SYSMGR_ECC_INTMASK_SDMMCB BIT(16)
++#define A10_SYSMGR_ECC_INTMASK_DDR0 BIT(17)
+
+ #define A10_SYSMGR_ECC_INTSTAT_SERR_OFST 0x9C
+ #define A10_SYSMGR_ECC_INTSTAT_DERR_OFST 0xA0
--- /dev/null
+From 4fb7b8fceb0beebbe00712c3daf49ade0386076a Mon Sep 17 00:00:00 2001
+From: Niravkumar L Rabara <niravkumar.l.rabara@altera.com>
+Date: Fri, 25 Apr 2025 07:26:39 -0700
+Subject: EDAC/altera: Test the correct error reg offset
+
+From: Niravkumar L Rabara <niravkumar.l.rabara@altera.com>
+
+commit 4fb7b8fceb0beebbe00712c3daf49ade0386076a upstream.
+
+Test correct structure member, ecc_cecnt_offset, before using it.
+
+ [ bp: Massage commit message. ]
+
+Fixes: 73bcc942f427 ("EDAC, altera: Add Arria10 EDAC support")
+Signed-off-by: Niravkumar L Rabara <niravkumar.l.rabara@altera.com>
+Signed-off-by: Matthew Gerlach <matthew.gerlach@altera.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Acked-by: Dinh Nguyen <dinguyen@kernel.org>
+Cc: stable@kernel.org
+Link: https://lore.kernel.org/20250425142640.33125-2-matthew.gerlach@altera.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/edac/altera_edac.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/edac/altera_edac.c
++++ b/drivers/edac/altera_edac.c
+@@ -99,7 +99,7 @@ static irqreturn_t altr_sdram_mc_err_han
+ if (status & priv->ecc_stat_ce_mask) {
+ regmap_read(drvdata->mc_vbase, priv->ecc_saddr_offset,
+ &err_addr);
+- if (priv->ecc_uecnt_offset)
++ if (priv->ecc_cecnt_offset)
+ regmap_read(drvdata->mc_vbase, priv->ecc_cecnt_offset,
+ &err_count);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, err_count,
--- /dev/null
+From b1852c5de2f2a37dd4462f7837c9e3e678f9e546 Mon Sep 17 00:00:00 2001
+From: Clark Wang <xiaoning.wang@nxp.com>
+Date: Mon, 21 Apr 2025 14:23:41 +0800
+Subject: i2c: imx-lpi2c: Fix clock count when probe defers
+
+From: Clark Wang <xiaoning.wang@nxp.com>
+
+commit b1852c5de2f2a37dd4462f7837c9e3e678f9e546 upstream.
+
+Deferred probe with pm_runtime_put() may delay clock disable, causing
+incorrect clock usage count. Use pm_runtime_put_sync() to ensure the
+clock is disabled immediately.
+
+Fixes: 13d6eb20fc79 ("i2c: imx-lpi2c: add runtime pm support")
+Signed-off-by: Clark Wang <xiaoning.wang@nxp.com>
+Signed-off-by: Carlos Song <carlos.song@nxp.com>
+Cc: <stable@vger.kernel.org> # v4.16+
+Link: https://lore.kernel.org/r/20250421062341.2471922-1-carlos.song@nxp.com
+Signed-off-by: Andi Shyti <andi.shyti@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/i2c/busses/i2c-imx-lpi2c.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
++++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
+@@ -652,9 +652,9 @@ static int lpi2c_imx_probe(struct platfo
+ return 0;
+
+ rpm_disable:
+- pm_runtime_put(&pdev->dev);
+- pm_runtime_disable(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
++ pm_runtime_put_sync(&pdev->dev);
++ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+ }
--- /dev/null
+From 8dee308e4c01dea48fc104d37f92d5b58c50b96c Mon Sep 17 00:00:00 2001
+From: Pavel Paklov <Pavel.Paklov@cyberprotect.ru>
+Date: Tue, 25 Mar 2025 09:22:44 +0000
+Subject: iommu/amd: Fix potential buffer overflow in parse_ivrs_acpihid
+
+From: Pavel Paklov <Pavel.Paklov@cyberprotect.ru>
+
+commit 8dee308e4c01dea48fc104d37f92d5b58c50b96c upstream.
+
+There is a string parsing logic error which can lead to an overflow of hid
+or uid buffers. Comparing ACPIID_LEN against a total string length doesn't
+take into account the lengths of individual hid and uid buffers so the
+check is insufficient in some cases. For example if the length of hid
+string is 4 and the length of the uid string is 260, the length of str
+will be equal to ACPIID_LEN + 1 but uid string will overflow uid buffer
+which size is 256.
+
+The same applies to the hid string with length 13 and uid string with
+length 250.
+
+Check the length of hid and uid strings separately to prevent
+buffer overflow.
+
+Found by Linux Verification Center (linuxtesting.org) with SVACE.
+
+Fixes: ca3bf5d47cec ("iommu/amd: Introduces ivrs_acpihid kernel parameter")
+Cc: stable@vger.kernel.org
+Signed-off-by: Pavel Paklov <Pavel.Paklov@cyberprotect.ru>
+Link: https://lore.kernel.org/r/20250325092259.392844-1-Pavel.Paklov@cyberprotect.ru
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iommu/amd/init.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/drivers/iommu/amd/init.c
++++ b/drivers/iommu/amd/init.c
+@@ -3655,6 +3655,14 @@ found:
+ while (*uid == '0' && *(uid + 1))
+ uid++;
+
++ if (strlen(hid) >= ACPIHID_HID_LEN) {
++ pr_err("Invalid command line: hid is too long\n");
++ return 1;
++ } else if (strlen(uid) >= ACPIHID_UID_LEN) {
++ pr_err("Invalid command line: uid is too long\n");
++ return 1;
++ }
++
+ i = early_acpihid_map_size++;
+ memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
+ memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
--- /dev/null
+From b00d24997a11c10d3e420614f0873b83ce358a34 Mon Sep 17 00:00:00 2001
+From: Nicolin Chen <nicolinc@nvidia.com>
+Date: Tue, 15 Apr 2025 11:56:20 -0700
+Subject: iommu/arm-smmu-v3: Fix iommu_device_probe bug due to duplicated stream ids
+
+From: Nicolin Chen <nicolinc@nvidia.com>
+
+commit b00d24997a11c10d3e420614f0873b83ce358a34 upstream.
+
+ASPEED VGA card has two built-in devices:
+ 0008:06:00.0 PCI bridge: ASPEED Technology, Inc. AST1150 PCI-to-PCI Bridge (rev 06)
+ 0008:07:00.0 VGA compatible controller: ASPEED Technology, Inc. ASPEED Graphics Family (rev 52)
+
+Its topology looks like this:
+ +-[0008:00]---00.0-[01-09]--+-00.0-[02-09]--+-00.0-[03]----00.0 Sandisk Corp Device 5017
+ | +-01.0-[04]--
+ | +-02.0-[05]----00.0 NVIDIA Corporation Device
+ | +-03.0-[06-07]----00.0-[07]----00.0 ASPEED Technology, Inc. ASPEED Graphics Family
+ | +-04.0-[08]----00.0 Renesas Technology Corp. uPD720201 USB 3.0 Host Controller
+ | \-05.0-[09]----00.0 Realtek Semiconductor Co., Ltd. RTL8111/8168/8411 PCI Express Gigabit Ethernet Controller
+ \-00.1 PMC-Sierra Inc. Device 4028
+
+The IORT logic populates two identical IDs into the fwspec->ids array via
+DMA aliasing in iort_pci_iommu_init() called by pci_for_each_dma_alias().
+
+Though the SMMU driver had been able to handle this situation since commit
+563b5cbe334e ("iommu/arm-smmu-v3: Cope with duplicated Stream IDs"), that
+got broken by the later commit cdf315f907d4 ("iommu/arm-smmu-v3: Maintain
+a SID->device structure"), which ended up with allocating separate streams
+with the same stuffing.
+
+On a kernel prior to v6.15-rc1, there has been an overlooked warning:
+ pci 0008:07:00.0: vgaarb: setting as boot VGA device
+ pci 0008:07:00.0: vgaarb: bridge control possible
+ pci 0008:07:00.0: vgaarb: VGA device added: decodes=io+mem,owns=none,locks=none
+ pcieport 0008:06:00.0: Adding to iommu group 14
+ ast 0008:07:00.0: stream 67328 already in tree <===== WARNING
+ ast 0008:07:00.0: enabling device (0002 -> 0003)
+ ast 0008:07:00.0: Using default configuration
+ ast 0008:07:00.0: AST 2600 detected
+ ast 0008:07:00.0: [drm] Using analog VGA
+ ast 0008:07:00.0: [drm] dram MCLK=396 Mhz type=1 bus_width=16
+ [drm] Initialized ast 0.1.0 for 0008:07:00.0 on minor 0
+ ast 0008:07:00.0: [drm] fb0: astdrmfb frame buffer device
+
+With v6.15-rc, since the commit bcb81ac6ae3c ("iommu: Get DT/ACPI parsing
+into the proper probe path"), the error returned with the warning is moved
+to the SMMU device probe flow:
+ arm_smmu_probe_device+0x15c/0x4c0
+ __iommu_probe_device+0x150/0x4f8
+ probe_iommu_group+0x44/0x80
+ bus_for_each_dev+0x7c/0x100
+ bus_iommu_probe+0x48/0x1a8
+ iommu_device_register+0xb8/0x178
+ arm_smmu_device_probe+0x1350/0x1db0
+which then fails the entire SMMU driver probe:
+ pci 0008:06:00.0: Adding to iommu group 21
+ pci 0008:07:00.0: stream 67328 already in tree
+ arm-smmu-v3 arm-smmu-v3.9.auto: Failed to register iommu
+ arm-smmu-v3 arm-smmu-v3.9.auto: probe with driver arm-smmu-v3 failed with error -22
+
+Since SMMU driver had been already expecting a potential duplicated Stream
+ID in arm_smmu_install_ste_for_dev(), change the arm_smmu_insert_master()
+routine to ignore a duplicated ID from the fwspec->sids array as well.
+
+Note: this has been failing the iommu_device_probe() since 2021, although a
+recent iommu commit in v6.15-rc1 that moves iommu_device_probe() started to
+fail the SMMU driver probe. Since nobody has cared about DMA Alias support,
+leave that as it was but fix the fundamental iommu_device_probe() breakage.
+
+Fixes: cdf315f907d4 ("iommu/arm-smmu-v3: Maintain a SID->device structure")
+Cc: stable@vger.kernel.org
+Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
+Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
+Link: https://lore.kernel.org/r/20250415185620.504299-1-nicolinc@nvidia.com
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 19 +++++++++++++++----
+ 1 file changed, 15 insertions(+), 4 deletions(-)
+
+--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
++++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+@@ -3220,6 +3220,7 @@ static int arm_smmu_insert_master(struct
+ mutex_lock(&smmu->streams_mutex);
+ for (i = 0; i < fwspec->num_ids; i++) {
+ struct arm_smmu_stream *new_stream = &master->streams[i];
++ struct rb_node *existing;
+ u32 sid = fwspec->ids[i];
+
+ new_stream->id = sid;
+@@ -3230,10 +3231,20 @@ static int arm_smmu_insert_master(struct
+ break;
+
+ /* Insert into SID tree */
+- if (rb_find_add(&new_stream->node, &smmu->streams,
+- arm_smmu_streams_cmp_node)) {
+- dev_warn(master->dev, "stream %u already in tree\n",
+- sid);
++ existing = rb_find_add(&new_stream->node, &smmu->streams,
++ arm_smmu_streams_cmp_node);
++ if (existing) {
++ struct arm_smmu_master *existing_master =
++ rb_entry(existing, struct arm_smmu_stream, node)
++ ->master;
++
++ /* Bridged PCI devices may end up with duplicated IDs */
++ if (existing_master == master)
++ continue;
++
++ dev_warn(master->dev,
++ "stream %u already in tree from dev %s\n", sid,
++ dev_name(existing_master->dev));
+ ret = -EINVAL;
+ break;
+ }
--- /dev/null
+From 12f78021973ae422564b234136c702a305932d73 Mon Sep 17 00:00:00 2001
+From: Balbir Singh <balbirs@nvidia.com>
+Date: Sat, 12 Apr 2025 10:23:54 +1000
+Subject: iommu/arm-smmu-v3: Fix pgsize_bit for sva domains
+
+From: Balbir Singh <balbirs@nvidia.com>
+
+commit 12f78021973ae422564b234136c702a305932d73 upstream.
+
+UBSan caught a bug with IOMMU SVA domains, where the reported exponent
+value in __arm_smmu_tlb_inv_range() was >= 64.
+__arm_smmu_tlb_inv_range() uses the domain's pgsize_bitmap to compute
+the number of pages to invalidate and the invalidation range. Currently
+arm_smmu_sva_domain_alloc() does not setup the iommu domain's
+pgsize_bitmap. This leads to __ffs() on the value returning 64 and that
+leads to undefined behaviour w.r.t. shift operations
+
+Fix this by initializing the iommu_domain's pgsize_bitmap to PAGE_SIZE.
+Effectively the code needs to use the smallest page size for
+invalidation
+
+Cc: stable@vger.kernel.org
+Fixes: eb6c97647be2 ("iommu/arm-smmu-v3: Avoid constructing invalid range commands")
+Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Balbir Singh <balbirs@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+Cc: Jean-Philippe Brucker <jean-philippe@linaro.org>
+Cc: Will Deacon <will@kernel.org>
+Cc: Robin Murphy <robin.murphy@arm.com>
+Cc: Joerg Roedel <joro@8bytes.org>
+Cc: Jason Gunthorpe <jgg@ziepe.ca>
+Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
+Link: https://lore.kernel.org/r/20250412002354.3071449-1-balbirs@nvidia.com
+Signed-off-by: Will Deacon <will@kernel.org>
+---
+ drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
++++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+@@ -397,6 +397,12 @@ struct iommu_domain *arm_smmu_sva_domain
+ return ERR_CAST(smmu_domain);
+ smmu_domain->domain.type = IOMMU_DOMAIN_SVA;
+ smmu_domain->domain.ops = &arm_smmu_sva_domain_ops;
++
++ /*
++ * Choose page_size as the leaf page size for invalidation when
++ * ARM_SMMU_FEAT_RANGE_INV is present
++ */
++ smmu_domain->domain.pgsize_bitmap = PAGE_SIZE;
+ smmu_domain->smmu = smmu;
+
+ ret = xa_alloc(&arm_smmu_asid_xa, &asid, smmu_domain,
--- /dev/null
+From 30a3f2f3e4bd6335b727c83c08a982d969752bc1 Mon Sep 17 00:00:00 2001
+From: Nicolin Chen <nicolinc@nvidia.com>
+Date: Mon, 14 Apr 2025 12:16:35 -0700
+Subject: iommu: Fix two issues in iommu_copy_struct_from_user()
+
+From: Nicolin Chen <nicolinc@nvidia.com>
+
+commit 30a3f2f3e4bd6335b727c83c08a982d969752bc1 upstream.
+
+In the review for iommu_copy_struct_to_user() helper, Matt pointed out that
+a NULL pointer should be rejected prior to dereferencing it:
+https://lore.kernel.org/all/86881827-8E2D-461C-BDA3-FA8FD14C343C@nvidia.com
+
+And Alok pointed out a typo at the same time:
+https://lore.kernel.org/all/480536af-6830-43ce-a327-adbd13dc3f1d@oracle.com
+
+Since both issues were copied from iommu_copy_struct_from_user(), fix them
+first in the current header.
+
+Fixes: e9d36c07bb78 ("iommu: Add iommu_copy_struct_from_user helper")
+Cc: stable@vger.kernel.org
+Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
+Reviewed-by: Kevin Tian <kevin.tian@intel.com>
+Acked-by: Alok Tiwari <alok.a.tiwari@oracle.com>
+Reviewed-by: Matthew R. Ochs <mochs@nvidia.com>
+Link: https://lore.kernel.org/r/20250414191635.450472-1-nicolinc@nvidia.com
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/iommu.h | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/include/linux/iommu.h
++++ b/include/linux/iommu.h
+@@ -423,10 +423,10 @@ static inline int __iommu_copy_struct_fr
+ void *dst_data, const struct iommu_user_data *src_data,
+ unsigned int data_type, size_t data_len, size_t min_len)
+ {
+- if (src_data->type != data_type)
+- return -EINVAL;
+ if (WARN_ON(!dst_data || !src_data))
+ return -EINVAL;
++ if (src_data->type != data_type)
++ return -EINVAL;
+ if (src_data->len < min_len || data_len < src_data->len)
+ return -EINVAL;
+ return copy_struct_from_user(dst_data, data_len, src_data->uptr,
+@@ -439,8 +439,8 @@ static inline int __iommu_copy_struct_fr
+ * include/uapi/linux/iommufd.h
+ * @user_data: Pointer to a struct iommu_user_data for user space data info
+ * @data_type: The data type of the @kdst. Must match with @user_data->type
+- * @min_last: The last memember of the data structure @kdst points in the
+- * initial version.
++ * @min_last: The last member of the data structure @kdst points in the initial
++ * version.
+ * Return 0 for success, otherwise -error.
+ */
+ #define iommu_copy_struct_from_user(kdst, user_data, data_type, min_last) \
--- /dev/null
+From 2c8a7c66c90832432496616a9a3c07293f1364f3 Mon Sep 17 00:00:00 2001
+From: Mingcong Bai <jeffbai@aosc.io>
+Date: Fri, 18 Apr 2025 11:16:42 +0800
+Subject: iommu/vt-d: Apply quirk_iommu_igfx for 8086:0044 (QM57/QS57)
+
+From: Mingcong Bai <jeffbai@aosc.io>
+
+commit 2c8a7c66c90832432496616a9a3c07293f1364f3 upstream.
+
+On the Lenovo ThinkPad X201, when Intel VT-d is enabled in the BIOS, the
+kernel boots with errors related to DMAR, the graphical interface appeared
+quite choppy, and the system resets erratically within a minute after it
+booted:
+
+DMAR: DRHD: handling fault status reg 3
+DMAR: [DMA Write NO_PASID] Request device [00:02.0] fault addr 0xb97ff000
+[fault reason 0x05] PTE Write access is not set
+
+Upon comparing boot logs with VT-d on/off, I found that the Intel Calpella
+quirk (`quirk_calpella_no_shadow_gtt()') correctly applied the igfx IOMMU
+disable quirk:
+
+pci 0000:00:00.0: DMAR: BIOS has allocated no shadow GTT; disabling IOMMU
+for graphics
+
+Whereas with VT-d on, it went into the "else" branch, which then
+triggered the DMAR handling fault above:
+
+... else if (!disable_igfx_iommu) {
+ /* we have to ensure the gfx device is idle before we flush */
+ pci_info(dev, "Disabling batched IOTLB flush on Ironlake\n");
+ iommu_set_dma_strict();
+}
+
+Now, this is not exactly scientific, but moving 0x0044 to quirk_iommu_igfx
+seems to have fixed the aforementioned issue. Running a few `git blame'
+runs on the function, I have found that the quirk was originally
+introduced as a fix specific to ThinkPad X201:
+
+commit 9eecabcb9a92 ("intel-iommu: Abort IOMMU setup for igfx if BIOS gave
+no shadow GTT space")
+
+Which was later revised twice to the "else" branch we saw above:
+
+- 2011: commit 6fbcfb3e467a ("intel-iommu: Workaround IOTLB hang on
+ Ironlake GPU")
+- 2024: commit ba00196ca41c ("iommu/vt-d: Decouple igfx_off from graphic
+ identity mapping")
+
+I'm uncertain whether further testing on this particular laptop was done
+in 2011 and (honestly I'm not sure) 2024, but I would be happy to do
+some distro-specific testing if that's what would be required to verify
+this patch.
+
+P.S., I also see IDs 0x0040, 0x0062, and 0x006a listed under the same
+`quirk_calpella_no_shadow_gtt()' quirk, but I'm not sure how similar these
+chipsets are (if they share the same issue with VT-d or even, indeed, if
+this issue is specific to a bug in the Lenovo BIOS). With regards to
+0x0062, it seems to be a Centrino wireless card, but not a chipset?
+
+I have also listed a couple (distro and kernel) bug reports below as
+references (some of them are from 7-8 years ago!), as they seem to be
+similar issue found on different Westmere/Ironlake, Haswell, and Broadwell
+hardware setups.
+
+Cc: stable@vger.kernel.org
+Fixes: 6fbcfb3e467a ("intel-iommu: Workaround IOTLB hang on Ironlake GPU")
+Fixes: ba00196ca41c ("iommu/vt-d: Decouple igfx_off from graphic identity mapping")
+Link: https://groups.google.com/g/qubes-users/c/4NP4goUds2c?pli=1
+Link: https://bugs.archlinux.org/task/65362
+Link: https://bbs.archlinux.org/viewtopic.php?id=230323
+Reported-by: Wenhao Sun <weiguangtwk@outlook.com>
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=197029
+Signed-off-by: Mingcong Bai <jeffbai@aosc.io>
+Link: https://lore.kernel.org/r/20250415133330.12528-1-jeffbai@aosc.io
+Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iommu/intel/iommu.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -4666,6 +4666,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_I
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_igfx);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_igfx);
+
++/* QM57/QS57 integrated gfx malfunctions with dmar */
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_iommu_igfx);
++
+ /* Broadwell igfx malfunctions with dmar */
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1606, quirk_iommu_igfx);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160B, quirk_iommu_igfx);
+@@ -4743,7 +4746,6 @@ static void quirk_calpella_no_shadow_gtt
+ }
+ }
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
+-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
+
--- /dev/null
+From 38a05c0b87833f5b188ae43b428b1f792df2b384 Mon Sep 17 00:00:00 2001
+From: Stephan Gerhold <stephan.gerhold@linaro.org>
+Date: Fri, 2 May 2025 13:22:28 +0200
+Subject: irqchip/qcom-mpm: Prevent crash when trying to handle non-wake GPIOs
+
+From: Stephan Gerhold <stephan.gerhold@linaro.org>
+
+commit 38a05c0b87833f5b188ae43b428b1f792df2b384 upstream.
+
+On Qualcomm chipsets not all GPIOs are wakeup capable. Those GPIOs do not
+have a corresponding MPM pin and should not be handled inside the MPM
+driver. The IRQ domain hierarchy is always applied, so it's required to
+explicitly disconnect the hierarchy for those. The pinctrl-msm driver marks
+these with GPIO_NO_WAKE_IRQ. qcom-pdc has a check for this, but
+irq-qcom-mpm is currently missing the check. This is causing crashes when
+setting up interrupts for non-wake GPIOs:
+
+ root@rb1:~# gpiomon -c gpiochip1 10
+ irq: IRQ159: trimming hierarchy from :soc@0:interrupt-controller@f200000-1
+ Unable to handle kernel paging request at virtual address ffff8000a1dc3820
+ Hardware name: Qualcomm Technologies, Inc. Robotics RB1 (DT)
+ pc : mpm_set_type+0x80/0xcc
+ lr : mpm_set_type+0x5c/0xcc
+ Call trace:
+ mpm_set_type+0x80/0xcc (P)
+ qcom_mpm_set_type+0x64/0x158
+ irq_chip_set_type_parent+0x20/0x38
+ msm_gpio_irq_set_type+0x50/0x530
+ __irq_set_trigger+0x60/0x184
+ __setup_irq+0x304/0x6bc
+ request_threaded_irq+0xc8/0x19c
+ edge_detector_setup+0x260/0x364
+ linereq_create+0x420/0x5a8
+ gpio_ioctl+0x2d4/0x6c0
+
+Fix this by copying the check for GPIO_NO_WAKE_IRQ from qcom-pdc.c, so that
+MPM is removed entirely from the hierarchy for non-wake GPIOs.
+
+Fixes: a6199bb514d8 ("irqchip: Add Qualcomm MPM controller driver")
+Reported-by: Alexey Klimov <alexey.klimov@linaro.org>
+Signed-off-by: Stephan Gerhold <stephan.gerhold@linaro.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Alexey Klimov <alexey.klimov@linaro.org>
+Reviewed-by: Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/all/20250502-irq-qcom-mpm-fix-no-wake-v1-1-8a1eafcd28d4@linaro.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/irqchip/irq-qcom-mpm.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/irqchip/irq-qcom-mpm.c
++++ b/drivers/irqchip/irq-qcom-mpm.c
+@@ -227,6 +227,9 @@ static int qcom_mpm_alloc(struct irq_dom
+ if (ret)
+ return ret;
+
++ if (pin == GPIO_NO_WAKE_IRQ)
++ return irq_domain_disconnect_hierarchy(domain, virq);
++
+ ret = irq_domain_set_hwirq_and_chip(domain, virq, pin,
+ &qcom_mpm_chip, priv);
+ if (ret)
--- /dev/null
+From e86e9134e1d1c90a960dd57f59ce574d27b9a124 Mon Sep 17 00:00:00 2001
+From: Sean Heelan <seanheelan@gmail.com>
+Date: Sat, 19 Apr 2025 19:59:28 +0100
+Subject: ksmbd: fix use-after-free in kerberos authentication
+
+From: Sean Heelan <seanheelan@gmail.com>
+
+commit e86e9134e1d1c90a960dd57f59ce574d27b9a124 upstream.
+
+Setting sess->user = NULL was introduced to fix the dangling pointer
+created by ksmbd_free_user. However, it is possible another thread could
+be operating on the session and make use of sess->user after it has been
+passed to ksmbd_free_user but before sess->user is set to NULL.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Sean Heelan <seanheelan@gmail.com>
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/auth.c | 14 +++++++++++++-
+ fs/smb/server/smb2pdu.c | 5 -----
+ 2 files changed, 13 insertions(+), 6 deletions(-)
+
+--- a/fs/smb/server/auth.c
++++ b/fs/smb/server/auth.c
+@@ -550,7 +550,19 @@ int ksmbd_krb5_authenticate(struct ksmbd
+ retval = -ENOMEM;
+ goto out;
+ }
+- sess->user = user;
++
++ if (!sess->user) {
++ /* First successful authentication */
++ sess->user = user;
++ } else {
++ if (!ksmbd_compare_user(sess->user, user)) {
++ ksmbd_debug(AUTH, "different user tried to reuse session\n");
++ retval = -EPERM;
++ ksmbd_free_user(user);
++ goto out;
++ }
++ ksmbd_free_user(user);
++ }
+
+ memcpy(sess->sess_key, resp->payload, resp->session_key_len);
+ memcpy(out_blob, resp->payload + resp->session_key_len,
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -1600,11 +1600,6 @@ static int krb5_authenticate(struct ksmb
+ if (prev_sess_id && prev_sess_id != sess->id)
+ destroy_previous_session(conn, sess->user, prev_sess_id);
+
+- if (sess->state == SMB2_SESSION_VALID) {
+- ksmbd_free_user(sess->user);
+- sess->user = NULL;
+- }
+-
+ retval = ksmbd_krb5_authenticate(sess, in_blob, in_len,
+ out_blob, &out_len);
+ if (retval) {
--- /dev/null
+From a1f46c99d9ea411f9bf30025b912d881d36fc709 Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Thu, 17 Apr 2025 10:10:15 +0900
+Subject: ksmbd: fix use-after-free in ksmbd_session_rpc_open
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit a1f46c99d9ea411f9bf30025b912d881d36fc709 upstream.
+
+A UAF issue can occur due to a race condition between
+ksmbd_session_rpc_open() and __session_rpc_close().
+Add rpc_lock to the session to protect it.
+
+Cc: stable@vger.kernel.org
+Reported-by: Norbert Szetei <norbert@doyensec.com>
+Tested-by: Norbert Szetei <norbert@doyensec.com>
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/mgmt/user_session.c | 20 ++++++++++++++------
+ fs/smb/server/mgmt/user_session.h | 1 +
+ 2 files changed, 15 insertions(+), 6 deletions(-)
+
+--- a/fs/smb/server/mgmt/user_session.c
++++ b/fs/smb/server/mgmt/user_session.c
+@@ -59,10 +59,12 @@ static void ksmbd_session_rpc_clear_list
+ struct ksmbd_session_rpc *entry;
+ long index;
+
++ down_write(&sess->rpc_lock);
+ xa_for_each(&sess->rpc_handle_list, index, entry) {
+ xa_erase(&sess->rpc_handle_list, index);
+ __session_rpc_close(sess, entry);
+ }
++ up_write(&sess->rpc_lock);
+
+ xa_destroy(&sess->rpc_handle_list);
+ }
+@@ -92,7 +94,7 @@ int ksmbd_session_rpc_open(struct ksmbd_
+ {
+ struct ksmbd_session_rpc *entry, *old;
+ struct ksmbd_rpc_command *resp;
+- int method;
++ int method, id;
+
+ method = __rpc_method(rpc_name);
+ if (!method)
+@@ -102,26 +104,29 @@ int ksmbd_session_rpc_open(struct ksmbd_
+ if (!entry)
+ return -ENOMEM;
+
++ down_read(&sess->rpc_lock);
+ entry->method = method;
+- entry->id = ksmbd_ipc_id_alloc();
+- if (entry->id < 0)
++ entry->id = id = ksmbd_ipc_id_alloc();
++ if (id < 0)
+ goto free_entry;
+- old = xa_store(&sess->rpc_handle_list, entry->id, entry, KSMBD_DEFAULT_GFP);
++ old = xa_store(&sess->rpc_handle_list, id, entry, KSMBD_DEFAULT_GFP);
+ if (xa_is_err(old))
+ goto free_id;
+
+- resp = ksmbd_rpc_open(sess, entry->id);
++ resp = ksmbd_rpc_open(sess, id);
+ if (!resp)
+ goto erase_xa;
+
++ up_read(&sess->rpc_lock);
+ kvfree(resp);
+- return entry->id;
++ return id;
+ erase_xa:
+ xa_erase(&sess->rpc_handle_list, entry->id);
+ free_id:
+ ksmbd_rpc_id_free(entry->id);
+ free_entry:
+ kfree(entry);
++ up_read(&sess->rpc_lock);
+ return -EINVAL;
+ }
+
+@@ -129,9 +134,11 @@ void ksmbd_session_rpc_close(struct ksmb
+ {
+ struct ksmbd_session_rpc *entry;
+
++ down_write(&sess->rpc_lock);
+ entry = xa_erase(&sess->rpc_handle_list, id);
+ if (entry)
+ __session_rpc_close(sess, entry);
++ up_write(&sess->rpc_lock);
+ }
+
+ int ksmbd_session_rpc_method(struct ksmbd_session *sess, int id)
+@@ -439,6 +446,7 @@ static struct ksmbd_session *__session_c
+ sess->sequence_number = 1;
+ rwlock_init(&sess->tree_conns_lock);
+ atomic_set(&sess->refcnt, 2);
++ init_rwsem(&sess->rpc_lock);
+
+ ret = __init_smb2_session(sess);
+ if (ret)
+--- a/fs/smb/server/mgmt/user_session.h
++++ b/fs/smb/server/mgmt/user_session.h
+@@ -63,6 +63,7 @@ struct ksmbd_session {
+ rwlock_t tree_conns_lock;
+
+ atomic_t refcnt;
++ struct rw_semaphore rpc_lock;
+ };
+
+ static inline int test_session_flag(struct ksmbd_session *sess, int bit)
--- /dev/null
+From 2fc9feff45d92a92cd5f96487655d5be23fb7e2b Mon Sep 17 00:00:00 2001
+From: Sean Heelan <seanheelan@gmail.com>
+Date: Mon, 21 Apr 2025 15:39:29 +0000
+Subject: ksmbd: fix use-after-free in session logoff
+
+From: Sean Heelan <seanheelan@gmail.com>
+
+commit 2fc9feff45d92a92cd5f96487655d5be23fb7e2b upstream.
+
+The sess->user object can currently be in use by another thread, for
+example if another connection has sent a session setup request to
+bind to the session being free'd. The handler for that connection could
+be in the smb2_sess_setup function which makes use of sess->user.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Sean Heelan <seanheelan@gmail.com>
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/smb2pdu.c | 4 ----
+ 1 file changed, 4 deletions(-)
+
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -2240,10 +2240,6 @@ int smb2_session_logoff(struct ksmbd_wor
+ sess->state = SMB2_SESSION_EXPIRED;
+ up_write(&conn->session_lock);
+
+- if (sess->user) {
+- ksmbd_free_user(sess->user);
+- sess->user = NULL;
+- }
+ ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_SETUP);
+
+ rsp->StructureSize = cpu_to_le16(4);
--- /dev/null
+From 06eaa824fd239edd1eab2754f29b2d03da313003 Mon Sep 17 00:00:00 2001
+From: Wei Yang <richard.weiyang@gmail.com>
+Date: Tue, 18 Mar 2025 07:19:46 +0000
+Subject: mm/memblock: pass size instead of end to memblock_set_node()
+
+From: Wei Yang <richard.weiyang@gmail.com>
+
+commit 06eaa824fd239edd1eab2754f29b2d03da313003 upstream.
+
+The second parameter of memblock_set_node() is size instead of end.
+
+Since it iterates from lower address to higher address, finally the node
+id is correct. But during the process, some of them are wrong.
+
+Pass size instead of end.
+
+Fixes: 61167ad5fecd ("mm: pass nid to reserve_bootmem_region()")
+Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
+CC: Mike Rapoport <rppt@kernel.org>
+CC: Yajun Deng <yajun.deng@linux.dev>
+CC: stable@vger.kernel.org
+Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>
+Link: https://lore.kernel.org/r/20250318071948.23854-2-richard.weiyang@gmail.com
+Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/memblock.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/memblock.c
++++ b/mm/memblock.c
+@@ -2173,7 +2173,7 @@ static void __init memmap_init_reserved_
+ if (memblock_is_nomap(region))
+ reserve_bootmem_region(start, end, nid);
+
+- memblock_set_node(start, end, &memblock.reserved, nid);
++ memblock_set_node(start, region->size, &memblock.reserved, nid);
+ }
+
+ /*
--- /dev/null
+From eac8ea8736ccc09513152d970eb2a42ed78e87e8 Mon Sep 17 00:00:00 2001
+From: Wei Yang <richard.weiyang@gmail.com>
+Date: Tue, 18 Mar 2025 07:19:47 +0000
+Subject: mm/memblock: repeat setting reserved region nid if array is doubled
+
+From: Wei Yang <richard.weiyang@gmail.com>
+
+commit eac8ea8736ccc09513152d970eb2a42ed78e87e8 upstream.
+
+Commit 61167ad5fecd ("mm: pass nid to reserve_bootmem_region()") introduced
+a way to set the nid for all reserved regions.
+
+But there is a corner case it will leave some region with invalid nid.
+When memblock_set_node() doubles the array of memblock.reserved, it may
+lead to a new reserved region before current position. The new region
+will be left with an invalid node id.
+
+Repeat the process when detecting it.
+
+Fixes: 61167ad5fecd ("mm: pass nid to reserve_bootmem_region()")
+Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
+CC: Mike Rapoport <rppt@kernel.org>
+CC: Yajun Deng <yajun.deng@linux.dev>
+CC: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20250318071948.23854-3-richard.weiyang@gmail.com
+Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/memblock.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/mm/memblock.c
++++ b/mm/memblock.c
+@@ -2160,11 +2160,14 @@ static void __init memmap_init_reserved_
+ struct memblock_region *region;
+ phys_addr_t start, end;
+ int nid;
++ unsigned long max_reserved;
+
+ /*
+ * set nid on all reserved pages and also treat struct
+ * pages for the NOMAP regions as PageReserved
+ */
++repeat:
++ max_reserved = memblock.reserved.max;
+ for_each_mem_region(region) {
+ nid = memblock_get_region_node(region);
+ start = region->base;
+@@ -2175,6 +2178,13 @@ static void __init memmap_init_reserved_
+
+ memblock_set_node(start, region->size, &memblock.reserved, nid);
+ }
++ /*
++ * 'max' is changed means memblock.reserved has been doubled its
++ * array, which may result a new reserved region before current
++ * 'start'. Now we should repeat the procedure to set its node id.
++ */
++ if (max_reserved != memblock.reserved.max)
++ goto repeat;
+
+ /*
+ * initialize struct pages for reserved regions that don't have
--- /dev/null
+From 649b50a82f09fa44c2f7a65618e4584072145ab7 Mon Sep 17 00:00:00 2001
+From: Ruslan Piasetskyi <ruslan.piasetskyi@gmail.com>
+Date: Wed, 26 Mar 2025 23:06:38 +0100
+Subject: mmc: renesas_sdhi: Fix error handling in renesas_sdhi_probe
+
+From: Ruslan Piasetskyi <ruslan.piasetskyi@gmail.com>
+
+commit 649b50a82f09fa44c2f7a65618e4584072145ab7 upstream.
+
+After moving tmio_mmc_host_probe down, error handling has to be
+adjusted.
+
+Fixes: 74f45de394d9 ("mmc: renesas_sdhi: register irqs before registering controller")
+Reviewed-by: Ihar Salauyou <salauyou.ihar@gmail.com>
+Signed-off-by: Ruslan Piasetskyi <ruslan.piasetskyi@gmail.com>
+Reviewed-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Reviewed-by: Wolfram Sang <wsa+renesas@sang-engineering.com>
+Tested-by: Wolfram Sang <wsa+renesas@sang-engineering.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20250326220638.460083-1-ruslan.piasetskyi@gmail.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/host/renesas_sdhi_core.c | 10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+--- a/drivers/mmc/host/renesas_sdhi_core.c
++++ b/drivers/mmc/host/renesas_sdhi_core.c
+@@ -1112,26 +1112,26 @@ int renesas_sdhi_probe(struct platform_d
+ num_irqs = platform_irq_count(pdev);
+ if (num_irqs < 0) {
+ ret = num_irqs;
+- goto eirq;
++ goto edisclk;
+ }
+
+ /* There must be at least one IRQ source */
+ if (!num_irqs) {
+ ret = -ENXIO;
+- goto eirq;
++ goto edisclk;
+ }
+
+ for (i = 0; i < num_irqs; i++) {
+ irq = platform_get_irq(pdev, i);
+ if (irq < 0) {
+ ret = irq;
+- goto eirq;
++ goto edisclk;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq, 0,
+ dev_name(&pdev->dev), host);
+ if (ret)
+- goto eirq;
++ goto edisclk;
+ }
+
+ ret = tmio_mmc_host_probe(host);
+@@ -1143,8 +1143,6 @@ int renesas_sdhi_probe(struct platform_d
+
+ return ret;
+
+-eirq:
+- tmio_mmc_host_remove(host);
+ edisclk:
+ renesas_sdhi_clk_disable(host);
+ efree:
--- /dev/null
+From de3629baf5a33af1919dec7136d643b0662e85ef Mon Sep 17 00:00:00 2001
+From: Helge Deller <deller@gmx.de>
+Date: Sat, 3 May 2025 18:24:01 +0200
+Subject: parisc: Fix double SIGFPE crash
+
+From: Helge Deller <deller@gmx.de>
+
+commit de3629baf5a33af1919dec7136d643b0662e85ef upstream.
+
+Camm noticed that on parisc a SIGFPE exception will crash an application with
+a second SIGFPE in the signal handler. Dave analyzed it, and it happens
+because glibc uses a double-word floating-point store to atomically update
+function descriptors. As a result of lazy binding, we hit a floating-point
+store in fpe_func almost immediately.
+
+When the T bit is set, an assist exception trap occurs when the
+co-processor encounters *any* floating-point instruction except for a double
+store of register %fr0. The latter cancels all pending traps. Let's fix this
+by clearing the Trap (T) bit in the FP status register before returning to the
+signal handler in userspace.
+
+The issue can be reproduced with this test program:
+
+root@parisc:~# cat fpe.c
+
+static void fpe_func(int sig, siginfo_t *i, void *v) {
+ sigset_t set;
+ sigemptyset(&set);
+ sigaddset(&set, SIGFPE);
+ sigprocmask(SIG_UNBLOCK, &set, NULL);
+ printf("GOT signal %d with si_code %ld\n", sig, i->si_code);
+}
+
+int main() {
+ struct sigaction action = {
+ .sa_sigaction = fpe_func,
+ .sa_flags = SA_RESTART|SA_SIGINFO };
+ sigaction(SIGFPE, &action, 0);
+ feenableexcept(FE_OVERFLOW);
+ return printf("%lf\n",1.7976931348623158E308*1.7976931348623158E308);
+}
+
+root@parisc:~# gcc fpe.c -lm
+root@parisc:~# ./a.out
+ Floating point exception
+
+root@parisc:~# strace -f ./a.out
+ execve("./a.out", ["./a.out"], 0xf9ac7034 /* 20 vars */) = 0
+ getrlimit(RLIMIT_STACK, {rlim_cur=8192*1024, rlim_max=RLIM_INFINITY}) = 0
+ ...
+ rt_sigaction(SIGFPE, {sa_handler=0x1110a, sa_mask=[], sa_flags=SA_RESTART|SA_SIGINFO}, NULL, 8) = 0
+ --- SIGFPE {si_signo=SIGFPE, si_code=FPE_FLTOVF, si_addr=0x1078f} ---
+ --- SIGFPE {si_signo=SIGFPE, si_code=FPE_FLTOVF, si_addr=0xf8f21237} ---
+ +++ killed by SIGFPE +++
+ Floating point exception
+
+Signed-off-by: Helge Deller <deller@gmx.de>
+Suggested-by: John David Anglin <dave.anglin@bell.net>
+Reported-by: Camm Maguire <camm@maguirefamily.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/parisc/math-emu/driver.c | 16 +++++++++++++---
+ 1 file changed, 13 insertions(+), 3 deletions(-)
+
+--- a/arch/parisc/math-emu/driver.c
++++ b/arch/parisc/math-emu/driver.c
+@@ -97,9 +97,19 @@ handle_fpe(struct pt_regs *regs)
+
+ memcpy(regs->fr, frcopy, sizeof regs->fr);
+ if (signalcode != 0) {
+- force_sig_fault(signalcode >> 24, signalcode & 0xffffff,
+- (void __user *) regs->iaoq[0]);
+- return -1;
++ int sig = signalcode >> 24;
++
++ if (sig == SIGFPE) {
++ /*
++ * Clear floating point trap bit to avoid trapping
++ * again on the first floating-point instruction in
++ * the userspace signal handler.
++ */
++ regs->fr[0] &= ~(1ULL << 38);
++ }
++ force_sig_fault(sig, signalcode & 0xffffff,
++ (void __user *) regs->iaoq[0]);
++ return -1;
+ }
+
+ return signalcode ? -1 : 0;
--- /dev/null
+From 58f6217e5d0132a9f14e401e62796916aa055c1b Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Fri, 25 Apr 2025 17:13:55 -0700
+Subject: perf/x86/intel: KVM: Mask PEBS_ENABLE loaded for guest with vCPU's value.
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit 58f6217e5d0132a9f14e401e62796916aa055c1b upstream.
+
+When generating the MSR_IA32_PEBS_ENABLE value that will be loaded on
+VM-Entry to a KVM guest, mask the value with the vCPU's desired PEBS_ENABLE
+value. Consulting only the host kernel's host vs. guest masks results in
+running the guest with PEBS enabled even when the guest doesn't want to use
+PEBS. Because KVM uses perf events to proxy the guest virtual PMU, simply
+looking at exclude_host can't differentiate between events created by host
+userspace, and events created by KVM on behalf of the guest.
+
+Running the guest with PEBS unexpectedly enabled typically manifests as
+crashes due to a near-infinite stream of #PFs. E.g. if the guest hasn't
+written MSR_IA32_DS_AREA, the CPU will hit page faults on address '0' when
+trying to record PEBS events.
+
+The issue is most easily reproduced by running `perf kvm top` from before
+commit 7b100989b4f6 ("perf evlist: Remove __evlist__add_default") (after
+which, `perf kvm top` effectively stopped using PEBS). The userspace side
+of perf creates a guest-only PEBS event, which intel_guest_get_msrs()
+misconstrues as a guest-*owned* PEBS event.
+
+Arguably, this is a userspace bug, as enabling PEBS on guest-only events
+simply cannot work, and userspace can kill VMs in many other ways (there
+is no danger to the host). However, even if this is considered to be bad
+userspace behavior, there's zero downside to perf/KVM restricting PEBS to
+guest-owned events.
+
+Note, commit 854250329c02 ("KVM: x86/pmu: Disable guest PEBS temporarily
+in two rare situations") fixed the case where host userspace is profiling
+KVM *and* userspace, but missed the case where userspace is profiling only
+KVM.
+
+Fixes: c59a1f106f5c ("KVM: x86/pmu: Add IA32_PEBS_ENABLE MSR emulation for extended PEBS")
+Closes: https://lore.kernel.org/all/Z_VUswFkWiTYI0eD@do-x1carbon
+Reported-by: Seth Forshee <sforshee@kernel.org>
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
+Tested-by: "Seth Forshee (DigitalOcean)" <sforshee@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20250426001355.1026530-1-seanjc@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/events/intel/core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -4333,7 +4333,7 @@ static struct perf_guest_switch_msr *int
+ arr[pebs_enable] = (struct perf_guest_switch_msr){
+ .msr = MSR_IA32_PEBS_ENABLE,
+ .host = cpuc->pebs_enabled & ~cpuc->intel_ctrl_guest_mask,
+- .guest = pebs_mask & ~cpuc->intel_ctrl_host_mask,
++ .guest = pebs_mask & ~cpuc->intel_ctrl_host_mask & kvm_pmu->pebs_enable,
+ };
+
+ if (arr[pebs_enable].host) {
--- /dev/null
+From 75aea4b0656ead0facd13d2aae4cb77326e53d2f Mon Sep 17 00:00:00 2001
+From: Kan Liang <kan.liang@linux.intel.com>
+Date: Thu, 24 Apr 2025 06:47:14 -0700
+Subject: perf/x86/intel: Only check the group flag for X86 leader
+
+From: Kan Liang <kan.liang@linux.intel.com>
+
+commit 75aea4b0656ead0facd13d2aae4cb77326e53d2f upstream.
+
+A warning in intel_pmu_lbr_counters_reorder() may be triggered by below
+perf command.
+
+perf record -e "{cpu-clock,cycles/call-graph="lbr"/}" -- sleep 1
+
+It's because the group is mistakenly treated as a branch counter group.
+
+The hw.flags of the leader are used to determine whether a group is a
+branch counters group. However, the hw.flags is only available for a
+hardware event. The field to store the flags is a union type. For a
+software event, it's a hrtimer. The corresponding bit may be set if the
+leader is a software event.
+
+For a branch counter group and other groups that have a group flag
+(e.g., topdown, PEBS counters snapshotting, and ACR), the leader must
+be a X86 event. Check the X86 event before checking the flag.
+The patch only fixes the issue for the branch counter group.
+The following patch will fix the other groups.
+
+There may be an alternative way to fix the issue by moving the hw.flags
+out of the union type. It should work for now. But it's still possible
+that the flags will be used by other types of events later. As long as
+that type of event is used as a leader, a similar issue will be
+triggered. So the alternative way is dropped.
+
+Fixes: 33744916196b ("perf/x86/intel: Support branch counters logging")
+Closes: https://lore.kernel.org/lkml/20250412091423.1839809-1-luogengkun@huaweicloud.com/
+Reported-by: Luo Gengkun <luogengkun@huaweicloud.com>
+Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20250424134718.311934-2-kan.liang@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/events/core.c | 2 +-
+ arch/x86/events/perf_event.h | 9 ++++++++-
+ 2 files changed, 9 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -753,7 +753,7 @@ void x86_pmu_enable_all(int added)
+ }
+ }
+
+-static inline int is_x86_event(struct perf_event *event)
++int is_x86_event(struct perf_event *event)
+ {
+ int i;
+
+--- a/arch/x86/events/perf_event.h
++++ b/arch/x86/events/perf_event.h
+@@ -110,9 +110,16 @@ static inline bool is_topdown_event(stru
+ return is_metric_event(event) || is_slots_event(event);
+ }
+
++int is_x86_event(struct perf_event *event);
++
++static inline bool check_leader_group(struct perf_event *leader, int flags)
++{
++ return is_x86_event(leader) ? !!(leader->hw.flags & flags) : false;
++}
++
+ static inline bool is_branch_counters_group(struct perf_event *event)
+ {
+- return event->group_leader->hw.flags & PERF_X86_EVENT_BRANCH_COUNTERS;
++ return check_leader_group(event->group_leader, PERF_X86_EVENT_BRANCH_COUNTERS);
+ }
+
+ struct amd_nb {
--- /dev/null
+From 9f5595d5f03fd4dc640607a71e89a1daa68fd19d Mon Sep 17 00:00:00 2001
+From: Mario Limonciello <mario.limonciello@amd.com>
+Date: Mon, 14 Apr 2025 11:24:00 -0500
+Subject: platform/x86/amd: pmc: Require at least 2.5 seconds between HW sleep cycles
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Mario Limonciello <mario.limonciello@amd.com>
+
+commit 9f5595d5f03fd4dc640607a71e89a1daa68fd19d upstream.
+
+When an APU exits HW sleep with no active wake sources the Linux kernel will
+rapidly assert that the APU can enter back into HW sleep. This happens in a
+few ms. Contrasting this to Windows, Windows can take 10s of seconds to
+enter back into the resiliency phase for Modern Standby.
+
+For some situations this can be problematic because it can cause leakage
+from VDDCR_SOC to VDD_MISC and force VDD_MISC outside of the electrical
+design guide specifications. On some designs this will trip the over
+voltage protection feature (OVP) of the voltage regulator module, but it
+could cause APU damage as well.
+
+To prevent this risk, add an explicit sleep call so that future attempts
+to enter into HW sleep will have enough time to settle. This will occur
+while the screen is dark and only on cases that the APU should enter HW
+sleep again, so it shouldn't be noticeable to any user.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+Acked-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+Link: https://lore.kernel.org/r/20250414162446.3853194-1-superm1@kernel.org
+Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/platform/x86/amd/pmc/pmc.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+--- a/drivers/platform/x86/amd/pmc/pmc.c
++++ b/drivers/platform/x86/amd/pmc/pmc.c
+@@ -892,10 +892,9 @@ static void amd_pmc_s2idle_check(void)
+ struct smu_metrics table;
+ int rc;
+
+- /* CZN: Ensure that future s0i3 entry attempts at least 10ms passed */
+- if (pdev->cpu_id == AMD_CPU_ID_CZN && !get_metrics_table(pdev, &table) &&
+- table.s0i3_last_entry_status)
+- usleep_range(10000, 20000);
++ /* Avoid triggering OVP */
++ if (!get_metrics_table(pdev, &table) && table.s0i3_last_entry_status)
++ msleep(2500);
+
+ /* Dump the IdleMask before we add to the STB */
+ amd_pmc_idlemask_read(pdev, pdev->dev, NULL);
--- /dev/null
+From 8d6955ed76e8a47115f2ea1d9c263ee6f505d737 Mon Sep 17 00:00:00 2001
+From: Shouye Liu <shouyeliu@tencent.com>
+Date: Thu, 17 Apr 2025 11:23:21 +0800
+Subject: platform/x86/intel-uncore-freq: Fix missing uncore sysfs during CPU hotplug
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Shouye Liu <shouyeliu@tencent.com>
+
+commit 8d6955ed76e8a47115f2ea1d9c263ee6f505d737 upstream.
+
+In certain situations, the sysfs for uncore may not be present when all
+CPUs in a package are offlined and then brought back online after boot.
+
+This issue can occur if there is an error in adding the sysfs entry due
+to a memory allocation failure. Retrying to bring the CPUs online will
+not resolve the issue, as the uncore_cpu_mask is already set for the
+package before the failure condition occurs.
+
+This issue does not occur if the failure happens during module
+initialization, as the module will fail to load in the event of any
+error.
+
+To address this, ensure that the uncore_cpu_mask is not set until the
+successful return of uncore_freq_add_entry().
+
+Fixes: dbce412a7733 ("platform/x86/intel-uncore-freq: Split common and enumeration part")
+Signed-off-by: Shouye Liu <shouyeliu@tencent.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20250417032321.75580-1-shouyeliu@gmail.com
+Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c | 13 ++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
++++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
+@@ -146,15 +146,13 @@ static int uncore_event_cpu_online(unsig
+ {
+ struct uncore_data *data;
+ int target;
++ int ret;
+
+ /* Check if there is an online cpu in the package for uncore MSR */
+ target = cpumask_any_and(&uncore_cpu_mask, topology_die_cpumask(cpu));
+ if (target < nr_cpu_ids)
+ return 0;
+
+- /* Use this CPU on this die as a control CPU */
+- cpumask_set_cpu(cpu, &uncore_cpu_mask);
+-
+ data = uncore_get_instance(cpu);
+ if (!data)
+ return 0;
+@@ -163,7 +161,14 @@ static int uncore_event_cpu_online(unsig
+ data->die_id = topology_die_id(cpu);
+ data->domain_id = UNCORE_DOMAIN_ID_INVALID;
+
+- return uncore_freq_add_entry(data, cpu);
++ ret = uncore_freq_add_entry(data, cpu);
++ if (ret)
++ return ret;
++
++ /* Use this CPU on this die as a control CPU */
++ cpumask_set_cpu(cpu, &uncore_cpu_mask);
++
++ return 0;
+ }
+
+ static int uncore_event_cpu_offline(unsigned int cpu)
revert-rndis_host-flag-rndis-modems-as-wwan-devices.patch
alsa-usb-audio-add-retry-on-eproto-from-usb_set_interface.patch
alsa-usb-audio-add-second-usb-id-for-jabra-evolve-65-headset.patch
+binder-fix-offset-calculation-in-debug-log.patch
+btrfs-adjust-subpage-bit-start-based-on-sectorsize.patch
+btrfs-fix-cow-handling-in-run_delalloc_nocow.patch
+cpufreq-intel_pstate-unchecked-msr-aceess-in-legacy-mode.patch
+drm-fdinfo-protect-against-driver-unbind.patch
+drm-nouveau-fix-warn_on-in-nouveau_fence_context_kill.patch
+edac-altera-test-the-correct-error-reg-offset.patch
+edac-altera-set-ddr-and-sdmmc-interrupt-mask-before-registration.patch
+i2c-imx-lpi2c-fix-clock-count-when-probe-defers.patch
+arm64-errata-add-missing-sentinels-to-spectre-bhb-midr-arrays.patch
+parisc-fix-double-sigfpe-crash.patch
+perf-x86-intel-only-check-the-group-flag-for-x86-leader.patch
+perf-x86-intel-kvm-mask-pebs_enable-loaded-for-guest-with-vcpu-s-value.patch
+amd-xgbe-fix-to-ensure-dependent-features-are-toggled-with-rx-checksum-offload.patch
+irqchip-qcom-mpm-prevent-crash-when-trying-to-handle-non-wake-gpios.patch
+mm-memblock-pass-size-instead-of-end-to-memblock_set_node.patch
+mm-memblock-repeat-setting-reserved-region-nid-if-array-is-doubled.patch
+mmc-renesas_sdhi-fix-error-handling-in-renesas_sdhi_probe.patch
+spi-tegra114-don-t-fail-set_cs_timing-when-delays-are-zero.patch
+tracing-do-not-take-trace_event_sem-in-print_event_fields.patch
+wifi-brcm80211-fmac-add-error-handling-for-brcmf_usb_dl_writeimage.patch
+x86-boot-sev-support-memory-acceptance-in-the-efi-stub-under-svsm.patch
+dm-bufio-don-t-schedule-in-atomic-context.patch
+dm-integrity-fix-a-warning-on-invalid-table-line.patch
+dm-always-update-the-array-size-in-realloc_argv-on-success.patch
+drm-amdgpu-fix-offset-for-hdp-remap-in-nbio-v7.11.patch
+drm-select-drm_kms_helper-from-drm_debug_dp_mst_topology_refs.patch
+iommu-amd-fix-potential-buffer-overflow-in-parse_ivrs_acpihid.patch
+iommu-arm-smmu-v3-fix-iommu_device_probe-bug-due-to-duplicated-stream-ids.patch
+iommu-arm-smmu-v3-fix-pgsize_bit-for-sva-domains.patch
+iommu-vt-d-apply-quirk_iommu_igfx-for-8086-0044-qm57-qs57.patch
+iommu-fix-two-issues-in-iommu_copy_struct_from_user.patch
+platform-x86-amd-pmc-require-at-least-2.5-seconds-between-hw-sleep-cycles.patch
+platform-x86-intel-uncore-freq-fix-missing-uncore-sysfs-during-cpu-hotplug.patch
+ksmbd-fix-use-after-free-in-ksmbd_session_rpc_open.patch
+ksmbd-fix-use-after-free-in-kerberos-authentication.patch
+ksmbd-fix-use-after-free-in-session-logoff.patch
+smb-client-fix-zero-length-for-mkdir-posix-create-context.patch
+cpufreq-avoid-using-inconsistent-policy-min-and-policy-max.patch
+cpufreq-fix-setting-policy-limits-when-frequency-tables-are-used.patch
+tracing-fix-oob-write-in-trace_seq_to_buffer.patch
--- /dev/null
+From 74c72419ec8da5cbc9c49410d3c44bb954538bdd Mon Sep 17 00:00:00 2001
+From: Jethro Donaldson <devel@jro.nz>
+Date: Wed, 30 Apr 2025 00:59:15 +1200
+Subject: smb: client: fix zero length for mkdir POSIX create context
+
+From: Jethro Donaldson <devel@jro.nz>
+
+commit 74c72419ec8da5cbc9c49410d3c44bb954538bdd upstream.
+
+SMB create requests issued via smb311_posix_mkdir() have an incorrect
+length of zero bytes for the POSIX create context data. ksmbd server
+rejects such requests and logs "cli req too short" causing mkdir to fail
+with "invalid argument" on the client side. It also causes subsequent
+rmmod to crash in cifs_destroy_request_bufs()
+
+Inspection of packets sent by cifs.ko using wireshark show valid data for
+the SMB2_POSIX_CREATE_CONTEXT is appended with the correct offset, but
+with an incorrect length of zero bytes. Fails with ksmbd+cifs.ko only as
+Windows server/client does not use POSIX extensions.
+
+Fix smb311_posix_mkdir() to set req->CreateContextsLength as part of
+appending the POSIX creation context to the request.
+
+Signed-off-by: Jethro Donaldson <devel@jro.nz>
+Acked-by: Paulo Alcantara (Red Hat) <pc@manguebit.com>
+Reviewed-by: Namjae Jeon <linkinjeon@kernel.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/smb2pdu.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -2938,6 +2938,7 @@ replay_again:
+ req->CreateContextsOffset = cpu_to_le32(
+ sizeof(struct smb2_create_req) +
+ iov[1].iov_len);
++ le32_add_cpu(&req->CreateContextsLength, iov[n_iov-1].iov_len);
+ pc_buf = iov[n_iov-1].iov_base;
+ }
+
--- /dev/null
+From 4426e6b4ecf632bb75d973051e1179b8bfac2320 Mon Sep 17 00:00:00 2001
+From: Aaron Kling <webgeek1234@gmail.com>
+Date: Wed, 23 Apr 2025 21:03:03 -0500
+Subject: spi: tegra114: Don't fail set_cs_timing when delays are zero
+
+From: Aaron Kling <webgeek1234@gmail.com>
+
+commit 4426e6b4ecf632bb75d973051e1179b8bfac2320 upstream.
+
+The original code would skip null delay pointers, but when the pointers
+were converted to point within the spi_device struct, the check was not
+updated to skip delays of zero. Hence all spi devices that didn't set
+delays would fail to probe.
+
+Fixes: 04e6bb0d6bb1 ("spi: modify set_cs_timing parameter")
+Cc: stable@vger.kernel.org
+Signed-off-by: Aaron Kling <webgeek1234@gmail.com>
+Link: https://patch.msgid.link/20250423-spi-tegra114-v1-1-2d608bcc12f9@gmail.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/spi/spi-tegra114.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/spi/spi-tegra114.c
++++ b/drivers/spi/spi-tegra114.c
+@@ -728,9 +728,9 @@ static int tegra_spi_set_hw_cs_timing(st
+ u32 inactive_cycles;
+ u8 cs_state;
+
+- if (setup->unit != SPI_DELAY_UNIT_SCK ||
+- hold->unit != SPI_DELAY_UNIT_SCK ||
+- inactive->unit != SPI_DELAY_UNIT_SCK) {
++ if ((setup->unit && setup->unit != SPI_DELAY_UNIT_SCK) ||
++ (hold->unit && hold->unit != SPI_DELAY_UNIT_SCK) ||
++ (inactive->unit && inactive->unit != SPI_DELAY_UNIT_SCK)) {
+ dev_err(&spi->dev,
+ "Invalid delay unit %d, should be SPI_DELAY_UNIT_SCK\n",
+ SPI_DELAY_UNIT_SCK);
--- /dev/null
+From 0a8f11f8569e7ed16cbcedeb28c4350f6378fea6 Mon Sep 17 00:00:00 2001
+From: Steven Rostedt <rostedt@goodmis.org>
+Date: Thu, 1 May 2025 22:41:28 -0400
+Subject: tracing: Do not take trace_event_sem in print_event_fields()
+
+From: Steven Rostedt <rostedt@goodmis.org>
+
+commit 0a8f11f8569e7ed16cbcedeb28c4350f6378fea6 upstream.
+
+On some paths in print_event_fields() it takes the trace_event_sem for
+read, even though it should always be held when the function is called.
+
+Remove the taking of that mutex and add a lockdep_assert_held_read() to
+make sure the trace_event_sem is held when print_event_fields() is called.
+
+Cc: stable@vger.kernel.org
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Link: https://lore.kernel.org/20250501224128.0b1f0571@batman.local.home
+Fixes: 80a76994b2d88 ("tracing: Add "fields" option to show raw trace event fields")
+Reported-by: syzbot+441582c1592938fccf09@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/6813ff5e.050a0220.14dd7d.001b.GAE@google.com/
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace_output.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/kernel/trace/trace_output.c
++++ b/kernel/trace/trace_output.c
+@@ -950,11 +950,12 @@ enum print_line_t print_event_fields(str
+ struct trace_event_call *call;
+ struct list_head *head;
+
++ lockdep_assert_held_read(&trace_event_sem);
++
+ /* ftrace defined events have separate call structures */
+ if (event->type <= __TRACE_LAST_TYPE) {
+ bool found = false;
+
+- down_read(&trace_event_sem);
+ list_for_each_entry(call, &ftrace_events, list) {
+ if (call->event.type == event->type) {
+ found = true;
+@@ -964,7 +965,6 @@ enum print_line_t print_event_fields(str
+ if (call->event.type > __TRACE_LAST_TYPE)
+ break;
+ }
+- up_read(&trace_event_sem);
+ if (!found) {
+ trace_seq_printf(&iter->seq, "UNKNOWN TYPE %d\n", event->type);
+ goto out;
--- /dev/null
+From f5178c41bb43444a6008150fe6094497135d07cb Mon Sep 17 00:00:00 2001
+From: Jeongjun Park <aha310510@gmail.com>
+Date: Tue, 22 Apr 2025 20:30:25 +0900
+Subject: tracing: Fix oob write in trace_seq_to_buffer()
+
+From: Jeongjun Park <aha310510@gmail.com>
+
+commit f5178c41bb43444a6008150fe6094497135d07cb upstream.
+
+syzbot reported this bug:
+==================================================================
+BUG: KASAN: slab-out-of-bounds in trace_seq_to_buffer kernel/trace/trace.c:1830 [inline]
+BUG: KASAN: slab-out-of-bounds in tracing_splice_read_pipe+0x6be/0xdd0 kernel/trace/trace.c:6822
+Write of size 4507 at addr ffff888032b6b000 by task syz.2.320/7260
+
+CPU: 1 UID: 0 PID: 7260 Comm: syz.2.320 Not tainted 6.15.0-rc1-syzkaller-00301-g3bde70a2c827 #0 PREEMPT(full)
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 02/12/2025
+Call Trace:
+ <TASK>
+ __dump_stack lib/dump_stack.c:94 [inline]
+ dump_stack_lvl+0x116/0x1f0 lib/dump_stack.c:120
+ print_address_description mm/kasan/report.c:408 [inline]
+ print_report+0xc3/0x670 mm/kasan/report.c:521
+ kasan_report+0xe0/0x110 mm/kasan/report.c:634
+ check_region_inline mm/kasan/generic.c:183 [inline]
+ kasan_check_range+0xef/0x1a0 mm/kasan/generic.c:189
+ __asan_memcpy+0x3c/0x60 mm/kasan/shadow.c:106
+ trace_seq_to_buffer kernel/trace/trace.c:1830 [inline]
+ tracing_splice_read_pipe+0x6be/0xdd0 kernel/trace/trace.c:6822
+ ....
+==================================================================
+
+It has been reported that trace_seq_to_buffer() tries to copy more data
+than PAGE_SIZE to buf. Therefore, to prevent this, we should use the
+smaller of trace_seq_used(&iter->seq) and PAGE_SIZE as an argument.
+
+Link: https://lore.kernel.org/20250422113026.13308-1-aha310510@gmail.com
+Reported-by: syzbot+c8cd2d2c412b868263fb@syzkaller.appspotmail.com
+Fixes: 3c56819b14b0 ("tracing: splice support for tracing_pipe")
+Suggested-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Jeongjun Park <aha310510@gmail.com>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -6703,13 +6703,14 @@ static ssize_t tracing_splice_read_pipe(
+ /* Copy the data into the page, so we can start over. */
+ ret = trace_seq_to_buffer(&iter->seq,
+ page_address(spd.pages[i]),
+- trace_seq_used(&iter->seq));
++ min((size_t)trace_seq_used(&iter->seq),
++ PAGE_SIZE));
+ if (ret < 0) {
+ __free_page(spd.pages[i]);
+ break;
+ }
+ spd.partial[i].offset = 0;
+- spd.partial[i].len = trace_seq_used(&iter->seq);
++ spd.partial[i].len = ret;
+
+ trace_seq_init(&iter->seq);
+ }
--- /dev/null
+From 8e089e7b585d95122c8122d732d1d5ef8f879396 Mon Sep 17 00:00:00 2001
+From: Wentao Liang <vulab@iscas.ac.cn>
+Date: Tue, 22 Apr 2025 12:22:02 +0800
+Subject: wifi: brcm80211: fmac: Add error handling for brcmf_usb_dl_writeimage()
+
+From: Wentao Liang <vulab@iscas.ac.cn>
+
+commit 8e089e7b585d95122c8122d732d1d5ef8f879396 upstream.
+
+The function brcmf_usb_dl_writeimage() calls the function
+brcmf_usb_dl_cmd() but dose not check its return value. The
+'state.state' and the 'state.bytes' are uninitialized if the
+function brcmf_usb_dl_cmd() fails. It is dangerous to use
+uninitialized variables in the conditions.
+
+Add error handling for brcmf_usb_dl_cmd() to jump to error
+handling path if the brcmf_usb_dl_cmd() fails and the
+'state.state' and the 'state.bytes' are uninitialized.
+
+Improve the error message to report more detailed error
+information.
+
+Fixes: 71bb244ba2fd ("brcm80211: fmac: add USB support for bcm43235/6/8 chipsets")
+Cc: stable@vger.kernel.org # v3.4+
+Signed-off-by: Wentao Liang <vulab@iscas.ac.cn>
+Acked-by: Arend van Spriel <arend.vanspriel@broadcom.com>
+Link: https://patch.msgid.link/20250422042203.2259-1-vulab@iscas.ac.cn
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+@@ -896,14 +896,16 @@ brcmf_usb_dl_writeimage(struct brcmf_usb
+ }
+
+ /* 1) Prepare USB boot loader for runtime image */
+- brcmf_usb_dl_cmd(devinfo, DL_START, &state, sizeof(state));
++ err = brcmf_usb_dl_cmd(devinfo, DL_START, &state, sizeof(state));
++ if (err)
++ goto fail;
+
+ rdlstate = le32_to_cpu(state.state);
+ rdlbytes = le32_to_cpu(state.bytes);
+
+ /* 2) Check we are in the Waiting state */
+ if (rdlstate != DL_WAITING) {
+- brcmf_err("Failed to DL_START\n");
++ brcmf_err("Invalid DL state: %u\n", rdlstate);
+ err = -EINVAL;
+ goto fail;
+ }
--- /dev/null
+From 8ed12ab1319b2d8e4a529504777aacacf71371e4 Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Mon, 28 Apr 2025 19:43:22 +0200
+Subject: x86/boot/sev: Support memory acceptance in the EFI stub under SVSM
+
+From: Ard Biesheuvel <ardb@kernel.org>
+
+commit 8ed12ab1319b2d8e4a529504777aacacf71371e4 upstream.
+
+Commit:
+
+ d54d610243a4 ("x86/boot/sev: Avoid shared GHCB page for early memory acceptance")
+
+provided a fix for SEV-SNP memory acceptance from the EFI stub when
+running at VMPL #0. However, that fix was insufficient for SVSM SEV-SNP
+guests running at VMPL >0, as those rely on a SVSM calling area, which
+is a shared buffer whose address is programmed into a SEV-SNP MSR, and
+the SEV init code that sets up this calling area executes much later
+during the boot.
+
+Given that booting via the EFI stub at VMPL >0 implies that the firmware
+has configured this calling area already, reuse it for performing memory
+acceptance in the EFI stub.
+
+Fixes: fcd042e86422 ("x86/sev: Perform PVALIDATE using the SVSM when not at VMPL0")
+Tested-by: Tom Lendacky <thomas.lendacky@amd.com>
+Co-developed-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Cc: <stable@vger.kernel.org>
+Cc: Dionna Amalie Glaze <dionnaglaze@google.com>
+Cc: Kevin Loughlin <kevinloughlin@google.com>
+Cc: linux-efi@vger.kernel.org
+Link: https://lore.kernel.org/r/20250428174322.2780170-2-ardb+git@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/boot/compressed/mem.c | 5 +----
+ arch/x86/boot/compressed/sev.c | 40 ++++++++++++++++++++++++++++++++++++++++
+ arch/x86/boot/compressed/sev.h | 2 ++
+ 3 files changed, 43 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/boot/compressed/mem.c
++++ b/arch/x86/boot/compressed/mem.c
+@@ -34,14 +34,11 @@ static bool early_is_tdx_guest(void)
+
+ void arch_accept_memory(phys_addr_t start, phys_addr_t end)
+ {
+- static bool sevsnp;
+-
+ /* Platform-specific memory-acceptance call goes here */
+ if (early_is_tdx_guest()) {
+ if (!tdx_accept_memory(start, end))
+ panic("TDX: Failed to accept memory\n");
+- } else if (sevsnp || (sev_get_status() & MSR_AMD64_SEV_SNP_ENABLED)) {
+- sevsnp = true;
++ } else if (early_is_sevsnp_guest()) {
+ snp_accept_memory(start, end);
+ } else {
+ error("Cannot accept memory: unknown platform\n");
+--- a/arch/x86/boot/compressed/sev.c
++++ b/arch/x86/boot/compressed/sev.c
+@@ -644,3 +644,43 @@ void sev_prep_identity_maps(unsigned lon
+
+ sev_verify_cbit(top_level_pgt);
+ }
++
++bool early_is_sevsnp_guest(void)
++{
++ static bool sevsnp;
++
++ if (sevsnp)
++ return true;
++
++ if (!(sev_get_status() & MSR_AMD64_SEV_SNP_ENABLED))
++ return false;
++
++ sevsnp = true;
++
++ if (!snp_vmpl) {
++ unsigned int eax, ebx, ecx, edx;
++
++ /*
++ * CPUID Fn8000_001F_EAX[28] - SVSM support
++ */
++ eax = 0x8000001f;
++ ecx = 0;
++ native_cpuid(&eax, &ebx, &ecx, &edx);
++ if (eax & BIT(28)) {
++ struct msr m;
++
++ /* Obtain the address of the calling area to use */
++ boot_rdmsr(MSR_SVSM_CAA, &m);
++ boot_svsm_caa = (void *)m.q;
++ boot_svsm_caa_pa = m.q;
++
++ /*
++ * The real VMPL level cannot be discovered, but the
++ * memory acceptance routines make no use of that so
++ * any non-zero value suffices here.
++ */
++ snp_vmpl = U8_MAX;
++ }
++ }
++ return true;
++}
+--- a/arch/x86/boot/compressed/sev.h
++++ b/arch/x86/boot/compressed/sev.h
+@@ -13,12 +13,14 @@
+ bool sev_snp_enabled(void);
+ void snp_accept_memory(phys_addr_t start, phys_addr_t end);
+ u64 sev_get_status(void);
++bool early_is_sevsnp_guest(void);
+
+ #else
+
+ static inline bool sev_snp_enabled(void) { return false; }
+ static inline void snp_accept_memory(phys_addr_t start, phys_addr_t end) { }
+ static inline u64 sev_get_status(void) { return 0; }
++static inline bool early_is_sevsnp_guest(void) { return false; }
+
+ #endif
+