--- /dev/null
+From 7904e53ed5a20fc678c01d5d1b07ec486425bb6a Mon Sep 17 00:00:00 2001
+From: Oleg Nesterov <oleg@redhat.com>
+Date: Sat, 9 Sep 2023 18:45:01 +0200
+Subject: fs/proc: do_task_stat: use __for_each_thread()
+
+From: Oleg Nesterov <oleg@redhat.com>
+
+commit 7904e53ed5a20fc678c01d5d1b07ec486425bb6a upstream.
+
+do/while_each_thread should be avoided when possible.
+
+Link: https://lkml.kernel.org/r/20230909164501.GA11581@redhat.com
+Signed-off-by: Oleg Nesterov <oleg@redhat.com>
+Cc: Eric W. Biederman <ebiederm@xmission.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Stable-dep-of: 7601df8031fd ("fs/proc: do_task_stat: use sig->stats_lock to gather the threads/children stats")
+Cc: stable@vger.kernel.org
+[ mheyne: adjusted context ]
+Signed-off-by: Maximilian Heyne <mheyne@amazon.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/proc/array.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -530,18 +530,18 @@ static int do_task_stat(struct seq_file
+ cgtime = sig->cgtime;
+
+ if (whole) {
+- struct task_struct *t = task;
++ struct task_struct *t;
+
+ min_flt = sig->min_flt;
+ maj_flt = sig->maj_flt;
+ gtime = sig->gtime;
+
+ rcu_read_lock();
+- do {
++ __for_each_thread(sig, t) {
+ min_flt += t->min_flt;
+ maj_flt += t->maj_flt;
+ gtime += task_gtime(t);
+- } while_each_thread(task, t);
++ }
+ rcu_read_unlock();
+ }
+ } while (need_seqretry(&sig->stats_lock, seq));
--- /dev/null
+From 1a0f25a52e08b1f67510cabbb44888d2b3c46359 Mon Sep 17 00:00:00 2001
+From: Jesse Brandeburg <jesse.brandeburg@intel.com>
+Date: Fri, 12 Nov 2021 17:06:02 -0800
+Subject: ice: safer stats processing
+
+From: Jesse Brandeburg <jesse.brandeburg@intel.com>
+
+commit 1a0f25a52e08b1f67510cabbb44888d2b3c46359 upstream.
+
+The driver was zeroing live stats that could be fetched by
+ndo_get_stats64 at any time. This could result in inconsistent
+statistics, and the telltale sign was when reading stats frequently from
+/proc/net/dev, the stats would go backwards.
+
+Fix by collecting stats into a local, and delaying when we write to the
+structure so it's not incremental.
+
+Fixes: fcea6f3da546 ("ice: Add stats and ethtool support")
+Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
+Tested-by: Gurucharan G <gurucharanx.g@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Closes: https://lore.kernel.org/intel-wired-lan/CAP8M2pGttT4JBjt+i4GJkxy7yERbqWJ5a8R14HzoonTLByc2Cw@mail.gmail.com
+Signed-off-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/ice/ice_main.c | 29 ++++++++++++++++++-----------
+ 1 file changed, 18 insertions(+), 11 deletions(-)
+
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -5731,14 +5731,15 @@ ice_fetch_u64_stats_per_ring(struct ice_
+ /**
+ * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
+ * @vsi: the VSI to be updated
++ * @vsi_stats: the stats struct to be updated
+ * @rings: rings to work on
+ * @count: number of rings
+ */
+ static void
+-ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_ring **rings,
+- u16 count)
++ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
++ struct rtnl_link_stats64 *vsi_stats,
++ struct ice_ring **rings, u16 count)
+ {
+- struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
+ u16 i;
+
+ for (i = 0; i < count; i++) {
+@@ -5761,15 +5762,13 @@ ice_update_vsi_tx_ring_stats(struct ice_
+ */
+ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
+ {
+- struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
++ struct rtnl_link_stats64 *vsi_stats;
+ u64 pkts, bytes;
+ int i;
+
+- /* reset netdev stats */
+- vsi_stats->tx_packets = 0;
+- vsi_stats->tx_bytes = 0;
+- vsi_stats->rx_packets = 0;
+- vsi_stats->rx_bytes = 0;
++ vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC);
++ if (!vsi_stats)
++ return;
+
+ /* reset non-netdev (extended) stats */
+ vsi->tx_restart = 0;
+@@ -5781,7 +5780,8 @@ static void ice_update_vsi_ring_stats(st
+ rcu_read_lock();
+
+ /* update Tx rings counters */
+- ice_update_vsi_tx_ring_stats(vsi, vsi->tx_rings, vsi->num_txq);
++ ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings,
++ vsi->num_txq);
+
+ /* update Rx rings counters */
+ ice_for_each_rxq(vsi, i) {
+@@ -5796,10 +5796,17 @@ static void ice_update_vsi_ring_stats(st
+
+ /* update XDP Tx rings counters */
+ if (ice_is_xdp_ena_vsi(vsi))
+- ice_update_vsi_tx_ring_stats(vsi, vsi->xdp_rings,
++ ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings,
+ vsi->num_xdp_txq);
+
+ rcu_read_unlock();
++
++ vsi->net_stats.tx_packets = vsi_stats->tx_packets;
++ vsi->net_stats.tx_bytes = vsi_stats->tx_bytes;
++ vsi->net_stats.rx_packets = vsi_stats->rx_packets;
++ vsi->net_stats.rx_bytes = vsi_stats->rx_bytes;
++
++ kfree(vsi_stats);
+ }
+
+ /**
atm-clip-fix-infinite-recursive-call-of-clip_push.patch
atm-clip-fix-null-pointer-dereference-in-vcc_sendmsg.patch
net-sched-abort-__tc_modify_qdisc-if-parent-class-do.patch
+x86-cpu-amd-properly-check-the-tsa-microcode.patch
+fs-proc-do_task_stat-use-__for_each_thread.patch
+ice-safer-stats-processing.patch
--- /dev/null
+From bp@alien8.de Sat Jul 12 14:03:30 2025
+From: Borislav Petkov <bp@alien8.de>
+Date: Fri, 11 Jul 2025 21:45:58 +0200
+Subject: x86/CPU/AMD: Properly check the TSA microcode
+To: stable@vger.kernel.org
+Cc: Thomas Voegtle <tv@lio96.de>, kim.phillips@amd.com
+Message-ID: <20250711194558.GLaHFp9kw1s5dSmBUa@fat_crate.local>
+Content-Disposition: inline
+
+From: "Borislav Petkov (AMD)" <bp@alien8.de>
+
+In order to simplify backports, I resorted to an older version of the
+microcode revision checking which didn't pull in the whole struct
+x86_cpu_id matching machinery.
+
+My simpler method, however, forgot to add the extended CPU model to the
+patch revision, which led to mismatches when determining whether TSA
+mitigation support is present.
+
+So add that forgotten extended model.
+
+Also, fix a backport mismerge which put tsa_init() where it doesn't
+belong.
+
+This is a stable-only fix and the preference is to do it this way
+because it is a lot simpler. Also, the Fixes: tag below points to the
+respective stable patch.
+
+Fixes: 90293047df18 ("x86/bugs: Add a Transient Scheduler Attacks mitigation")
+Reported-by: Thomas Voegtle <tv@lio96.de>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Tested-by: Thomas Voegtle <tv@lio96.de>
+Message-ID: <04ea0a8e-edb0-c59e-ce21-5f3d5d167af3@lio96.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/amd.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -590,6 +590,7 @@ static bool amd_check_tsa_microcode(void
+
+ p.ext_fam = c->x86 - 0xf;
+ p.model = c->x86_model;
++ p.ext_model = c->x86_model >> 4;
+ p.stepping = c->x86_stepping;
+
+ if (c->x86 == 0x19) {
+@@ -704,6 +705,8 @@ static void bsp_init_amd(struct cpuinfo_
+ }
+
+ resctrl_cpu_detect(c);
++
++ tsa_init(c);
+ }
+
+ static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
+@@ -743,8 +746,6 @@ static void early_detect_mem_encrypt(str
+ goto clear_sev;
+
+
+- tsa_init(c);
+-
+ return;
+
+ clear_all: