--- /dev/null
+From d364eee14c682b141f4667efc3c65191339d88bd Mon Sep 17 00:00:00 2001
+From: Dhananjay Ugwekar <dhananjay.ugwekar@amd.com>
+Date: Wed, 5 Feb 2025 11:25:12 +0000
+Subject: cpufreq/amd-pstate: Remove the goto label in amd_pstate_update_limits
+
+From: Dhananjay Ugwekar <dhananjay.ugwekar@amd.com>
+
+commit d364eee14c682b141f4667efc3c65191339d88bd upstream.
+
+Scope-based guard/cleanup macros should not be used together with goto
+labels. Hence, remove the goto label.
+
+Fixes: 6c093d5a5b73 ("cpufreq/amd-pstate: convert mutex use to guard()")
+Signed-off-by: Dhananjay Ugwekar <dhananjay.ugwekar@amd.com>
+Reviewed-by: Mario Limonciello <mario.limonciello@amd.com>
+Link: https://lore.kernel.org/r/20250205112523.201101-2-dhananjay.ugwekar@amd.com
+Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/cpufreq/amd-pstate.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/cpufreq/amd-pstate.c
++++ b/drivers/cpufreq/amd-pstate.c
+@@ -827,8 +827,10 @@ static void amd_pstate_update_limits(uns
+ guard(mutex)(&amd_pstate_driver_lock);
+
+ ret = amd_get_highest_perf(cpu, &cur_high);
+- if (ret)
+- goto free_cpufreq_put;
++ if (ret) {
++ cpufreq_cpu_put(policy);
++ return;
++ }
+
+ prev_high = READ_ONCE(cpudata->prefcore_ranking);
+ highest_perf_changed = (prev_high != cur_high);
+@@ -838,8 +840,6 @@ static void amd_pstate_update_limits(uns
+ if (cur_high < CPPC_MAX_PERF)
+ sched_set_itmt_core_prio((int)cur_high, cpu);
+ }
+-
+-free_cpufreq_put:
+ cpufreq_cpu_put(policy);
+
+ if (!highest_perf_changed)
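
For readers less used to the scope-based locking helpers, here is a minimal
sketch of the pattern the patch converts to. struct foo, foo_update() and
foo_lock are made up for illustration; only DEFINE_MUTEX() and guard(mutex)
are real kernel APIs.

  #include <linux/cleanup.h>
  #include <linux/errno.h>
  #include <linux/mutex.h>
  #include <linux/types.h>

  struct foo {
      bool ready;
      int value;
  };

  static DEFINE_MUTEX(foo_lock);

  /* Hypothetical helper: guard(mutex) releases the lock automatically when
   * the scope is left, so every error path simply returns instead of
   * jumping to a shared unlock/cleanup label. */
  static int foo_update(struct foo *f)
  {
      guard(mutex)(&foo_lock);

      if (!f->ready)
          return -EINVAL;   /* unlocked automatically here */

      f->value++;
      return 0;             /* ... and here */
  }

Mixing a scope-released lock with a shared goto cleanup label is easy to get
wrong (the jump can bypass or duplicate part of the teardown), which is why
the error path above now puts the policy and returns directly.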
--- /dev/null
+From bff406bc042408c021e41a439698a346119c2f11 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Fri, 17 Jan 2025 22:46:26 +0000
+Subject: net: destroy dev->lock later in free_netdev()
+
+From: Eric Dumazet <edumazet@google.com>
+
+commit bff406bc042408c021e41a439698a346119c2f11 upstream.
+
+syzbot complained that free_netdev() was calling netif_napi_del()
+after the dev->lock mutex had been destroyed.
+
+This fires a warning for CONFIG_DEBUG_MUTEXES=y builds.
+
+Move mutex_destroy(&dev->lock) near the end of free_netdev().
+
+[1]
+DEBUG_LOCKS_WARN_ON(lock->magic != lock)
+ WARNING: CPU: 0 PID: 5971 at kernel/locking/mutex.c:564 __mutex_lock_common kernel/locking/mutex.c:564 [inline]
+ WARNING: CPU: 0 PID: 5971 at kernel/locking/mutex.c:564 __mutex_lock+0xdac/0xee0 kernel/locking/mutex.c:735
+Modules linked in:
+CPU: 0 UID: 0 PID: 5971 Comm: syz-executor Not tainted 6.13.0-rc7-syzkaller-01131-g8d20dcda404d #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 12/27/2024
+ RIP: 0010:__mutex_lock_common kernel/locking/mutex.c:564 [inline]
+ RIP: 0010:__mutex_lock+0xdac/0xee0 kernel/locking/mutex.c:735
+Code: 0f b6 04 38 84 c0 0f 85 1a 01 00 00 83 3d 6f 40 4c 04 00 75 19 90 48 c7 c7 60 84 0a 8c 48 c7 c6 00 85 0a 8c e8 f5 dc 91 f5 90 <0f> 0b 90 90 90 e9 c7 f3 ff ff 90 0f 0b 90 e9 29 f8 ff ff 90 0f 0b
+RSP: 0018:ffffc90003317580 EFLAGS: 00010246
+RAX: ee0f97edaf7b7d00 RBX: ffff8880299f8cb0 RCX: ffff8880323c9e00
+RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000
+RBP: ffffc90003317710 R08: ffffffff81602ac2 R09: 1ffff110170c519a
+R10: dffffc0000000000 R11: ffffed10170c519b R12: 0000000000000000
+R13: 0000000000000000 R14: 1ffff92000662ec4 R15: dffffc0000000000
+FS: 000055557a046500(0000) GS:ffff8880b8600000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007fd581d46ff8 CR3: 000000006f870000 CR4: 00000000003526f0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+Call Trace:
+ <TASK>
+ netdev_lock include/linux/netdevice.h:2691 [inline]
+ __netif_napi_del include/linux/netdevice.h:2829 [inline]
+ netif_napi_del include/linux/netdevice.h:2848 [inline]
+ free_netdev+0x2d9/0x610 net/core/dev.c:11621
+ netdev_run_todo+0xf21/0x10d0 net/core/dev.c:11189
+ nsim_destroy+0x3c3/0x620 drivers/net/netdevsim/netdev.c:1028
+ __nsim_dev_port_del+0x14b/0x1b0 drivers/net/netdevsim/dev.c:1428
+ nsim_dev_port_del_all drivers/net/netdevsim/dev.c:1440 [inline]
+ nsim_dev_reload_destroy+0x28a/0x490 drivers/net/netdevsim/dev.c:1661
+ nsim_drv_remove+0x58/0x160 drivers/net/netdevsim/dev.c:1676
+ device_remove drivers/base/dd.c:567 [inline]
+
+Fixes: 1b23cdbd2bbc ("net: protect netdev->napi_list with netdev_lock()")
+Reported-by: syzbot+85ff1051228a04613a32@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/netdev/678add43.050a0220.303755.0016.GAE@google.com/T/#u
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Link: https://patch.msgid.link/20250117224626.1427577-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/dev.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -11403,8 +11403,6 @@ void free_netdev(struct net_device *dev)
+ return;
+ }
+
+- mutex_destroy(&dev->lock);
+-
+ kfree(dev->ethtool);
+ netif_free_tx_queues(dev);
+ netif_free_rx_queues(dev);
+@@ -11431,6 +11429,8 @@ void free_netdev(struct net_device *dev)
+
+ netdev_free_phy_link_topology(dev);
+
++ mutex_destroy(&dev->lock);
++
+ /* Compatibility with error handling in drivers */
+ if (dev->reg_state == NETREG_UNINITIALIZED ||
+ dev->reg_state == NETREG_DUMMY) {
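
The rule the move enforces is that mutex_destroy() must come after the last
locker: with CONFIG_DEBUG_MUTEXES=y it clears the lock's debug magic, so the
later netif_napi_del() -> netdev_lock() call trips the DEBUG_LOCKS_WARN_ON
shown above. A generic, hypothetical teardown sketch (struct foo_dev and its
helpers are made-up names; only the mutex API is real):

  #include <linux/mutex.h>
  #include <linux/slab.h>

  struct foo_dev {
      struct mutex lock;
      /* napi list, queues, ... */
  };

  /* Hypothetical: still a user of the lock, so it must run first. */
  static void foo_del_napis(struct foo_dev *fd)
  {
      mutex_lock(&fd->lock);
      /* ... unlink NAPI-like state ... */
      mutex_unlock(&fd->lock);
  }

  static void foo_free(struct foo_dev *fd)
  {
      foo_del_napis(fd);          /* everything that takes fd->lock ... */

      mutex_destroy(&fd->lock);   /* ... must happen before this */
      kfree(fd);
  }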
--- /dev/null
+From c71a192976ded2f2f416d03c4f595cdd4478b825 Mon Sep 17 00:00:00 2001
+From: Jakub Kicinski <kuba@kernel.org>
+Date: Wed, 29 Jan 2025 19:15:18 -0800
+Subject: net: ipv6: fix dst refleaks in rpl, seg6 and ioam6 lwtunnels
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+commit c71a192976ded2f2f416d03c4f595cdd4478b825 upstream.
+
+dst_cache_get() gives us a reference; we need to release it.
+
+Discovered by the ioam6.sh test, now that kmemleak has been fixed to catch
+per-cpu memory leaks.
+
+Fixes: 985ec6f5e623 ("net: ipv6: rpl_iptunnel: mitigate 2-realloc issue")
+Fixes: 40475b63761a ("net: ipv6: seg6_iptunnel: mitigate 2-realloc issue")
+Fixes: dce525185bc9 ("net: ipv6: ioam6_iptunnel: mitigate 2-realloc issue")
+Reviewed-by: Justin Iurman <justin.iurman@uliege.be>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20250130031519.2716843-1-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/ioam6_iptunnel.c | 5 +++--
+ net/ipv6/rpl_iptunnel.c | 6 ++++--
+ net/ipv6/seg6_iptunnel.c | 6 ++++--
+ 3 files changed, 11 insertions(+), 6 deletions(-)
+
+--- a/net/ipv6/ioam6_iptunnel.c
++++ b/net/ipv6/ioam6_iptunnel.c
+@@ -336,7 +336,7 @@ static int ioam6_do_encap(struct net *ne
+
+ static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ {
+- struct dst_entry *dst = skb_dst(skb), *cache_dst;
++ struct dst_entry *dst = skb_dst(skb), *cache_dst = NULL;
+ struct in6_addr orig_daddr;
+ struct ioam6_lwt *ilwt;
+ int err = -EINVAL;
+@@ -407,7 +407,6 @@ do_encap:
+ cache_dst = ip6_route_output(net, NULL, &fl6);
+ if (cache_dst->error) {
+ err = cache_dst->error;
+- dst_release(cache_dst);
+ goto drop;
+ }
+
+@@ -429,8 +428,10 @@ do_encap:
+ return dst_output(net, sk, skb);
+ }
+ out:
++ dst_release(cache_dst);
+ return dst->lwtstate->orig_output(net, sk, skb);
+ drop:
++ dst_release(cache_dst);
+ kfree_skb(skb);
+ return err;
+ }
+--- a/net/ipv6/rpl_iptunnel.c
++++ b/net/ipv6/rpl_iptunnel.c
+@@ -232,7 +232,6 @@ static int rpl_output(struct net *net, s
+ dst = ip6_route_output(net, NULL, &fl6);
+ if (dst->error) {
+ err = dst->error;
+- dst_release(dst);
+ goto drop;
+ }
+
+@@ -254,6 +253,7 @@ static int rpl_output(struct net *net, s
+ return dst_output(net, sk, skb);
+
+ drop:
++ dst_release(dst);
+ kfree_skb(skb);
+ return err;
+ }
+@@ -272,8 +272,10 @@ static int rpl_input(struct sk_buff *skb
+ local_bh_enable();
+
+ err = rpl_do_srh(skb, rlwt, dst);
+- if (unlikely(err))
++ if (unlikely(err)) {
++ dst_release(dst);
+ goto drop;
++ }
+
+ if (!dst) {
+ ip6_route_input(skb);
+--- a/net/ipv6/seg6_iptunnel.c
++++ b/net/ipv6/seg6_iptunnel.c
+@@ -482,8 +482,10 @@ static int seg6_input_core(struct net *n
+ local_bh_enable();
+
+ err = seg6_do_srh(skb, dst);
+- if (unlikely(err))
++ if (unlikely(err)) {
++ dst_release(dst);
+ goto drop;
++ }
+
+ if (!dst) {
+ ip6_route_input(skb);
+@@ -571,7 +573,6 @@ static int seg6_output_core(struct net *
+ dst = ip6_route_output(net, NULL, &fl6);
+ if (dst->error) {
+ err = dst->error;
+- dst_release(dst);
+ goto drop;
+ }
+
+@@ -596,6 +597,7 @@ static int seg6_output_core(struct net *
+
+ return dst_output(net, sk, skb);
+ drop:
++ dst_release(dst);
+ kfree_skb(skb);
+ return err;
+ }
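
All three tunnels end up with the same shape after the fix: take the dst
reference once, hand it to the skb on success, and drop it exactly once at
the shared exit label on failure. Initializing the pointer to NULL keeps the
early error paths safe, since dst_release(NULL) is a no-op. A condensed,
hypothetical sketch of that shape (foo_output() and its protocol check are
illustrative, not the actual lwtunnel code):

  #include <linux/if_ether.h>
  #include <linux/ipv6.h>
  #include <linux/skbuff.h>
  #include <net/dst.h>
  #include <net/ip6_route.h>

  static int foo_output(struct net *net, struct sock *sk, struct sk_buff *skb)
  {
      struct dst_entry *cache_dst = NULL; /* dst_release(NULL) is a no-op */
      struct flowi6 fl6 = {};
      int err = -EINVAL;

      if (skb->protocol != htons(ETH_P_IPV6))
          goto drop;                  /* early failure, cache_dst still NULL */

      fl6.daddr = ipv6_hdr(skb)->daddr;
      cache_dst = ip6_route_output(net, NULL, &fl6);
      if (cache_dst->error) {
          err = cache_dst->error;
          goto drop;                  /* released at the label, not here */
      }

      skb_dst_drop(skb);
      skb_dst_set(skb, cache_dst);    /* reference handed to the skb */
      return dst_output(net, sk, skb);

  drop:
      dst_release(cache_dst);         /* exactly one release per path */
      kfree_skb(skb);
      return err;
  }

The input paths in the patch apply the same rule by dropping the reference
they hold before jumping to their shared drop label.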
--- /dev/null
+From f3f08c3acfb8860e07a22814a344e83c99ad7398 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Mon, 10 Feb 2025 09:27:09 -1000
+Subject: sched_ext: Fix incorrect assumption about migration disabled tasks in task_can_run_on_remote_rq()
+
+From: Tejun Heo <tj@kernel.org>
+
+commit f3f08c3acfb8860e07a22814a344e83c99ad7398 upstream.
+
+While fixing migration disabled task handling, 32966821574c ("sched_ext: Fix
+migration disabled handling in targeted dispatches") assumed that a
+migration disabled task's ->cpus_ptr would only have the pinned CPU. While
+this is eventually true for migration disabled tasks that are switched out,
+->cpus_ptr update is performed by migrate_disable_switch() which is called
+right before context_switch() in __schedule(). However, the task is
+enqueued earlier during pick_next_task() via put_prev_task_scx(), so there
+is a race window where another CPU can see the task on a DSQ.
+
+If the CPU tries to dispatch the migration disabled task while in that
+window, task_allowed_on_cpu() will succeed and task_can_run_on_remote_rq()
+will subsequently trigger SCHED_WARN_ON(is_migration_disabled()).
+
+ WARNING: CPU: 8 PID: 1837 at kernel/sched/ext.c:2466 task_can_run_on_remote_rq+0x12e/0x140
+ Sched_ext: layered (enabled+all), task: runnable_at=-10ms
+ RIP: 0010:task_can_run_on_remote_rq+0x12e/0x140
+ ...
+ <TASK>
+ consume_dispatch_q+0xab/0x220
+ scx_bpf_dsq_move_to_local+0x58/0xd0
+ bpf_prog_84dd17b0654b6cf0_layered_dispatch+0x290/0x1cfa
+ bpf__sched_ext_ops_dispatch+0x4b/0xab
+ balance_one+0x1fe/0x3b0
+ balance_scx+0x61/0x1d0
+ prev_balance+0x46/0xc0
+ __pick_next_task+0x73/0x1c0
+ __schedule+0x206/0x1730
+ schedule+0x3a/0x160
+ __do_sys_sched_yield+0xe/0x20
+ do_syscall_64+0xbb/0x1e0
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+
+Fix it by converting the SCHED_WARN_ON() back to a regular failure path. Also,
+perform the migration disabled test before task_allowed_on_cpu() test so
+that BPF schedulers which fail to handle migration disabled tasks can be
+noticed easily.
+
+While at it, adjust the scx_ops_error() message for the
+!task_allowed_on_cpu() case for brevity and consistency.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Fixes: 32966821574c ("sched_ext: Fix migration disabled handling in targeted dispatches")
+Acked-by: Andrea Righi <arighi@nvidia.com>
+Reported-by: Jake Hillion <jakehillion@meta.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/ext.c | 29 +++++++++++++++++++++--------
+ 1 file changed, 21 insertions(+), 8 deletions(-)
+
+--- a/kernel/sched/ext.c
++++ b/kernel/sched/ext.c
+@@ -2324,6 +2324,25 @@ static bool task_can_run_on_remote_rq(st
+ SCHED_WARN_ON(task_cpu(p) == cpu);
+
+ /*
++ * If @p has migration disabled, @p->cpus_ptr is updated to contain only
++ * the pinned CPU in migrate_disable_switch() while @p is being switched
++ * out. However, put_prev_task_scx() is called before @p->cpus_ptr is
++ * updated and thus another CPU may see @p on a DSQ inbetween leading to
++ * @p passing the below task_allowed_on_cpu() check while migration is
++ * disabled.
++ *
++ * Test the migration disabled state first as the race window is narrow
++ * and the BPF scheduler failing to check migration disabled state can
++ * easily be masked if task_allowed_on_cpu() is done first.
++ */
++ if (unlikely(is_migration_disabled(p))) {
++ if (trigger_error)
++ scx_ops_error("SCX_DSQ_LOCAL[_ON] cannot move migration disabled %s[%d] from CPU %d to %d",
++ p->comm, p->pid, task_cpu(p), cpu);
++ return false;
++ }
++
++ /*
+ * We don't require the BPF scheduler to avoid dispatching to offline
+ * CPUs mostly for convenience but also because CPUs can go offline
+ * between scx_bpf_dsq_insert() calls and here. Trigger error iff the
+@@ -2331,17 +2350,11 @@ static bool task_can_run_on_remote_rq(st
+ */
+ if (!task_allowed_on_cpu(p, cpu)) {
+ if (trigger_error)
+- scx_ops_error("SCX_DSQ_LOCAL[_ON] verdict target cpu %d not allowed for %s[%d]",
+- cpu_of(rq), p->comm, p->pid);
++ scx_ops_error("SCX_DSQ_LOCAL[_ON] target CPU %d not allowed for %s[%d]",
++ cpu, p->comm, p->pid);
+ return false;
+ }
+
+- /*
+- * If @p has migration disabled, @p->cpus_ptr only contains its current
+- * CPU and the above task_allowed_on_cpu() test should have failed.
+- */
+- SCHED_WARN_ON(is_migration_disabled(p));
+-
+ if (!scx_rq_online(rq))
+ return false;
+
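
The ordering argument in the new comment generalizes: when two checks can
both reject a task, test the one that pinpoints the actual bug first, so the
broader check cannot mask it. A stripped-down, hypothetical rendering of the
resulting order (can_move_task() is not a kernel function):

  #include <linux/cpumask.h>
  #include <linux/sched.h>

  static bool can_move_task(struct task_struct *p, int cpu)
  {
      /* Definitive condition first: a migration disabled task must stay
       * where it is, even while ->cpus_ptr has not yet been narrowed to
       * the pinned CPU. */
      if (is_migration_disabled(p))
          return false;

      /* Only then consult the affinity mask, so a BPF scheduler that
       * mishandles migration disabled tasks is reported, not masked. */
      if (!cpumask_test_cpu(cpu, p->cpus_ptr))
          return false;

      return true;
  }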
--- /dev/null
+From 3d4114a1d34413dfffa0094c2eb7b95e61087abd Mon Sep 17 00:00:00 2001
+From: Avri Altman <avri.altman@wdc.com>
+Date: Tue, 28 Jan 2025 09:12:06 +0200
+Subject: scsi: ufs: core: Ensure clk_gating.lock is used only after initialization
+
+From: Avri Altman <avri.altman@wdc.com>
+
+commit 3d4114a1d34413dfffa0094c2eb7b95e61087abd upstream.
+
+Address a lockdep warning triggered by using clk_gating.lock before it has
+been properly initialized. The warning is as follows:
+
+[ 4.388838] INFO: trying to register non-static key.
+[ 4.395673] The code is fine but needs lockdep annotation, or maybe
+[ 4.402118] you didn't initialize this object before use?
+[ 4.407673] turning off the locking correctness validator.
+[ 4.413334] CPU: 5 UID: 0 PID: 58 Comm: kworker/u32:1 Not tainted 6.12-rc1 #185
+[ 4.413343] Hardware name: Qualcomm Technologies, Inc. Robotics RB5 (DT)
+[ 4.413362] Call trace:
+[ 4.413364] show_stack+0x18/0x24 (C)
+[ 4.413374] dump_stack_lvl+0x90/0xd0
+[ 4.413384] dump_stack+0x18/0x24
+[ 4.413392] register_lock_class+0x498/0x4a8
+[ 4.413400] __lock_acquire+0xb4/0x1b90
+[ 4.413406] lock_acquire+0x114/0x310
+[ 4.413413] _raw_spin_lock_irqsave+0x60/0x88
+[ 4.413423] ufshcd_setup_clocks+0x2c0/0x490
+[ 4.413433] ufshcd_init+0x198/0x10ec
+[ 4.413437] ufshcd_pltfrm_init+0x600/0x7c0
+[ 4.413444] ufs_qcom_probe+0x20/0x58
+[ 4.413449] platform_probe+0x68/0xd8
+[ 4.413459] really_probe+0xbc/0x268
+[ 4.413466] __driver_probe_device+0x78/0x12c
+[ 4.413473] driver_probe_device+0x40/0x11c
+[ 4.413481] __device_attach_driver+0xb8/0xf8
+[ 4.413489] bus_for_each_drv+0x84/0xe4
+[ 4.413495] __device_attach+0xfc/0x18c
+[ 4.413502] device_initial_probe+0x14/0x20
+[ 4.413510] bus_probe_device+0xb0/0xb4
+[ 4.413517] deferred_probe_work_func+0x8c/0xc8
+[ 4.413524] process_scheduled_works+0x250/0x658
+[ 4.413534] worker_thread+0x15c/0x2c8
+[ 4.413542] kthread+0x134/0x200
+[ 4.413550] ret_from_fork+0x10/0x20
+
+To fix this issue, ensure that the spinlock is initialized before it is
+used in ufshcd_setup_clocks(). Do that unconditionally, as initializing a
+spinlock is a fast operation.
+
+Fixes: 209f4e43b806 ("scsi: ufs: core: Introduce a new clock_gating lock")
+Reported-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Signed-off-by: Avri Altman <avri.altman@wdc.com>
+Link: https://lore.kernel.org/r/20250128071207.75494-2-avri.altman@wdc.com
+Reviewed-by: Bean Huo <beanhuo@micron.com>
+Reviewed-by: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/ufs/core/ufshcd.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -2126,8 +2126,6 @@ static void ufshcd_init_clk_gating(struc
+ INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
+ INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
+
+- spin_lock_init(&hba->clk_gating.lock);
+-
+ hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(
+ "ufs_clk_gating_%d", WQ_MEM_RECLAIM | WQ_HIGHPRI,
+ hba->host->host_no);
+@@ -10490,6 +10488,12 @@ int ufshcd_init(struct ufs_hba *hba, voi
+ hba->irq = irq;
+ hba->vps = &ufs_hba_vps;
+
++ /*
++ * Initialize clk_gating.lock early since it is being used in
++ * ufshcd_setup_clocks()
++ */
++ spin_lock_init(&hba->clk_gating.lock);
++
+ err = ufshcd_hba_init(hba);
+ if (err)
+ goto out_error;
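
"INFO: trying to register non-static key" is lockdep's usual complaint about
taking a dynamically allocated lock that was never run through
spin_lock_init(); the fix simply hoists the initialization ahead of the
first user. A generic, hypothetical probe-path sketch (struct foo_hba and
foo_setup_clocks() are made up; only the spinlock API is real):

  #include <linux/spinlock.h>

  struct foo_hba {
      spinlock_t gating_lock;
      /* ... */
  };

  static int foo_setup_clocks(struct foo_hba *hba)
  {
      unsigned long flags;

      spin_lock_irqsave(&hba->gating_lock, flags);    /* first user */
      /* ... toggle clock gating state ... */
      spin_unlock_irqrestore(&hba->gating_lock, flags);
      return 0;
  }

  static int foo_init(struct foo_hba *hba)
  {
      /* Initialize unconditionally and early; spin_lock_init() is cheap
       * and everything called below may take the lock. */
      spin_lock_init(&hba->gating_lock);

      return foo_setup_clocks(hba);
  }

Because spin_lock_init() only sets up the lock and its lockdep class, doing
it unconditionally up front costs nothing, which is what the hunk above does
in ufshcd_init().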
sched-deadline-restore-dl_server-bandwidth-on-non-destructive-root-domain-changes.patch
sched-deadline-correctly-account-for-allocated-bandwidth-during-hotplug.patch
sched-deadline-check-bandwidth-overflow-earlier-for-hotplug.patch
+net-destroy-dev-lock-later-in-free_netdev.patch
+cpufreq-amd-pstate-remove-the-goto-label-in-amd_pstate_update_limits.patch
+net-ipv6-fix-dst-refleaks-in-rpl-seg6-and-ioam6-lwtunnels.patch
+scsi-ufs-core-ensure-clk_gating.lock-is-used-only-after-initialization.patch
+sched_ext-fix-incorrect-assumption-about-migration-disabled-tasks-in-task_can_run_on_remote_rq.patch