--- /dev/null
+From 890ed45bde808c422c3c27d3285fc45affa0f930 Mon Sep 17 00:00:00 2001
+From: Christian Brauner <brauner@kernel.org>
+Date: Tue, 11 Feb 2025 18:16:00 +0100
+Subject: acct: block access to kernel internal filesystems
+
+From: Christian Brauner <brauner@kernel.org>
+
+commit 890ed45bde808c422c3c27d3285fc45affa0f930 upstream.
+
+There's no point in allowing the accounting file to live on anything
+kernel internal, nor on procfs or sysfs.
+
+Link: https://lore.kernel.org/r/20250127091811.3183623-1-quzicheng@huawei.com
+Link: https://lore.kernel.org/r/20250211-work-acct-v1-2-1c16aecab8b3@kernel.org
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Reviewed-by: Amir Goldstein <amir73il@gmail.com>
+Reported-by: Zicheng Qu <quzicheng@huawei.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/acct.c | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+--- a/kernel/acct.c
++++ b/kernel/acct.c
+@@ -225,6 +225,20 @@ static int acct_on(struct filename *path
+ return -EACCES;
+ }
+
++ /* Exclude kernel kernel internal filesystems. */
++ if (file_inode(file)->i_sb->s_flags & (SB_NOUSER | SB_KERNMOUNT)) {
++ kfree(acct);
++ filp_close(file, NULL);
++ return -EINVAL;
++ }
++
++ /* Exclude procfs and sysfs. */
++ if (file_inode(file)->i_sb->s_iflags & SB_I_USERNS_VISIBLE) {
++ kfree(acct);
++ filp_close(file, NULL);
++ return -EINVAL;
++ }
++
+ if (!(file->f_mode & FMODE_CAN_WRITE)) {
+ kfree(acct);
+ filp_close(file, NULL);
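For reference, the two new checks above boil down to a single predicate on the superblock of the accounting file. A minimal sketch of that predicate as a standalone helper, assuming the usual <linux/fs.h> definitions (the helper name is invented for illustration; it is not part of the upstream commit):

        static bool acct_file_allowed(struct file *file)
        {
                struct super_block *sb = file_inode(file)->i_sb;

                /* Kernel-internal mounts (SB_NOUSER, SB_KERNMOUNT) are rejected. */
                if (sb->s_flags & (SB_NOUSER | SB_KERNMOUNT))
                        return false;

                /* So are procfs and sysfs, which carry SB_I_USERNS_VISIBLE. */
                if (sb->s_iflags & SB_I_USERNS_VISIBLE)
                        return false;

                return true;
        }

Either failure maps to -EINVAL in acct_on(), after freeing the partially set up bsd_acct_struct and closing the file, as the hunk above shows.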
--- /dev/null
+From 56d5f3eba3f5de0efdd556de4ef381e109b973a9 Mon Sep 17 00:00:00 2001
+From: Christian Brauner <brauner@kernel.org>
+Date: Tue, 11 Feb 2025 18:15:59 +0100
+Subject: acct: perform last write from workqueue
+
+From: Christian Brauner <brauner@kernel.org>
+
+commit 56d5f3eba3f5de0efdd556de4ef381e109b973a9 upstream.
+
+In [1] it was reported that the acct(2) system call can be used to
+trigger a NULL deref in cases where it is set to write to a file that
+triggers an internal lookup. This can e.g. happen when pointing acct(2)
+to /sys/power/resume. At the point where the write to this file
+happens, the calling task has already exited and called exit_fs(). A
+lookup will thus trigger a NULL deref when accessing current->fs.
+
+Reorganize the code so that the final write happens from the
+workqueue but with the caller's credentials. This preserves the
+(strange) permission model and has almost no regression risk.
+
+This API should stop existing, though.
+
+Link: https://lore.kernel.org/r/20250127091811.3183623-1-quzicheng@huawei.com [1]
+Link: https://lore.kernel.org/r/20250211-work-acct-v1-1-1c16aecab8b3@kernel.org
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Reported-by: Zicheng Qu <quzicheng@huawei.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/acct.c | 120 +++++++++++++++++++++++++++++++++-------------------------
+ 1 file changed, 70 insertions(+), 50 deletions(-)
+
+--- a/kernel/acct.c
++++ b/kernel/acct.c
+@@ -85,48 +85,50 @@ struct bsd_acct_struct {
+ atomic_long_t count;
+ struct rcu_head rcu;
+ struct mutex lock;
+- int active;
++ bool active;
++ bool check_space;
+ unsigned long needcheck;
+ struct file *file;
+ struct pid_namespace *ns;
+ struct work_struct work;
+ struct completion done;
++ acct_t ac;
+ };
+
+-static void do_acct_process(struct bsd_acct_struct *acct);
++static void fill_ac(struct bsd_acct_struct *acct);
++static void acct_write_process(struct bsd_acct_struct *acct);
+
+ /*
+ * Check the amount of free space and suspend/resume accordingly.
+ */
+-static int check_free_space(struct bsd_acct_struct *acct)
++static bool check_free_space(struct bsd_acct_struct *acct)
+ {
+ struct kstatfs sbuf;
+
+- if (time_is_after_jiffies(acct->needcheck))
+- goto out;
++ if (!acct->check_space)
++ return acct->active;
+
+ /* May block */
+ if (vfs_statfs(&acct->file->f_path, &sbuf))
+- goto out;
++ return acct->active;
+
+ if (acct->active) {
+ u64 suspend = sbuf.f_blocks * SUSPEND;
+ do_div(suspend, 100);
+ if (sbuf.f_bavail <= suspend) {
+- acct->active = 0;
++ acct->active = false;
+ pr_info("Process accounting paused\n");
+ }
+ } else {
+ u64 resume = sbuf.f_blocks * RESUME;
+ do_div(resume, 100);
+ if (sbuf.f_bavail >= resume) {
+- acct->active = 1;
++ acct->active = true;
+ pr_info("Process accounting resumed\n");
+ }
+ }
+
+ acct->needcheck = jiffies + ACCT_TIMEOUT*HZ;
+-out:
+ return acct->active;
+ }
+
+@@ -171,7 +173,11 @@ static void acct_pin_kill(struct fs_pin
+ {
+ struct bsd_acct_struct *acct = to_acct(pin);
+ mutex_lock(&acct->lock);
+- do_acct_process(acct);
++ /*
++ * Fill the accounting struct with the exiting task's info
++ * before punting to the workqueue.
++ */
++ fill_ac(acct);
+ schedule_work(&acct->work);
+ wait_for_completion(&acct->done);
+ cmpxchg(&acct->ns->bacct, pin, NULL);
+@@ -184,6 +190,9 @@ static void close_work(struct work_struc
+ {
+ struct bsd_acct_struct *acct = container_of(work, struct bsd_acct_struct, work);
+ struct file *file = acct->file;
++
++ /* We were fired by acct_pin_kill() which holds acct->lock. */
++ acct_write_process(acct);
+ if (file->f_op->flush)
+ file->f_op->flush(file, NULL);
+ __fput_sync(file);
+@@ -412,13 +421,27 @@ static u32 encode_float(u64 value)
+ * do_exit() or when switching to a different output file.
+ */
+
+-static void fill_ac(acct_t *ac)
++static void fill_ac(struct bsd_acct_struct *acct)
+ {
+ struct pacct_struct *pacct = &current->signal->pacct;
++ struct file *file = acct->file;
++ acct_t *ac = &acct->ac;
+ u64 elapsed, run_time;
+ time64_t btime;
+ struct tty_struct *tty;
+
++ lockdep_assert_held(&acct->lock);
++
++ if (time_is_after_jiffies(acct->needcheck)) {
++ acct->check_space = false;
++
++ /* Don't fill in @ac if nothing will be written. */
++ if (!acct->active)
++ return;
++ } else {
++ acct->check_space = true;
++ }
++
+ /*
+ * Fill the accounting struct with the needed info as recorded
+ * by the different kernel functions.
+@@ -466,64 +489,61 @@ static void fill_ac(acct_t *ac)
+ ac->ac_majflt = encode_comp_t(pacct->ac_majflt);
+ ac->ac_exitcode = pacct->ac_exitcode;
+ spin_unlock_irq(&current->sighand->siglock);
+-}
+-/*
+- * do_acct_process does all actual work. Caller holds the reference to file.
+- */
+-static void do_acct_process(struct bsd_acct_struct *acct)
+-{
+- acct_t ac;
+- unsigned long flim;
+- const struct cred *orig_cred;
+- struct file *file = acct->file;
+-
+- /*
+- * Accounting records are not subject to resource limits.
+- */
+- flim = rlimit(RLIMIT_FSIZE);
+- current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
+- /* Perform file operations on behalf of whoever enabled accounting */
+- orig_cred = override_creds(file->f_cred);
+
+- /*
+- * First check to see if there is enough free_space to continue
+- * the process accounting system.
+- */
+- if (!check_free_space(acct))
+- goto out;
+-
+- fill_ac(&ac);
+ /* we really need to bite the bullet and change layout */
+- ac.ac_uid = from_kuid_munged(file->f_cred->user_ns, orig_cred->uid);
+- ac.ac_gid = from_kgid_munged(file->f_cred->user_ns, orig_cred->gid);
++ ac->ac_uid = from_kuid_munged(file->f_cred->user_ns, current_uid());
++ ac->ac_gid = from_kgid_munged(file->f_cred->user_ns, current_gid());
+ #if ACCT_VERSION == 1 || ACCT_VERSION == 2
+ /* backward-compatible 16 bit fields */
+- ac.ac_uid16 = ac.ac_uid;
+- ac.ac_gid16 = ac.ac_gid;
++ ac->ac_uid16 = ac->ac_uid;
++ ac->ac_gid16 = ac->ac_gid;
+ #elif ACCT_VERSION == 3
+ {
+ struct pid_namespace *ns = acct->ns;
+
+- ac.ac_pid = task_tgid_nr_ns(current, ns);
++ ac->ac_pid = task_tgid_nr_ns(current, ns);
+ rcu_read_lock();
+- ac.ac_ppid = task_tgid_nr_ns(rcu_dereference(current->real_parent),
+- ns);
++ ac->ac_ppid = task_tgid_nr_ns(rcu_dereference(current->real_parent), ns);
+ rcu_read_unlock();
+ }
+ #endif
++}
++
++static void acct_write_process(struct bsd_acct_struct *acct)
++{
++ struct file *file = acct->file;
++ const struct cred *cred;
++ acct_t *ac = &acct->ac;
++
++ /* Perform file operations on behalf of whoever enabled accounting */
++ cred = override_creds(file->f_cred);
++
+ /*
+- * Get freeze protection. If the fs is frozen, just skip the write
+- * as we could deadlock the system otherwise.
++ * First check to see if there is enough free_space to continue
++ * the process accounting system. Then get freeze protection. If
++ * the fs is frozen, just skip the write as we could deadlock
++ * the system otherwise.
+ */
+- if (file_start_write_trylock(file)) {
++ if (check_free_space(acct) && file_start_write_trylock(file)) {
+ /* it's been opened O_APPEND, so position is irrelevant */
+ loff_t pos = 0;
+- __kernel_write(file, &ac, sizeof(acct_t), &pos);
++ __kernel_write(file, ac, sizeof(acct_t), &pos);
+ file_end_write(file);
+ }
+-out:
++
++ revert_creds(cred);
++}
++
++static void do_acct_process(struct bsd_acct_struct *acct)
++{
++ unsigned long flim;
++
++ /* Accounting records are not subject to resource limits. */
++ flim = rlimit(RLIMIT_FSIZE);
++ current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
++ fill_ac(acct);
++ acct_write_process(acct);
+ current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
+- revert_creds(orig_cred);
+ }
+
+ /**
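The restructuring above splits the old do_acct_process() into fill_ac(), which runs in the context of the exiting task while acct->lock is held, and acct_write_process(), which runs from close_work() on the workqueue. The essential pattern, defer the I/O but keep the credentials of whoever enabled accounting, looks roughly like this (a condensed sketch with invented names such as acct_ctx; it is not the patch's code):

        struct acct_ctx {
                struct work_struct work;
                struct completion done;
                struct file *file;
                acct_t record;          /* filled in by the exiting task */
        };

        static void acct_ctx_write(struct work_struct *work)
        {
                struct acct_ctx *ctx = container_of(work, struct acct_ctx, work);
                const struct cred *old;
                loff_t pos = 0;

                /* Write on behalf of whoever opened the accounting file. */
                old = override_creds(ctx->file->f_cred);
                if (file_start_write_trylock(ctx->file)) {
                        __kernel_write(ctx->file, &ctx->record, sizeof(ctx->record), &pos);
                        file_end_write(ctx->file);
                }
                revert_creds(old);
                complete(&ctx->done);
        }

Because the write now happens from a worker thread that has not called exit_fs(), any lookup it triggers (as with /sys/power/resume) sees a valid current->fs, which is what removes the NULL dereference.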
--- /dev/null
+From 6d1f86610f23b0bc334d6506a186f21a98f51392 Mon Sep 17 00:00:00 2001
+From: John Veness <john-linux@pelago.org.uk>
+Date: Mon, 17 Feb 2025 12:15:50 +0000
+Subject: ALSA: hda/conexant: Add quirk for HP ProBook 450 G4 mute LED
+
+From: John Veness <john-linux@pelago.org.uk>
+
+commit 6d1f86610f23b0bc334d6506a186f21a98f51392 upstream.
+
+Allows the LED on the dedicated mute button on the HP ProBook 450 G4
+laptop to change colour correctly.
+
+Signed-off-by: John Veness <john-linux@pelago.org.uk>
+Cc: <stable@vger.kernel.org>
+Link: https://patch.msgid.link/2fb55d48-6991-4a42-b591-4c78f2fad8d7@pelago.org.uk
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/pci/hda/patch_conexant.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -1098,6 +1098,7 @@ static const struct snd_pci_quirk cxt506
+ SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
+ SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
+ SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
++ SND_PCI_QUIRK(0x103c, 0x8231, "HP ProBook 450 G4", CXT_FIXUP_MUTE_LED_GPIO),
+ SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
+ SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
--- /dev/null
+From 07b598c0e6f06a0f254c88dafb4ad50f8a8c6eea Mon Sep 17 00:00:00 2001
+From: Gavrilov Ilia <Ilia.Gavrilov@infotecs.ru>
+Date: Thu, 13 Feb 2025 15:20:55 +0000
+Subject: drop_monitor: fix incorrect initialization order
+
+From: Gavrilov Ilia <Ilia.Gavrilov@infotecs.ru>
+
+commit 07b598c0e6f06a0f254c88dafb4ad50f8a8c6eea upstream.
+
+Syzkaller reports the following bug:
+
+BUG: spinlock bad magic on CPU#1, syz-executor.0/7995
+ lock: 0xffff88805303f3e0, .magic: 00000000, .owner: <none>/-1, .owner_cpu: 0
+CPU: 1 PID: 7995 Comm: syz-executor.0 Tainted: G E 5.10.209+ #1
+Hardware name: VMware, Inc. VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 11/12/2020
+Call Trace:
+ __dump_stack lib/dump_stack.c:77 [inline]
+ dump_stack+0x119/0x179 lib/dump_stack.c:118
+ debug_spin_lock_before kernel/locking/spinlock_debug.c:83 [inline]
+ do_raw_spin_lock+0x1f6/0x270 kernel/locking/spinlock_debug.c:112
+ __raw_spin_lock_irqsave include/linux/spinlock_api_smp.h:117 [inline]
+ _raw_spin_lock_irqsave+0x50/0x70 kernel/locking/spinlock.c:159
+ reset_per_cpu_data+0xe6/0x240 [drop_monitor]
+ net_dm_cmd_trace+0x43d/0x17a0 [drop_monitor]
+ genl_family_rcv_msg_doit+0x22f/0x330 net/netlink/genetlink.c:739
+ genl_family_rcv_msg net/netlink/genetlink.c:783 [inline]
+ genl_rcv_msg+0x341/0x5a0 net/netlink/genetlink.c:800
+ netlink_rcv_skb+0x14d/0x440 net/netlink/af_netlink.c:2497
+ genl_rcv+0x29/0x40 net/netlink/genetlink.c:811
+ netlink_unicast_kernel net/netlink/af_netlink.c:1322 [inline]
+ netlink_unicast+0x54b/0x800 net/netlink/af_netlink.c:1348
+ netlink_sendmsg+0x914/0xe00 net/netlink/af_netlink.c:1916
+ sock_sendmsg_nosec net/socket.c:651 [inline]
+ __sock_sendmsg+0x157/0x190 net/socket.c:663
+ ____sys_sendmsg+0x712/0x870 net/socket.c:2378
+ ___sys_sendmsg+0xf8/0x170 net/socket.c:2432
+ __sys_sendmsg+0xea/0x1b0 net/socket.c:2461
+ do_syscall_64+0x30/0x40 arch/x86/entry/common.c:46
+ entry_SYSCALL_64_after_hwframe+0x62/0xc7
+RIP: 0033:0x7f3f9815aee9
+Code: ff ff c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 40 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b0 ff ff ff f7 d8 64 89 01 48
+RSP: 002b:00007f3f972bf0c8 EFLAGS: 00000246 ORIG_RAX: 000000000000002e
+RAX: ffffffffffffffda RBX: 00007f3f9826d050 RCX: 00007f3f9815aee9
+RDX: 0000000020000000 RSI: 0000000020001300 RDI: 0000000000000007
+RBP: 00007f3f981b63bd R08: 0000000000000000 R09: 0000000000000000
+R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000
+R13: 000000000000006e R14: 00007f3f9826d050 R15: 00007ffe01ee6768
+
+If drop_monitor is built as a kernel module, syzkaller may have time
+to send a netlink NET_DM_CMD_START message during module loading.
+This will call the net_dm_monitor_start() function, which uses
+a spinlock that has not yet been initialized.
+
+To fix this, let's place resource initialization above the registration
+of the generic netlink family.
+
+Found by InfoTeCS on behalf of Linux Verification Center
+(linuxtesting.org) with Syzkaller.
+
+Fixes: 9a8afc8d3962 ("Network Drop Monitor: Adding drop monitor implementation & Netlink protocol")
+Cc: stable@vger.kernel.org
+Signed-off-by: Ilia Gavrilov <Ilia.Gavrilov@infotecs.ru>
+Reviewed-by: Ido Schimmel <idosch@nvidia.com>
+Link: https://patch.msgid.link/20250213152054.2785669-1-Ilia.Gavrilov@infotecs.ru
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/drop_monitor.c | 29 ++++++++++++++---------------
+ 1 file changed, 14 insertions(+), 15 deletions(-)
+
+--- a/net/core/drop_monitor.c
++++ b/net/core/drop_monitor.c
+@@ -1729,30 +1729,30 @@ static int __init init_net_drop_monitor(
+ return -ENOSPC;
+ }
+
+- rc = genl_register_family(&net_drop_monitor_family);
+- if (rc) {
+- pr_err("Could not create drop monitor netlink family\n");
+- return rc;
++ for_each_possible_cpu(cpu) {
++ net_dm_cpu_data_init(cpu);
++ net_dm_hw_cpu_data_init(cpu);
+ }
+- WARN_ON(net_drop_monitor_family.mcgrp_offset != NET_DM_GRP_ALERT);
+
+ rc = register_netdevice_notifier(&dropmon_net_notifier);
+ if (rc < 0) {
+ pr_crit("Failed to register netdevice notifier\n");
++ return rc;
++ }
++
++ rc = genl_register_family(&net_drop_monitor_family);
++ if (rc) {
++ pr_err("Could not create drop monitor netlink family\n");
+ goto out_unreg;
+ }
++ WARN_ON(net_drop_monitor_family.mcgrp_offset != NET_DM_GRP_ALERT);
+
+ rc = 0;
+
+- for_each_possible_cpu(cpu) {
+- net_dm_cpu_data_init(cpu);
+- net_dm_hw_cpu_data_init(cpu);
+- }
+-
+ goto out;
+
+ out_unreg:
+- genl_unregister_family(&net_drop_monitor_family);
++ WARN_ON(unregister_netdevice_notifier(&dropmon_net_notifier));
+ out:
+ return rc;
+ }
+@@ -1761,19 +1761,18 @@ static void exit_net_drop_monitor(void)
+ {
+ int cpu;
+
+- BUG_ON(unregister_netdevice_notifier(&dropmon_net_notifier));
+-
+ /*
+ * Because of the module_get/put we do in the trace state change path
+ * we are guaranteed not to have any current users when we get here
+ */
++ BUG_ON(genl_unregister_family(&net_drop_monitor_family));
++
++ BUG_ON(unregister_netdevice_notifier(&dropmon_net_notifier));
+
+ for_each_possible_cpu(cpu) {
+ net_dm_hw_cpu_data_fini(cpu);
+ net_dm_cpu_data_fini(cpu);
+ }
+-
+- BUG_ON(genl_unregister_family(&net_drop_monitor_family));
+ }
+
+ module_init(init_net_drop_monitor);
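The rule the reordering enforces: bring up all internal state (the per-CPU data with its spinlocks, then the netdevice notifier) before genl_register_family(), because registration is the point at which NET_DM_CMD_START becomes reachable from user space; tear things down in the reverse order on error and on module exit. A condensed sketch of that ordering, where my_cpu_data_init, my_net_notifier and my_genl_family are placeholder names (the register calls are the real kernel APIs):

        static int __init my_drop_monitor_init(void)
        {
                int rc, cpu;

                /* 1. Private state first: nothing can race with us yet. */
                for_each_possible_cpu(cpu)
                        my_cpu_data_init(cpu);          /* sets up the per-CPU spinlocks */

                /* 2. Kernel-internal hooks next. */
                rc = register_netdevice_notifier(&my_net_notifier);
                if (rc < 0)
                        return rc;

                /* 3. The user-visible netlink family last. */
                rc = genl_register_family(&my_genl_family);
                if (rc) {
                        unregister_netdevice_notifier(&my_net_notifier);
                        return rc;
                }

                return 0;
        }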
--- /dev/null
+From 2b9df00cded911e2ca2cfae5c45082166b24f8aa Mon Sep 17 00:00:00 2001
+From: Niravkumar L Rabara <niravkumar.l.rabara@intel.com>
+Date: Mon, 10 Feb 2025 13:35:49 +0800
+Subject: mtd: rawnand: cadence: fix error code in cadence_nand_init()
+
+From: Niravkumar L Rabara <niravkumar.l.rabara@intel.com>
+
+commit 2b9df00cded911e2ca2cfae5c45082166b24f8aa upstream.
+
+Replace dma_request_channel() with dma_request_chan_by_mask() and use
+helper functions to return the proper error code instead of a fixed -EBUSY.
+
+Fixes: ec4ba01e894d ("mtd: rawnand: Add new Cadence NAND driver to MTD subsystem")
+Cc: stable@vger.kernel.org
+Signed-off-by: Niravkumar L Rabara <niravkumar.l.rabara@intel.com>
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mtd/nand/raw/cadence-nand-controller.c | 9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+--- a/drivers/mtd/nand/raw/cadence-nand-controller.c
++++ b/drivers/mtd/nand/raw/cadence-nand-controller.c
+@@ -2866,11 +2866,10 @@ static int cadence_nand_init(struct cdns
+ dma_cap_set(DMA_MEMCPY, mask);
+
+ if (cdns_ctrl->caps1->has_dma) {
+- cdns_ctrl->dmac = dma_request_channel(mask, NULL, NULL);
+- if (!cdns_ctrl->dmac) {
+- dev_err(cdns_ctrl->dev,
+- "Unable to get a DMA channel\n");
+- ret = -EBUSY;
++ cdns_ctrl->dmac = dma_request_chan_by_mask(&mask);
++ if (IS_ERR(cdns_ctrl->dmac)) {
++ ret = dev_err_probe(cdns_ctrl->dev, PTR_ERR(cdns_ctrl->dmac),
++ "%d: Failed to get a DMA channel\n", ret);
+ goto disable_irq;
+ }
+ }
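Unlike dma_request_channel(), which just returns NULL, dma_request_chan_by_mask() returns an ERR_PTR() carrying the real reason for the failure, including -EPROBE_DEFER when no DMA provider has registered yet; dev_err_probe() logs that (quietly for probe deferral) and hands the error back. A sketch of the idiom in isolation, where dev is a placeholder struct device pointer:

        dma_cap_mask_t mask;
        struct dma_chan *chan;

        dma_cap_zero(mask);
        dma_cap_set(DMA_MEMCPY, mask);

        chan = dma_request_chan_by_mask(&mask);
        if (IS_ERR(chan))
                /* Propagate the real error instead of a blanket -EBUSY. */
                return dev_err_probe(dev, PTR_ERR(chan),
                                     "Failed to get a DMA channel\n");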
--- /dev/null
+From f37d135b42cb484bdecee93f56b9f483214ede78 Mon Sep 17 00:00:00 2001
+From: Niravkumar L Rabara <niravkumar.l.rabara@intel.com>
+Date: Mon, 10 Feb 2025 13:35:51 +0800
+Subject: mtd: rawnand: cadence: fix incorrect device in dma_unmap_single
+
+From: Niravkumar L Rabara <niravkumar.l.rabara@intel.com>
+
+commit f37d135b42cb484bdecee93f56b9f483214ede78 upstream.
+
+dma_map_single() uses the physical/bus device (DMA), but dma_unmap_single()
+was using the framework device (NAND controller), which is incorrect.
+Fix dma_unmap_single() to use the correct physical/bus device.
+
+Fixes: ec4ba01e894d ("mtd: rawnand: Add new Cadence NAND driver to MTD subsystem")
+Cc: stable@vger.kernel.org
+Signed-off-by: Niravkumar L Rabara <niravkumar.l.rabara@intel.com>
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mtd/nand/raw/cadence-nand-controller.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/mtd/nand/raw/cadence-nand-controller.c
++++ b/drivers/mtd/nand/raw/cadence-nand-controller.c
+@@ -1858,12 +1858,12 @@ static int cadence_nand_slave_dma_transf
+ dma_async_issue_pending(cdns_ctrl->dmac);
+ wait_for_completion(&finished);
+
+- dma_unmap_single(cdns_ctrl->dev, buf_dma, len, dir);
++ dma_unmap_single(dma_dev->dev, buf_dma, len, dir);
+
+ return 0;
+
+ err_unmap:
+- dma_unmap_single(cdns_ctrl->dev, buf_dma, len, dir);
++ dma_unmap_single(dma_dev->dev, buf_dma, len, dir);
+
+ err:
+ dev_dbg(cdns_ctrl->dev, "Fall back to CPU I/O\n");
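The underlying rule: a streaming DMA mapping must be torn down against the same struct device it was created with, here the DMA engine's device (dma_dev->dev), so that the same DMA/IOMMU ops and address space are used; unmapping through the NAND controller's platform device can appear to work without an IOMMU but is incorrect. A minimal illustration of the matched pair, where buf, len and dir are placeholders:

        dma_addr_t buf_dma = dma_map_single(dma_dev->dev, buf, len, dir);

        if (dma_mapping_error(dma_dev->dev, buf_dma))
                return -EIO;

        /* ... run the transfer through the DMA engine ... */

        dma_unmap_single(dma_dev->dev, buf_dma, len, dir);      /* same device as the map */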
--- /dev/null
+From d76d22b5096c5b05208fd982b153b3f182350b19 Mon Sep 17 00:00:00 2001
+From: Niravkumar L Rabara <niravkumar.l.rabara@intel.com>
+Date: Mon, 10 Feb 2025 13:35:50 +0800
+Subject: mtd: rawnand: cadence: use dma_map_resource for sdma address
+
+From: Niravkumar L Rabara <niravkumar.l.rabara@intel.com>
+
+commit d76d22b5096c5b05208fd982b153b3f182350b19 upstream.
+
+Remap the slave DMA I/O resources to enhance driver portability.
+Using a physical address causes DMA translation failure when the
+ARM SMMU is enabled.
+
+Fixes: ec4ba01e894d ("mtd: rawnand: Add new Cadence NAND driver to MTD subsystem")
+Cc: stable@vger.kernel.org
+Signed-off-by: Niravkumar L Rabara <niravkumar.l.rabara@intel.com>
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mtd/nand/raw/cadence-nand-controller.c | 29 +++++++++++++++++++++----
+ 1 file changed, 25 insertions(+), 4 deletions(-)
+
+--- a/drivers/mtd/nand/raw/cadence-nand-controller.c
++++ b/drivers/mtd/nand/raw/cadence-nand-controller.c
+@@ -469,6 +469,8 @@ struct cdns_nand_ctrl {
+ struct {
+ void __iomem *virt;
+ dma_addr_t dma;
++ dma_addr_t iova_dma;
++ u32 size;
+ } io;
+
+ int irq;
+@@ -1830,11 +1832,11 @@ static int cadence_nand_slave_dma_transf
+ }
+
+ if (dir == DMA_FROM_DEVICE) {
+- src_dma = cdns_ctrl->io.dma;
++ src_dma = cdns_ctrl->io.iova_dma;
+ dst_dma = buf_dma;
+ } else {
+ src_dma = buf_dma;
+- dst_dma = cdns_ctrl->io.dma;
++ dst_dma = cdns_ctrl->io.iova_dma;
+ }
+
+ tx = dmaengine_prep_dma_memcpy(cdns_ctrl->dmac, dst_dma, src_dma, len,
+@@ -2831,6 +2833,7 @@ cadence_nand_irq_cleanup(int irqnum, str
+ static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
+ {
+ dma_cap_mask_t mask;
++ struct dma_device *dma_dev = cdns_ctrl->dmac->device;
+ int ret;
+
+ cdns_ctrl->cdma_desc = dma_alloc_coherent(cdns_ctrl->dev,
+@@ -2874,6 +2877,16 @@ static int cadence_nand_init(struct cdns
+ }
+ }
+
++ cdns_ctrl->io.iova_dma = dma_map_resource(dma_dev->dev, cdns_ctrl->io.dma,
++ cdns_ctrl->io.size,
++ DMA_BIDIRECTIONAL, 0);
++
++ ret = dma_mapping_error(dma_dev->dev, cdns_ctrl->io.iova_dma);
++ if (ret) {
++ dev_err(cdns_ctrl->dev, "Failed to map I/O resource to DMA\n");
++ goto dma_release_chnl;
++ }
++
+ nand_controller_init(&cdns_ctrl->controller);
+ INIT_LIST_HEAD(&cdns_ctrl->chips);
+
+@@ -2884,18 +2897,22 @@ static int cadence_nand_init(struct cdns
+ if (ret) {
+ dev_err(cdns_ctrl->dev, "Failed to register MTD: %d\n",
+ ret);
+- goto dma_release_chnl;
++ goto unmap_dma_resource;
+ }
+
+ kfree(cdns_ctrl->buf);
+ cdns_ctrl->buf = kzalloc(cdns_ctrl->buf_size, GFP_KERNEL);
+ if (!cdns_ctrl->buf) {
+ ret = -ENOMEM;
+- goto dma_release_chnl;
++ goto unmap_dma_resource;
+ }
+
+ return 0;
+
++unmap_dma_resource:
++ dma_unmap_resource(dma_dev->dev, cdns_ctrl->io.iova_dma,
++ cdns_ctrl->io.size, DMA_BIDIRECTIONAL, 0);
++
+ dma_release_chnl:
+ if (cdns_ctrl->dmac)
+ dma_release_channel(cdns_ctrl->dmac);
+@@ -2917,6 +2934,8 @@ free_buf_desc:
+ static void cadence_nand_remove(struct cdns_nand_ctrl *cdns_ctrl)
+ {
+ cadence_nand_chips_cleanup(cdns_ctrl);
++ dma_unmap_resource(cdns_ctrl->dmac->device->dev, cdns_ctrl->io.iova_dma,
++ cdns_ctrl->io.size, DMA_BIDIRECTIONAL, 0);
+ cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl);
+ kfree(cdns_ctrl->buf);
+ dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc),
+@@ -2985,7 +3004,9 @@ static int cadence_nand_dt_probe(struct
+ cdns_ctrl->io.virt = devm_platform_get_and_ioremap_resource(ofdev, 1, &res);
+ if (IS_ERR(cdns_ctrl->io.virt))
+ return PTR_ERR(cdns_ctrl->io.virt);
++
+ cdns_ctrl->io.dma = res->start;
++ cdns_ctrl->io.size = resource_size(res);
+
+ dt->clk = devm_clk_get(cdns_ctrl->dev, "nf_clk");
+ if (IS_ERR(dt->clk))
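With an SMMU between the DMA engine and the bus, the engine cannot reach the controller's SDMA window through its CPU physical address; the MMIO region has to be mapped into the DMA engine's IOVA space, which is what dma_map_resource() provides (dma_map_single() is only for RAM). The life cycle added by the patch, in outline, with res, dma_dev and iova standing in for the driver's fields:

        dma_addr_t iova;

        /* At init: map the MMIO window for the DMA engine's device. */
        iova = dma_map_resource(dma_dev->dev, res->start, resource_size(res),
                                DMA_BIDIRECTIONAL, 0);
        if (dma_mapping_error(dma_dev->dev, iova))
                return -EIO;

        /* DMA transfers then target 'iova' instead of the raw res->start. */

        /* At remove time (and on init error paths): undo the mapping. */
        dma_unmap_resource(dma_dev->dev, iova, resource_size(res),
                           DMA_BIDIRECTIONAL, 0);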
--- /dev/null
+From 878e7b11736e062514e58f3b445ff343e6705537 Mon Sep 17 00:00:00 2001
+From: Haoxiang Li <haoxiang_li2024@163.com>
+Date: Tue, 18 Feb 2025 11:04:09 +0800
+Subject: nfp: bpf: Add check for nfp_app_ctrl_msg_alloc()
+
+From: Haoxiang Li <haoxiang_li2024@163.com>
+
+commit 878e7b11736e062514e58f3b445ff343e6705537 upstream.
+
+Add check for the return value of nfp_app_ctrl_msg_alloc() in
+nfp_bpf_cmsg_alloc() to prevent null pointer dereference.
+
+Fixes: ff3d43f7568c ("nfp: bpf: implement helpers for FW map ops")
+Cc: stable@vger.kernel.org
+Signed-off-by: Haoxiang Li <haoxiang_li2024@163.com>
+Link: https://patch.msgid.link/20250218030409.2425798-1-haoxiang_li2024@163.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/netronome/nfp/bpf/cmsg.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
++++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
+@@ -20,6 +20,8 @@ nfp_bpf_cmsg_alloc(struct nfp_app_bpf *b
+ struct sk_buff *skb;
+
+ skb = nfp_app_ctrl_msg_alloc(bpf->app, size, GFP_KERNEL);
++ if (!skb)
++ return NULL;
+ skb_put(skb, size);
+
+ return skb;
power-supply-da9150-fg-fix-potential-overflow.patch
nvme-ioctl-add-missing-space-in-err-message.patch
bpf-skip-non-exist-keys-in-generic_map_lookup_batch.patch
+tee-optee-fix-supplicant-wait-loop.patch
+drop_monitor-fix-incorrect-initialization-order.patch
+nfp-bpf-add-check-for-nfp_app_ctrl_msg_alloc.patch
+alsa-hda-conexant-add-quirk-for-hp-probook-450-g4-mute-led.patch
+acct-perform-last-write-from-workqueue.patch
+acct-block-access-to-kernel-internal-filesystems.patch
+mtd-rawnand-cadence-fix-error-code-in-cadence_nand_init.patch
+mtd-rawnand-cadence-use-dma_map_resource-for-sdma-address.patch
+mtd-rawnand-cadence-fix-incorrect-device-in-dma_unmap_single.patch
--- /dev/null
+From 70b0d6b0a199c5a3ee6c72f5e61681ed6f759612 Mon Sep 17 00:00:00 2001
+From: Sumit Garg <sumit.garg@linaro.org>
+Date: Tue, 4 Feb 2025 13:04:18 +0530
+Subject: tee: optee: Fix supplicant wait loop
+
+From: Sumit Garg <sumit.garg@linaro.org>
+
+commit 70b0d6b0a199c5a3ee6c72f5e61681ed6f759612 upstream.
+
+OP-TEE supplicant is a user-space daemon, and it's possible for it to
+be hung, crashed, or killed in the middle of processing an OP-TEE
+RPC call. Things get worse when the supplicant process and the OP-TEE
+client application are shut down in the wrong order, which can
+eventually leave the system hanging while it waits for the client
+application to close.
+
+Allow the client process waiting in the kernel for a supplicant response
+to be killed rather than waiting indefinitely in an unkillable state. Also,
+a normal uninterruptible wait should not have resulted in the hung-task
+watchdog getting triggered, but the endless loop would.
+
+This fixes issues observed during system reboot/shutdown when the
+supplicant hung for some reason, or crashed or was killed, which left the
+client hung in an unkillable state. That in turn left the system hung,
+requiring a hard power off/on to recover.
+
+Fixes: 4fb0a5eb364d ("tee: add OP-TEE driver")
+Suggested-by: Arnd Bergmann <arnd@arndb.de>
+Cc: stable@vger.kernel.org
+Signed-off-by: Sumit Garg <sumit.garg@linaro.org>
+Reviewed-by: Arnd Bergmann <arnd@arndb.de>
+Reviewed-by: Jens Wiklander <jens.wiklander@linaro.org>
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tee/optee/supp.c | 35 ++++++++---------------------------
+ 1 file changed, 8 insertions(+), 27 deletions(-)
+
+--- a/drivers/tee/optee/supp.c
++++ b/drivers/tee/optee/supp.c
+@@ -80,7 +80,6 @@ u32 optee_supp_thrd_req(struct tee_conte
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct optee_supp *supp = &optee->supp;
+ struct optee_supp_req *req;
+- bool interruptable;
+ u32 ret;
+
+ /*
+@@ -111,36 +110,18 @@ u32 optee_supp_thrd_req(struct tee_conte
+ /*
+ * Wait for supplicant to process and return result, once we've
+ * returned from wait_for_completion(&req->c) successfully we have
+- * exclusive access again.
++ * exclusive access again. Allow the wait to be killable such that
++ * the wait doesn't turn into an indefinite state if the supplicant
++ * gets hung for some reason.
+ */
+- while (wait_for_completion_interruptible(&req->c)) {
++ if (wait_for_completion_killable(&req->c)) {
+ mutex_lock(&supp->mutex);
+- interruptable = !supp->ctx;
+- if (interruptable) {
+- /*
+- * There's no supplicant available and since the
+- * supp->mutex currently is held none can
+- * become available until the mutex released
+- * again.
+- *
+- * Interrupting an RPC to supplicant is only
+- * allowed as a way of slightly improving the user
+- * experience in case the supplicant hasn't been
+- * started yet. During normal operation the supplicant
+- * will serve all requests in a timely manner and
+- * interrupting then wouldn't make sense.
+- */
+- if (req->in_queue) {
+- list_del(&req->link);
+- req->in_queue = false;
+- }
++ if (req->in_queue) {
++ list_del(&req->link);
++ req->in_queue = false;
+ }
+ mutex_unlock(&supp->mutex);
+-
+- if (interruptable) {
+- req->ret = TEEC_ERROR_COMMUNICATION;
+- break;
+- }
++ req->ret = TEEC_ERROR_COMMUNICATION;
+ }
+
+ ret = req->ret;
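The shape of the change: the old code looped on wait_for_completion_interruptible() and only honoured the interruption when no supplicant was registered, so a client stuck behind a hung supplicant could never be killed. The new code does a single wait_for_completion_killable(), withdraws the request from the queue if a fatal signal arrives, and fails it with TEEC_ERROR_COMMUNICATION. Reduced to its core, the control flow is (a sketch of the hunk above, not additional driver code):

        if (wait_for_completion_killable(&req->c)) {
                /* Fatal signal: take the request back and report failure. */
                mutex_lock(&supp->mutex);
                if (req->in_queue) {
                        list_del(&req->link);
                        req->in_queue = false;
                }
                mutex_unlock(&supp->mutex);
                req->ret = TEEC_ERROR_COMMUNICATION;
        }
        ret = req->ret;

Using a killable rather than an interruptible wait keeps ordinary signals from aborting an in-flight RPC while still letting SIGKILL, and thus shutdown, make progress.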