--- /dev/null
+From eed07c5bce2670cec50e6546d9278e7c11814eeb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 2 Nov 2023 16:26:59 +0000
+Subject: afs: Fix afs_server_list to be cleaned up with RCU
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit e6bace7313d61e31f2b16fa3d774fd8cb3cb869e ]
+
+afs_server_list is accessed with the rcu_read_lock() held from
+volume->servers, so it needs to be cleaned up correctly.
+
+Fix this by using kfree_rcu() instead of kfree().
+
+Fixes: 8a070a964877 ("afs: Detect cell aliases 1 - Cells with root volumes")
+Signed-off-by: David Howells <dhowells@redhat.com>
+cc: Marc Dionne <marc.dionne@auristor.com>
+cc: linux-afs@lists.infradead.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/afs/internal.h | 1 +
+ fs/afs/server_list.c | 2 +-
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/fs/afs/internal.h b/fs/afs/internal.h
+index 9ba7b68375c9f..c2d70fc1698c0 100644
+--- a/fs/afs/internal.h
++++ b/fs/afs/internal.h
+@@ -552,6 +552,7 @@ struct afs_server_entry {
+ };
+
+ struct afs_server_list {
++ struct rcu_head rcu;
+ afs_volid_t vids[AFS_MAXTYPES]; /* Volume IDs */
+ refcount_t usage;
+ unsigned char nr_servers;
+diff --git a/fs/afs/server_list.c b/fs/afs/server_list.c
+index ed9056703505f..b59896b1de0af 100644
+--- a/fs/afs/server_list.c
++++ b/fs/afs/server_list.c
+@@ -17,7 +17,7 @@ void afs_put_serverlist(struct afs_net *net, struct afs_server_list *slist)
+ for (i = 0; i < slist->nr_servers; i++)
+ afs_unuse_server(net, slist->servers[i].server,
+ afs_server_trace_put_slist);
+- kfree(slist);
++ kfree_rcu(slist, rcu);
+ }
+ }
+
+--
+2.42.0
+
--- /dev/null
+From 4512c9e01ae6e6a6219b309ec3993a615c39e331 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 1 Nov 2023 22:03:28 +0000
+Subject: afs: Fix file locking on R/O volumes to operate in local mode
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit b590eb41be766c5a63acc7e8896a042f7a4e8293 ]
+
+AFS doesn't really do locking on R/O volumes as fileservers don't maintain
+state with each other and thus a lock on a R/O volume file on one
+fileserver will not be visible to someone looking at the same file on
+another fileserver.
+
+Further, the server may return an error if you try it.
+
+Fix this by doing what other AFS clients do and handle filelocking on R/O
+volume files entirely within the client and don't touch the server.
+
+Fixes: 6c6c1d63c243 ("afs: Provide mount-time configurable byte-range file locking emulation")
+Signed-off-by: David Howells <dhowells@redhat.com>
+Reviewed-by: Marc Dionne <marc.dionne@auristor.com>
+cc: linux-afs@lists.infradead.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/afs/super.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/fs/afs/super.c b/fs/afs/super.c
+index 95d713074dc81..e95fb4cb4fcd2 100644
+--- a/fs/afs/super.c
++++ b/fs/afs/super.c
+@@ -407,6 +407,8 @@ static int afs_validate_fc(struct fs_context *fc)
+ return PTR_ERR(volume);
+
+ ctx->volume = volume;
++ if (volume->type != AFSVL_RWVOL)
++ ctx->flock_mode = afs_flock_mode_local;
+ }
+
+ return 0;
+--
+2.42.0
+
--- /dev/null
+From 2e495e3b0bd5b52f589c9273dd648364dc3e23aa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Jun 2023 09:43:54 +0100
+Subject: afs: Make error on cell lookup failure consistent with OpenAFS
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit 2a4ca1b4b77850544408595e2433f5d7811a9daa ]
+
+When kafs tries to look up a cell in the DNS or the local config, it will
+translate a lookup failure into EDESTADDRREQ whereas OpenAFS translates it
+into ENOENT. Applications such as West expect the latter behaviour and
+fail if they see the former.
+
+This can be seen by trying to mount an unknown cell:
+
+ # mount -t afs %example.com:cell.root /mnt
+ mount: /mnt: mount(2) system call failed: Destination address required.
+
+Fixes: 4d673da14533 ("afs: Support the AFS dynamic root")
+Reported-by: Markus Suvanto <markus.suvanto@gmail.com>
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=216637
+Signed-off-by: David Howells <dhowells@redhat.com>
+Reviewed-by: Jeffrey Altman <jaltman@auristor.com>
+cc: Marc Dionne <marc.dionne@auristor.com>
+cc: linux-afs@lists.infradead.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/afs/dynroot.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
+index d7d9402ff7182..91e804c70dd0a 100644
+--- a/fs/afs/dynroot.c
++++ b/fs/afs/dynroot.c
+@@ -132,8 +132,8 @@ static int afs_probe_cell_name(struct dentry *dentry)
+
+ ret = dns_query(net->net, "afsdb", name, len, "srv=1",
+ NULL, NULL, false);
+- if (ret == -ENODATA)
+- ret = -EDESTADDRREQ;
++ if (ret == -ENODATA || ret == -ENOKEY)
++ ret = -ENOENT;
+ return ret;
+ }
+
+--
+2.42.0
+
--- /dev/null
+From 295abf702055b3d44d54bb937384d94c9d7393da Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Oct 2023 01:25:07 +0100
+Subject: afs: Return ENOENT if no cell DNS record can be found
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit 0167236e7d66c5e1e85d902a6abc2529b7544539 ]
+
+Make AFS return error ENOENT if no cell SRV or AFSDB DNS record (or
+cellservdb config file record) can be found rather than returning
+EDESTADDRREQ.
+
+Also add cell name lookup info to the cursor dump.
+
+Fixes: d5c32c89b208 ("afs: Fix cell DNS lookup")
+Reported-by: Markus Suvanto <markus.suvanto@gmail.com>
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=216637
+Signed-off-by: David Howells <dhowells@redhat.com>
+Reviewed-by: Marc Dionne <marc.dionne@auristor.com>
+cc: linux-afs@lists.infradead.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/afs/vl_rotate.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/fs/afs/vl_rotate.c b/fs/afs/vl_rotate.c
+index 488e58490b16e..eb415ce563600 100644
+--- a/fs/afs/vl_rotate.c
++++ b/fs/afs/vl_rotate.c
+@@ -58,6 +58,12 @@ static bool afs_start_vl_iteration(struct afs_vl_cursor *vc)
+ }
+
+ /* Status load is ordered after lookup counter load */
++ if (cell->dns_status == DNS_LOOKUP_GOT_NOT_FOUND) {
++ pr_warn("No record of cell %s\n", cell->name);
++ vc->error = -ENOENT;
++ return false;
++ }
++
+ if (cell->dns_source == DNS_RECORD_UNAVAILABLE) {
+ vc->error = -EDESTADDRREQ;
+ return false;
+@@ -285,6 +291,7 @@ bool afs_select_vlserver(struct afs_vl_cursor *vc)
+ */
+ static void afs_vl_dump_edestaddrreq(const struct afs_vl_cursor *vc)
+ {
++ struct afs_cell *cell = vc->cell;
+ static int count;
+ int i;
+
+@@ -294,6 +301,9 @@ static void afs_vl_dump_edestaddrreq(const struct afs_vl_cursor *vc)
+
+ rcu_read_lock();
+ pr_notice("EDESTADDR occurred\n");
++ pr_notice("CELL: %s err=%d\n", cell->name, cell->error);
++ pr_notice("DNS: src=%u st=%u lc=%x\n",
++ cell->dns_source, cell->dns_status, cell->dns_lookup_count);
+ pr_notice("VC: ut=%lx ix=%u ni=%hu fl=%hx err=%hd\n",
+ vc->untried, vc->index, vc->nr_iterations, vc->flags, vc->error);
+
+--
+2.42.0
+
--- /dev/null
+From 3c8a8673b5c507359208a3cba4eb3f8ae280295c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Nov 2023 00:44:33 +0530
+Subject: amd-xgbe: handle corner-case during sfp hotplug
+
+From: Raju Rangoju <Raju.Rangoju@amd.com>
+
+[ Upstream commit 676ec53844cbdf2f47e68a076cdff7f0ec6cbe3f ]
+
+Force the mode change for SFI in Fixed PHY configurations. Fixed PHY
+configurations needs PLL to be enabled while doing mode set. When the
+SFP module isn't connected during boot, driver assumes AN is ON and
+attempts auto-negotiation. However, if the connected SFP comes up in
+Fixed PHY configuration the link will not come up as PLL isn't enabled
+while the initial mode set command is issued. So, force the mode change
+for SFI in Fixed PHY configuration to fix link issues.
+
+Fixes: e57f7a3feaef ("amd-xgbe: Prepare for working with more than one type of phy")
+Acked-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+Signed-off-by: Raju Rangoju <Raju.Rangoju@amd.com>
+Reviewed-by: Wojciech Drewek <wojciech.drewek@intel.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 14 +++++++++++++-
+ 1 file changed, 13 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+index ca7372369b3e6..60be836b294bb 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+@@ -1178,7 +1178,19 @@ static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata)
+ if (pdata->phy.duplex != DUPLEX_FULL)
+ return -EINVAL;
+
+- xgbe_set_mode(pdata, mode);
++ /* Force the mode change for SFI in Fixed PHY config.
++ * Fixed PHY configs needs PLL to be enabled while doing mode set.
++ * When the SFP module isn't connected during boot, driver assumes
++ * AN is ON and attempts autonegotiation. However, if the connected
++ * SFP comes up in Fixed PHY config, the link will not come up as
++ * PLL isn't enabled while the initial mode set command is issued.
++ * So, force the mode change for SFI in Fixed PHY configuration to
++ * fix link issues.
++ */
++ if (mode == XGBE_MODE_SFI)
++ xgbe_change_mode(pdata, mode);
++ else
++ xgbe_set_mode(pdata, mode);
+
+ return 0;
+ }
+--
+2.42.0
+
--- /dev/null
+From 0b0b5b980d6d529d5d7f2bbb18ef9d09e2788cc2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Nov 2023 00:44:34 +0530
+Subject: amd-xgbe: handle the corner-case during tx completion
+
+From: Raju Rangoju <Raju.Rangoju@amd.com>
+
+[ Upstream commit 7121205d5330c6a3cb3379348886d47c77b78d06 ]
+
+The existing implementation uses software logic to accumulate tx
+completions until the specified time (1ms) is met and then poll them.
+However, there exists a tiny gap which leads to a race between
+resetting and checking the tx_activate flag. Due to this the tx
+completions are not reported to upper layer and tx queue timeout
+kicks-in restarting the device.
+
+To address this, introduce a tx cleanup mechanism as part of the
+periodic maintenance process.
+
+Fixes: c5aa9e3b8156 ("amd-xgbe: Initial AMD 10GbE platform driver")
+Acked-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+Signed-off-by: Raju Rangoju <Raju.Rangoju@amd.com>
+Reviewed-by: Wojciech Drewek <wojciech.drewek@intel.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index 614c0278419bc..6b73648b37793 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -682,10 +682,24 @@ static void xgbe_service(struct work_struct *work)
+ static void xgbe_service_timer(struct timer_list *t)
+ {
+ struct xgbe_prv_data *pdata = from_timer(pdata, t, service_timer);
++ struct xgbe_channel *channel;
++ unsigned int i;
+
+ queue_work(pdata->dev_workqueue, &pdata->service_work);
+
+ mod_timer(&pdata->service_timer, jiffies + HZ);
++
++ if (!pdata->tx_usecs)
++ return;
++
++ for (i = 0; i < pdata->channel_count; i++) {
++ channel = pdata->channel[i];
++ if (!channel->tx_ring || channel->tx_timer_active)
++ break;
++ channel->tx_timer_active = 1;
++ mod_timer(&channel->tx_timer,
++ jiffies + usecs_to_jiffies(pdata->tx_usecs));
++ }
+ }
+
+ static void xgbe_init_timers(struct xgbe_prv_data *pdata)
+--
+2.42.0
+
--- /dev/null
+From db2ca2302e284c7929a9fedb2709bb3b004ce861 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Nov 2023 00:44:35 +0530
+Subject: amd-xgbe: propagate the correct speed and duplex status
+
+From: Raju Rangoju <Raju.Rangoju@amd.com>
+
+[ Upstream commit 7a2323ac24a50311f64a3a9b54ed5bef5821ecae ]
+
+xgbe_get_link_ksettings() does not propagate correct speed and duplex
+information to ethtool during cable unplug. Due to which ethtool reports
+incorrect values for speed and duplex.
+
+Address this by propagating correct information.
+
+Fixes: 7c12aa08779c ("amd-xgbe: Move the PHY support into amd-xgbe")
+Acked-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+Signed-off-by: Raju Rangoju <Raju.Rangoju@amd.com>
+Reviewed-by: Wojciech Drewek <wojciech.drewek@intel.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+index 6e83ff59172a3..32fab5e772462 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+@@ -314,10 +314,15 @@ static int xgbe_get_link_ksettings(struct net_device *netdev,
+
+ cmd->base.phy_address = pdata->phy.address;
+
+- cmd->base.autoneg = pdata->phy.autoneg;
+- cmd->base.speed = pdata->phy.speed;
+- cmd->base.duplex = pdata->phy.duplex;
++ if (netif_carrier_ok(netdev)) {
++ cmd->base.speed = pdata->phy.speed;
++ cmd->base.duplex = pdata->phy.duplex;
++ } else {
++ cmd->base.speed = SPEED_UNKNOWN;
++ cmd->base.duplex = DUPLEX_UNKNOWN;
++ }
+
++ cmd->base.autoneg = pdata->phy.autoneg;
+ cmd->base.port = PORT_NONE;
+
+ XGBE_LM_COPY(cmd, supported, lks, supported);
+--
+2.42.0
+
--- /dev/null
+From b33159fa32076cacb4de5417c66d376e9c3ce4e6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Nov 2023 15:07:41 -0800
+Subject: arm/xen: fix xen_vcpu_info allocation alignment
+
+From: Stefano Stabellini <sstabellini@kernel.org>
+
+[ Upstream commit 7bf9a6b46549852a37e6d07e52c601c3c706b562 ]
+
+xen_vcpu_info is a percpu area that needs to be mapped by Xen.
+Currently, it could cross a page boundary resulting in Xen being unable
+to map it:
+
+[ 0.567318] kernel BUG at arch/arm64/xen/../../arm/xen/enlighten.c:164!
+[ 0.574002] Internal error: Oops - BUG: 00000000f2000800 [#1] PREEMPT SMP
+
+Fix the issue by using __alloc_percpu and requesting alignment for the
+memory allocation.
+
+Signed-off-by: Stefano Stabellini <stefano.stabellini@amd.com>
+
+Link: https://lore.kernel.org/r/alpine.DEB.2.22.394.2311221501340.2053963@ubuntu-linux-20-04-desktop
+Fixes: 24d5373dda7c ("arm/xen: Use alloc_percpu rather than __alloc_percpu")
+Reviewed-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/xen/enlighten.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
+index b647306eb1608..d12fdb9c05a89 100644
+--- a/arch/arm/xen/enlighten.c
++++ b/arch/arm/xen/enlighten.c
+@@ -484,7 +484,8 @@ static int __init xen_guest_init(void)
+ * for secondary CPUs as they are brought up.
+ * For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
+ */
+- xen_vcpu_info = alloc_percpu(struct vcpu_info);
++ xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info),
++ 1 << fls(sizeof(struct vcpu_info) - 1));
+ if (xen_vcpu_info == NULL)
+ return -ENOMEM;
+
+--
+2.42.0
+
--- /dev/null
+From cc6ed74b3a9a2151dc391af64424b3a0e8aa6a83 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Nov 2023 13:14:22 +0000
+Subject: arm64: mm: Fix "rodata=on" when CONFIG_RODATA_FULL_DEFAULT_ENABLED=y
+
+From: Will Deacon <will@kernel.org>
+
+[ Upstream commit acfa60dbe03802d6afd28401aa47801270e82021 ]
+
+When CONFIG_RODATA_FULL_DEFAULT_ENABLED=y, passing "rodata=on" on the
+kernel command-line (rather than "rodata=full") should turn off the
+"full" behaviour, leaving writable linear aliases of read-only kernel
+memory. Unfortunately, the option has no effect in this situation and
+the only way to disable the "rodata=full" behaviour is to disable rodata
+protection entirely by passing "rodata=off".
+
+Fix this by parsing the "on" and "off" options in the arch code,
+additionally enforcing that 'rodata_full' cannot be set without also
+setting 'rodata_enabled', allowing us to simplify a couple of checks
+in the process.
+
+Fixes: 2e8cff0a0eee ("arm64: fix rodata=full")
+Cc: Ard Biesheuvel <ardb@kernel.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Will Deacon <will@kernel.org>
+Reviewed-by: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
+Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
+Link: https://lore.kernel.org/r/20231117131422.29663-1-will@kernel.org
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/include/asm/setup.h | 17 +++++++++++++++--
+ arch/arm64/mm/pageattr.c | 7 +++----
+ 2 files changed, 18 insertions(+), 6 deletions(-)
+
+diff --git a/arch/arm64/include/asm/setup.h b/arch/arm64/include/asm/setup.h
+index f4af547ef54ca..2e4d7da74fb87 100644
+--- a/arch/arm64/include/asm/setup.h
++++ b/arch/arm64/include/asm/setup.h
+@@ -21,9 +21,22 @@ static inline bool arch_parse_debug_rodata(char *arg)
+ extern bool rodata_enabled;
+ extern bool rodata_full;
+
+- if (arg && !strcmp(arg, "full")) {
++ if (!arg)
++ return false;
++
++ if (!strcmp(arg, "full")) {
++ rodata_enabled = rodata_full = true;
++ return true;
++ }
++
++ if (!strcmp(arg, "off")) {
++ rodata_enabled = rodata_full = false;
++ return true;
++ }
++
++ if (!strcmp(arg, "on")) {
+ rodata_enabled = true;
+- rodata_full = true;
++ rodata_full = false;
+ return true;
+ }
+
+diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
+index 47f532e13d532..826cb200b204f 100644
+--- a/arch/arm64/mm/pageattr.c
++++ b/arch/arm64/mm/pageattr.c
+@@ -29,8 +29,8 @@ bool can_set_direct_map(void)
+ *
+ * KFENCE pool requires page-granular mapping if initialized late.
+ */
+- return (rodata_enabled && rodata_full) || debug_pagealloc_enabled() ||
+- arm64_kfence_can_set_direct_map();
++ return rodata_full || debug_pagealloc_enabled() ||
++ arm64_kfence_can_set_direct_map();
+ }
+
+ static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
+@@ -105,8 +105,7 @@ static int change_memory_common(unsigned long addr, int numpages,
+ * If we are manipulating read-only permissions, apply the same
+ * change to the linear mapping of the pages that back this VM area.
+ */
+- if (rodata_enabled &&
+- rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
++ if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
+ pgprot_val(clear_mask) == PTE_RDONLY)) {
+ for (i = 0; i < area->nr_pages; i++) {
+ __change_memory_common((u64)page_address(area->pages[i]),
+--
+2.42.0
+
--- /dev/null
+From cf4fab78b356d20d8257298d970b86ea97901139 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 31 Oct 2023 04:00:07 +0000
+Subject: ata: pata_isapnp: Add missing error check for devm_ioport_map()
+
+From: Chen Ni <nichen@iscas.ac.cn>
+
+[ Upstream commit a6925165ea82b7765269ddd8dcad57c731aa00de ]
+
+Add missing error return check for devm_ioport_map() and return the
+error if this function call fails.
+
+Fixes: 0d5ff566779f ("libata: convert to iomap")
+Signed-off-by: Chen Ni <nichen@iscas.ac.cn>
+Reviewed-by: Sergey Shtylyov <s.shtylyov@omp.ru>
+Signed-off-by: Damien Le Moal <dlemoal@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ata/pata_isapnp.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
+index 43bb224430d3c..8892931ea8676 100644
+--- a/drivers/ata/pata_isapnp.c
++++ b/drivers/ata/pata_isapnp.c
+@@ -82,6 +82,9 @@ static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev
+ if (pnp_port_valid(idev, 1)) {
+ ctl_addr = devm_ioport_map(&idev->dev,
+ pnp_port_start(idev, 1), 1);
++ if (!ctl_addr)
++ return -ENOMEM;
++
+ ap->ioaddr.altstatus_addr = ctl_addr;
+ ap->ioaddr.ctl_addr = ctl_addr;
+ ap->ops = &isapnp_port_ops;
+--
+2.42.0
+
--- /dev/null
+From 0b57a080102a7ba081948537b325746f7ce37f75 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Mar 2023 11:14:58 +0000
+Subject: cifs: account for primary channel in the interface list
+
+From: Shyam Prasad N <sprasad@microsoft.com>
+
+[ Upstream commit fa1d0508bdd4a68c5e40f85f635712af8c12f180 ]
+
+The refcounting of server interfaces should account
+for the primary channel too. Although this is not
+strictly necessary, doing so will account for the primary
+channel in DebugData.
+
+Cc: stable@vger.kernel.org
+Reviewed-by: Paulo Alcantara (SUSE) <pc@manguebit.com>
+Signed-off-by: Shyam Prasad N <sprasad@microsoft.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/sess.c | 28 ++++++++++++++++++++++++++++
+ fs/smb/client/smb2ops.c | 6 ++++++
+ 2 files changed, 34 insertions(+)
+
+diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c
+index 33e724545c5b4..634035bcb9347 100644
+--- a/fs/smb/client/sess.c
++++ b/fs/smb/client/sess.c
+@@ -288,6 +288,7 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
+ struct cifs_server_iface *iface = NULL;
+ struct cifs_server_iface *old_iface = NULL;
+ struct cifs_server_iface *last_iface = NULL;
++ struct sockaddr_storage ss;
+ int rc = 0;
+
+ spin_lock(&ses->chan_lock);
+@@ -306,6 +307,10 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
+ }
+ spin_unlock(&ses->chan_lock);
+
++ spin_lock(&server->srv_lock);
++ ss = server->dstaddr;
++ spin_unlock(&server->srv_lock);
++
+ spin_lock(&ses->iface_lock);
+ if (!ses->iface_count) {
+ spin_unlock(&ses->iface_lock);
+@@ -319,6 +324,16 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
+
+ /* then look for a new one */
+ list_for_each_entry(iface, &ses->iface_list, iface_head) {
++ if (!chan_index) {
++ /* if we're trying to get the updated iface for primary channel */
++ if (!cifs_match_ipaddr((struct sockaddr *) &ss,
++ (struct sockaddr *) &iface->sockaddr))
++ continue;
++
++ kref_get(&iface->refcount);
++ break;
++ }
++
+ /* do not mix rdma and non-rdma interfaces */
+ if (iface->rdma_capable != server->rdma)
+ continue;
+@@ -345,6 +360,13 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
+ cifs_dbg(FYI, "unable to find a suitable iface\n");
+ }
+
++ if (!chan_index && !iface) {
++ cifs_dbg(FYI, "unable to get the interface matching: %pIS\n",
++ &ss);
++ spin_unlock(&ses->iface_lock);
++ return 0;
++ }
++
+ /* now drop the ref to the current iface */
+ if (old_iface && iface) {
+ cifs_dbg(FYI, "replacing iface: %pIS with %pIS\n",
+@@ -367,6 +389,12 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
+ old_iface->weight_fulfilled--;
+
+ kref_put(&old_iface->refcount, release_iface);
++ } else if (!chan_index) {
++ /* special case: update interface for primary channel */
++ cifs_dbg(FYI, "referencing primary channel iface: %pIS\n",
++ &iface->sockaddr);
++ iface->num_channels++;
++ iface->weight_fulfilled++;
+ } else {
+ WARN_ON(!iface);
+ cifs_dbg(FYI, "adding new iface: %pIS\n", &iface->sockaddr);
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index 2c1898803279a..4cc56e4695fbc 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -752,6 +752,7 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon, bool in_
+ unsigned int ret_data_len = 0;
+ struct network_interface_info_ioctl_rsp *out_buf = NULL;
+ struct cifs_ses *ses = tcon->ses;
++ struct TCP_Server_Info *pserver;
+
+ /* do not query too frequently */
+ if (ses->iface_last_update &&
+@@ -776,6 +777,11 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon, bool in_
+ if (rc)
+ goto out;
+
++ /* check if iface is still active */
++ pserver = ses->chans[0].server;
++ if (pserver && !cifs_chan_is_iface_active(ses, pserver))
++ cifs_chan_update_iface(ses, pserver);
++
+ out:
+ kfree(out_buf);
+ return rc;
+--
+2.42.0
+
--- /dev/null
+From 1e071242f8769c04c2923ecd0ae99db1d23ac95b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 26 Dec 2022 11:24:56 +0000
+Subject: cifs: distribute channels across interfaces based on speed
+
+From: Shyam Prasad N <sprasad@microsoft.com>
+
+[ Upstream commit a6d8fb54a515f0546ffdb7870102b1238917e567 ]
+
+Today, if the server interfaces RSS capable, we simply
+choose the fastest interface to setup a channel. This is not
+a scalable approach, and does not make a lot of attempt to
+distribute the connections.
+
+This change does a weighted distribution of channels across
+all the available server interfaces, where the weight is
+a function of the advertised interface speed.
+
+Also make sure that we don't mix rdma and non-rdma for channels.
+
+Signed-off-by: Shyam Prasad N <sprasad@microsoft.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Stable-dep-of: fa1d0508bdd4 ("cifs: account for primary channel in the interface list")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/cifs_debug.c | 16 ++++++++
+ fs/smb/client/cifsglob.h | 2 +
+ fs/smb/client/sess.c | 84 +++++++++++++++++++++++++++++++-------
+ 3 files changed, 88 insertions(+), 14 deletions(-)
+
+diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c
+index 8233fb2f0ca63..0acb455368f23 100644
+--- a/fs/smb/client/cifs_debug.c
++++ b/fs/smb/client/cifs_debug.c
+@@ -220,6 +220,8 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
+ struct cifs_ses *ses;
+ struct cifs_tcon *tcon;
+ struct cifs_server_iface *iface;
++ size_t iface_weight = 0, iface_min_speed = 0;
++ struct cifs_server_iface *last_iface = NULL;
+ int c, i, j;
+
+ seq_puts(m,
+@@ -461,11 +463,25 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
+ "\tLast updated: %lu seconds ago",
+ ses->iface_count,
+ (jiffies - ses->iface_last_update) / HZ);
++
++ last_iface = list_last_entry(&ses->iface_list,
++ struct cifs_server_iface,
++ iface_head);
++ iface_min_speed = last_iface->speed;
++
+ j = 0;
+ list_for_each_entry(iface, &ses->iface_list,
+ iface_head) {
+ seq_printf(m, "\n\t%d)", ++j);
+ cifs_dump_iface(m, iface);
++
++ iface_weight = iface->speed / iface_min_speed;
++ seq_printf(m, "\t\tWeight (cur,total): (%zu,%zu)"
++ "\n\t\tAllocated channels: %u\n",
++ iface->weight_fulfilled,
++ iface_weight,
++ iface->num_channels);
++
+ if (is_ses_using_iface(ses, iface))
+ seq_puts(m, "\t\t[CONNECTED]\n");
+ }
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index 6c8a55608c9bd..2e814eadd6aef 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -956,6 +956,8 @@ struct cifs_server_iface {
+ struct list_head iface_head;
+ struct kref refcount;
+ size_t speed;
++ size_t weight_fulfilled;
++ unsigned int num_channels;
+ unsigned int rdma_capable : 1;
+ unsigned int rss_capable : 1;
+ unsigned int is_active : 1; /* unset if non existent */
+diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c
+index f0d164873500b..33e724545c5b4 100644
+--- a/fs/smb/client/sess.c
++++ b/fs/smb/client/sess.c
+@@ -164,7 +164,9 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
+ int left;
+ int rc = 0;
+ int tries = 0;
++ size_t iface_weight = 0, iface_min_speed = 0;
+ struct cifs_server_iface *iface = NULL, *niface = NULL;
++ struct cifs_server_iface *last_iface = NULL;
+
+ spin_lock(&ses->chan_lock);
+
+@@ -192,21 +194,11 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
+ }
+ spin_unlock(&ses->chan_lock);
+
+- /*
+- * Keep connecting to same, fastest, iface for all channels as
+- * long as its RSS. Try next fastest one if not RSS or channel
+- * creation fails.
+- */
+- spin_lock(&ses->iface_lock);
+- iface = list_first_entry(&ses->iface_list, struct cifs_server_iface,
+- iface_head);
+- spin_unlock(&ses->iface_lock);
+-
+ while (left > 0) {
+
+ tries++;
+ if (tries > 3*ses->chan_max) {
+- cifs_dbg(FYI, "too many channel open attempts (%d channels left to open)\n",
++ cifs_dbg(VFS, "too many channel open attempts (%d channels left to open)\n",
+ left);
+ break;
+ }
+@@ -214,17 +206,35 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
+ spin_lock(&ses->iface_lock);
+ if (!ses->iface_count) {
+ spin_unlock(&ses->iface_lock);
++ cifs_dbg(VFS, "server %s does not advertise interfaces\n",
++ ses->server->hostname);
+ break;
+ }
+
++ if (!iface)
++ iface = list_first_entry(&ses->iface_list, struct cifs_server_iface,
++ iface_head);
++ last_iface = list_last_entry(&ses->iface_list, struct cifs_server_iface,
++ iface_head);
++ iface_min_speed = last_iface->speed;
++
+ list_for_each_entry_safe_from(iface, niface, &ses->iface_list,
+ iface_head) {
++ /* do not mix rdma and non-rdma interfaces */
++ if (iface->rdma_capable != ses->server->rdma)
++ continue;
++
+ /* skip ifaces that are unusable */
+ if (!iface->is_active ||
+ (is_ses_using_iface(ses, iface) &&
+- !iface->rss_capable)) {
++ !iface->rss_capable))
++ continue;
++
++ /* check if we already allocated enough channels */
++ iface_weight = iface->speed / iface_min_speed;
++
++ if (iface->weight_fulfilled >= iface_weight)
+ continue;
+- }
+
+ /* take ref before unlock */
+ kref_get(&iface->refcount);
+@@ -241,10 +251,21 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
+ continue;
+ }
+
+- cifs_dbg(FYI, "successfully opened new channel on iface:%pIS\n",
++ iface->num_channels++;
++ iface->weight_fulfilled++;
++ cifs_dbg(VFS, "successfully opened new channel on iface:%pIS\n",
+ &iface->sockaddr);
+ break;
+ }
++
++ /* reached end of list. reset weight_fulfilled and start over */
++ if (list_entry_is_head(iface, &ses->iface_list, iface_head)) {
++ list_for_each_entry(iface, &ses->iface_list, iface_head)
++ iface->weight_fulfilled = 0;
++ spin_unlock(&ses->iface_lock);
++ iface = NULL;
++ continue;
++ }
+ spin_unlock(&ses->iface_lock);
+
+ left--;
+@@ -263,8 +284,10 @@ int
+ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
+ {
+ unsigned int chan_index;
++ size_t iface_weight = 0, iface_min_speed = 0;
+ struct cifs_server_iface *iface = NULL;
+ struct cifs_server_iface *old_iface = NULL;
++ struct cifs_server_iface *last_iface = NULL;
+ int rc = 0;
+
+ spin_lock(&ses->chan_lock);
+@@ -284,13 +307,34 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
+ spin_unlock(&ses->chan_lock);
+
+ spin_lock(&ses->iface_lock);
++ if (!ses->iface_count) {
++ spin_unlock(&ses->iface_lock);
++ cifs_dbg(VFS, "server %s does not advertise interfaces\n", ses->server->hostname);
++ return 0;
++ }
++
++ last_iface = list_last_entry(&ses->iface_list, struct cifs_server_iface,
++ iface_head);
++ iface_min_speed = last_iface->speed;
++
+ /* then look for a new one */
+ list_for_each_entry(iface, &ses->iface_list, iface_head) {
++ /* do not mix rdma and non-rdma interfaces */
++ if (iface->rdma_capable != server->rdma)
++ continue;
++
+ if (!iface->is_active ||
+ (is_ses_using_iface(ses, iface) &&
+ !iface->rss_capable)) {
+ continue;
+ }
++
++ /* check if we already allocated enough channels */
++ iface_weight = iface->speed / iface_min_speed;
++
++ if (iface->weight_fulfilled >= iface_weight)
++ continue;
++
+ kref_get(&iface->refcount);
+ break;
+ }
+@@ -306,10 +350,22 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
+ cifs_dbg(FYI, "replacing iface: %pIS with %pIS\n",
+ &old_iface->sockaddr,
+ &iface->sockaddr);
++
++ old_iface->num_channels--;
++ if (old_iface->weight_fulfilled)
++ old_iface->weight_fulfilled--;
++ iface->num_channels++;
++ iface->weight_fulfilled++;
++
+ kref_put(&old_iface->refcount, release_iface);
+ } else if (old_iface) {
+ cifs_dbg(FYI, "releasing ref to iface: %pIS\n",
+ &old_iface->sockaddr);
++
++ old_iface->num_channels--;
++ if (old_iface->weight_fulfilled)
++ old_iface->weight_fulfilled--;
++
+ kref_put(&old_iface->refcount, release_iface);
+ } else {
+ WARN_ON(!iface);
+--
+2.42.0
+
--- /dev/null
+From f8054a4bd390845492da2bf1dc7ded0f855bd37b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Nov 2023 04:54:12 +0000
+Subject: cifs: fix leak of iface for primary channel
+
+From: Shyam Prasad N <sprasad@microsoft.com>
+
+[ Upstream commit 29954d5b1e0d67a4cd61c30c2201030c97e94b1e ]
+
+My last change in this area introduced a change which
+accounted for primary channel in the interface ref count.
+However, it did not reduce this ref count on deallocation
+of the primary channel. i.e. during umount.
+
+Fixing this leak here, by dropping this ref count for
+primary channel while freeing up the session.
+
+Fixes: fa1d0508bdd4 ("cifs: account for primary channel in the interface list")
+Cc: stable@vger.kernel.org
+Reported-by: Paulo Alcantara <pc@manguebit.com>
+Signed-off-by: Shyam Prasad N <sprasad@microsoft.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/connect.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+index 6ca1e00b3f76a..5b19918938346 100644
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -2070,6 +2070,12 @@ void cifs_put_smb_ses(struct cifs_ses *ses)
+ }
+ }
+
++ /* we now account for primary channel in iface->refcount */
++ if (ses->chans[0].iface) {
++ kref_put(&ses->chans[0].iface->refcount, release_iface);
++ ses->chans[0].server = NULL;
++ }
++
+ sesInfoFree(ses);
+ cifs_put_tcp_session(server, 0);
+ }
+--
+2.42.0
+
--- /dev/null
+From 45715956ab2990e5a33f8d8cfd74577fd5b69944 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Dec 2022 16:11:00 -0600
+Subject: cifs: minor cleanup of some headers
+
+From: Steve French <stfrench@microsoft.com>
+
+[ Upstream commit c19204cbd65c12fdcd34fb8f5d645007238ed5cd ]
+
+checkpatch showed formatting problems with extra spaces,
+and extra semicolon and some missing blank lines in some
+cifs headers.
+
+Reviewed-by: Paulo Alcantara (SUSE) <pc@cjr.nz>
+Reviewed-by: Germano Percossi <germano.percossi@gmail.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Stable-dep-of: de4eceab578e ("smb3: allow dumping session and tcon id to improve stats analysis and debugging")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/cifs_ioctl.h | 2 +-
+ fs/smb/client/cifsfs.h | 4 ++--
+ fs/smb/client/cifsglob.h | 7 +++++--
+ 3 files changed, 8 insertions(+), 5 deletions(-)
+
+diff --git a/fs/smb/client/cifs_ioctl.h b/fs/smb/client/cifs_ioctl.h
+index d86d78d5bfdc1..332588e77c311 100644
+--- a/fs/smb/client/cifs_ioctl.h
++++ b/fs/smb/client/cifs_ioctl.h
+@@ -108,7 +108,7 @@ struct smb3_notify_info {
+ #define CIFS_IOC_NOTIFY _IOW(CIFS_IOCTL_MAGIC, 9, struct smb3_notify)
+ #define CIFS_DUMP_FULL_KEY _IOWR(CIFS_IOCTL_MAGIC, 10, struct smb3_full_key_debug_info)
+ #define CIFS_IOC_NOTIFY_INFO _IOWR(CIFS_IOCTL_MAGIC, 11, struct smb3_notify_info)
+-#define CIFS_IOC_SHUTDOWN _IOR ('X', 125, __u32)
++#define CIFS_IOC_SHUTDOWN _IOR('X', 125, __u32)
+
+ /*
+ * Flags for going down operation
+diff --git a/fs/smb/client/cifsfs.h b/fs/smb/client/cifsfs.h
+index b6c38896fb2db..a1d8791c4fcd2 100644
+--- a/fs/smb/client/cifsfs.h
++++ b/fs/smb/client/cifsfs.h
+@@ -105,8 +105,8 @@ extern int cifs_lock(struct file *, int, struct file_lock *);
+ extern int cifs_fsync(struct file *, loff_t, loff_t, int);
+ extern int cifs_strict_fsync(struct file *, loff_t, loff_t, int);
+ extern int cifs_flush(struct file *, fl_owner_t id);
+-extern int cifs_file_mmap(struct file * , struct vm_area_struct *);
+-extern int cifs_file_strict_mmap(struct file * , struct vm_area_struct *);
++extern int cifs_file_mmap(struct file *file, struct vm_area_struct *vma);
++extern int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma);
+ extern const struct file_operations cifs_dir_ops;
+ extern int cifs_dir_open(struct inode *inode, struct file *file);
+ extern int cifs_readdir(struct file *file, struct dir_context *ctx);
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index 39602f39aea8f..6c8a55608c9bd 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -788,6 +788,7 @@ static inline unsigned int
+ in_flight(struct TCP_Server_Info *server)
+ {
+ unsigned int num;
++
+ spin_lock(&server->req_lock);
+ num = server->in_flight;
+ spin_unlock(&server->req_lock);
+@@ -798,6 +799,7 @@ static inline bool
+ has_credits(struct TCP_Server_Info *server, int *credits, int num_credits)
+ {
+ int num;
++
+ spin_lock(&server->req_lock);
+ num = *credits;
+ spin_unlock(&server->req_lock);
+@@ -991,7 +993,7 @@ struct cifs_ses {
+ struct TCP_Server_Info *server; /* pointer to server info */
+ int ses_count; /* reference counter */
+ enum ses_status_enum ses_status; /* updates protected by cifs_tcp_ses_lock */
+- unsigned overrideSecFlg; /* if non-zero override global sec flags */
++ unsigned int overrideSecFlg; /* if non-zero override global sec flags */
+ char *serverOS; /* name of operating system underlying server */
+ char *serverNOS; /* name of network operating system of server */
+ char *serverDomain; /* security realm of server */
+@@ -1347,7 +1349,7 @@ struct cifsFileInfo {
+ __u32 pid; /* process id who opened file */
+ struct cifs_fid fid; /* file id from remote */
+ struct list_head rlist; /* reconnect list */
+- /* BB add lock scope info here if needed */ ;
++ /* BB add lock scope info here if needed */
+ /* lock scope id (0 if none) */
+ struct dentry *dentry;
+ struct tcon_link *tlink;
+@@ -1735,6 +1737,7 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param,
+ int number_of_items)
+ {
+ int i;
++
+ if ((number_of_items == 0) || (param == NULL))
+ return;
+ for (i = 0; i < number_of_items; i++) {
+--
+2.42.0
+
--- /dev/null
+From 20c7817edfe365afe08429e6974f98d3122b10bc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 23 Dec 2022 10:41:25 +0000
+Subject: cifs: print last update time for interface list
+
+From: Shyam Prasad N <sprasad@microsoft.com>
+
+[ Upstream commit 05844bd661d9fd478df1175b6639bf2d9398becb ]
+
+We store the last updated time for interface list while
+parsing the interfaces. This change is to just print that
+info in DebugData.
+
+Signed-off-by: Shyam Prasad N <sprasad@microsoft.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Stable-dep-of: fa1d0508bdd4 ("cifs: account for primary channel in the interface list")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/cifs_debug.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c
+index ed396b186c5a4..8233fb2f0ca63 100644
+--- a/fs/smb/client/cifs_debug.c
++++ b/fs/smb/client/cifs_debug.c
+@@ -457,8 +457,10 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
+
+ spin_lock(&ses->iface_lock);
+ if (ses->iface_count)
+- seq_printf(m, "\n\n\tServer interfaces: %zu",
+- ses->iface_count);
++ seq_printf(m, "\n\n\tServer interfaces: %zu"
++ "\tLast updated: %lu seconds ago",
++ ses->iface_count,
++ (jiffies - ses->iface_last_update) / HZ);
+ j = 0;
+ list_for_each_entry(iface, &ses->iface_list,
+ iface_head) {
+--
+2.42.0
+
--- /dev/null
+From ea477583c4ed283187470922a022b385462af2a2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Nov 2023 18:21:14 +0100
+Subject: dm-delay: fix a race between delay_presuspend and delay_bio
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+[ Upstream commit 6fc45b6ed921dc00dfb264dc08c7d67ee63d2656 ]
+
+In delay_presuspend, we set the atomic variable may_delay and then stop
+the timer and flush pending bios. The intention here is to prevent the
+delay target from re-arming the timer again.
+
+However, this test is racy. Suppose that one thread goes to delay_bio,
+sees that dc->may_delay is one and proceeds; now, another thread executes
+delay_presuspend, it sets dc->may_delay to zero, deletes the timer and
+flushes pending bios. Then, the first thread continues and adds the bio to
+delayed->list despite the fact that dc->may_delay is false.
+
+Fix this bug by changing may_delay's type from atomic_t to bool and
+only access it while holding the delayed_bios_lock mutex. Note that we
+don't have to grab the mutex in delay_resume because there are no bios
+in flight at this point.
+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Mike Snitzer <snitzer@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/dm-delay.c | 16 +++++++++++-----
+ 1 file changed, 11 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
+index 358e870a03a56..5a8ece66ea696 100644
+--- a/drivers/md/dm-delay.c
++++ b/drivers/md/dm-delay.c
+@@ -32,7 +32,7 @@ struct delay_c {
+ struct work_struct flush_expired_bios;
+ struct list_head delayed_bios;
+ struct task_struct *worker;
+- atomic_t may_delay;
++ bool may_delay;
+
+ struct delay_class read;
+ struct delay_class write;
+@@ -235,7 +235,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+
+ ti->private = dc;
+ INIT_LIST_HEAD(&dc->delayed_bios);
+- atomic_set(&dc->may_delay, 1);
++ dc->may_delay = true;
+ dc->argc = argc;
+
+ ret = delay_class_ctr(ti, &dc->read, argv);
+@@ -311,7 +311,7 @@ static int delay_bio(struct delay_c *dc, struct delay_class *c, struct bio *bio)
+ struct dm_delay_info *delayed;
+ unsigned long expires = 0;
+
+- if (!c->delay || !atomic_read(&dc->may_delay))
++ if (!c->delay)
+ return DM_MAPIO_REMAPPED;
+
+ delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info));
+@@ -320,6 +320,10 @@ static int delay_bio(struct delay_c *dc, struct delay_class *c, struct bio *bio)
+ delayed->expires = expires = jiffies + msecs_to_jiffies(c->delay);
+
+ mutex_lock(&delayed_bios_lock);
++ if (unlikely(!dc->may_delay)) {
++ mutex_unlock(&delayed_bios_lock);
++ return DM_MAPIO_REMAPPED;
++ }
+ c->ops++;
+ list_add_tail(&delayed->list, &dc->delayed_bios);
+ mutex_unlock(&delayed_bios_lock);
+@@ -336,7 +340,9 @@ static void delay_presuspend(struct dm_target *ti)
+ {
+ struct delay_c *dc = ti->private;
+
+- atomic_set(&dc->may_delay, 0);
++ mutex_lock(&delayed_bios_lock);
++ dc->may_delay = false;
++ mutex_unlock(&delayed_bios_lock);
+
+ if (delay_is_fast(dc))
+ flush_delayed_bios_fast(dc, true);
+@@ -350,7 +356,7 @@ static void delay_resume(struct dm_target *ti)
+ {
+ struct delay_c *dc = ti->private;
+
+- atomic_set(&dc->may_delay, 1);
++ dc->may_delay = true;
+ }
+
+ static int delay_map(struct dm_target *ti, struct bio *bio)
+--
+2.42.0
+
--- /dev/null
+From 8fc3662c0d81119ae3a7c3fc1c2cd441c3054905 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Oct 2023 12:46:05 +0100
+Subject: dm delay: for short delays, use kthread instead of timers and wq
+
+From: Christian Loehle <christian.loehle@arm.com>
+
+[ Upstream commit 70bbeb29fab09d6ea6cfe64109db60a97d84d739 ]
+
+DM delay's current design of using timers and wq to realize the delays
+is insufficient for delays below ~50ms.
+
+This commit enhances the design to use a kthread to flush the expired
+delays, trading some CPU time (in some cases) for better delay
+accuracy and delays closer to what the user requested for smaller
+delays. The new design is chosen as long as all the delays are below
+50ms.
+
+Since bios can't be completed in interrupt context using a kthread
+is probably the most reasonable way to approach this.
+
+Testing with
+echo "0 2097152 zero" | dmsetup create dm-zeros
+for i in $(seq 0 20);
+do
+ echo "0 2097152 delay /dev/mapper/dm-zeros 0 $i" | dmsetup create dm-delay-${i}ms;
+done
+
+Some performance numbers for comparison, on beaglebone black (single
+core) CONFIG_HZ_1000=y:
+
+fio --name=1msread --rw=randread --bs=4k --runtime=60 --time_based \
+ --filename=/dev/mapper/dm-delay-1ms
+Theoretical maximum: 1000 IOPS
+Previous: 250 IOPS
+Kthread: 500 IOPS
+
+fio --name=10msread --rw=randread --bs=4k --runtime=60 --time_based \
+ --filename=/dev/mapper/dm-delay-10ms
+Theoretical maximum: 100 IOPS
+Previous: 45 IOPS
+Kthread: 50 IOPS
+
+fio --name=1mswrite --rw=randwrite --direct=1 --bs=4k --runtime=60 \
+ --time_based --filename=/dev/mapper/dm-delay-1ms
+Theoretical maximum: 1000 IOPS
+Previous: 498 IOPS
+Kthread: 1000 IOPS
+
+fio --name=10mswrite --rw=randwrite --direct=1 --bs=4k --runtime=60 \
+ --time_based --filename=/dev/mapper/dm-delay-10ms
+Theoretical maximum: 100 IOPS
+Previous: 90 IOPS
+Kthread: 100 IOPS
+
+(This one is just to prove the new design isn't impacting throughput,
+not really about delays):
+fio --name=10mswriteasync --rw=randwrite --direct=1 --bs=4k \
+ --runtime=60 --time_based --filename=/dev/mapper/dm-delay-10ms \
+ --numjobs=32 --iodepth=64 --ioengine=libaio --group_reporting
+Previous: 13.3k IOPS
+Kthread: 13.3k IOPS
+
+Signed-off-by: Christian Loehle <christian.loehle@arm.com>
+[Harshit: kthread_create error handling fix in delay_ctr]
+Signed-off-by: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Signed-off-by: Mike Snitzer <snitzer@kernel.org>
+Stable-dep-of: 6fc45b6ed921 ("dm-delay: fix a race between delay_presuspend and delay_bio")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/dm-delay.c | 103 ++++++++++++++++++++++++++++++++++++------
+ 1 file changed, 88 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
+index 02b8f4e818276..358e870a03a56 100644
+--- a/drivers/md/dm-delay.c
++++ b/drivers/md/dm-delay.c
+@@ -12,6 +12,7 @@
+ #include <linux/blkdev.h>
+ #include <linux/bio.h>
+ #include <linux/slab.h>
++#include <linux/kthread.h>
+
+ #include <linux/device-mapper.h>
+
+@@ -30,6 +31,7 @@ struct delay_c {
+ struct workqueue_struct *kdelayd_wq;
+ struct work_struct flush_expired_bios;
+ struct list_head delayed_bios;
++ struct task_struct *worker;
+ atomic_t may_delay;
+
+ struct delay_class read;
+@@ -65,6 +67,44 @@ static void queue_timeout(struct delay_c *dc, unsigned long expires)
+ mutex_unlock(&dc->timer_lock);
+ }
+
++static inline bool delay_is_fast(struct delay_c *dc)
++{
++ return !!dc->worker;
++}
++
++static void flush_delayed_bios_fast(struct delay_c *dc, bool flush_all)
++{
++ struct dm_delay_info *delayed, *next;
++
++ mutex_lock(&delayed_bios_lock);
++ list_for_each_entry_safe(delayed, next, &dc->delayed_bios, list) {
++ if (flush_all || time_after_eq(jiffies, delayed->expires)) {
++ struct bio *bio = dm_bio_from_per_bio_data(delayed,
++ sizeof(struct dm_delay_info));
++ list_del(&delayed->list);
++ dm_submit_bio_remap(bio, NULL);
++ delayed->class->ops--;
++ }
++ }
++ mutex_unlock(&delayed_bios_lock);
++}
++
++static int flush_worker_fn(void *data)
++{
++ struct delay_c *dc = data;
++
++ while (1) {
++ flush_delayed_bios_fast(dc, false);
++ if (unlikely(list_empty(&dc->delayed_bios))) {
++ set_current_state(TASK_INTERRUPTIBLE);
++ schedule();
++ } else
++ cond_resched();
++ }
++
++ return 0;
++}
++
+ static void flush_bios(struct bio *bio)
+ {
+ struct bio *n;
+@@ -77,7 +117,7 @@ static void flush_bios(struct bio *bio)
+ }
+ }
+
+-static struct bio *flush_delayed_bios(struct delay_c *dc, int flush_all)
++static struct bio *flush_delayed_bios(struct delay_c *dc, bool flush_all)
+ {
+ struct dm_delay_info *delayed, *next;
+ unsigned long next_expires = 0;
+@@ -114,7 +154,10 @@ static void flush_expired_bios(struct work_struct *work)
+ struct delay_c *dc;
+
+ dc = container_of(work, struct delay_c, flush_expired_bios);
+- flush_bios(flush_delayed_bios(dc, 0));
++ if (delay_is_fast(dc))
++ flush_delayed_bios_fast(dc, false);
++ else
++ flush_bios(flush_delayed_bios(dc, false));
+ }
+
+ static void delay_dtr(struct dm_target *ti)
+@@ -130,8 +173,11 @@ static void delay_dtr(struct dm_target *ti)
+ dm_put_device(ti, dc->write.dev);
+ if (dc->flush.dev)
+ dm_put_device(ti, dc->flush.dev);
++ if (dc->worker)
++ kthread_stop(dc->worker);
+
+- mutex_destroy(&dc->timer_lock);
++ if (!delay_is_fast(dc))
++ mutex_destroy(&dc->timer_lock);
+
+ kfree(dc);
+ }
+@@ -174,6 +220,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ {
+ struct delay_c *dc;
+ int ret;
++ unsigned int max_delay;
+
+ if (argc != 3 && argc != 6 && argc != 9) {
+ ti->error = "Requires exactly 3, 6 or 9 arguments";
+@@ -187,16 +234,14 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ }
+
+ ti->private = dc;
+- timer_setup(&dc->delay_timer, handle_delayed_timer, 0);
+- INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
+ INIT_LIST_HEAD(&dc->delayed_bios);
+- mutex_init(&dc->timer_lock);
+ atomic_set(&dc->may_delay, 1);
+ dc->argc = argc;
+
+ ret = delay_class_ctr(ti, &dc->read, argv);
+ if (ret)
+ goto bad;
++ max_delay = dc->read.delay;
+
+ if (argc == 3) {
+ ret = delay_class_ctr(ti, &dc->write, argv);
+@@ -205,6 +250,8 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ ret = delay_class_ctr(ti, &dc->flush, argv);
+ if (ret)
+ goto bad;
++ max_delay = max(max_delay, dc->write.delay);
++ max_delay = max(max_delay, dc->flush.delay);
+ goto out;
+ }
+
+@@ -215,19 +262,37 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ ret = delay_class_ctr(ti, &dc->flush, argv + 3);
+ if (ret)
+ goto bad;
++ max_delay = max(max_delay, dc->flush.delay);
+ goto out;
+ }
+
+ ret = delay_class_ctr(ti, &dc->flush, argv + 6);
+ if (ret)
+ goto bad;
++ max_delay = max(max_delay, dc->flush.delay);
+
+ out:
+- dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
+- if (!dc->kdelayd_wq) {
+- ret = -EINVAL;
+- DMERR("Couldn't start kdelayd");
+- goto bad;
++ if (max_delay < 50) {
++ /*
++ * In case of small requested delays, use kthread instead of
++ * timers and workqueue to achieve better latency.
++ */
++ dc->worker = kthread_create(&flush_worker_fn, dc,
++ "dm-delay-flush-worker");
++ if (IS_ERR(dc->worker)) {
++ ret = PTR_ERR(dc->worker);
++ goto bad;
++ }
++ } else {
++ timer_setup(&dc->delay_timer, handle_delayed_timer, 0);
++ INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
++ mutex_init(&dc->timer_lock);
++ dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
++ if (!dc->kdelayd_wq) {
++ ret = -EINVAL;
++ DMERR("Couldn't start kdelayd");
++ goto bad;
++ }
+ }
+
+ ti->num_flush_bios = 1;
+@@ -259,7 +324,10 @@ static int delay_bio(struct delay_c *dc, struct delay_class *c, struct bio *bio)
+ list_add_tail(&delayed->list, &dc->delayed_bios);
+ mutex_unlock(&delayed_bios_lock);
+
+- queue_timeout(dc, expires);
++ if (delay_is_fast(dc))
++ wake_up_process(dc->worker);
++ else
++ queue_timeout(dc, expires);
+
+ return DM_MAPIO_SUBMITTED;
+ }
+@@ -269,8 +337,13 @@ static void delay_presuspend(struct dm_target *ti)
+ struct delay_c *dc = ti->private;
+
+ atomic_set(&dc->may_delay, 0);
+- del_timer_sync(&dc->delay_timer);
+- flush_bios(flush_delayed_bios(dc, 1));
++
++ if (delay_is_fast(dc))
++ flush_delayed_bios_fast(dc, true);
++ else {
++ del_timer_sync(&dc->delay_timer);
++ flush_bios(flush_delayed_bios(dc, true));
++ }
+ }
+
+ static void delay_resume(struct dm_target *ti)
+@@ -355,7 +428,7 @@ static int delay_iterate_devices(struct dm_target *ti,
+
+ static struct target_type delay_target = {
+ .name = "delay",
+- .version = {1, 3, 0},
++ .version = {1, 4, 0},
+ .features = DM_TARGET_PASSES_INTEGRITY,
+ .module = THIS_MODULE,
+ .ctr = delay_ctr,
+--
+2.42.0
+
--- /dev/null
+From 594202a0908a3b48d59cf4b6c399d168176c296b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 Nov 2023 11:54:03 +0100
+Subject: drm/i915: do not clean GT table on error path
+
+From: Andrzej Hajda <andrzej.hajda@intel.com>
+
+[ Upstream commit 0561794b6b642b84b879bf97061c4b4fa692839e ]
+
+The only task of intel_gt_release_all is to zero gt table. Calling
+it on error path prevents intel_gt_driver_late_release_all (called from
+i915_driver_late_release) to cleanup GTs, causing leakage.
+After i915_driver_late_release GT array is not used anymore so
+it does not need cleaning at all.
+
+Sample leak report:
+
+BUG i915_request (...): Objects remaining in i915_request on __kmem_cache_shutdown()
+...
+Object 0xffff888113420040 @offset=64
+Allocated in __i915_request_create+0x75/0x610 [i915] age=18339 cpu=1 pid=1454
+ kmem_cache_alloc+0x25b/0x270
+ __i915_request_create+0x75/0x610 [i915]
+ i915_request_create+0x109/0x290 [i915]
+ __engines_record_defaults+0xca/0x440 [i915]
+ intel_gt_init+0x275/0x430 [i915]
+ i915_gem_init+0x135/0x2c0 [i915]
+ i915_driver_probe+0x8d1/0xdc0 [i915]
+
+v2: removed whole intel_gt_release_all
+
+Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/8489
+Fixes: bec68cc9ea42 ("drm/i915: Prepare for multiple GTs")
+Signed-off-by: Andrzej Hajda <andrzej.hajda@intel.com>
+Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Reviewed-by: Nirmoy Das <nirmoy.das@intel.com>
+Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20231115-dont_clean_gt_on_error_path-v2-1-54250125470a@intel.com
+(cherry picked from commit e899505533852bf1da133f2f4c9a9655ff77f7e5)
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/gt/intel_gt.c | 11 -----------
+ drivers/gpu/drm/i915/i915_driver.c | 4 +---
+ 2 files changed, 1 insertion(+), 14 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
+index d12ec092e62df..91a005c46b107 100644
+--- a/drivers/gpu/drm/i915/gt/intel_gt.c
++++ b/drivers/gpu/drm/i915/gt/intel_gt.c
+@@ -903,8 +903,6 @@ int intel_gt_probe_all(struct drm_i915_private *i915)
+
+ err:
+ i915_probe_error(i915, "Failed to initialize %s! (%d)\n", gtdef->name, ret);
+- intel_gt_release_all(i915);
+-
+ return ret;
+ }
+
+@@ -923,15 +921,6 @@ int intel_gt_tiles_init(struct drm_i915_private *i915)
+ return 0;
+ }
+
+-void intel_gt_release_all(struct drm_i915_private *i915)
+-{
+- struct intel_gt *gt;
+- unsigned int id;
+-
+- for_each_gt(gt, i915, id)
+- i915->gt[id] = NULL;
+-}
+-
+ void intel_gt_info_print(const struct intel_gt_info *info,
+ struct drm_printer *p)
+ {
+diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
+index 75a93951fe429..be0ebed2a360f 100644
+--- a/drivers/gpu/drm/i915/i915_driver.c
++++ b/drivers/gpu/drm/i915/i915_driver.c
+@@ -901,7 +901,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+
+ ret = i915_driver_mmio_probe(i915);
+ if (ret < 0)
+- goto out_tiles_cleanup;
++ goto out_runtime_pm_put;
+
+ ret = i915_driver_hw_probe(i915);
+ if (ret < 0)
+@@ -959,8 +959,6 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ i915_ggtt_driver_late_release(i915);
+ out_cleanup_mmio:
+ i915_driver_mmio_release(i915);
+-out_tiles_cleanup:
+- intel_gt_release_all(i915);
+ out_runtime_pm_put:
+ enable_rpm_wakeref_asserts(&i915->runtime_pm);
+ i915_driver_late_release(i915);
+--
+2.42.0
+
--- /dev/null
+From 9f94c1c5f5ff5a7b6225483cc2664113fdfde9fe Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Nov 2023 12:42:05 +0800
+Subject: drm/panel: auo,b101uan08.3: Fine tune the panel power sequence
+
+From: Xuxin Xiong <xuxinxiong@huaqin.corp-partner.google.com>
+
+[ Upstream commit 6965809e526917b73c8f9178173184dcf13cec4b ]
+
+For "auo,b101uan08.3" this panel, it is stipulated in the panel spec that
+MIPI needs to keep the LP11 state before the lcm_reset pin is pulled high.
+
+Fixes: 56ad624b4cb5 ("drm/panel: support for auo, b101uan08.3 wuxga dsi video mode panel")
+Signed-off-by: Xuxin Xiong <xuxinxiong@huaqin.corp-partner.google.com>
+Reviewed-by: Douglas Anderson <dianders@chromium.org>
+Signed-off-by: Douglas Anderson <dianders@chromium.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20231114044205.613421-1-xuxinxiong@huaqin.corp-partner.google.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+index 733e28a2536a4..1c008bd9102ff 100644
+--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
++++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+@@ -1473,6 +1473,7 @@ static const struct panel_desc auo_b101uan08_3_desc = {
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_LPM,
+ .init_cmds = auo_b101uan08_3_init_cmd,
++ .lp11_before_reset = true,
+ };
+
+ static const struct drm_display_mode boe_tv105wum_nw0_default_mode = {
+--
+2.42.0
+
--- /dev/null
+From 65c774bf3ee82d13e6f7e54154f67029e60de14a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 May 2023 17:49:55 +0800
+Subject: drm/panel: boe-tv101wum-nl6: Fine tune the panel power sequence
+
+From: Shuijing Li <shuijing.li@mediatek.com>
+
+[ Upstream commit 812562b8d881ce6d33fed8052b3a10b718430fb5 ]
+
+For "boe,tv105wum-nw0" this special panel, it is stipulated in
+the panel spec that MIPI needs to keep the LP11 state before
+the lcm_reset pin is pulled high.
+
+Signed-off-by: Shuijing Li <shuijing.li@mediatek.com>
+Signed-off-by: Xinlei Lee <xinlei.lee@mediatek.com>
+Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+Signed-off-by: Neil Armstrong <neil.armstrong@linaro.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230515094955.15982-3-shuijing.li@mediatek.com
+Stable-dep-of: 6965809e5269 ("drm/panel: auo,b101uan08.3: Fine tune the panel power sequence")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+index c924f1124ebca..733e28a2536a4 100644
+--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
++++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+@@ -36,6 +36,7 @@ struct panel_desc {
+ const struct panel_init_cmd *init_cmds;
+ unsigned int lanes;
+ bool discharge_on_disable;
++ bool lp11_before_reset;
+ };
+
+ struct boe_panel {
+@@ -1269,6 +1270,10 @@ static int boe_panel_prepare(struct drm_panel *panel)
+
+ usleep_range(10000, 11000);
+
++ if (boe->desc->lp11_before_reset) {
++ mipi_dsi_dcs_nop(boe->dsi);
++ usleep_range(1000, 2000);
++ }
+ gpiod_set_value(boe->enable_gpio, 1);
+ usleep_range(1000, 2000);
+ gpiod_set_value(boe->enable_gpio, 0);
+@@ -1495,6 +1500,7 @@ static const struct panel_desc boe_tv105wum_nw0_desc = {
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_LPM,
+ .init_cmds = boe_init_cmd,
++ .lp11_before_reset = true,
+ };
+
+ static int boe_panel_get_modes(struct drm_panel *panel,
+--
+2.42.0
+
--- /dev/null
+From 5fc0df9c834b79da081dd8c074ed2ac251efa768 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Oct 2023 00:33:15 +0200
+Subject: drm/panel: simple: Fix Innolux G101ICE-L01 bus flags
+
+From: Marek Vasut <marex@denx.de>
+
+[ Upstream commit 06fc41b09cfbc02977acd9189473593a37d82d9b ]
+
+Add missing .bus_flags = DRM_BUS_FLAG_DE_HIGH to this panel description,
+ones which match both the datasheet and the panel display_timing flags .
+
+Fixes: 1e29b840af9f ("drm/panel: simple: Add Innolux G101ICE-L01 panel")
+Signed-off-by: Marek Vasut <marex@denx.de>
+Reviewed-by: Neil Armstrong <neil.armstrong@linaro.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20231008223315.279215-1-marex@denx.de
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/panel/panel-simple.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
+index 0e8622ccd3a0f..be2900a42b808 100644
+--- a/drivers/gpu/drm/panel/panel-simple.c
++++ b/drivers/gpu/drm/panel/panel-simple.c
+@@ -2228,6 +2228,7 @@ static const struct panel_desc innolux_g101ice_l01 = {
+ .disable = 200,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
++ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+ };
+
+--
+2.42.0
+
--- /dev/null
+From d105a653b1723ed98232cb3fbfa69a7c9606cd16 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Oct 2023 00:32:56 +0200
+Subject: drm/panel: simple: Fix Innolux G101ICE-L01 timings
+
+From: Marek Vasut <marex@denx.de>
+
+[ Upstream commit 3f9a91b6c00e655d27bd785dcda1742dbdc31bda ]
+
+The Innolux G101ICE-L01 datasheet [1] page 17 table
+6.1 INPUT SIGNAL TIMING SPECIFICATIONS
+indicates that maximum vertical blanking time is 40 lines.
+Currently the driver uses 29 lines.
+
+Fix it, and since this panel is a DE panel, adjust the timings
+to make them less hostile to controllers which cannot do 1 px
+HSA/VSA, distribute the delays evenly between all three parts.
+
+[1] https://www.data-modul.com/sites/default/files/products/G101ICE-L01-C2-specification-12042389.pdf
+
+Fixes: 1e29b840af9f ("drm/panel: simple: Add Innolux G101ICE-L01 panel")
+Signed-off-by: Marek Vasut <marex@denx.de>
+Reviewed-by: Neil Armstrong <neil.armstrong@linaro.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20231008223256.279196-1-marex@denx.de
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/panel/panel-simple.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
+index be2900a42b808..005377f58eb4a 100644
+--- a/drivers/gpu/drm/panel/panel-simple.c
++++ b/drivers/gpu/drm/panel/panel-simple.c
+@@ -2205,13 +2205,13 @@ static const struct panel_desc innolux_g070y2_t02 = {
+ static const struct display_timing innolux_g101ice_l01_timing = {
+ .pixelclock = { 60400000, 71100000, 74700000 },
+ .hactive = { 1280, 1280, 1280 },
+- .hfront_porch = { 41, 80, 100 },
+- .hback_porch = { 40, 79, 99 },
+- .hsync_len = { 1, 1, 1 },
++ .hfront_porch = { 30, 60, 70 },
++ .hback_porch = { 30, 60, 70 },
++ .hsync_len = { 22, 40, 60 },
+ .vactive = { 800, 800, 800 },
+- .vfront_porch = { 5, 11, 14 },
+- .vback_porch = { 4, 11, 14 },
+- .vsync_len = { 1, 1, 1 },
++ .vfront_porch = { 3, 8, 14 },
++ .vback_porch = { 3, 8, 14 },
++ .vsync_len = { 4, 7, 12 },
+ .flags = DISPLAY_FLAGS_DE_HIGH,
+ };
+
+--
+2.42.0
+
--- /dev/null
+From 8497337c1d7d5a3b0037504f2c800ed3d2b46f42 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Oct 2023 19:14:58 +0000
+Subject: drm/rockchip: vop: Fix color for RGB888/BGR888 format on VOP full
+
+From: Jonas Karlman <jonas@kwiboo.se>
+
+[ Upstream commit bb0a05acd6121ff0e810b44fdc24dbdfaa46b642 ]
+
+Use of DRM_FORMAT_RGB888 and DRM_FORMAT_BGR888 on e.g. RK3288, RK3328
+and RK3399 result in wrong colors being displayed.
+
+The issue can be observed using modetest:
+
+ modetest -s <connector_id>@<crtc_id>:1920x1080-60@RG24
+ modetest -s <connector_id>@<crtc_id>:1920x1080-60@BG24
+
+Vendor 4.4 kernel apply an inverted rb swap for these formats on VOP
+full framework (IP version 3.x) compared to VOP little framework (2.x).
+
+Fix colors by applying different rb swap for VOP full framework (3.x)
+and VOP little framework (2.x) similar to vendor 4.4 kernel.
+
+Fixes: 85a359f25388 ("drm/rockchip: Add BGR formats to VOP")
+Signed-off-by: Jonas Karlman <jonas@kwiboo.se>
+Tested-by: Diederik de Haas <didi.debian@cknow.org>
+Reviewed-by: Christopher Obbard <chris.obbard@collabora.com>
+Tested-by: Christopher Obbard <chris.obbard@collabora.com>
+Signed-off-by: Heiko Stuebner <heiko@sntech.de>
+Link: https://patchwork.freedesktop.org/patch/msgid/20231026191500.2994225-1-jonas@kwiboo.se
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 14 +++++++++++---
+ 1 file changed, 11 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+index ae8c532f7fc84..632ab8941eb44 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+@@ -248,14 +248,22 @@ static inline void vop_cfg_done(struct vop *vop)
+ VOP_REG_SET(vop, common, cfg_done, 1);
+ }
+
+-static bool has_rb_swapped(uint32_t format)
++static bool has_rb_swapped(uint32_t version, uint32_t format)
+ {
+ switch (format) {
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+- case DRM_FORMAT_BGR888:
+ case DRM_FORMAT_BGR565:
+ return true;
++ /*
++ * full framework (IP version 3.x) only need rb swapped for RGB888 and
++ * little framework (IP version 2.x) only need rb swapped for BGR888,
++ * check for 3.x to also only rb swap BGR888 for unknown vop version
++ */
++ case DRM_FORMAT_RGB888:
++ return VOP_MAJOR(version) == 3;
++ case DRM_FORMAT_BGR888:
++ return VOP_MAJOR(version) != 3;
+ default:
+ return false;
+ }
+@@ -1017,7 +1025,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
+ VOP_WIN_SET(vop, win, dsp_info, dsp_info);
+ VOP_WIN_SET(vop, win, dsp_st, dsp_st);
+
+- rb_swap = has_rb_swapped(fb->format->format);
++ rb_swap = has_rb_swapped(vop->data->version, fb->format->format);
+ VOP_WIN_SET(vop, win, rb_swap, rb_swap);
+
+ /*
+--
+2.42.0
+
--- /dev/null
+From 3582fcd27d07ad4b34d7bc489afc0e31bffd28dc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Apr 2023 11:38:36 +0800
+Subject: ext4: add a new helper to check if es must be kept
+
+From: Baokun Li <libaokun1@huawei.com>
+
+[ Upstream commit 9649eb18c6288f514cacffdd699d5cd999c2f8f6 ]
+
+In the extent status tree, we have extents which we can just drop without
+issues and extents we must not drop - this depends on the extent's status
+- currently ext4_es_is_delayed() extents must stay, others may be dropped.
+
+A helper function is added to help determine if the current extent can
+be dropped, although only ext4_es_is_delayed() extents cannot be dropped
+currently.
+
+Suggested-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/r/20230424033846.4732-3-libaokun1@huawei.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 8e387c89e96b ("ext4: make sure allocate pending entry not fail")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents_status.c | 34 +++++++++++++++++++++-------------
+ 1 file changed, 21 insertions(+), 13 deletions(-)
+
+diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
+index b57e497679ef9..c9ab439b29f56 100644
+--- a/fs/ext4/extents_status.c
++++ b/fs/ext4/extents_status.c
+@@ -448,6 +448,19 @@ static void ext4_es_list_del(struct inode *inode)
+ spin_unlock(&sbi->s_es_lock);
+ }
+
++/*
++ * Returns true if we cannot fail to allocate memory for this extent_status
++ * entry and cannot reclaim it until its status changes.
++ */
++static inline bool ext4_es_must_keep(struct extent_status *es)
++{
++ /* fiemap, bigalloc, and seek_data/hole need to use it. */
++ if (ext4_es_is_delayed(es))
++ return true;
++
++ return false;
++}
++
+ static struct extent_status *
+ ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
+ ext4_fsblk_t pblk)
+@@ -460,10 +473,8 @@ ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
+ es->es_len = len;
+ es->es_pblk = pblk;
+
+- /*
+- * We don't count delayed extent because we never try to reclaim them
+- */
+- if (!ext4_es_is_delayed(es)) {
++ /* We never try to reclaim a must kept extent, so we don't count it. */
++ if (!ext4_es_must_keep(es)) {
+ if (!EXT4_I(inode)->i_es_shk_nr++)
+ ext4_es_list_add(inode);
+ percpu_counter_inc(&EXT4_SB(inode->i_sb)->
+@@ -481,8 +492,8 @@ static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
+ EXT4_I(inode)->i_es_all_nr--;
+ percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);
+
+- /* Decrease the shrink counter when this es is not delayed */
+- if (!ext4_es_is_delayed(es)) {
++ /* Decrease the shrink counter when we can reclaim the extent. */
++ if (!ext4_es_must_keep(es)) {
+ BUG_ON(EXT4_I(inode)->i_es_shk_nr == 0);
+ if (!--EXT4_I(inode)->i_es_shk_nr)
+ ext4_es_list_del(inode);
+@@ -853,7 +864,7 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
+ if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
+ 128, EXT4_I(inode)))
+ goto retry;
+- if (err == -ENOMEM && !ext4_es_is_delayed(&newes))
++ if (err == -ENOMEM && !ext4_es_must_keep(&newes))
+ err = 0;
+
+ if (sbi->s_cluster_ratio > 1 && test_opt(inode->i_sb, DELALLOC) &&
+@@ -1704,11 +1715,8 @@ static int es_do_reclaim_extents(struct ext4_inode_info *ei, ext4_lblk_t end,
+
+ (*nr_to_scan)--;
+ node = rb_next(&es->rb_node);
+- /*
+- * We can't reclaim delayed extent from status tree because
+- * fiemap, bigallic, and seek_data/hole need to use it.
+- */
+- if (ext4_es_is_delayed(es))
++
++ if (ext4_es_must_keep(es))
+ goto next;
+ if (ext4_es_is_referenced(es)) {
+ ext4_es_clear_referenced(es);
+@@ -1772,7 +1780,7 @@ void ext4_clear_inode_es(struct inode *inode)
+ while (node) {
+ es = rb_entry(node, struct extent_status, rb_node);
+ node = rb_next(node);
+- if (!ext4_es_is_delayed(es)) {
++ if (!ext4_es_must_keep(es)) {
+ rb_erase(&es->rb_node, &tree->root);
+ ext4_es_free_extent(inode, es);
+ }
+--
+2.42.0
+
--- /dev/null
+From 55b0ae2d53f51297034beb9b33ff70ae1e066c62 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Apr 2023 11:38:37 +0800
+Subject: ext4: factor out __es_alloc_extent() and __es_free_extent()
+
+From: Baokun Li <libaokun1@huawei.com>
+
+[ Upstream commit 73a2f033656be11298912201ad50615307b4477a ]
+
+Factor out __es_alloc_extent() and __es_free_extent(), which only allocate
+and free extent_status in these two helpers.
+
+The ext4_es_alloc_extent() function is split into __es_alloc_extent()
+and ext4_es_init_extent(). In __es_alloc_extent() we allocate memory using
+GFP_KERNEL | __GFP_NOFAIL | __GFP_ZERO if the memory allocation cannot
+fail, otherwise we use GFP_ATOMIC. and the ext4_es_init_extent() is used to
+initialize extent_status and update related variables after a successful
+allocation.
+
+This is to prepare for the use of pre-allocated extent_status later.
+
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/r/20230424033846.4732-4-libaokun1@huawei.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 8e387c89e96b ("ext4: make sure allocate pending entry not fail")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents_status.c | 30 +++++++++++++++++++-----------
+ 1 file changed, 19 insertions(+), 11 deletions(-)
+
+diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
+index c9ab439b29f56..45f97c9670a3c 100644
+--- a/fs/ext4/extents_status.c
++++ b/fs/ext4/extents_status.c
+@@ -461,14 +461,17 @@ static inline bool ext4_es_must_keep(struct extent_status *es)
+ return false;
+ }
+
+-static struct extent_status *
+-ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
+- ext4_fsblk_t pblk)
++static inline struct extent_status *__es_alloc_extent(bool nofail)
++{
++ if (!nofail)
++ return kmem_cache_alloc(ext4_es_cachep, GFP_ATOMIC);
++
++ return kmem_cache_zalloc(ext4_es_cachep, GFP_KERNEL | __GFP_NOFAIL);
++}
++
++static void ext4_es_init_extent(struct inode *inode, struct extent_status *es,
++ ext4_lblk_t lblk, ext4_lblk_t len, ext4_fsblk_t pblk)
+ {
+- struct extent_status *es;
+- es = kmem_cache_alloc(ext4_es_cachep, GFP_ATOMIC);
+- if (es == NULL)
+- return NULL;
+ es->es_lblk = lblk;
+ es->es_len = len;
+ es->es_pblk = pblk;
+@@ -483,8 +486,11 @@ ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
+
+ EXT4_I(inode)->i_es_all_nr++;
+ percpu_counter_inc(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);
++}
+
+- return es;
++static inline void __es_free_extent(struct extent_status *es)
++{
++ kmem_cache_free(ext4_es_cachep, es);
+ }
+
+ static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
+@@ -501,7 +507,7 @@ static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
+ s_es_stats.es_stats_shk_cnt);
+ }
+
+- kmem_cache_free(ext4_es_cachep, es);
++ __es_free_extent(es);
+ }
+
+ /*
+@@ -802,10 +808,12 @@ static int __es_insert_extent(struct inode *inode, struct extent_status *newes)
+ }
+ }
+
+- es = ext4_es_alloc_extent(inode, newes->es_lblk, newes->es_len,
+- newes->es_pblk);
++ es = __es_alloc_extent(false);
+ if (!es)
+ return -ENOMEM;
++ ext4_es_init_extent(inode, es, newes->es_lblk, newes->es_len,
++ newes->es_pblk);
++
+ rb_link_node(&es->rb_node, parent, p);
+ rb_insert_color(&es->rb_node, &tree->root);
+
+--
+2.42.0
+
--- /dev/null
+From ac45c2099e6af9400b454ef68cb56517a584f345 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 15 Aug 2023 15:08:08 +0800
+Subject: ext4: fix slab-use-after-free in ext4_es_insert_extent()
+
+From: Baokun Li <libaokun1@huawei.com>
+
+[ Upstream commit 768d612f79822d30a1e7d132a4d4b05337ce42ec ]
+
+Yikebaer reported an issue:
+==================================================================
+BUG: KASAN: slab-use-after-free in ext4_es_insert_extent+0xc68/0xcb0
+fs/ext4/extents_status.c:894
+Read of size 4 at addr ffff888112ecc1a4 by task syz-executor/8438
+
+CPU: 1 PID: 8438 Comm: syz-executor Not tainted 6.5.0-rc5 #1
+Call Trace:
+ [...]
+ kasan_report+0xba/0xf0 mm/kasan/report.c:588
+ ext4_es_insert_extent+0xc68/0xcb0 fs/ext4/extents_status.c:894
+ ext4_map_blocks+0x92a/0x16f0 fs/ext4/inode.c:680
+ ext4_alloc_file_blocks.isra.0+0x2df/0xb70 fs/ext4/extents.c:4462
+ ext4_zero_range fs/ext4/extents.c:4622 [inline]
+ ext4_fallocate+0x251c/0x3ce0 fs/ext4/extents.c:4721
+ [...]
+
+Allocated by task 8438:
+ [...]
+ kmem_cache_zalloc include/linux/slab.h:693 [inline]
+ __es_alloc_extent fs/ext4/extents_status.c:469 [inline]
+ ext4_es_insert_extent+0x672/0xcb0 fs/ext4/extents_status.c:873
+ ext4_map_blocks+0x92a/0x16f0 fs/ext4/inode.c:680
+ ext4_alloc_file_blocks.isra.0+0x2df/0xb70 fs/ext4/extents.c:4462
+ ext4_zero_range fs/ext4/extents.c:4622 [inline]
+ ext4_fallocate+0x251c/0x3ce0 fs/ext4/extents.c:4721
+ [...]
+
+Freed by task 8438:
+ [...]
+ kmem_cache_free+0xec/0x490 mm/slub.c:3823
+ ext4_es_try_to_merge_right fs/ext4/extents_status.c:593 [inline]
+ __es_insert_extent+0x9f4/0x1440 fs/ext4/extents_status.c:802
+ ext4_es_insert_extent+0x2ca/0xcb0 fs/ext4/extents_status.c:882
+ ext4_map_blocks+0x92a/0x16f0 fs/ext4/inode.c:680
+ ext4_alloc_file_blocks.isra.0+0x2df/0xb70 fs/ext4/extents.c:4462
+ ext4_zero_range fs/ext4/extents.c:4622 [inline]
+ ext4_fallocate+0x251c/0x3ce0 fs/ext4/extents.c:4721
+ [...]
+==================================================================
+
+The flow of issue triggering is as follows:
+1. remove es
+ raw es es removed es1
+|-------------------| -> |----|.......|------|
+
+2. insert es
+ es insert es1 merge with es es1 merge with es and free es1
+|----|.......|------| -> |------------|------| -> |-------------------|
+
+es merges with newes, then merges with es1, frees es1, then determines
+if es1->es_len is 0 and triggers a UAF.
+
+The code flow is as follows:
+ext4_es_insert_extent
+ es1 = __es_alloc_extent(true);
+ es2 = __es_alloc_extent(true);
+ __es_remove_extent(inode, lblk, end, NULL, es1)
+ __es_insert_extent(inode, &newes, es1) ---> insert es1 to es tree
+ __es_insert_extent(inode, &newes, es2)
+ ext4_es_try_to_merge_right
+ ext4_es_free_extent(inode, es1) ---> es1 is freed
+ if (es1 && !es1->es_len)
+ // Trigger UAF by determining if es1 is used.
+
+We determine whether es1 or es2 is used immediately after calling
+__es_remove_extent() or __es_insert_extent() to avoid triggering a
+UAF if es1 or es2 is freed.
+
+Reported-by: Yikebaer Aizezi <yikebaer61@gmail.com>
+Closes: https://lore.kernel.org/lkml/CALcu4raD4h9coiyEBL4Bm0zjDwxC2CyPiTwsP3zFuhot6y9Beg@mail.gmail.com
+Fixes: 2a69c450083d ("ext4: using nofail preallocation in ext4_es_insert_extent()")
+Cc: stable@kernel.org
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/r/20230815070808.3377171-1-libaokun1@huawei.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 8e387c89e96b ("ext4: make sure allocate pending entry not fail")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents_status.c | 44 +++++++++++++++++++++++++++-------------
+ 1 file changed, 30 insertions(+), 14 deletions(-)
+
+diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
+index 934c14f9edb9f..e8533b4f891bf 100644
+--- a/fs/ext4/extents_status.c
++++ b/fs/ext4/extents_status.c
+@@ -882,23 +882,29 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
+ err1 = __es_remove_extent(inode, lblk, end, NULL, es1);
+ if (err1 != 0)
+ goto error;
++ /* Free preallocated extent if it didn't get used. */
++ if (es1) {
++ if (!es1->es_len)
++ __es_free_extent(es1);
++ es1 = NULL;
++ }
+
+ err2 = __es_insert_extent(inode, &newes, es2);
+ if (err2 == -ENOMEM && !ext4_es_must_keep(&newes))
+ err2 = 0;
+ if (err2 != 0)
+ goto error;
++ /* Free preallocated extent if it didn't get used. */
++ if (es2) {
++ if (!es2->es_len)
++ __es_free_extent(es2);
++ es2 = NULL;
++ }
+
+ if (sbi->s_cluster_ratio > 1 && test_opt(inode->i_sb, DELALLOC) &&
+ (status & EXTENT_STATUS_WRITTEN ||
+ status & EXTENT_STATUS_UNWRITTEN))
+ __revise_pending(inode, lblk, len);
+-
+- /* es is pre-allocated but not used, free it. */
+- if (es1 && !es1->es_len)
+- __es_free_extent(es1);
+- if (es2 && !es2->es_len)
+- __es_free_extent(es2);
+ error:
+ write_unlock(&EXT4_I(inode)->i_es_lock);
+ if (err1 || err2)
+@@ -1495,8 +1501,12 @@ int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+ */
+ write_lock(&EXT4_I(inode)->i_es_lock);
+ err = __es_remove_extent(inode, lblk, end, &reserved, es);
+- if (es && !es->es_len)
+- __es_free_extent(es);
++ /* Free preallocated extent if it didn't get used. */
++ if (es) {
++ if (!es->es_len)
++ __es_free_extent(es);
++ es = NULL;
++ }
+ write_unlock(&EXT4_I(inode)->i_es_lock);
+ if (err)
+ goto retry;
+@@ -2055,19 +2065,25 @@ int ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
+ err1 = __es_remove_extent(inode, lblk, lblk, NULL, es1);
+ if (err1 != 0)
+ goto error;
++ /* Free preallocated extent if it didn't get used. */
++ if (es1) {
++ if (!es1->es_len)
++ __es_free_extent(es1);
++ es1 = NULL;
++ }
+
+ err2 = __es_insert_extent(inode, &newes, es2);
+ if (err2 != 0)
+ goto error;
++ /* Free preallocated extent if it didn't get used. */
++ if (es2) {
++ if (!es2->es_len)
++ __es_free_extent(es2);
++ es2 = NULL;
++ }
+
+ if (allocated)
+ __insert_pending(inode, lblk);
+-
+- /* es is pre-allocated but not used, free it. */
+- if (es1 && !es1->es_len)
+- __es_free_extent(es1);
+- if (es2 && !es2->es_len)
+- __es_free_extent(es2);
+ error:
+ write_unlock(&EXT4_I(inode)->i_es_lock);
+ if (err1 || err2)
+--
+2.42.0
+
--- /dev/null
+From 09388ad049dd0139d8ecb78b9bf2862568970793 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 24 Aug 2023 17:26:05 +0800
+Subject: ext4: make sure allocate pending entry not fail
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+[ Upstream commit 8e387c89e96b9543a339f84043cf9df15fed2632 ]
+
+__insert_pending() allocate memory in atomic context, so the allocation
+could fail, but we are not handling that failure now. It could lead
+ext4_es_remove_extent() to get wrong reserved clusters, and the global
+data blocks reservation count will be incorrect. The same to
+extents_status entry preallocation, preallocate pending entry out of the
+i_es_lock with __GFP_NOFAIL, make sure __insert_pending() and
+__revise_pending() always succeeds.
+
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Cc: stable@kernel.org
+Link: https://lore.kernel.org/r/20230824092619.1327976-3-yi.zhang@huaweicloud.com
+Reviewed-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents_status.c | 123 ++++++++++++++++++++++++++++-----------
+ 1 file changed, 89 insertions(+), 34 deletions(-)
+
+diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
+index e8533b4f891bf..470d29fb407a5 100644
+--- a/fs/ext4/extents_status.c
++++ b/fs/ext4/extents_status.c
+@@ -152,8 +152,9 @@ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+ static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan);
+ static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
+ struct ext4_inode_info *locked_ei);
+-static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+- ext4_lblk_t len);
++static int __revise_pending(struct inode *inode, ext4_lblk_t lblk,
++ ext4_lblk_t len,
++ struct pending_reservation **prealloc);
+
+ int __init ext4_init_es(void)
+ {
+@@ -450,6 +451,19 @@ static void ext4_es_list_del(struct inode *inode)
+ spin_unlock(&sbi->s_es_lock);
+ }
+
++static inline struct pending_reservation *__alloc_pending(bool nofail)
++{
++ if (!nofail)
++ return kmem_cache_alloc(ext4_pending_cachep, GFP_ATOMIC);
++
++ return kmem_cache_zalloc(ext4_pending_cachep, GFP_KERNEL | __GFP_NOFAIL);
++}
++
++static inline void __free_pending(struct pending_reservation *pr)
++{
++ kmem_cache_free(ext4_pending_cachep, pr);
++}
++
+ /*
+ * Returns true if we cannot fail to allocate memory for this extent_status
+ * entry and cannot reclaim it until its status changes.
+@@ -840,11 +854,12 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
+ {
+ struct extent_status newes;
+ ext4_lblk_t end = lblk + len - 1;
+- int err1 = 0;
+- int err2 = 0;
++ int err1 = 0, err2 = 0, err3 = 0;
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ struct extent_status *es1 = NULL;
+ struct extent_status *es2 = NULL;
++ struct pending_reservation *pr = NULL;
++ bool revise_pending = false;
+
+ if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
+ return 0;
+@@ -872,11 +887,17 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
+
+ ext4_es_insert_extent_check(inode, &newes);
+
++ revise_pending = sbi->s_cluster_ratio > 1 &&
++ test_opt(inode->i_sb, DELALLOC) &&
++ (status & (EXTENT_STATUS_WRITTEN |
++ EXTENT_STATUS_UNWRITTEN));
+ retry:
+ if (err1 && !es1)
+ es1 = __es_alloc_extent(true);
+ if ((err1 || err2) && !es2)
+ es2 = __es_alloc_extent(true);
++ if ((err1 || err2 || err3) && revise_pending && !pr)
++ pr = __alloc_pending(true);
+ write_lock(&EXT4_I(inode)->i_es_lock);
+
+ err1 = __es_remove_extent(inode, lblk, end, NULL, es1);
+@@ -901,13 +922,18 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
+ es2 = NULL;
+ }
+
+- if (sbi->s_cluster_ratio > 1 && test_opt(inode->i_sb, DELALLOC) &&
+- (status & EXTENT_STATUS_WRITTEN ||
+- status & EXTENT_STATUS_UNWRITTEN))
+- __revise_pending(inode, lblk, len);
++ if (revise_pending) {
++ err3 = __revise_pending(inode, lblk, len, &pr);
++ if (err3 != 0)
++ goto error;
++ if (pr) {
++ __free_pending(pr);
++ pr = NULL;
++ }
++ }
+ error:
+ write_unlock(&EXT4_I(inode)->i_es_lock);
+- if (err1 || err2)
++ if (err1 || err2 || err3)
+ goto retry;
+
+ ext4_es_print_tree(inode);
+@@ -1315,7 +1341,7 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
+ rc->ndelonly--;
+ node = rb_next(&pr->rb_node);
+ rb_erase(&pr->rb_node, &tree->root);
+- kmem_cache_free(ext4_pending_cachep, pr);
++ __free_pending(pr);
+ if (!node)
+ break;
+ pr = rb_entry(node, struct pending_reservation,
+@@ -1913,11 +1939,13 @@ static struct pending_reservation *__get_pending(struct inode *inode,
+ *
+ * @inode - file containing the cluster
+ * @lblk - logical block in the cluster to be added
++ * @prealloc - preallocated pending entry
+ *
+ * Returns 0 on successful insertion and -ENOMEM on failure. If the
+ * pending reservation is already in the set, returns successfully.
+ */
+-static int __insert_pending(struct inode *inode, ext4_lblk_t lblk)
++static int __insert_pending(struct inode *inode, ext4_lblk_t lblk,
++ struct pending_reservation **prealloc)
+ {
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree;
+@@ -1943,10 +1971,15 @@ static int __insert_pending(struct inode *inode, ext4_lblk_t lblk)
+ }
+ }
+
+- pr = kmem_cache_alloc(ext4_pending_cachep, GFP_ATOMIC);
+- if (pr == NULL) {
+- ret = -ENOMEM;
+- goto out;
++ if (likely(*prealloc == NULL)) {
++ pr = __alloc_pending(false);
++ if (!pr) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ } else {
++ pr = *prealloc;
++ *prealloc = NULL;
+ }
+ pr->lclu = lclu;
+
+@@ -1976,7 +2009,7 @@ static void __remove_pending(struct inode *inode, ext4_lblk_t lblk)
+ if (pr != NULL) {
+ tree = &EXT4_I(inode)->i_pending_tree;
+ rb_erase(&pr->rb_node, &tree->root);
+- kmem_cache_free(ext4_pending_cachep, pr);
++ __free_pending(pr);
+ }
+ }
+
+@@ -2037,10 +2070,10 @@ int ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
+ bool allocated)
+ {
+ struct extent_status newes;
+- int err1 = 0;
+- int err2 = 0;
++ int err1 = 0, err2 = 0, err3 = 0;
+ struct extent_status *es1 = NULL;
+ struct extent_status *es2 = NULL;
++ struct pending_reservation *pr = NULL;
+
+ if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
+ return 0;
+@@ -2060,6 +2093,8 @@ int ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
+ es1 = __es_alloc_extent(true);
+ if ((err1 || err2) && !es2)
+ es2 = __es_alloc_extent(true);
++ if ((err1 || err2 || err3) && allocated && !pr)
++ pr = __alloc_pending(true);
+ write_lock(&EXT4_I(inode)->i_es_lock);
+
+ err1 = __es_remove_extent(inode, lblk, lblk, NULL, es1);
+@@ -2082,11 +2117,18 @@ int ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
+ es2 = NULL;
+ }
+
+- if (allocated)
+- __insert_pending(inode, lblk);
++ if (allocated) {
++ err3 = __insert_pending(inode, lblk, &pr);
++ if (err3 != 0)
++ goto error;
++ if (pr) {
++ __free_pending(pr);
++ pr = NULL;
++ }
++ }
+ error:
+ write_unlock(&EXT4_I(inode)->i_es_lock);
+- if (err1 || err2)
++ if (err1 || err2 || err3)
+ goto retry;
+
+ ext4_es_print_tree(inode);
+@@ -2192,21 +2234,24 @@ unsigned int ext4_es_delayed_clu(struct inode *inode, ext4_lblk_t lblk,
+ * @inode - file containing the range
+ * @lblk - logical block defining the start of range
+ * @len - length of range in blocks
++ * @prealloc - preallocated pending entry
+ *
+ * Used after a newly allocated extent is added to the extents status tree.
+ * Requires that the extents in the range have either written or unwritten
+ * status. Must be called while holding i_es_lock.
+ */
+-static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+- ext4_lblk_t len)
++static int __revise_pending(struct inode *inode, ext4_lblk_t lblk,
++ ext4_lblk_t len,
++ struct pending_reservation **prealloc)
+ {
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ ext4_lblk_t end = lblk + len - 1;
+ ext4_lblk_t first, last;
+ bool f_del = false, l_del = false;
++ int ret = 0;
+
+ if (len == 0)
+- return;
++ return 0;
+
+ /*
+ * Two cases - block range within single cluster and block range
+@@ -2227,7 +2272,9 @@ static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+ f_del = __es_scan_range(inode, &ext4_es_is_delonly,
+ first, lblk - 1);
+ if (f_del) {
+- __insert_pending(inode, first);
++ ret = __insert_pending(inode, first, prealloc);
++ if (ret < 0)
++ goto out;
+ } else {
+ last = EXT4_LBLK_CMASK(sbi, end) +
+ sbi->s_cluster_ratio - 1;
+@@ -2235,9 +2282,11 @@ static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+ l_del = __es_scan_range(inode,
+ &ext4_es_is_delonly,
+ end + 1, last);
+- if (l_del)
+- __insert_pending(inode, last);
+- else
++ if (l_del) {
++ ret = __insert_pending(inode, last, prealloc);
++ if (ret < 0)
++ goto out;
++ } else
+ __remove_pending(inode, last);
+ }
+ } else {
+@@ -2245,18 +2294,24 @@ static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+ if (first != lblk)
+ f_del = __es_scan_range(inode, &ext4_es_is_delonly,
+ first, lblk - 1);
+- if (f_del)
+- __insert_pending(inode, first);
+- else
++ if (f_del) {
++ ret = __insert_pending(inode, first, prealloc);
++ if (ret < 0)
++ goto out;
++ } else
+ __remove_pending(inode, first);
+
+ last = EXT4_LBLK_CMASK(sbi, end) + sbi->s_cluster_ratio - 1;
+ if (last != end)
+ l_del = __es_scan_range(inode, &ext4_es_is_delonly,
+ end + 1, last);
+- if (l_del)
+- __insert_pending(inode, last);
+- else
++ if (l_del) {
++ ret = __insert_pending(inode, last, prealloc);
++ if (ret < 0)
++ goto out;
++ } else
+ __remove_pending(inode, last);
+ }
++out:
++ return ret;
+ }
+--
+2.42.0
+
--- /dev/null
+From 50cfa8d272c4420883512db85e46e483d7ef4bb7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Apr 2023 11:38:38 +0800
+Subject: ext4: use pre-allocated es in __es_insert_extent()
+
+From: Baokun Li <libaokun1@huawei.com>
+
+[ Upstream commit 95f0b320339a977cf69872eac107122bf536775d ]
+
+Pass a extent_status pointer prealloc to __es_insert_extent(). If the
+pointer is non-null, it is used directly when a new extent_status is
+needed to avoid memory allocation failures.
+
+Suggested-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/r/20230424033846.4732-5-libaokun1@huawei.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 8e387c89e96b ("ext4: make sure allocate pending entry not fail")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents_status.c | 19 ++++++++++++-------
+ 1 file changed, 12 insertions(+), 7 deletions(-)
+
+diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
+index 45f97c9670a3c..e459a0c2d71ef 100644
+--- a/fs/ext4/extents_status.c
++++ b/fs/ext4/extents_status.c
+@@ -144,7 +144,8 @@
+ static struct kmem_cache *ext4_es_cachep;
+ static struct kmem_cache *ext4_pending_cachep;
+
+-static int __es_insert_extent(struct inode *inode, struct extent_status *newes);
++static int __es_insert_extent(struct inode *inode, struct extent_status *newes,
++ struct extent_status *prealloc);
+ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+ ext4_lblk_t end, int *reserved);
+ static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan);
+@@ -768,7 +769,8 @@ static inline void ext4_es_insert_extent_check(struct inode *inode,
+ }
+ #endif
+
+-static int __es_insert_extent(struct inode *inode, struct extent_status *newes)
++static int __es_insert_extent(struct inode *inode, struct extent_status *newes,
++ struct extent_status *prealloc)
+ {
+ struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
+ struct rb_node **p = &tree->root.rb_node;
+@@ -808,7 +810,10 @@ static int __es_insert_extent(struct inode *inode, struct extent_status *newes)
+ }
+ }
+
+- es = __es_alloc_extent(false);
++ if (prealloc)
++ es = prealloc;
++ else
++ es = __es_alloc_extent(false);
+ if (!es)
+ return -ENOMEM;
+ ext4_es_init_extent(inode, es, newes->es_lblk, newes->es_len,
+@@ -868,7 +873,7 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
+ if (err != 0)
+ goto error;
+ retry:
+- err = __es_insert_extent(inode, &newes);
++ err = __es_insert_extent(inode, &newes, NULL);
+ if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
+ 128, EXT4_I(inode)))
+ goto retry;
+@@ -918,7 +923,7 @@ void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,
+
+ es = __es_tree_search(&EXT4_I(inode)->i_es_tree.root, lblk);
+ if (!es || es->es_lblk > end)
+- __es_insert_extent(inode, &newes);
++ __es_insert_extent(inode, &newes, NULL);
+ write_unlock(&EXT4_I(inode)->i_es_lock);
+ }
+
+@@ -1364,7 +1369,7 @@ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+ orig_es.es_len - len2;
+ ext4_es_store_pblock_status(&newes, block,
+ ext4_es_status(&orig_es));
+- err = __es_insert_extent(inode, &newes);
++ err = __es_insert_extent(inode, &newes, NULL);
+ if (err) {
+ es->es_lblk = orig_es.es_lblk;
+ es->es_len = orig_es.es_len;
+@@ -2020,7 +2025,7 @@ int ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
+ if (err != 0)
+ goto error;
+ retry:
+- err = __es_insert_extent(inode, &newes);
++ err = __es_insert_extent(inode, &newes, NULL);
+ if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
+ 128, EXT4_I(inode)))
+ goto retry;
+--
+2.42.0
+
--- /dev/null
+From 72b8751750f74cb84521592dc378cae216748b6e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Apr 2023 11:38:39 +0800
+Subject: ext4: use pre-allocated es in __es_remove_extent()
+
+From: Baokun Li <libaokun1@huawei.com>
+
+[ Upstream commit bda3efaf774fb687c2b7a555aaec3006b14a8857 ]
+
+When splitting extent, if the second extent can not be dropped, we return
+-ENOMEM and use GFP_NOFAIL to preallocate an extent_status outside of
+i_es_lock and pass it to __es_remove_extent() to be used as the second
+extent. This ensures that __es_remove_extent() is executed successfully,
+thus ensuring consistency in the extent status tree. If the second extent
+is not undroppable, we simply drop it and return 0. Then retry is no longer
+necessary, remove it.
+
+Now, __es_remove_extent() will always remove what it should, maybe more.
+
+Suggested-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/r/20230424033846.4732-6-libaokun1@huawei.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 8e387c89e96b ("ext4: make sure allocate pending entry not fail")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents_status.c | 26 +++++++++++++-------------
+ 1 file changed, 13 insertions(+), 13 deletions(-)
+
+diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
+index e459a0c2d71ef..682f5e4ce2d00 100644
+--- a/fs/ext4/extents_status.c
++++ b/fs/ext4/extents_status.c
+@@ -147,7 +147,8 @@ static struct kmem_cache *ext4_pending_cachep;
+ static int __es_insert_extent(struct inode *inode, struct extent_status *newes,
+ struct extent_status *prealloc);
+ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+- ext4_lblk_t end, int *reserved);
++ ext4_lblk_t end, int *reserved,
++ struct extent_status *prealloc);
+ static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan);
+ static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
+ struct ext4_inode_info *locked_ei);
+@@ -869,7 +870,7 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
+ ext4_es_insert_extent_check(inode, &newes);
+
+ write_lock(&EXT4_I(inode)->i_es_lock);
+- err = __es_remove_extent(inode, lblk, end, NULL);
++ err = __es_remove_extent(inode, lblk, end, NULL, NULL);
+ if (err != 0)
+ goto error;
+ retry:
+@@ -1313,6 +1314,7 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
+ * @lblk - first block in range
+ * @end - last block in range
+ * @reserved - number of cluster reservations released
++ * @prealloc - pre-allocated es to avoid memory allocation failures
+ *
+ * If @reserved is not NULL and delayed allocation is enabled, counts
+ * block/cluster reservations freed by removing range and if bigalloc
+@@ -1320,7 +1322,8 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
+ * error code on failure.
+ */
+ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+- ext4_lblk_t end, int *reserved)
++ ext4_lblk_t end, int *reserved,
++ struct extent_status *prealloc)
+ {
+ struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
+ struct rb_node *node;
+@@ -1328,14 +1331,12 @@ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+ struct extent_status orig_es;
+ ext4_lblk_t len1, len2;
+ ext4_fsblk_t block;
+- int err;
++ int err = 0;
+ bool count_reserved = true;
+ struct rsvd_count rc;
+
+ if (reserved == NULL || !test_opt(inode->i_sb, DELALLOC))
+ count_reserved = false;
+-retry:
+- err = 0;
+
+ es = __es_tree_search(&tree->root, lblk);
+ if (!es)
+@@ -1369,14 +1370,13 @@ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+ orig_es.es_len - len2;
+ ext4_es_store_pblock_status(&newes, block,
+ ext4_es_status(&orig_es));
+- err = __es_insert_extent(inode, &newes, NULL);
++ err = __es_insert_extent(inode, &newes, prealloc);
+ if (err) {
++ if (!ext4_es_must_keep(&newes))
++ return 0;
++
+ es->es_lblk = orig_es.es_lblk;
+ es->es_len = orig_es.es_len;
+- if ((err == -ENOMEM) &&
+- __es_shrink(EXT4_SB(inode->i_sb),
+- 128, EXT4_I(inode)))
+- goto retry;
+ goto out;
+ }
+ } else {
+@@ -1476,7 +1476,7 @@ int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+ * is reclaimed.
+ */
+ write_lock(&EXT4_I(inode)->i_es_lock);
+- err = __es_remove_extent(inode, lblk, end, &reserved);
++ err = __es_remove_extent(inode, lblk, end, &reserved, NULL);
+ write_unlock(&EXT4_I(inode)->i_es_lock);
+ ext4_es_print_tree(inode);
+ ext4_da_release_space(inode, reserved);
+@@ -2021,7 +2021,7 @@ int ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
+
+ write_lock(&EXT4_I(inode)->i_es_lock);
+
+- err = __es_remove_extent(inode, lblk, lblk, NULL);
++ err = __es_remove_extent(inode, lblk, lblk, NULL, NULL);
+ if (err != 0)
+ goto error;
+ retry:
+--
+2.42.0
+
--- /dev/null
+From a7a9d448ed5f577d15f0a129a85a36f09bb680cf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Apr 2023 11:38:41 +0800
+Subject: ext4: using nofail preallocation in ext4_es_insert_delayed_block()
+
+From: Baokun Li <libaokun1@huawei.com>
+
+[ Upstream commit 4a2d98447b37bcb68a7f06a1078edcb4f7e6ce7e ]
+
+Similar to in ext4_es_remove_extent(), we use a no-fail preallocation
+to avoid inconsistencies, except that here we may have to preallocate
+two extent_status.
+
+Suggested-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/r/20230424033846.4732-8-libaokun1@huawei.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 8e387c89e96b ("ext4: make sure allocate pending entry not fail")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents_status.c | 33 ++++++++++++++++++++++-----------
+ 1 file changed, 22 insertions(+), 11 deletions(-)
+
+diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
+index cb8241adda929..e382fe1788f1e 100644
+--- a/fs/ext4/extents_status.c
++++ b/fs/ext4/extents_status.c
+@@ -2013,7 +2013,10 @@ int ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
+ bool allocated)
+ {
+ struct extent_status newes;
+- int err = 0;
++ int err1 = 0;
++ int err2 = 0;
++ struct extent_status *es1 = NULL;
++ struct extent_status *es2 = NULL;
+
+ if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
+ return 0;
+@@ -2028,29 +2031,37 @@ int ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
+
+ ext4_es_insert_extent_check(inode, &newes);
+
++retry:
++ if (err1 && !es1)
++ es1 = __es_alloc_extent(true);
++ if ((err1 || err2) && !es2)
++ es2 = __es_alloc_extent(true);
+ write_lock(&EXT4_I(inode)->i_es_lock);
+
+- err = __es_remove_extent(inode, lblk, lblk, NULL, NULL);
+- if (err != 0)
++ err1 = __es_remove_extent(inode, lblk, lblk, NULL, es1);
++ if (err1 != 0)
+ goto error;
+-retry:
+- err = __es_insert_extent(inode, &newes, NULL);
+- if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
+- 128, EXT4_I(inode)))
+- goto retry;
+- if (err != 0)
++
++ err2 = __es_insert_extent(inode, &newes, es2);
++ if (err2 != 0)
+ goto error;
+
+ if (allocated)
+ __insert_pending(inode, lblk);
+
++ /* es is pre-allocated but not used, free it. */
++ if (es1 && !es1->es_len)
++ __es_free_extent(es1);
++ if (es2 && !es2->es_len)
++ __es_free_extent(es2);
+ error:
+ write_unlock(&EXT4_I(inode)->i_es_lock);
++ if (err1 || err2)
++ goto retry;
+
+ ext4_es_print_tree(inode);
+ ext4_print_pending_tree(inode);
+-
+- return err;
++ return 0;
+ }
+
+ /*
+--
+2.42.0
+
--- /dev/null
+From 1237f1355763deac570a735a1fc821ab322d015e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Apr 2023 11:38:42 +0800
+Subject: ext4: using nofail preallocation in ext4_es_insert_extent()
+
+From: Baokun Li <libaokun1@huawei.com>
+
+[ Upstream commit 2a69c450083db164596c75c0f5b4d9c4c0e18eba ]
+
+Similar to in ext4_es_insert_delayed_block(), we use preallocations that
+do not fail to avoid inconsistencies, but we do not care about es that are
+not must be kept, and we return 0 even if such es memory allocation fails.
+
+Suggested-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/r/20230424033846.4732-9-libaokun1@huawei.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 8e387c89e96b ("ext4: make sure allocate pending entry not fail")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents_status.c | 38 ++++++++++++++++++++++++++------------
+ 1 file changed, 26 insertions(+), 12 deletions(-)
+
+diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
+index e382fe1788f1e..934c14f9edb9f 100644
+--- a/fs/ext4/extents_status.c
++++ b/fs/ext4/extents_status.c
+@@ -840,8 +840,11 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
+ {
+ struct extent_status newes;
+ ext4_lblk_t end = lblk + len - 1;
+- int err = 0;
++ int err1 = 0;
++ int err2 = 0;
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
++ struct extent_status *es1 = NULL;
++ struct extent_status *es2 = NULL;
+
+ if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
+ return 0;
+@@ -869,29 +872,40 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
+
+ ext4_es_insert_extent_check(inode, &newes);
+
++retry:
++ if (err1 && !es1)
++ es1 = __es_alloc_extent(true);
++ if ((err1 || err2) && !es2)
++ es2 = __es_alloc_extent(true);
+ write_lock(&EXT4_I(inode)->i_es_lock);
+- err = __es_remove_extent(inode, lblk, end, NULL, NULL);
+- if (err != 0)
++
++ err1 = __es_remove_extent(inode, lblk, end, NULL, es1);
++ if (err1 != 0)
++ goto error;
++
++ err2 = __es_insert_extent(inode, &newes, es2);
++ if (err2 == -ENOMEM && !ext4_es_must_keep(&newes))
++ err2 = 0;
++ if (err2 != 0)
+ goto error;
+-retry:
+- err = __es_insert_extent(inode, &newes, NULL);
+- if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
+- 128, EXT4_I(inode)))
+- goto retry;
+- if (err == -ENOMEM && !ext4_es_must_keep(&newes))
+- err = 0;
+
+ if (sbi->s_cluster_ratio > 1 && test_opt(inode->i_sb, DELALLOC) &&
+ (status & EXTENT_STATUS_WRITTEN ||
+ status & EXTENT_STATUS_UNWRITTEN))
+ __revise_pending(inode, lblk, len);
+
++ /* es is pre-allocated but not used, free it. */
++ if (es1 && !es1->es_len)
++ __es_free_extent(es1);
++ if (es2 && !es2->es_len)
++ __es_free_extent(es2);
+ error:
+ write_unlock(&EXT4_I(inode)->i_es_lock);
++ if (err1 || err2)
++ goto retry;
+
+ ext4_es_print_tree(inode);
+-
+- return err;
++ return 0;
+ }
+
+ /*
+--
+2.42.0
+
--- /dev/null
+From e0096ed05a4443956b2dc310b2d264b85f0c1fbf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Apr 2023 11:38:40 +0800
+Subject: ext4: using nofail preallocation in ext4_es_remove_extent()
+
+From: Baokun Li <libaokun1@huawei.com>
+
+[ Upstream commit e9fe2b882bd5b26b987c9ba110c2222796f72af5 ]
+
+If __es_remove_extent() returns an error it means that when splitting
+extent, allocating an extent that must be kept failed, where returning
+an error directly would cause the extent tree to be inconsistent. So we
+use GFP_NOFAIL to pre-allocate an extent_status and pass it to
+__es_remove_extent() to avoid this problem.
+
+In addition, since the allocated memory is outside the i_es_lock, the
+extent_status tree may change and the pre-allocated extent_status is
+no longer needed, so we release the pre-allocated extent_status when
+es->es_len is not initialized.
+
+Suggested-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/r/20230424033846.4732-7-libaokun1@huawei.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 8e387c89e96b ("ext4: make sure allocate pending entry not fail")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents_status.c | 13 +++++++++++--
+ 1 file changed, 11 insertions(+), 2 deletions(-)
+
+diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
+index 682f5e4ce2d00..cb8241adda929 100644
+--- a/fs/ext4/extents_status.c
++++ b/fs/ext4/extents_status.c
+@@ -1456,6 +1456,7 @@ int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+ ext4_lblk_t end;
+ int err = 0;
+ int reserved = 0;
++ struct extent_status *es = NULL;
+
+ if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
+ return 0;
+@@ -1470,17 +1471,25 @@ int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+ end = lblk + len - 1;
+ BUG_ON(end < lblk);
+
++retry:
++ if (err && !es)
++ es = __es_alloc_extent(true);
+ /*
+ * ext4_clear_inode() depends on us taking i_es_lock unconditionally
+ * so that we are sure __es_shrink() is done with the inode before it
+ * is reclaimed.
+ */
+ write_lock(&EXT4_I(inode)->i_es_lock);
+- err = __es_remove_extent(inode, lblk, end, &reserved, NULL);
++ err = __es_remove_extent(inode, lblk, end, &reserved, es);
++ if (es && !es->es_len)
++ __es_free_extent(es);
+ write_unlock(&EXT4_I(inode)->i_es_lock);
++ if (err)
++ goto retry;
++
+ ext4_es_print_tree(inode);
+ ext4_da_release_space(inode, reserved);
+- return err;
++ return 0;
+ }
+
+ static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
+--
+2.42.0
+
--- /dev/null
+From d2709ce430affd92271ae18d2513e031f69afc19 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 31 Oct 2023 12:32:39 +0800
+Subject: HID: fix HID device resource race between HID core and debugging
+ support
+
+From: Charles Yi <be286@163.com>
+
+[ Upstream commit fc43e9c857b7aa55efba9398419b14d9e35dcc7d ]
+
+hid_debug_events_release releases resources bound to the HID device instance.
+hid_device_release releases the underlying HID device instance potentially
+before hid_debug_events_release has completed releasing debug resources bound
+to the same HID device instance.
+
+Reference count to prevent the HID device instance from being torn down
+preemptively when HID debugging support is used. When count reaches zero,
+release core resources of HID device instance using hiddev_free.
+
+The crash:
+
+[ 120.728477][ T4396] kernel BUG at lib/list_debug.c:53!
+[ 120.728505][ T4396] Internal error: Oops - BUG: 0 [#1] PREEMPT SMP
+[ 120.739806][ T4396] Modules linked in: bcmdhd dhd_static_buf 8822cu pcie_mhi r8168
+[ 120.747386][ T4396] CPU: 1 PID: 4396 Comm: hidt_bridge Not tainted 5.10.110 #257
+[ 120.754771][ T4396] Hardware name: Rockchip RK3588 EVB4 LP4 V10 Board (DT)
+[ 120.761643][ T4396] pstate: 60400089 (nZCv daIf +PAN -UAO -TCO BTYPE=--)
+[ 120.768338][ T4396] pc : __list_del_entry_valid+0x98/0xac
+[ 120.773730][ T4396] lr : __list_del_entry_valid+0x98/0xac
+[ 120.779120][ T4396] sp : ffffffc01e62bb60
+[ 120.783126][ T4396] x29: ffffffc01e62bb60 x28: ffffff818ce3a200
+[ 120.789126][ T4396] x27: 0000000000000009 x26: 0000000000980000
+[ 120.795126][ T4396] x25: ffffffc012431000 x24: ffffff802c6d4e00
+[ 120.801125][ T4396] x23: ffffff8005c66f00 x22: ffffffc01183b5b8
+[ 120.807125][ T4396] x21: ffffff819df2f100 x20: 0000000000000000
+[ 120.813124][ T4396] x19: ffffff802c3f0700 x18: ffffffc01d2cd058
+[ 120.819124][ T4396] x17: 0000000000000000 x16: 0000000000000000
+[ 120.825124][ T4396] x15: 0000000000000004 x14: 0000000000003fff
+[ 120.831123][ T4396] x13: ffffffc012085588 x12: 0000000000000003
+[ 120.837123][ T4396] x11: 00000000ffffbfff x10: 0000000000000003
+[ 120.843123][ T4396] x9 : 455103d46b329300 x8 : 455103d46b329300
+[ 120.849124][ T4396] x7 : 74707572726f6320 x6 : ffffffc0124b8cb5
+[ 120.855124][ T4396] x5 : ffffffffffffffff x4 : 0000000000000000
+[ 120.861123][ T4396] x3 : ffffffc011cf4f90 x2 : ffffff81fee7b948
+[ 120.867122][ T4396] x1 : ffffffc011cf4f90 x0 : 0000000000000054
+[ 120.873122][ T4396] Call trace:
+[ 120.876259][ T4396] __list_del_entry_valid+0x98/0xac
+[ 120.881304][ T4396] hid_debug_events_release+0x48/0x12c
+[ 120.886617][ T4396] full_proxy_release+0x50/0xbc
+[ 120.891323][ T4396] __fput+0xdc/0x238
+[ 120.895075][ T4396] ____fput+0x14/0x24
+[ 120.898911][ T4396] task_work_run+0x90/0x148
+[ 120.903268][ T4396] do_exit+0x1bc/0x8a4
+[ 120.907193][ T4396] do_group_exit+0x8c/0xa4
+[ 120.911458][ T4396] get_signal+0x468/0x744
+[ 120.915643][ T4396] do_signal+0x84/0x280
+[ 120.919650][ T4396] do_notify_resume+0xd0/0x218
+[ 120.924262][ T4396] work_pending+0xc/0x3f0
+
+[ Rahul Rameshbabu <sergeantsagara@protonmail.com>: rework changelog ]
+Fixes: cd667ce24796 ("HID: use debugfs for events/reports dumping")
+Signed-off-by: Charles Yi <be286@163.com>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hid/hid-core.c | 12 ++++++++++--
+ drivers/hid/hid-debug.c | 3 +++
+ include/linux/hid.h | 3 +++
+ 3 files changed, 16 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 799a3086dbb06..cdad3a0662876 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -707,15 +707,22 @@ static void hid_close_report(struct hid_device *device)
+ * Free a device structure, all reports, and all fields.
+ */
+
+-static void hid_device_release(struct device *dev)
++void hiddev_free(struct kref *ref)
+ {
+- struct hid_device *hid = to_hid_device(dev);
++ struct hid_device *hid = container_of(ref, struct hid_device, ref);
+
+ hid_close_report(hid);
+ kfree(hid->dev_rdesc);
+ kfree(hid);
+ }
+
++static void hid_device_release(struct device *dev)
++{
++ struct hid_device *hid = to_hid_device(dev);
++
++ kref_put(&hid->ref, hiddev_free);
++}
++
+ /*
+ * Fetch a report description item from the data stream. We support long
+ * items, though they are not used yet.
+@@ -2813,6 +2820,7 @@ struct hid_device *hid_allocate_device(void)
+ spin_lock_init(&hdev->debug_list_lock);
+ sema_init(&hdev->driver_input_lock, 1);
+ mutex_init(&hdev->ll_open_lock);
++ kref_init(&hdev->ref);
+
+ return hdev;
+ }
+diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
+index 15e35702773cd..7f78622b1b0b3 100644
+--- a/drivers/hid/hid-debug.c
++++ b/drivers/hid/hid-debug.c
+@@ -1132,6 +1132,7 @@ static int hid_debug_events_open(struct inode *inode, struct file *file)
+ goto out;
+ }
+ list->hdev = (struct hid_device *) inode->i_private;
++ kref_get(&list->hdev->ref);
+ file->private_data = list;
+ mutex_init(&list->read_mutex);
+
+@@ -1224,6 +1225,8 @@ static int hid_debug_events_release(struct inode *inode, struct file *file)
+ list_del(&list->node);
+ spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags);
+ kfifo_free(&list->hid_debug_fifo);
++
++ kref_put(&list->hdev->ref, hiddev_free);
+ kfree(list);
+
+ return 0;
+diff --git a/include/linux/hid.h b/include/linux/hid.h
+index 58f5ab29c11a7..b688069b17944 100644
+--- a/include/linux/hid.h
++++ b/include/linux/hid.h
+@@ -653,10 +653,13 @@ struct hid_device { /* device report descriptor */
+ struct list_head debug_list;
+ spinlock_t debug_list_lock;
+ wait_queue_head_t debug_wait;
++ struct kref ref;
+
+ unsigned int id; /* system unique id */
+ };
+
++void hiddev_free(struct kref *ref);
++
+ #define to_hid_device(pdev) \
+ container_of(pdev, struct hid_device, dev)
+
+--
+2.42.0
+
--- /dev/null
+From c69a982ab1a0c3c6b16c23a7ad32a8481bf5aabc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 Nov 2023 13:13:36 -0800
+Subject: i40e: Fix adding unsupported cloud filters
+
+From: Ivan Vecera <ivecera@redhat.com>
+
+[ Upstream commit 4e20655e503e3a478cd1682bf25e3202dd823da8 ]
+
+If a VF tries to add unsupported cloud filter through virtchnl
+then i40e_add_del_cloud_filter(_big_buf) returns -ENOTSUPP but
+this error code is stored in 'ret' instead of 'aq_ret' that
+is used as error code sent back to VF. In this scenario where
+one of the mentioned functions fails the value of 'aq_ret'
+is zero so the VF will incorrectly receive a 'success'.
+
+Use 'aq_ret' to store return value and remove 'ret' local
+variable. Additionally fix the issue when filter allocation
+fails, in this case no notification is sent back to the VF.
+
+Fixes: e284fc280473 ("i40e: Add and delete cloud filter")
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: Ivan Vecera <ivecera@redhat.com>
+Tested-by: Rafal Romanowski <rafal.romanowski@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Link: https://lore.kernel.org/r/20231121211338.3348677-1-anthony.l.nguyen@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 16 +++++++++-------
+ 1 file changed, 9 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+index fb87912b47617..cb925baf72ce0 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+@@ -3774,7 +3774,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ int aq_ret = 0;
+- int i, ret;
++ int i;
+
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ aq_ret = I40E_ERR_PARAM;
+@@ -3798,8 +3798,10 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
+ }
+
+ cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
+- if (!cfilter)
+- return -ENOMEM;
++ if (!cfilter) {
++ aq_ret = -ENOMEM;
++ goto err_out;
++ }
+
+ /* parse destination mac address */
+ for (i = 0; i < ETH_ALEN; i++)
+@@ -3847,13 +3849,13 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
+
+ /* Adding cloud filter programmed as TC filter */
+ if (tcf.dst_port)
+- ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
++ aq_ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
+ else
+- ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
+- if (ret) {
++ aq_ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
++ if (aq_ret) {
+ dev_err(&pf->pdev->dev,
+ "VF %d: Failed to add cloud filter, err %pe aq_err %s\n",
+- vf->vf_id, ERR_PTR(ret),
++ vf->vf_id, ERR_PTR(aq_ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ goto err_free;
+ }
+--
+2.42.0
+
--- /dev/null
+From b8e379898bf9ea517c6fd6398bdc9f2af90135b3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Jan 2023 15:11:20 +0100
+Subject: i40e: use ERR_PTR error print in i40e messages
+
+From: Jan Sokolowski <jan.sokolowski@intel.com>
+
+[ Upstream commit d5ba18423f87709146c120b20e4a1b8a5b528a76 ]
+
+In i40e_status removal patches, i40e_status conversion
+to strings was removed in order to easily refactor
+the code to use standard errornums. This however made it
+more difficult for read error logs.
+
+Use %pe formatter to print error messages in human-readable
+format.
+
+Signed-off-by: Jan Sokolowski <jan.sokolowski@intel.com>
+Tested-by: Gurucharan G <gurucharanx.g@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Stable-dep-of: 4e20655e503e ("i40e: Fix adding unsupported cloud filters")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_client.c | 8 +-
+ drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c | 16 +-
+ .../net/ethernet/intel/i40e/i40e_ethtool.c | 40 +--
+ drivers/net/ethernet/intel/i40e/i40e_main.c | 299 +++++++++---------
+ drivers/net/ethernet/intel/i40e/i40e_nvm.c | 4 +-
+ .../ethernet/intel/i40e/i40e_virtchnl_pf.c | 28 +-
+ 6 files changed, 198 insertions(+), 197 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
+index 8bcb98b85e3d9..a289f1bb3dbfc 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
+@@ -686,8 +686,8 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ if (err) {
+ dev_info(&pf->pdev->dev,
+- "couldn't get PF vsi config, err %d aq_err %s\n",
+- err,
++ "couldn't get PF vsi config, err %pe aq_err %s\n",
++ ERR_PTR(err),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ return -ENOENT;
+@@ -714,8 +714,8 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
+ err = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (err) {
+ dev_info(&pf->pdev->dev,
+- "update VSI ctxt for PE failed, err %d aq_err %s\n",
+- err,
++ "update VSI ctxt for PE failed, err %pe aq_err %s\n",
++ ERR_PTR(err),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ }
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+index bba70bd5703bf..195421d863ab1 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+@@ -135,8 +135,8 @@ static int i40e_dcbnl_ieee_setets(struct net_device *netdev,
+ ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+- "Failed setting DCB ETS configuration err %d aq_err %s\n",
+- ret,
++ "Failed setting DCB ETS configuration err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ return -EINVAL;
+ }
+@@ -174,8 +174,8 @@ static int i40e_dcbnl_ieee_setpfc(struct net_device *netdev,
+ ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+- "Failed setting DCB PFC configuration err %d aq_err %s\n",
+- ret,
++ "Failed setting DCB PFC configuration err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ return -EINVAL;
+ }
+@@ -225,8 +225,8 @@ static int i40e_dcbnl_ieee_setapp(struct net_device *netdev,
+ ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+- "Failed setting DCB configuration err %d aq_err %s\n",
+- ret,
++ "Failed setting DCB configuration err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ return -EINVAL;
+ }
+@@ -290,8 +290,8 @@ static int i40e_dcbnl_ieee_delapp(struct net_device *netdev,
+ ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+- "Failed setting DCB configuration err %d aq_err %s\n",
+- ret,
++ "Failed setting DCB configuration err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ return -EINVAL;
+ }
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+index e632041aed5f8..107bcca7db8c9 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+@@ -1453,8 +1453,8 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
+ status = i40e_aq_set_phy_config(hw, &config, NULL);
+ if (status) {
+ netdev_info(netdev,
+- "Set phy config failed, err %d aq_err %s\n",
+- status,
++ "Set phy config failed, err %pe aq_err %s\n",
++ ERR_PTR(status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ err = -EAGAIN;
+ goto done;
+@@ -1463,8 +1463,8 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
+ status = i40e_update_link_info(hw);
+ if (status)
+ netdev_dbg(netdev,
+- "Updating link info failed with err %d aq_err %s\n",
+- status,
++ "Updating link info failed with err %pe aq_err %s\n",
++ ERR_PTR(status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+
+ } else {
+@@ -1515,8 +1515,8 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
+ status = i40e_aq_set_phy_config(hw, &config, NULL);
+ if (status) {
+ netdev_info(netdev,
+- "Set phy config failed, err %d aq_err %s\n",
+- status,
++ "Set phy config failed, err %pe aq_err %s\n",
++ ERR_PTR(status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ err = -EAGAIN;
+ goto done;
+@@ -1529,8 +1529,8 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
+ * (e.g. no physical connection etc.)
+ */
+ netdev_dbg(netdev,
+- "Updating link info failed with err %d aq_err %s\n",
+- status,
++ "Updating link info failed with err %pe aq_err %s\n",
++ ERR_PTR(status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ }
+
+@@ -1636,8 +1636,8 @@ static int i40e_nway_reset(struct net_device *netdev)
+
+ ret = i40e_aq_set_link_restart_an(hw, link_up, NULL);
+ if (ret) {
+- netdev_info(netdev, "link restart failed, err %d aq_err %s\n",
+- ret,
++ netdev_info(netdev, "link restart failed, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return -EIO;
+ }
+@@ -1753,20 +1753,20 @@ static int i40e_set_pauseparam(struct net_device *netdev,
+ status = i40e_set_fc(hw, &aq_failures, link_up);
+
+ if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) {
+- netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %d aq_err %s\n",
+- status,
++ netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %pe aq_err %s\n",
++ ERR_PTR(status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ err = -EAGAIN;
+ }
+ if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) {
+- netdev_info(netdev, "Set fc failed on the set_phy_config call with err %d aq_err %s\n",
+- status,
++ netdev_info(netdev, "Set fc failed on the set_phy_config call with err %pe aq_err %s\n",
++ ERR_PTR(status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ err = -EAGAIN;
+ }
+ if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
+- netdev_info(netdev, "Set fc failed on the get_link_info call with err %d aq_err %s\n",
+- status,
++ netdev_info(netdev, "Set fc failed on the get_link_info call with err %pe aq_err %s\n",
++ ERR_PTR(status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ err = -EAGAIN;
+ }
+@@ -5360,8 +5360,8 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
+ 0, NULL);
+ if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
+ dev_info(&pf->pdev->dev,
+- "couldn't set switch config bits, err %d aq_err %s\n",
+- ret,
++ "couldn't set switch config bits, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ /* not a fatal problem, just keep going */
+@@ -5433,8 +5433,8 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
+ return -EBUSY;
+ default:
+ dev_warn(&pf->pdev->dev,
+- "Starting FW LLDP agent failed: error: %d, %s\n",
+- status,
++ "Starting FW LLDP agent failed: error: %pe, %s\n",
++ ERR_PTR(status),
+ i40e_aq_str(&pf->hw,
+ adq_err));
+ return -EINVAL;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 68ee2c59692d1..9f5824eb8808a 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -1822,8 +1822,8 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
+ ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
+ addr->sa_data, NULL);
+ if (ret)
+- netdev_info(netdev, "Ignoring error from firmware on LAA update, status %d, AQ ret %s\n",
+- ret,
++ netdev_info(netdev, "Ignoring error from firmware on LAA update, status %pe, AQ ret %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ }
+
+@@ -1854,8 +1854,8 @@ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
+ ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+- "Cannot set RSS key, err %d aq_err %s\n",
+- ret,
++ "Cannot set RSS key, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return ret;
+ }
+@@ -1866,8 +1866,8 @@ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
+ ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+- "Cannot set RSS lut, err %d aq_err %s\n",
+- ret,
++ "Cannot set RSS lut, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return ret;
+ }
+@@ -2358,8 +2358,8 @@ void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
+ if (aq_ret && !(aq_status == I40E_AQ_RC_ENOENT)) {
+ *retval = -EIO;
+ dev_info(&vsi->back->pdev->dev,
+- "ignoring delete macvlan error on %s, err %d, aq_err %s\n",
+- vsi_name, aq_ret,
++ "ignoring delete macvlan error on %s, err %pe, aq_err %s\n",
++ vsi_name, ERR_PTR(aq_ret),
+ i40e_aq_str(hw, aq_status));
+ }
+ }
+@@ -2488,8 +2488,8 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
+ NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+- "Set default VSI failed, err %d, aq_err %s\n",
+- aq_ret,
++ "Set default VSI failed, err %pe, aq_err %s\n",
++ ERR_PTR(aq_ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ }
+ } else {
+@@ -2500,8 +2500,8 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
+ true);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+- "set unicast promisc failed, err %d, aq_err %s\n",
+- aq_ret,
++ "set unicast promisc failed, err %pe, aq_err %s\n",
++ ERR_PTR(aq_ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ }
+ aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
+@@ -2510,8 +2510,8 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
+ promisc, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+- "set multicast promisc failed, err %d, aq_err %s\n",
+- aq_ret,
++ "set multicast promisc failed, err %pe, aq_err %s\n",
++ ERR_PTR(aq_ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ }
+ }
+@@ -2815,9 +2815,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
+ retval = i40e_aq_rc_to_posix(aq_ret,
+ hw->aq.asq_last_status);
+ dev_info(&pf->pdev->dev,
+- "set multi promisc failed on %s, err %d aq_err %s\n",
++ "set multi promisc failed on %s, err %pe aq_err %s\n",
+ vsi_name,
+- aq_ret,
++ ERR_PTR(aq_ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ } else {
+ dev_info(&pf->pdev->dev, "%s allmulti mode.\n",
+@@ -2835,10 +2835,10 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
+ retval = i40e_aq_rc_to_posix(aq_ret,
+ hw->aq.asq_last_status);
+ dev_info(&pf->pdev->dev,
+- "Setting promiscuous %s failed on %s, err %d aq_err %s\n",
++ "Setting promiscuous %s failed on %s, err %pe aq_err %s\n",
+ cur_promisc ? "on" : "off",
+ vsi_name,
+- aq_ret,
++ ERR_PTR(aq_ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ }
+ }
+@@ -2986,8 +2986,8 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
+ ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+- "update vlan stripping failed, err %d aq_err %s\n",
+- ret,
++ "update vlan stripping failed, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&vsi->back->hw,
+ vsi->back->hw.aq.asq_last_status));
+ }
+@@ -3021,8 +3021,8 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
+ ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+- "update vlan stripping failed, err %d aq_err %s\n",
+- ret,
++ "update vlan stripping failed, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&vsi->back->hw,
+ vsi->back->hw.aq.asq_last_status));
+ }
+@@ -3266,8 +3266,8 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
+ ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+- "add pvid failed, err %d aq_err %s\n",
+- ret,
++ "add pvid failed, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&vsi->back->hw,
+ vsi->back->hw.aq.asq_last_status));
+ return -ENOENT;
+@@ -5533,8 +5533,8 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
+ ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+- "couldn't get PF vsi bw config, err %d aq_err %s\n",
+- ret,
++ "couldn't get PF vsi bw config, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ return -EINVAL;
+ }
+@@ -5544,8 +5544,8 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
+ NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+- "couldn't get PF vsi ets bw config, err %d aq_err %s\n",
+- ret,
++ "couldn't get PF vsi ets bw config, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ return -EINVAL;
+ }
+@@ -5734,8 +5734,8 @@ int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset)
+
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret) {
+- dev_info(&pf->pdev->dev, "Update vsi config failed, err %d aq_err %s\n",
+- ret,
++ dev_info(&pf->pdev->dev, "Update vsi config failed, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return ret;
+ }
+@@ -5790,8 +5790,8 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
+ &bw_config, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+- "Failed querying vsi bw info, err %d aq_err %s\n",
+- ret,
++ "Failed querying vsi bw info, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ goto out;
+ }
+@@ -5857,8 +5857,8 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+- "Update vsi tc config failed, err %d aq_err %s\n",
+- ret,
++ "Update vsi tc config failed, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ goto out;
+ }
+@@ -5870,8 +5870,8 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
+ ret = i40e_vsi_get_bw_info(vsi);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+- "Failed updating vsi bw info, err %d aq_err %s\n",
+- ret,
++ "Failed updating vsi bw info, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ goto out;
+ }
+@@ -5962,8 +5962,8 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
+ I40E_MAX_BW_INACTIVE_ACCUM, NULL);
+ if (ret)
+ dev_err(&pf->pdev->dev,
+- "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %d aq_err %s\n",
+- max_tx_rate, seid, ret,
++ "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %pe aq_err %s\n",
++ max_tx_rate, seid, ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ return ret;
+ }
+@@ -6038,8 +6038,8 @@ static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
+ last_aq_status = pf->hw.aq.asq_last_status;
+ if (ret)
+ dev_info(&pf->pdev->dev,
+- "Failed to delete cloud filter, err %d aq_err %s\n",
+- ret,
++ "Failed to delete cloud filter, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, last_aq_status));
+ kfree(cfilter);
+ }
+@@ -6173,8 +6173,8 @@ static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
+ ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+- "Cannot set RSS lut, err %d aq_err %s\n",
+- ret,
++ "Cannot set RSS lut, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ kfree(lut);
+ return ret;
+@@ -6272,8 +6272,8 @@ static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
+ ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+- "add new vsi failed, err %d aq_err %s\n",
+- ret,
++ "add new vsi failed, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ return -ENOENT;
+@@ -6518,8 +6518,8 @@ static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
+ mode, NULL);
+ if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
+ dev_err(&pf->pdev->dev,
+- "couldn't set switch config bits, err %d aq_err %s\n",
+- ret,
++ "couldn't set switch config bits, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(hw,
+ hw->aq.asq_last_status));
+
+@@ -6719,8 +6719,8 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
+ &bw_data, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+- "VEB bw config failed, err %d aq_err %s\n",
+- ret,
++ "VEB bw config failed, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ goto out;
+ }
+@@ -6729,8 +6729,8 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
+ ret = i40e_veb_get_bw_info(veb);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+- "Failed getting veb bw config, err %d aq_err %s\n",
+- ret,
++ "Failed getting veb bw config, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ }
+
+@@ -6813,8 +6813,8 @@ static int i40e_resume_port_tx(struct i40e_pf *pf)
+ ret = i40e_aq_resume_port_tx(hw, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+- "Resume Port Tx failed, err %d aq_err %s\n",
+- ret,
++ "Resume Port Tx failed, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ /* Schedule PF reset to recover */
+ set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
+@@ -6838,8 +6838,8 @@ static int i40e_suspend_port_tx(struct i40e_pf *pf)
+ ret = i40e_aq_suspend_port_tx(hw, pf->mac_seid, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+- "Suspend Port Tx failed, err %d aq_err %s\n",
+- ret,
++ "Suspend Port Tx failed, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ /* Schedule PF reset to recover */
+ set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
+@@ -6878,8 +6878,8 @@ static int i40e_hw_set_dcb_config(struct i40e_pf *pf,
+ ret = i40e_set_dcb_config(&pf->hw);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+- "Set DCB Config failed, err %d aq_err %s\n",
+- ret,
++ "Set DCB Config failed, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ goto out;
+ }
+@@ -6995,8 +6995,8 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
+ i40e_aqc_opc_modify_switching_comp_ets, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+- "Modify Port ETS failed, err %d aq_err %s\n",
+- ret,
++ "Modify Port ETS failed, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ goto out;
+ }
+@@ -7033,8 +7033,8 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
+ ret = i40e_aq_dcb_updated(&pf->hw, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+- "DCB Updated failed, err %d aq_err %s\n",
+- ret,
++ "DCB Updated failed, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ goto out;
+ }
+@@ -7117,8 +7117,8 @@ int i40e_dcb_sw_default_config(struct i40e_pf *pf)
+ i40e_aqc_opc_enable_switching_comp_ets, NULL);
+ if (err) {
+ dev_info(&pf->pdev->dev,
+- "Enable Port ETS failed, err %d aq_err %s\n",
+- err,
++ "Enable Port ETS failed, err %pe aq_err %s\n",
++ ERR_PTR(err),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ err = -ENOENT;
+ goto out;
+@@ -7197,8 +7197,8 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
+ pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
+ } else {
+ dev_info(&pf->pdev->dev,
+- "Query for DCB configuration failed, err %d aq_err %s\n",
+- err,
++ "Query for DCB configuration failed, err %pe aq_err %s\n",
++ ERR_PTR(err),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ }
+
+@@ -7436,8 +7436,8 @@ static int i40e_force_link_state(struct i40e_pf *pf, bool is_up)
+ NULL);
+ if (err) {
+ dev_err(&pf->pdev->dev,
+- "failed to get phy cap., ret = %d last_status = %s\n",
+- err,
++ "failed to get phy cap., ret = %pe last_status = %s\n",
++ ERR_PTR(err),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return err;
+ }
+@@ -7448,8 +7448,8 @@ static int i40e_force_link_state(struct i40e_pf *pf, bool is_up)
+ NULL);
+ if (err) {
+ dev_err(&pf->pdev->dev,
+- "failed to get phy cap., ret = %d last_status = %s\n",
+- err,
++ "failed to get phy cap., ret = %pe last_status = %s\n",
++ ERR_PTR(err),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return err;
+ }
+@@ -7493,8 +7493,8 @@ static int i40e_force_link_state(struct i40e_pf *pf, bool is_up)
+
+ if (err) {
+ dev_err(&pf->pdev->dev,
+- "set phy config ret = %d last_status = %s\n",
+- err,
++ "set phy config ret = %pe last_status = %s\n",
++ ERR_PTR(err),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ return err;
+ }
+@@ -7834,8 +7834,8 @@ static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
+ rx_ring->netdev = NULL;
+ }
+ dev_info(&pf->pdev->dev,
+- "Error adding mac filter on macvlan err %d, aq_err %s\n",
+- ret,
++ "Error adding mac filter on macvlan err %pe, aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(hw, aq_err));
+ netdev_err(vdev, "L2fwd offload disabled to L2 filter error\n");
+ }
+@@ -7907,8 +7907,8 @@ static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt,
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+- "Update vsi tc config failed, err %d aq_err %s\n",
+- ret,
++ "Update vsi tc config failed, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return ret;
+ }
+@@ -8123,8 +8123,8 @@ static void i40e_fwd_del(struct net_device *netdev, void *vdev)
+ ch->fwd = NULL;
+ } else {
+ dev_info(&pf->pdev->dev,
+- "Error deleting mac filter on macvlan err %d, aq_err %s\n",
+- ret,
++ "Error deleting mac filter on macvlan err %pe, aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(hw, aq_err));
+ }
+ break;
+@@ -8875,7 +8875,8 @@ static int i40e_delete_clsflower(struct i40e_vsi *vsi,
+ kfree(filter);
+ if (err) {
+ dev_err(&pf->pdev->dev,
+- "Failed to delete cloud filter, err %d\n", err);
++ "Failed to delete cloud filter, err %pe\n",
++ ERR_PTR(err));
+ return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
+ }
+
+@@ -9437,8 +9438,8 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
+ pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+ } else {
+ dev_info(&pf->pdev->dev,
+- "Failed querying DCB configuration data from firmware, err %d aq_err %s\n",
+- ret,
++ "Failed querying DCB configuration data from firmware, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ }
+@@ -10264,8 +10265,8 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
+ ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+- "couldn't get PF vsi config, err %d aq_err %s\n",
+- ret,
++ "couldn't get PF vsi config, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ return;
+ }
+@@ -10276,8 +10277,8 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
+ ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+- "update vsi switch failed, err %d aq_err %s\n",
+- ret,
++ "update vsi switch failed, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ }
+ }
+@@ -10300,8 +10301,8 @@ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
+ ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+- "couldn't get PF vsi config, err %d aq_err %s\n",
+- ret,
++ "couldn't get PF vsi config, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ return;
+ }
+@@ -10312,8 +10313,8 @@ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
+ ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+- "update vsi switch failed, err %d aq_err %s\n",
+- ret,
++ "update vsi switch failed, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ }
+ }
+@@ -10457,8 +10458,8 @@ static int i40e_get_capabilities(struct i40e_pf *pf,
+ buf_len = data_size;
+ } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) {
+ dev_info(&pf->pdev->dev,
+- "capability discovery failed, err %d aq_err %s\n",
+- err,
++ "capability discovery failed, err %pe aq_err %s\n",
++ ERR_PTR(err),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ return -ENODEV;
+@@ -10595,8 +10596,8 @@ static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
+
+ if (ret) {
+ dev_dbg(&pf->pdev->dev,
+- "Failed to rebuild cloud filter, err %d aq_err %s\n",
+- ret,
++ "Failed to rebuild cloud filter, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ return ret;
+@@ -10836,8 +10837,8 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+ /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
+ ret = i40e_init_adminq(&pf->hw);
+ if (ret) {
+- dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %d aq_err %s\n",
+- ret,
++ dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ goto clear_recovery;
+ }
+@@ -10948,8 +10949,8 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+ I40E_AQ_EVENT_MEDIA_NA |
+ I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
+ if (ret)
+- dev_info(&pf->pdev->dev, "set phy mask fail, err %d aq_err %s\n",
+- ret,
++ dev_info(&pf->pdev->dev, "set phy mask fail, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+
+ /* Rebuild the VSIs and VEBs that existed before reset.
+@@ -11052,8 +11053,8 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+ msleep(75);
+ ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
+ if (ret)
+- dev_info(&pf->pdev->dev, "link restart failed, err %d aq_err %s\n",
+- ret,
++ dev_info(&pf->pdev->dev, "link restart failed, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ }
+@@ -11084,9 +11085,9 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+ ret = i40e_set_promiscuous(pf, pf->cur_promisc);
+ if (ret)
+ dev_warn(&pf->pdev->dev,
+- "Failed to restore promiscuous setting: %s, err %d aq_err %s\n",
++ "Failed to restore promiscuous setting: %s, err %pe aq_err %s\n",
+ pf->cur_promisc ? "on" : "off",
+- ret,
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+
+ i40e_reset_all_vfs(pf, true);
+@@ -12220,8 +12221,8 @@ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
+ (struct i40e_aqc_get_set_rss_key_data *)seed);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+- "Cannot get RSS key, err %d aq_err %s\n",
+- ret,
++ "Cannot get RSS key, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ return ret;
+@@ -12234,8 +12235,8 @@ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
+ ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+- "Cannot get RSS lut, err %d aq_err %s\n",
+- ret,
++ "Cannot get RSS lut, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ return ret;
+@@ -12575,8 +12576,8 @@ int i40e_commit_partition_bw_setting(struct i40e_pf *pf)
+ last_aq_status = pf->hw.aq.asq_last_status;
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+- "Cannot acquire NVM for read access, err %d aq_err %s\n",
+- ret,
++ "Cannot acquire NVM for read access, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, last_aq_status));
+ goto bw_commit_out;
+ }
+@@ -12592,8 +12593,8 @@ int i40e_commit_partition_bw_setting(struct i40e_pf *pf)
+ last_aq_status = pf->hw.aq.asq_last_status;
+ i40e_release_nvm(&pf->hw);
+ if (ret) {
+- dev_info(&pf->pdev->dev, "NVM read error, err %d aq_err %s\n",
+- ret,
++ dev_info(&pf->pdev->dev, "NVM read error, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, last_aq_status));
+ goto bw_commit_out;
+ }
+@@ -12606,8 +12607,8 @@ int i40e_commit_partition_bw_setting(struct i40e_pf *pf)
+ last_aq_status = pf->hw.aq.asq_last_status;
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+- "Cannot acquire NVM for write access, err %d aq_err %s\n",
+- ret,
++ "Cannot acquire NVM for write access, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, last_aq_status));
+ goto bw_commit_out;
+ }
+@@ -12626,8 +12627,8 @@ int i40e_commit_partition_bw_setting(struct i40e_pf *pf)
+ i40e_release_nvm(&pf->hw);
+ if (ret)
+ dev_info(&pf->pdev->dev,
+- "BW settings NOT SAVED, err %d aq_err %s\n",
+- ret,
++ "BW settings NOT SAVED, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, last_aq_status));
+ bw_commit_out:
+
+@@ -12681,8 +12682,8 @@ static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf)
+
+ err_nvm:
+ dev_warn(&pf->pdev->dev,
+- "total-port-shutdown feature is off due to read nvm error: %d\n",
+- read_status);
++ "total-port-shutdown feature is off due to read nvm error: %pe\n",
++ ERR_PTR(read_status));
+ return ret;
+ }
+
+@@ -13009,8 +13010,8 @@ static int i40e_udp_tunnel_set_port(struct net_device *netdev,
+ ret = i40e_aq_add_udp_tunnel(hw, ntohs(ti->port), type, &filter_index,
+ NULL);
+ if (ret) {
+- netdev_info(netdev, "add UDP port failed, err %d aq_err %s\n",
+- ret,
++ netdev_info(netdev, "add UDP port failed, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return -EIO;
+ }
+@@ -13029,8 +13030,8 @@ static int i40e_udp_tunnel_unset_port(struct net_device *netdev,
+
+ ret = i40e_aq_del_udp_tunnel(hw, ti->hw_priv, NULL);
+ if (ret) {
+- netdev_info(netdev, "delete UDP port failed, err %d aq_err %s\n",
+- ret,
++ netdev_info(netdev, "delete UDP port failed, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return -EIO;
+ }
+@@ -13919,8 +13920,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+- "couldn't get PF vsi config, err %d aq_err %s\n",
+- ret,
++ "couldn't get PF vsi config, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ return -ENOENT;
+@@ -13969,8 +13970,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+- "update vsi failed, err %d aq_err %s\n",
+- ret,
++ "update vsi failed, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ ret = -ENOENT;
+@@ -13992,9 +13993,9 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
+ * message and continue
+ */
+ dev_info(&pf->pdev->dev,
+- "failed to configure TCs for main VSI tc_map 0x%08x, err %d aq_err %s\n",
++ "failed to configure TCs for main VSI tc_map 0x%08x, err %pe aq_err %s\n",
+ enabled_tc,
+- ret,
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ }
+@@ -14088,8 +14089,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
+ ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+- "add vsi failed, err %d aq_err %s\n",
+- ret,
++ "add vsi failed, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ ret = -ENOENT;
+@@ -14120,8 +14121,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
+ ret = i40e_vsi_get_bw_info(vsi);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+- "couldn't get vsi bw info, err %d aq_err %s\n",
+- ret,
++ "couldn't get vsi bw info, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ /* VSI is already added so not tearing that up */
+ ret = 0;
+@@ -14567,8 +14568,8 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)
+ &bw_data, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+- "query veb bw config failed, err %d aq_err %s\n",
+- ret,
++ "query veb bw config failed, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
+ goto out;
+ }
+@@ -14577,8 +14578,8 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)
+ &ets_data, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+- "query veb bw ets config failed, err %d aq_err %s\n",
+- ret,
++ "query veb bw ets config failed, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
+ goto out;
+ }
+@@ -14774,8 +14775,8 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
+ /* get a VEB from the hardware */
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+- "couldn't add VEB, err %d aq_err %s\n",
+- ret,
++ "couldn't add VEB, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ return -EPERM;
+ }
+@@ -14785,16 +14786,16 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
+ &veb->stats_idx, NULL, NULL, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+- "couldn't get VEB statistics idx, err %d aq_err %s\n",
+- ret,
++ "couldn't get VEB statistics idx, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ return -EPERM;
+ }
+ ret = i40e_veb_get_bw_info(veb);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+- "couldn't get VEB bw info, err %d aq_err %s\n",
+- ret,
++ "couldn't get VEB bw info, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
+ return -ENOENT;
+@@ -15050,8 +15051,8 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui
+ ret = i40e_fetch_switch_configuration(pf, false);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+- "couldn't fetch switch config, err %d aq_err %s\n",
+- ret,
++ "couldn't fetch switch config, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ return ret;
+ }
+@@ -15077,8 +15078,8 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui
+ NULL);
+ if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
+ dev_info(&pf->pdev->dev,
+- "couldn't set switch config bits, err %d aq_err %s\n",
+- ret,
++ "couldn't set switch config bits, err %pe aq_err %s\n",
++ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ /* not a fatal problem, just keep going */
+@@ -15984,8 +15985,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ I40E_AQ_EVENT_MEDIA_NA |
+ I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
+ if (err)
+- dev_info(&pf->pdev->dev, "set phy mask fail, err %d aq_err %s\n",
+- err,
++ dev_info(&pf->pdev->dev, "set phy mask fail, err %pe aq_err %s\n",
++ ERR_PTR(err),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+
+ /* Reconfigure hardware for allowing smaller MSS in the case
+@@ -16003,8 +16004,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ msleep(75);
+ err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
+ if (err)
+- dev_info(&pf->pdev->dev, "link restart failed, err %d aq_err %s\n",
+- err,
++ dev_info(&pf->pdev->dev, "link restart failed, err %pe aq_err %s\n",
++ ERR_PTR(err),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ }
+@@ -16136,8 +16137,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ /* get the requested speeds from the fw */
+ err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
+ if (err)
+- dev_dbg(&pf->pdev->dev, "get requested speeds ret = %d last_status = %s\n",
+- err,
++ dev_dbg(&pf->pdev->dev, "get requested speeds ret = %pe last_status = %s\n",
++ ERR_PTR(err),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
+
+@@ -16147,8 +16148,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ /* get the supported phy types from the fw */
+ err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
+ if (err)
+- dev_dbg(&pf->pdev->dev, "get supported phy types ret = %d last_status = %s\n",
+- err,
++ dev_dbg(&pf->pdev->dev, "get supported phy types ret = %pe last_status = %s\n",
++ ERR_PTR(err),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+
+ /* make sure the MFS hasn't been set lower than the default */
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+index 779ba907009a5..f99c1f7fec406 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+@@ -1429,8 +1429,8 @@ static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
+ buff_size, &cmd_details);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+- "%s err %d aq_err %s\n",
+- __func__, status,
++ "%s err %pe aq_err %s\n",
++ __func__, ERR_PTR(status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ return status;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+index 547e67d9470b7..fb87912b47617 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+@@ -1264,9 +1264,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
+ int aq_err = pf->hw.aq.asq_last_status;
+
+ dev_err(&pf->pdev->dev,
+- "VF %d failed to set multicast promiscuous mode err %d aq_err %s\n",
++ "VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
+ vf->vf_id,
+- aq_ret,
++ ERR_PTR(aq_ret),
+ i40e_aq_str(&pf->hw, aq_err));
+
+ return aq_ret;
+@@ -1280,9 +1280,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
+ int aq_err = pf->hw.aq.asq_last_status;
+
+ dev_err(&pf->pdev->dev,
+- "VF %d failed to set unicast promiscuous mode err %d aq_err %s\n",
++ "VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
+ vf->vf_id,
+- aq_ret,
++ ERR_PTR(aq_ret),
+ i40e_aq_str(&pf->hw, aq_err));
+ }
+
+@@ -1297,9 +1297,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
+ int aq_err = pf->hw.aq.asq_last_status;
+
+ dev_err(&pf->pdev->dev,
+- "VF %d failed to set multicast promiscuous mode err %d aq_err %s\n",
++ "VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
+ vf->vf_id,
+- aq_ret,
++ ERR_PTR(aq_ret),
+ i40e_aq_str(&pf->hw, aq_err));
+
+ if (!aq_tmp)
+@@ -1313,9 +1313,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
+ int aq_err = pf->hw.aq.asq_last_status;
+
+ dev_err(&pf->pdev->dev,
+- "VF %d failed to set unicast promiscuous mode err %d aq_err %s\n",
++ "VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
+ vf->vf_id,
+- aq_ret,
++ ERR_PTR(aq_ret),
+ i40e_aq_str(&pf->hw, aq_err));
+
+ if (!aq_tmp)
+@@ -3615,8 +3615,8 @@ static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
+ ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
+ if (ret)
+ dev_err(&pf->pdev->dev,
+- "VF %d: Failed to delete cloud filter, err %d aq_err %s\n",
+- vf->vf_id, ret,
++ "VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
++ vf->vf_id, ERR_PTR(ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+
+@@ -3718,8 +3718,8 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
+ ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+- "VF %d: Failed to delete cloud filter, err %d aq_err %s\n",
+- vf->vf_id, ret,
++ "VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
++ vf->vf_id, ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ goto err;
+ }
+@@ -3852,8 +3852,8 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
+ ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+- "VF %d: Failed to add cloud filter, err %d aq_err %s\n",
+- vf->vf_id, ret,
++ "VF %d: Failed to add cloud filter, err %pe aq_err %s\n",
++ vf->vf_id, ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ goto err_free;
+ }
+--
+2.42.0
+
--- /dev/null
+From ce9e0ecd7bacba004af7f394a6fe7dbd0a083a22 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 19 Nov 2023 22:17:59 +0800
+Subject: ipv4: Correct/silence an endian warning in __ip_do_redirect
+
+From: Kunwu Chan <chentao@kylinos.cn>
+
+[ Upstream commit c0e2926266af3b5acf28df0a8fc6e4d90effe0bb ]
+
+net/ipv4/route.c:783:46: warning: incorrect type in argument 2 (different base types)
+net/ipv4/route.c:783:46: expected unsigned int [usertype] key
+net/ipv4/route.c:783:46: got restricted __be32 [usertype] new_gw
+
+Fixes: 969447f226b4 ("ipv4: use new_gw for redirect neigh lookup")
+Suggested-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Kunwu Chan <chentao@kylinos.cn>
+Link: https://lore.kernel.org/r/20231119141759.420477-1-chentao@kylinos.cn
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/route.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 9cbaae4f5ee71..474f391fab35d 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -780,7 +780,7 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
+ goto reject_redirect;
+ }
+
+- n = __ipv4_neigh_lookup(rt->dst.dev, new_gw);
++ n = __ipv4_neigh_lookup(rt->dst.dev, (__force u32)new_gw);
+ if (!n)
+ n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
+ if (!IS_ERR(n)) {
+--
+2.42.0
+
--- /dev/null
+From eddda0f1980c2e4c8d001eab399523f82b84aa47 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 Nov 2023 12:41:26 +0100
+Subject: lockdep: Fix block chain corruption
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit bca4104b00fec60be330cd32818dd5c70db3d469 ]
+
+Kent reported an occasional KASAN splat in lockdep. Mark then noted:
+
+> I suspect the dodgy access is to chain_block_buckets[-1], which hits the last 4
+> bytes of the redzone and gets (incorrectly/misleadingly) attributed to
+> nr_large_chain_blocks.
+
+That would mean @size == 0, at which point size_to_bucket() returns -1
+and the above happens.
+
+alloc_chain_hlocks() has 'size - req', for the first with the
+precondition 'size >= rq', which allows the 0.
+
+This code is trying to split a block, del_chain_block() takes what we
+need, and add_chain_block() puts back the remainder, except in the
+above case the remainder is 0 sized and things go sideways.
+
+Fixes: 810507fe6fd5 ("locking/lockdep: Reuse freed chain_hlocks entries")
+Reported-by: Kent Overstreet <kent.overstreet@linux.dev>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Tested-by: Kent Overstreet <kent.overstreet@linux.dev>
+Link: https://lkml.kernel.org/r/20231121114126.GH8262@noisy.programming.kicks-ass.net
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/locking/lockdep.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
+index 0224b0329d011..3b38303ed27b3 100644
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -3453,7 +3453,8 @@ static int alloc_chain_hlocks(int req)
+ size = chain_block_size(curr);
+ if (likely(size >= req)) {
+ del_chain_block(0, size, chain_block_next(curr));
+- add_chain_block(curr + req, size - req);
++ if (size > req)
++ add_chain_block(curr + req, size - req);
+ return curr;
+ }
+ }
+--
+2.42.0
+
--- /dev/null
+From b089e6199aebf25ed1a80f0cbe7f859ba8420836 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 26 Mar 2023 16:31:09 +0200
+Subject: media: camss: Convert to platform remove callback returning void
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+
+[ Upstream commit 428bbf4be4018aefa26e4d6531779fa8925ecaaf ]
+
+The .remove() callback for a platform driver returns an int which makes
+many driver authors wrongly assume it's possible to do error handling by
+returning an error code. However the value returned is (mostly) ignored
+and this typically results in resource leaks. To improve here there is a
+quest to make the remove callback return void. In the first step of this
+quest all drivers are converted to .remove_new() which already returns
+void.
+
+Trivially convert this driver from always returning zero in the remove
+callback to the void returning variant.
+
+Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Stable-dep-of: f69791c39745 ("media: qcom: camss: Fix genpd cleanup")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/platform/qcom/camss/camss.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
+index f7fa84f623282..04e65edbfb870 100644
+--- a/drivers/media/platform/qcom/camss/camss.c
++++ b/drivers/media/platform/qcom/camss/camss.c
+@@ -1723,7 +1723,7 @@ void camss_delete(struct camss *camss)
+ *
+ * Always returns 0.
+ */
+-static int camss_remove(struct platform_device *pdev)
++static void camss_remove(struct platform_device *pdev)
+ {
+ struct camss *camss = platform_get_drvdata(pdev);
+
+@@ -1733,8 +1733,6 @@ static int camss_remove(struct platform_device *pdev)
+
+ if (atomic_read(&camss->ref_count) == 0)
+ camss_delete(camss);
+-
+- return 0;
+ }
+
+ static const struct of_device_id camss_dt_match[] = {
+@@ -1796,7 +1794,7 @@ static const struct dev_pm_ops camss_pm_ops = {
+
+ static struct platform_driver qcom_camss_driver = {
+ .probe = camss_probe,
+- .remove = camss_remove,
++ .remove_new = camss_remove,
+ .driver = {
+ .name = "qcom-camss",
+ .of_match_table = camss_dt_match,
+--
+2.42.0
+
--- /dev/null
+From 0906a0f9ec3171a7dbcd3871f70c03b5b3a04f46 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Jul 2022 23:15:48 +0100
+Subject: media: camss: Split power domain management
+
+From: Vladimir Zapolskiy <vladimir.zapolskiy@linaro.org>
+
+[ Upstream commit 46cc031754985ee24034d55687540adb079f8630 ]
+
+There are three cases of power domain management on supported platforms:
+1) CAMSS on MSM8916, where a single VFE power domain is operated outside
+ of the camss device driver,
+2) CAMSS on MSM8996 and SDM630/SDM660, where two VFE power domains are
+ managed separately by the camss device driver, the power domains are
+ linked and unlinked on demand by their functions vfe_pm_domain_on()
+ and vfe_pm_domain_off() respectively,
+3) CAMSS on SDM845 and SM8250 platforms, and there are two VFE power
+ domains and their parent power domain TITAN_TOP, the latter one
+ shall be turned on prior to turning on any of VFE power domains.
+
+Due to a previously missing link between TITAN_TOP and VFEx power domains
+in the latter case, which is now fixed by [1], it was decided always to
+turn on all found VFE power domains and TITAN_TOP power domain, even if
+just one particular VFE is needed to be enabled or none of VFE power
+domains are required, for instance the latter case is when vfe_lite is in
+use. This misusage becomes more inconvenient and clumsy, if next generations
+are to be supported, for instance CAMSS on SM8450 has three VFE power
+domains.
+
+The change splits the power management support for platforms with TITAN_TOP
+parent power domain, and, since 'power-domain-names' property is not
+present in camss device tree nodes, the assumption is that the first
+N power domains from the 'power-domains' list correspond to VFE power
+domains, and, if the number of power domains is greater than number of
+non-lite VFEs, then the last power domain from the list is the TITAN_TOP
+power domain.
+
+Signed-off-by: Vladimir Zapolskiy <vladimir.zapolskiy@linaro.org>
+Reviewed-by: Robert Foss <robert.foss@linaro.org>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Mauro Carvalho Chehab <mchehab@kernel.org>
+Stable-dep-of: f69791c39745 ("media: qcom: camss: Fix genpd cleanup")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../media/platform/qcom/camss/camss-vfe-170.c | 20 ++++++++++++-
+ .../media/platform/qcom/camss/camss-vfe-480.c | 20 ++++++++++++-
+ drivers/media/platform/qcom/camss/camss.c | 30 ++++++++++---------
+ 3 files changed, 54 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/media/platform/qcom/camss/camss-vfe-170.c b/drivers/media/platform/qcom/camss/camss-vfe-170.c
+index 07b64d257512c..f9492b1d16e3e 100644
+--- a/drivers/media/platform/qcom/camss/camss-vfe-170.c
++++ b/drivers/media/platform/qcom/camss/camss-vfe-170.c
+@@ -671,7 +671,12 @@ static void vfe_isr_wm_done(struct vfe_device *vfe, u8 wm)
+ */
+ static void vfe_pm_domain_off(struct vfe_device *vfe)
+ {
+- /* nop */
++ struct camss *camss = vfe->camss;
++
++ if (vfe->id >= camss->vfe_num)
++ return;
++
++ device_link_del(camss->genpd_link[vfe->id]);
+ }
+
+ /*
+@@ -680,6 +685,19 @@ static void vfe_pm_domain_off(struct vfe_device *vfe)
+ */
+ static int vfe_pm_domain_on(struct vfe_device *vfe)
+ {
++ struct camss *camss = vfe->camss;
++ enum vfe_line_id id = vfe->id;
++
++ if (id >= camss->vfe_num)
++ return 0;
++
++ camss->genpd_link[id] = device_link_add(camss->dev, camss->genpd[id],
++ DL_FLAG_STATELESS |
++ DL_FLAG_PM_RUNTIME |
++ DL_FLAG_RPM_ACTIVE);
++ if (!camss->genpd_link[id])
++ return -EINVAL;
++
+ return 0;
+ }
+
+diff --git a/drivers/media/platform/qcom/camss/camss-vfe-480.c b/drivers/media/platform/qcom/camss/camss-vfe-480.c
+index ab42600f7a745..72f5cfeeb49bf 100644
+--- a/drivers/media/platform/qcom/camss/camss-vfe-480.c
++++ b/drivers/media/platform/qcom/camss/camss-vfe-480.c
+@@ -478,7 +478,12 @@ static void vfe_isr_wm_done(struct vfe_device *vfe, u8 wm)
+ */
+ static void vfe_pm_domain_off(struct vfe_device *vfe)
+ {
+- /* nop */
++ struct camss *camss = vfe->camss;
++
++ if (vfe->id >= camss->vfe_num)
++ return;
++
++ device_link_del(camss->genpd_link[vfe->id]);
+ }
+
+ /*
+@@ -487,6 +492,19 @@ static void vfe_pm_domain_off(struct vfe_device *vfe)
+ */
+ static int vfe_pm_domain_on(struct vfe_device *vfe)
+ {
++ struct camss *camss = vfe->camss;
++ enum vfe_line_id id = vfe->id;
++
++ if (id >= camss->vfe_num)
++ return 0;
++
++ camss->genpd_link[id] = device_link_add(camss->dev, camss->genpd[id],
++ DL_FLAG_STATELESS |
++ DL_FLAG_PM_RUNTIME |
++ DL_FLAG_RPM_ACTIVE);
++ if (!camss->genpd_link[id])
++ return -EINVAL;
++
+ return 0;
+ }
+
+diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
+index 5057b2c4cf6c4..f7fa84f623282 100644
+--- a/drivers/media/platform/qcom/camss/camss.c
++++ b/drivers/media/platform/qcom/camss/camss.c
+@@ -1453,7 +1453,6 @@ static const struct media_device_ops camss_media_ops = {
+ static int camss_configure_pd(struct camss *camss)
+ {
+ struct device *dev = camss->dev;
+- int last_pm_domain = 0;
+ int i;
+ int ret;
+
+@@ -1484,32 +1483,34 @@ static int camss_configure_pd(struct camss *camss)
+ if (!camss->genpd_link)
+ return -ENOMEM;
+
++ /*
++ * VFE power domains are in the beginning of the list, and while all
++ * power domains should be attached, only if TITAN_TOP power domain is
++ * found in the list, it should be linked over here.
++ */
+ for (i = 0; i < camss->genpd_num; i++) {
+ camss->genpd[i] = dev_pm_domain_attach_by_id(camss->dev, i);
+ if (IS_ERR(camss->genpd[i])) {
+ ret = PTR_ERR(camss->genpd[i]);
+ goto fail_pm;
+ }
++ }
+
+- camss->genpd_link[i] = device_link_add(camss->dev, camss->genpd[i],
+- DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME |
+- DL_FLAG_RPM_ACTIVE);
+- if (!camss->genpd_link[i]) {
+- dev_pm_domain_detach(camss->genpd[i], true);
++ if (i > camss->vfe_num) {
++ camss->genpd_link[i - 1] = device_link_add(camss->dev, camss->genpd[i - 1],
++ DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME |
++ DL_FLAG_RPM_ACTIVE);
++ if (!camss->genpd_link[i - 1]) {
+ ret = -EINVAL;
+ goto fail_pm;
+ }
+-
+- last_pm_domain = i;
+ }
+
+ return 0;
+
+ fail_pm:
+- for (i = 0; i < last_pm_domain; i++) {
+- device_link_del(camss->genpd_link[i]);
++ for (--i ; i >= 0; i--)
+ dev_pm_domain_detach(camss->genpd[i], true);
+- }
+
+ return ret;
+ }
+@@ -1709,10 +1710,11 @@ void camss_delete(struct camss *camss)
+ if (camss->genpd_num == 1)
+ return;
+
+- for (i = 0; i < camss->genpd_num; i++) {
+- device_link_del(camss->genpd_link[i]);
++ if (camss->genpd_num > camss->vfe_num)
++ device_link_del(camss->genpd_link[camss->genpd_num - 1]);
++
++ for (i = 0; i < camss->genpd_num; i++)
+ dev_pm_domain_detach(camss->genpd[i], true);
+- }
+ }
+
+ /*
+--
+2.42.0
+
--- /dev/null
+From 8bab88621b5e1ec6b75de52c4a3e1cf5169c5efc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Aug 2023 16:16:08 +0100
+Subject: media: qcom: camss: Fix genpd cleanup
+
+From: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
+
+[ Upstream commit f69791c39745e64621216fe8919cb73c0065002b ]
+
+Right now we never release the power-domains properly on the error path.
+Add a routine to be reused for this purpose and appropriate jumps in
+probe() to run that routine where necessary.
+
+Fixes: 2f6f8af67203 ("media: camss: Refactor VFE power domain toggling")
+Cc: stable@vger.kernel.org
+Signed-off-by: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/platform/qcom/camss/camss.c | 35 ++++++++++++++---------
+ 1 file changed, 21 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
+index f490ad2266960..a30461de3e844 100644
+--- a/drivers/media/platform/qcom/camss/camss.c
++++ b/drivers/media/platform/qcom/camss/camss.c
+@@ -1538,6 +1538,20 @@ static int camss_icc_get(struct camss *camss)
+ return 0;
+ }
+
++static void camss_genpd_cleanup(struct camss *camss)
++{
++ int i;
++
++ if (camss->genpd_num == 1)
++ return;
++
++ if (camss->genpd_num > camss->vfe_num)
++ device_link_del(camss->genpd_link[camss->genpd_num - 1]);
++
++ for (i = 0; i < camss->genpd_num; i++)
++ dev_pm_domain_detach(camss->genpd[i], true);
++}
++
+ /*
+ * camss_probe - Probe CAMSS platform device
+ * @pdev: Pointer to CAMSS platform device
+@@ -1625,11 +1639,11 @@ static int camss_probe(struct platform_device *pdev)
+
+ ret = camss_init_subdevices(camss);
+ if (ret < 0)
+- return ret;
++ goto err_genpd_cleanup;
+
+ ret = dma_set_mask_and_coherent(dev, 0xffffffff);
+ if (ret)
+- return ret;
++ goto err_genpd_cleanup;
+
+ camss->media_dev.dev = camss->dev;
+ strscpy(camss->media_dev.model, "Qualcomm Camera Subsystem",
+@@ -1641,7 +1655,7 @@ static int camss_probe(struct platform_device *pdev)
+ ret = v4l2_device_register(camss->dev, &camss->v4l2_dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register V4L2 device: %d\n", ret);
+- return ret;
++ goto err_genpd_cleanup;
+ }
+
+ v4l2_async_nf_init(&camss->notifier);
+@@ -1692,28 +1706,19 @@ static int camss_probe(struct platform_device *pdev)
+ err_v4l2_device_unregister:
+ v4l2_device_unregister(&camss->v4l2_dev);
+ v4l2_async_nf_cleanup(&camss->notifier);
++err_genpd_cleanup:
++ camss_genpd_cleanup(camss);
+
+ return ret;
+ }
+
+ void camss_delete(struct camss *camss)
+ {
+- int i;
+-
+ v4l2_device_unregister(&camss->v4l2_dev);
+ media_device_unregister(&camss->media_dev);
+ media_device_cleanup(&camss->media_dev);
+
+ pm_runtime_disable(camss->dev);
+-
+- if (camss->genpd_num == 1)
+- return;
+-
+- if (camss->genpd_num > camss->vfe_num)
+- device_link_del(camss->genpd_link[camss->genpd_num - 1]);
+-
+- for (i = 0; i < camss->genpd_num; i++)
+- dev_pm_domain_detach(camss->genpd[i], true);
+ }
+
+ /*
+@@ -1732,6 +1737,8 @@ static void camss_remove(struct platform_device *pdev)
+
+ if (atomic_read(&camss->ref_count) == 0)
+ camss_delete(camss);
++
++ camss_genpd_cleanup(camss);
+ }
+
+ static const struct of_device_id camss_dt_match[] = {
+--
+2.42.0
+
--- /dev/null
+From 8b7145253cf408e137616d9eb1d66da749ac6c88 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Aug 2023 16:16:07 +0100
+Subject: media: qcom: camss: Fix V4L2 async notifier error path
+
+From: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
+
+[ Upstream commit b278080a89f452063915beda0ade6b3ed5ee4271 ]
+
+Previously the jump label err_cleanup was used higher in the probe()
+function to release the async notifier however the async notifier
+registration was moved later in the code rendering the previous four jumps
+redundant.
+
+Rename the label from err_cleanup to err_v4l2_device_unregister to capture
+what the jump does.
+
+Fixes: 51397a4ec75d ("media: qcom: Initialise V4L2 async notifier later")
+Signed-off-by: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+[hverkuil: fix old name in commit log: err_v4l2_device_register -> err_v4l2_device_unregister]
+Stable-dep-of: f69791c39745 ("media: qcom: camss: Fix genpd cleanup")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/platform/qcom/camss/camss.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
+index f794215948e71..f490ad2266960 100644
+--- a/drivers/media/platform/qcom/camss/camss.c
++++ b/drivers/media/platform/qcom/camss/camss.c
+@@ -1615,21 +1615,21 @@ static int camss_probe(struct platform_device *pdev)
+
+ ret = camss_icc_get(camss);
+ if (ret < 0)
+- goto err_cleanup;
++ return ret;
+
+ ret = camss_configure_pd(camss);
+ if (ret < 0) {
+ dev_err(dev, "Failed to configure power domains: %d\n", ret);
+- goto err_cleanup;
++ return ret;
+ }
+
+ ret = camss_init_subdevices(camss);
+ if (ret < 0)
+- goto err_cleanup;
++ return ret;
+
+ ret = dma_set_mask_and_coherent(dev, 0xffffffff);
+ if (ret)
+- goto err_cleanup;
++ return ret;
+
+ camss->media_dev.dev = camss->dev;
+ strscpy(camss->media_dev.model, "Qualcomm Camera Subsystem",
+@@ -1641,7 +1641,7 @@ static int camss_probe(struct platform_device *pdev)
+ ret = v4l2_device_register(camss->dev, &camss->v4l2_dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register V4L2 device: %d\n", ret);
+- goto err_cleanup;
++ return ret;
+ }
+
+ v4l2_async_nf_init(&camss->notifier);
+@@ -1649,12 +1649,12 @@ static int camss_probe(struct platform_device *pdev)
+ num_subdevs = camss_of_parse_ports(camss);
+ if (num_subdevs < 0) {
+ ret = num_subdevs;
+- goto err_cleanup;
++ goto err_v4l2_device_unregister;
+ }
+
+ ret = camss_register_entities(camss);
+ if (ret < 0)
+- goto err_cleanup;
++ goto err_v4l2_device_unregister;
+
+ if (num_subdevs) {
+ camss->notifier.ops = &camss_subdev_notifier_ops;
+@@ -1689,7 +1689,7 @@ static int camss_probe(struct platform_device *pdev)
+
+ err_register_subdevs:
+ camss_unregister_entities(camss);
+-err_cleanup:
++err_v4l2_device_unregister:
+ v4l2_device_unregister(&camss->v4l2_dev);
+ v4l2_async_nf_cleanup(&camss->notifier);
+
+--
+2.42.0
+
--- /dev/null
+From f07ad515fb234855586ec0500a03c5d86ed08926 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 30 Mar 2023 11:37:02 +0200
+Subject: media: qcom: Initialise V4L2 async notifier later
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Sakari Ailus <sakari.ailus@linux.intel.com>
+
+[ Upstream commit 5651bab6890a0c5d126e2559b4aa353bed201e47 ]
+
+Initialise V4L2 async notifier and parse DT for async sub-devices later,
+just before registering the notifier. This way the device can be made
+available to the V4L2 async framework from the notifier init time onwards.
+A subsequent patch will add struct v4l2_device as an argument to
+v4l2_async_nf_init().
+
+Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Tested-by: Philipp Zabel <p.zabel@pengutronix.de> # imx6qp
+Tested-by: Niklas Söderlund <niklas.soderlund@ragnatech.se> # rcar + adv746x
+Tested-by: Aishwarya Kothari <aishwarya.kothari@toradex.com> # Apalis i.MX6Q with TC358743
+Tested-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com> # Renesas RZ/G2L SMARC
+Signed-off-by: Mauro Carvalho Chehab <mchehab@kernel.org>
+Stable-dep-of: f69791c39745 ("media: qcom: camss: Fix genpd cleanup")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/platform/qcom/camss/camss.c | 21 ++++++++++-----------
+ 1 file changed, 10 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
+index 04e65edbfb870..f794215948e71 100644
+--- a/drivers/media/platform/qcom/camss/camss.c
++++ b/drivers/media/platform/qcom/camss/camss.c
+@@ -1613,14 +1613,6 @@ static int camss_probe(struct platform_device *pdev)
+ if (!camss->vfe)
+ return -ENOMEM;
+
+- v4l2_async_nf_init(&camss->notifier);
+-
+- num_subdevs = camss_of_parse_ports(camss);
+- if (num_subdevs < 0) {
+- ret = num_subdevs;
+- goto err_cleanup;
+- }
+-
+ ret = camss_icc_get(camss);
+ if (ret < 0)
+ goto err_cleanup;
+@@ -1652,9 +1644,17 @@ static int camss_probe(struct platform_device *pdev)
+ goto err_cleanup;
+ }
+
++ v4l2_async_nf_init(&camss->notifier);
++
++ num_subdevs = camss_of_parse_ports(camss);
++ if (num_subdevs < 0) {
++ ret = num_subdevs;
++ goto err_cleanup;
++ }
++
+ ret = camss_register_entities(camss);
+ if (ret < 0)
+- goto err_register_entities;
++ goto err_cleanup;
+
+ if (num_subdevs) {
+ camss->notifier.ops = &camss_subdev_notifier_ops;
+@@ -1689,9 +1689,8 @@ static int camss_probe(struct platform_device *pdev)
+
+ err_register_subdevs:
+ camss_unregister_entities(camss);
+-err_register_entities:
+- v4l2_device_unregister(&camss->v4l2_dev);
+ err_cleanup:
++ v4l2_device_unregister(&camss->v4l2_dev);
+ v4l2_async_nf_cleanup(&camss->notifier);
+
+ return ret;
+--
+2.42.0
+
--- /dev/null
+From f4cc997f2220cda5b7e26b3d60d4c7ed9ddf381b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Oct 2023 16:54:34 +0800
+Subject: MIPS: KVM: Fix a build warning about variable set but not used
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Huacai Chen <chenhuacai@loongson.cn>
+
+[ Upstream commit 83767a67e7b6a0291cde5681ec7e3708f3f8f877 ]
+
+After commit 411740f5422a ("KVM: MIPS/MMU: Implement KVM_CAP_SYNC_MMU")
+old_pte is no longer used in kvm_mips_map_page(). So remove it to fix a
+build warning about variable set but not used:
+
+ arch/mips/kvm/mmu.c: In function 'kvm_mips_map_page':
+>> arch/mips/kvm/mmu.c:701:29: warning: variable 'old_pte' set but not used [-Wunused-but-set-variable]
+ 701 | pte_t *ptep, entry, old_pte;
+ | ^~~~~~~
+
+Cc: stable@vger.kernel.org
+Fixes: 411740f5422a960 ("KVM: MIPS/MMU: Implement KVM_CAP_SYNC_MMU")
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/oe-kbuild-all/202310070530.aARZCSfh-lkp@intel.com/
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/kvm/mmu.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
+index 74cd64a24d059..19ec27be20f06 100644
+--- a/arch/mips/kvm/mmu.c
++++ b/arch/mips/kvm/mmu.c
+@@ -593,7 +593,7 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
+ gfn_t gfn = gpa >> PAGE_SHIFT;
+ int srcu_idx, err;
+ kvm_pfn_t pfn;
+- pte_t *ptep, entry, old_pte;
++ pte_t *ptep, entry;
+ bool writeable;
+ unsigned long prot_bits;
+ unsigned long mmu_seq;
+@@ -665,7 +665,6 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
+ entry = pfn_pte(pfn, __pgprot(prot_bits));
+
+ /* Write the PTE */
+- old_pte = *ptep;
+ set_pte(ptep, entry);
+
+ err = 0;
+--
+2.42.0
+
--- /dev/null
+From 8473fa9c8f1cbc7946165881ea96b2362710059f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Mar 2023 23:29:34 +0800
+Subject: mm,kfence: decouple kfence from page granularity mapping judgement
+
+From: Zhenhua Huang <quic_zhenhuah@quicinc.com>
+
+[ Upstream commit bfa7965b33ab79fc3b2f8adc14704075fe2416cd ]
+
+Kfence only needs its pool to be mapped as page granularity, if it is
+inited early. Previous judgement was a bit over protected. From [1], Mark
+suggested to "just map the KFENCE region a page granularity". So I
+decouple it from judgement and do page granularity mapping for kfence
+pool only. Need to be noticed that late init of kfence pool still requires
+page granularity mapping.
+
+Page granularity mapping in theory cost more(2M per 1GB) memory on arm64
+platform. Like what I've tested on QEMU(emulated 1GB RAM) with
+gki_defconfig, also turning off rodata protection:
+Before:
+[root@liebao ]# cat /proc/meminfo
+MemTotal: 999484 kB
+After:
+[root@liebao ]# cat /proc/meminfo
+MemTotal: 1001480 kB
+
+To implement this, also relocate the kfence pool allocation before the
+linear mapping setting up, arm64_kfence_alloc_pool is to allocate phys
+addr, __kfence_pool is to be set after linear mapping set up.
+
+LINK: [1] https://lore.kernel.org/linux-arm-kernel/Y+IsdrvDNILA59UN@FVFF77S0Q05N/
+Suggested-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Zhenhua Huang <quic_zhenhuah@quicinc.com>
+Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
+Reviewed-by: Marco Elver <elver@google.com>
+Link: https://lore.kernel.org/r/1679066974-690-1-git-send-email-quic_zhenhuah@quicinc.com
+Signed-off-by: Will Deacon <will@kernel.org>
+Stable-dep-of: acfa60dbe038 ("arm64: mm: Fix "rodata=on" when CONFIG_RODATA_FULL_DEFAULT_ENABLED=y")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/include/asm/kfence.h | 10 ++++++
+ arch/arm64/mm/mmu.c | 61 +++++++++++++++++++++++++++++++++
+ arch/arm64/mm/pageattr.c | 7 ++--
+ mm/kfence/core.c | 4 +++
+ 4 files changed, 80 insertions(+), 2 deletions(-)
+
+diff --git a/arch/arm64/include/asm/kfence.h b/arch/arm64/include/asm/kfence.h
+index aa855c6a0ae6f..a81937fae9f6d 100644
+--- a/arch/arm64/include/asm/kfence.h
++++ b/arch/arm64/include/asm/kfence.h
+@@ -19,4 +19,14 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
+ return true;
+ }
+
++#ifdef CONFIG_KFENCE
++extern bool kfence_early_init;
++static inline bool arm64_kfence_can_set_direct_map(void)
++{
++ return !kfence_early_init;
++}
++#else /* CONFIG_KFENCE */
++static inline bool arm64_kfence_can_set_direct_map(void) { return false; }
++#endif /* CONFIG_KFENCE */
++
+ #endif /* __ASM_KFENCE_H */
+diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
+index 9a7c389651540..4b302dbf78e96 100644
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -24,6 +24,7 @@
+ #include <linux/mm.h>
+ #include <linux/vmalloc.h>
+ #include <linux/set_memory.h>
++#include <linux/kfence.h>
+
+ #include <asm/barrier.h>
+ #include <asm/cputype.h>
+@@ -38,6 +39,7 @@
+ #include <asm/ptdump.h>
+ #include <asm/tlbflush.h>
+ #include <asm/pgalloc.h>
++#include <asm/kfence.h>
+
+ #define NO_BLOCK_MAPPINGS BIT(0)
+ #define NO_CONT_MAPPINGS BIT(1)
+@@ -521,12 +523,67 @@ static int __init enable_crash_mem_map(char *arg)
+ }
+ early_param("crashkernel", enable_crash_mem_map);
+
++#ifdef CONFIG_KFENCE
++
++bool __ro_after_init kfence_early_init = !!CONFIG_KFENCE_SAMPLE_INTERVAL;
++
++/* early_param() will be parsed before map_mem() below. */
++static int __init parse_kfence_early_init(char *arg)
++{
++ int val;
++
++ if (get_option(&arg, &val))
++ kfence_early_init = !!val;
++ return 0;
++}
++early_param("kfence.sample_interval", parse_kfence_early_init);
++
++static phys_addr_t __init arm64_kfence_alloc_pool(void)
++{
++ phys_addr_t kfence_pool;
++
++ if (!kfence_early_init)
++ return 0;
++
++ kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
++ if (!kfence_pool) {
++ pr_err("failed to allocate kfence pool\n");
++ kfence_early_init = false;
++ return 0;
++ }
++
++ /* Temporarily mark as NOMAP. */
++ memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE);
++
++ return kfence_pool;
++}
++
++static void __init arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp)
++{
++ if (!kfence_pool)
++ return;
++
++ /* KFENCE pool needs page-level mapping. */
++ __map_memblock(pgdp, kfence_pool, kfence_pool + KFENCE_POOL_SIZE,
++ pgprot_tagged(PAGE_KERNEL),
++ NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
++ memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE);
++ __kfence_pool = phys_to_virt(kfence_pool);
++}
++#else /* CONFIG_KFENCE */
++
++static inline phys_addr_t arm64_kfence_alloc_pool(void) { return 0; }
++static inline void arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp) { }
++
++#endif /* CONFIG_KFENCE */
++
+ static void __init map_mem(pgd_t *pgdp)
+ {
+ static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN);
+ phys_addr_t kernel_start = __pa_symbol(_stext);
+ phys_addr_t kernel_end = __pa_symbol(__init_begin);
+ phys_addr_t start, end;
++ phys_addr_t early_kfence_pool;
+ int flags = NO_EXEC_MAPPINGS;
+ u64 i;
+
+@@ -539,6 +596,8 @@ static void __init map_mem(pgd_t *pgdp)
+ */
+ BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end));
+
++ early_kfence_pool = arm64_kfence_alloc_pool();
++
+ if (can_set_direct_map())
+ flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
+
+@@ -604,6 +663,8 @@ static void __init map_mem(pgd_t *pgdp)
+ }
+ }
+ #endif
++
++ arm64_kfence_map_pool(early_kfence_pool, pgdp);
+ }
+
+ void mark_rodata_ro(void)
+diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
+index 5922178d7a064..47f532e13d532 100644
+--- a/arch/arm64/mm/pageattr.c
++++ b/arch/arm64/mm/pageattr.c
+@@ -11,6 +11,7 @@
+ #include <asm/cacheflush.h>
+ #include <asm/set_memory.h>
+ #include <asm/tlbflush.h>
++#include <asm/kfence.h>
+
+ struct page_change_data {
+ pgprot_t set_mask;
+@@ -22,12 +23,14 @@ bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED
+ bool can_set_direct_map(void)
+ {
+ /*
+- * rodata_full, DEBUG_PAGEALLOC and KFENCE require linear map to be
++ * rodata_full and DEBUG_PAGEALLOC require linear map to be
+ * mapped at page granularity, so that it is possible to
+ * protect/unprotect single pages.
++ *
++ * KFENCE pool requires page-granular mapping if initialized late.
+ */
+ return (rodata_enabled && rodata_full) || debug_pagealloc_enabled() ||
+- IS_ENABLED(CONFIG_KFENCE);
++ arm64_kfence_can_set_direct_map();
+ }
+
+ static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
+diff --git a/mm/kfence/core.c b/mm/kfence/core.c
+index a477b7fb8aa33..c597cfebb0e86 100644
+--- a/mm/kfence/core.c
++++ b/mm/kfence/core.c
+@@ -828,6 +828,10 @@ void __init kfence_alloc_pool(void)
+ if (!kfence_sample_interval)
+ return;
+
++ /* if the pool has already been initialized by arch, skip the below. */
++ if (__kfence_pool)
++ return;
++
+ __kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
+
+ if (!__kfence_pool)
+--
+2.42.0
+
--- /dev/null
+From 3814e0c4e2ed46035b334b8f1bb44dcc360a98e1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 Nov 2023 16:42:17 -0800
+Subject: net: axienet: Fix check for partial TX checksum
+
+From: Samuel Holland <samuel.holland@sifive.com>
+
+[ Upstream commit fd0413bbf8b11f56e8aa842783b0deda0dfe2926 ]
+
+Due to a typo, the code checked the RX checksum feature in the TX path.
+
+Fixes: 8a3b7a252dca ("drivers/net/ethernet/xilinx: added Xilinx AXI Ethernet driver")
+Signed-off-by: Samuel Holland <samuel.holland@sifive.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Reviewed-by: Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
+Link: https://lore.kernel.org/r/20231122004219.3504219-1-samuel.holland@sifive.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index d14648558338b..5ea9dc251dd9a 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -821,7 +821,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
+ /* Tx Full Checksum Offload Enabled */
+ cur_p->app0 |= 2;
+- } else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
++ } else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
+ csum_start_off = skb_transport_offset(skb);
+ csum_index_off = csum_start_off + skb->csum_offset;
+ /* Tx Partial Checksum Offload Enabled */
+--
+2.42.0
+
--- /dev/null
+From 901c557d83af1ba3fa253092b2da7f3e5c5faadf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Nov 2023 10:37:05 +0800
+Subject: net/smc: avoid data corruption caused by decline
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: D. Wythe <alibuda@linux.alibaba.com>
+
+[ Upstream commit e6d71b437abc2f249e3b6a1ae1a7228e09c6e563 ]
+
+We found a data corruption issue during testing of SMC-R on Redis
+applications.
+
+The benchmark has a low probability of reporting a strange error as
+shown below.
+
+"Error: Protocol error, got "\xe2" as reply type byte"
+
+Finally, we found that the retrieved error data was as follows:
+
+0xE2 0xD4 0xC3 0xD9 0x04 0x00 0x2C 0x20 0xA6 0x56 0x00 0x16 0x3E 0x0C
+0xCB 0x04 0x02 0x01 0x00 0x00 0x20 0x00 0x00 0x00 0x00 0x00 0x00 0x00
+0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0xE2
+
+It is quite obvious that this is a SMC DECLINE message, which means that
+the applications received SMC protocol message.
+We found that this was caused by the following situations:
+
+client server
+ ¦ clc proposal
+ ------------->
+ ¦ clc accept
+ <-------------
+ ¦ clc confirm
+ ------------->
+wait llc confirm
+ send llc confirm
+ ¦failed llc confirm
+ ¦ x------
+(after 2s)timeout
+ wait llc confirm rsp
+
+wait decline
+
+(after 1s) timeout
+ (after 2s) timeout
+ ¦ decline
+ -------------->
+ ¦ decline
+ <--------------
+
+As a result, a decline message was sent in the implementation, and this
+message was read from TCP by the already-fallback connection.
+
+This patch double the client timeout as 2x of the server value,
+With this simple change, the Decline messages should never cross or
+collide (during Confirm link timeout).
+
+This issue requires an immediate solution, since the protocol updates
+involve a more long-term solution.
+
+Fixes: 0fb0b02bd6fd ("net/smc: adapt SMC client code to use the LLC flow")
+Signed-off-by: D. Wythe <alibuda@linux.alibaba.com>
+Reviewed-by: Wen Gu <guwen@linux.alibaba.com>
+Reviewed-by: Wenjia Zhang <wenjia@linux.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/smc/af_smc.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index d676119984c09..b6609527dff62 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -585,8 +585,12 @@ static int smcr_clnt_conf_first_link(struct smc_sock *smc)
+ struct smc_llc_qentry *qentry;
+ int rc;
+
+- /* receive CONFIRM LINK request from server over RoCE fabric */
+- qentry = smc_llc_wait(link->lgr, NULL, SMC_LLC_WAIT_TIME,
++ /* Receive CONFIRM LINK request from server over RoCE fabric.
++ * Increasing the client's timeout by twice as much as the server's
++ * timeout by default can temporarily avoid decline messages of
++ * both sides crossing or colliding
++ */
++ qentry = smc_llc_wait(link->lgr, NULL, 2 * SMC_LLC_WAIT_TIME,
+ SMC_LLC_CONFIRM_LINK);
+ if (!qentry) {
+ struct smc_clc_msg_decline dclc;
+--
+2.42.0
+
--- /dev/null
+From 4ec2214c03ee60ca46e9cf4eeaffb775db75ae42 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Nov 2023 13:06:29 +0100
+Subject: net: usb: ax88179_178a: fix failed operations during ax88179_reset
+
+From: Jose Ignacio Tornos Martinez <jtornosm@redhat.com>
+
+[ Upstream commit 0739af07d1d947af27c877f797cb82ceee702515 ]
+
+Using generic ASIX Electronics Corp. AX88179 Gigabit Ethernet device,
+the following test cycle has been implemented:
+ - power on
+ - check logs
+ - shutdown
+ - after detecting the system shutdown, disconnect power
+ - after approximately 60 seconds of sleep, power is restored
+Running some cycles, sometimes error logs like this appear:
+ kernel: ax88179_178a 2-9:1.0 (unnamed net_device) (uninitialized): Failed to write reg index 0x0001: -19
+ kernel: ax88179_178a 2-9:1.0 (unnamed net_device) (uninitialized): Failed to read reg index 0x0001: -19
+ ...
+These failed operations are happening during ax88179_reset execution, so
+the initialization could not be correct.
+
+In order to avoid this, we need to increase the delay after reset and
+clock initial operations. By using these larger values, many cycles
+have been run and no failed operations appear.
+
+It would be better to check some status register to verify when the
+operation has finished, but I have not found any available information
+(neither in the public datasheets nor in the manufacturer's driver). The
+only available information for the necessary delays is the manufacturer's
+driver (original values) but the proposed values are not enough for the
+tested devices.
+
+Fixes: e2ca90c276e1f ("ax88179_178a: ASIX AX88179_178A USB 3.0/2.0 to gigabit ethernet adapter driver")
+Reported-by: Herb Wei <weihao.bj@ieisystem.com>
+Tested-by: Herb Wei <weihao.bj@ieisystem.com>
+Signed-off-by: Jose Ignacio Tornos Martinez <jtornosm@redhat.com>
+Link: https://lore.kernel.org/r/20231120120642.54334-1-jtornosm@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/usb/ax88179_178a.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
+index aff39bf3161de..4ea0e155bb0d5 100644
+--- a/drivers/net/usb/ax88179_178a.c
++++ b/drivers/net/usb/ax88179_178a.c
+@@ -1583,11 +1583,11 @@ static int ax88179_reset(struct usbnet *dev)
+
+ *tmp16 = AX_PHYPWR_RSTCTL_IPRL;
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, tmp16);
+- msleep(200);
++ msleep(500);
+
+ *tmp = AX_CLK_SELECT_ACS | AX_CLK_SELECT_BCS;
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, tmp);
+- msleep(100);
++ msleep(200);
+
+ /* Ethernet PHY Auto Detach*/
+ ax88179_auto_detach(dev);
+--
+2.42.0
+
--- /dev/null
+From 772ce78a53aac0fe540631d7b0a0319ce977576d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Nov 2023 08:13:36 -0500
+Subject: nvmet: nul-terminate the NQNs passed in the connect command
+
+From: Christoph Hellwig <hch@lst.de>
+
+[ Upstream commit 1c22e0295a5eb571c27b53c7371f95699ef705ff ]
+
+The host and subsystem NQNs are passed in the connect command payload and
+interpreted as nul-terminated strings. Ensure they actually are
+nul-terminated before using them.
+
+Fixes: a07b4970f464 ("nvmet: add a generic NVMe target")
+Reported-by: Alon Zahavi <zahavi.alon@gmail.com>
+Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/target/fabrics-cmd.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
+index 43b5bd8bb6a52..d8da840a1c0ed 100644
+--- a/drivers/nvme/target/fabrics-cmd.c
++++ b/drivers/nvme/target/fabrics-cmd.c
+@@ -244,6 +244,8 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
+ goto out;
+ }
+
++ d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
++ d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
+ status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
+ le32_to_cpu(c->kato), &ctrl);
+ if (status)
+@@ -313,6 +315,8 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
+ goto out;
+ }
+
++ d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
++ d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
+ ctrl = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn,
+ le16_to_cpu(d->cntlid), req);
+ if (!ctrl) {
+--
+2.42.0
+
--- /dev/null
+From b6f9c174db7abcdcb8dfe9caa172d87e821408a8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Nov 2023 16:10:18 +0530
+Subject: octeontx2-pf: Fix memory leak during interface down
+
+From: Suman Ghosh <sumang@marvell.com>
+
+[ Upstream commit 5f228d7c8a539714c1e9b7e7534f76bb7979f268 ]
+
+During 'ifconfig <netdev> down' one RSS memory was not getting freed.
+This patch fixes the same.
+
+Fixes: 81a4362016e7 ("octeontx2-pf: Add RSS multi group support")
+Signed-off-by: Suman Ghosh <sumang@marvell.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+index c724131172f3f..1d2d72c60a12c 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+@@ -1919,6 +1919,8 @@ int otx2_stop(struct net_device *netdev)
+ /* Clear RSS enable flag */
+ rss = &pf->hw.rss_info;
+ rss->enable = false;
++ if (!netif_is_rxfh_configured(netdev))
++ kfree(rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]);
+
+ /* Cleanup Queue IRQ */
+ vec = pci_irq_vector(pf->pdev,
+--
+2.42.0
+
--- /dev/null
+From 2b5d451e172b5b3d25678c43997e65f85dc47026 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 Nov 2023 22:26:24 +0530
+Subject: octeontx2-pf: Fix ntuple rule creation to direct packet to VF with
+ higher Rx queue than its PF
+
+From: Suman Ghosh <sumang@marvell.com>
+
+[ Upstream commit 4aa1d8f89b10cdc25a231dabf808d8935e0b137a ]
+
+It is possible to add a ntuple rule which would like to direct packet to
+a VF whose number of queues are greater/less than its PF's queue numbers.
+For example a PF can have 2 Rx queues but a VF created on that PF can have
+8 Rx queues. As of today, ntuple rule will reject rule because it is
+checking the requested queue number against PF's number of Rx queues.
+As a part of this fix, if the action of a ntuple rule is to move a packet
+to a VF's queue then the check is removed. Also, debug information is
+printed to make the user aware that it is the user's responsibility to
+cross check if the requested queue number on that VF is a valid one.
+
+Fixes: f0a1913f8a6f ("octeontx2-pf: Add support for ethtool ntuple filters")
+Signed-off-by: Suman Ghosh <sumang@marvell.com>
+Reviewed-by: Wojciech Drewek <wojciech.drewek@intel.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://lore.kernel.org/r/20231121165624.3664182-1-sumang@marvell.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../marvell/octeontx2/nic/otx2_flows.c | 20 ++++++++++++++++++-
+ 1 file changed, 19 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+index 934c199667b59..5c4a4d3557702 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+@@ -1069,6 +1069,7 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
+ struct ethhdr *eth_hdr;
+ bool new = false;
+ int err = 0;
++ u64 vf_num;
+ u32 ring;
+
+ if (!flow_cfg->max_flows) {
+@@ -1081,7 +1082,21 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
+ if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
+ return -ENOMEM;
+
+- if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
++ /* Number of queues on a VF can be greater or less than
++ * the PF's queue. Hence no need to check for the
++ * queue count. Hence no need to check queue count if PF
++ * is installing for its VF. Below is the expected vf_num value
++ * based on the ethtool commands.
++ *
++ * e.g.
++ * 1. ethtool -U <netdev> ... action -1 ==> vf_num:255
++ * 2. ethtool -U <netdev> ... action <queue_num> ==> vf_num:0
++ * 3. ethtool -U <netdev> ... vf <vf_idx> queue <queue_num> ==>
++ * vf_num:vf_idx+1
++ */
++ vf_num = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
++ if (!is_otx2_vf(pfvf->pcifunc) && !vf_num &&
++ ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
+ return -EINVAL;
+
+ if (fsp->location >= otx2_get_maxflows(flow_cfg))
+@@ -1163,6 +1178,9 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
+ flow_cfg->nr_flows++;
+ }
+
++ if (flow->is_vf)
++ netdev_info(pfvf->netdev,
++ "Make sure that VF's queue number is within its queue limit\n");
+ return 0;
+ }
+
+--
+2.42.0
+
--- /dev/null
+afs-fix-afs_server_list-to-be-cleaned-up-with-rcu.patch
+afs-make-error-on-cell-lookup-failure-consistent-wit.patch
+drm-panel-boe-tv101wum-nl6-fine-tune-the-panel-power.patch
+drm-panel-auo-b101uan08.3-fine-tune-the-panel-power-.patch
+drm-panel-simple-fix-innolux-g101ice-l01-bus-flags.patch
+drm-panel-simple-fix-innolux-g101ice-l01-timings.patch
+wireguard-use-dev_stats_inc.patch
+octeontx2-pf-fix-memory-leak-during-interface-down.patch
+ata-pata_isapnp-add-missing-error-check-for-devm_iop.patch
+drm-i915-do-not-clean-gt-table-on-error-path.patch
+drm-rockchip-vop-fix-color-for-rgb888-bgr888-format-.patch
+hid-fix-hid-device-resource-race-between-hid-core-an.patch
+ipv4-correct-silence-an-endian-warning-in-__ip_do_re.patch
+net-usb-ax88179_178a-fix-failed-operations-during-ax.patch
+net-smc-avoid-data-corruption-caused-by-decline.patch
+arm-xen-fix-xen_vcpu_info-allocation-alignment.patch
+octeontx2-pf-fix-ntuple-rule-creation-to-direct-pack.patch
+amd-xgbe-handle-corner-case-during-sfp-hotplug.patch
+amd-xgbe-handle-the-corner-case-during-tx-completion.patch
+amd-xgbe-propagate-the-correct-speed-and-duplex-stat.patch
+net-axienet-fix-check-for-partial-tx-checksum.patch
+afs-return-enoent-if-no-cell-dns-record-can-be-found.patch
+afs-fix-file-locking-on-r-o-volumes-to-operate-in-lo.patch
+mm-kfence-decouple-kfence-from-page-granularity-mapp.patch
+arm64-mm-fix-rodata-on-when-config_rodata_full_defau.patch
+i40e-use-err_ptr-error-print-in-i40e-messages.patch
+i40e-fix-adding-unsupported-cloud-filters.patch
+nvmet-nul-terminate-the-nqns-passed-in-the-connect-c.patch
+usb-dwc3-qcom-fix-resource-leaks-on-probe-deferral.patch
+usb-dwc3-qcom-fix-acpi-platform-device-leak.patch
+lockdep-fix-block-chain-corruption.patch
+cifs-minor-cleanup-of-some-headers.patch
+smb3-allow-dumping-session-and-tcon-id-to-improve-st.patch
+cifs-print-last-update-time-for-interface-list.patch
+cifs-distribute-channels-across-interfaces-based-on-.patch
+cifs-account-for-primary-channel-in-the-interface-li.patch
+cifs-fix-leak-of-iface-for-primary-channel.patch
+mips-kvm-fix-a-build-warning-about-variable-set-but-.patch
+dm-delay-for-short-delays-use-kthread-instead-of-tim.patch
+dm-delay-fix-a-race-between-delay_presuspend-and-del.patch
+media-camss-split-power-domain-management.patch
+media-camss-convert-to-platform-remove-callback-retu.patch
+media-qcom-initialise-v4l2-async-notifier-later.patch
+media-qcom-camss-fix-v4l2-async-notifier-error-path.patch
+media-qcom-camss-fix-genpd-cleanup.patch
+ext4-add-a-new-helper-to-check-if-es-must-be-kept.patch
+ext4-factor-out-__es_alloc_extent-and-__es_free_exte.patch
+ext4-use-pre-allocated-es-in-__es_insert_extent.patch
+ext4-use-pre-allocated-es-in-__es_remove_extent.patch
+ext4-using-nofail-preallocation-in-ext4_es_remove_ex.patch
+ext4-using-nofail-preallocation-in-ext4_es_insert_de.patch
+ext4-using-nofail-preallocation-in-ext4_es_insert_ex.patch
+ext4-fix-slab-use-after-free-in-ext4_es_insert_exten.patch
+ext4-make-sure-allocate-pending-entry-not-fail.patch
--- /dev/null
+From ad04c180595cb5ef5c4ccda748c08914cebdda10 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 9 Nov 2023 15:28:12 -0600
+Subject: smb3: allow dumping session and tcon id to improve stats analysis and
+ debugging
+
+From: Steve French <stfrench@microsoft.com>
+
+[ Upstream commit de4eceab578ead12a71e5b5588a57e142bbe8ceb ]
+
+When multiple mounts are to the same share from the same client it was not
+possible to determine which section of /proc/fs/cifs/Stats (and DebugData)
+correspond to that mount. In some recent examples this turned out to be
+a significant problem when trying to analyze performance data - since
+there are many cases where unless we know the tree id and session id we
+can't figure out which stats (e.g. number of SMB3.1.1 requests by type,
+the total time they take, which is slowest, how many fail etc.) apply to
+which mount. The only existing loosely related ioctl CIFS_IOC_GET_MNT_INFO
+does not return the information needed to uniquely identify which tcon
+is which mount although it does return various flags and device info.
+
+Add a cifs.ko ioctl CIFS_IOC_GET_TCON_INFO (0x800ccf0c) to return tid,
+session id, tree connect count.
+
+Cc: stable@vger.kernel.org
+Reviewed-by: Shyam Prasad N <sprasad@microsoft.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/cifs_ioctl.h | 6 ++++++
+ fs/smb/client/ioctl.c | 25 +++++++++++++++++++++++++
+ 2 files changed, 31 insertions(+)
+
+diff --git a/fs/smb/client/cifs_ioctl.h b/fs/smb/client/cifs_ioctl.h
+index 332588e77c311..26327442e383b 100644
+--- a/fs/smb/client/cifs_ioctl.h
++++ b/fs/smb/client/cifs_ioctl.h
+@@ -26,6 +26,11 @@ struct smb_mnt_fs_info {
+ __u64 cifs_posix_caps;
+ } __packed;
+
++struct smb_mnt_tcon_info {
++ __u32 tid;
++ __u64 session_id;
++} __packed;
++
+ struct smb_snapshot_array {
+ __u32 number_of_snapshots;
+ __u32 number_of_snapshots_returned;
+@@ -108,6 +113,7 @@ struct smb3_notify_info {
+ #define CIFS_IOC_NOTIFY _IOW(CIFS_IOCTL_MAGIC, 9, struct smb3_notify)
+ #define CIFS_DUMP_FULL_KEY _IOWR(CIFS_IOCTL_MAGIC, 10, struct smb3_full_key_debug_info)
+ #define CIFS_IOC_NOTIFY_INFO _IOWR(CIFS_IOCTL_MAGIC, 11, struct smb3_notify_info)
++#define CIFS_IOC_GET_TCON_INFO _IOR(CIFS_IOCTL_MAGIC, 12, struct smb_mnt_tcon_info)
+ #define CIFS_IOC_SHUTDOWN _IOR('X', 125, __u32)
+
+ /*
+diff --git a/fs/smb/client/ioctl.c b/fs/smb/client/ioctl.c
+index 6419ec47c2a85..ae9905e2b9d4a 100644
+--- a/fs/smb/client/ioctl.c
++++ b/fs/smb/client/ioctl.c
+@@ -117,6 +117,20 @@ static long cifs_ioctl_copychunk(unsigned int xid, struct file *dst_file,
+ return rc;
+ }
+
++static long smb_mnt_get_tcon_info(struct cifs_tcon *tcon, void __user *arg)
++{
++ int rc = 0;
++ struct smb_mnt_tcon_info tcon_inf;
++
++ tcon_inf.tid = tcon->tid;
++ tcon_inf.session_id = tcon->ses->Suid;
++
++ if (copy_to_user(arg, &tcon_inf, sizeof(struct smb_mnt_tcon_info)))
++ rc = -EFAULT;
++
++ return rc;
++}
++
+ static long smb_mnt_get_fsinfo(unsigned int xid, struct cifs_tcon *tcon,
+ void __user *arg)
+ {
+@@ -410,6 +424,17 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
+ tcon = tlink_tcon(pSMBFile->tlink);
+ rc = smb_mnt_get_fsinfo(xid, tcon, (void __user *)arg);
+ break;
++ case CIFS_IOC_GET_TCON_INFO:
++ cifs_sb = CIFS_SB(inode->i_sb);
++ tlink = cifs_sb_tlink(cifs_sb);
++ if (IS_ERR(tlink)) {
++ rc = PTR_ERR(tlink);
++ break;
++ }
++ tcon = tlink_tcon(tlink);
++ rc = smb_mnt_get_tcon_info(tcon, (void __user *)arg);
++ cifs_put_tlink(tlink);
++ break;
+ case CIFS_ENUMERATE_SNAPSHOTS:
+ if (pSMBFile == NULL)
+ break;
+--
+2.42.0
+
--- /dev/null
+From d20a3b86dfa7b12556674ee52c84507c8f2a3b99 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Nov 2023 18:36:50 +0100
+Subject: USB: dwc3: qcom: fix ACPI platform device leak
+
+From: Johan Hovold <johan+linaro@kernel.org>
+
+[ Upstream commit 9cf87666fc6e08572341fe08ecd909935998fbbd ]
+
+Make sure to free the "urs" platform device, which is created for some
+ACPI platforms, on probe errors and on driver unbind.
+
+Compile-tested only.
+
+Fixes: c25c210f590e ("usb: dwc3: qcom: add URS Host support for sdm845 ACPI boot")
+Cc: Shawn Guo <shawn.guo@linaro.org>
+Signed-off-by: Johan Hovold <johan+linaro@kernel.org>
+Acked-by: Andrew Halaney <ahalaney@redhat.com>
+Acked-by: Shawn Guo <shawn.guo@linaro.org>
+Link: https://lore.kernel.org/r/20231117173650.21161-4-johan+linaro@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/dwc3/dwc3-qcom.c | 37 +++++++++++++++++++++++++++++-------
+ 1 file changed, 30 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
+index 0c68227fe899e..0f51a6c70b781 100644
+--- a/drivers/usb/dwc3/dwc3-qcom.c
++++ b/drivers/usb/dwc3/dwc3-qcom.c
+@@ -768,9 +768,9 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev)
+ return ret;
+ }
+
+-static struct platform_device *
+-dwc3_qcom_create_urs_usb_platdev(struct device *dev)
++static struct platform_device *dwc3_qcom_create_urs_usb_platdev(struct device *dev)
+ {
++ struct platform_device *urs_usb = NULL;
+ struct fwnode_handle *fwh;
+ struct acpi_device *adev;
+ char name[8];
+@@ -790,9 +790,26 @@ dwc3_qcom_create_urs_usb_platdev(struct device *dev)
+
+ adev = to_acpi_device_node(fwh);
+ if (!adev)
+- return NULL;
++ goto err_put_handle;
++
++ urs_usb = acpi_create_platform_device(adev, NULL);
++ if (IS_ERR_OR_NULL(urs_usb))
++ goto err_put_handle;
++
++ return urs_usb;
+
+- return acpi_create_platform_device(adev, NULL);
++err_put_handle:
++ fwnode_handle_put(fwh);
++
++ return urs_usb;
++}
++
++static void dwc3_qcom_destroy_urs_usb_platdev(struct platform_device *urs_usb)
++{
++ struct fwnode_handle *fwh = urs_usb->dev.fwnode;
++
++ platform_device_unregister(urs_usb);
++ fwnode_handle_put(fwh);
+ }
+
+ static int dwc3_qcom_probe(struct platform_device *pdev)
+@@ -877,13 +894,13 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
+ qcom->qscratch_base = devm_ioremap_resource(dev, parent_res);
+ if (IS_ERR(qcom->qscratch_base)) {
+ ret = PTR_ERR(qcom->qscratch_base);
+- goto clk_disable;
++ goto free_urs;
+ }
+
+ ret = dwc3_qcom_setup_irq(pdev);
+ if (ret) {
+ dev_err(dev, "failed to setup IRQs, err=%d\n", ret);
+- goto clk_disable;
++ goto free_urs;
+ }
+
+ /*
+@@ -902,7 +919,7 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
+
+ if (ret) {
+ dev_err(dev, "failed to register DWC3 Core, err=%d\n", ret);
+- goto clk_disable;
++ goto free_urs;
+ }
+
+ ret = dwc3_qcom_interconnect_init(qcom);
+@@ -939,6 +956,9 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
+ else
+ platform_device_del(qcom->dwc3);
+ platform_device_put(qcom->dwc3);
++free_urs:
++ if (qcom->urs_usb)
++ dwc3_qcom_destroy_urs_usb_platdev(qcom->urs_usb);
+ clk_disable:
+ for (i = qcom->num_clocks - 1; i >= 0; i--) {
+ clk_disable_unprepare(qcom->clks[i]);
+@@ -964,6 +984,9 @@ static int dwc3_qcom_remove(struct platform_device *pdev)
+ platform_device_del(qcom->dwc3);
+ platform_device_put(qcom->dwc3);
+
++ if (qcom->urs_usb)
++ dwc3_qcom_destroy_urs_usb_platdev(qcom->urs_usb);
++
+ for (i = qcom->num_clocks - 1; i >= 0; i--) {
+ clk_disable_unprepare(qcom->clks[i]);
+ clk_put(qcom->clks[i]);
+--
+2.42.0
+
--- /dev/null
+From aa7624df60113775f4daef7eb5878a5625842137 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Nov 2023 18:36:48 +0100
+Subject: USB: dwc3: qcom: fix resource leaks on probe deferral
+
+From: Johan Hovold <johan+linaro@kernel.org>
+
+[ Upstream commit 51392a1879ff06dc21b68aef4825f6ef68a7be42 ]
+
+The driver needs to deregister and free the newly allocated dwc3 core
+platform device on ACPI probe errors (e.g. probe deferral) and on driver
+unbind but instead it leaked those resources while erroneously dropping
+a reference to the parent platform device which is still in use.
+
+For OF probing the driver takes a reference to the dwc3 core platform
+device which has also always been leaked.
+
+Fix the broken ACPI tear down and make sure to drop the dwc3 core
+reference for both OF and ACPI.
+
+Fixes: 8fd95da2cfb5 ("usb: dwc3: qcom: Release the correct resources in dwc3_qcom_remove()")
+Fixes: 2bc02355f8ba ("usb: dwc3: qcom: Add support for booting with ACPI")
+Fixes: a4333c3a6ba9 ("usb: dwc3: Add Qualcomm DWC3 glue driver")
+Cc: stable@vger.kernel.org # 4.18
+Cc: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Cc: Lee Jones <lee@kernel.org>
+Signed-off-by: Johan Hovold <johan+linaro@kernel.org>
+Acked-by: Andrew Halaney <ahalaney@redhat.com>
+Link: https://lore.kernel.org/r/20231117173650.21161-2-johan+linaro@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: 9cf87666fc6e ("USB: dwc3: qcom: fix ACPI platform device leak")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/dwc3/dwc3-qcom.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
+index 72c22851d7eef..0c68227fe899e 100644
+--- a/drivers/usb/dwc3/dwc3-qcom.c
++++ b/drivers/usb/dwc3/dwc3-qcom.c
+@@ -759,6 +759,7 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev)
+ if (!qcom->dwc3) {
+ ret = -ENODEV;
+ dev_err(dev, "failed to get dwc3 platform device\n");
++ of_platform_depopulate(dev);
+ }
+
+ node_put:
+@@ -901,7 +902,7 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
+
+ if (ret) {
+ dev_err(dev, "failed to register DWC3 Core, err=%d\n", ret);
+- goto depopulate;
++ goto clk_disable;
+ }
+
+ ret = dwc3_qcom_interconnect_init(qcom);
+@@ -936,7 +937,8 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
+ if (np)
+ of_platform_depopulate(&pdev->dev);
+ else
+- platform_device_put(pdev);
++ platform_device_del(qcom->dwc3);
++ platform_device_put(qcom->dwc3);
+ clk_disable:
+ for (i = qcom->num_clocks - 1; i >= 0; i--) {
+ clk_disable_unprepare(qcom->clks[i]);
+@@ -959,7 +961,8 @@ static int dwc3_qcom_remove(struct platform_device *pdev)
+ if (np)
+ of_platform_depopulate(&pdev->dev);
+ else
+- platform_device_put(pdev);
++ platform_device_del(qcom->dwc3);
++ platform_device_put(qcom->dwc3);
+
+ for (i = qcom->num_clocks - 1; i >= 0; i--) {
+ clk_disable_unprepare(qcom->clks[i]);
+--
+2.42.0
+
--- /dev/null
+From c1246dd29d8240443027cb33e4438f40b88276e0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Nov 2023 14:17:33 +0000
+Subject: wireguard: use DEV_STATS_INC()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 93da8d75a66568ba4bb5b14ad2833acd7304cd02 ]
+
+wg_xmit() can be called concurrently, KCSAN reported [1]
+some device stats updates can be lost.
+
+Use DEV_STATS_INC() for this unlikely case.
+
+[1]
+BUG: KCSAN: data-race in wg_xmit / wg_xmit
+
+read-write to 0xffff888104239160 of 8 bytes by task 1375 on cpu 0:
+wg_xmit+0x60f/0x680 drivers/net/wireguard/device.c:231
+__netdev_start_xmit include/linux/netdevice.h:4918 [inline]
+netdev_start_xmit include/linux/netdevice.h:4932 [inline]
+xmit_one net/core/dev.c:3543 [inline]
+dev_hard_start_xmit+0x11b/0x3f0 net/core/dev.c:3559
+...
+
+read-write to 0xffff888104239160 of 8 bytes by task 1378 on cpu 1:
+wg_xmit+0x60f/0x680 drivers/net/wireguard/device.c:231
+__netdev_start_xmit include/linux/netdevice.h:4918 [inline]
+netdev_start_xmit include/linux/netdevice.h:4932 [inline]
+xmit_one net/core/dev.c:3543 [inline]
+dev_hard_start_xmit+0x11b/0x3f0 net/core/dev.c:3559
+...
+
+v2: also change wg_packet_consume_data_done() (Hangbin Liu)
+ and wg_packet_purge_staged_packets()
+
+Fixes: e7096c131e51 ("net: WireGuard secure network tunnel")
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Jason A. Donenfeld <Jason@zx2c4.com>
+Cc: Hangbin Liu <liuhangbin@gmail.com>
+Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
+Reviewed-by: Hangbin Liu <liuhangbin@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireguard/device.c | 4 ++--
+ drivers/net/wireguard/receive.c | 12 ++++++------
+ drivers/net/wireguard/send.c | 3 ++-
+ 3 files changed, 10 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
+index d58e9f818d3b7..895a621c9e267 100644
+--- a/drivers/net/wireguard/device.c
++++ b/drivers/net/wireguard/device.c
+@@ -209,7 +209,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
+ */
+ while (skb_queue_len(&peer->staged_packet_queue) > MAX_STAGED_PACKETS) {
+ dev_kfree_skb(__skb_dequeue(&peer->staged_packet_queue));
+- ++dev->stats.tx_dropped;
++ DEV_STATS_INC(dev, tx_dropped);
+ }
+ skb_queue_splice_tail(&packets, &peer->staged_packet_queue);
+ spin_unlock_bh(&peer->staged_packet_queue.lock);
+@@ -227,7 +227,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ icmpv6_ndo_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
+ err:
+- ++dev->stats.tx_errors;
++ DEV_STATS_INC(dev, tx_errors);
+ kfree_skb(skb);
+ return ret;
+ }
+diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c
+index 0b3f0c8435509..a176653c88616 100644
+--- a/drivers/net/wireguard/receive.c
++++ b/drivers/net/wireguard/receive.c
+@@ -416,20 +416,20 @@ static void wg_packet_consume_data_done(struct wg_peer *peer,
+ net_dbg_skb_ratelimited("%s: Packet has unallowed src IP (%pISc) from peer %llu (%pISpfsc)\n",
+ dev->name, skb, peer->internal_id,
+ &peer->endpoint.addr);
+- ++dev->stats.rx_errors;
+- ++dev->stats.rx_frame_errors;
++ DEV_STATS_INC(dev, rx_errors);
++ DEV_STATS_INC(dev, rx_frame_errors);
+ goto packet_processed;
+ dishonest_packet_type:
+ net_dbg_ratelimited("%s: Packet is neither ipv4 nor ipv6 from peer %llu (%pISpfsc)\n",
+ dev->name, peer->internal_id, &peer->endpoint.addr);
+- ++dev->stats.rx_errors;
+- ++dev->stats.rx_frame_errors;
++ DEV_STATS_INC(dev, rx_errors);
++ DEV_STATS_INC(dev, rx_frame_errors);
+ goto packet_processed;
+ dishonest_packet_size:
+ net_dbg_ratelimited("%s: Packet has incorrect size from peer %llu (%pISpfsc)\n",
+ dev->name, peer->internal_id, &peer->endpoint.addr);
+- ++dev->stats.rx_errors;
+- ++dev->stats.rx_length_errors;
++ DEV_STATS_INC(dev, rx_errors);
++ DEV_STATS_INC(dev, rx_length_errors);
+ goto packet_processed;
+ packet_processed:
+ dev_kfree_skb(skb);
+diff --git a/drivers/net/wireguard/send.c b/drivers/net/wireguard/send.c
+index 95c853b59e1da..0d48e0f4a1ba3 100644
+--- a/drivers/net/wireguard/send.c
++++ b/drivers/net/wireguard/send.c
+@@ -333,7 +333,8 @@ static void wg_packet_create_data(struct wg_peer *peer, struct sk_buff *first)
+ void wg_packet_purge_staged_packets(struct wg_peer *peer)
+ {
+ spin_lock_bh(&peer->staged_packet_queue.lock);
+- peer->device->dev->stats.tx_dropped += peer->staged_packet_queue.qlen;
++ DEV_STATS_ADD(peer->device->dev, tx_dropped,
++ peer->staged_packet_queue.qlen);
+ __skb_queue_purge(&peer->staged_packet_queue);
+ spin_unlock_bh(&peer->staged_packet_queue.lock);
+ }
+--
+2.42.0
+