--- /dev/null
+From f2d0a78f2c9a6fe19353b3f993184e49499ced73 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 31 Aug 2022 13:16:42 +0100
+Subject: afs: Use the operation issue time instead of the reply time for
+ callbacks
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit 7903192c4b4a82d792cb0dc5e2779a2efe60d45b ]
+
+rxrpc and kafs between them try to use the receive timestamp on the first
+data packet (ie. the one with sequence number 1) as a base from which to
+calculate the time at which callback promise and lock expiration occur.
+
+However, we don't know how long it took for the server to send us the reply
+after it had completed the basic part of the operation - it might then,
+for instance, have to send a bunch of callback breaks, depending on the
+particular operation.
+
+Fix this by using the time at which the operation is issued on the client
+as a base instead. That should never be longer than the server's idea of
+the expiry time.
+
+Fixes: 781070551c26 ("afs: Fix calculation of callback expiry time")
+Fixes: 2070a3e44962 ("rxrpc: Allow the reply time to be obtained on a client call")
+Suggested-by: Jeffrey E Altman <jaltman@auristor.com>
+Signed-off-by: David Howells <dhowells@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/afs/flock.c | 2 +-
+ fs/afs/fsclient.c | 2 +-
+ fs/afs/internal.h | 3 +--
+ fs/afs/rxrpc.c | 7 +------
+ fs/afs/yfsclient.c | 3 +--
+ 5 files changed, 5 insertions(+), 12 deletions(-)
+
+diff --git a/fs/afs/flock.c b/fs/afs/flock.c
+index c4210a3964d8b..bbcc5afd15760 100644
+--- a/fs/afs/flock.c
++++ b/fs/afs/flock.c
+@@ -76,7 +76,7 @@ void afs_lock_op_done(struct afs_call *call)
+ if (call->error == 0) {
+ spin_lock(&vnode->lock);
+ trace_afs_flock_ev(vnode, NULL, afs_flock_timestamp, 0);
+- vnode->locked_at = call->reply_time;
++ vnode->locked_at = call->issue_time;
+ afs_schedule_lock_extension(vnode);
+ spin_unlock(&vnode->lock);
+ }
+diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
+index 4943413d9c5f7..7d37f63ef0f09 100644
+--- a/fs/afs/fsclient.c
++++ b/fs/afs/fsclient.c
+@@ -131,7 +131,7 @@ static void xdr_decode_AFSFetchStatus(const __be32 **_bp,
+
+ static time64_t xdr_decode_expiry(struct afs_call *call, u32 expiry)
+ {
+- return ktime_divns(call->reply_time, NSEC_PER_SEC) + expiry;
++ return ktime_divns(call->issue_time, NSEC_PER_SEC) + expiry;
+ }
+
+ static void xdr_decode_AFSCallBack(const __be32 **_bp,
+diff --git a/fs/afs/internal.h b/fs/afs/internal.h
+index 0ad97a8fc0d49..567e61b553f56 100644
+--- a/fs/afs/internal.h
++++ b/fs/afs/internal.h
+@@ -138,7 +138,6 @@ struct afs_call {
+ bool need_attention; /* T if RxRPC poked us */
+ bool async; /* T if asynchronous */
+ bool upgrade; /* T to request service upgrade */
+- bool have_reply_time; /* T if have got reply_time */
+ bool intr; /* T if interruptible */
+ bool unmarshalling_error; /* T if an unmarshalling error occurred */
+ u16 service_id; /* Actual service ID (after upgrade) */
+@@ -152,7 +151,7 @@ struct afs_call {
+ } __attribute__((packed));
+ __be64 tmp64;
+ };
+- ktime_t reply_time; /* Time of first reply packet */
++ ktime_t issue_time; /* Time of issue of operation */
+ };
+
+ struct afs_call_type {
+diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
+index a5434f3e57c68..e3de7fea36435 100644
+--- a/fs/afs/rxrpc.c
++++ b/fs/afs/rxrpc.c
+@@ -347,6 +347,7 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp)
+ if (call->max_lifespan)
+ rxrpc_kernel_set_max_life(call->net->socket, rxcall,
+ call->max_lifespan);
++ call->issue_time = ktime_get_real();
+
+ /* send the request */
+ iov[0].iov_base = call->request;
+@@ -497,12 +498,6 @@ static void afs_deliver_to_call(struct afs_call *call)
+ return;
+ }
+
+- if (!call->have_reply_time &&
+- rxrpc_kernel_get_reply_time(call->net->socket,
+- call->rxcall,
+- &call->reply_time))
+- call->have_reply_time = true;
+-
+ ret = call->type->deliver(call);
+ state = READ_ONCE(call->state);
+ if (ret == 0 && call->unmarshalling_error)
+diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
+index 2b35cba8ad62b..88ea20e79ae27 100644
+--- a/fs/afs/yfsclient.c
++++ b/fs/afs/yfsclient.c
+@@ -239,8 +239,7 @@ static void xdr_decode_YFSCallBack(const __be32 **_bp,
+ struct afs_callback *cb = &scb->callback;
+ ktime_t cb_expiry;
+
+- cb_expiry = call->reply_time;
+- cb_expiry = ktime_add(cb_expiry, xdr_to_u64(x->expiration_time) * 100);
++ cb_expiry = ktime_add(call->issue_time, xdr_to_u64(x->expiration_time) * 100);
+ cb->expires_at = ktime_divns(cb_expiry, NSEC_PER_SEC);
+ scb->have_cb = true;
+ *_bp += xdr_size(x);
+--
+2.35.1
+
--- /dev/null
+From 7cc992b3e6af67b773d3997c487d756293d8cae9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 31 Aug 2022 14:59:00 +0200
+Subject: ALSA: usb-audio: Inform the delayed registration more properly
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 7e1afce5866e02b45bf88c27dd7de1b9dfade1cc ]
+
+The info message that was added in the commit a4aad5636c72 ("ALSA:
+usb-audio: Inform devices that need delayed registration") is actually
+useful for knowing when delayed registration is needed. However, it
+turned out that this doesn't catch all cases; namely, it warned
+only when a PCM stream is attached to an existing PCM instance, but
+not when a new PCM instance is created. This caused confusion,
+suggesting that no further delayed registration was needed.
+
+This patch moves the check to the code path for either adding a stream
+or creating a PCM instance. Also, make it simpler by checking the
+card->registered flag instead of querying each snd_device state.
+
+Fixes: a4aad5636c72 ("ALSA: usb-audio: Inform devices that need delayed registration")
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=216082
+Link: https://lore.kernel.org/r/20220831125901.4660-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/stream.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/sound/usb/stream.c b/sound/usb/stream.c
+index 40ce8a1cb318a..f10f4e6d3fb85 100644
+--- a/sound/usb/stream.c
++++ b/sound/usb/stream.c
+@@ -495,6 +495,10 @@ static int __snd_usb_add_audio_stream(struct snd_usb_audio *chip,
+ return 0;
+ }
+ }
++
++ if (chip->card->registered)
++ chip->need_delayed_register = true;
++
+ /* look for an empty stream */
+ list_for_each_entry(as, &chip->pcm_list, list) {
+ if (as->fmt_type != fp->fmt_type)
+@@ -502,9 +506,6 @@ static int __snd_usb_add_audio_stream(struct snd_usb_audio *chip,
+ subs = &as->substream[stream];
+ if (subs->ep_num)
+ continue;
+- if (snd_device_get_state(chip->card, as->pcm) !=
+- SNDRV_DEV_BUILD)
+- chip->need_delayed_register = true;
+ err = snd_pcm_new_stream(as->pcm, stream, 1);
+ if (err < 0)
+ return err;
+--
+2.35.1
+
--- /dev/null
+From 1acd7d42a260f8e104cf48307f6e4a4159dcc8be Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 31 Aug 2022 14:59:01 +0200
+Subject: ALSA: usb-audio: Register card again for iface over delayed_register
+ option
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 2027f114686e0f3f1f39971964dfc618637c88c2 ]
+
+When the delayed registration is specified via either delayed_register
+option or the quirk, we delay the invocation of snd_card_register()
+until the given interface. But if a wrong value has been set there
+and there are more interfaces beyond the given interface number, the
+snd_card_register() call would be missing for those interfaces.
+
+This patch catches up those missing calls by fixing the comparison of
+the interface number. Now the call is skipped only if the processed
+interface is less than the given interface, instead of requiring an
+exact match.
+
+Fixes: b70038ef4fea ("ALSA: usb-audio: Add delayed_register option")
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=216082
+Link: https://lore.kernel.org/r/20220831125901.4660-2-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/card.c | 2 +-
+ sound/usb/quirks.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/sound/usb/card.c b/sound/usb/card.c
+index ff5f8de1bc540..713b84d8d42f1 100644
+--- a/sound/usb/card.c
++++ b/sound/usb/card.c
+@@ -698,7 +698,7 @@ static bool check_delayed_register_option(struct snd_usb_audio *chip, int iface)
+ if (delayed_register[i] &&
+ sscanf(delayed_register[i], "%x:%x", &id, &inum) == 2 &&
+ id == chip->usb_id)
+- return inum != iface;
++ return iface < inum;
+ }
+
+ return false;
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 9bfead5efc4c1..5b4d8f5eade20 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1764,7 +1764,7 @@ bool snd_usb_registration_quirk(struct snd_usb_audio *chip, int iface)
+
+ for (q = registration_quirks; q->usb_id; q++)
+ if (chip->usb_id == q->usb_id)
+- return iface != q->interface;
++ return iface < q->interface;
+
+ /* Register as normal */
+ return false;
+--
+2.35.1
+
--- /dev/null
+From 23ca1760ca72e539780d8a51d2689df4754515c3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 26 Aug 2022 11:39:21 +0300
+Subject: ARM: at91: pm: fix DDR recalibration when resuming from backup and
+ self-refresh
+
+From: Claudiu Beznea <claudiu.beznea@microchip.com>
+
+[ Upstream commit 7a94b83a7dc551607b6c4400df29151e6a951f07 ]
+
+On SAMA7G5, when resuming from backup and self-refresh, the bootloader
+performs DDR PHY recalibration by restoring the value of ZQ0SR0 (stored
+in RAM by Linux before going to backup and self-refresh). It has been
+discovered that the current procedure doesn't work for all possible values
+that might go to ZQ0SR0 due to a hardware bug. The workaround for this is to
+avoid storing some values in ZQ0SR0. Thus Linux will read the ZQ0SR0
+register and cache its value in RAM after processing it (using the
+modified_gray_code array). The bootloader will restore the processed value.
+
+Fixes: d2d4716d8384 ("ARM: at91: pm: save ddr phy calibration data to securam")
+Suggested-by: Frederic Schumacher <frederic.schumacher@microchip.com>
+Signed-off-by: Claudiu Beznea <claudiu.beznea@microchip.com>
+Link: https://lore.kernel.org/r/20220826083927.3107272-4-claudiu.beznea@microchip.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/mach-at91/pm.c | 36 ++++++++++++++++++++++++++++++++----
+ include/soc/at91/sama7-ddr.h | 4 ++++
+ 2 files changed, 36 insertions(+), 4 deletions(-)
+
+diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
+index ed1050404ef0a..c8cc993ca8ca1 100644
+--- a/arch/arm/mach-at91/pm.c
++++ b/arch/arm/mach-at91/pm.c
+@@ -350,9 +350,41 @@ extern u32 at91_pm_suspend_in_sram_sz;
+
+ static int at91_suspend_finish(unsigned long val)
+ {
++ unsigned char modified_gray_code[] = {
++ 0x00, 0x01, 0x02, 0x03, 0x06, 0x07, 0x04, 0x05, 0x0c, 0x0d,
++ 0x0e, 0x0f, 0x0a, 0x0b, 0x08, 0x09, 0x18, 0x19, 0x1a, 0x1b,
++ 0x1e, 0x1f, 0x1c, 0x1d, 0x14, 0x15, 0x16, 0x17, 0x12, 0x13,
++ 0x10, 0x11,
++ };
++ unsigned int tmp, index;
+ int i;
+
+ if (soc_pm.data.mode == AT91_PM_BACKUP && soc_pm.data.ramc_phy) {
++ /*
++ * Bootloader will perform DDR recalibration and will try to
++ * restore the ZQ0SR0 with the value saved here. But the
++ * calibration is buggy and restoring some values from ZQ0SR0
++ * is forbidden and risky thus we need to provide processed
++ * values for these (modified gray code values).
++ */
++ tmp = readl(soc_pm.data.ramc_phy + DDR3PHY_ZQ0SR0);
++
++ /* Store pull-down output impedance select. */
++ index = (tmp >> DDR3PHY_ZQ0SR0_PDO_OFF) & 0x1f;
++ soc_pm.bu->ddr_phy_calibration[0] = modified_gray_code[index];
++
++ /* Store pull-up output impedance select. */
++ index = (tmp >> DDR3PHY_ZQ0SR0_PUO_OFF) & 0x1f;
++ soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];
++
++ /* Store pull-down on-die termination impedance select. */
++ index = (tmp >> DDR3PHY_ZQ0SR0_PDODT_OFF) & 0x1f;
++ soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];
++
++ /* Store pull-up on-die termination impedance select. */
++ index = (tmp >> DDR3PHY_ZQ0SRO_PUODT_OFF) & 0x1f;
++ soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];
++
+ /*
+ * The 1st 8 words of memory might get corrupted in the process
+ * of DDR PHY recalibration; it is saved here in securam and it
+@@ -841,10 +873,6 @@ static int __init at91_pm_backup_init(void)
+ of_scan_flat_dt(at91_pm_backup_scan_memcs, &located);
+ if (!located)
+ goto securam_fail;
+-
+- /* DDR3PHY_ZQ0SR0 */
+- soc_pm.bu->ddr_phy_calibration[0] = readl(soc_pm.data.ramc_phy +
+- 0x188);
+ }
+
+ return 0;
+diff --git a/include/soc/at91/sama7-ddr.h b/include/soc/at91/sama7-ddr.h
+index f47a933df82ea..72d19887ab810 100644
+--- a/include/soc/at91/sama7-ddr.h
++++ b/include/soc/at91/sama7-ddr.h
+@@ -40,6 +40,10 @@
+ #define DDR3PHY_DSGCR_ODTPDD_ODT0 (1 << 20) /* ODT[0] Power Down Driver */
+
+ #define DDR3PHY_ZQ0SR0 (0x188) /* ZQ status register 0 */
++#define DDR3PHY_ZQ0SR0_PDO_OFF (0) /* Pull-down output impedance select offset */
++#define DDR3PHY_ZQ0SR0_PUO_OFF (5) /* Pull-up output impedance select offset */
++#define DDR3PHY_ZQ0SR0_PDODT_OFF (10) /* Pull-down on-die termination impedance select offset */
++#define DDR3PHY_ZQ0SRO_PUODT_OFF (15) /* Pull-up on-die termination impedance select offset */
+
+ #define DDR3PHY_DX0DLLCR (0x1CC) /* DDR3PHY DATX8 DLL Control Register */
+ #define DDR3PHY_DX1DLLCR (0x20C) /* DDR3PHY DATX8 DLL Control Register */
+--
+2.35.1
+
--- /dev/null
+From a76ecfd53cf72362d3954140589151b573f93a91 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 26 Aug 2022 11:39:20 +0300
+Subject: ARM: at91: pm: fix self-refresh for sama7g5
+
+From: Claudiu Beznea <claudiu.beznea@microchip.com>
+
+[ Upstream commit a02875c4cbd6f3d2f33d70cc158a19ef02d4b84f ]
+
+It has been discovered that on some parts, from time to time, the self-refresh
+procedure doesn't work as expected. Debugging and investigating it proved
+that disabling the AC DLL introduces glitches in the RAM controllers which
+lead to unexpected behavior. This is confirmed as a hardware bug. DLL
+bypass disables 3 DLLs: 2 DX DLLs and the AC DLL. Thus, keep only the DX DLLs
+disabled. This introduces 6mA of extra current consumption on VDDCORE when
+switching to any ULP mode or standby mode, but the self-refresh procedure
+still works.
+
+Fixes: f0bbf17958e8 ("ARM: at91: pm: add self-refresh support for sama7g5")
+Suggested-by: Frederic Schumacher <frederic.schumacher@microchip.com>
+Signed-off-by: Claudiu Beznea <claudiu.beznea@microchip.com>
+Tested-by: Cristian Birsan <cristian.birsan@microchip.com>
+Link: https://lore.kernel.org/r/20220826083927.3107272-3-claudiu.beznea@microchip.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/mach-at91/pm_suspend.S | 24 +++++++++++++++++-------
+ include/soc/at91/sama7-ddr.h | 4 ++++
+ 2 files changed, 21 insertions(+), 7 deletions(-)
+
+diff --git a/arch/arm/mach-at91/pm_suspend.S b/arch/arm/mach-at91/pm_suspend.S
+index fdb4f63ecde4b..65cfcc19a936c 100644
+--- a/arch/arm/mach-at91/pm_suspend.S
++++ b/arch/arm/mach-at91/pm_suspend.S
+@@ -172,9 +172,15 @@ sr_ena_2:
+ /* Put DDR PHY's DLL in bypass mode for non-backup modes. */
+ cmp r7, #AT91_PM_BACKUP
+ beq sr_ena_3
+- ldr tmp1, [r3, #DDR3PHY_PIR]
+- orr tmp1, tmp1, #DDR3PHY_PIR_DLLBYP
+- str tmp1, [r3, #DDR3PHY_PIR]
++
++ /* Disable DX DLLs. */
++ ldr tmp1, [r3, #DDR3PHY_DX0DLLCR]
++ orr tmp1, tmp1, #DDR3PHY_DXDLLCR_DLLDIS
++ str tmp1, [r3, #DDR3PHY_DX0DLLCR]
++
++ ldr tmp1, [r3, #DDR3PHY_DX1DLLCR]
++ orr tmp1, tmp1, #DDR3PHY_DXDLLCR_DLLDIS
++ str tmp1, [r3, #DDR3PHY_DX1DLLCR]
+
+ sr_ena_3:
+ /* Power down DDR PHY data receivers. */
+@@ -221,10 +227,14 @@ sr_ena_3:
+ bic tmp1, tmp1, #DDR3PHY_DSGCR_ODTPDD_ODT0
+ str tmp1, [r3, #DDR3PHY_DSGCR]
+
+- /* Take DDR PHY's DLL out of bypass mode. */
+- ldr tmp1, [r3, #DDR3PHY_PIR]
+- bic tmp1, tmp1, #DDR3PHY_PIR_DLLBYP
+- str tmp1, [r3, #DDR3PHY_PIR]
++ /* Enable DX DLLs. */
++ ldr tmp1, [r3, #DDR3PHY_DX0DLLCR]
++ bic tmp1, tmp1, #DDR3PHY_DXDLLCR_DLLDIS
++ str tmp1, [r3, #DDR3PHY_DX0DLLCR]
++
++ ldr tmp1, [r3, #DDR3PHY_DX1DLLCR]
++ bic tmp1, tmp1, #DDR3PHY_DXDLLCR_DLLDIS
++ str tmp1, [r3, #DDR3PHY_DX1DLLCR]
+
+ /* Enable quasi-dynamic programming. */
+ mov tmp1, #0
+diff --git a/include/soc/at91/sama7-ddr.h b/include/soc/at91/sama7-ddr.h
+index f6542584ca139..f47a933df82ea 100644
+--- a/include/soc/at91/sama7-ddr.h
++++ b/include/soc/at91/sama7-ddr.h
+@@ -41,6 +41,10 @@
+
+ #define DDR3PHY_ZQ0SR0 (0x188) /* ZQ status register 0 */
+
++#define DDR3PHY_DX0DLLCR (0x1CC) /* DDR3PHY DATX8 DLL Control Register */
++#define DDR3PHY_DX1DLLCR (0x20C) /* DDR3PHY DATX8 DLL Control Register */
++#define DDR3PHY_DXDLLCR_DLLDIS (1 << 31) /* DLL Disable */
++
+ /* UDDRC */
+ #define UDDRC_STAT (0x04) /* UDDRC Operating Mode Status Register */
+ #define UDDRC_STAT_SELFREF_TYPE_DIS (0x0 << 4) /* SDRAM is not in Self-refresh */
+--
+2.35.1
+
--- /dev/null
+From d4e7cd9d33f1d2b8402511b239c71975f587b206 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 26 Aug 2022 11:39:25 +0300
+Subject: ARM: dts: at91: sama5d27_wlsom1: don't keep ldo2 enabled all the time
+
+From: Claudiu Beznea <claudiu.beznea@microchip.com>
+
+[ Upstream commit 617a0d9fe6867bf5b3b7272629cd780c27c877d9 ]
+
+ldo2 is not used by any consumer on the sama5d27_wlsom1 board, thus
+don't keep it enabled all the time.
+
+Fixes: 5d4c3cfb63fe ("ARM: dts: at91: sama5d27_wlsom1: add SAMA5D27 wlsom1 and wlsom1-ek")
+Signed-off-by: Claudiu Beznea <claudiu.beznea@microchip.com>
+Link: https://lore.kernel.org/r/20220826083927.3107272-8-claudiu.beznea@microchip.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi b/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi
+index 70513caf3e8d0..a818e8ebd638f 100644
+--- a/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi
++++ b/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi
+@@ -169,7 +169,6 @@
+ regulator-name = "LDO2";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+- regulator-always-on;
+
+ regulator-state-standby {
+ regulator-on-in-suspend;
+--
+2.35.1
+
--- /dev/null
+From dbba3ccf1c7a7ba0ebb08322b4640bfd8a7486a7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 26 Aug 2022 11:39:22 +0300
+Subject: ARM: dts: at91: sama5d27_wlsom1: specify proper regulator output
+ ranges
+
+From: Claudiu Beznea <claudiu.beznea@microchip.com>
+
+[ Upstream commit addf7efec23af2b67547800aa232d551945e7de2 ]
+
+Min and max output ranges of regulators need to satisfy board
+requirements, not PMIC requirements. Thus adjust the device tree
+accordingly.
+
+Fixes: 5d4c3cfb63fe ("ARM: dts: at91: sama5d27_wlsom1: add SAMA5D27 wlsom1 and wlsom1-ek")
+Signed-off-by: Claudiu Beznea <claudiu.beznea@microchip.com>
+Link: https://lore.kernel.org/r/20220826083927.3107272-5-claudiu.beznea@microchip.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi | 20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+diff --git a/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi b/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi
+index 025a78310e3ab..70513caf3e8d0 100644
+--- a/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi
++++ b/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi
+@@ -68,8 +68,8 @@
+ regulators {
+ vdd_3v3: VDD_IO {
+ regulator-name = "VDD_IO";
+- regulator-min-microvolt = <1200000>;
+- regulator-max-microvolt = <3700000>;
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
+ regulator-initial-mode = <2>;
+ regulator-allowed-modes = <2>, <4>;
+ regulator-always-on;
+@@ -87,8 +87,8 @@
+
+ vddio_ddr: VDD_DDR {
+ regulator-name = "VDD_DDR";
+- regulator-min-microvolt = <600000>;
+- regulator-max-microvolt = <1850000>;
++ regulator-min-microvolt = <1200000>;
++ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <2>;
+ regulator-allowed-modes = <2>, <4>;
+ regulator-always-on;
+@@ -110,8 +110,8 @@
+
+ vdd_core: VDD_CORE {
+ regulator-name = "VDD_CORE";
+- regulator-min-microvolt = <600000>;
+- regulator-max-microvolt = <1850000>;
++ regulator-min-microvolt = <1250000>;
++ regulator-max-microvolt = <1250000>;
+ regulator-initial-mode = <2>;
+ regulator-allowed-modes = <2>, <4>;
+ regulator-always-on;
+@@ -152,8 +152,8 @@
+
+ LDO1 {
+ regulator-name = "LDO1";
+- regulator-min-microvolt = <1200000>;
+- regulator-max-microvolt = <3700000>;
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+
+ regulator-state-standby {
+@@ -167,8 +167,8 @@
+
+ LDO2 {
+ regulator-name = "LDO2";
+- regulator-min-microvolt = <1200000>;
+- regulator-max-microvolt = <3700000>;
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+
+ regulator-state-standby {
+--
+2.35.1
+
--- /dev/null
+From ed1350529b87037c970b47d8e150862d4514c95b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 26 Aug 2022 11:39:26 +0300
+Subject: ARM: dts: at91: sama5d2_icp: don't keep vdd_other enabled all the
+ time
+
+From: Claudiu Beznea <claudiu.beznea@microchip.com>
+
+[ Upstream commit 3d074b750d2b4c91962f10ea1df1c289ce0d3ce8 ]
+
+VDD_OTHER is not connected to any on-board consumer, thus there is no
+need to keep it enabled all the time.
+
+Fixes: 68a95ef72cef ("ARM: dts: at91: sama5d2-icp: add SAMA5D2-ICP")
+Signed-off-by: Claudiu Beznea <claudiu.beznea@microchip.com>
+Link: https://lore.kernel.org/r/20220826083927.3107272-9-claudiu.beznea@microchip.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/boot/dts/at91-sama5d2_icp.dts | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/arch/arm/boot/dts/at91-sama5d2_icp.dts b/arch/arm/boot/dts/at91-sama5d2_icp.dts
+index c175237b6d4e4..4ebbbe65c0cee 100644
+--- a/arch/arm/boot/dts/at91-sama5d2_icp.dts
++++ b/arch/arm/boot/dts/at91-sama5d2_icp.dts
+@@ -258,7 +258,6 @@
+ regulator-max-microvolt = <1850000>;
+ regulator-initial-mode = <2>;
+ regulator-allowed-modes = <2>, <4>;
+- regulator-always-on;
+
+ regulator-state-standby {
+ regulator-on-in-suspend;
+--
+2.35.1
+
--- /dev/null
+From 2e9b59318fc02f554ea4c86700499b554445add6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 26 Aug 2022 11:39:23 +0300
+Subject: ARM: dts: at91: sama5d2_icp: specify proper regulator output ranges
+
+From: Claudiu Beznea <claudiu.beznea@microchip.com>
+
+[ Upstream commit 7737d93666eea282febf95e5fa3b3fde1f2549f3 ]
+
+Min and max output ranges of regulators need to satisfy board
+requirements, not PMIC requirements. Thus adjust the device tree
+accordingly.
+
+Fixes: 68a95ef72cef ("ARM: dts: at91: sama5d2-icp: add SAMA5D2-ICP")
+Signed-off-by: Claudiu Beznea <claudiu.beznea@microchip.com>
+Link: https://lore.kernel.org/r/20220826083927.3107272-6-claudiu.beznea@microchip.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/boot/dts/at91-sama5d2_icp.dts | 20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+diff --git a/arch/arm/boot/dts/at91-sama5d2_icp.dts b/arch/arm/boot/dts/at91-sama5d2_icp.dts
+index fd1a288f686bc..c175237b6d4e4 100644
+--- a/arch/arm/boot/dts/at91-sama5d2_icp.dts
++++ b/arch/arm/boot/dts/at91-sama5d2_icp.dts
+@@ -197,8 +197,8 @@
+ regulators {
+ vdd_io_reg: VDD_IO {
+ regulator-name = "VDD_IO";
+- regulator-min-microvolt = <1200000>;
+- regulator-max-microvolt = <3700000>;
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
+ regulator-initial-mode = <2>;
+ regulator-allowed-modes = <2>, <4>;
+ regulator-always-on;
+@@ -216,8 +216,8 @@
+
+ VDD_DDR {
+ regulator-name = "VDD_DDR";
+- regulator-min-microvolt = <600000>;
+- regulator-max-microvolt = <1850000>;
++ regulator-min-microvolt = <1350000>;
++ regulator-max-microvolt = <1350000>;
+ regulator-initial-mode = <2>;
+ regulator-allowed-modes = <2>, <4>;
+ regulator-always-on;
+@@ -235,8 +235,8 @@
+
+ VDD_CORE {
+ regulator-name = "VDD_CORE";
+- regulator-min-microvolt = <600000>;
+- regulator-max-microvolt = <1850000>;
++ regulator-min-microvolt = <1250000>;
++ regulator-max-microvolt = <1250000>;
+ regulator-initial-mode = <2>;
+ regulator-allowed-modes = <2>, <4>;
+ regulator-always-on;
+@@ -273,8 +273,8 @@
+
+ LDO1 {
+ regulator-name = "LDO1";
+- regulator-min-microvolt = <1200000>;
+- regulator-max-microvolt = <3700000>;
++ regulator-min-microvolt = <2500000>;
++ regulator-max-microvolt = <2500000>;
+ regulator-always-on;
+
+ regulator-state-standby {
+@@ -288,8 +288,8 @@
+
+ LDO2 {
+ regulator-name = "LDO2";
+- regulator-min-microvolt = <1200000>;
+- regulator-max-microvolt = <3700000>;
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+
+ regulator-state-standby {
+--
+2.35.1
+
--- /dev/null
+From f3935860dc4efaf5ab4edde6c9a30be8623b8a19 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 26 Jul 2022 15:05:21 +0200
+Subject: ARM: dts: imx6qdl-kontron-samx6i: remove duplicated node
+
+From: Marco Felsch <m.felsch@pengutronix.de>
+
+[ Upstream commit 204f67d86f55dd4fa757ed04757d7273f71a169c ]
+
+The regulator node 'regulator-3p3v-s0' was duplicated. Remove the
+duplicate to clean up the DTS.
+
+Fixes: 2a51f9dae13d ("ARM: dts: imx6qdl-kontron-samx6i: Add iMX6-based Kontron SMARC-sAMX6i module")
+Signed-off-by: Marco Felsch <m.felsch@pengutronix.de>
+Signed-off-by: Shawn Guo <shawnguo@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi | 10 ----------
+ 1 file changed, 10 deletions(-)
+
+diff --git a/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi b/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi
+index b167b33bd108d..9a3e5f7827152 100644
+--- a/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi
+@@ -51,16 +51,6 @@
+ vin-supply = <®_3p3v_s5>;
+ };
+
+- reg_3p3v_s0: regulator-3p3v-s0 {
+- compatible = "regulator-fixed";
+- regulator-name = "V_3V3_S0";
+- regulator-min-microvolt = <3300000>;
+- regulator-max-microvolt = <3300000>;
+- regulator-always-on;
+- regulator-boot-on;
+- vin-supply = <®_3p3v_s5>;
+- };
+-
+ reg_3p3v_s5: regulator-3p3v-s5 {
+ compatible = "regulator-fixed";
+ regulator-name = "V_3V3_S5";
+--
+2.35.1
+
--- /dev/null
+From a8f4281be0bac07e19ff455efbdc3e632d13645e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 16 Aug 2022 17:52:29 +0100
+Subject: ASoC: qcom: sm8250: add missing module owner
+
+From: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+
+[ Upstream commit c6e14bb9f50df7126ca64405ae807d8bc7b39f9a ]
+
+Add the missing module owner to be able to build and load this driver as a module.
+
+Fixes: aa2e2785545a ("ASoC: qcom: sm8250: add sound card qrb5165-rb5 support")
+Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Link: https://lore.kernel.org/r/20220816165229.7971-1-srinivas.kandagatla@linaro.org
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/qcom/sm8250.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/sound/soc/qcom/sm8250.c b/sound/soc/qcom/sm8250.c
+index fe8fd7367e21b..e5190aa588c63 100644
+--- a/sound/soc/qcom/sm8250.c
++++ b/sound/soc/qcom/sm8250.c
+@@ -191,6 +191,7 @@ static int sm8250_platform_probe(struct platform_device *pdev)
+ if (!card)
+ return -ENOMEM;
+
++ card->owner = THIS_MODULE;
+ /* Allocate the private data */
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+--
+2.35.1
+
--- /dev/null
+From 663cf71559afbd1e01b45bc70f9e55ecc99bf26c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 2 Sep 2022 12:57:10 +0800
+Subject: erofs: fix pcluster use-after-free on UP platforms
+
+From: Gao Xiang <hsiangkao@linux.alibaba.com>
+
+[ Upstream commit 2f44013e39984c127c6efedf70e6b5f4e9dcf315 ]
+
+During stress testing with CONFIG_SMP disabled, KASAN reports as below:
+
+==================================================================
+BUG: KASAN: use-after-free in __mutex_lock+0xe5/0xc30
+Read of size 8 at addr ffff8881094223f8 by task stress/7789
+
+CPU: 0 PID: 7789 Comm: stress Not tainted 6.0.0-rc1-00002-g0d53d2e882f9 #3
+Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2011
+Call Trace:
+ <TASK>
+..
+ __mutex_lock+0xe5/0xc30
+..
+ z_erofs_do_read_page+0x8ce/0x1560
+..
+ z_erofs_readahead+0x31c/0x580
+..
+Freed by task 7787
+ kasan_save_stack+0x1e/0x40
+ kasan_set_track+0x20/0x30
+ kasan_set_free_info+0x20/0x40
+ __kasan_slab_free+0x10c/0x190
+ kmem_cache_free+0xed/0x380
+ rcu_core+0x3d5/0xc90
+ __do_softirq+0x12d/0x389
+
+Last potentially related work creation:
+ kasan_save_stack+0x1e/0x40
+ __kasan_record_aux_stack+0x97/0xb0
+ call_rcu+0x3d/0x3f0
+ erofs_shrink_workstation+0x11f/0x210
+ erofs_shrink_scan+0xdc/0x170
+ shrink_slab.constprop.0+0x296/0x530
+ drop_slab+0x1c/0x70
+ drop_caches_sysctl_handler+0x70/0x80
+ proc_sys_call_handler+0x20a/0x2f0
+ vfs_write+0x555/0x6c0
+ ksys_write+0xbe/0x160
+ do_syscall_64+0x3b/0x90
+
+The root cause is that erofs_workgroup_unfreeze() doesn't reset the
+refcount to orig_val, which causes a race where the pcluster is reused
+unexpectedly before it is freed.
+
+Since UP platforms are quite rare now, such a path becomes unnecessary.
+Let's just drop this specially-designed path directly instead.
+
+Fixes: 73f5c66df3e2 ("staging: erofs: fix `erofs_workgroup_{try_to_freeze, unfreeze}'")
+Reviewed-by: Yue Hu <huyue2@coolpad.com>
+Reviewed-by: Chao Yu <chao@kernel.org>
+Link: https://lore.kernel.org/r/20220902045710.109530-1-hsiangkao@linux.alibaba.com
+Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/erofs/internal.h | 29 -----------------------------
+ 1 file changed, 29 deletions(-)
+
+diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
+index 9524e155b38fa..b77acf09726c6 100644
+--- a/fs/erofs/internal.h
++++ b/fs/erofs/internal.h
+@@ -143,7 +143,6 @@ struct erofs_workgroup {
+ atomic_t refcount;
+ };
+
+-#if defined(CONFIG_SMP)
+ static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
+ int val)
+ {
+@@ -172,34 +171,6 @@ static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
+ return atomic_cond_read_relaxed(&grp->refcount,
+ VAL != EROFS_LOCKED_MAGIC);
+ }
+-#else
+-static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
+- int val)
+-{
+- preempt_disable();
+- /* no need to spin on UP platforms, let's just disable preemption. */
+- if (val != atomic_read(&grp->refcount)) {
+- preempt_enable();
+- return false;
+- }
+- return true;
+-}
+-
+-static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
+- int orig_val)
+-{
+- preempt_enable();
+-}
+-
+-static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
+-{
+- int v = atomic_read(&grp->refcount);
+-
+- /* workgroup is never freezed on uniprocessor systems */
+- DBG_BUGON(v == EROFS_LOCKED_MAGIC);
+- return v;
+-}
+-#endif /* !CONFIG_SMP */
+ #endif /* !CONFIG_EROFS_FS_ZIP */
+
+ /* we strictly follow PAGE_SIZE and no buffer head yet */
+--
+2.35.1
+
--- /dev/null
+From 0da7259f914d79f5bc5bf84b6b35d066b7b5a3ea Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 16 Aug 2022 18:22:30 +0200
+Subject: i40e: Fix kernel crash during module removal
+
+From: Ivan Vecera <ivecera@redhat.com>
+
+[ Upstream commit fb8396aeda5872369a8ed6d2301e2c86e303c520 ]
+
+The driver incorrectly frees the client instance, and subsequent
+i40e module removal leads to a kernel crash.
+
+Reproducer:
+1. Do ethtool offline test followed immediately by another one
+host# ethtool -t eth0 offline; ethtool -t eth0 offline
+2. Remove recursively irdma module that also removes i40e module
+host# modprobe -r irdma
+
+Result:
+[ 8675.035651] i40e 0000:3d:00.0 eno1: offline testing starting
+[ 8675.193774] i40e 0000:3d:00.0 eno1: testing finished
+[ 8675.201316] i40e 0000:3d:00.0 eno1: offline testing starting
+[ 8675.358921] i40e 0000:3d:00.0 eno1: testing finished
+[ 8675.496921] i40e 0000:3d:00.0: IRDMA hardware initialization FAILED init_state=2 status=-110
+[ 8686.188955] i40e 0000:3d:00.1: i40e_ptp_stop: removed PHC on eno2
+[ 8686.943890] i40e 0000:3d:00.1: Deleted LAN device PF1 bus=0x3d dev=0x00 func=0x01
+[ 8686.952669] i40e 0000:3d:00.0: i40e_ptp_stop: removed PHC on eno1
+[ 8687.761787] BUG: kernel NULL pointer dereference, address: 0000000000000030
+[ 8687.768755] #PF: supervisor read access in kernel mode
+[ 8687.773895] #PF: error_code(0x0000) - not-present page
+[ 8687.779034] PGD 0 P4D 0
+[ 8687.781575] Oops: 0000 [#1] PREEMPT SMP NOPTI
+[ 8687.785935] CPU: 51 PID: 172891 Comm: rmmod Kdump: loaded Tainted: G W I 5.19.0+ #2
+[ 8687.794800] Hardware name: Intel Corporation S2600WFD/S2600WFD, BIOS SE5C620.86B.0X.02.0001.051420190324 05/14/2019
+[ 8687.805222] RIP: 0010:i40e_lan_del_device+0x13/0xb0 [i40e]
+[ 8687.810719] Code: d4 84 c0 0f 84 b8 25 01 00 e9 9c 25 01 00 41 bc f4 ff ff ff eb 91 90 0f 1f 44 00 00 41 54 55 53 48 8b 87 58 08 00 00 48 89 fb <48> 8b 68 30 48 89 ef e8 21 8a 0f d5 48 89 ef e8 a9 78 0f d5 48 8b
+[ 8687.829462] RSP: 0018:ffffa604072efce0 EFLAGS: 00010202
+[ 8687.834689] RAX: 0000000000000000 RBX: ffff8f43833b2000 RCX: 0000000000000000
+[ 8687.841821] RDX: 0000000000000000 RSI: ffff8f4b0545b298 RDI: ffff8f43833b2000
+[ 8687.848955] RBP: ffff8f43833b2000 R08: 0000000000000001 R09: 0000000000000000
+[ 8687.856086] R10: 0000000000000000 R11: 000ffffffffff000 R12: ffff8f43833b2ef0
+[ 8687.863218] R13: ffff8f43833b2ef0 R14: ffff915103966000 R15: ffff8f43833b2008
+[ 8687.870342] FS: 00007f79501c3740(0000) GS:ffff8f4adffc0000(0000) knlGS:0000000000000000
+[ 8687.878427] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 8687.884174] CR2: 0000000000000030 CR3: 000000014276e004 CR4: 00000000007706e0
+[ 8687.891306] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[ 8687.898441] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+[ 8687.905572] PKRU: 55555554
+[ 8687.908286] Call Trace:
+[ 8687.910737] <TASK>
+[ 8687.912843] i40e_remove+0x2c0/0x330 [i40e]
+[ 8687.917040] pci_device_remove+0x33/0xa0
+[ 8687.920962] device_release_driver_internal+0x1aa/0x230
+[ 8687.926188] driver_detach+0x44/0x90
+[ 8687.929770] bus_remove_driver+0x55/0xe0
+[ 8687.933693] pci_unregister_driver+0x2a/0xb0
+[ 8687.937967] i40e_exit_module+0xc/0xf48 [i40e]
+
+Two offline tests cause an IRDMA driver failure (ETIMEDOUT) and this
+failure is indicated back to i40e_client_subtask(), which calls
+i40e_client_del_instance() to free the client instance referenced
+by pf->cinst and sets this pointer to NULL. During module
+removal i40e_remove() calls i40e_lan_del_device(), which dereferences
+pf->cinst that is NULL -> crash.
+Do not remove the client instance when the client open callback fails;
+just clear the __I40E_CLIENT_INSTANCE_OPENED bit. The driver also needs
+to handle this situation (when netdev is up and the client
+is NOT opened) in i40e_notify_client_of_netdev_close() and
+call the client close callback only when __I40E_CLIENT_INSTANCE_OPENED
+is set.
+
+Fixes: 0ef2d5afb12d ("i40e: KISS the client interface")
+Signed-off-by: Ivan Vecera <ivecera@redhat.com>
+Tested-by: Helena Anna Dubel <helena.anna.dubel@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_client.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
+index ea2bb0140a6eb..10d7a982a5b9b 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
+@@ -177,6 +177,10 @@ void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset)
+ "Cannot locate client instance close routine\n");
+ return;
+ }
++ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
++ dev_dbg(&pf->pdev->dev, "Client is not open, abort close\n");
++ return;
++ }
+ cdev->client->ops->close(&cdev->lan_info, cdev->client, reset);
+ clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
+ i40e_client_release_qvlist(&cdev->lan_info);
+@@ -429,7 +433,6 @@ void i40e_client_subtask(struct i40e_pf *pf)
+ /* Remove failed client instance */
+ clear_bit(__I40E_CLIENT_INSTANCE_OPENED,
+ &cdev->state);
+- i40e_client_del_instance(pf);
+ return;
+ }
+ }
+--
+2.35.1
+
--- /dev/null
+From 901bcbb66b237a953fc1ef660991543621d4a678 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Aug 2022 10:16:27 +0200
+Subject: iavf: Detach device during reset task
+
+From: Ivan Vecera <ivecera@redhat.com>
+
+[ Upstream commit aa626da947e9cd30c4cf727493903e1adbb2c0a0 ]
+
+iavf_reset_task() takes crit_lock at the beginning and holds
+it during the whole call. The function subsequently calls
+iavf_init_interrupt_scheme(), which grabs RTNL. A problem occurs
+when, during the reset task, userspace initiates any ndo callback
+that runs under RTNL, like iavf_open(), because some of those
+functions try to take crit_lock. This leads to a classic A-B B-A
+deadlock scenario.
+
+To resolve this situation the device should be detached in
+iavf_reset_task() prior to taking crit_lock, to avoid subsequent
+ndos running under RTNL, and reattached at the end.
+
+Fixes: 62fe2a865e6d ("i40evf: add missing rtnl_lock() around i40evf_set_interrupt_capability")
+Cc: Jacob Keller <jacob.e.keller@intel.com>
+Cc: Patryk Piotrowski <patryk.piotrowski@intel.com>
+Cc: SlawomirX Laba <slawomirx.laba@intel.com>
+Tested-by: Vitaly Grinberg <vgrinber@redhat.com>
+Signed-off-by: Ivan Vecera <ivecera@redhat.com>
+Tested-by: Konrad Jankowski <konrad0.jankowski@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/iavf/iavf_main.c | 14 +++++++++++---
+ 1 file changed, 11 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
+index db95786c3419f..00b2ef01f4ea6 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
+@@ -2222,6 +2222,11 @@ static void iavf_reset_task(struct work_struct *work)
+ int i = 0, err;
+ bool running;
+
++ /* Detach interface to avoid subsequent NDO callbacks */
++ rtnl_lock();
++ netif_device_detach(netdev);
++ rtnl_unlock();
++
+ /* When device is being removed it doesn't make sense to run the reset
+ * task, just return in such a case.
+ */
+@@ -2229,7 +2234,7 @@ static void iavf_reset_task(struct work_struct *work)
+ if (adapter->state != __IAVF_REMOVE)
+ queue_work(iavf_wq, &adapter->reset_task);
+
+- return;
++ goto reset_finish;
+ }
+
+ while (!mutex_trylock(&adapter->client_lock))
+@@ -2299,7 +2304,6 @@ static void iavf_reset_task(struct work_struct *work)
+
+ if (running) {
+ netif_carrier_off(netdev);
+- netif_tx_stop_all_queues(netdev);
+ adapter->link_up = false;
+ iavf_napi_disable_all(adapter);
+ }
+@@ -2412,7 +2416,7 @@ static void iavf_reset_task(struct work_struct *work)
+ mutex_unlock(&adapter->client_lock);
+ mutex_unlock(&adapter->crit_lock);
+
+- return;
++ goto reset_finish;
+ reset_err:
+ if (running) {
+ set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
+@@ -2423,6 +2427,10 @@ static void iavf_reset_task(struct work_struct *work)
+ mutex_unlock(&adapter->client_lock);
+ mutex_unlock(&adapter->crit_lock);
+ dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
++reset_finish:
++ rtnl_lock();
++ netif_device_attach(netdev);
++ rtnl_unlock();
+ }
+
+ /**
+--
+2.35.1
+
--- /dev/null
+From f102c9f8b44960a420287cbb5819e9f0fe995e34 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 Aug 2022 09:10:36 +0300
+Subject: IB/core: Fix a nested dead lock as part of ODP flow
+
+From: Yishai Hadas <yishaih@nvidia.com>
+
+[ Upstream commit 85eaeb5058f0f04dffb124c97c86b4f18db0b833 ]
+
+Fix a nested dead lock as part of ODP flow by using mmput_async().
+
+From the call trace below [1] we can see that calling mmput() once we have
+the umem_odp->umem_mutex locked, as required by
+ib_umem_odp_map_dma_and_lock(), might trigger in the same task the
+exit_mmap()->__mmu_notifier_release()->mlx5_ib_invalidate_range() flow,
+which may deadlock when trying to lock the same mutex.
+
+Moving to mmput_async() will solve the problem, as the above
+exit_mmap() flow will be called in another task and will be executed once
+the lock becomes available.
+
+[1]
+[64843.077665] task:kworker/u133:2 state:D stack: 0 pid:80906 ppid:
+2 flags:0x00004000
+[64843.077672] Workqueue: mlx5_ib_page_fault mlx5_ib_eqe_pf_action [mlx5_ib]
+[64843.077719] Call Trace:
+[64843.077722] <TASK>
+[64843.077724] __schedule+0x23d/0x590
+[64843.077729] schedule+0x4e/0xb0
+[64843.077735] schedule_preempt_disabled+0xe/0x10
+[64843.077740] __mutex_lock.constprop.0+0x263/0x490
+[64843.077747] __mutex_lock_slowpath+0x13/0x20
+[64843.077752] mutex_lock+0x34/0x40
+[64843.077758] mlx5_ib_invalidate_range+0x48/0x270 [mlx5_ib]
+[64843.077808] __mmu_notifier_release+0x1a4/0x200
+[64843.077816] exit_mmap+0x1bc/0x200
+[64843.077822] ? walk_page_range+0x9c/0x120
+[64843.077828] ? __cond_resched+0x1a/0x50
+[64843.077833] ? mutex_lock+0x13/0x40
+[64843.077839] ? uprobe_clear_state+0xac/0x120
+[64843.077860] mmput+0x5f/0x140
+[64843.077867] ib_umem_odp_map_dma_and_lock+0x21b/0x580 [ib_core]
+[64843.077931] pagefault_real_mr+0x9a/0x140 [mlx5_ib]
+[64843.077962] pagefault_mr+0xb4/0x550 [mlx5_ib]
+[64843.077992] pagefault_single_data_segment.constprop.0+0x2ac/0x560
+[mlx5_ib]
+[64843.078022] mlx5_ib_eqe_pf_action+0x528/0x780 [mlx5_ib]
+[64843.078051] process_one_work+0x22b/0x3d0
+[64843.078059] worker_thread+0x53/0x410
+[64843.078065] ? process_one_work+0x3d0/0x3d0
+[64843.078073] kthread+0x12a/0x150
+[64843.078079] ? set_kthread_struct+0x50/0x50
+[64843.078085] ret_from_fork+0x22/0x30
+[64843.078093] </TASK>
+
+Fixes: 36f30e486dce ("IB/core: Improve ODP to use hmm_range_fault()")
+Reviewed-by: Maor Gottlieb <maorg@nvidia.com>
+Signed-off-by: Yishai Hadas <yishaih@nvidia.com>
+Link: https://lore.kernel.org/r/74d93541ea533ef7daec6f126deb1072500aeb16.1661251841.git.leonro@nvidia.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/core/umem_odp.c | 2 +-
+ kernel/fork.c | 1 +
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
+index 7a47343d11f9f..b052de1b9ccb9 100644
+--- a/drivers/infiniband/core/umem_odp.c
++++ b/drivers/infiniband/core/umem_odp.c
+@@ -463,7 +463,7 @@ int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 user_virt,
+ mutex_unlock(&umem_odp->umem_mutex);
+
+ out_put_mm:
+- mmput(owning_mm);
++ mmput_async(owning_mm);
+ out_put_task:
+ if (owning_process)
+ put_task_struct(owning_process);
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 89475c994ca91..908ba3c93893f 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1153,6 +1153,7 @@ void mmput_async(struct mm_struct *mm)
+ schedule_work(&mm->async_put_work);
+ }
+ }
++EXPORT_SYMBOL_GPL(mmput_async);
+ #endif
+
+ /**
+--
+2.35.1
+
--- /dev/null
+From 07c2ca01ccee96c546233f86a4fdf852c8e2851e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Aug 2022 10:53:20 +0200
+Subject: ice: use bitmap_free instead of devm_kfree
+
+From: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
+
+[ Upstream commit 59ac325557b6c14f1f793b90d3946bc145ffa085 ]
+
+pf->avail_txqs was allocated using bitmap_zalloc, so bitmap_free should
+be used to free this memory.
+
+Fixes: 78b5713ac1241 ("ice: Alloc queue management bitmaps and arrays dynamically")
+Signed-off-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
+Tested-by: Gurucharan <gurucharanx.g@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index b9d45c7dbef18..63ae4674d2000 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -3549,7 +3549,7 @@ static int ice_init_pf(struct ice_pf *pf)
+
+ pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
+ if (!pf->avail_rxqs) {
+- devm_kfree(ice_pf_to_dev(pf), pf->avail_txqs);
++ bitmap_free(pf->avail_txqs);
+ pf->avail_txqs = NULL;
+ return -ENOMEM;
+ }
+--
+2.35.1
+
--- /dev/null
+From 52c1d59b6929ca33c1cb1df15c5416a7950911fc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 2 Sep 2022 10:45:06 +0100
+Subject: ipv6: sr: fix out-of-bounds read when setting HMAC data.
+
+From: David Lebrun <dlebrun@google.com>
+
+[ Upstream commit 84a53580c5d2138c7361c7c3eea5b31827e63b35 ]
+
+The SRv6 layer allows defining HMAC data that can later be used to sign IPv6
+Segment Routing Headers. This configuration is realised via netlink through
+four attributes: SEG6_ATTR_HMACKEYID, SEG6_ATTR_SECRET, SEG6_ATTR_SECRETLEN and
+SEG6_ATTR_ALGID. Because the SECRETLEN attribute is decoupled from the actual
+length of the SECRET attribute, it is possible to provide invalid combinations
+(e.g., secret = "", secretlen = 64). This case is not checked in the code and
+with an appropriately crafted netlink message, an out-of-bounds read of up
+to 64 bytes (max secret length) can occur past the skb end pointer and into
+skb_shared_info:
+
+Breakpoint 1, seg6_genl_sethmac (skb=<optimized out>, info=<optimized out>) at net/ipv6/seg6.c:208
+208 memcpy(hinfo->secret, secret, slen);
+(gdb) bt
+ #0 seg6_genl_sethmac (skb=<optimized out>, info=<optimized out>) at net/ipv6/seg6.c:208
+ #1 0xffffffff81e012e9 in genl_family_rcv_msg_doit (skb=skb@entry=0xffff88800b1f9f00, nlh=nlh@entry=0xffff88800b1b7600,
+ extack=extack@entry=0xffffc90000ba7af0, ops=ops@entry=0xffffc90000ba7a80, hdrlen=4, net=0xffffffff84237580 <init_net>, family=<optimized out>,
+ family=<optimized out>) at net/netlink/genetlink.c:731
+ #2 0xffffffff81e01435 in genl_family_rcv_msg (extack=0xffffc90000ba7af0, nlh=0xffff88800b1b7600, skb=0xffff88800b1f9f00,
+ family=0xffffffff82fef6c0 <seg6_genl_family>) at net/netlink/genetlink.c:775
+ #3 genl_rcv_msg (skb=0xffff88800b1f9f00, nlh=0xffff88800b1b7600, extack=0xffffc90000ba7af0) at net/netlink/genetlink.c:792
+ #4 0xffffffff81dfffc3 in netlink_rcv_skb (skb=skb@entry=0xffff88800b1f9f00, cb=cb@entry=0xffffffff81e01350 <genl_rcv_msg>)
+ at net/netlink/af_netlink.c:2501
+ #5 0xffffffff81e00919 in genl_rcv (skb=0xffff88800b1f9f00) at net/netlink/genetlink.c:803
+ #6 0xffffffff81dff6ae in netlink_unicast_kernel (ssk=0xffff888010eec800, skb=0xffff88800b1f9f00, sk=0xffff888004aed000)
+ at net/netlink/af_netlink.c:1319
+ #7 netlink_unicast (ssk=ssk@entry=0xffff888010eec800, skb=skb@entry=0xffff88800b1f9f00, portid=portid@entry=0, nonblock=<optimized out>)
+ at net/netlink/af_netlink.c:1345
+ #8 0xffffffff81dff9a4 in netlink_sendmsg (sock=<optimized out>, msg=0xffffc90000ba7e48, len=<optimized out>) at net/netlink/af_netlink.c:1921
+...
+(gdb) p/x ((struct sk_buff *)0xffff88800b1f9f00)->head + ((struct sk_buff *)0xffff88800b1f9f00)->end
+$1 = 0xffff88800b1b76c0
+(gdb) p/x secret
+$2 = 0xffff88800b1b76c0
+(gdb) p slen
+$3 = 64 '@'
+
+The OOB data can then be read back from userspace by dumping HMAC state. This
+commit fixes this by ensuring SECRETLEN cannot exceed the actual length of
+SECRET.
+
+Reported-by: Lucas Leong <wmliang.tw@gmail.com>
+Tested: verified that EINVAL is correctly returned when secretlen > len(secret)
+Fixes: 4f4853dc1c9c1 ("ipv6: sr: implement API to control SR HMAC structure")
+Signed-off-by: David Lebrun <dlebrun@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/seg6.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
+index fa6b64c95d3ae..0c7c6fc16c3c3 100644
+--- a/net/ipv6/seg6.c
++++ b/net/ipv6/seg6.c
+@@ -191,6 +191,11 @@ static int seg6_genl_sethmac(struct sk_buff *skb, struct genl_info *info)
+ goto out_unlock;
+ }
+
++ if (slen > nla_len(info->attrs[SEG6_ATTR_SECRET])) {
++ err = -EINVAL;
++ goto out_unlock;
++ }
++
+ if (hinfo) {
+ err = seg6_hmac_info_del(net, hmackeyid);
+ if (err)
+--
+2.35.1
+
--- /dev/null
+From 70a28e4ba3e1b22fb6108b21a9d937ec5e0ee476 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 1 Sep 2022 16:04:03 +0200
+Subject: net: fec: Use a spinlock to guard `fep->ptp_clk_on`
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Csókás Bence <csokas.bence@prolan.hu>
+
+[ Upstream commit b353b241f1eb9b6265358ffbe2632fdcb563354f ]
+
+Mutexes cannot be taken in a non-preemptible context,
+causing a panic in `fec_ptp_save_state()`. Replacing
+`ptp_clk_mutex` by `tmreg_lock` fixes this.
+
+Fixes: 6a4d7234ae9a ("net: fec: ptp: avoid register access when ipg clock is disabled")
+Fixes: f79959220fa5 ("fec: Restart PPS after link state change")
+Reported-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Link: https://lore.kernel.org/all/20220827160922.642zlcd5foopozru@pengutronix.de/
+Signed-off-by: Csókás Bence <csokas.bence@prolan.hu>
+Tested-by: Francesco Dolcini <francesco.dolcini@toradex.com> # Toradex Apalis iMX6
+Link: https://lore.kernel.org/r/20220901140402.64804-1-csokas.bence@prolan.hu
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/fec.h | 1 -
+ drivers/net/ethernet/freescale/fec_main.c | 17 +++++++-------
+ drivers/net/ethernet/freescale/fec_ptp.c | 28 ++++++++---------------
+ 3 files changed, 19 insertions(+), 27 deletions(-)
+
+diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
+index ed7301b691694..939720a75f87c 100644
+--- a/drivers/net/ethernet/freescale/fec.h
++++ b/drivers/net/ethernet/freescale/fec.h
+@@ -557,7 +557,6 @@ struct fec_enet_private {
+ struct clk *clk_2x_txclk;
+
+ bool ptp_clk_on;
+- struct mutex ptp_clk_mutex;
+ unsigned int num_tx_queues;
+ unsigned int num_rx_queues;
+
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 67eb9b671244b..7561524e7c361 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -1984,6 +1984,7 @@ static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
+ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
+ {
+ struct fec_enet_private *fep = netdev_priv(ndev);
++ unsigned long flags;
+ int ret;
+
+ if (enable) {
+@@ -1992,15 +1993,15 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
+ return ret;
+
+ if (fep->clk_ptp) {
+- mutex_lock(&fep->ptp_clk_mutex);
++ spin_lock_irqsave(&fep->tmreg_lock, flags);
+ ret = clk_prepare_enable(fep->clk_ptp);
+ if (ret) {
+- mutex_unlock(&fep->ptp_clk_mutex);
++ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ goto failed_clk_ptp;
+ } else {
+ fep->ptp_clk_on = true;
+ }
+- mutex_unlock(&fep->ptp_clk_mutex);
++ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ }
+
+ ret = clk_prepare_enable(fep->clk_ref);
+@@ -2015,10 +2016,10 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
+ } else {
+ clk_disable_unprepare(fep->clk_enet_out);
+ if (fep->clk_ptp) {
+- mutex_lock(&fep->ptp_clk_mutex);
++ spin_lock_irqsave(&fep->tmreg_lock, flags);
+ clk_disable_unprepare(fep->clk_ptp);
+ fep->ptp_clk_on = false;
+- mutex_unlock(&fep->ptp_clk_mutex);
++ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ }
+ clk_disable_unprepare(fep->clk_ref);
+ clk_disable_unprepare(fep->clk_2x_txclk);
+@@ -2031,10 +2032,10 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
+ clk_disable_unprepare(fep->clk_ref);
+ failed_clk_ref:
+ if (fep->clk_ptp) {
+- mutex_lock(&fep->ptp_clk_mutex);
++ spin_lock_irqsave(&fep->tmreg_lock, flags);
+ clk_disable_unprepare(fep->clk_ptp);
+ fep->ptp_clk_on = false;
+- mutex_unlock(&fep->ptp_clk_mutex);
++ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ }
+ failed_clk_ptp:
+ clk_disable_unprepare(fep->clk_enet_out);
+@@ -3866,7 +3867,7 @@ fec_probe(struct platform_device *pdev)
+ fep->clk_enet_out = NULL;
+
+ fep->ptp_clk_on = false;
+- mutex_init(&fep->ptp_clk_mutex);
++ spin_lock_init(&fep->tmreg_lock);
+
+ /* clk_ref is optional, depends on board */
+ fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref");
+diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
+index c5ae673005908..99bd67d3befd0 100644
+--- a/drivers/net/ethernet/freescale/fec_ptp.c
++++ b/drivers/net/ethernet/freescale/fec_ptp.c
+@@ -366,21 +366,19 @@ static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+ */
+ static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+ {
+- struct fec_enet_private *adapter =
++ struct fec_enet_private *fep =
+ container_of(ptp, struct fec_enet_private, ptp_caps);
+ u64 ns;
+ unsigned long flags;
+
+- mutex_lock(&adapter->ptp_clk_mutex);
++ spin_lock_irqsave(&fep->tmreg_lock, flags);
+ /* Check the ptp clock */
+- if (!adapter->ptp_clk_on) {
+- mutex_unlock(&adapter->ptp_clk_mutex);
++ if (!fep->ptp_clk_on) {
++ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ return -EINVAL;
+ }
+- spin_lock_irqsave(&adapter->tmreg_lock, flags);
+- ns = timecounter_read(&adapter->tc);
+- spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+- mutex_unlock(&adapter->ptp_clk_mutex);
++ ns = timecounter_read(&fep->tc);
++ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+
+ *ts = ns_to_timespec64(ns);
+
+@@ -405,10 +403,10 @@ static int fec_ptp_settime(struct ptp_clock_info *ptp,
+ unsigned long flags;
+ u32 counter;
+
+- mutex_lock(&fep->ptp_clk_mutex);
++ spin_lock_irqsave(&fep->tmreg_lock, flags);
+ /* Check the ptp clock */
+ if (!fep->ptp_clk_on) {
+- mutex_unlock(&fep->ptp_clk_mutex);
++ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ return -EINVAL;
+ }
+
+@@ -418,11 +416,9 @@ static int fec_ptp_settime(struct ptp_clock_info *ptp,
+ */
+ counter = ns & fep->cc.mask;
+
+- spin_lock_irqsave(&fep->tmreg_lock, flags);
+ writel(counter, fep->hwp + FEC_ATIME);
+ timecounter_init(&fep->tc, &fep->cc, ns);
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+- mutex_unlock(&fep->ptp_clk_mutex);
+ return 0;
+ }
+
+@@ -523,13 +519,11 @@ static void fec_time_keep(struct work_struct *work)
+ struct fec_enet_private *fep = container_of(dwork, struct fec_enet_private, time_keep);
+ unsigned long flags;
+
+- mutex_lock(&fep->ptp_clk_mutex);
++ spin_lock_irqsave(&fep->tmreg_lock, flags);
+ if (fep->ptp_clk_on) {
+- spin_lock_irqsave(&fep->tmreg_lock, flags);
+ timecounter_read(&fep->tc);
+- spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ }
+- mutex_unlock(&fep->ptp_clk_mutex);
++ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+
+ schedule_delayed_work(&fep->time_keep, HZ);
+ }
+@@ -604,8 +598,6 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
+ }
+ fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed;
+
+- spin_lock_init(&fep->tmreg_lock);
+-
+ fec_ptp_start_cyclecounter(ndev);
+
+ INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);
+--
+2.35.1
+
--- /dev/null
+From 6f795b3cbc0b6225ad057083df357ec2d591371d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 12 Jul 2022 21:52:32 +0100
+Subject: net: introduce __skb_fill_page_desc_noacc
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+[ Upstream commit 84ce071e38a6e25ea3ea91188e5482ac1f17b3af ]
+
+Managed pages contain pinned userspace pages and are controlled by upper
+layers, so there is no need to track skb->pfmemalloc for them. Introduce
+a helper for filling frags that ignores page tracking; it'll be needed
+later.
+
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/skbuff.h | 28 +++++++++++++++++-----------
+ 1 file changed, 17 insertions(+), 11 deletions(-)
+
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index ae598ed86b50b..be7cc31d58961 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -2232,6 +2232,22 @@ static inline unsigned int skb_pagelen(const struct sk_buff *skb)
+ return skb_headlen(skb) + __skb_pagelen(skb);
+ }
+
++static inline void __skb_fill_page_desc_noacc(struct skb_shared_info *shinfo,
++ int i, struct page *page,
++ int off, int size)
++{
++ skb_frag_t *frag = &shinfo->frags[i];
++
++ /*
++ * Propagate page pfmemalloc to the skb if we can. The problem is
++ * that not all callers have unique ownership of the page but rely
++ * on page_is_pfmemalloc doing the right thing(tm).
++ */
++ frag->bv_page = page;
++ frag->bv_offset = off;
++ skb_frag_size_set(frag, size);
++}
++
+ /**
+ * __skb_fill_page_desc - initialise a paged fragment in an skb
+ * @skb: buffer containing fragment to be initialised
+@@ -2248,17 +2264,7 @@ static inline unsigned int skb_pagelen(const struct sk_buff *skb)
+ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
+ struct page *page, int off, int size)
+ {
+- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+-
+- /*
+- * Propagate page pfmemalloc to the skb if we can. The problem is
+- * that not all callers have unique ownership of the page but rely
+- * on page_is_pfmemalloc doing the right thing(tm).
+- */
+- frag->bv_page = page;
+- frag->bv_offset = off;
+- skb_frag_size_set(frag, size);
+-
++ __skb_fill_page_desc_noacc(skb_shinfo(skb), i, page, off, size);
+ page = compound_head(page);
+ if (page_is_pfmemalloc(page))
+ skb->pfmemalloc = true;
+--
+2.35.1
+
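+A hedged user-space sketch of the refactoring pattern used above: the
+frag-filling core is split into a helper that does no pfmemalloc
+accounting, and the original function becomes a thin wrapper that adds the
+accounting back. The struct names and fill_frag* helpers below are invented
+for the sketch, not kernel APIs.
+
+    #include <stdbool.h>
+    #include <stddef.h>
+    #include <stdio.h>
+
+    struct frag { void *page; size_t off; size_t size; };
+    struct buf  { struct frag frags[4]; bool pfmemalloc; };
+
+    /* Core: fill the descriptor only, no accounting (the _noacc variant). */
+    static void fill_frag_noacc(struct frag *f, void *page, size_t off,
+                                size_t size)
+    {
+        f->page = page;
+        f->off = off;
+        f->size = size;
+    }
+
+    /* Wrapper: fill, then propagate the emergency-reserve flag to the buffer. */
+    static void fill_frag(struct buf *b, int i, void *page, size_t off,
+                          size_t size, bool page_from_reserve)
+    {
+        fill_frag_noacc(&b->frags[i], page, off, size);
+        if (page_from_reserve)
+            b->pfmemalloc = true;
+    }
+
+    int main(void)
+    {
+        struct buf b = { 0 };
+        static char page[4096];
+
+        fill_frag(&b, 0, page, 0, sizeof(page), false);
+        printf("size=%zu pfmemalloc=%d\n", b.frags[0].size, b.pfmemalloc);
+        return 0;
+    }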
--- /dev/null
+From 2a2e0fd76da52410bdd474ce38040e2559145ca5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Aug 2022 22:36:03 -0700
+Subject: netfilter: br_netfilter: Drop dst references before setting.
+
+From: Harsh Modi <harshmodi@google.com>
+
+[ Upstream commit d047283a7034140ea5da759a494fd2274affdd46 ]
+
+The IPv6 path already drops dst in the daddr changed case, but the IPv4
+path does not. This change makes the two code paths consistent.
+
+Further, a metadata_dst allocated at ingress might already be attached to
+skb->dst while following the bridge path. If it is not released before a
+new metadata_dst is set, it will be leaked. This is similar to what is
+done in bpf_set_tunnel_key() or ip6_route_input().
+
+It is important to note that the memory being leaked is not the dst
+being set in the bridge code, but rather memory allocated from some
+other code path that is not being freed correctly before the skb dst is
+overwritten.
+
+An example of the leakage fixed by this commit found using kmemleak:
+
+unreferenced object 0xffff888010112b00 (size 256):
+ comm "softirq", pid 0, jiffies 4294762496 (age 32.012s)
+ hex dump (first 32 bytes):
+ 00 00 00 00 00 00 00 00 80 16 f1 83 ff ff ff ff ................
+ e1 4e f6 82 ff ff ff ff 00 00 00 00 00 00 00 00 .N..............
+ backtrace:
+ [<00000000d79567ea>] metadata_dst_alloc+0x1b/0xe0
+ [<00000000be113e13>] udp_tun_rx_dst+0x174/0x1f0
+ [<00000000a36848f4>] geneve_udp_encap_recv+0x350/0x7b0
+ [<00000000d4afb476>] udp_queue_rcv_one_skb+0x380/0x560
+ [<00000000ac064aea>] udp_unicast_rcv_skb+0x75/0x90
+ [<000000009a8ee8c5>] ip_protocol_deliver_rcu+0xd8/0x230
+ [<00000000ef4980bb>] ip_local_deliver_finish+0x7a/0xa0
+ [<00000000d7533c8c>] __netif_receive_skb_one_core+0x89/0xa0
+ [<00000000a879497d>] process_backlog+0x93/0x190
+ [<00000000e41ade9f>] __napi_poll+0x28/0x170
+ [<00000000b4c0906b>] net_rx_action+0x14f/0x2a0
+ [<00000000b20dd5d4>] __do_softirq+0xf4/0x305
+ [<000000003a7d7e15>] __irq_exit_rcu+0xc3/0x140
+ [<00000000968d39a2>] sysvec_apic_timer_interrupt+0x9e/0xc0
+ [<000000009e920794>] asm_sysvec_apic_timer_interrupt+0x16/0x20
+ [<000000008942add0>] native_safe_halt+0x13/0x20
+
+Florian Westphal says: "Original code was likely fine because nothing
+ever did set a skb->dst entry earlier than bridge in those days."
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Harsh Modi <harshmodi@google.com>
+Acked-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bridge/br_netfilter_hooks.c | 2 ++
+ net/bridge/br_netfilter_ipv6.c | 1 +
+ 2 files changed, 3 insertions(+)
+
+diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
+index 10a2c7bca7199..a718204c4bfdd 100644
+--- a/net/bridge/br_netfilter_hooks.c
++++ b/net/bridge/br_netfilter_hooks.c
+@@ -384,6 +384,7 @@ static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_
+ /* - Bridged-and-DNAT'ed traffic doesn't
+ * require ip_forwarding. */
+ if (rt->dst.dev == dev) {
++ skb_dst_drop(skb);
+ skb_dst_set(skb, &rt->dst);
+ goto bridged_dnat;
+ }
+@@ -413,6 +414,7 @@ static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_
+ kfree_skb(skb);
+ return 0;
+ }
++ skb_dst_drop(skb);
+ skb_dst_set_noref(skb, &rt->dst);
+ }
+
+diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
+index e4e0c836c3f51..6b07f30675bb0 100644
+--- a/net/bridge/br_netfilter_ipv6.c
++++ b/net/bridge/br_netfilter_ipv6.c
+@@ -197,6 +197,7 @@ static int br_nf_pre_routing_finish_ipv6(struct net *net, struct sock *sk, struc
+ kfree_skb(skb);
+ return 0;
+ }
++ skb_dst_drop(skb);
+ skb_dst_set_noref(skb, &rt->dst);
+ }
+
+--
+2.35.1
+
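+The essence of the fix above is "release whatever dst the skb already
+carries before installing a new one". Below is an illustrative,
+self-contained C sketch of that drop-before-set pattern with a toy
+refcounted object; dst_alloc, dst_put and pkt_set_dst are made-up names,
+not kernel APIs.
+
+    #include <stdlib.h>
+
+    struct dst { int refcnt; };
+    struct pkt { struct dst *dst; };
+
+    static struct dst *dst_alloc(void)
+    {
+        struct dst *d = calloc(1, sizeof(*d));
+
+        if (d)
+            d->refcnt = 1;
+        return d;
+    }
+
+    static void dst_put(struct dst *d)
+    {
+        if (d && --d->refcnt == 0)
+            free(d);
+    }
+
+    /* Consumes the caller's reference; drops the old one first. */
+    static void pkt_set_dst(struct pkt *p, struct dst *d)
+    {
+        dst_put(p->dst);        /* the skb_dst_drop() step the patch adds */
+        p->dst = d;
+    }
+
+    int main(void)
+    {
+        struct pkt p = { .dst = dst_alloc() };  /* e.g. a metadata dst from ingress */
+
+        pkt_set_dst(&p, dst_alloc());   /* without the drop, the first dst leaks */
+        dst_put(p.dst);
+        return 0;
+    }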
--- /dev/null
+From b2f21a27b67e294b2a40ab3ab3bd0c2f1ef5b1ed Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 26 Aug 2022 14:56:58 +1000
+Subject: netfilter: nf_conntrack_irc: Fix forged IP logic
+
+From: David Leadbeater <dgl@dgl.cx>
+
+[ Upstream commit 0efe125cfb99e6773a7434f3463f7c2fa28f3a43 ]
+
+Ensure the match happens in the right direction, previously the
+destination used was the server, not the NAT host, as the comment
+shows the code intended.
+
+Additionally nf_nat_irc uses port 0 as a signal and there's no valid way
+it can appear in a DCC message, so consider port 0 also forged.
+
+Fixes: 869f37d8e48f ("[NETFILTER]: nf_conntrack/nf_nat: add IRC helper port")
+Signed-off-by: David Leadbeater <dgl@dgl.cx>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_conntrack_irc.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
+index 08ee4e760a3d2..18b90e334b5bd 100644
+--- a/net/netfilter/nf_conntrack_irc.c
++++ b/net/netfilter/nf_conntrack_irc.c
+@@ -188,8 +188,9 @@ static int help(struct sk_buff *skb, unsigned int protoff,
+
+ /* dcc_ip can be the internal OR external (NAT'ed) IP */
+ tuple = &ct->tuplehash[dir].tuple;
+- if (tuple->src.u3.ip != dcc_ip &&
+- tuple->dst.u3.ip != dcc_ip) {
++ if ((tuple->src.u3.ip != dcc_ip &&
++ ct->tuplehash[!dir].tuple.dst.u3.ip != dcc_ip) ||
++ dcc_port == 0) {
+ net_warn_ratelimited("Forged DCC command from %pI4: %pI4:%u\n",
+ &tuple->src.u3.ip,
+ &dcc_ip, dcc_port);
+--
+2.35.1
+
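+As an illustration of the corrected check, the predicate below mirrors the
+new condition: the advertised DCC address must match either the sender's
+source IP or the reply-direction destination IP (the NAT host), and port 0
+is always treated as forged. The function and parameter names are invented
+for the sketch.
+
+    #include <stdbool.h>
+    #include <stdint.h>
+    #include <stdio.h>
+
+    /*
+     * dcc_ip/dcc_port: address advertised inside the DCC message.
+     * orig_src:        source IP of the original direction.
+     * repl_dst:        destination IP of the reply direction (the NAT host).
+     */
+    static bool dcc_forged(uint32_t dcc_ip, uint16_t dcc_port,
+                           uint32_t orig_src, uint32_t repl_dst)
+    {
+        return (dcc_ip != orig_src && dcc_ip != repl_dst) || dcc_port == 0;
+    }
+
+    int main(void)
+    {
+        printf("%d\n", dcc_forged(0x0a000001, 6667, 0x0a000001, 0xc0a80001)); /* 0 */
+        printf("%d\n", dcc_forged(0x0a000001, 0,    0x0a000001, 0xc0a80001)); /* 1 */
+        printf("%d\n", dcc_forged(0x0a000002, 6667, 0x0a000001, 0xc0a80001)); /* 1 */
+        return 0;
+    }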
--- /dev/null
+From bdc18b9ebc7099cc56759dfe756da1745c52d9e8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 31 Aug 2022 13:11:47 +0200
+Subject: netfilter: nf_tables: clean up hook list when offload flags check
+ fails
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit 77972a36ecc4db7fc7c68f0e80714263c5f03f65 ]
+
+Splice back the hook list so that nft_chain_release_hook() has a chance
+to release the hooks.
+
+BUG: memory leak
+unreferenced object 0xffff88810180b100 (size 96):
+ comm "syz-executor133", pid 3619, jiffies 4294945714 (age 12.690s)
+ hex dump (first 32 bytes):
+ 28 64 23 02 81 88 ff ff 28 64 23 02 81 88 ff ff (d#.....(d#.....
+ 90 a8 aa 83 ff ff ff ff 00 00 b5 0f 81 88 ff ff ................
+ backtrace:
+ [<ffffffff83a8c59b>] kmalloc include/linux/slab.h:600 [inline]
+ [<ffffffff83a8c59b>] nft_netdev_hook_alloc+0x3b/0xc0 net/netfilter/nf_tables_api.c:1901
+ [<ffffffff83a9239a>] nft_chain_parse_netdev net/netfilter/nf_tables_api.c:1998 [inline]
+ [<ffffffff83a9239a>] nft_chain_parse_hook+0x33a/0x530 net/netfilter/nf_tables_api.c:2073
+ [<ffffffff83a9b14b>] nf_tables_addchain.constprop.0+0x10b/0x950 net/netfilter/nf_tables_api.c:2218
+ [<ffffffff83a9c41b>] nf_tables_newchain+0xa8b/0xc60 net/netfilter/nf_tables_api.c:2593
+ [<ffffffff83a3d6a6>] nfnetlink_rcv_batch+0xa46/0xd20 net/netfilter/nfnetlink.c:517
+ [<ffffffff83a3db79>] nfnetlink_rcv_skb_batch net/netfilter/nfnetlink.c:638 [inline]
+ [<ffffffff83a3db79>] nfnetlink_rcv+0x1f9/0x220 net/netfilter/nfnetlink.c:656
+ [<ffffffff83a13b17>] netlink_unicast_kernel net/netlink/af_netlink.c:1319 [inline]
+ [<ffffffff83a13b17>] netlink_unicast+0x397/0x4c0 net/netlink/af_netlink.c:1345
+ [<ffffffff83a13fd6>] netlink_sendmsg+0x396/0x710 net/netlink/af_netlink.c:1921
+ [<ffffffff83865ab6>] sock_sendmsg_nosec net/socket.c:714 [inline]
+ [<ffffffff83865ab6>] sock_sendmsg+0x56/0x80 net/socket.c:734
+ [<ffffffff8386601c>] ____sys_sendmsg+0x36c/0x390 net/socket.c:2482
+ [<ffffffff8386a918>] ___sys_sendmsg+0xa8/0x110 net/socket.c:2536
+ [<ffffffff8386aaa8>] __sys_sendmsg+0x88/0x100 net/socket.c:2565
+ [<ffffffff845e5955>] do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+ [<ffffffff845e5955>] do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80
+ [<ffffffff84800087>] entry_SYSCALL_64_after_hwframe+0x63/0xcd
+
+Fixes: d54725cd11a5 ("netfilter: nf_tables: support for multiple devices per netdev hook")
+Reported-by: syzbot+5fcdbfab6d6744c57418@syzkaller.appspotmail.com
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_tables_api.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index d8ca55d6be409..d35d09df83fee 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -2072,8 +2072,10 @@ static int nft_basechain_init(struct nft_base_chain *basechain, u8 family,
+ chain->flags |= NFT_CHAIN_BASE | flags;
+ basechain->policy = NF_ACCEPT;
+ if (chain->flags & NFT_CHAIN_HW_OFFLOAD &&
+- !nft_chain_offload_support(basechain))
++ !nft_chain_offload_support(basechain)) {
++ list_splice_init(&basechain->hook_list, &hook->list);
+ return -EOPNOTSUPP;
++ }
+
+ flow_block_init(&basechain->flow_block);
+
+--
+2.35.1
+
--- /dev/null
+From 68ce0a0278525646fc95bfafc41e43393cba4aaa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Sep 2022 18:07:06 +0300
+Subject: nvme-tcp: fix regression that causes sporadic requests to time out
+
+From: Sagi Grimberg <sagi@grimberg.me>
+
+[ Upstream commit 3770a42bb8ceb856877699257a43c0585a5d2996 ]
+
+When we queue requests, we strive to batch as much as possible and also
+signal the network stack that more data is about to be sent over a socket
+with MSG_SENDPAGE_NOTLAST. This flag looks at the pending requests queued
+as well as queue->more_requests that is derived from the block layer
+last-in-batch indication.
+
+We set more_requests=true when we flush the request directly from the
+.queue_rq submission context (in nvme_tcp_send_all), however this
+wrongly assumes that no other requests may be queued during the
+execution of nvme_tcp_send_all.
+
+Due to this, a race condition may happen where:
+
+ 1. request X is queued as !last-in-batch
+ 2. request X submission context calls nvme_tcp_send_all directly
+ 3. nvme_tcp_send_all is preempted and schedules to a different cpu
+ 4. request Y is queued as last-in-batch
+ 5. nvme_tcp_send_all context sends requests X+Y, but signals
+ MSG_SENDPAGE_NOTLAST for both because queue->more_requests=true.
+
+==> neither request is pushed down to the wire as the network
+stack is waiting for more data, so both requests time out.
+
+To fix this, eliminate queue->more_requests and rely only on
+the queue req_list and send_list being non-empty.
+
+Fixes: 122e5b9f3d37 ("nvme-tcp: optimize network stack with setting msg flags according to batch size")
+Reported-by: Jonathan Nicklin <jnicklin@blockbridge.com>
+Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
+Tested-by: Jonathan Nicklin <jnicklin@blockbridge.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/tcp.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index 2c6e031135716..96d8d7844e846 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -119,7 +119,6 @@ struct nvme_tcp_queue {
+ struct mutex send_mutex;
+ struct llist_head req_list;
+ struct list_head send_list;
+- bool more_requests;
+
+ /* recv state */
+ void *pdu;
+@@ -315,7 +314,7 @@ static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
+ static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
+ {
+ return !list_empty(&queue->send_list) ||
+- !llist_empty(&queue->req_list) || queue->more_requests;
++ !llist_empty(&queue->req_list);
+ }
+
+ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
+@@ -334,9 +333,7 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
+ */
+ if (queue->io_cpu == raw_smp_processor_id() &&
+ sync && empty && mutex_trylock(&queue->send_mutex)) {
+- queue->more_requests = !last;
+ nvme_tcp_send_all(queue);
+- queue->more_requests = false;
+ mutex_unlock(&queue->send_mutex);
+ }
+
+--
+2.35.1
+
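+A minimal sketch of the idea behind the fix, assuming nothing beyond what
+the commit message states: "more to send" is derived from the shared queue
+state itself rather than from a flag set around the send loop, so a
+concurrent submitter cannot leave a stale value behind. The struct and
+field names below are stand-ins, not driver code.
+
+    #include <stdbool.h>
+    #include <stdio.h>
+
+    struct queue {
+        int req_list_len;    /* stands in for the llist of newly queued requests */
+        int send_list_len;   /* stands in for the list the sender is draining */
+    };
+
+    /* Derive "more data follows" from queue state alone. */
+    static bool queue_more(const struct queue *q)
+    {
+        return q->send_list_len > 0 || q->req_list_len > 0;
+    }
+
+    int main(void)
+    {
+        struct queue q = { .req_list_len = 1 };
+
+        printf("more=%d\n", queue_more(&q));   /* 1: request Y arrived meanwhile */
+        q.req_list_len = 0;
+        printf("more=%d\n", queue_more(&q));   /* 0: nothing pending, push it out */
+        return 0;
+    }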
--- /dev/null
+From f24eaa8d302ca3c95c5c618aa710536e439a3923 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Sep 2022 13:54:17 +0300
+Subject: nvme-tcp: fix UAF when detecting digest errors
+
+From: Sagi Grimberg <sagi@grimberg.me>
+
+[ Upstream commit 160f3549a907a50e51a8518678ba2dcf2541abea ]
+
+We should also bail from the io_work loop when we set rd_enabled to true,
+so we don't attempt to read data from the socket when the TCP stream is
+already out-of-sync or corrupted.
+
+Fixes: 3f2304f8c6d6 ("nvme-tcp: add NVMe over TCP host driver")
+Reported-by: Daniel Wagner <dwagner@suse.de>
+Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
+Reviewed-by: Daniel Wagner <dwagner@suse.de>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/tcp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index 20138e132558c..2c6e031135716 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -1209,7 +1209,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
+ else if (unlikely(result < 0))
+ return;
+
+- if (!pending)
++ if (!pending || !queue->rd_enabled)
+ return;
+
+ } while (!time_after(jiffies, deadline)); /* quota is exhausted */
+--
+2.35.1
+
--- /dev/null
+From 10538ebde8e4b278b6074c7cddd236530d328db5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Sep 2022 09:39:28 +0200
+Subject: nvmet: fix mar and mor off-by-one errors
+
+From: Dennis Maisenbacher <dennis.maisenbacher@wdc.com>
+
+[ Upstream commit b7e97872a65e1d57b4451769610554c131f37a0a ]
+
+Maximum Active Resources (MAR) and Maximum Open Resources (MOR) are 0's
+based values where a value of 0xffffffff indicates that there is no limit.
+
+Decrement the values that are returned by bdev_max_open_zones and
+bdev_max_active_zones as the block layer helpers are not 0's based.
+A 0 returned by the block layer helpers indicates no limit, thus convert
+it to 0xffffffff (U32_MAX).
+
+Fixes: aaf2e048af27 ("nvmet: add ZBD over ZNS backend support")
+Suggested-by: Niklas Cassel <niklas.cassel@wdc.com>
+Signed-off-by: Dennis Maisenbacher <dennis.maisenbacher@wdc.com>
+Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/target/zns.c | 17 +++++++++++++++--
+ 1 file changed, 15 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
+index 235553337fb2d..1466698751c55 100644
+--- a/drivers/nvme/target/zns.c
++++ b/drivers/nvme/target/zns.c
+@@ -100,6 +100,7 @@ void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
+ struct nvme_id_ns_zns *id_zns;
+ u64 zsze;
+ u16 status;
++ u32 mar, mor;
+
+ if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
+ req->error_loc = offsetof(struct nvme_identify, nsid);
+@@ -126,8 +127,20 @@ void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
+ zsze = (bdev_zone_sectors(req->ns->bdev) << 9) >>
+ req->ns->blksize_shift;
+ id_zns->lbafe[0].zsze = cpu_to_le64(zsze);
+- id_zns->mor = cpu_to_le32(bdev_max_open_zones(req->ns->bdev));
+- id_zns->mar = cpu_to_le32(bdev_max_active_zones(req->ns->bdev));
++
++ mor = bdev_max_open_zones(req->ns->bdev);
++ if (!mor)
++ mor = U32_MAX;
++ else
++ mor--;
++ id_zns->mor = cpu_to_le32(mor);
++
++ mar = bdev_max_active_zones(req->ns->bdev);
++ if (!mar)
++ mar = U32_MAX;
++ else
++ mar--;
++ id_zns->mar = cpu_to_le32(mar);
+
+ done:
+ status = nvmet_copy_to_sgl(req, 0, id_zns, sizeof(*id_zns));
+--
+2.35.1
+
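+For illustration, the conversion the patch performs can be captured in one
+small helper: the block layer reports 0 for "no limit" and a 1-based count
+otherwise, while the ZNS MAR/MOR fields are 0's based with 0xffffffff
+meaning "no limit". This is a stand-alone sketch, not driver code.
+
+    #include <stdint.h>
+    #include <stdio.h>
+
+    /* Block layer limit -> ZNS MAR/MOR encoding. */
+    static uint32_t to_zns_limit(uint32_t blk_limit)
+    {
+        return blk_limit ? blk_limit - 1 : UINT32_MAX;
+    }
+
+    int main(void)
+    {
+        printf("%#x\n", to_zns_limit(0));    /* 0xffffffff: unlimited */
+        printf("%u\n",  to_zns_limit(14));   /* 13: up to 14 open/active zones */
+        return 0;
+    }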
--- /dev/null
+From 19675bb560e839a2e3ba909fba76f11ae33eedb7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Sep 2022 15:00:30 +0800
+Subject: perf script: Fix Cannot print 'iregs' field for hybrid systems
+
+From: Zhengjun Xing <zhengjun.xing@linux.intel.com>
+
+[ Upstream commit 82b2425fad2dd47204b3da589b679220f8aacc0e ]
+
+Commit b91e5492f9d7ca89 ("perf record: Add a dummy event on hybrid
+systems to collect metadata records") adds a dummy event on hybrid
+systems to fix the symbol "unknown" issue when the workload is created
+in a P-core but runs on an E-core. The added dummy event will cause
+"perf script -F iregs" to fail. Dummy events do not have "iregs"
+attribute set, so when we do evsel__check_attr, the "iregs" attribute
+check will fail, so the issue happened.
+
+The following commit [1] has fixed a similar issue by skipping the attr
+check for the dummy event because it does not have any samples anyway. It
+works okay for the normal mode, but the issue still happened when running
+the test in the pipe mode. In the pipe mode, it calls process_attr() which
+still checks the attr for the dummy event. This commit fixes the issue by
+skipping the attr check for the dummy event inside evsel__check_attr()
+itself; otherwise we would have to patch every place that calls
+evsel__check_attr().
+
+Before:
+
+ #./perf record -o - --intr-regs=di,r8,dx,cx -e br_inst_retired.near_call:p -c 1000 --per-thread true 2>/dev/null|./perf script -F iregs |head -5
+ Samples for 'dummy:HG' event do not have IREGS attribute set. Cannot print 'iregs' field.
+ 0x120 [0x90]: failed to process type: 64
+ #
+
+After:
+
+ # ./perf record -o - --intr-regs=di,r8,dx,cx -e br_inst_retired.near_call:p -c 1000 --per-thread true 2>/dev/null|./perf script -F iregs |head -5
+ ABI:2 CX:0x55b8efa87000 DX:0x55b8efa7e000 DI:0xffffba5e625efbb0 R8:0xffff90e51f8ae100
+ ABI:2 CX:0x7f1dae1e4000 DX:0xd0 DI:0xffff90e18c675ac0 R8:0x71
+ ABI:2 CX:0xcc0 DX:0x1 DI:0xffff90e199880240 R8:0x0
+ ABI:2 CX:0xffff90e180dd7500 DX:0xffff90e180dd7500 DI:0xffff90e180043500 R8:0x1
+ ABI:2 CX:0x50 DX:0xffff90e18c583bd0 DI:0xffff90e1998803c0 R8:0x58
+ #
+
+[1]https://lore.kernel.org/lkml/20220831124041.219925-1-jolsa@kernel.org/
+
+Fixes: b91e5492f9d7ca89 ("perf record: Add a dummy event on hybrid systems to collect metadata records")
+Suggested-by: Namhyung Kim <namhyung@kernel.org>
+Signed-off-by: Xing Zhengjun <zhengjun.xing@linux.intel.com>
+Acked-by: Jiri Olsa <jolsa@kernel.org>
+Cc: Alexander Shishkin <alexander.shishkin@intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Ian Rogers <irogers@google.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Kan Liang <kan.liang@linux.intel.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Link: https://lore.kernel.org/r/20220908070030.3455164-1-zhengjun.xing@linux.intel.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/builtin-script.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
+index cb3d81adf5ca8..c6c40191933d4 100644
+--- a/tools/perf/builtin-script.c
++++ b/tools/perf/builtin-script.c
+@@ -435,6 +435,9 @@ static int evsel__check_attr(struct evsel *evsel, struct perf_session *session)
+ struct perf_event_attr *attr = &evsel->core.attr;
+ bool allow_user_set;
+
++ if (evsel__is_dummy_event(evsel))
++ return 0;
++
+ if (perf_header__has_feat(&session->header, HEADER_STAT))
+ return 0;
+
+--
+2.35.1
+
--- /dev/null
+From fca0e4ff5004bca9888ee3cd60d7ff3ef643842e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Aug 2022 13:51:50 +0300
+Subject: RDMA/cma: Fix arguments order in net device validation
+
+From: Michael Guralnik <michaelgur@nvidia.com>
+
+[ Upstream commit 27cfde795a96aef1e859a5480489944b95421e46 ]
+
+Fix the order of source and destination addresses when resolving the
+route between server and client to validate the use of the correct
+net device.
+
+The reverse order we had so far didn't actually validate the net device
+as the server would try to resolve the route to itself, thus always
+getting the server's net device.
+
+The issue was discovered when running cm applications on a single host
+between 2 interfaces with same subnet and source based routing rules.
+When resolving the reverse route the source based route rules were
+ignored.
+
+Fixes: f887f2ac87c2 ("IB/cma: Validate routing of incoming requests")
+Link: https://lore.kernel.org/r/1c1ec2277a131d277ebcceec987fd338d35b775f.1661251872.git.leonro@nvidia.com
+Signed-off-by: Michael Guralnik <michaelgur@nvidia.com>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/core/cma.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index a814dabcdff43..0da66dd40d6a8 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -1718,8 +1718,8 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id,
+ }
+
+ if (!validate_net_dev(*net_dev,
+- (struct sockaddr *)&req->listen_addr_storage,
+- (struct sockaddr *)&req->src_addr_storage)) {
++ (struct sockaddr *)&req->src_addr_storage,
++ (struct sockaddr *)&req->listen_addr_storage)) {
+ id_priv = ERR_PTR(-EHOSTUNREACH);
+ goto err;
+ }
+--
+2.35.1
+
--- /dev/null
+From 2cc01284e8ac2300df680c224fafbe2eb20ebf29 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 29 Aug 2022 18:50:18 +0800
+Subject: RDMA/hns: Fix supported page size
+
+From: Chengchang Tang <tangchengchang@huawei.com>
+
+[ Upstream commit 55af9d498556f0860eb89ffa7677e8d73f6f643f ]
+
+The supported page size for hns is (4K, 128M), not (4K, 2G).
+
+Fixes: cfc85f3e4b7f ("RDMA/hns: Add profile support for hip08 driver")
+Link: https://lore.kernel.org/r/20220829105021.1427804-2-liangwenpeng@huawei.com
+Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
+Signed-off-by: Wenpeng Liang <liangwenpeng@huawei.com>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+index df4501e77fd17..d3d5b5f57052c 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -98,7 +98,7 @@
+
+ #define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ PAGE_SIZE
+ #define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ PAGE_SIZE
+-#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000
++#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFF000
+ #define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2
+ #define HNS_ROCE_INVALID_LKEY 0x0
+ #define HNS_ROCE_INVALID_SGE_LENGTH 0x80000000
+--
+2.35.1
+
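+The constant appears to be a bitmap in which a set bit n means a page size
+of 2^n bytes is supported, so 0xFFFFF000 covers 4K through 2G while
+0xFFFF000 stops at 128M, matching the commit message. A small stand-alone
+sketch that decodes such a mask (the interpretation is assumed from the
+commit message, not taken from the driver sources):
+
+    #include <stdint.h>
+    #include <stdio.h>
+
+    /* Print every page size encoded in a "bit n == 2^n bytes" mask. */
+    static void print_page_sizes(uint32_t mask)
+    {
+        for (int n = 0; n < 32; n++)
+            if (mask & (UINT32_C(1) << n))
+                printf("%lluK ", (1ULL << n) / 1024);
+        printf("\n");
+    }
+
+    int main(void)
+    {
+        print_page_sizes(0xFFFFF000);   /* old value: 4K ... 2097152K (2G) */
+        print_page_sizes(0x0FFFF000);   /* new value: 4K ... 131072K (128M) */
+        return 0;
+    }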
--- /dev/null
+From 8e09db802ea046e9e980a7b216d0d36b7558f9ce Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 29 Aug 2022 18:50:19 +0800
+Subject: RDMA/hns: Fix wrong fixed value of qp->rq.wqe_shift
+
+From: Wenpeng Liang <liangwenpeng@huawei.com>
+
+[ Upstream commit 0c8b5d6268d92d141bfd64d21c870d295a84dee1 ]
+
+The value of qp->rq.wqe_shift of HIP08 is always determined by the number
+of sge. So delete the wrong branch.
+
+Fixes: cfc85f3e4b7f ("RDMA/hns: Add profile support for hip08 driver")
+Fixes: 926a01dc000d ("RDMA/hns: Add QP operations support for hip08 SoC")
+Link: https://lore.kernel.org/r/20220829105021.1427804-3-liangwenpeng@huawei.com
+Signed-off-by: Wenpeng Liang <liangwenpeng@huawei.com>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/hns/hns_roce_qp.c | 7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
+index 9af4509894e68..5d50d2d1deca9 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -495,11 +495,8 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
+ hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge) +
+ hr_qp->rq.rsv_sge);
+
+- if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE)
+- hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
+- else
+- hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz *
+- hr_qp->rq.max_gs);
++ hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz *
++ hr_qp->rq.max_gs);
+
+ hr_qp->rq.wqe_cnt = cnt;
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE &&
+--
+2.35.1
+
--- /dev/null
+From 1ade396a5cadc6f1624f9092b360b51f4f800232 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Sep 2022 17:32:44 -0500
+Subject: RDMA/irdma: Report RNR NAK generation in device caps
+
+From: Sindhu-Devale <sindhu.devale@intel.com>
+
+[ Upstream commit a261786fdc0a5bed2e5f994dcc0ffeeeb0d662c7 ]
+
+Report RNR NAK generation when device capabilities are queried.
+
+Fixes: b48c24c2d710 ("RDMA/irdma: Implement device supported verb APIs")
+Signed-off-by: Sindhu-Devale <sindhu.devale@intel.com>
+Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Link: https://lore.kernel.org/r/20220906223244.1119-6-shiraz.saleem@intel.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/irdma/verbs.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
+index adb0e0774256c..5275616398d83 100644
+--- a/drivers/infiniband/hw/irdma/verbs.c
++++ b/drivers/infiniband/hw/irdma/verbs.c
+@@ -43,8 +43,11 @@ static int irdma_query_device(struct ib_device *ibdev,
+ props->max_sge_rd = hw_attrs->uk_attrs.max_hw_read_sges;
+ props->max_qp_rd_atom = hw_attrs->max_hw_ird;
+ props->max_qp_init_rd_atom = hw_attrs->max_hw_ord;
+- if (rdma_protocol_roce(ibdev, 1))
++ if (rdma_protocol_roce(ibdev, 1)) {
++ props->device_cap_flags |= IB_DEVICE_RC_RNR_NAK_GEN;
+ props->max_pkeys = IRDMA_PKEY_TBL_SZ;
++ }
++
+ props->max_ah = rf->max_ah;
+ props->max_mcast_grp = rf->max_mcg;
+ props->max_mcast_qp_attach = IRDMA_MAX_MGS_PER_CTX;
+--
+2.35.1
+
--- /dev/null
+From 5796523bd011bcf951f36b8a95c14c3b30326639 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Sep 2022 17:32:40 -0500
+Subject: RDMA/irdma: Report the correct max cqes from query device
+
+From: Sindhu-Devale <sindhu.devale@intel.com>
+
+[ Upstream commit 12faad5e5cf2372af2d51f348b697b5edf838daf ]
+
+Report the correct max cqes available to an application taking
+into account a reserved entry to detect overflow.
+
+Fixes: b48c24c2d710 ("RDMA/irdma: Implement device supported verb APIs")
+Signed-off-by: Sindhu-Devale <sindhu.devale@intel.com>
+Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Link: https://lore.kernel.org/r/20220906223244.1119-2-shiraz.saleem@intel.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/irdma/verbs.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
+index cac4fb228b9b0..adb0e0774256c 100644
+--- a/drivers/infiniband/hw/irdma/verbs.c
++++ b/drivers/infiniband/hw/irdma/verbs.c
+@@ -36,7 +36,7 @@ static int irdma_query_device(struct ib_device *ibdev,
+ props->max_send_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
+ props->max_recv_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
+ props->max_cq = rf->max_cq - rf->used_cqs;
+- props->max_cqe = rf->max_cqe;
++ props->max_cqe = rf->max_cqe - 1;
+ props->max_mr = rf->max_mr - rf->used_mrs;
+ props->max_mw = props->max_mr;
+ props->max_pd = rf->max_pd - rf->used_pds;
+--
+2.35.1
+
--- /dev/null
+From 57df62836e49cef26991f0e7c9d68d844c75154e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Sep 2022 17:32:42 -0500
+Subject: RDMA/irdma: Return correct WC error for bind operation failure
+
+From: Sindhu-Devale <sindhu.devale@intel.com>
+
+[ Upstream commit dcb23bbb1de7e009875fdfac2b8a9808a9319cc6 ]
+
+When a QP and a MR on a local host are in different PDs, the HW generates
+an asynchronous event (AE). The same AE is generated when a QP and a MW
+are in different PDs during a bind operation. Return the more appropriate
+IBV_WC_MW_BIND_ERR for the latter case by checking the OP type from the
+CQE in error.
+
+Fixes: 551c46edc769 ("RDMA/irdma: Add user/kernel shared libraries")
+Signed-off-by: Sindhu-Devale <sindhu.devale@intel.com>
+Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Link: https://lore.kernel.org/r/20220906223244.1119-4-shiraz.saleem@intel.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/irdma/uk.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c
+index 9b544a3b12886..7e6c3ba8df6ab 100644
+--- a/drivers/infiniband/hw/irdma/uk.c
++++ b/drivers/infiniband/hw/irdma/uk.c
+@@ -1068,6 +1068,7 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info)
+ enum irdma_status_code ret_code;
+ bool move_cq_head = true;
+ u8 polarity;
++ u8 op_type;
+ bool ext_valid;
+ __le64 *ext_cqe;
+
+@@ -1250,7 +1251,6 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info)
+ do {
+ __le64 *sw_wqe;
+ u64 wqe_qword;
+- u8 op_type;
+ u32 tail;
+
+ tail = qp->sq_ring.tail;
+@@ -1267,6 +1267,8 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info)
+ break;
+ }
+ } while (1);
++ if (op_type == IRDMA_OP_TYPE_BIND_MW && info->minor_err == FLUSH_PROT_ERR)
++ info->minor_err = FLUSH_MW_BIND_ERR;
+ qp->sq_flush_seen = true;
+ if (!IRDMA_RING_MORE_WORK(qp->sq_ring))
+ qp->sq_flush_complete = true;
+--
+2.35.1
+
--- /dev/null
+From 2484851d22b6893682c2eafaa38134531adaf9e5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 29 Aug 2022 12:02:28 +0300
+Subject: RDMA/mlx5: Set local port to one when accessing counters
+
+From: Chris Mi <cmi@nvidia.com>
+
+[ Upstream commit 74b30b3ad5cec95d2647e796d10137438a098bc1 ]
+
+When accessing the Ports Performance Counters Register (PPCNT),
+the local port must be one on a Function-Per-Port HCA, i.e. when
+HCA_CAP.num_ports is 1.
+
+The offending patch can change the local port to other values
+when accessing PPCNT after enabling switchdev mode. The following
+syndrome will be printed:
+
+ # cat /sys/class/infiniband/rdmap4s0f0/ports/2/counters/*
+ # dmesg
+ mlx5_core 0000:04:00.0: mlx5_cmd_check:756:(pid 12450): ACCESS_REG(0x805) op_mod(0x1) failed, status bad parameter(0x3), syndrome (0x1e5585)
+
+Fix it by setting local port to one for Function-Per-Port HCA.
+
+Fixes: 210b1f78076f ("IB/mlx5: When not in dual port RoCE mode, use provided port as native")
+Reviewed-by: Mark Bloch <mbloch@nvidia.com>
+Signed-off-by: Chris Mi <cmi@nvidia.com>
+Link: https://lore.kernel.org/r/6c5086c295c76211169e58dbd610fb0402360bab.1661763459.git.leonro@nvidia.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/mlx5/mad.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
+index ec242a5a17a35..f6f2df855c2ed 100644
+--- a/drivers/infiniband/hw/mlx5/mad.c
++++ b/drivers/infiniband/hw/mlx5/mad.c
+@@ -166,6 +166,12 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u32 port_num,
+ mdev = dev->mdev;
+ mdev_port_num = 1;
+ }
++ if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1) {
++ /* set local port to one for Function-Per-Port HCA. */
++ mdev = dev->mdev;
++ mdev_port_num = 1;
++ }
++
+ /* Declaring support of extended counters */
+ if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) {
+ struct ib_class_port_info cpi = {};
+--
+2.35.1
+
--- /dev/null
+From 8cc83398850d3935518cb7c4d21b4ffbe2e4465d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 18 Aug 2022 12:53:54 +0200
+Subject: RDMA/rtrs-clt: Use the right sg_cnt after ib_dma_map_sg
+
+From: Jack Wang <jinpu.wang@ionos.com>
+
+[ Upstream commit b66905e04dc714825aa6cffb950e281b46bbeafe ]
+
+When iommu is enabled, we hit warnings like this:
+WARNING: at rtrs/rtrs.c:178 rtrs_iu_post_rdma_write_imm+0x9b/0x110
+
+rtrs warns when an sge entry length is 0, which is unexpected.
+
+The problem is ib_dma_map_sg augments the SGL into a 'dma mapped SGL'.
+This process may change the number of entries and the lengths of each
+entry.
+
+Code that touches dma_address is iterating over the 'dma mapped SGL'
+and must use dma_nents, which is returned from ib_dma_map_sg().
+So pass along the count returned by ib_dma_map_sg().
+
+Fixes: 6a98d71daea1 ("RDMA/rtrs: client: main functionality")
+Link: https://lore.kernel.org/r/20220818105355.110344-3-haris.iqbal@ionos.com
+Signed-off-by: Jack Wang <jinpu.wang@ionos.com>
+Reviewed-by: Aleksei Marov <aleksei.marov@ionos.com>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/ulp/rtrs/rtrs-clt.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+index 9edbb309b96c0..c644617725a88 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+@@ -1011,7 +1011,8 @@ rtrs_clt_get_copy_req(struct rtrs_clt_path *alive_path,
+ static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
+ struct rtrs_clt_io_req *req,
+ struct rtrs_rbuf *rbuf, bool fr_en,
+- u32 size, u32 imm, struct ib_send_wr *wr,
++ u32 count, u32 size, u32 imm,
++ struct ib_send_wr *wr,
+ struct ib_send_wr *tail)
+ {
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+@@ -1031,12 +1032,12 @@ static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
+ num_sge = 2;
+ ptail = tail;
+ } else {
+- for_each_sg(req->sglist, sg, req->sg_cnt, i) {
++ for_each_sg(req->sglist, sg, count, i) {
+ sge[i].addr = sg_dma_address(sg);
+ sge[i].length = sg_dma_len(sg);
+ sge[i].lkey = clt_path->s.dev->ib_pd->local_dma_lkey;
+ }
+- num_sge = 1 + req->sg_cnt;
++ num_sge = 1 + count;
+ }
+ sge[i].addr = req->iu->dma_addr;
+ sge[i].length = size;
+@@ -1149,7 +1150,7 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
+ */
+ rtrs_clt_update_all_stats(req, WRITE);
+
+- ret = rtrs_post_rdma_write_sg(req->con, req, rbuf, fr_en,
++ ret = rtrs_post_rdma_write_sg(req->con, req, rbuf, fr_en, count,
+ req->usr_len + sizeof(*msg),
+ imm, wr, &inv_wr);
+ if (ret) {
+--
+2.35.1
+
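+To make the dma_nents point concrete, here is a self-contained sketch in
+which a toy "mapping" step coalesces adjacent entries and returns a new
+count; any later iteration must use that returned count, never the
+original number of entries. map_entries and struct ent are invented for
+the sketch and are not RDMA APIs.
+
+    #include <stddef.h>
+    #include <stdio.h>
+
+    struct ent { unsigned long addr; size_t len; };
+
+    /* Toy "mapping": merge adjacent entries, return the new count. */
+    static int map_entries(struct ent *e, int n)
+    {
+        int out = 0;
+
+        for (int i = 0; i < n; i++) {
+            if (out && e[out - 1].addr + e[out - 1].len == e[i].addr)
+                e[out - 1].len += e[i].len;
+            else
+                e[out++] = e[i];
+        }
+        return out;
+    }
+
+    int main(void)
+    {
+        struct ent sg[3] = { {0x1000, 0x1000}, {0x2000, 0x1000}, {0x9000, 0x1000} };
+        int count = map_entries(sg, 3);
+
+        /* Iterate with the returned count, never the original 3. */
+        for (int i = 0; i < count; i++)
+            printf("entry %d: addr=%#lx len=%#zx\n", i, sg[i].addr, sg[i].len);
+        return 0;
+    }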
--- /dev/null
+From f10c98d3a0dd60a41a82caf70b3cf1b488906669 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 18 Aug 2022 12:53:55 +0200
+Subject: RDMA/rtrs-srv: Pass the correct number of entries for dma mapped SGL
+
+From: Jack Wang <jinpu.wang@ionos.com>
+
+[ Upstream commit 56c310de0b4b3aca1c4fdd9c1093fc48372a7335 ]
+
+ib_dma_map_sg() augments the SGL into a 'dma mapped SGL'. This process
+may change the number of entries and the lengths of each entry.
+
+Code that touches dma_address is iterating over the 'dma mapped SGL'
+and must use dma_nents, which is returned from ib_dma_map_sg().
+
+We should use the count returned by ib_dma_map_sg() for further usage.
+
+Fixes: 9cb837480424e ("RDMA/rtrs: server: main functionality")
+Link: https://lore.kernel.org/r/20220818105355.110344-4-haris.iqbal@ionos.com
+Signed-off-by: Jack Wang <jinpu.wang@ionos.com>
+Reviewed-by: Aleksei Marov <aleksei.marov@ionos.com>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/ulp/rtrs/rtrs-srv.c | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+index 1ca31b919e987..733116554e0bc 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+@@ -600,7 +600,7 @@ static int map_cont_bufs(struct rtrs_srv_path *srv_path)
+ struct sg_table *sgt = &srv_mr->sgt;
+ struct scatterlist *s;
+ struct ib_mr *mr;
+- int nr, chunks;
++ int nr, nr_sgt, chunks;
+
+ chunks = chunks_per_mr * mri;
+ if (!always_invalidate)
+@@ -615,19 +615,19 @@ static int map_cont_bufs(struct rtrs_srv_path *srv_path)
+ sg_set_page(s, srv->chunks[chunks + i],
+ max_chunk_size, 0);
+
+- nr = ib_dma_map_sg(srv_path->s.dev->ib_dev, sgt->sgl,
++ nr_sgt = ib_dma_map_sg(srv_path->s.dev->ib_dev, sgt->sgl,
+ sgt->nents, DMA_BIDIRECTIONAL);
+- if (nr < sgt->nents) {
+- err = nr < 0 ? nr : -EINVAL;
++ if (!nr_sgt) {
++ err = -EINVAL;
+ goto free_sg;
+ }
+ mr = ib_alloc_mr(srv_path->s.dev->ib_pd, IB_MR_TYPE_MEM_REG,
+- sgt->nents);
++ nr_sgt);
+ if (IS_ERR(mr)) {
+ err = PTR_ERR(mr);
+ goto unmap_sg;
+ }
+- nr = ib_map_mr_sg(mr, sgt->sgl, sgt->nents,
++ nr = ib_map_mr_sg(mr, sgt->sgl, nr_sgt,
+ NULL, max_chunk_size);
+ if (nr < 0 || nr < sgt->nents) {
+ err = nr < 0 ? nr : -EINVAL;
+@@ -646,7 +646,7 @@ static int map_cont_bufs(struct rtrs_srv_path *srv_path)
+ }
+ }
+ /* Eventually dma addr for each chunk can be cached */
+- for_each_sg(sgt->sgl, s, sgt->orig_nents, i)
++ for_each_sg(sgt->sgl, s, nr_sgt, i)
+ srv_path->dma_addr[chunks + i] = sg_dma_address(s);
+
+ ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
+--
+2.35.1
+
--- /dev/null
+From 71bc8da3f99b63e7e01ab613b07b4a9038c70304 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 2 Sep 2022 23:59:18 +0200
+Subject: RDMA/siw: Pass a pointer to virt_to_page()
+
+From: Linus Walleij <linus.walleij@linaro.org>
+
+[ Upstream commit 0d1b756acf60da5004c1e20ca4462f0c257bf6e1 ]
+
+Functions that work on a pointer to virtual memory such as
+virt_to_pfn() and users of that function such as
+virt_to_page() are supposed to pass a pointer to virtual
+memory, ideally a (void *) or other pointer. However since
+many architectures implement virt_to_pfn() as a macro,
+this function becomes polymorphic and accepts both a
+(unsigned long) and a (void *).
+
+If we instead implement a proper virt_to_pfn(void *addr)
+function the following happens (occurred on arch/arm):
+
+drivers/infiniband/sw/siw/siw_qp_tx.c:32:23: warning: incompatible
+ integer to pointer conversion passing 'dma_addr_t' (aka 'unsigned int')
+ to parameter of type 'const void *' [-Wint-conversion]
+drivers/infiniband/sw/siw/siw_qp_tx.c:32:37: warning: passing argument
+ 1 of 'virt_to_pfn' makes pointer from integer without a cast
+ [-Wint-conversion]
+drivers/infiniband/sw/siw/siw_qp_tx.c:538:36: warning: incompatible
+ integer to pointer conversion passing 'unsigned long long'
+ to parameter of type 'const void *' [-Wint-conversion]
+
+Fix this with an explicit cast. In one case where the SIW
+SGE uses an unaligned u64 we need a double cast modifying the
+virtual address (va) to a platform-specific uintptr_t before
+casting to a (void *).
+
+Fixes: b9be6f18cf9e ("rdma/siw: transmit path")
+Cc: linux-rdma@vger.kernel.org
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Link: https://lore.kernel.org/r/20220902215918.603761-1-linus.walleij@linaro.org
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/sw/siw/siw_qp_tx.c | 18 ++++++++++++++----
+ 1 file changed, 14 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
+index 1f4e60257700e..7d47b521070b1 100644
+--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
++++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
+@@ -29,7 +29,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx)
+ dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx);
+
+ if (paddr)
+- return virt_to_page(paddr);
++ return virt_to_page((void *)paddr);
+
+ return NULL;
+ }
+@@ -533,13 +533,23 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
+ kunmap_local(kaddr);
+ }
+ } else {
+- u64 va = sge->laddr + sge_off;
++ /*
++ * Cast to an uintptr_t to preserve all 64 bits
++ * in sge->laddr.
++ */
++ uintptr_t va = (uintptr_t)(sge->laddr + sge_off);
+
+- page_array[seg] = virt_to_page(va & PAGE_MASK);
++ /*
++ * virt_to_page() takes a (void *) pointer
++ * so cast to a (void *) meaning it will be 64
++ * bits on a 64 bit platform and 32 bits on a
++ * 32 bit platform.
++ */
++ page_array[seg] = virt_to_page((void *)(va & PAGE_MASK));
+ if (do_crc)
+ crypto_shash_update(
+ c_tx->mpa_crc_hd,
+- (void *)(uintptr_t)va,
++ (void *)va,
+ plen);
+ }
+
+--
+2.35.1
+
--- /dev/null
+From 9caad375f9f0d7e2ab248534db6785f871afb274 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 31 Aug 2022 08:16:29 +0000
+Subject: RDMA/srp: Set scmnd->result only when scmnd is not NULL
+
+From: yangx.jy@fujitsu.com <yangx.jy@fujitsu.com>
+
+[ Upstream commit 12f35199a2c0551187edbf8eb01379f0598659fa ]
+
+This change fixes the following kernel NULL pointer dereference
+which is reproduced by blktests srp/007 occasionally.
+
+BUG: kernel NULL pointer dereference, address: 0000000000000170
+PGD 0 P4D 0
+Oops: 0002 [#1] PREEMPT SMP NOPTI
+CPU: 0 PID: 9 Comm: kworker/0:1H Kdump: loaded Not tainted 6.0.0-rc1+ #37
+Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.15.0-29-g6a62e0cb0dfe-prebuilt.qemu.org 04/01/2014
+Workqueue: 0x0 (kblockd)
+RIP: 0010:srp_recv_done+0x176/0x500 [ib_srp]
+Code: 00 4d 85 ff 0f 84 52 02 00 00 48 c7 82 80 02 00 00 00 00 00 00 4c 89 df 4c 89 14 24 e8 53 d3 4a f6 4c 8b 14 24 41 0f b6 42 13 <41> 89 87 70 01 00 00 41 0f b6 52 12 f6 c2 02 74 44 41 8b 42 1c b9
+RSP: 0018:ffffaef7c0003e28 EFLAGS: 00000282
+RAX: 0000000000000000 RBX: ffff9bc9486dea60 RCX: 0000000000000000
+RDX: 0000000000000102 RSI: ffffffffb76bbd0e RDI: 00000000ffffffff
+RBP: ffff9bc980099a00 R08: 0000000000000001 R09: 0000000000000001
+R10: ffff9bca53ef0000 R11: ffff9bc980099a10 R12: ffff9bc956e14000
+R13: ffff9bc9836b9cb0 R14: ffff9bc9557b4480 R15: 0000000000000000
+FS: 0000000000000000(0000) GS:ffff9bc97ec00000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 0000000000000170 CR3: 0000000007e04000 CR4: 00000000000006f0
+Call Trace:
+ <IRQ>
+ __ib_process_cq+0xb7/0x280 [ib_core]
+ ib_poll_handler+0x2b/0x130 [ib_core]
+ irq_poll_softirq+0x93/0x150
+ __do_softirq+0xee/0x4b8
+ irq_exit_rcu+0xf7/0x130
+ sysvec_apic_timer_interrupt+0x8e/0xc0
+ </IRQ>
+
+Fixes: ad215aaea4f9 ("RDMA/srp: Make struct scsi_cmnd and struct srp_request adjacent")
+Link: https://lore.kernel.org/r/20220831081626.18712-1-yangx.jy@fujitsu.com
+Signed-off-by: Xiao Yang <yangx.jy@fujitsu.com>
+Acked-by: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/ulp/srp/ib_srp.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
+index 5d416ec228717..473b3a08cf96d 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -1955,7 +1955,8 @@ static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
+ if (scmnd) {
+ req = scsi_cmd_priv(scmnd);
+ scmnd = srp_claim_req(ch, req, NULL, scmnd);
+- } else {
++ }
++ if (!scmnd) {
+ shost_printk(KERN_ERR, target->scsi_host,
+ "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
+ rsp->tag, ch - target->ch, ch->qp->qp_num);
+--
+2.35.1
+
--- /dev/null
+From c023118eb881416c63e70a9a39f0d1843991de58 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 19 Aug 2022 14:43:36 -0500
+Subject: regulator: core: Clean up on enable failure
+
+From: Andrew Halaney <ahalaney@redhat.com>
+
+[ Upstream commit c32f1ebfd26bece77141257864ed7b4720da1557 ]
+
+If regulator_enable() fails, enable_count is still incremented.
+A consumer, assuming no matching regulator_disable() is necessary on
+failure, will then get this error message upon regulator_put()
+since enable_count is non-zero:
+
+ [ 1.277418] WARNING: CPU: 3 PID: 1 at drivers/regulator/core.c:2304 _regulator_put.part.0+0x168/0x170
+
+The consumer could try to fix this in their driver by cleaning up on
+error from regulator_enable() (i.e. call regulator_disable()), but that
+results in the following since regulator_enable() failed and didn't
+increment user_count:
+
+ [ 1.258112] unbalanced disables for vreg_l17c
+ [ 1.262606] WARNING: CPU: 4 PID: 1 at drivers/regulator/core.c:2899 _regulator_disable+0xd4/0x190
+
+Fix this by decrementing enable_count upon failure to enable.
+
+With this in place, just the reason for failure to enable is printed
+as expected and developers can focus on the root cause of their issue
+instead of thinking their usage of the regulator consumer api is
+incorrect. For example, in my case:
+
+ [ 1.240426] vreg_l17c: invalid input voltage found
+
+Fixes: 5451781dadf8 ("regulator: core: Only count load for enabled consumers")
+Signed-off-by: Andrew Halaney <ahalaney@redhat.com>
+Reviewed-by: Douglas Anderson <dianders@chromium.org>
+Reviewed-by: Brian Masney <bmasney@redhat.com>
+Link: https://lore.kernel.org/r/20220819194336.382740-1-ahalaney@redhat.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/regulator/core.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index f4f28e5888b1c..43613db7af754 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -2688,13 +2688,18 @@ static int _regulator_do_enable(struct regulator_dev *rdev)
+ */
+ static int _regulator_handle_consumer_enable(struct regulator *regulator)
+ {
++ int ret;
+ struct regulator_dev *rdev = regulator->rdev;
+
+ lockdep_assert_held_once(&rdev->mutex.base);
+
+ regulator->enable_count++;
+- if (regulator->uA_load && regulator->enable_count == 1)
+- return drms_uA_update(rdev);
++ if (regulator->uA_load && regulator->enable_count == 1) {
++ ret = drms_uA_update(rdev);
++ if (ret)
++ regulator->enable_count--;
++ return ret;
++ }
+
+ return 0;
+ }
+--
+2.35.1
+
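+A hedged stand-alone sketch of the rollback pattern the fix introduces:
+bump the counter first, and undo the bump if the follow-up work fails, so
+a failed enable does not leave a stale count behind. The consumer struct
+and the update_load stub are invented for the sketch.
+
+    #include <errno.h>
+    #include <stdio.h>
+
+    struct consumer { int enable_count; int uA_load; };
+
+    /* Stub for the load update; fails for a nonsensical load. */
+    static int update_load(struct consumer *c)
+    {
+        return c->uA_load < 0 ? -EINVAL : 0;
+    }
+
+    static int handle_enable(struct consumer *c)
+    {
+        int ret;
+
+        c->enable_count++;
+        if (c->uA_load && c->enable_count == 1) {
+            ret = update_load(c);
+            if (ret)
+                c->enable_count--;   /* roll back on failure */
+            return ret;
+        }
+        return 0;
+    }
+
+    int main(void)
+    {
+        struct consumer c = { .uA_load = -1 };   /* force the failure path */
+        int ret = handle_enable(&c);
+
+        printf("ret=%d count=%d\n", ret, c.enable_count);
+        return 0;
+    }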
--- /dev/null
+From ae458a7ef3a9613b83305d54f16acaa3a3d04243 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 31 Aug 2022 21:20:49 +0200
+Subject: Revert "net: phy: meson-gxl: improve link-up behavior"
+
+From: Heiner Kallweit <hkallweit1@gmail.com>
+
+[ Upstream commit 7fdc77665f3d45c9da7c6edd4beadee9790f43aa ]
+
+This reverts commit 2c87c6f9fbddc5b84d67b2fa3f432fcac6d99d93.
+Meanwhile it turned out that the following commit is the proper
+workaround for the issue that 2c87c6f9fbdd tries to address.
+a3a57bf07de2 ("net: stmmac: work around sporadic tx issue on link-up")
+It's nor clear why the to be reverted commit helped for one user,
+for others it didn't make a difference.
+
+Fixes: 2c87c6f9fbdd ("net: phy: meson-gxl: improve link-up behavior")
+Signed-off-by: Heiner Kallweit <hkallweit1@gmail.com>
+Link: https://lore.kernel.org/r/8deeeddc-6b71-129b-1918-495a12dc11e3@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/phy/meson-gxl.c | 8 +-------
+ 1 file changed, 1 insertion(+), 7 deletions(-)
+
+diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
+index 73f7962a37d33..c49062ad72c6c 100644
+--- a/drivers/net/phy/meson-gxl.c
++++ b/drivers/net/phy/meson-gxl.c
+@@ -243,13 +243,7 @@ static irqreturn_t meson_gxl_handle_interrupt(struct phy_device *phydev)
+ irq_status == INTSRC_ENERGY_DETECT)
+ return IRQ_HANDLED;
+
+- /* Give PHY some time before MAC starts sending data. This works
+- * around an issue where network doesn't come up properly.
+- */
+- if (!(irq_status & INTSRC_LINK_DOWN))
+- phy_queue_state_machine(phydev, msecs_to_jiffies(100));
+- else
+- phy_trigger_machine(phydev);
++ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+ }
+--
+2.35.1
+
--- /dev/null
+From ea4c9654d8ec75ec71ca959d55ac85da39cf626f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 Aug 2022 22:39:28 +0100
+Subject: rxrpc: Fix an insufficiently large sglist in rxkad_verify_packet_2()
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit 0d40f728e28393a8817d1fcae923dfa3409e488c ]
+
+rxkad_verify_packet_2() has a small stack-allocated sglist of 4 elements,
+but if that isn't sufficient for the number of fragments in the socket
+buffer, we try to allocate an sglist large enough to hold all the
+fragments.
+
+However, for large packets with a lot of fragments, this isn't sufficient
+and we need at least one additional fragment.
+
+The problem manifests as skb_to_sgvec() returning -EMSGSIZE and this then
+getting returned to userspace. Most of the time, this isn't a problem as
+rxrpc sets a limit of 5692, big enough for 4 jumbo subpackets to be glued
+together; occasionally, however, the server will ignore the reported limit
+and give a packet that's a lot bigger - say 19852 bytes with ->nr_frags
+being 7. skb_to_sgvec() then tries to return a "zeroth" fragment that
+seems to occur before the fragments counted by ->nr_frags and we hit the
+end of the sglist too early.
+
+Note that __skb_to_sgvec() also has an skb_walk_frags() loop that is
+recursive up to 24 deep. I'm not sure if I need to take account of that
+too - or if there's an easy way of counting those frags too.
+
+Fix this by counting an extra frag and allocating a larger sglist based on
+that.
+
+Fixes: d0d5c0cd1e71 ("rxrpc: Use skb_unshare() rather than skb_cow_data()")
+Reported-by: Marc Dionne <marc.dionne@auristor.com>
+Signed-off-by: David Howells <dhowells@redhat.com>
+cc: linux-afs@lists.infradead.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/rxrpc/rxkad.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
+index 08aab5c01437d..db47844f4ac99 100644
+--- a/net/rxrpc/rxkad.c
++++ b/net/rxrpc/rxkad.c
+@@ -540,7 +540,7 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
+ * directly into the target buffer.
+ */
+ sg = _sg;
+- nsg = skb_shinfo(skb)->nr_frags;
++ nsg = skb_shinfo(skb)->nr_frags + 1;
+ if (nsg <= 4) {
+ nsg = 4;
+ } else {
+--
+2.35.1
+
--- /dev/null
+From be0f8176f8458fedb8d942a5a25ef7864908d093 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 26 Aug 2022 15:39:28 +0100
+Subject: rxrpc: Fix ICMP/ICMP6 error handling
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit ac56a0b48da86fd1b4389632fb7c4c8a5d86eefa ]
+
+Because rxrpc pretends to be a tunnel on top of a UDP/UDP6 socket, allowing
+it to siphon off UDP packets early in the handling of received UDP packets
+thereby avoiding the packet going through the UDP receive queue, it doesn't
+get ICMP packets through the UDP ->sk_error_report() callback. In fact, it
+doesn't appear that there's any usable option for getting hold of ICMP
+packets.
+
+Fix this by adding a new UDP encap hook to distribute error messages for
+UDP tunnels. If the hook is set, then the tunnel driver will be able to
+see ICMP packets. The hook provides the offset into the packet of the UDP
+header of the original packet that caused the notification.
+
+An alternative would be to call the ->error_handler() hook - but that
+requires that the skbuff be cloned (as ip_icmp_error() or ipv6_icmp_error()
+do, though that isn't really necessary or desirable in rxrpc's case as we
+want to parse them there and then, not queue them).
+
+Changes
+=======
+ver #3)
+ - Fixed an uninitialised variable.
+
+ver #2)
+ - Fixed some missing CONFIG_AF_RXRPC_IPV6 conditionals.
+
+Fixes: 5271953cad31 ("rxrpc: Use the UDP encap_rcv hook")
+Signed-off-by: David Howells <dhowells@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/udp.h | 1 +
+ include/net/udp_tunnel.h | 4 +
+ net/ipv4/udp.c | 2 +
+ net/ipv4/udp_tunnel_core.c | 1 +
+ net/ipv6/udp.c | 5 +-
+ net/rxrpc/ar-internal.h | 1 +
+ net/rxrpc/local_object.c | 1 +
+ net/rxrpc/peer_event.c | 293 ++++++++++++++++++++++++++++++++-----
+ 8 files changed, 270 insertions(+), 38 deletions(-)
+
+diff --git a/include/linux/udp.h b/include/linux/udp.h
+index ae66dadd85434..0727276e7538c 100644
+--- a/include/linux/udp.h
++++ b/include/linux/udp.h
+@@ -75,6 +75,7 @@ struct udp_sock {
+ * For encapsulation sockets.
+ */
+ int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
++ void (*encap_err_rcv)(struct sock *sk, struct sk_buff *skb, unsigned int udp_offset);
+ int (*encap_err_lookup)(struct sock *sk, struct sk_buff *skb);
+ void (*encap_destroy)(struct sock *sk);
+
+diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
+index afc7ce713657b..72394f441dad8 100644
+--- a/include/net/udp_tunnel.h
++++ b/include/net/udp_tunnel.h
+@@ -67,6 +67,9 @@ static inline int udp_sock_create(struct net *net,
+ typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb);
+ typedef int (*udp_tunnel_encap_err_lookup_t)(struct sock *sk,
+ struct sk_buff *skb);
++typedef void (*udp_tunnel_encap_err_rcv_t)(struct sock *sk,
++ struct sk_buff *skb,
++ unsigned int udp_offset);
+ typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk);
+ typedef struct sk_buff *(*udp_tunnel_gro_receive_t)(struct sock *sk,
+ struct list_head *head,
+@@ -80,6 +83,7 @@ struct udp_tunnel_sock_cfg {
+ __u8 encap_type;
+ udp_tunnel_encap_rcv_t encap_rcv;
+ udp_tunnel_encap_err_lookup_t encap_err_lookup;
++ udp_tunnel_encap_err_rcv_t encap_err_rcv;
+ udp_tunnel_encap_destroy_t encap_destroy;
+ udp_tunnel_gro_receive_t gro_receive;
+ udp_tunnel_gro_complete_t gro_complete;
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index efef7ba44e1d6..75d1977ecc07e 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -781,6 +781,8 @@ int __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
+ */
+ if (tunnel) {
+ /* ...not for tunnels though: we don't have a sending socket */
++ if (udp_sk(sk)->encap_err_rcv)
++ udp_sk(sk)->encap_err_rcv(sk, skb, iph->ihl << 2);
+ goto out;
+ }
+ if (!inet->recverr) {
+diff --git a/net/ipv4/udp_tunnel_core.c b/net/ipv4/udp_tunnel_core.c
+index b97e3635acf50..46101fd67a477 100644
+--- a/net/ipv4/udp_tunnel_core.c
++++ b/net/ipv4/udp_tunnel_core.c
+@@ -75,6 +75,7 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
+
+ udp_sk(sk)->encap_type = cfg->encap_type;
+ udp_sk(sk)->encap_rcv = cfg->encap_rcv;
++ udp_sk(sk)->encap_err_rcv = cfg->encap_err_rcv;
+ udp_sk(sk)->encap_err_lookup = cfg->encap_err_lookup;
+ udp_sk(sk)->encap_destroy = cfg->encap_destroy;
+ udp_sk(sk)->gro_receive = cfg->gro_receive;
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 4a9afdbd5f292..07726a51a3f09 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -614,8 +614,11 @@ int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ }
+
+ /* Tunnels don't have an application socket: don't pass errors back */
+- if (tunnel)
++ if (tunnel) {
++ if (udp_sk(sk)->encap_err_rcv)
++ udp_sk(sk)->encap_err_rcv(sk, skb, offset);
+ goto out;
++ }
+
+ if (!np->recverr) {
+ if (!harderr || sk->sk_state != TCP_ESTABLISHED)
+diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
+index f2d593e27b64f..f2e3fb77a02d3 100644
+--- a/net/rxrpc/ar-internal.h
++++ b/net/rxrpc/ar-internal.h
+@@ -990,6 +990,7 @@ void rxrpc_send_keepalive(struct rxrpc_peer *);
+ /*
+ * peer_event.c
+ */
++void rxrpc_encap_err_rcv(struct sock *sk, struct sk_buff *skb, unsigned int udp_offset);
+ void rxrpc_error_report(struct sock *);
+ void rxrpc_peer_keepalive_worker(struct work_struct *);
+
+diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
+index 6a1611b0e3037..ef43fe8bdd2ff 100644
+--- a/net/rxrpc/local_object.c
++++ b/net/rxrpc/local_object.c
+@@ -137,6 +137,7 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
+
+ tuncfg.encap_type = UDP_ENCAP_RXRPC;
+ tuncfg.encap_rcv = rxrpc_input_packet;
++ tuncfg.encap_err_rcv = rxrpc_encap_err_rcv;
+ tuncfg.sk_user_data = local;
+ setup_udp_tunnel_sock(net, local->socket, &tuncfg);
+
+diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
+index be032850ae8ca..32561e9567fe3 100644
+--- a/net/rxrpc/peer_event.c
++++ b/net/rxrpc/peer_event.c
+@@ -16,22 +16,105 @@
+ #include <net/sock.h>
+ #include <net/af_rxrpc.h>
+ #include <net/ip.h>
++#include <net/icmp.h>
+ #include "ar-internal.h"
+
++static void rxrpc_adjust_mtu(struct rxrpc_peer *, unsigned int);
+ static void rxrpc_store_error(struct rxrpc_peer *, struct sock_exterr_skb *);
+ static void rxrpc_distribute_error(struct rxrpc_peer *, int,
+ enum rxrpc_call_completion);
+
+ /*
+- * Find the peer associated with an ICMP packet.
++ * Find the peer associated with an ICMPv4 packet.
+ */
+ static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local,
+- const struct sk_buff *skb,
++ struct sk_buff *skb,
++ unsigned int udp_offset,
++ unsigned int *info,
+ struct sockaddr_rxrpc *srx)
+ {
+- struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
++ struct iphdr *ip, *ip0 = ip_hdr(skb);
++ struct icmphdr *icmp = icmp_hdr(skb);
++ struct udphdr *udp = (struct udphdr *)(skb->data + udp_offset);
+
+- _enter("");
++ _enter("%u,%u,%u", ip0->protocol, icmp->type, icmp->code);
++
++ switch (icmp->type) {
++ case ICMP_DEST_UNREACH:
++ *info = ntohs(icmp->un.frag.mtu);
++ fallthrough;
++ case ICMP_TIME_EXCEEDED:
++ case ICMP_PARAMETERPROB:
++ ip = (struct iphdr *)((void *)icmp + 8);
++ break;
++ default:
++ return NULL;
++ }
++
++ memset(srx, 0, sizeof(*srx));
++ srx->transport_type = local->srx.transport_type;
++ srx->transport_len = local->srx.transport_len;
++ srx->transport.family = local->srx.transport.family;
++
++ /* Can we see an ICMP4 packet on an ICMP6 listening socket? and vice
++ * versa?
++ */
++ switch (srx->transport.family) {
++ case AF_INET:
++ srx->transport_len = sizeof(srx->transport.sin);
++ srx->transport.family = AF_INET;
++ srx->transport.sin.sin_port = udp->dest;
++ memcpy(&srx->transport.sin.sin_addr, &ip->daddr,
++ sizeof(struct in_addr));
++ break;
++
++#ifdef CONFIG_AF_RXRPC_IPV6
++ case AF_INET6:
++ srx->transport_len = sizeof(srx->transport.sin);
++ srx->transport.family = AF_INET;
++ srx->transport.sin.sin_port = udp->dest;
++ memcpy(&srx->transport.sin.sin_addr, &ip->daddr,
++ sizeof(struct in_addr));
++ break;
++#endif
++
++ default:
++ WARN_ON_ONCE(1);
++ return NULL;
++ }
++
++ _net("ICMP {%pISp}", &srx->transport);
++ return rxrpc_lookup_peer_rcu(local, srx);
++}
++
++#ifdef CONFIG_AF_RXRPC_IPV6
++/*
++ * Find the peer associated with an ICMPv6 packet.
++ */
++static struct rxrpc_peer *rxrpc_lookup_peer_icmp6_rcu(struct rxrpc_local *local,
++ struct sk_buff *skb,
++ unsigned int udp_offset,
++ unsigned int *info,
++ struct sockaddr_rxrpc *srx)
++{
++ struct icmp6hdr *icmp = icmp6_hdr(skb);
++ struct ipv6hdr *ip, *ip0 = ipv6_hdr(skb);
++ struct udphdr *udp = (struct udphdr *)(skb->data + udp_offset);
++
++ _enter("%u,%u,%u", ip0->nexthdr, icmp->icmp6_type, icmp->icmp6_code);
++
++ switch (icmp->icmp6_type) {
++ case ICMPV6_DEST_UNREACH:
++ *info = ntohl(icmp->icmp6_mtu);
++ fallthrough;
++ case ICMPV6_PKT_TOOBIG:
++ case ICMPV6_TIME_EXCEED:
++ case ICMPV6_PARAMPROB:
++ ip = (struct ipv6hdr *)((void *)icmp + 8);
++ break;
++ default:
++ return NULL;
++ }
+
+ memset(srx, 0, sizeof(*srx));
+ srx->transport_type = local->srx.transport_type;
+@@ -41,6 +124,165 @@ static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local,
+ /* Can we see an ICMP4 packet on an ICMP6 listening socket? and vice
+ * versa?
+ */
++ switch (srx->transport.family) {
++ case AF_INET:
++ _net("Rx ICMP6 on v4 sock");
++ srx->transport_len = sizeof(srx->transport.sin);
++ srx->transport.family = AF_INET;
++ srx->transport.sin.sin_port = udp->dest;
++ memcpy(&srx->transport.sin.sin_addr,
++ &ip->daddr.s6_addr32[3], sizeof(struct in_addr));
++ break;
++ case AF_INET6:
++ _net("Rx ICMP6");
++ srx->transport.sin.sin_port = udp->dest;
++ memcpy(&srx->transport.sin6.sin6_addr, &ip->daddr,
++ sizeof(struct in6_addr));
++ break;
++ default:
++ WARN_ON_ONCE(1);
++ return NULL;
++ }
++
++ _net("ICMP {%pISp}", &srx->transport);
++ return rxrpc_lookup_peer_rcu(local, srx);
++}
++#endif /* CONFIG_AF_RXRPC_IPV6 */
++
++/*
++ * Handle an error received on the local endpoint as a tunnel.
++ */
++void rxrpc_encap_err_rcv(struct sock *sk, struct sk_buff *skb,
++ unsigned int udp_offset)
++{
++ struct sock_extended_err ee;
++ struct sockaddr_rxrpc srx;
++ struct rxrpc_local *local;
++ struct rxrpc_peer *peer;
++ unsigned int info = 0;
++ int err;
++ u8 version = ip_hdr(skb)->version;
++ u8 type = icmp_hdr(skb)->type;
++ u8 code = icmp_hdr(skb)->code;
++
++ rcu_read_lock();
++ local = rcu_dereference_sk_user_data(sk);
++ if (unlikely(!local)) {
++ rcu_read_unlock();
++ return;
++ }
++
++ rxrpc_new_skb(skb, rxrpc_skb_received);
++
++ switch (ip_hdr(skb)->version) {
++ case IPVERSION:
++ peer = rxrpc_lookup_peer_icmp_rcu(local, skb, udp_offset,
++ &info, &srx);
++ break;
++#ifdef CONFIG_AF_RXRPC_IPV6
++ case 6:
++ peer = rxrpc_lookup_peer_icmp6_rcu(local, skb, udp_offset,
++ &info, &srx);
++ break;
++#endif
++ default:
++ rcu_read_unlock();
++ return;
++ }
++
++ if (peer && !rxrpc_get_peer_maybe(peer))
++ peer = NULL;
++ if (!peer) {
++ rcu_read_unlock();
++ return;
++ }
++
++ memset(&ee, 0, sizeof(ee));
++
++ switch (version) {
++ case IPVERSION:
++ switch (type) {
++ case ICMP_DEST_UNREACH:
++ switch (code) {
++ case ICMP_FRAG_NEEDED:
++ rxrpc_adjust_mtu(peer, info);
++ rcu_read_unlock();
++ rxrpc_put_peer(peer);
++ return;
++ default:
++ break;
++ }
++
++ err = EHOSTUNREACH;
++ if (code <= NR_ICMP_UNREACH) {
++ /* Might want to do something different with
++ * non-fatal errors
++ */
++ //harderr = icmp_err_convert[code].fatal;
++ err = icmp_err_convert[code].errno;
++ }
++ break;
++
++ case ICMP_TIME_EXCEEDED:
++ err = EHOSTUNREACH;
++ break;
++ default:
++ err = EPROTO;
++ break;
++ }
++
++ ee.ee_origin = SO_EE_ORIGIN_ICMP;
++ ee.ee_type = type;
++ ee.ee_code = code;
++ ee.ee_errno = err;
++ break;
++
++#ifdef CONFIG_AF_RXRPC_IPV6
++ case 6:
++ switch (type) {
++ case ICMPV6_PKT_TOOBIG:
++ rxrpc_adjust_mtu(peer, info);
++ rcu_read_unlock();
++ rxrpc_put_peer(peer);
++ return;
++ }
++
++ icmpv6_err_convert(type, code, &err);
++
++ if (err == EACCES)
++ err = EHOSTUNREACH;
++
++ ee.ee_origin = SO_EE_ORIGIN_ICMP6;
++ ee.ee_type = type;
++ ee.ee_code = code;
++ ee.ee_errno = err;
++ break;
++#endif
++ }
++
++ trace_rxrpc_rx_icmp(peer, &ee, &srx);
++
++ rxrpc_distribute_error(peer, err, RXRPC_CALL_NETWORK_ERROR);
++ rcu_read_unlock();
++ rxrpc_put_peer(peer);
++}
++
++/*
++ * Find the peer associated with a local error.
++ */
++static struct rxrpc_peer *rxrpc_lookup_peer_local_rcu(struct rxrpc_local *local,
++ const struct sk_buff *skb,
++ struct sockaddr_rxrpc *srx)
++{
++ struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
++
++ _enter("");
++
++ memset(srx, 0, sizeof(*srx));
++ srx->transport_type = local->srx.transport_type;
++ srx->transport_len = local->srx.transport_len;
++ srx->transport.family = local->srx.transport.family;
++
+ switch (srx->transport.family) {
+ case AF_INET:
+ srx->transport_len = sizeof(srx->transport.sin);
+@@ -104,10 +346,8 @@ static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local,
+ /*
+ * Handle an MTU/fragmentation problem.
+ */
+-static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, struct sock_exterr_skb *serr)
++static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, unsigned int mtu)
+ {
+- u32 mtu = serr->ee.ee_info;
+-
+ _net("Rx ICMP Fragmentation Needed (%d)", mtu);
+
+ /* wind down the local interface MTU */
+@@ -148,7 +388,7 @@ void rxrpc_error_report(struct sock *sk)
+ struct sock_exterr_skb *serr;
+ struct sockaddr_rxrpc srx;
+ struct rxrpc_local *local;
+- struct rxrpc_peer *peer;
++ struct rxrpc_peer *peer = NULL;
+ struct sk_buff *skb;
+
+ rcu_read_lock();
+@@ -172,41 +412,20 @@ void rxrpc_error_report(struct sock *sk)
+ }
+ rxrpc_new_skb(skb, rxrpc_skb_received);
+ serr = SKB_EXT_ERR(skb);
+- if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
+- _leave("UDP empty message");
+- rcu_read_unlock();
+- rxrpc_free_skb(skb, rxrpc_skb_freed);
+- return;
+- }
+
+- peer = rxrpc_lookup_peer_icmp_rcu(local, skb, &srx);
+- if (peer && !rxrpc_get_peer_maybe(peer))
+- peer = NULL;
+- if (!peer) {
+- rcu_read_unlock();
+- rxrpc_free_skb(skb, rxrpc_skb_freed);
+- _leave(" [no peer]");
+- return;
+- }
+-
+- trace_rxrpc_rx_icmp(peer, &serr->ee, &srx);
+-
+- if ((serr->ee.ee_origin == SO_EE_ORIGIN_ICMP &&
+- serr->ee.ee_type == ICMP_DEST_UNREACH &&
+- serr->ee.ee_code == ICMP_FRAG_NEEDED)) {
+- rxrpc_adjust_mtu(peer, serr);
+- rcu_read_unlock();
+- rxrpc_free_skb(skb, rxrpc_skb_freed);
+- rxrpc_put_peer(peer);
+- _leave(" [MTU update]");
+- return;
++ if (serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL) {
++ peer = rxrpc_lookup_peer_local_rcu(local, skb, &srx);
++ if (peer && !rxrpc_get_peer_maybe(peer))
++ peer = NULL;
++ if (peer) {
++ trace_rxrpc_rx_icmp(peer, &serr->ee, &srx);
++ rxrpc_store_error(peer, serr);
++ }
+ }
+
+- rxrpc_store_error(peer, serr);
+ rcu_read_unlock();
+ rxrpc_free_skb(skb, rxrpc_skb_freed);
+ rxrpc_put_peer(peer);
+-
+ _leave("");
+ }
+
+--
+2.35.1
+
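The rxrpc patch above threads a new error callback through the UDP tunnel core: the tunnel driver registers encap_err_rcv in its udp_tunnel_sock_cfg, setup_udp_tunnel_sock() copies it into the socket, and __udp4_lib_err()/__udp6_lib_err() invoke it when an ICMP/ICMP6 error arrives for a tunnel socket that has no application to report to. A minimal userspace sketch of that register-then-dispatch shape (all types and names below are invented for illustration, not the kernel API):

/* Toy model of the encap_err_rcv plumbing added by the patch above.
 * All names here are invented; only the shape of the register-then-dispatch
 * flow matches the kernel change.
 */
#include <stdio.h>

struct tun_sock {
	/* optional error callback, NULL if the tunnel does not care */
	void (*encap_err_rcv)(struct tun_sock *sk, const char *pkt,
			      unsigned int udp_offset);
};

struct tun_cfg {
	void (*encap_err_rcv)(struct tun_sock *sk, const char *pkt,
			      unsigned int udp_offset);
};

/* mirrors setup_udp_tunnel_sock(): copy the callback into the socket */
static void setup_tunnel_sock(struct tun_sock *sk, const struct tun_cfg *cfg)
{
	sk->encap_err_rcv = cfg->encap_err_rcv;
}

/* mirrors the __udp4_lib_err()/__udp6_lib_err() hunks: a tunnel socket has
 * no application to report to, so hand the error to the tunnel driver if it
 * registered a handler, then stop.
 */
static void udp_lib_err(struct tun_sock *sk, const char *pkt,
			unsigned int udp_offset)
{
	if (sk->encap_err_rcv)
		sk->encap_err_rcv(sk, pkt, udp_offset);
	/* no further per-socket error queueing for tunnels */
}

/* stands in for the tunnel driver's handler */
static void my_err_rcv(struct tun_sock *sk, const char *pkt,
		       unsigned int udp_offset)
{
	printf("tunnel error, inner UDP header at offset %u: %s\n",
	       udp_offset, pkt);
}

int main(void)
{
	struct tun_cfg cfg = { .encap_err_rcv = my_err_rcv };
	struct tun_sock sk = { 0 };

	setup_tunnel_sock(&sk, &cfg);
	udp_lib_err(&sk, "ICMP_DEST_UNREACH/FRAG_NEEDED", 20);
	return 0;
}

In the real patch the registered handler is rxrpc_encap_err_rcv(), which parses the ICMP payload itself to find the peer and then either adjusts the peer MTU or distributes a network error to the affected calls.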
--- /dev/null
+From 42b9be01d091bf84c0151b538c5636636087e0bd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Sep 2022 21:21:36 +0200
+Subject: sch_sfb: Also store skb len before calling child enqueue
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Toke Høiland-Jørgensen <toke@toke.dk>
+
+[ Upstream commit 2f09707d0c972120bf794cfe0f0c67e2c2ddb252 ]
+
+Cong Wang noticed that the previous fix for sch_sfb accessing the queued
+skb after enqueueing it to a child qdisc was incomplete: the SFB enqueue
+function was also calling qdisc_qstats_backlog_inc() after enqueue, which
+reads the pkt len from the skb cb field. Fix this by also storing the skb
+len, and using the stored value to increment the backlog after enqueueing.
+
+Fixes: 9efd23297cca ("sch_sfb: Don't assume the skb is still around after enqueueing to child")
+Signed-off-by: Toke Høiland-Jørgensen <toke@toke.dk>
+Acked-by: Cong Wang <cong.wang@bytedance.com>
+Link: https://lore.kernel.org/r/20220905192137.965549-1-toke@toke.dk
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_sfb.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
+index 0d761f454ae8b..2829455211f8c 100644
+--- a/net/sched/sch_sfb.c
++++ b/net/sched/sch_sfb.c
+@@ -281,6 +281,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ {
+
+ struct sfb_sched_data *q = qdisc_priv(sch);
++ unsigned int len = qdisc_pkt_len(skb);
+ struct Qdisc *child = q->qdisc;
+ struct tcf_proto *fl;
+ struct sfb_skb_cb cb;
+@@ -403,7 +404,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ memcpy(&cb, sfb_skb_cb(skb), sizeof(cb));
+ ret = qdisc_enqueue(skb, child, to_free);
+ if (likely(ret == NET_XMIT_SUCCESS)) {
+- qdisc_qstats_backlog_inc(sch, skb);
++ sch->qstats.backlog += len;
+ sch->q.qlen++;
+ increment_qlen(&cb, q);
+ } else if (net_xmit_drop_count(ret)) {
+--
+2.35.1
+
--- /dev/null
+From 2057b2a4aaebc59ef03a7302dadf85a1569a1abf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 31 Aug 2022 23:52:18 +0200
+Subject: sch_sfb: Don't assume the skb is still around after enqueueing to
+ child
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Toke Høiland-Jørgensen <toke@toke.dk>
+
+[ Upstream commit 9efd23297cca530bb35e1848665805d3fcdd7889 ]
+
+The sch_sfb enqueue() routine assumes the skb is still alive after it has
+been enqueued into a child qdisc, using the data in the skb cb field in the
+increment_qlen() routine after enqueue. However, the skb may in fact have
+been freed, causing a use-after-free in this case. In particular, this
+happens if sch_cake is used as a child of sfb, and the GSO splitting mode
+of CAKE is enabled (in which case the skb will be split into segments and
+the original skb freed).
+
+Fix this by copying the sfb cb data to the stack before enqueueing the skb,
+and using this stack copy in increment_qlen() instead of the skb pointer
+itself.
+
+Reported-by: zdi-disclosures@trendmicro.com # ZDI-CAN-18231
+Fixes: e13e02a3c68d ("net_sched: SFB flow scheduler")
+Signed-off-by: Toke Høiland-Jørgensen <toke@toke.dk>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_sfb.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
+index 3d061a13d7ed2..0d761f454ae8b 100644
+--- a/net/sched/sch_sfb.c
++++ b/net/sched/sch_sfb.c
+@@ -135,15 +135,15 @@ static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
+ }
+ }
+
+-static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
++static void increment_qlen(const struct sfb_skb_cb *cb, struct sfb_sched_data *q)
+ {
+ u32 sfbhash;
+
+- sfbhash = sfb_hash(skb, 0);
++ sfbhash = cb->hashes[0];
+ if (sfbhash)
+ increment_one_qlen(sfbhash, 0, q);
+
+- sfbhash = sfb_hash(skb, 1);
++ sfbhash = cb->hashes[1];
+ if (sfbhash)
+ increment_one_qlen(sfbhash, 1, q);
+ }
+@@ -283,6 +283,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ struct sfb_sched_data *q = qdisc_priv(sch);
+ struct Qdisc *child = q->qdisc;
+ struct tcf_proto *fl;
++ struct sfb_skb_cb cb;
+ int i;
+ u32 p_min = ~0;
+ u32 minqlen = ~0;
+@@ -399,11 +400,12 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ }
+
+ enqueue:
++ memcpy(&cb, sfb_skb_cb(skb), sizeof(cb));
+ ret = qdisc_enqueue(skb, child, to_free);
+ if (likely(ret == NET_XMIT_SUCCESS)) {
+ qdisc_qstats_backlog_inc(sch, skb);
+ sch->q.qlen++;
+- increment_qlen(skb, q);
++ increment_qlen(&cb, q);
+ } else if (net_xmit_drop_count(ret)) {
+ q->stats.childdrop++;
+ qdisc_qstats_drop(sch);
+--
+2.35.1
+
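Both sch_sfb patches above apply the same rule: anything the enqueue path still needs after qdisc_enqueue() (the cb hashes and the packet length) must be copied to the stack first, because the child qdisc may segment or free the skb. A small standalone sketch of that copy-before-handoff pattern (toy structures, not the qdisc API):

/* Illustration only: a consumer that may free the object it is given,
 * and a caller that snapshots what it needs before the handoff. The real
 * code snapshots sfb_skb_cb and qdisc_pkt_len(skb) before qdisc_enqueue().
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct pkt {
	unsigned int len;
	unsigned int hashes[2];
};

/* may free (or replace) the packet, like a child qdisc with GSO splitting */
static int child_enqueue(struct pkt *p)
{
	free(p);
	return 0;	/* "NET_XMIT_SUCCESS" */
}

static void enqueue(struct pkt *p, unsigned int *backlog)
{
	/* snapshot everything we still need after the handoff */
	unsigned int len = p->len;
	unsigned int hashes[2];

	memcpy(hashes, p->hashes, sizeof(hashes));

	if (child_enqueue(p) == 0) {
		/* p may already be freed here: use the snapshots only */
		*backlog += len;
		printf("accounted len=%u hash0=%u hash1=%u\n",
		       len, hashes[0], hashes[1]);
	}
}

int main(void)
{
	struct pkt *p = malloc(sizeof(*p));
	unsigned int backlog = 0;

	if (!p)
		return 1;
	p->len = 1500;
	p->hashes[0] = 7;
	p->hashes[1] = 42;
	enqueue(p, &backlog);
	printf("backlog=%u\n", backlog);
	return 0;
}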
cgroup-elide-write-locking-threadgroup_rwsem-when-up.patch
cgroup-fix-threadgroup_rwsem-cpus_read_lock-deadlock.patch
riscv-dts-microchip-mpfs-fix-reference-clock-node.patch
+asoc-qcom-sm8250-add-missing-module-owner.patch
+rdma-rtrs-clt-use-the-right-sg_cnt-after-ib_dma_map_.patch
+rdma-rtrs-srv-pass-the-correct-number-of-entries-for.patch
+arm-dts-imx6qdl-kontron-samx6i-remove-duplicated-nod.patch
+soc-imx-gpcv2-assert-reset-before-ungating-clock.patch
+regulator-core-clean-up-on-enable-failure.patch
+tee-fix-compiler-warning-in-tee_shm_register.patch
+rdma-cma-fix-arguments-order-in-net-device-validatio.patch
+soc-brcmstb-pm-arm-fix-refcount-leak-and-__iomem-lea.patch
+rdma-hns-fix-supported-page-size.patch
+rdma-hns-fix-wrong-fixed-value-of-qp-rq.wqe_shift.patch
+wifi-wilc1000-fix-dma-on-stack-objects.patch
+arm-at91-pm-fix-self-refresh-for-sama7g5.patch
+arm-at91-pm-fix-ddr-recalibration-when-resuming-from.patch
+arm-dts-at91-sama5d27_wlsom1-specify-proper-regulato.patch
+arm-dts-at91-sama5d2_icp-specify-proper-regulator-ou.patch
+arm-dts-at91-sama5d27_wlsom1-don-t-keep-ldo2-enabled.patch
+arm-dts-at91-sama5d2_icp-don-t-keep-vdd_other-enable.patch
+netfilter-br_netfilter-drop-dst-references-before-se.patch
+netfilter-nf_tables-clean-up-hook-list-when-offload-.patch
+netfilter-nf_conntrack_irc-fix-forged-ip-logic.patch
+rdma-srp-set-scmnd-result-only-when-scmnd-is-not-nul.patch
+alsa-usb-audio-inform-the-delayed-registration-more-.patch
+alsa-usb-audio-register-card-again-for-iface-over-de.patch
+rxrpc-fix-icmp-icmp6-error-handling.patch
+rxrpc-fix-an-insufficiently-large-sglist-in-rxkad_ve.patch
+afs-use-the-operation-issue-time-instead-of-the-repl.patch
+revert-net-phy-meson-gxl-improve-link-up-behavior.patch
+sch_sfb-don-t-assume-the-skb-is-still-around-after-e.patch
+tipc-fix-shift-wrapping-bug-in-map_get.patch
+net-introduce-__skb_fill_page_desc_noacc.patch
+tcp-tx-zerocopy-should-not-sense-pfmemalloc-status.patch
+ice-use-bitmap_free-instead-of-devm_kfree.patch
+i40e-fix-kernel-crash-during-module-removal.patch
+iavf-detach-device-during-reset-task.patch
+net-fec-use-a-spinlock-to-guard-fep-ptp_clk_on.patch
+xen-netback-only-remove-hotplug-status-when-the-vif-.patch
+rdma-siw-pass-a-pointer-to-virt_to_page.patch
+ipv6-sr-fix-out-of-bounds-read-when-setting-hmac-dat.patch
+ib-core-fix-a-nested-dead-lock-as-part-of-odp-flow.patch
+rdma-mlx5-set-local-port-to-one-when-accessing-count.patch
+erofs-fix-pcluster-use-after-free-on-up-platforms.patch
+nvme-tcp-fix-uaf-when-detecting-digest-errors.patch
+nvme-tcp-fix-regression-that-causes-sporadic-request.patch
+tcp-fix-early-etimedout-after-spurious-non-sack-rto.patch
+nvmet-fix-mar-and-mor-off-by-one-errors.patch
+rdma-irdma-report-the-correct-max-cqes-from-query-de.patch
+rdma-irdma-return-correct-wc-error-for-bind-operatio.patch
+rdma-irdma-report-rnr-nak-generation-in-device-caps.patch
+sch_sfb-also-store-skb-len-before-calling-child-enqu.patch
+perf-script-fix-cannot-print-iregs-field-for-hybrid-.patch
--- /dev/null
+From 01b6fa34b4f5d26505e60829d32ec1baf819ec1f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 7 Jul 2022 09:56:20 +0800
+Subject: soc: brcmstb: pm-arm: Fix refcount leak and __iomem leak bugs
+
+From: Liang He <windhl@126.com>
+
+[ Upstream commit 1085f5080647f0c9f357c270a537869191f7f2a1 ]
+
+In brcmstb_pm_probe(), there are two kinds of leak bugs:
+
+(1) we need to add of_node_put() when for_each_matching_node() breaks
+(2) we need to add iounmap() for each iomap in the failure path
+
+Fixes: 0b741b8234c8 ("soc: bcm: brcmstb: Add support for S2/S3/S5 suspend states (ARM)")
+Signed-off-by: Liang He <windhl@126.com>
+Link: https://lore.kernel.org/r/20220707015620.306468-1-windhl@126.com
+Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/soc/bcm/brcmstb/pm/pm-arm.c | 50 ++++++++++++++++++++++-------
+ 1 file changed, 39 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/soc/bcm/brcmstb/pm/pm-arm.c b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
+index 70ad0f3dce283..286f5d57c0cab 100644
+--- a/drivers/soc/bcm/brcmstb/pm/pm-arm.c
++++ b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
+@@ -684,13 +684,14 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
+ const struct of_device_id *of_id = NULL;
+ struct device_node *dn;
+ void __iomem *base;
+- int ret, i;
++ int ret, i, s;
+
+ /* AON ctrl registers */
+ base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 0, NULL);
+ if (IS_ERR(base)) {
+ pr_err("error mapping AON_CTRL\n");
+- return PTR_ERR(base);
++ ret = PTR_ERR(base);
++ goto aon_err;
+ }
+ ctrl.aon_ctrl_base = base;
+
+@@ -700,8 +701,10 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
+ /* Assume standard offset */
+ ctrl.aon_sram = ctrl.aon_ctrl_base +
+ AON_CTRL_SYSTEM_DATA_RAM_OFS;
++ s = 0;
+ } else {
+ ctrl.aon_sram = base;
++ s = 1;
+ }
+
+ writel_relaxed(0, ctrl.aon_sram + AON_REG_PANIC);
+@@ -711,7 +714,8 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
+ (const void **)&ddr_phy_data);
+ if (IS_ERR(base)) {
+ pr_err("error mapping DDR PHY\n");
+- return PTR_ERR(base);
++ ret = PTR_ERR(base);
++ goto ddr_phy_err;
+ }
+ ctrl.support_warm_boot = ddr_phy_data->supports_warm_boot;
+ ctrl.pll_status_offset = ddr_phy_data->pll_status_offset;
+@@ -731,17 +735,20 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
+ for_each_matching_node(dn, ddr_shimphy_dt_ids) {
+ i = ctrl.num_memc;
+ if (i >= MAX_NUM_MEMC) {
++ of_node_put(dn);
+ pr_warn("too many MEMCs (max %d)\n", MAX_NUM_MEMC);
+ break;
+ }
+
+ base = of_io_request_and_map(dn, 0, dn->full_name);
+ if (IS_ERR(base)) {
++ of_node_put(dn);
+ if (!ctrl.support_warm_boot)
+ break;
+
+ pr_err("error mapping DDR SHIMPHY %d\n", i);
+- return PTR_ERR(base);
++ ret = PTR_ERR(base);
++ goto ddr_shimphy_err;
+ }
+ ctrl.memcs[i].ddr_shimphy_base = base;
+ ctrl.num_memc++;
+@@ -752,14 +759,18 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
+ for_each_matching_node(dn, brcmstb_memc_of_match) {
+ base = of_iomap(dn, 0);
+ if (!base) {
++ of_node_put(dn);
+ pr_err("error mapping DDR Sequencer %d\n", i);
+- return -ENOMEM;
++ ret = -ENOMEM;
++ goto brcmstb_memc_err;
+ }
+
+ of_id = of_match_node(brcmstb_memc_of_match, dn);
+ if (!of_id) {
+ iounmap(base);
+- return -EINVAL;
++ of_node_put(dn);
++ ret = -EINVAL;
++ goto brcmstb_memc_err;
+ }
+
+ ddr_seq_data = of_id->data;
+@@ -779,21 +790,24 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
+ dn = of_find_matching_node(NULL, sram_dt_ids);
+ if (!dn) {
+ pr_err("SRAM not found\n");
+- return -EINVAL;
++ ret = -EINVAL;
++ goto brcmstb_memc_err;
+ }
+
+ ret = brcmstb_init_sram(dn);
+ of_node_put(dn);
+ if (ret) {
+ pr_err("error setting up SRAM for PM\n");
+- return ret;
++ goto brcmstb_memc_err;
+ }
+
+ ctrl.pdev = pdev;
+
+ ctrl.s3_params = kmalloc(sizeof(*ctrl.s3_params), GFP_KERNEL);
+- if (!ctrl.s3_params)
+- return -ENOMEM;
++ if (!ctrl.s3_params) {
++ ret = -ENOMEM;
++ goto s3_params_err;
++ }
+ ctrl.s3_params_pa = dma_map_single(&pdev->dev, ctrl.s3_params,
+ sizeof(*ctrl.s3_params),
+ DMA_TO_DEVICE);
+@@ -813,7 +827,21 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
+
+ out:
+ kfree(ctrl.s3_params);
+-
++s3_params_err:
++ iounmap(ctrl.boot_sram);
++brcmstb_memc_err:
++ for (i--; i >= 0; i--)
++ iounmap(ctrl.memcs[i].ddr_ctrl);
++ddr_shimphy_err:
++ for (i = 0; i < ctrl.num_memc; i++)
++ iounmap(ctrl.memcs[i].ddr_shimphy_base);
++
++ iounmap(ctrl.memcs[0].ddr_phy_base);
++ddr_phy_err:
++ iounmap(ctrl.aon_ctrl_base);
++ if (s)
++ iounmap(ctrl.aon_sram);
++aon_err:
+ pr_warn("PM: initialization failed with code %d\n", ret);
+
+ return ret;
+--
+2.35.1
+
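The fix above converts brcmstb_pm_probe() to the usual goto-unwind idiom: each successful mapping gets a matching release label, every later failure jumps to the label that undoes exactly what has been set up so far, and of_node_put() is added where a for_each_matching_node() loop is left early. A minimal standalone sketch of the idiom, with malloc()/free() standing in for the ioremap()/iounmap() pairs and invented step names:

/* Sketch of goto-based unwinding; the three allocations stand in for the
 * AON ctrl, DDR PHY and SRAM mappings of the real probe routine.
 */
#include <stdio.h>
#include <stdlib.h>

static int probe(int fail_at)
{
	void *aon = NULL, *phy = NULL, *sram = NULL;

	aon = malloc(16);		/* "ioremap AON ctrl" */
	if (!aon || fail_at == 1)
		goto aon_err;

	phy = malloc(16);		/* "ioremap DDR PHY" */
	if (!phy || fail_at == 2)
		goto phy_err;

	sram = malloc(16);		/* "map SRAM" */
	if (!sram || fail_at == 3)
		goto sram_err;

	printf("probe ok\n");
	/* the real driver keeps its mappings; release here only so the
	 * demo itself does not leak */
	free(sram);
	free(phy);
	free(aon);
	return 0;

	/* release in reverse order of acquisition; free(NULL) is a no-op */
sram_err:
	free(sram);
phy_err:
	free(phy);
aon_err:
	free(aon);
	fprintf(stderr, "probe failed at step %d, all mappings released\n",
		fail_at);
	return -1;
}

int main(void)
{
	probe(0);
	probe(2);	/* fail after the first mapping succeeded */
	return 0;
}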
--- /dev/null
+From f5e81759a6133e24a1cbf10aff161f9995159d04 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Aug 2022 19:08:02 +0200
+Subject: soc: imx: gpcv2: Assert reset before ungating clock
+
+From: Marek Vasut <marex@denx.de>
+
+[ Upstream commit df88005bd81b80c944d185554e264a4b0f993c37 ]
+
+In case the power domain clocks are ungated before the reset is asserted,
+the system might freeze completely. This is likely due to a device in an
+undefined state being attached to the bus, which sporadically leads to a
+bus hang. Assert the reset before the clocks are enabled to ensure the
+device is in a defined state before being attached to the bus.
+
+Fixes: fe58c887fb8ca ("soc: imx: gpcv2: add support for optional resets")
+Signed-off-by: Marek Vasut <marex@denx.de>
+Reviewed-by: Fabio Estevam <festevam@denx.de>
+Reviewed-by: Lucas Stach <l.stach@pengutronix.de>
+Signed-off-by: Shawn Guo <shawnguo@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/soc/imx/gpcv2.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c
+index b4aa28420f2a8..4dc3a3f73511e 100644
+--- a/drivers/soc/imx/gpcv2.c
++++ b/drivers/soc/imx/gpcv2.c
+@@ -237,6 +237,8 @@ static int imx_pgc_power_up(struct generic_pm_domain *genpd)
+ }
+ }
+
++ reset_control_assert(domain->reset);
++
+ /* Enable reset clocks for all devices in the domain */
+ ret = clk_bulk_prepare_enable(domain->num_clks, domain->clks);
+ if (ret) {
+@@ -244,7 +246,8 @@ static int imx_pgc_power_up(struct generic_pm_domain *genpd)
+ goto out_regulator_disable;
+ }
+
+- reset_control_assert(domain->reset);
++ /* delays for reset to propagate */
++ udelay(5);
+
+ if (domain->bits.pxx) {
+ /* request the domain to power up */
+--
+2.35.1
+
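The gpcv2 hunks above only reorder existing calls, so the resulting power-up sequence is easiest to see written out in one place: assert the reset, ungate the clocks, give the reset a few microseconds to propagate, then request power-up. A stub sketch of that order (function names invented; the driver uses reset_control_assert(), clk_bulk_prepare_enable() and udelay(5)):

/* Order-of-operations sketch only; the stubs just print what the fixed
 * imx_pgc_power_up() path does.
 */
#include <stdio.h>

static void reset_assert(void)    { printf("1. assert reset\n"); }
static int  clocks_enable(void)   { printf("2. ungate domain clocks\n"); return 0; }
static void reset_settle(void)    { printf("3. wait ~5us for reset to propagate\n"); }
static void domain_power_up(void) { printf("4. request domain power-up\n"); }

int main(void)
{
	/* keep the device in reset while its clocks start ticking, so it is
	 * in a defined state before it is attached to the bus */
	reset_assert();
	if (clocks_enable())
		return 1;
	reset_settle();
	domain_power_up();
	return 0;
}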
--- /dev/null
+From 450518aff35c5ea6cd23b302a5fbbe9aa5d4fbec Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 3 Sep 2022 08:10:23 -0400
+Subject: tcp: fix early ETIMEDOUT after spurious non-SACK RTO
+
+From: Neal Cardwell <ncardwell@google.com>
+
+[ Upstream commit 686dc2db2a0fdc1d34b424ec2c0a735becd8d62b ]
+
+Fix a bug reported and analyzed by Nagaraj Arankal, where the handling
+of a spurious non-SACK RTO could cause a connection to fail to clear
+retrans_stamp, causing a later RTO to very prematurely time out the
+connection with ETIMEDOUT.
+
+Here is the buggy scenario, expanding upon Nagaraj Arankal's excellent
+report:
+
+(*1) Send one data packet on a non-SACK connection
+
+(*2) Because no ACK packet is received, the packet is retransmitted
+ and we enter CA_Loss; but this retransmission is spurious.
+
+(*3) The ACK for the original data is received. The transmitted packet
+ is acknowledged. The TCP timestamp is before the retrans_stamp,
+ so tcp_may_undo() returns true, and tcp_try_undo_loss() returns
+ true without changing state to Open (because tcp_is_sack() is
+ false), and tcp_process_loss() returns without calling
+ tcp_try_undo_recovery(). Normally after undoing a CA_Loss
+ episode, tcp_fastretrans_alert() would see that the connection
+ has returned to CA_Open and fall through and call
+ tcp_try_to_open(), which would set retrans_stamp to 0. However,
+ for non-SACK connections we hold the connection in CA_Loss, so do
+ not fall through to call tcp_try_to_open() and do not set
+ retrans_stamp to 0. So retrans_stamp is (erroneously) still
+ non-zero.
+
+ At this point the first "retransmission event" has passed and
+ been recovered from. Any future retransmission is a completely
+ new "event". However, retrans_stamp is erroneously still
+ set. (And we are still in CA_Loss, which is correct.)
+
+(*4) After 16 minutes (to correspond with tcp_retries2=15), a new data
+ packet is sent. Note: No data is transmitted between (*3) and
+ (*4) and we disabled keep alives.
+
+ The socket's timeout SHOULD be calculated from this point in
+ time, but instead it's calculated from the prior "event" 16
+ minutes ago (step (*2)).
+
+(*5) Because no ACK packet is received, the packet is retransmitted.
+
+(*6) At the time of the 2nd retransmission, the socket returns
+ ETIMEDOUT, prematurely, because retrans_stamp is (erroneously)
+ too far in the past (set at the time of (*2)).
+
+This commit fixes this bug by ensuring that we reuse in
+tcp_try_undo_loss() the same careful logic for non-SACK connections
+that we have in tcp_try_undo_recovery(). To avoid duplicating logic,
+we factor out that logic into a new
+tcp_is_non_sack_preventing_reopen() helper and call that helper from
+both undo functions.
+
+Fixes: da34ac7626b5 ("tcp: only undo on partial ACKs in CA_Loss")
+Reported-by: Nagaraj Arankal <nagaraj.p.arankal@hpe.com>
+Link: https://lore.kernel.org/all/SJ0PR84MB1847BE6C24D274C46A1B9B0EB27A9@SJ0PR84MB1847.NAMPRD84.PROD.OUTLOOK.COM/
+Signed-off-by: Neal Cardwell <ncardwell@google.com>
+Signed-off-by: Yuchung Cheng <ycheng@google.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Link: https://lore.kernel.org/r/20220903121023.866900-1-ncardwell.kernel@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/tcp_input.c | 25 ++++++++++++++++++-------
+ 1 file changed, 18 insertions(+), 7 deletions(-)
+
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 7fd7e7cba0c92..686e210d89c21 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -2506,6 +2506,21 @@ static inline bool tcp_may_undo(const struct tcp_sock *tp)
+ return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp));
+ }
+
++static bool tcp_is_non_sack_preventing_reopen(struct sock *sk)
++{
++ struct tcp_sock *tp = tcp_sk(sk);
++
++ if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
++ /* Hold old state until something *above* high_seq
++ * is ACKed. For Reno it is MUST to prevent false
++ * fast retransmits (RFC2582). SACK TCP is safe. */
++ if (!tcp_any_retrans_done(sk))
++ tp->retrans_stamp = 0;
++ return true;
++ }
++ return false;
++}
++
+ /* People celebrate: "We love our President!" */
+ static bool tcp_try_undo_recovery(struct sock *sk)
+ {
+@@ -2528,14 +2543,8 @@ static bool tcp_try_undo_recovery(struct sock *sk)
+ } else if (tp->rack.reo_wnd_persist) {
+ tp->rack.reo_wnd_persist--;
+ }
+- if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
+- /* Hold old state until something *above* high_seq
+- * is ACKed. For Reno it is MUST to prevent false
+- * fast retransmits (RFC2582). SACK TCP is safe. */
+- if (!tcp_any_retrans_done(sk))
+- tp->retrans_stamp = 0;
++ if (tcp_is_non_sack_preventing_reopen(sk))
+ return true;
+- }
+ tcp_set_ca_state(sk, TCP_CA_Open);
+ tp->is_sack_reneg = 0;
+ return false;
+@@ -2571,6 +2580,8 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
+ NET_INC_STATS(sock_net(sk),
+ LINUX_MIB_TCPSPURIOUSRTOS);
+ inet_csk(sk)->icsk_retransmits = 0;
++ if (tcp_is_non_sack_preventing_reopen(sk))
++ return true;
+ if (frto_undo || tcp_is_sack(tp)) {
+ tcp_set_ca_state(sk, TCP_CA_Open);
+ tp->is_sack_reneg = 0;
+--
+2.35.1
+
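The core of the ETIMEDOUT bug above is that retransmission timeouts are measured from retrans_stamp, so a stamp left over from an already-recovered loss episode makes a brand-new retransmission look ancient. A toy calculation (plain seconds standing in for jiffies; the 900s limit and all values are made up) showing why clearing the stamp between episodes matters:

/* Toy numbers only: "time" is in whole seconds and the 900s limit mimics
 * the roughly 15-16 minutes from the report above. This is not TCP code.
 */
#include <stdio.h>
#include <stdbool.h>

static bool retransmits_timed_out(unsigned int now, unsigned int retrans_stamp,
				  unsigned int limit)
{
	/* elapsed time of the current retransmission episode */
	return retrans_stamp && (now - retrans_stamp) > limit;
}

int main(void)
{
	unsigned int limit = 900;	/* give up after ~15 minutes */
	unsigned int stamp = 10;	/* armed at the first, spurious RTO */
	unsigned int now;

	/* episode 1 is recovered shortly after t=10, but the stamp is not
	 * cleared; 16 minutes later new data is sent and retransmitted */
	now = 10 + 960;
	printf("stale stamp kept: timed out = %d (premature ETIMEDOUT)\n",
	       retransmits_timed_out(now, stamp, limit));

	/* what the fix ensures: the recovered episode clears the stamp, and
	 * it is only re-armed when a new episode actually starts */
	stamp = 0;
	if (!stamp)
		stamp = now;
	printf("stamp cleared   : timed out = %d\n",
	       retransmits_timed_out(now, stamp, limit));
	return 0;
}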
--- /dev/null
+From 82065dbe780d3add1d581169dd4351fa2fabb372 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 31 Aug 2022 23:38:09 +0000
+Subject: tcp: TX zerocopy should not sense pfmemalloc status
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 3261400639463a853ba2b3be8bd009c2a8089775 ]
+
+We got a recent syzbot report [1] showing a possible misuse
+of pfmemalloc page status in TCP zerocopy paths.
+
+Indeed, for pages coming from user space or other layers,
+using page_is_pfmemalloc() is moot, and possibly could give
+false positives.
+
+There have been attempts to make page_is_pfmemalloc() more robust,
+but not using it in the first place in this context is probably better,
+removing cpu cycles.
+
+Note to stable teams:
+
+You need to backport 84ce071e38a6 ("net: introduce
+__skb_fill_page_desc_noacc") as a prereq.
+
+Race is more probable after commit c07aea3ef4d4
+("mm: add a signature in struct page") because page_is_pfmemalloc()
+is now using low order bit from page->lru.next, which can change
+more often than page->index.
+
+Low order bit should never be set for lru.next (when used as an anchor
+in LRU list), so KCSAN report is mostly a false positive.
+
+Backporting to older kernel versions seems not necessary.
+
+[1]
+BUG: KCSAN: data-race in lru_add_fn / tcp_build_frag
+
+write to 0xffffea0004a1d2c8 of 8 bytes by task 18600 on cpu 0:
+__list_add include/linux/list.h:73 [inline]
+list_add include/linux/list.h:88 [inline]
+lruvec_add_folio include/linux/mm_inline.h:105 [inline]
+lru_add_fn+0x440/0x520 mm/swap.c:228
+folio_batch_move_lru+0x1e1/0x2a0 mm/swap.c:246
+folio_batch_add_and_move mm/swap.c:263 [inline]
+folio_add_lru+0xf1/0x140 mm/swap.c:490
+filemap_add_folio+0xf8/0x150 mm/filemap.c:948
+__filemap_get_folio+0x510/0x6d0 mm/filemap.c:1981
+pagecache_get_page+0x26/0x190 mm/folio-compat.c:104
+grab_cache_page_write_begin+0x2a/0x30 mm/folio-compat.c:116
+ext4_da_write_begin+0x2dd/0x5f0 fs/ext4/inode.c:2988
+generic_perform_write+0x1d4/0x3f0 mm/filemap.c:3738
+ext4_buffered_write_iter+0x235/0x3e0 fs/ext4/file.c:270
+ext4_file_write_iter+0x2e3/0x1210
+call_write_iter include/linux/fs.h:2187 [inline]
+new_sync_write fs/read_write.c:491 [inline]
+vfs_write+0x468/0x760 fs/read_write.c:578
+ksys_write+0xe8/0x1a0 fs/read_write.c:631
+__do_sys_write fs/read_write.c:643 [inline]
+__se_sys_write fs/read_write.c:640 [inline]
+__x64_sys_write+0x3e/0x50 fs/read_write.c:640
+do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+do_syscall_64+0x2b/0x70 arch/x86/entry/common.c:80
+entry_SYSCALL_64_after_hwframe+0x63/0xcd
+
+read to 0xffffea0004a1d2c8 of 8 bytes by task 18611 on cpu 1:
+page_is_pfmemalloc include/linux/mm.h:1740 [inline]
+__skb_fill_page_desc include/linux/skbuff.h:2422 [inline]
+skb_fill_page_desc include/linux/skbuff.h:2443 [inline]
+tcp_build_frag+0x613/0xb20 net/ipv4/tcp.c:1018
+do_tcp_sendpages+0x3e8/0xaf0 net/ipv4/tcp.c:1075
+tcp_sendpage_locked net/ipv4/tcp.c:1140 [inline]
+tcp_sendpage+0x89/0xb0 net/ipv4/tcp.c:1150
+inet_sendpage+0x7f/0xc0 net/ipv4/af_inet.c:833
+kernel_sendpage+0x184/0x300 net/socket.c:3561
+sock_sendpage+0x5a/0x70 net/socket.c:1054
+pipe_to_sendpage+0x128/0x160 fs/splice.c:361
+splice_from_pipe_feed fs/splice.c:415 [inline]
+__splice_from_pipe+0x222/0x4d0 fs/splice.c:559
+splice_from_pipe fs/splice.c:594 [inline]
+generic_splice_sendpage+0x89/0xc0 fs/splice.c:743
+do_splice_from fs/splice.c:764 [inline]
+direct_splice_actor+0x80/0xa0 fs/splice.c:931
+splice_direct_to_actor+0x305/0x620 fs/splice.c:886
+do_splice_direct+0xfb/0x180 fs/splice.c:974
+do_sendfile+0x3bf/0x910 fs/read_write.c:1249
+__do_sys_sendfile64 fs/read_write.c:1317 [inline]
+__se_sys_sendfile64 fs/read_write.c:1303 [inline]
+__x64_sys_sendfile64+0x10c/0x150 fs/read_write.c:1303
+do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+do_syscall_64+0x2b/0x70 arch/x86/entry/common.c:80
+entry_SYSCALL_64_after_hwframe+0x63/0xcd
+
+value changed: 0x0000000000000000 -> 0xffffea0004a1d288
+
+Reported by Kernel Concurrency Sanitizer on:
+CPU: 1 PID: 18611 Comm: syz-executor.4 Not tainted 6.0.0-rc2-syzkaller-00248-ge022620b5d05-dirty #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 07/22/2022
+
+Fixes: c07aea3ef4d4 ("mm: add a signature in struct page")
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Shakeel Butt <shakeelb@google.com>
+Reviewed-by: Shakeel Butt <shakeelb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/skbuff.h | 21 +++++++++++++++++++++
+ net/core/datagram.c | 2 +-
+ net/ipv4/tcp.c | 2 +-
+ 3 files changed, 23 insertions(+), 2 deletions(-)
+
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index be7cc31d58961..cfb889f66c703 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -2291,6 +2291,27 @@ static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
+ skb_shinfo(skb)->nr_frags = i + 1;
+ }
+
++/**
++ * skb_fill_page_desc_noacc - initialise a paged fragment in an skb
++ * @skb: buffer containing fragment to be initialised
++ * @i: paged fragment index to initialise
++ * @page: the page to use for this fragment
++ * @off: the offset to the data with @page
++ * @size: the length of the data
++ *
++ * Variant of skb_fill_page_desc() which does not deal with
++ * pfmemalloc, if page is not owned by us.
++ */
++static inline void skb_fill_page_desc_noacc(struct sk_buff *skb, int i,
++ struct page *page, int off,
++ int size)
++{
++ struct skb_shared_info *shinfo = skb_shinfo(skb);
++
++ __skb_fill_page_desc_noacc(shinfo, i, page, off, size);
++ shinfo->nr_frags = i + 1;
++}
++
+ void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
+ int size, unsigned int truesize);
+
+diff --git a/net/core/datagram.c b/net/core/datagram.c
+index 15ab9ffb27fe9..28e5f921dcaf4 100644
+--- a/net/core/datagram.c
++++ b/net/core/datagram.c
+@@ -677,7 +677,7 @@ int __zerocopy_sg_from_iter(struct sock *sk, struct sk_buff *skb,
+ page_ref_sub(last_head, refs);
+ refs = 0;
+ }
+- skb_fill_page_desc(skb, frag++, head, start, size);
++ skb_fill_page_desc_noacc(skb, frag++, head, start, size);
+ }
+ if (refs)
+ page_ref_sub(last_head, refs);
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 0ebef2a5950cd..4f6b897ccf23f 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -1002,7 +1002,7 @@ struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags,
+ skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
+ } else {
+ get_page(page);
+- skb_fill_page_desc(skb, i, page, offset, copy);
++ skb_fill_page_desc_noacc(skb, i, page, offset, copy);
+ }
+
+ if (!(flags & MSG_NO_SHARED_FRAGS))
+--
+2.35.1
+
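The new skb_fill_page_desc_noacc() helper in the patch above is skb_fill_page_desc() minus the pfmemalloc propagation, because that page bit is meaningless for pages the stack does not own (user pages in the zerocopy paths). A standalone sketch of the "provide a variant that skips flag propagation" pattern (toy structs, not skbuff.h):

/* Toy model: fill_desc() taints the buffer when the page came from an
 * emergency reserve; the _noacc variant is for pages we do not own and
 * whose flag therefore tells us nothing. Names invented for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

struct page_t { bool pfmemalloc; };
struct buf_t  { struct page_t *frags[4]; int nr_frags; bool pfmemalloc; };

static void fill_desc_noacc(struct buf_t *b, int i, struct page_t *p)
{
	b->frags[i] = p;
	b->nr_frags = i + 1;
	/* deliberately never looks at p->pfmemalloc */
}

static void fill_desc(struct buf_t *b, int i, struct page_t *p)
{
	fill_desc_noacc(b, i, p);
	if (p->pfmemalloc)	/* only meaningful for pages we allocated */
		b->pfmemalloc = true;
}

int main(void)
{
	/* a user page: its pfmemalloc bit aliases unrelated data and may
	 * read as true by accident */
	struct page_t user_page = { .pfmemalloc = true };
	struct buf_t a = { 0 }, b = { 0 };

	fill_desc(&a, 0, &user_page);		/* old path: false positive */
	fill_desc_noacc(&b, 0, &user_page);	/* fixed path */

	printf("old: buffer tainted = %d\n", a.pfmemalloc);
	printf("new: buffer tainted = %d\n", b.pfmemalloc);
	return 0;
}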
--- /dev/null
+From c1043777d1c9182bff391db7846615d5eb1abf78 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 22 Aug 2022 07:43:35 +0200
+Subject: tee: fix compiler warning in tee_shm_register()
+
+From: Jens Wiklander <jens.wiklander@linaro.org>
+
+[ Upstream commit eccd7439709810127563e7e3e49b8b44c7b2791d ]
+
+Include <linux/uaccess.h> to avoid the warning:
+ drivers/tee/tee_shm.c: In function 'tee_shm_register':
+>> drivers/tee/tee_shm.c:242:14: error: implicit declaration of function 'access_ok' [-Werror=implicit-function-declaration]
+ 242 | if (!access_ok((void __user *)addr, length))
+ | ^~~~~~~~~
+ cc1: some warnings being treated as errors
+
+Fixes: 573ae4f13f63 ("tee: add overflow check in register_shm_helper()")
+Reviewed-by: Sumit Garg <sumit.garg@linaro.org>
+Reported-by: kernel test robot <lkp@intel.com>
+Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/tee/tee_shm.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
+index 6e662fb131d55..bd96ebb82c8ec 100644
+--- a/drivers/tee/tee_shm.c
++++ b/drivers/tee/tee_shm.c
+@@ -9,6 +9,7 @@
+ #include <linux/sched.h>
+ #include <linux/slab.h>
+ #include <linux/tee_drv.h>
++#include <linux/uaccess.h>
+ #include <linux/uio.h>
+ #include "tee_private.h"
+
+--
+2.35.1
+
--- /dev/null
+From f4bba2eaecd12c1bff4fd5ed63ba2568aaf59c03 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 31 Aug 2022 17:47:56 +0300
+Subject: tipc: fix shift wrapping bug in map_get()
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+[ Upstream commit e2b224abd9bf45dcb55750479fc35970725a430b ]
+
+There is a shift wrapping bug in this code, so querying any bit above
+31 will return false.
+
+Fixes: 35c55c9877f8 ("tipc: add neighbor monitoring framework")
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/tipc/monitor.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c
+index 2f4d23238a7e3..9618e4429f0fe 100644
+--- a/net/tipc/monitor.c
++++ b/net/tipc/monitor.c
+@@ -160,7 +160,7 @@ static void map_set(u64 *up_map, int i, unsigned int v)
+
+ static int map_get(u64 up_map, int i)
+ {
+- return (up_map & (1 << i)) >> i;
++ return (up_map & (1ULL << i)) >> i;
+ }
+
+ static struct tipc_peer *peer_prev(struct tipc_peer *peer)
+--
+2.35.1
+
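The one-line tipc fix above is a classic integer-width pitfall: the literal 1 is a 32-bit int, so 1 << i is undefined once i reaches 32, even though the result is masked against a u64; in practice the shift wraps and high bits always read as clear. The standalone program below mirrors map_get() in its buggy and fixed forms (bit 40 is an arbitrary example):

#include <stdio.h>
#include <stdint.h>

/* same shape as tipc's map_get(), before and after the fix */
static int map_get_buggy(uint64_t up_map, int i)
{
	return (up_map & (1 << i)) >> i;	/* int-width shift: breaks for i >= 32 */
}

static int map_get_fixed(uint64_t up_map, int i)
{
	return (up_map & (1ULL << i)) >> i;	/* 64-bit shift */
}

int main(void)
{
	int i = 40;
	uint64_t up_map = 1ULL << i;		/* only bit 40 is set */

	printf("buggy: bit %d reads as %d\n", i, map_get_buggy(up_map, i));
	printf("fixed: bit %d reads as %d\n", i, map_get_fixed(up_map, i));
	return 0;
}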
--- /dev/null
+From f75c817395b2b4884293f895cd1cc9f16f906adc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 9 Aug 2022 07:57:56 +0000
+Subject: wifi: wilc1000: fix DMA on stack objects
+
+From: Ajay.Kathat@microchip.com <Ajay.Kathat@microchip.com>
+
+[ Upstream commit 40b717bfcefab28a0656b8caa5e43d5449e5a671 ]
+
+Sometimes 'wilc_sdio_cmd53' is called with addresses pointing to an
+object on the stack. Use dynamically allocated memory for cmd53 instead
+of a stack address, which is not DMA'able.
+
+Fixes: 5625f965d764 ("wilc1000: move wilc driver out of staging")
+Reported-by: Michael Walle <mwalle@kernel.org>
+Suggested-by: Michael Walle <mwalle@kernel.org>
+Signed-off-by: Ajay Singh <ajay.kathat@microchip.com>
+Reviewed-by: Michael Walle <mwalle@kernel.org>
+Tested-by: Michael Walle <mwalle@kernel.org>
+Signed-off-by: Kalle Valo <kvalo@kernel.org>
+Link: https://lore.kernel.org/r/20220809075749.62752-1-ajay.kathat@microchip.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/wireless/microchip/wilc1000/netdev.h | 1 +
+ .../net/wireless/microchip/wilc1000/sdio.c | 39 ++++++++++++++++---
+ .../net/wireless/microchip/wilc1000/wlan.c | 15 ++++++-
+ 3 files changed, 47 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.h b/drivers/net/wireless/microchip/wilc1000/netdev.h
+index 86209b391a3d6..e6e23fc585ee8 100644
+--- a/drivers/net/wireless/microchip/wilc1000/netdev.h
++++ b/drivers/net/wireless/microchip/wilc1000/netdev.h
+@@ -252,6 +252,7 @@ struct wilc {
+ u8 *rx_buffer;
+ u32 rx_buffer_offset;
+ u8 *tx_buffer;
++ u32 *vmm_table;
+
+ struct txq_handle txq[NQUEUES];
+ int txq_entries;
+diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c
+index 8b3b735231085..6c0727fc4abd9 100644
+--- a/drivers/net/wireless/microchip/wilc1000/sdio.c
++++ b/drivers/net/wireless/microchip/wilc1000/sdio.c
+@@ -27,6 +27,7 @@ struct wilc_sdio {
+ bool irq_gpio;
+ u32 block_size;
+ int has_thrpt_enh3;
++ u8 *cmd53_buf;
+ };
+
+ struct sdio_cmd52 {
+@@ -46,6 +47,7 @@ struct sdio_cmd53 {
+ u32 count: 9;
+ u8 *buffer;
+ u32 block_size;
++ bool use_global_buf;
+ };
+
+ static const struct wilc_hif_func wilc_hif_sdio;
+@@ -90,6 +92,8 @@ static int wilc_sdio_cmd53(struct wilc *wilc, struct sdio_cmd53 *cmd)
+ {
+ struct sdio_func *func = container_of(wilc->dev, struct sdio_func, dev);
+ int size, ret;
++ struct wilc_sdio *sdio_priv = wilc->bus_data;
++ u8 *buf = cmd->buffer;
+
+ sdio_claim_host(func);
+
+@@ -100,12 +104,23 @@ static int wilc_sdio_cmd53(struct wilc *wilc, struct sdio_cmd53 *cmd)
+ else
+ size = cmd->count;
+
++ if (cmd->use_global_buf) {
++ if (size > sizeof(u32))
++ return -EINVAL;
++
++ buf = sdio_priv->cmd53_buf;
++ }
++
+ if (cmd->read_write) { /* write */
+- ret = sdio_memcpy_toio(func, cmd->address,
+- (void *)cmd->buffer, size);
++ if (cmd->use_global_buf)
++ memcpy(buf, cmd->buffer, size);
++
++ ret = sdio_memcpy_toio(func, cmd->address, buf, size);
+ } else { /* read */
+- ret = sdio_memcpy_fromio(func, (void *)cmd->buffer,
+- cmd->address, size);
++ ret = sdio_memcpy_fromio(func, buf, cmd->address, size);
++
++ if (cmd->use_global_buf)
++ memcpy(cmd->buffer, buf, size);
+ }
+
+ sdio_release_host(func);
+@@ -127,6 +142,12 @@ static int wilc_sdio_probe(struct sdio_func *func,
+ if (!sdio_priv)
+ return -ENOMEM;
+
++ sdio_priv->cmd53_buf = kzalloc(sizeof(u32), GFP_KERNEL);
++ if (!sdio_priv->cmd53_buf) {
++ ret = -ENOMEM;
++ goto free;
++ }
++
+ ret = wilc_cfg80211_init(&wilc, &func->dev, WILC_HIF_SDIO,
+ &wilc_hif_sdio);
+ if (ret)
+@@ -160,6 +181,7 @@ static int wilc_sdio_probe(struct sdio_func *func,
+ irq_dispose_mapping(wilc->dev_irq_num);
+ wilc_netdev_cleanup(wilc);
+ free:
++ kfree(sdio_priv->cmd53_buf);
+ kfree(sdio_priv);
+ return ret;
+ }
+@@ -171,6 +193,7 @@ static void wilc_sdio_remove(struct sdio_func *func)
+
+ clk_disable_unprepare(wilc->rtc_clk);
+ wilc_netdev_cleanup(wilc);
++ kfree(sdio_priv->cmd53_buf);
+ kfree(sdio_priv);
+ }
+
+@@ -367,8 +390,9 @@ static int wilc_sdio_write_reg(struct wilc *wilc, u32 addr, u32 data)
+ cmd.address = WILC_SDIO_FBR_DATA_REG;
+ cmd.block_mode = 0;
+ cmd.increment = 1;
+- cmd.count = 4;
++ cmd.count = sizeof(u32);
+ cmd.buffer = (u8 *)&data;
++ cmd.use_global_buf = true;
+ cmd.block_size = sdio_priv->block_size;
+ ret = wilc_sdio_cmd53(wilc, &cmd);
+ if (ret)
+@@ -406,6 +430,7 @@ static int wilc_sdio_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
+ nblk = size / block_size;
+ nleft = size % block_size;
+
++ cmd.use_global_buf = false;
+ if (nblk > 0) {
+ cmd.block_mode = 1;
+ cmd.increment = 1;
+@@ -484,8 +509,9 @@ static int wilc_sdio_read_reg(struct wilc *wilc, u32 addr, u32 *data)
+ cmd.address = WILC_SDIO_FBR_DATA_REG;
+ cmd.block_mode = 0;
+ cmd.increment = 1;
+- cmd.count = 4;
++ cmd.count = sizeof(u32);
+ cmd.buffer = (u8 *)data;
++ cmd.use_global_buf = true;
+
+ cmd.block_size = sdio_priv->block_size;
+ ret = wilc_sdio_cmd53(wilc, &cmd);
+@@ -527,6 +553,7 @@ static int wilc_sdio_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
+ nblk = size / block_size;
+ nleft = size % block_size;
+
++ cmd.use_global_buf = false;
+ if (nblk > 0) {
+ cmd.block_mode = 1;
+ cmd.increment = 1;
+diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c
+index 200a103a0a858..380699983a75b 100644
+--- a/drivers/net/wireless/microchip/wilc1000/wlan.c
++++ b/drivers/net/wireless/microchip/wilc1000/wlan.c
+@@ -701,7 +701,7 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
+ int ret = 0;
+ int counter;
+ int timeout;
+- u32 vmm_table[WILC_VMM_TBL_SIZE];
++ u32 *vmm_table = wilc->vmm_table;
+ u8 ac_pkt_num_to_chip[NQUEUES] = {0, 0, 0, 0};
+ const struct wilc_hif_func *func;
+ int srcu_idx;
+@@ -1220,6 +1220,8 @@ void wilc_wlan_cleanup(struct net_device *dev)
+ while ((rqe = wilc_wlan_rxq_remove(wilc)))
+ kfree(rqe);
+
++ kfree(wilc->vmm_table);
++ wilc->vmm_table = NULL;
+ kfree(wilc->rx_buffer);
+ wilc->rx_buffer = NULL;
+ kfree(wilc->tx_buffer);
+@@ -1455,6 +1457,14 @@ int wilc_wlan_init(struct net_device *dev)
+ goto fail;
+ }
+
++ if (!wilc->vmm_table)
++ wilc->vmm_table = kzalloc(WILC_VMM_TBL_SIZE, GFP_KERNEL);
++
++ if (!wilc->vmm_table) {
++ ret = -ENOBUFS;
++ goto fail;
++ }
++
+ if (!wilc->tx_buffer)
+ wilc->tx_buffer = kmalloc(WILC_TX_BUFF_SIZE, GFP_KERNEL);
+
+@@ -1479,7 +1489,8 @@ int wilc_wlan_init(struct net_device *dev)
+ return 0;
+
+ fail:
+-
++ kfree(wilc->vmm_table);
++ wilc->vmm_table = NULL;
+ kfree(wilc->rx_buffer);
+ wilc->rx_buffer = NULL;
+ kfree(wilc->tx_buffer);
+--
+2.35.1
+
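The wilc1000 change above is the standard bounce-buffer pattern: the SDIO transfer needs memory that may legally be used for DMA, so small register-sized transfers are staged through one buffer allocated at probe time instead of pointing the hardware at a caller's stack variable. A userspace sketch of that staging logic (memcpy() stands in for the actual SDIO transfer; all names are invented):

/* Illustration of staging a caller-supplied buffer (possibly on the stack)
 * through a preallocated "DMA-safe" bounce buffer, mirroring the
 * use_global_buf handling added to wilc_sdio_cmd53(). Not driver code.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <stdbool.h>

struct dev {
	uint8_t *bounce;	/* allocated once at probe time in the driver */
	uint8_t  hw_reg[4];	/* pretend device register window */
};

/* "DMA" engine: only ever touches the buffer it is handed */
static void hw_xfer(struct dev *d, uint8_t *buf, size_t len, bool write)
{
	if (write)
		memcpy(d->hw_reg, buf, len);
	else
		memcpy(buf, d->hw_reg, len);
}

static int cmd53(struct dev *d, uint8_t *caller_buf, size_t len,
		 bool write, bool use_bounce)
{
	uint8_t *buf = caller_buf;

	if (use_bounce) {
		if (len > sizeof(uint32_t))
			return -1;	/* bounce buffer holds one word */
		buf = d->bounce;
		if (write)
			memcpy(buf, caller_buf, len);
	}

	hw_xfer(d, buf, len, write);

	if (use_bounce && !write)
		memcpy(caller_buf, buf, len);
	return 0;
}

int main(void)
{
	struct dev d = { .bounce = calloc(1, sizeof(uint32_t)) };
	uint32_t reg = 0xdeadbeef;	/* lives on the stack */
	uint32_t readback = 0;

	if (!d.bounce)
		return 1;
	cmd53(&d, (uint8_t *)&reg, sizeof(reg), true, true);
	cmd53(&d, (uint8_t *)&readback, sizeof(readback), false, true);
	printf("read back 0x%08x\n", readback);
	free(d.bounce);
	return 0;
}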
--- /dev/null
+From 8447a9f67bc97f37a6b7c8091dfd778f4b2e4183 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 1 Sep 2022 12:55:54 +0100
+Subject: xen-netback: only remove 'hotplug-status' when the vif is actually
+ destroyed
+
+From: Paul Durrant <pdurrant@amazon.com>
+
+[ Upstream commit c55f34b6aec2a8cb47eadaffea773e83bf85de91 ]
+
+Removing 'hotplug-status' in backend_disconnected() means that it will be
+removed even in the case that the frontend unilaterally disconnects (which
+it is free to do at any time). The consequence of this is that, when the
+frontend attempts to re-connect, the backend gets stuck in 'InitWait'
+rather than moving straight to 'Connected' (which it can do because the
+hotplug script has already run).
+Instead, the 'hotplug-status' node should be removed in netback_remove(),
+i.e. when the vif really is going away.
+
+Fixes: 0f4558ae9187 ("Revert "xen-netback: remove 'hotplug-status' once it has served its purpose"")
+Signed-off-by: Paul Durrant <pdurrant@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/xen-netback/xenbus.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
+index 990360d75cb64..e85b3c5d4acce 100644
+--- a/drivers/net/xen-netback/xenbus.c
++++ b/drivers/net/xen-netback/xenbus.c
+@@ -256,7 +256,6 @@ static void backend_disconnect(struct backend_info *be)
+ unsigned int queue_index;
+
+ xen_unregister_watchers(vif);
+- xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status");
+ #ifdef CONFIG_DEBUG_FS
+ xenvif_debugfs_delif(vif);
+ #endif /* CONFIG_DEBUG_FS */
+@@ -984,6 +983,7 @@ static int netback_remove(struct xenbus_device *dev)
+ struct backend_info *be = dev_get_drvdata(&dev->dev);
+
+ unregister_hotplug_status_watch(be);
++ xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
+ if (be->vif) {
+ kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
+ backend_disconnect(be);
+--
+2.35.1
+