--- /dev/null
+From 3b6981a5e54ebcb39cc6268411313a544ab7113b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 26 Mar 2024 08:01:02 +0200
+Subject: accel/habanalabs/gaudi2: unsecure edma max outstanding register
+
+From: Rakesh Ughreja <rughreja@habana.ai>
+
+[ Upstream commit 3309887c6ff8ca2ac05a74e1ee5d1c44829f63f2 ]
+
+Network EDMAs use more outstanding transfers, so this register needs to be
+programmed by the EDMA firmware.
+
+Signed-off-by: Rakesh Ughreja <rughreja@habana.ai>
+Reviewed-by: Ofir Bitton <obitton@habana.ai>
+Signed-off-by: Ofir Bitton <obitton@habana.ai>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/accel/habanalabs/gaudi2/gaudi2_security.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2_security.c b/drivers/accel/habanalabs/gaudi2/gaudi2_security.c
+index 34bf80c5a44b..307ccb912ccd 100644
+--- a/drivers/accel/habanalabs/gaudi2/gaudi2_security.c
++++ b/drivers/accel/habanalabs/gaudi2/gaudi2_security.c
+@@ -479,6 +479,7 @@ static const u32 gaudi2_pb_dcr0_edma0_unsecured_regs[] = {
+ mmDCORE0_EDMA0_CORE_CTX_TE_NUMROWS,
+ mmDCORE0_EDMA0_CORE_CTX_IDX,
+ mmDCORE0_EDMA0_CORE_CTX_IDX_INC,
++ mmDCORE0_EDMA0_CORE_WR_COMP_MAX_OUTSTAND,
+ mmDCORE0_EDMA0_CORE_RD_LBW_RATE_LIM_CFG,
+ mmDCORE0_EDMA0_QM_CQ_CFG0_0,
+ mmDCORE0_EDMA0_QM_CQ_CFG0_1,
+--
+2.43.0
+
--- /dev/null
+From f5b15ccb370e8fbf29aa1a00933393bdb83605ba Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Jun 2024 13:56:22 -0700
+Subject: af_unix: Remove put_pid()/put_cred() in copy_peercred().
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit e4bd881d987121dbf1a288641491955a53d9f8f7 ]
+
+When (AF_UNIX, SOCK_STREAM) socket connect()s to a listening socket,
+the listener's sk_peer_pid/sk_peer_cred are copied to the client in
+copy_peercred().
+
+At that point, the client's sk_peer_pid and sk_peer_cred are always NULL,
+so there is no need to call put_pid() and put_cred() there.
+
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/unix/af_unix.c | 9 +--------
+ 1 file changed, 1 insertion(+), 8 deletions(-)
+
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index be5266007b48..84a332f95aa8 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -692,9 +692,6 @@ static void init_peercred(struct sock *sk)
+
+ static void copy_peercred(struct sock *sk, struct sock *peersk)
+ {
+- const struct cred *old_cred;
+- struct pid *old_pid;
+-
+ if (sk < peersk) {
+ spin_lock(&sk->sk_peer_lock);
+ spin_lock_nested(&peersk->sk_peer_lock, SINGLE_DEPTH_NESTING);
+@@ -702,16 +699,12 @@ static void copy_peercred(struct sock *sk, struct sock *peersk)
+ spin_lock(&peersk->sk_peer_lock);
+ spin_lock_nested(&sk->sk_peer_lock, SINGLE_DEPTH_NESTING);
+ }
+- old_pid = sk->sk_peer_pid;
+- old_cred = sk->sk_peer_cred;
++
+ sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
+ sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
+
+ spin_unlock(&sk->sk_peer_lock);
+ spin_unlock(&peersk->sk_peer_lock);
+-
+- put_pid(old_pid);
+- put_cred(old_cred);
+ }
+
+ static int unix_listen(struct socket *sock, int backlog)
+--
+2.43.0
+
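
The hunk above keeps copy_peercred()'s existing deadlock-avoidance trick: the
two sk_peer_lock spinlocks are always taken in a fixed order chosen by
comparing the socket pointers (sk < peersk). A minimal userspace sketch of
that address-ordered locking pattern, using pthread mutexes instead of the
kernel's spin_lock()/spin_lock_nested() (the peer struct and function names
below are illustrative, not from af_unix):

#include <pthread.h>
#include <stdint.h>

struct peer {
	pthread_mutex_t lock;
	int value;
};

/*
 * Always take the lower-addressed lock first, mirroring the sk < peersk
 * check above, so two threads locking the same pair from opposite ends
 * cannot deadlock on each other.
 */
static void lock_pair(struct peer *a, struct peer *b)
{
	if ((uintptr_t)a < (uintptr_t)b) {
		pthread_mutex_lock(&a->lock);
		pthread_mutex_lock(&b->lock);
	} else {
		pthread_mutex_lock(&b->lock);
		pthread_mutex_lock(&a->lock);
	}
}

static void unlock_pair(struct peer *a, struct peer *b)
{
	pthread_mutex_unlock(&a->lock);
	pthread_mutex_unlock(&b->lock);
}

Only the acquisition order has to be consistent; the unlock order does not
matter for correctness.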
--- /dev/null
+From 7429979de9407df4b720d136ccdb09dc6f183efd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 16 Jun 2024 09:34:44 +0200
+Subject: ALSA: control: Apply sanity check of input values for user elements
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 50ed081284fe2bfd1f25e8b92f4f6a4990e73c0a ]
+
+Although we already have a mechanism for sanity checks of input values
+for control writes, it's not applied unless the kconfig
+CONFIG_SND_CTL_INPUT_VALIDATION is set, for performance reasons.
+Nevertheless, it still makes sense to apply the same check for user
+elements despite its cost, as that's the only way to filter out the
+invalid values; the user controls are handled solely in ALSA core
+code, and there is no corresponding driver, after all.
+
+This patch adds the same input value validation for user control
+elements in their put callback. The kselftest will be happier with this
+change, as incorrect values will now be rejected with errors.
+
+For other normal controls, the check is applied still only when
+CONFIG_SND_CTL_INPUT_VALIDATION is set.
+
+Reported-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Closes: https://lore.kernel.org/r/1d44be36-9bb9-4d82-8953-5ae2a4f09405@molgen.mpg.de
+Reviewed-by: Jaroslav Kysela <perex@perex.cz>
+Reviewed-by: Mark Brown <broonie@kernel.org>
+Reviewed-by: Takashi Sakamoto <o-takashi@sakamocchi.jp>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://lore.kernel.org/20240616073454.16512-4-tiwai@suse.de
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/core/control.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/sound/core/control.c b/sound/core/control.c
+index fb0c60044f7b..1dd2337e2930 100644
+--- a/sound/core/control.c
++++ b/sound/core/control.c
+@@ -1480,12 +1480,16 @@ static int snd_ctl_elem_user_get(struct snd_kcontrol *kcontrol,
+ static int snd_ctl_elem_user_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+ {
+- int change;
++ int err, change;
+ struct user_element *ue = kcontrol->private_data;
+ unsigned int size = ue->elem_data_size;
+ char *dst = ue->elem_data +
+ snd_ctl_get_ioff(kcontrol, &ucontrol->id) * size;
+
++ err = sanity_check_input_values(ue->card, ucontrol, &ue->info, false);
++ if (err < 0)
++ return err;
++
+ change = memcmp(&ucontrol->value, dst, size) != 0;
+ if (change)
+ memcpy(dst, &ucontrol->value, size);
+--
+2.43.0
+
--- /dev/null
+From 216fea5882aba36303a19b3ee7a3f939e813e00a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 16 Jun 2024 09:34:47 +0200
+Subject: ALSA: hda: Add input value sanity checks to HDMI channel map controls
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 6278056e42d953e207e2afd416be39d09ed2d496 ]
+
+Add a simple sanity check to HD-audio HDMI Channel Map controls.
+Although the value might not be accepted for the actual connection, we
+can filter out some bogus values beforehand, and that should be enough
+for making kselftest happier.
+
+Reviewed-by: Jaroslav Kysela <perex@perex.cz>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://lore.kernel.org/20240616073454.16512-7-tiwai@suse.de
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/hda/hdmi_chmap.c | 18 ++++++++++++++++++
+ 1 file changed, 18 insertions(+)
+
+diff --git a/sound/hda/hdmi_chmap.c b/sound/hda/hdmi_chmap.c
+index 5d8e1d944b0a..7b276047f85a 100644
+--- a/sound/hda/hdmi_chmap.c
++++ b/sound/hda/hdmi_chmap.c
+@@ -753,6 +753,20 @@ static int hdmi_chmap_ctl_get(struct snd_kcontrol *kcontrol,
+ return 0;
+ }
+
++/* a simple sanity check for input values to chmap kcontrol */
++static int chmap_value_check(struct hdac_chmap *hchmap,
++ const struct snd_ctl_elem_value *ucontrol)
++{
++ int i;
++
++ for (i = 0; i < hchmap->channels_max; i++) {
++ if (ucontrol->value.integer.value[i] < 0 ||
++ ucontrol->value.integer.value[i] > SNDRV_CHMAP_LAST)
++ return -EINVAL;
++ }
++ return 0;
++}
++
+ static int hdmi_chmap_ctl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+ {
+@@ -764,6 +778,10 @@ static int hdmi_chmap_ctl_put(struct snd_kcontrol *kcontrol,
+ unsigned char chmap[8], per_pin_chmap[8];
+ int i, err, ca, prepared = 0;
+
++ err = chmap_value_check(hchmap, ucontrol);
++ if (err < 0)
++ return err;
++
+ /* No monitor is connected in dyn_pcm_assign.
+ * It's invalid to setup the chmap
+ */
+--
+2.43.0
+
--- /dev/null
+From 2e3bc1c92a2e26447115843163334a05b43785fd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 7 Jul 2024 16:30:07 +0800
+Subject: ASoc: TAS2781: replace beXX_to_cpup with get_unaligned_beXX for
+ potentially broken alignment
+
+From: Shenghao Ding <shenghao-ding@ti.com>
+
+[ Upstream commit 1cc509edbe23b61e8c245611bd15d88edb635a38 ]
+
+Use get_unaligned_be16 instead of be16_to_cpup and get_unaligned_be32
+instead of be32_to_cpup for potentially broken alignment.
+
+Signed-off-by: Shenghao Ding <shenghao-ding@ti.com>
+Link: https://patch.msgid.link/20240707083011.98-1-shenghao-ding@ti.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/codecs/tas2781-fmwlib.c | 71 +++++++++++++++----------------
+ 1 file changed, 35 insertions(+), 36 deletions(-)
+
+diff --git a/sound/soc/codecs/tas2781-fmwlib.c b/sound/soc/codecs/tas2781-fmwlib.c
+index 08082806d589..8f9a3ae7153e 100644
+--- a/sound/soc/codecs/tas2781-fmwlib.c
++++ b/sound/soc/codecs/tas2781-fmwlib.c
+@@ -21,7 +21,7 @@
+ #include <sound/soc.h>
+ #include <sound/tlv.h>
+ #include <sound/tas2781.h>
+-
++#include <asm/unaligned.h>
+
+ #define ERROR_PRAM_CRCCHK 0x0000000
+ #define ERROR_YRAM_CRCCHK 0x0000001
+@@ -187,8 +187,7 @@ static struct tasdevice_config_info *tasdevice_add_config(
+ /* convert data[offset], data[offset + 1], data[offset + 2] and
+ * data[offset + 3] into host
+ */
+- cfg_info->nblocks =
+- be32_to_cpup((__be32 *)&config_data[config_offset]);
++ cfg_info->nblocks = get_unaligned_be32(&config_data[config_offset]);
+ config_offset += 4;
+
+ /* Several kinds of dsp/algorithm firmwares can run on tas2781,
+@@ -232,14 +231,14 @@ static struct tasdevice_config_info *tasdevice_add_config(
+
+ }
+ bk_da[i]->yram_checksum =
+- be16_to_cpup((__be16 *)&config_data[config_offset]);
++ get_unaligned_be16(&config_data[config_offset]);
+ config_offset += 2;
+ bk_da[i]->block_size =
+- be32_to_cpup((__be32 *)&config_data[config_offset]);
++ get_unaligned_be32(&config_data[config_offset]);
+ config_offset += 4;
+
+ bk_da[i]->n_subblks =
+- be32_to_cpup((__be32 *)&config_data[config_offset]);
++ get_unaligned_be32(&config_data[config_offset]);
+
+ config_offset += 4;
+
+@@ -289,7 +288,7 @@ int tasdevice_rca_parser(void *context, const struct firmware *fmw)
+ }
+ buf = (unsigned char *)fmw->data;
+
+- fw_hdr->img_sz = be32_to_cpup((__be32 *)&buf[offset]);
++ fw_hdr->img_sz = get_unaligned_be32(&buf[offset]);
+ offset += 4;
+ if (fw_hdr->img_sz != fmw->size) {
+ dev_err(tas_priv->dev,
+@@ -300,9 +299,9 @@ int tasdevice_rca_parser(void *context, const struct firmware *fmw)
+ goto out;
+ }
+
+- fw_hdr->checksum = be32_to_cpup((__be32 *)&buf[offset]);
++ fw_hdr->checksum = get_unaligned_be32(&buf[offset]);
+ offset += 4;
+- fw_hdr->binary_version_num = be32_to_cpup((__be32 *)&buf[offset]);
++ fw_hdr->binary_version_num = get_unaligned_be32(&buf[offset]);
+ if (fw_hdr->binary_version_num < 0x103) {
+ dev_err(tas_priv->dev, "File version 0x%04x is too low",
+ fw_hdr->binary_version_num);
+@@ -311,7 +310,7 @@ int tasdevice_rca_parser(void *context, const struct firmware *fmw)
+ goto out;
+ }
+ offset += 4;
+- fw_hdr->drv_fw_version = be32_to_cpup((__be32 *)&buf[offset]);
++ fw_hdr->drv_fw_version = get_unaligned_be32(&buf[offset]);
+ offset += 8;
+ fw_hdr->plat_type = buf[offset];
+ offset += 1;
+@@ -339,11 +338,11 @@ int tasdevice_rca_parser(void *context, const struct firmware *fmw)
+ for (i = 0; i < TASDEVICE_DEVICE_SUM; i++, offset++)
+ fw_hdr->devs[i] = buf[offset];
+
+- fw_hdr->nconfig = be32_to_cpup((__be32 *)&buf[offset]);
++ fw_hdr->nconfig = get_unaligned_be32(&buf[offset]);
+ offset += 4;
+
+ for (i = 0; i < TASDEVICE_CONFIG_SUM; i++) {
+- fw_hdr->config_size[i] = be32_to_cpup((__be32 *)&buf[offset]);
++ fw_hdr->config_size[i] = get_unaligned_be32(&buf[offset]);
+ offset += 4;
+ total_config_sz += fw_hdr->config_size[i];
+ }
+@@ -423,7 +422,7 @@ static int fw_parse_block_data_kernel(struct tasdevice_fw *tas_fmw,
+ /* convert data[offset], data[offset + 1], data[offset + 2] and
+ * data[offset + 3] into host
+ */
+- block->type = be32_to_cpup((__be32 *)&data[offset]);
++ block->type = get_unaligned_be32(&data[offset]);
+ offset += 4;
+
+ block->is_pchksum_present = data[offset];
+@@ -438,10 +437,10 @@ static int fw_parse_block_data_kernel(struct tasdevice_fw *tas_fmw,
+ block->ychksum = data[offset];
+ offset++;
+
+- block->blk_size = be32_to_cpup((__be32 *)&data[offset]);
++ block->blk_size = get_unaligned_be32(&data[offset]);
+ offset += 4;
+
+- block->nr_subblocks = be32_to_cpup((__be32 *)&data[offset]);
++ block->nr_subblocks = get_unaligned_be32(&data[offset]);
+ offset += 4;
+
+ /* fixed m68k compiling issue:
+@@ -482,7 +481,7 @@ static int fw_parse_data_kernel(struct tasdevice_fw *tas_fmw,
+ offset = -EINVAL;
+ goto out;
+ }
+- img_data->nr_blk = be32_to_cpup((__be32 *)&data[offset]);
++ img_data->nr_blk = get_unaligned_be32(&data[offset]);
+ offset += 4;
+
+ img_data->dev_blks = kcalloc(img_data->nr_blk,
+@@ -578,14 +577,14 @@ static int fw_parse_variable_header_kernel(
+ offset = -EINVAL;
+ goto out;
+ }
+- fw_hdr->device_family = be16_to_cpup((__be16 *)&buf[offset]);
++ fw_hdr->device_family = get_unaligned_be16(&buf[offset]);
+ if (fw_hdr->device_family != 0) {
+ dev_err(tas_priv->dev, "%s:not TAS device\n", __func__);
+ offset = -EINVAL;
+ goto out;
+ }
+ offset += 2;
+- fw_hdr->device = be16_to_cpup((__be16 *)&buf[offset]);
++ fw_hdr->device = get_unaligned_be16(&buf[offset]);
+ if (fw_hdr->device >= TASDEVICE_DSP_TAS_MAX_DEVICE ||
+ fw_hdr->device == 6) {
+ dev_err(tas_priv->dev, "Unsupported dev %d\n", fw_hdr->device);
+@@ -603,7 +602,7 @@ static int fw_parse_variable_header_kernel(
+ goto out;
+ }
+
+- tas_fmw->nr_programs = be32_to_cpup((__be32 *)&buf[offset]);
++ tas_fmw->nr_programs = get_unaligned_be32(&buf[offset]);
+ offset += 4;
+
+ if (tas_fmw->nr_programs == 0 || tas_fmw->nr_programs >
+@@ -622,14 +621,14 @@ static int fw_parse_variable_header_kernel(
+
+ for (i = 0; i < tas_fmw->nr_programs; i++) {
+ program = &(tas_fmw->programs[i]);
+- program->prog_size = be32_to_cpup((__be32 *)&buf[offset]);
++ program->prog_size = get_unaligned_be32(&buf[offset]);
+ offset += 4;
+ }
+
+ /* Skip the unused prog_size */
+ offset += 4 * (TASDEVICE_MAXPROGRAM_NUM_KERNEL - tas_fmw->nr_programs);
+
+- tas_fmw->nr_configurations = be32_to_cpup((__be32 *)&buf[offset]);
++ tas_fmw->nr_configurations = get_unaligned_be32(&buf[offset]);
+ offset += 4;
+
+ /* The max number of config in firmware greater than 4 pieces of
+@@ -661,7 +660,7 @@ static int fw_parse_variable_header_kernel(
+
+ for (i = 0; i < tas_fmw->nr_programs; i++) {
+ config = &(tas_fmw->configs[i]);
+- config->cfg_size = be32_to_cpup((__be32 *)&buf[offset]);
++ config->cfg_size = get_unaligned_be32(&buf[offset]);
+ offset += 4;
+ }
+
+@@ -699,7 +698,7 @@ static int tasdevice_process_block(void *context, unsigned char *data,
+ switch (subblk_typ) {
+ case TASDEVICE_CMD_SING_W: {
+ int i;
+- unsigned short len = be16_to_cpup((__be16 *)&data[2]);
++ unsigned short len = get_unaligned_be16(&data[2]);
+
+ subblk_offset += 2;
+ if (subblk_offset + 4 * len > sublocksize) {
+@@ -725,7 +724,7 @@ static int tasdevice_process_block(void *context, unsigned char *data,
+ }
+ break;
+ case TASDEVICE_CMD_BURST: {
+- unsigned short len = be16_to_cpup((__be16 *)&data[2]);
++ unsigned short len = get_unaligned_be16(&data[2]);
+
+ subblk_offset += 2;
+ if (subblk_offset + 4 + len > sublocksize) {
+@@ -766,7 +765,7 @@ static int tasdevice_process_block(void *context, unsigned char *data,
+ is_err = true;
+ break;
+ }
+- sleep_time = be16_to_cpup((__be16 *)&data[2]) * 1000;
++ sleep_time = get_unaligned_be16(&data[2]) * 1000;
+ usleep_range(sleep_time, sleep_time + 50);
+ subblk_offset += 2;
+ }
+@@ -910,7 +909,7 @@ static int fw_parse_variable_hdr(struct tasdevice_priv
+
+ offset += len;
+
+- fw_hdr->device_family = be32_to_cpup((__be32 *)&buf[offset]);
++ fw_hdr->device_family = get_unaligned_be32(&buf[offset]);
+ if (fw_hdr->device_family != 0) {
+ dev_err(tas_priv->dev, "%s: not TAS device\n", __func__);
+ offset = -EINVAL;
+@@ -918,7 +917,7 @@ static int fw_parse_variable_hdr(struct tasdevice_priv
+ }
+ offset += 4;
+
+- fw_hdr->device = be32_to_cpup((__be32 *)&buf[offset]);
++ fw_hdr->device = get_unaligned_be32(&buf[offset]);
+ if (fw_hdr->device >= TASDEVICE_DSP_TAS_MAX_DEVICE ||
+ fw_hdr->device == 6) {
+ dev_err(tas_priv->dev, "Unsupported dev %d\n", fw_hdr->device);
+@@ -963,7 +962,7 @@ static int fw_parse_block_data(struct tasdevice_fw *tas_fmw,
+ offset = -EINVAL;
+ goto out;
+ }
+- block->type = be32_to_cpup((__be32 *)&data[offset]);
++ block->type = get_unaligned_be32(&data[offset]);
+ offset += 4;
+
+ if (tas_fmw->fw_hdr.fixed_hdr.drv_ver >= PPC_DRIVER_CRCCHK) {
+@@ -988,7 +987,7 @@ static int fw_parse_block_data(struct tasdevice_fw *tas_fmw,
+ block->is_ychksum_present = 0;
+ }
+
+- block->nr_cmds = be32_to_cpup((__be32 *)&data[offset]);
++ block->nr_cmds = get_unaligned_be32(&data[offset]);
+ offset += 4;
+
+ n = block->nr_cmds * 4;
+@@ -1039,7 +1038,7 @@ static int fw_parse_data(struct tasdevice_fw *tas_fmw,
+ goto out;
+ }
+ offset += n;
+- img_data->nr_blk = be16_to_cpup((__be16 *)&data[offset]);
++ img_data->nr_blk = get_unaligned_be16(&data[offset]);
+ offset += 2;
+
+ img_data->dev_blks = kcalloc(img_data->nr_blk,
+@@ -1076,7 +1075,7 @@ static int fw_parse_program_data(struct tasdevice_priv *tas_priv,
+ offset = -EINVAL;
+ goto out;
+ }
+- tas_fmw->nr_programs = be16_to_cpup((__be16 *)&buf[offset]);
++ tas_fmw->nr_programs = get_unaligned_be16(&buf[offset]);
+ offset += 2;
+
+ if (tas_fmw->nr_programs == 0) {
+@@ -1143,7 +1142,7 @@ static int fw_parse_configuration_data(
+ offset = -EINVAL;
+ goto out;
+ }
+- tas_fmw->nr_configurations = be16_to_cpup((__be16 *)&data[offset]);
++ tas_fmw->nr_configurations = get_unaligned_be16(&data[offset]);
+ offset += 2;
+
+ if (tas_fmw->nr_configurations == 0) {
+@@ -1775,7 +1774,7 @@ static int fw_parse_header(struct tasdevice_priv *tas_priv,
+ /* Convert data[offset], data[offset + 1], data[offset + 2] and
+ * data[offset + 3] into host
+ */
+- fw_fixed_hdr->fwsize = be32_to_cpup((__be32 *)&buf[offset]);
++ fw_fixed_hdr->fwsize = get_unaligned_be32(&buf[offset]);
+ offset += 4;
+ if (fw_fixed_hdr->fwsize != fmw->size) {
+ dev_err(tas_priv->dev, "File size not match, %lu %u",
+@@ -1784,9 +1783,9 @@ static int fw_parse_header(struct tasdevice_priv *tas_priv,
+ goto out;
+ }
+ offset += 4;
+- fw_fixed_hdr->ppcver = be32_to_cpup((__be32 *)&buf[offset]);
++ fw_fixed_hdr->ppcver = get_unaligned_be32(&buf[offset]);
+ offset += 8;
+- fw_fixed_hdr->drv_ver = be32_to_cpup((__be32 *)&buf[offset]);
++ fw_fixed_hdr->drv_ver = get_unaligned_be32(&buf[offset]);
+ offset += 72;
+
+ out:
+@@ -1828,7 +1827,7 @@ static int fw_parse_calibration_data(struct tasdevice_priv *tas_priv,
+ offset = -EINVAL;
+ goto out;
+ }
+- tas_fmw->nr_calibrations = be16_to_cpup((__be16 *)&data[offset]);
++ tas_fmw->nr_calibrations = get_unaligned_be16(&data[offset]);
+ offset += 2;
+
+ if (tas_fmw->nr_calibrations != 1) {
+--
+2.43.0
+
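
The conversions in the patch above replace be32_to_cpup((__be32 *)&buf[offset])
style casts with get_unaligned_be32(&buf[offset]), because the firmware offsets
are not guaranteed to be naturally aligned and the cast-and-dereference can
fault or trap on strict-alignment CPUs. A portable userspace sketch of the same
idea in plain C (these helpers are illustrative stand-ins, not the kernel's
get_unaligned_be* implementation):

#include <stdint.h>
#include <string.h>

/*
 * Read big-endian values from an arbitrary byte offset without assuming
 * alignment: copy the bytes out and assemble them explicitly.
 */
static uint32_t load_be32(const unsigned char *p)
{
	unsigned char b[4];

	memcpy(b, p, sizeof(b));
	return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
	       ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
}

static uint16_t load_be16(const unsigned char *p)
{
	unsigned char b[2];

	memcpy(b, p, sizeof(b));
	return (uint16_t)((b[0] << 8) | b[1]);
}

A caller would then write, for example, nblocks = load_be32(&config_data[config_offset])
instead of casting the buffer pointer to a wider type and dereferencing it.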
--- /dev/null
+From 4f91e4d53d4a3717670ed52de80eef9f2732f4b2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 27 Jun 2024 12:18:40 +0200
+Subject: ASoC: topology: Properly initialize soc_enum values
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Amadeusz Sławiński <amadeuszx.slawinski@linux.intel.com>
+
+[ Upstream commit 8ec2a2643544ce352f012ad3d248163199d05dfc ]
+
+soc_tplg_denum_create_values() should properly set its values field.
+
+Signed-off-by: Amadeusz Sławiński <amadeuszx.slawinski@linux.intel.com>
+Link: https://patch.msgid.link/20240627101850.2191513-4-amadeuszx.slawinski@linux.intel.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/soc-topology.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
+index 6951ff7bc61e..73d44dff45d6 100644
+--- a/sound/soc/soc-topology.c
++++ b/sound/soc/soc-topology.c
+@@ -851,6 +851,8 @@ static int soc_tplg_denum_create_values(struct soc_tplg *tplg, struct soc_enum *
+ se->dobj.control.dvalues[i] = le32_to_cpu(ec->values[i]);
+ }
+
++ se->items = le32_to_cpu(ec->items);
++ se->values = (const unsigned int *)se->dobj.control.dvalues;
+ return 0;
+ }
+
+--
+2.43.0
+
--- /dev/null
+From 4abfd9097d35f5dedcca18ae309fb6b89b23d722 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 20 Aug 2024 13:04:07 +1000
+Subject: ata: pata_macio: Use WARN instead of BUG
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+[ Upstream commit d4bc0a264fb482b019c84fbc7202dd3cab059087 ]
+
+The overflow/underflow conditions in pata_macio_qc_prep() should never
+happen. But if they do, there's no need to kill the system entirely; a
+WARN and failing the IO request should be sufficient and might allow the
+system to keep running.
+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Damien Le Moal <dlemoal@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ata/pata_macio.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
+index 99fc5d9d95d7..cac022eb1492 100644
+--- a/drivers/ata/pata_macio.c
++++ b/drivers/ata/pata_macio.c
+@@ -554,7 +554,8 @@ static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc)
+
+ while (sg_len) {
+ /* table overflow should never happen */
+- BUG_ON (pi++ >= MAX_DCMDS);
++ if (WARN_ON_ONCE(pi >= MAX_DCMDS))
++ return AC_ERR_SYSTEM;
+
+ len = (sg_len < MAX_DBDMA_SEG) ? sg_len : MAX_DBDMA_SEG;
+ table->command = cpu_to_le16(write ? OUTPUT_MORE: INPUT_MORE);
+@@ -566,11 +567,13 @@ static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc)
+ addr += len;
+ sg_len -= len;
+ ++table;
++ ++pi;
+ }
+ }
+
+ /* Should never happen according to Tejun */
+- BUG_ON(!pi);
++ if (WARN_ON_ONCE(!pi))
++ return AC_ERR_SYSTEM;
+
+ /* Convert the last command to an input/output */
+ table--;
+--
+2.43.0
+
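
The patch above turns two BUG_ON() assertions for "impossible" DMA-table
conditions into WARN_ON_ONCE() plus an error return, so a broken request is
failed instead of halting the whole machine. A rough userspace analogue of that
warn-once-and-bail shape (the names and error convention here are made up for
illustration):

#include <stdio.h>

#define MAX_SEGS 256

/*
 * Instead of asserting (and aborting) on a condition that should never
 * happen, warn once and return an error so the caller can fail just this
 * one request and keep running.
 */
static int add_segment(int *count)
{
	static int warned;

	if (*count >= MAX_SEGS) {
		if (!warned) {
			warned = 1;
			fprintf(stderr, "segment table overflow, failing request\n");
		}
		return -1;
	}

	(*count)++;
	return 0;
}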
--- /dev/null
+From 660a8d261bbcf4a44558c17cc398ae8dbec5c0ab Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 30 Aug 2024 17:31:07 +0200
+Subject: bareudp: Fix device stats updates.
+
+From: Guillaume Nault <gnault@redhat.com>
+
+[ Upstream commit 4963d2343af81f493519f9c3ea9f2169eaa7353a ]
+
+Bareudp devices update their stats concurrently.
+Therefore they need proper atomic increments.
+
+Fixes: 571912c69f0e ("net: UDP tunnel encapsulation module for tunnelling different protocols like MPLS, IP, NSH etc.")
+Signed-off-by: Guillaume Nault <gnault@redhat.com>
+Reviewed-by: Willem de Bruijn <willemb@google.com>
+Link: https://patch.msgid.link/04b7b9d0b480158eb3ab4366ec80aa2ab7e41fcb.1725031794.git.gnault@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/bareudp.c | 22 +++++++++++-----------
+ 1 file changed, 11 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
+index d5c56ca91b77..7aca0544fb29 100644
+--- a/drivers/net/bareudp.c
++++ b/drivers/net/bareudp.c
+@@ -83,7 +83,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+
+ if (skb_copy_bits(skb, BAREUDP_BASE_HLEN, &ipversion,
+ sizeof(ipversion))) {
+- bareudp->dev->stats.rx_dropped++;
++ DEV_STATS_INC(bareudp->dev, rx_dropped);
+ goto drop;
+ }
+ ipversion >>= 4;
+@@ -93,7 +93,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+ } else if (ipversion == 6 && bareudp->multi_proto_mode) {
+ proto = htons(ETH_P_IPV6);
+ } else {
+- bareudp->dev->stats.rx_dropped++;
++ DEV_STATS_INC(bareudp->dev, rx_dropped);
+ goto drop;
+ }
+ } else if (bareudp->ethertype == htons(ETH_P_MPLS_UC)) {
+@@ -107,7 +107,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+ ipv4_is_multicast(tunnel_hdr->daddr)) {
+ proto = htons(ETH_P_MPLS_MC);
+ } else {
+- bareudp->dev->stats.rx_dropped++;
++ DEV_STATS_INC(bareudp->dev, rx_dropped);
+ goto drop;
+ }
+ } else {
+@@ -123,7 +123,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+ (addr_type & IPV6_ADDR_MULTICAST)) {
+ proto = htons(ETH_P_MPLS_MC);
+ } else {
+- bareudp->dev->stats.rx_dropped++;
++ DEV_STATS_INC(bareudp->dev, rx_dropped);
+ goto drop;
+ }
+ }
+@@ -135,7 +135,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+ proto,
+ !net_eq(bareudp->net,
+ dev_net(bareudp->dev)))) {
+- bareudp->dev->stats.rx_dropped++;
++ DEV_STATS_INC(bareudp->dev, rx_dropped);
+ goto drop;
+ }
+
+@@ -143,7 +143,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+
+ tun_dst = udp_tun_rx_dst(skb, family, key, 0, 0);
+ if (!tun_dst) {
+- bareudp->dev->stats.rx_dropped++;
++ DEV_STATS_INC(bareudp->dev, rx_dropped);
+ goto drop;
+ }
+ skb_dst_set(skb, &tun_dst->dst);
+@@ -169,8 +169,8 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+ &((struct ipv6hdr *)oiph)->saddr);
+ }
+ if (err > 1) {
+- ++bareudp->dev->stats.rx_frame_errors;
+- ++bareudp->dev->stats.rx_errors;
++ DEV_STATS_INC(bareudp->dev, rx_frame_errors);
++ DEV_STATS_INC(bareudp->dev, rx_errors);
+ goto drop;
+ }
+ }
+@@ -467,11 +467,11 @@ static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev)
+ dev_kfree_skb(skb);
+
+ if (err == -ELOOP)
+- dev->stats.collisions++;
++ DEV_STATS_INC(dev, collisions);
+ else if (err == -ENETUNREACH)
+- dev->stats.tx_carrier_errors++;
++ DEV_STATS_INC(dev, tx_carrier_errors);
+
+- dev->stats.tx_errors++;
++ DEV_STATS_INC(dev, tx_errors);
+ return NETDEV_TX_OK;
+ }
+
+--
+2.43.0
+
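
The bareudp conversion above switches plain dev->stats counters to
DEV_STATS_INC() because the counters are bumped from multiple contexts at once,
and a non-atomic read-modify-write can silently lose increments. A small C11
sketch of the difference, using illustrative counter names rather than the
netdev stats structure:

#include <stdatomic.h>
#include <stdint.h>

struct rx_stats {
	atomic_uint_least64_t rx_dropped;
	atomic_uint_least64_t rx_errors;
};

/*
 * A single atomic read-modify-write is safe from any number of threads,
 * unlike `stats->rx_dropped++` on a plain integer, where two concurrent
 * increments can collapse into one.
 */
static inline void stats_inc(atomic_uint_least64_t *ctr)
{
	atomic_fetch_add_explicit(ctr, 1, memory_order_relaxed);
}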
--- /dev/null
+From 572188eea6c777b8db4cfc15fab6e7fe6ebe206a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 May 2024 22:36:10 -0600
+Subject: bcachefs: Add error code to defer option parsing
+
+From: Thomas Bertschinger <tahbertschinger@gmail.com>
+
+[ Upstream commit 1c12d1caf8d627d8b791f4dc25af2522dac7cd10 ]
+
+This introduces a new error code, option_needs_open_fs, which is used to
+indicate that an attempt was made to parse a mount option prior to
+opening a filesystem, when that mount option requires an open filesystem
+in order to be validated.
+
+Returning this error results in bch2_parse_one_mount_opt() saving that
+option for later parsing, after the filesystem is opened.
+
+Signed-off-by: Thomas Bertschinger <tahbertschinger@gmail.com>
+Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
+Stable-dep-of: e3e694094091 ("bcachefs: Revert lockless buffered IO path")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/bcachefs/disk_groups.c | 2 +-
+ fs/bcachefs/errcode.h | 3 ++-
+ fs/bcachefs/opts.c | 15 +++++++++++++++
+ 3 files changed, 18 insertions(+), 2 deletions(-)
+
+diff --git a/fs/bcachefs/disk_groups.c b/fs/bcachefs/disk_groups.c
+index 521a86df5e52..5df8de0b8c02 100644
+--- a/fs/bcachefs/disk_groups.c
++++ b/fs/bcachefs/disk_groups.c
+@@ -511,7 +511,7 @@ int bch2_opt_target_parse(struct bch_fs *c, const char *val, u64 *res,
+ return -EINVAL;
+
+ if (!c)
+- return 0;
++ return -BCH_ERR_option_needs_open_fs;
+
+ if (!strlen(val) || !strcmp(val, "none")) {
+ *res = 0;
+diff --git a/fs/bcachefs/errcode.h b/fs/bcachefs/errcode.h
+index 58612abf7927..a268af3e52bf 100644
+--- a/fs/bcachefs/errcode.h
++++ b/fs/bcachefs/errcode.h
+@@ -257,7 +257,8 @@
+ x(BCH_ERR_nopromote, nopromote_no_writes) \
+ x(BCH_ERR_nopromote, nopromote_enomem) \
+ x(0, need_inode_lock) \
+- x(0, invalid_snapshot_node)
++ x(0, invalid_snapshot_node) \
++ x(0, option_needs_open_fs)
+
+ enum bch_errcode {
+ BCH_ERR_START = 2048,
+diff --git a/fs/bcachefs/opts.c b/fs/bcachefs/opts.c
+index e794706276cf..e10fc1da71b1 100644
+--- a/fs/bcachefs/opts.c
++++ b/fs/bcachefs/opts.c
+@@ -378,6 +378,10 @@ int bch2_opt_parse(struct bch_fs *c,
+ break;
+ case BCH_OPT_FN:
+ ret = opt->fn.parse(c, val, res, err);
++
++ if (ret == -BCH_ERR_option_needs_open_fs)
++ return ret;
++
+ if (ret < 0) {
+ if (err)
+ prt_printf(err, "%s: parse error",
+@@ -495,6 +499,17 @@ int bch2_parse_one_mount_opt(struct bch_fs *c, struct bch_opts *opts,
+ goto bad_opt;
+
+ ret = bch2_opt_parse(c, &bch2_opt_table[id], val, &v, &err);
++ if (ret == -BCH_ERR_option_needs_open_fs && parse_later) {
++ prt_printf(parse_later, "%s=%s,", name, val);
++ if (parse_later->allocation_failure) {
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ ret = 0;
++ goto out;
++ }
++
+ if (ret < 0)
+ goto bad_val;
+
+--
+2.43.0
+
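
The patch above lets an option handler report that it cannot be validated yet
(-BCH_ERR_option_needs_open_fs); bch2_parse_one_mount_opt() then appends the
raw name=val pair to the parse_later printbuf so it can be replayed once the
filesystem is open. A compact userspace sketch of that two-pass shape (the
buffer handling and the "metadata_target" special case below are invented for
illustration):

#include <stdio.h>
#include <string.h>

/*
 * First pass: an option whose validation needs an open filesystem is not
 * rejected; its "name=val," text is stashed in a buffer and parsed again
 * after the filesystem has been opened.
 */
static int parse_one(char *later, size_t later_sz, int fs_open,
		     const char *name, const char *val)
{
	int needs_fs = (strcmp(name, "metadata_target") == 0);

	if (needs_fs && !fs_open) {
		size_t used = strlen(later);

		snprintf(later + used, later_sz - used, "%s=%s,", name, val);
		return 0;	/* deferred, not an error */
	}

	/* ... normal validation of name/val would happen here ... */
	return 0;
}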
--- /dev/null
+From 037abae0e3f8c411a33fccae3a354604caaf3896 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 May 2024 22:36:09 -0600
+Subject: bcachefs: add printbuf arg to bch2_parse_mount_opts()
+
+From: Thomas Bertschinger <tahbertschinger@gmail.com>
+
+[ Upstream commit 9b7f0b5d3d220ccba3151b95a5532780e04e1954 ]
+
+Mount options that take the name of a device that may be part of a
+filesystem, for example "metadata_target", cannot be validated until
+after the filesystem has been opened. However, an attempt to parse those
+options may be made prior to the filesystem being opened.
+
+This change adds a printbuf parameter to bch2_parse_mount_opts() which
+will be used to save those mount options, when they are supplied prior
+to the FS being opened, so that they can be parsed later.
+
+This functionality is not currently needed, but will be used after
+bcachefs starts using the new mount API to parse mount options. This is
+because using the new mount API, we will process mount options prior to
+opening the FS, but the new API doesn't provide a convenient way to
+"replay" mount option parsing. So we save these options ourselves to
+accomplish this.
+
+This change also splits out the code to parse a single option into
+bch2_parse_one_mount_opt(), which will be useful when using the new
+mount API which deals with a single mount option at a time.
+
+Signed-off-by: Thomas Bertschinger <tahbertschinger@gmail.com>
+Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
+Stable-dep-of: e3e694094091 ("bcachefs: Revert lockless buffered IO path")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/bcachefs/chardev.c | 4 +-
+ fs/bcachefs/fs.c | 6 +--
+ fs/bcachefs/opts.c | 105 +++++++++++++++++++++++++-----------------
+ fs/bcachefs/opts.h | 5 +-
+ 4 files changed, 71 insertions(+), 49 deletions(-)
+
+diff --git a/fs/bcachefs/chardev.c b/fs/bcachefs/chardev.c
+index 6d82e1165adc..268bbe847c8f 100644
+--- a/fs/bcachefs/chardev.c
++++ b/fs/bcachefs/chardev.c
+@@ -215,7 +215,7 @@ static long bch2_ioctl_fsck_offline(struct bch_ioctl_fsck_offline __user *user_a
+ char *optstr = strndup_user((char __user *)(unsigned long) arg.opts, 1 << 16);
+
+ ret = PTR_ERR_OR_ZERO(optstr) ?:
+- bch2_parse_mount_opts(NULL, &thr->opts, optstr);
++ bch2_parse_mount_opts(NULL, &thr->opts, NULL, optstr);
+ if (!IS_ERR(optstr))
+ kfree(optstr);
+
+@@ -851,7 +851,7 @@ static long bch2_ioctl_fsck_online(struct bch_fs *c,
+ char *optstr = strndup_user((char __user *)(unsigned long) arg.opts, 1 << 16);
+
+ ret = PTR_ERR_OR_ZERO(optstr) ?:
+- bch2_parse_mount_opts(c, &thr->opts, optstr);
++ bch2_parse_mount_opts(c, &thr->opts, NULL, optstr);
+ if (!IS_ERR(optstr))
+ kfree(optstr);
+
+diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c
+index fa1fee05cf8f..ab97b5aa88d6 100644
+--- a/fs/bcachefs/fs.c
++++ b/fs/bcachefs/fs.c
+@@ -1729,7 +1729,7 @@ static int bch2_remount(struct super_block *sb, int *flags, char *data)
+ struct bch_opts opts = bch2_opts_empty();
+ int ret;
+
+- ret = bch2_parse_mount_opts(c, &opts, data);
++ ret = bch2_parse_mount_opts(c, &opts, NULL, data);
+ if (ret)
+ goto err;
+
+@@ -1902,7 +1902,7 @@ static struct dentry *bch2_mount(struct file_system_type *fs_type,
+
+ opt_set(opts, read_only, (flags & SB_RDONLY) != 0);
+
+- ret = bch2_parse_mount_opts(NULL, &opts, data);
++ ret = bch2_parse_mount_opts(NULL, &opts, NULL, data);
+ if (ret) {
+ ret = bch2_err_class(ret);
+ return ERR_PTR(ret);
+@@ -1936,7 +1936,7 @@ static struct dentry *bch2_mount(struct file_system_type *fs_type,
+ }
+
+ /* Some options can't be parsed until after the fs is started: */
+- ret = bch2_parse_mount_opts(c, &opts, data);
++ ret = bch2_parse_mount_opts(c, &opts, NULL, data);
+ if (ret) {
+ bch2_fs_stop(c);
+ sb = ERR_PTR(ret);
+diff --git a/fs/bcachefs/opts.c b/fs/bcachefs/opts.c
+index bb068fd72465..e794706276cf 100644
+--- a/fs/bcachefs/opts.c
++++ b/fs/bcachefs/opts.c
+@@ -460,14 +460,70 @@ int bch2_opts_check_may_set(struct bch_fs *c)
+ return 0;
+ }
+
++int bch2_parse_one_mount_opt(struct bch_fs *c, struct bch_opts *opts,
++ struct printbuf *parse_later,
++ const char *name, const char *val)
++{
++ struct printbuf err = PRINTBUF;
++ u64 v;
++ int ret, id;
++
++ id = bch2_mount_opt_lookup(name);
++
++ /* Check for the form "noopt", negation of a boolean opt: */
++ if (id < 0 &&
++ !val &&
++ !strncmp("no", name, 2)) {
++ id = bch2_mount_opt_lookup(name + 2);
++ val = "0";
++ }
++
++ /* Unknown options are ignored: */
++ if (id < 0)
++ return 0;
++
++ if (!(bch2_opt_table[id].flags & OPT_MOUNT))
++ goto bad_opt;
++
++ if (id == Opt_acl &&
++ !IS_ENABLED(CONFIG_BCACHEFS_POSIX_ACL))
++ goto bad_opt;
++
++ if ((id == Opt_usrquota ||
++ id == Opt_grpquota) &&
++ !IS_ENABLED(CONFIG_BCACHEFS_QUOTA))
++ goto bad_opt;
++
++ ret = bch2_opt_parse(c, &bch2_opt_table[id], val, &v, &err);
++ if (ret < 0)
++ goto bad_val;
++
++ if (opts)
++ bch2_opt_set_by_id(opts, id, v);
++
++ ret = 0;
++ goto out;
++
++bad_opt:
++ pr_err("Bad mount option %s", name);
++ ret = -BCH_ERR_option_name;
++ goto out;
++
++bad_val:
++ pr_err("Invalid mount option %s", err.buf);
++ ret = -BCH_ERR_option_value;
++
++out:
++ printbuf_exit(&err);
++ return ret;
++}
++
+ int bch2_parse_mount_opts(struct bch_fs *c, struct bch_opts *opts,
+- char *options)
++ struct printbuf *parse_later, char *options)
+ {
+ char *copied_opts, *copied_opts_start;
+ char *opt, *name, *val;
+- int ret, id;
+- struct printbuf err = PRINTBUF;
+- u64 v;
++ int ret;
+
+ if (!options)
+ return 0;
+@@ -488,53 +544,16 @@ int bch2_parse_mount_opts(struct bch_fs *c, struct bch_opts *opts,
+ name = strsep(&opt, "=");
+ val = opt;
+
+- id = bch2_mount_opt_lookup(name);
+-
+- /* Check for the form "noopt", negation of a boolean opt: */
+- if (id < 0 &&
+- !val &&
+- !strncmp("no", name, 2)) {
+- id = bch2_mount_opt_lookup(name + 2);
+- val = "0";
+- }
+-
+- /* Unknown options are ignored: */
+- if (id < 0)
+- continue;
+-
+- if (!(bch2_opt_table[id].flags & OPT_MOUNT))
+- goto bad_opt;
+-
+- if (id == Opt_acl &&
+- !IS_ENABLED(CONFIG_BCACHEFS_POSIX_ACL))
+- goto bad_opt;
+-
+- if ((id == Opt_usrquota ||
+- id == Opt_grpquota) &&
+- !IS_ENABLED(CONFIG_BCACHEFS_QUOTA))
+- goto bad_opt;
+-
+- ret = bch2_opt_parse(c, &bch2_opt_table[id], val, &v, &err);
++ ret = bch2_parse_one_mount_opt(c, opts, parse_later, name, val);
+ if (ret < 0)
+- goto bad_val;
+-
+- bch2_opt_set_by_id(opts, id, v);
++ goto out;
+ }
+
+ ret = 0;
+ goto out;
+
+-bad_opt:
+- pr_err("Bad mount option %s", name);
+- ret = -BCH_ERR_option_name;
+- goto out;
+-bad_val:
+- pr_err("Invalid mount option %s", err.buf);
+- ret = -BCH_ERR_option_value;
+- goto out;
+ out:
+ kfree(copied_opts_start);
+- printbuf_exit(&err);
+ return ret;
+ }
+
+diff --git a/fs/bcachefs/opts.h b/fs/bcachefs/opts.h
+index b197ec90d4cb..1cfe75bd8ac8 100644
+--- a/fs/bcachefs/opts.h
++++ b/fs/bcachefs/opts.h
+@@ -566,7 +566,10 @@ void bch2_opt_to_text(struct printbuf *, struct bch_fs *, struct bch_sb *,
+
+ int bch2_opt_check_may_set(struct bch_fs *, int, u64);
+ int bch2_opts_check_may_set(struct bch_fs *);
+-int bch2_parse_mount_opts(struct bch_fs *, struct bch_opts *, char *);
++int bch2_parse_one_mount_opt(struct bch_fs *, struct bch_opts *,
++ struct printbuf *, const char *, const char *);
++int bch2_parse_mount_opts(struct bch_fs *, struct bch_opts *, struct printbuf *,
++ char *);
+
+ /* inode opts: */
+
+--
+2.43.0
+
--- /dev/null
+From 9b6a94a42a3867dbb7ce7271f1d77958517823fe Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 31 Aug 2024 17:44:51 -0400
+Subject: bcachefs: Revert lockless buffered IO path
+
+From: Kent Overstreet <kent.overstreet@linux.dev>
+
+[ Upstream commit e3e6940940910c2287fe962bdf72015efd4fee81 ]
+
+We had a report of data corruption on nixos when building installer
+images.
+
+https://github.com/NixOS/nixpkgs/pull/321055#issuecomment-2184131334
+
+It seems that writes are being dropped, but only when issued by QEMU,
+and possibly only in snapshot mode. It's undetermined whether it's the
+write calls that are being dropped or the dirty folios.
+
+Further testing, via minimizing the original patch to just the change
+that skips the inode lock on non appends/truncates, reveals that it
+really is just not taking the inode lock that causes the corruption: it
+has nothing to do with the other logic changes for preserving write
+atomicity in corner cases.
+
+It's also kernel config dependent: it doesn't reproduce with the minimal
+kernel config that ktest uses, but it does reproduce with nixos's distro
+config. Bisecting the kernel config initially pointed the finger at page
+migration or compaction, but it appears that was erroneous; we haven't
+yet determined what kernel config option actually triggers it.
+
+Sadly it appears this will have to be reverted since we're getting too
+close to release and my plate is full, but we'd _really_ like to fully
+debug it.
+
+My suspicion is that this patch is exposing a preexisting bug - the
+inode lock actually covers very little in IO paths, and we have a
+different lock (the pagecache add lock) that guards against races with
+truncate here.
+
+Fixes: 7e64c86cdc6c ("bcachefs: Buffered write path now can avoid the inode lock")
+Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/bcachefs/errcode.h | 1 -
+ fs/bcachefs/fs-io-buffered.c | 149 ++++++++++-------------------------
+ 2 files changed, 40 insertions(+), 110 deletions(-)
+
+diff --git a/fs/bcachefs/errcode.h b/fs/bcachefs/errcode.h
+index a268af3e52bf..81236e678866 100644
+--- a/fs/bcachefs/errcode.h
++++ b/fs/bcachefs/errcode.h
+@@ -256,7 +256,6 @@
+ x(BCH_ERR_nopromote, nopromote_in_flight) \
+ x(BCH_ERR_nopromote, nopromote_no_writes) \
+ x(BCH_ERR_nopromote, nopromote_enomem) \
+- x(0, need_inode_lock) \
+ x(0, invalid_snapshot_node) \
+ x(0, option_needs_open_fs)
+
+diff --git a/fs/bcachefs/fs-io-buffered.c b/fs/bcachefs/fs-io-buffered.c
+index 54873ecc635c..98c1e26a313a 100644
+--- a/fs/bcachefs/fs-io-buffered.c
++++ b/fs/bcachefs/fs-io-buffered.c
+@@ -802,8 +802,7 @@ static noinline void folios_trunc(folios *fs, struct folio **fi)
+ static int __bch2_buffered_write(struct bch_inode_info *inode,
+ struct address_space *mapping,
+ struct iov_iter *iter,
+- loff_t pos, unsigned len,
+- bool inode_locked)
++ loff_t pos, unsigned len)
+ {
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct bch2_folio_reservation res;
+@@ -828,15 +827,6 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
+
+ BUG_ON(!fs.nr);
+
+- /*
+- * If we're not using the inode lock, we need to lock all the folios for
+- * atomiticity of writes vs. other writes:
+- */
+- if (!inode_locked && folio_end_pos(darray_last(fs)) < end) {
+- ret = -BCH_ERR_need_inode_lock;
+- goto out;
+- }
+-
+ f = darray_first(fs);
+ if (pos != folio_pos(f) && !folio_test_uptodate(f)) {
+ ret = bch2_read_single_folio(f, mapping);
+@@ -931,10 +921,8 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
+ end = pos + copied;
+
+ spin_lock(&inode->v.i_lock);
+- if (end > inode->v.i_size) {
+- BUG_ON(!inode_locked);
++ if (end > inode->v.i_size)
+ i_size_write(&inode->v, end);
+- }
+ spin_unlock(&inode->v.i_lock);
+
+ f_pos = pos;
+@@ -978,68 +966,12 @@ static ssize_t bch2_buffered_write(struct kiocb *iocb, struct iov_iter *iter)
+ struct file *file = iocb->ki_filp;
+ struct address_space *mapping = file->f_mapping;
+ struct bch_inode_info *inode = file_bch_inode(file);
+- loff_t pos;
+- bool inode_locked = false;
+- ssize_t written = 0, written2 = 0, ret = 0;
+-
+- /*
+- * We don't take the inode lock unless i_size will be changing. Folio
+- * locks provide exclusion with other writes, and the pagecache add lock
+- * provides exclusion with truncate and hole punching.
+- *
+- * There is one nasty corner case where atomicity would be broken
+- * without great care: when copying data from userspace to the page
+- * cache, we do that with faults disable - a page fault would recurse
+- * back into the filesystem, taking filesystem locks again, and
+- * deadlock; so it's done with faults disabled, and we fault in the user
+- * buffer when we aren't holding locks.
+- *
+- * If we do part of the write, but we then race and in the userspace
+- * buffer have been evicted and are no longer resident, then we have to
+- * drop our folio locks to re-fault them in, breaking write atomicity.
+- *
+- * To fix this, we restart the write from the start, if we weren't
+- * holding the inode lock.
+- *
+- * There is another wrinkle after that; if we restart the write from the
+- * start, and then get an unrecoverable error, we _cannot_ claim to
+- * userspace that we did not write data we actually did - so we must
+- * track (written2) the most we ever wrote.
+- */
+-
+- if ((iocb->ki_flags & IOCB_APPEND) ||
+- (iocb->ki_pos + iov_iter_count(iter) > i_size_read(&inode->v))) {
+- inode_lock(&inode->v);
+- inode_locked = true;
+- }
+-
+- ret = generic_write_checks(iocb, iter);
+- if (ret <= 0)
+- goto unlock;
+-
+- ret = file_remove_privs_flags(file, !inode_locked ? IOCB_NOWAIT : 0);
+- if (ret) {
+- if (!inode_locked) {
+- inode_lock(&inode->v);
+- inode_locked = true;
+- ret = file_remove_privs_flags(file, 0);
+- }
+- if (ret)
+- goto unlock;
+- }
+-
+- ret = file_update_time(file);
+- if (ret)
+- goto unlock;
+-
+- pos = iocb->ki_pos;
++ loff_t pos = iocb->ki_pos;
++ ssize_t written = 0;
++ int ret = 0;
+
+ bch2_pagecache_add_get(inode);
+
+- if (!inode_locked &&
+- (iocb->ki_pos + iov_iter_count(iter) > i_size_read(&inode->v)))
+- goto get_inode_lock;
+-
+ do {
+ unsigned offset = pos & (PAGE_SIZE - 1);
+ unsigned bytes = iov_iter_count(iter);
+@@ -1064,17 +996,12 @@ static ssize_t bch2_buffered_write(struct kiocb *iocb, struct iov_iter *iter)
+ }
+ }
+
+- if (unlikely(bytes != iov_iter_count(iter) && !inode_locked))
+- goto get_inode_lock;
+-
+ if (unlikely(fatal_signal_pending(current))) {
+ ret = -EINTR;
+ break;
+ }
+
+- ret = __bch2_buffered_write(inode, mapping, iter, pos, bytes, inode_locked);
+- if (ret == -BCH_ERR_need_inode_lock)
+- goto get_inode_lock;
++ ret = __bch2_buffered_write(inode, mapping, iter, pos, bytes);
+ if (unlikely(ret < 0))
+ break;
+
+@@ -1095,46 +1022,50 @@ static ssize_t bch2_buffered_write(struct kiocb *iocb, struct iov_iter *iter)
+ }
+ pos += ret;
+ written += ret;
+- written2 = max(written, written2);
+-
+- if (ret != bytes && !inode_locked)
+- goto get_inode_lock;
+ ret = 0;
+
+ balance_dirty_pages_ratelimited(mapping);
+-
+- if (0) {
+-get_inode_lock:
+- bch2_pagecache_add_put(inode);
+- inode_lock(&inode->v);
+- inode_locked = true;
+- bch2_pagecache_add_get(inode);
+-
+- iov_iter_revert(iter, written);
+- pos -= written;
+- written = 0;
+- ret = 0;
+- }
+ } while (iov_iter_count(iter));
+- bch2_pagecache_add_put(inode);
+-unlock:
+- if (inode_locked)
+- inode_unlock(&inode->v);
+
+- iocb->ki_pos += written;
++ bch2_pagecache_add_put(inode);
+
+- ret = max(written, written2) ?: ret;
+- if (ret > 0)
+- ret = generic_write_sync(iocb, ret);
+- return ret;
++ return written ? written : ret;
+ }
+
+-ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *iter)
++ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ {
+- ssize_t ret = iocb->ki_flags & IOCB_DIRECT
+- ? bch2_direct_write(iocb, iter)
+- : bch2_buffered_write(iocb, iter);
++ struct file *file = iocb->ki_filp;
++ struct bch_inode_info *inode = file_bch_inode(file);
++ ssize_t ret;
++
++ if (iocb->ki_flags & IOCB_DIRECT) {
++ ret = bch2_direct_write(iocb, from);
++ goto out;
++ }
++
++ inode_lock(&inode->v);
++
++ ret = generic_write_checks(iocb, from);
++ if (ret <= 0)
++ goto unlock;
++
++ ret = file_remove_privs(file);
++ if (ret)
++ goto unlock;
++
++ ret = file_update_time(file);
++ if (ret)
++ goto unlock;
++
++ ret = bch2_buffered_write(iocb, from);
++ if (likely(ret > 0))
++ iocb->ki_pos += ret;
++unlock:
++ inode_unlock(&inode->v);
+
++ if (ret > 0)
++ ret = generic_write_sync(iocb, ret);
++out:
+ return bch2_err_class(ret);
+ }
+
+--
+2.43.0
+
--- /dev/null
+From 013468c2b2c78386454583020844e141ee4edb2c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 2 Jul 2024 17:10:21 +0200
+Subject: block: don't call bio_uninit from bio_endio
+
+From: Christoph Hellwig <hch@lst.de>
+
+[ Upstream commit bf4c89fc8797f5c0964a0c3d561fbe7e8483b62f ]
+
+Commit b222dd2fdd53 ("block: call bio_uninit in bio_endio") added a call
+to bio_uninit in bio_endio to work around callers that use bio_init but
+fail to call bio_uninit after they are done to release the resources.
+While this is an abuse of the bio_init API, we still have quite a few of
+those left. But this early uninit causes a problem for integrity data,
+as at least some users need the bio_integrity_payload. Right now the
+only one is the NVMe passthrough, which achieves this by adding a special
+case to skip the freeing if the BIP_INTEGRITY_USER flag is set.
+
+Sort this out by only putting bi_blkg in bio_endio as that is the cause
+of the actual leaks - the few users of the crypto context and integrity
+data all properly call bio_uninit, usually through bio_put for
+dynamically allocated bios.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
+Link: https://lore.kernel.org/r/20240702151047.1746127-4-hch@lst.de
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/bio.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/block/bio.c b/block/bio.c
+index e9e809a63c59..c7a4bc05c43e 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -1630,8 +1630,18 @@ void bio_endio(struct bio *bio)
+ goto again;
+ }
+
+- /* release cgroup info */
+- bio_uninit(bio);
++#ifdef CONFIG_BLK_CGROUP
++ /*
++ * Release cgroup info. We shouldn't have to do this here, but quite
++ * a few callers of bio_init fail to call bio_uninit, so we cover up
++ * for that here at least for now.
++ */
++ if (bio->bi_blkg) {
++ blkg_put(bio->bi_blkg);
++ bio->bi_blkg = NULL;
++ }
++#endif
++
+ if (bio->bi_end_io)
+ bio->bi_end_io(bio);
+ }
+--
+2.43.0
+
--- /dev/null
+From eb35db91e0ae8ea5ff3f7cc1728b03349df62e15 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 May 2024 12:36:55 +0530
+Subject: Bluetooth: btnxpuart: Fix Null pointer dereference in
+ btnxpuart_flush()
+
+From: Neeraj Sanjay Kale <neeraj.sanjaykale@nxp.com>
+
+[ Upstream commit c68bbf5e334b35b36ac5b9f0419f1f93f796bad1 ]
+
+This adds a check before freeing the rx->skb in flush and close
+functions to handle the kernel crash seen while removing the driver after
+FW download fails or before FW download completes.
+
+dmesg log:
+[ 54.634586] Unable to handle kernel NULL pointer dereference at virtual address 0000000000000080
+[ 54.643398] Mem abort info:
+[ 54.646204] ESR = 0x0000000096000004
+[ 54.649964] EC = 0x25: DABT (current EL), IL = 32 bits
+[ 54.655286] SET = 0, FnV = 0
+[ 54.658348] EA = 0, S1PTW = 0
+[ 54.661498] FSC = 0x04: level 0 translation fault
+[ 54.666391] Data abort info:
+[ 54.669273] ISV = 0, ISS = 0x00000004, ISS2 = 0x00000000
+[ 54.674768] CM = 0, WnR = 0, TnD = 0, TagAccess = 0
+[ 54.674771] GCS = 0, Overlay = 0, DirtyBit = 0, Xs = 0
+[ 54.674775] user pgtable: 4k pages, 48-bit VAs, pgdp=0000000048860000
+[ 54.674780] [0000000000000080] pgd=0000000000000000, p4d=0000000000000000
+[ 54.703880] Internal error: Oops: 0000000096000004 [#1] PREEMPT SMP
+[ 54.710152] Modules linked in: btnxpuart(-) overlay fsl_jr_uio caam_jr caamkeyblob_desc caamhash_desc caamalg_desc crypto_engine authenc libdes crct10dif_ce polyval_ce polyval_generic snd_soc_imx_spdif snd_soc_imx_card snd_soc_ak5558 snd_soc_ak4458 caam secvio error snd_soc_fsl_micfil snd_soc_fsl_spdif snd_soc_fsl_sai snd_soc_fsl_utils imx_pcm_dma gpio_ir_recv rc_core sch_fq_codel fuse
+[ 54.744357] CPU: 3 PID: 72 Comm: kworker/u9:0 Not tainted 6.6.3-otbr-g128004619037 #2
+[ 54.744364] Hardware name: FSL i.MX8MM EVK board (DT)
+[ 54.744368] Workqueue: hci0 hci_power_on
+[ 54.757244] pstate: 60000005 (nZCv daif -PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+[ 54.757249] pc : kfree_skb_reason+0x18/0xb0
+[ 54.772299] lr : btnxpuart_flush+0x40/0x58 [btnxpuart]
+[ 54.782921] sp : ffff8000805ebca0
+[ 54.782923] x29: ffff8000805ebca0 x28: ffffa5c6cf1869c0 x27: ffffa5c6cf186000
+[ 54.782931] x26: ffff377b84852400 x25: ffff377b848523c0 x24: ffff377b845e7230
+[ 54.782938] x23: ffffa5c6ce8dbe08 x22: ffffa5c6ceb65410 x21: 00000000ffffff92
+[ 54.782945] x20: ffffa5c6ce8dbe98 x19: ffffffffffffffac x18: ffffffffffffffff
+[ 54.807651] x17: 0000000000000000 x16: ffffa5c6ce2824ec x15: ffff8001005eb857
+[ 54.821917] x14: 0000000000000000 x13: ffffa5c6cf1a02e0 x12: 0000000000000642
+[ 54.821924] x11: 0000000000000040 x10: ffffa5c6cf19d690 x9 : ffffa5c6cf19d688
+[ 54.821931] x8 : ffff377b86000028 x7 : 0000000000000000 x6 : 0000000000000000
+[ 54.821938] x5 : ffff377b86000000 x4 : 0000000000000000 x3 : 0000000000000000
+[ 54.843331] x2 : 0000000000000000 x1 : 0000000000000002 x0 : ffffffffffffffac
+[ 54.857599] Call trace:
+[ 54.857601] kfree_skb_reason+0x18/0xb0
+[ 54.863878] btnxpuart_flush+0x40/0x58 [btnxpuart]
+[ 54.863888] hci_dev_open_sync+0x3a8/0xa04
+[ 54.872773] hci_power_on+0x54/0x2e4
+[ 54.881832] process_one_work+0x138/0x260
+[ 54.881842] worker_thread+0x32c/0x438
+[ 54.881847] kthread+0x118/0x11c
+[ 54.881853] ret_from_fork+0x10/0x20
+[ 54.896406] Code: a9be7bfd 910003fd f9000bf3 aa0003f3 (b940d400)
+[ 54.896410] ---[ end trace 0000000000000000 ]---
+
+Signed-off-by: Neeraj Sanjay Kale <neeraj.sanjaykale@nxp.com>
+Tested-by: Guillaume Legoupil <guillaume.legoupil@nxp.com>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/bluetooth/btnxpuart.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
+index eeba2d26d1cb..5890ecd8e948 100644
+--- a/drivers/bluetooth/btnxpuart.c
++++ b/drivers/bluetooth/btnxpuart.c
+@@ -1326,8 +1326,10 @@ static int btnxpuart_close(struct hci_dev *hdev)
+
+ serdev_device_close(nxpdev->serdev);
+ skb_queue_purge(&nxpdev->txq);
+- kfree_skb(nxpdev->rx_skb);
+- nxpdev->rx_skb = NULL;
++ if (!IS_ERR_OR_NULL(nxpdev->rx_skb)) {
++ kfree_skb(nxpdev->rx_skb);
++ nxpdev->rx_skb = NULL;
++ }
+ clear_bit(BTNXPUART_SERDEV_OPEN, &nxpdev->tx_state);
+ return 0;
+ }
+@@ -1342,8 +1344,10 @@ static int btnxpuart_flush(struct hci_dev *hdev)
+
+ cancel_work_sync(&nxpdev->tx_work);
+
+- kfree_skb(nxpdev->rx_skb);
+- nxpdev->rx_skb = NULL;
++ if (!IS_ERR_OR_NULL(nxpdev->rx_skb)) {
++ kfree_skb(nxpdev->rx_skb);
++ nxpdev->rx_skb = NULL;
++ }
+
+ return 0;
+ }
+--
+2.43.0
+
--- /dev/null
+From a9e7763cf50f00aa09146af42a17653c6aff034f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 26 Aug 2024 15:47:30 -0400
+Subject: Bluetooth: hci_sync: Introduce hci_cmd_sync_run/hci_cmd_sync_run_once
+
+From: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+
+[ Upstream commit c898f6d7b093bd71e66569cd6797c87d4056f44b ]
+
+This introduces hci_cmd_sync_run/hci_cmd_sync_run_once, which act like
+hci_cmd_sync_queue/hci_cmd_sync_queue_once but run immediately when
+already in hdev->cmd_sync_work context.
+
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Stable-dep-of: 227a0cdf4a02 ("Bluetooth: MGMT: Fix not generating command complete for MGMT_OP_DISCONNECT")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/bluetooth/hci_sync.h | 4 +++
+ net/bluetooth/hci_sync.c | 42 ++++++++++++++++++++++++++++++--
+ 2 files changed, 44 insertions(+), 2 deletions(-)
+
+diff --git a/include/net/bluetooth/hci_sync.h b/include/net/bluetooth/hci_sync.h
+index 534c3386e714..3cb2d10cac93 100644
+--- a/include/net/bluetooth/hci_sync.h
++++ b/include/net/bluetooth/hci_sync.h
+@@ -52,6 +52,10 @@ int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy);
+ int hci_cmd_sync_queue_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy);
++int hci_cmd_sync_run(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
++ void *data, hci_cmd_sync_work_destroy_t destroy);
++int hci_cmd_sync_run_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
++ void *data, hci_cmd_sync_work_destroy_t destroy);
+ struct hci_cmd_sync_work_entry *
+ hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy);
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index 4e90bd722e7b..f4a54dbc07f1 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -114,7 +114,7 @@ static void hci_cmd_sync_add(struct hci_request *req, u16 opcode, u32 plen,
+ skb_queue_tail(&req->cmd_q, skb);
+ }
+
+-static int hci_cmd_sync_run(struct hci_request *req)
++static int hci_req_sync_run(struct hci_request *req)
+ {
+ struct hci_dev *hdev = req->hdev;
+ struct sk_buff *skb;
+@@ -164,7 +164,7 @@ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
+
+ hdev->req_status = HCI_REQ_PEND;
+
+- err = hci_cmd_sync_run(&req);
++ err = hci_req_sync_run(&req);
+ if (err < 0)
+ return ERR_PTR(err);
+
+@@ -730,6 +730,44 @@ int hci_cmd_sync_queue_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ }
+ EXPORT_SYMBOL(hci_cmd_sync_queue_once);
+
++/* Run HCI command:
++ *
++ * - hdev must be running
++ * - if on cmd_sync_work then run immediately otherwise queue
++ */
++int hci_cmd_sync_run(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
++ void *data, hci_cmd_sync_work_destroy_t destroy)
++{
++ /* Only queue command if hdev is running which means it had been opened
++ * and is either on init phase or is already up.
++ */
++ if (!test_bit(HCI_RUNNING, &hdev->flags))
++ return -ENETDOWN;
++
++ /* If on cmd_sync_work then run immediately otherwise queue */
++ if (current_work() == &hdev->cmd_sync_work)
++ return func(hdev, data);
++
++ return hci_cmd_sync_submit(hdev, func, data, destroy);
++}
++EXPORT_SYMBOL(hci_cmd_sync_run);
++
++/* Run HCI command entry once:
++ *
++ * - Lookup if an entry already exist and only if it doesn't creates a new entry
++ * and run it.
++ * - if on cmd_sync_work then run immediately otherwise queue
++ */
++int hci_cmd_sync_run_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
++ void *data, hci_cmd_sync_work_destroy_t destroy)
++{
++ if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy))
++ return 0;
++
++ return hci_cmd_sync_run(hdev, func, data, destroy);
++}
++EXPORT_SYMBOL(hci_cmd_sync_run_once);
++
+ /* Lookup HCI command entry:
+ *
+ * - Return first entry that matches by function callback or data or
+--
+2.43.0
+
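
hci_cmd_sync_run() above exists so that a callback which is already executing
on hdev->cmd_sync_work runs inline instead of being queued behind itself, which
is what makes it usable from hci_abort_conn() in the following MGMT fix. A
threaded userspace sketch of the same run-now-or-queue decision (the worker
bookkeeping here is illustrative, not the hci_sync implementation):

#include <pthread.h>

typedef int (*work_fn)(void *data);

static pthread_t worker_thread;	/* recorded when the worker starts */

static int queue_work_item(work_fn fn, void *data)
{
	/* append to the worker's queue and wake it up (omitted) */
	(void)fn;
	(void)data;
	return 0;
}

/*
 * If we are already the worker thread, run the callback directly;
 * queueing it behind the work item we are currently running would mean
 * it cannot execute until that item completes, which deadlocks when the
 * current item depends on the result.
 */
static int run_or_queue(work_fn fn, void *data)
{
	if (pthread_equal(pthread_self(), worker_thread))
		return fn(data);

	return queue_work_item(fn, data);
}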
--- /dev/null
+From eed978a28b39fad45643ae964578a3dc46b6da99 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 26 Aug 2024 16:14:04 -0400
+Subject: Bluetooth: MGMT: Fix not generating command complete for
+ MGMT_OP_DISCONNECT
+
+From: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+
+[ Upstream commit 227a0cdf4a028a73dc256d0f5144b4808d718893 ]
+
+MGMT_OP_DISCONNECT can be called while mgmt_device_connected has not
+been called yet, which will cause the connection procedure to be
+aborted. In that case mgmt_device_disconnected shall still respond with
+command complete to MGMT_OP_DISCONNECT, but not emit
+MGMT_EV_DEVICE_DISCONNECTED, since MGMT_EV_DEVICE_CONNECTED was never
+sent.
+
+To fix this MGMT_OP_DISCONNECT is changed to work similarly to other
+command which do use hci_cmd_sync_queue and then use hci_conn_abort to
+disconnect and returns the result, in order for hci_conn_abort to be
+used from hci_cmd_sync context it now uses hci_cmd_sync_run_once.
+
+Link: https://github.com/bluez/bluez/issues/932
+Fixes: 12d4a3b2ccb3 ("Bluetooth: Move check for MGMT_CONNECTED flag into mgmt.c")
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bluetooth/hci_conn.c | 6 ++-
+ net/bluetooth/mgmt.c | 84 ++++++++++++++++++++--------------------
+ 2 files changed, 47 insertions(+), 43 deletions(-)
+
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index 080053a85b4d..3c74d171085d 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -2953,5 +2953,9 @@ int hci_abort_conn(struct hci_conn *conn, u8 reason)
+ return 0;
+ }
+
+- return hci_cmd_sync_queue_once(hdev, abort_conn_sync, conn, NULL);
++ /* Run immediately if on cmd_sync_work since this may be called
++ * as a result to MGMT_OP_DISCONNECT/MGMT_OP_UNPAIR which does
++ * already queue its callback on cmd_sync_work.
++ */
++ return hci_cmd_sync_run_once(hdev, abort_conn_sync, conn, NULL);
+ }
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index fa3fa1fde5df..ba28907afb3f 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -2925,7 +2925,12 @@ static int unpair_device_sync(struct hci_dev *hdev, void *data)
+ if (!conn)
+ return 0;
+
+- return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
++ /* Disregard any possible error since the likes of hci_abort_conn_sync
++ * will clean up the connection no matter the error.
++ */
++ hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
++
++ return 0;
+ }
+
+ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
+@@ -3057,13 +3062,44 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
+ return err;
+ }
+
++static void disconnect_complete(struct hci_dev *hdev, void *data, int err)
++{
++ struct mgmt_pending_cmd *cmd = data;
++
++ cmd->cmd_complete(cmd, mgmt_status(err));
++ mgmt_pending_free(cmd);
++}
++
++static int disconnect_sync(struct hci_dev *hdev, void *data)
++{
++ struct mgmt_pending_cmd *cmd = data;
++ struct mgmt_cp_disconnect *cp = cmd->param;
++ struct hci_conn *conn;
++
++ if (cp->addr.type == BDADDR_BREDR)
++ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
++ &cp->addr.bdaddr);
++ else
++ conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
++ le_addr_type(cp->addr.type));
++
++ if (!conn)
++ return -ENOTCONN;
++
++ /* Disregard any possible error since the likes of hci_abort_conn_sync
++ * will clean up the connection no matter the error.
++ */
++ hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
++
++ return 0;
++}
++
+ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+ {
+ struct mgmt_cp_disconnect *cp = data;
+ struct mgmt_rp_disconnect rp;
+ struct mgmt_pending_cmd *cmd;
+- struct hci_conn *conn;
+ int err;
+
+ bt_dev_dbg(hdev, "sock %p", sk);
+@@ -3086,27 +3122,7 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
+ goto failed;
+ }
+
+- if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
+- err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
+- MGMT_STATUS_BUSY, &rp, sizeof(rp));
+- goto failed;
+- }
+-
+- if (cp->addr.type == BDADDR_BREDR)
+- conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
+- &cp->addr.bdaddr);
+- else
+- conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
+- le_addr_type(cp->addr.type));
+-
+- if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
+- err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
+- MGMT_STATUS_NOT_CONNECTED, &rp,
+- sizeof(rp));
+- goto failed;
+- }
+-
+- cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
++ cmd = mgmt_pending_new(sk, MGMT_OP_DISCONNECT, hdev, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+@@ -3114,9 +3130,10 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
+
+ cmd->cmd_complete = generic_cmd_complete;
+
+- err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
++ err = hci_cmd_sync_queue(hdev, disconnect_sync, cmd,
++ disconnect_complete);
+ if (err < 0)
+- mgmt_pending_remove(cmd);
++ mgmt_pending_free(cmd);
+
+ failed:
+ hci_dev_unlock(hdev);
+@@ -9634,18 +9651,6 @@ void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
+ mgmt_event_skb(skb, NULL);
+ }
+
+-static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
+-{
+- struct sock **sk = data;
+-
+- cmd->cmd_complete(cmd, 0);
+-
+- *sk = cmd->sk;
+- sock_hold(*sk);
+-
+- mgmt_pending_remove(cmd);
+-}
+-
+ static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
+ {
+ struct hci_dev *hdev = data;
+@@ -9689,8 +9694,6 @@ void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ if (link_type != ACL_LINK && link_type != LE_LINK)
+ return;
+
+- mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
+-
+ bacpy(&ev.addr.bdaddr, bdaddr);
+ ev.addr.type = link_to_bdaddr(link_type, addr_type);
+ ev.reason = reason;
+@@ -9703,9 +9706,6 @@ void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
+
+ if (sk)
+ sock_put(sk);
+-
+- mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
+- hdev);
+ }
+
+ void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
+--
+2.43.0
+
--- /dev/null
+From d131c95d7e26cb7ac30c698c6a814ce939e0569d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Aug 2024 15:43:40 -0700
+Subject: Bluetooth: qca: If memdump doesn't work, re-enable IBS
+
+From: Douglas Anderson <dianders@chromium.org>
+
+[ Upstream commit 8ae22de9d2eae3c432de64bf2b3a5a69cf1d1124 ]
+
+On systems in the field, we are seeing this sometimes in the kernel logs:
+ Bluetooth: qca_controller_memdump() hci0: hci_devcd_init Return:-95
+
+This means that _something_ decided that it wanted to get a memdump
+but then hci_devcd_init() returned -EOPNOTSUPP (AKA -95).
+
+The cleanup code in qca_controller_memdump(), when we get back an error
+from hci_devcd_init(), undoes most things but forgets to clear
+QCA_IBS_DISABLED. One side effect of this is that, during the next
+suspend, qca_suspend() will always get a timeout.
+
+Let's fix it so that we clear the bit.
+
+Fixes: 06d3fdfcdf5c ("Bluetooth: hci_qca: Add qcom devcoredump support")
+Reviewed-by: Guenter Roeck <groeck@chromium.org>
+Reviewed-by: Stephen Boyd <swboyd@chromium.org>
+Signed-off-by: Douglas Anderson <dianders@chromium.org>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/bluetooth/hci_qca.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
+index 34c36f0f781e..c5606a62f230 100644
+--- a/drivers/bluetooth/hci_qca.c
++++ b/drivers/bluetooth/hci_qca.c
+@@ -1090,6 +1090,7 @@ static void qca_controller_memdump(struct work_struct *work)
+ qca->memdump_state = QCA_MEMDUMP_COLLECTED;
+ cancel_delayed_work(&qca->ctrl_memdump_timeout);
+ clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
++ clear_bit(QCA_IBS_DISABLED, &qca->flags);
+ mutex_unlock(&qca->hci_memdump_lock);
+ return;
+ }
+--
+2.43.0
+
--- /dev/null
+From 67e60c14101f138e21cd14b88559812875be2f6b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 31 Aug 2024 14:47:02 +0900
+Subject: bpf: add check for invalid name in btf_name_valid_section()
+
+From: Jeongjun Park <aha310510@gmail.com>
+
+[ Upstream commit bb6705c3f93bed2af03d43691743d4c43e3c8e6f ]
+
+If the length of the name string is 1 and the value of name[0] is a NULL
+byte, an OOB access occurs in btf_name_valid_section() and the return
+value is true, so the invalid name passes the check.
+
+To solve this, check whether the first byte is a NULL byte and whether
+the first character is printable.
+
+Suggested-by: Eduard Zingerman <eddyz87@gmail.com>
+Fixes: bd70a8fb7ca4 ("bpf: Allow all printable characters in BTF DATASEC names")
+Signed-off-by: Jeongjun Park <aha310510@gmail.com>
+Link: https://lore.kernel.org/r/20240831054702.364455-1-aha310510@gmail.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Eduard Zingerman <eddyz87@gmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/btf.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
+index fe360b5b211d..2f157ffbc67c 100644
+--- a/kernel/bpf/btf.c
++++ b/kernel/bpf/btf.c
+@@ -817,9 +817,11 @@ static bool btf_name_valid_section(const struct btf *btf, u32 offset)
+ const char *src = btf_str_by_offset(btf, offset);
+ const char *src_limit;
+
++ if (!*src)
++ return false;
++
+ /* set a limit on identifier length */
+ src_limit = src + KSYM_NAME_LEN;
+- src++;
+ while (*src && src < src_limit) {
+ if (!isprint(*src))
+ return false;
+--
+2.43.0
+
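As a standalone user-space approximation (not the kernel function itself; NAME_LIMIT is only an assumed stand-in for KSYM_NAME_LEN), the rule the fix enforces can be sketched as: the name must be non-empty and every character, including the first, must be printable.

#include <ctype.h>
#include <stdbool.h>
#include <stddef.h>

/* Assumed stand-in for the kernel's KSYM_NAME_LEN limit. */
#define NAME_LIMIT 512

static bool datasec_name_ok(const char *name)
{
        size_t i;

        /* The added check: an empty name (name[0] == '\0') is invalid. */
        if (!name || name[0] == '\0')
                return false;

        /* Every character, including the first, must be printable. */
        for (i = 0; name[i] && i < NAME_LIMIT; i++)
                if (!isprint((unsigned char)name[i]))
                        return false;

        return i < NAME_LIMIT;
}
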
--- /dev/null
+From b4209056f2b5bfeae2073232c261323e568dad7e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 30 Aug 2024 16:25:17 +0800
+Subject: bpf, net: Fix a potential race in do_sock_getsockopt()
+
+From: Tze-nan Wu <Tze-nan.Wu@mediatek.com>
+
+[ Upstream commit 33f339a1ba54e56bba57ee9a77c71e385ab4825c ]
+
+There's a potential race when `cgroup_bpf_enabled(CGROUP_GETSOCKOPT)` is
+false during the execution of `BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN`, but
+becomes true when `BPF_CGROUP_RUN_PROG_GETSOCKOPT` is called.
+This inconsistency can lead to `BPF_CGROUP_RUN_PROG_GETSOCKOPT` receiving
+an "-EFAULT" from `__cgroup_bpf_run_filter_getsockopt(max_optlen=0)`.
+Scenario shown as below:
+
+ `process A` `process B`
+ ----------- ------------
+ BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN
+ enable CGROUP_GETSOCKOPT
+ BPF_CGROUP_RUN_PROG_GETSOCKOPT (-EFAULT)
+
+To resolve this, remove the `BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN` macro and
+directly use `copy_from_sockptr` to ensure that `max_optlen` is always
+set before `BPF_CGROUP_RUN_PROG_GETSOCKOPT` is invoked.
+
+Fixes: 0d01da6afc54 ("bpf: implement getsockopt and setsockopt hooks")
+Co-developed-by: Yanghui Li <yanghui.li@mediatek.com>
+Signed-off-by: Yanghui Li <yanghui.li@mediatek.com>
+Co-developed-by: Cheng-Jui Wang <cheng-jui.wang@mediatek.com>
+Signed-off-by: Cheng-Jui Wang <cheng-jui.wang@mediatek.com>
+Signed-off-by: Tze-nan Wu <Tze-nan.Wu@mediatek.com>
+Acked-by: Stanislav Fomichev <sdf@fomichev.me>
+Acked-by: Alexei Starovoitov <ast@kernel.org>
+Link: https://patch.msgid.link/20240830082518.23243-1-Tze-nan.Wu@mediatek.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/bpf-cgroup.h | 9 ---------
+ net/socket.c | 4 ++--
+ 2 files changed, 2 insertions(+), 11 deletions(-)
+
+diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
+index fb3c3e7181e6..ce91d9b2acb9 100644
+--- a/include/linux/bpf-cgroup.h
++++ b/include/linux/bpf-cgroup.h
+@@ -390,14 +390,6 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
+ __ret; \
+ })
+
+-#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) \
+-({ \
+- int __ret = 0; \
+- if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \
+- copy_from_sockptr(&__ret, optlen, sizeof(int)); \
+- __ret; \
+-})
+-
+ #define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen, \
+ max_optlen, retval) \
+ ({ \
+@@ -518,7 +510,6 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
+ #define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
+ #define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) ({ 0; })
+ #define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos) ({ 0; })
+-#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
+ #define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
+ optlen, max_optlen, retval) ({ retval; })
+ #define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
+diff --git a/net/socket.c b/net/socket.c
+index e416920e9399..b5a003974058 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -2350,7 +2350,7 @@ INDIRECT_CALLABLE_DECLARE(bool tcp_bpf_bypass_getsockopt(int level,
+ int do_sock_getsockopt(struct socket *sock, bool compat, int level,
+ int optname, sockptr_t optval, sockptr_t optlen)
+ {
+- int max_optlen __maybe_unused;
++ int max_optlen __maybe_unused = 0;
+ const struct proto_ops *ops;
+ int err;
+
+@@ -2359,7 +2359,7 @@ int do_sock_getsockopt(struct socket *sock, bool compat, int level,
+ return err;
+
+ if (!compat)
+- max_optlen = BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen);
++ copy_from_sockptr(&max_optlen, optlen, sizeof(int));
+
+ ops = READ_ONCE(sock->ops);
+ if (level == SOL_SOCKET) {
+--
+2.43.0
+
--- /dev/null
+From a781d8efcb1632c54b53e72afbdd897d73d18f42 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Jul 2024 16:16:31 +0200
+Subject: bpf: Remove tst_run from lwt_seg6local_prog_ops.
+
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+[ Upstream commit c13fda93aca118b8e5cd202e339046728ee7dddb ]
+
+syzbot reported that the lwt_seg6 related BPF ops can be invoked via
+bpf_test_run() without entering input_action_end_bpf() first.
+
+Martin KaFai Lau said that the self test for BPF_PROG_TYPE_LWT_SEG6LOCAL
+probably didn't work since it was introduced in commit 04d4b274e2a
+("ipv6: sr: Add seg6local action End.BPF"). The reason is that the
+per-CPU variable seg6_bpf_srh_states::srh is never assigned in the self
+test case but each BPF function expects it.
+
+Remove test_run for BPF_PROG_TYPE_LWT_SEG6LOCAL.
+
+Suggested-by: Martin KaFai Lau <martin.lau@linux.dev>
+Reported-by: syzbot+608a2acde8c5a101d07d@syzkaller.appspotmail.com
+Fixes: d1542d4ae4df ("seg6: Use nested-BH locking for seg6_bpf_srh_states.")
+Fixes: 004d4b274e2a ("ipv6: sr: Add seg6local action End.BPF")
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Acked-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/r/20240710141631.FbmHcQaX@linutronix.de
+Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/filter.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/net/core/filter.c b/net/core/filter.c
+index ab0455c64e49..55b1d9de2334 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -11047,7 +11047,6 @@ const struct bpf_verifier_ops lwt_seg6local_verifier_ops = {
+ };
+
+ const struct bpf_prog_ops lwt_seg6local_prog_ops = {
+- .test_run = bpf_prog_test_run_skb,
+ };
+
+ const struct bpf_verifier_ops cg_sock_verifier_ops = {
+--
+2.43.0
+
--- /dev/null
+From 1a8d6f1681eedb674bb987441503343a80810016 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 10 Jun 2024 20:42:23 +0800
+Subject: bpf, verifier: Correct tail_call_reachable for bpf prog
+
+From: Leon Hwang <hffilwlqm@gmail.com>
+
+[ Upstream commit 01793ed86b5d7df1e956520b5474940743eb7ed8 ]
+
+It's confusing to inspect 'prog->aux->tail_call_reachable' with drgn[0],
+when a bpf prog has a tail call but 'tail_call_reachable' is false.
+
+This patch corrects 'tail_call_reachable' when a bpf prog has a tail call.
+
+Signed-off-by: Leon Hwang <hffilwlqm@gmail.com>
+Link: https://lore.kernel.org/r/20240610124224.34673-2-hffilwlqm@gmail.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/verifier.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 521bd7efae03..73f55f4b945e 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -2982,8 +2982,10 @@ static int check_subprogs(struct bpf_verifier_env *env)
+
+ if (code == (BPF_JMP | BPF_CALL) &&
+ insn[i].src_reg == 0 &&
+- insn[i].imm == BPF_FUNC_tail_call)
++ insn[i].imm == BPF_FUNC_tail_call) {
+ subprog[cur_subprog].has_tail_call = true;
++ subprog[cur_subprog].tail_call_reachable = true;
++ }
+ if (BPF_CLASS(code) == BPF_LD &&
+ (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND))
+ subprog[cur_subprog].has_ld_abs = true;
+--
+2.43.0
+
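For illustration only (a libbpf-style sketch, not part of the patch): a program such as the one below contains a tail call in its main body, which is the BPF_JMP|BPF_CALL / BPF_FUNC_tail_call pattern that check_subprogs() matches in the hunk above, so after this change the enclosing subprog is marked tail_call_reachable as well as has_tail_call.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
        __uint(max_entries, 1);
        __uint(key_size, sizeof(__u32));
        __uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

SEC("xdp")
int entry(struct xdp_md *ctx)
{
        /* Emits BPF_JMP|BPF_CALL with imm == BPF_FUNC_tail_call. */
        bpf_tail_call(ctx, &jmp_table, 0);
        return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
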
--- /dev/null
+From edfe4251ee1100b0c21c9ab3aaabe4f51f0e56a0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 May 2024 14:12:13 -0400
+Subject: btrfs: clean up our handling of refs == 0 in snapshot delete
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+[ Upstream commit b8ccef048354074a548f108e51d0557d6adfd3a3 ]
+
+In reada we BUG_ON(refs == 0), which could be unkind since we aren't
+holding a lock on the extent leaf and thus could get a transient
+incorrect answer. In walk_down_proc we also BUG_ON(refs == 0), which
+could happen if we have extent tree corruption. Change that to return
+-EUCLEAN. In do_walk_down() we catch this case and handle it correctly,
+however we return -EIO, where -EUCLEAN would be a more appropriate error code.
+Finally in walk_up_proc we have the same BUG_ON(refs == 0), so convert
+that to proper error handling. Also adjust the error message so we can
+actually do something with the information.
+
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/extent-tree.c | 28 +++++++++++++++++++++++-----
+ 1 file changed, 23 insertions(+), 5 deletions(-)
+
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index d107f5809eae..96cec4d6b447 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -5275,7 +5275,15 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
+ /* We don't care about errors in readahead. */
+ if (ret < 0)
+ continue;
+- BUG_ON(refs == 0);
++
++ /*
++ * This could be racey, it's conceivable that we raced and end
++ * up with a bogus refs count, if that's the case just skip, if
++ * we are actually corrupt we will notice when we look up
++ * everything again with our locks.
++ */
++ if (refs == 0)
++ continue;
+
+ if (wc->stage == DROP_REFERENCE) {
+ if (refs == 1)
+@@ -5341,7 +5349,11 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
+ NULL);
+ if (ret)
+ return ret;
+- BUG_ON(wc->refs[level] == 0);
++ if (unlikely(wc->refs[level] == 0)) {
++ btrfs_err(fs_info, "bytenr %llu has 0 references, expect > 0",
++ eb->start);
++ return -EUCLEAN;
++ }
+ }
+
+ if (wc->stage == DROP_REFERENCE) {
+@@ -5514,8 +5526,9 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
+ goto out_unlock;
+
+ if (unlikely(wc->refs[level - 1] == 0)) {
+- btrfs_err(fs_info, "Missing references.");
+- ret = -EIO;
++ btrfs_err(fs_info, "bytenr %llu has 0 references, expect > 0",
++ bytenr);
++ ret = -EUCLEAN;
+ goto out_unlock;
+ }
+ *lookup_info = 0;
+@@ -5718,7 +5731,12 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
+ path->locks[level] = 0;
+ return ret;
+ }
+- BUG_ON(wc->refs[level] == 0);
++ if (unlikely(wc->refs[level] == 0)) {
++ btrfs_tree_unlock_rw(eb, path->locks[level]);
++ btrfs_err(fs_info, "bytenr %llu has 0 references, expect > 0",
++ eb->start);
++ return -EUCLEAN;
++ }
+ if (wc->refs[level] == 1) {
+ btrfs_tree_unlock_rw(eb, path->locks[level]);
+ path->locks[level] = 0;
+--
+2.43.0
+
--- /dev/null
+From 725af132260f403f51ce86d1cd883b6b82cafe8d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 May 2024 14:12:10 -0400
+Subject: btrfs: don't BUG_ON on ENOMEM from btrfs_lookup_extent_info() in
+ walk_down_proc()
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+[ Upstream commit a580fb2c3479d993556e1c31b237c9e5be4944a3 ]
+
+We handle errors here properly; ENOMEM isn't fatal, so return the error.
+
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/extent-tree.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 8bf980123c5c..0effe13ae459 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -5339,7 +5339,6 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
+ &wc->refs[level],
+ &wc->flags[level],
+ NULL);
+- BUG_ON(ret == -ENOMEM);
+ if (ret)
+ return ret;
+ BUG_ON(wc->refs[level] == 0);
+--
+2.43.0
+
--- /dev/null
+From ed9d59dca50583f606f21d74f968652142b7e255 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 18 Jun 2024 12:15:01 +0100
+Subject: btrfs: don't BUG_ON() when 0 reference count at
+ btrfs_lookup_extent_info()
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit 28cb13f29faf6290597b24b728dc3100c019356f ]
+
+Instead of doing a BUG_ON() handle the error by returning -EUCLEAN,
+aborting the transaction and logging an error message.
+
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/extent-tree.c | 24 ++++++++++++++++++++----
+ 1 file changed, 20 insertions(+), 4 deletions(-)
+
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 033eb428ffcd..55be8a7f0bb1 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -173,9 +173,16 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
+
+ ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
+ num_refs = btrfs_extent_refs(leaf, ei);
++ if (unlikely(num_refs == 0)) {
++ ret = -EUCLEAN;
++ btrfs_err(fs_info,
++ "unexpected zero reference count for extent item (%llu %u %llu)",
++ key.objectid, key.type, key.offset);
++ btrfs_abort_transaction(trans, ret);
++ goto out_free;
++ }
+ extent_flags = btrfs_extent_flags(leaf, ei);
+ owner = btrfs_get_extent_owner_root(fs_info, leaf, path->slots[0]);
+- BUG_ON(num_refs == 0);
+ } else {
+ num_refs = 0;
+ extent_flags = 0;
+@@ -205,10 +212,19 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
+ goto search_again;
+ }
+ spin_lock(&head->lock);
+- if (head->extent_op && head->extent_op->update_flags)
++ if (head->extent_op && head->extent_op->update_flags) {
+ extent_flags |= head->extent_op->flags_to_set;
+- else
+- BUG_ON(num_refs == 0);
++ } else if (unlikely(num_refs == 0)) {
++ spin_unlock(&head->lock);
++ mutex_unlock(&head->mutex);
++ spin_unlock(&delayed_refs->lock);
++ ret = -EUCLEAN;
++ btrfs_err(fs_info,
++ "unexpected zero reference count for extent %llu (%s)",
++ bytenr, metadata ? "metadata" : "data");
++ btrfs_abort_transaction(trans, ret);
++ goto out_free;
++ }
+
+ num_refs += head->ref_mod;
+ spin_unlock(&head->lock);
+--
+2.43.0
+
--- /dev/null
+From 3993af0cee9c2ab0696d3a700e2a2ba52ab4bf6f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 May 2024 14:12:15 -0400
+Subject: btrfs: handle errors from btrfs_dec_ref() properly
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+[ Upstream commit 5eb178f373b4f16f3b42d55ff88fc94dd95b93b1 ]
+
+In walk_up_proc() we BUG_ON(ret) from btrfs_dec_ref(). This is
+incorrect; we have proper error handling here, so return the error.
+
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/extent-tree.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 96cec4d6b447..033eb428ffcd 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -5754,7 +5754,10 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
+ ret = btrfs_dec_ref(trans, root, eb, 1);
+ else
+ ret = btrfs_dec_ref(trans, root, eb, 0);
+- BUG_ON(ret); /* -ENOMEM */
++ if (ret) {
++ btrfs_abort_transaction(trans, ret);
++ return ret;
++ }
+ if (is_fstree(btrfs_root_id(root))) {
+ ret = btrfs_qgroup_trace_leaf_items(trans, eb);
+ if (ret) {
+--
+2.43.0
+
--- /dev/null
+From 7cae6c251bbe27a2f159192315906cb1beca3f76 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 29 Jul 2024 21:59:24 +0200
+Subject: btrfs: initialize location to fix -Wmaybe-uninitialized in
+ btrfs_lookup_dentry()
+
+From: David Sterba <dsterba@suse.com>
+
+[ Upstream commit b8e947e9f64cac9df85a07672b658df5b2bcff07 ]
+
+Some arch + compiler combinations report a potentially uninitialized
+variable 'location' in btrfs_lookup_dentry(). This is a false alert as
+the variable is passed by value and is always valid, or there's an
+error. The compilers probably cannot reason about that although
+btrfs_inode_by_name() is in the same file.
+
+ > + /kisskb/src/fs/btrfs/inode.c: error: 'location.objectid' may be used
+ +uninitialized in this function [-Werror=maybe-uninitialized]: => 5603:9
+ > + /kisskb/src/fs/btrfs/inode.c: error: 'location.type' may be used
+ +uninitialized in this function [-Werror=maybe-uninitialized]: => 5674:5
+
+ m68k-gcc8/m68k-allmodconfig
+ mips-gcc8/mips-allmodconfig
+ powerpc-gcc5/powerpc-all{mod,yes}config
+ powerpc-gcc5/ppc64_defconfig
+
+Initialize it to zero, this should fix the warnings and won't change the
+behaviour as btrfs_inode_by_name() accepts only a root or inode item
+types, otherwise returns an error.
+
+Reported-by: Geert Uytterhoeven <geert@linux-m68k.org>
+Tested-by: Geert Uytterhoeven <geert@linux-m68k.org>
+Link: https://lore.kernel.org/linux-btrfs/bd4e9928-17b3-9257-8ba7-6b7f9bbb639a@linux-m68k.org/
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/inode.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index c2f48fc159e5..2951aa0039fc 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -5699,7 +5699,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
+ struct inode *inode;
+ struct btrfs_root *root = BTRFS_I(dir)->root;
+ struct btrfs_root *sub_root = root;
+- struct btrfs_key location;
++ struct btrfs_key location = { 0 };
+ u8 di_type = 0;
+ int ret = 0;
+
+--
+2.43.0
+
--- /dev/null
+From 393a51f51899321fa8b1f5a8e9cc8698336001c9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 May 2024 14:12:12 -0400
+Subject: btrfs: replace BUG_ON with ASSERT in walk_down_proc()
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+[ Upstream commit 1f9d44c0a12730a24f8bb75c5e1102207413cc9b ]
+
+We have a couple of areas where we check to make sure the tree block is
+locked before looking up or messing with references. This is old code
+so it has this as BUG_ON(). Convert this to ASSERT() for developers.
+
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/extent-tree.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 0effe13ae459..d107f5809eae 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -5333,7 +5333,7 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
+ if (lookup_info &&
+ ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
+ (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
+- BUG_ON(!path->locks[level]);
++ ASSERT(path->locks[level]);
+ ret = btrfs_lookup_extent_info(trans, fs_info,
+ eb->start, level, 1,
+ &wc->refs[level],
+@@ -5357,7 +5357,7 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
+
+ /* wc->stage == UPDATE_BACKREF */
+ if (!(wc->flags[level] & flag)) {
+- BUG_ON(!path->locks[level]);
++ ASSERT(path->locks[level]);
+ ret = btrfs_inc_ref(trans, root, eb, 1);
+ BUG_ON(ret); /* -ENOMEM */
+ ret = btrfs_dec_ref(trans, root, eb, 0);
+--
+2.43.0
+
--- /dev/null
+From 0032486b529c17b0afa53b537da090543c8be999 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 18 Jun 2024 15:55:16 +0100
+Subject: btrfs: replace BUG_ON() with error handling at update_ref_for_cow()
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit b56329a782314fde5b61058e2a25097af7ccb675 ]
+
+Instead of a BUG_ON() just return an error, log an error message and
+abort the transaction in case we find an extent buffer belonging to the
+relocation tree that doesn't have the full backref flag set. This is
+unexpected and should never happen (save for bugs or potentially bad
+memory).
+
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/ctree.c | 12 ++++++++++--
+ 1 file changed, 10 insertions(+), 2 deletions(-)
+
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index 8a791b648ac5..f56914507fce 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -462,8 +462,16 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
+ }
+
+ owner = btrfs_header_owner(buf);
+- BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
+- !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
++ if (unlikely(owner == BTRFS_TREE_RELOC_OBJECTID &&
++ !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))) {
++ btrfs_crit(fs_info,
++"found tree block at bytenr %llu level %d root %llu refs %llu flags %llx without full backref flag set",
++ buf->start, btrfs_header_level(buf),
++ btrfs_root_id(root), refs, flags);
++ ret = -EUCLEAN;
++ btrfs_abort_transaction(trans, ret);
++ return ret;
++ }
+
+ if (refs > 1) {
+ if ((owner == btrfs_root_id(root) ||
+--
+2.43.0
+
--- /dev/null
+From 3349a50379dde06ce989b925775f35236f1a0d4d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 19 Apr 2024 14:29:32 +0930
+Subject: btrfs: slightly loosen the requirement for qgroup removal
+
+From: Qu Wenruo <wqu@suse.com>
+
+[ Upstream commit a776bf5f3c2300cfdf8a195663460b1793ac9847 ]
+
+[BUG]
+Currently if one is utilizing the "qgroups/drop_subtree_threshold" sysfs
+file, and a snapshot with a level higher than that value is dropped, we
+will not be able to delete the qgroup until the next qgroup rescan:
+
+ uuid=ffffffff-eeee-dddd-cccc-000000000000
+
+ wipefs -fa $dev
+ mkfs.btrfs -f $dev -O quota -s 4k -n 4k -U $uuid
+ mount $dev $mnt
+
+ btrfs subvolume create $mnt/subv1/
+ for (( i = 0; i < 1024; i++ )); do
+ xfs_io -f -c "pwrite 0 2k" $mnt/subv1/file_$i > /dev/null
+ done
+ sync
+ btrfs subvolume snapshot $mnt/subv1 $mnt/snapshot
+ btrfs quota enable $mnt
+ btrfs quota rescan -w $mnt
+ sync
+ echo 1 > /sys/fs/btrfs/$uuid/qgroups/drop_subtree_threshold
+ btrfs subvolume delete $mnt/snapshot
+ btrfs subvolume sync $mnt
+ btrfs qgroup show -prce --sync $mnt
+ btrfs qgroup destroy 0/257 $mnt
+ umount $mnt
+
+The final qgroup removal would fail with the following error:
+
+ ERROR: unable to destroy quota group: Device or resource busy
+
+[CAUSE]
+The above script would generate a subvolume of level 2, then snapshot
+it, enable qgroup, set the drop_subtree_threshold, then drop the
+snapshot.
+
+Since the subvolume drop would meet the threshold, qgroup would be
+marked inconsistent and skip accounting to avoid hanging the system at
+transaction commit.
+
+But currently we do not allow a qgroup with any rfer/excl numbers to be
+dropped, and this is not really compatible with the new
+drop_subtree_threshold behavior.
+
+[FIX]
+Only require the strict zero rfer/excl/rfer_cmpr/excl_cmpr for squota
+mode. This is due to the fact that squota can never go inconsistent,
+and it can have dropped subvolume but with non-zero qgroup numbers for
+future accounting.
+
+For full qgroup mode, we only check if there is a subvolume for it.
+
+Reviewed-by: Boris Burkov <boris@bur.io>
+Signed-off-by: Qu Wenruo <wqu@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/qgroup.c | 87 +++++++++++++++++++++++++++++++++++++++++++----
+ 1 file changed, 80 insertions(+), 7 deletions(-)
+
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index d4486518414d..24df83177007 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -1750,13 +1750,55 @@ int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
+ return ret;
+ }
+
+-static bool qgroup_has_usage(struct btrfs_qgroup *qgroup)
++/*
++ * Return 0 if we can not delete the qgroup (not empty or has children etc).
++ * Return >0 if we can delete the qgroup.
++ * Return <0 for other errors during tree search.
++ */
++static int can_delete_qgroup(struct btrfs_fs_info *fs_info, struct btrfs_qgroup *qgroup)
+ {
+- return (qgroup->rfer > 0 || qgroup->rfer_cmpr > 0 ||
+- qgroup->excl > 0 || qgroup->excl_cmpr > 0 ||
+- qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA] > 0 ||
+- qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC] > 0 ||
+- qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS] > 0);
++ struct btrfs_key key;
++ struct btrfs_path *path;
++ int ret;
++
++ /*
++ * Squota would never be inconsistent, but there can still be case
++ * where a dropped subvolume still has qgroup numbers, and squota
++ * relies on such qgroup for future accounting.
++ *
++ * So for squota, do not allow dropping any non-zero qgroup.
++ */
++ if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE &&
++ (qgroup->rfer || qgroup->excl || qgroup->excl_cmpr || qgroup->rfer_cmpr))
++ return 0;
++
++ /* For higher level qgroup, we can only delete it if it has no child. */
++ if (btrfs_qgroup_level(qgroup->qgroupid)) {
++ if (!list_empty(&qgroup->members))
++ return 0;
++ return 1;
++ }
++
++ /*
++ * For level-0 qgroups, we can only delete it if it has no subvolume
++ * for it.
++ * This means even a subvolume is unlinked but not yet fully dropped,
++ * we can not delete the qgroup.
++ */
++ key.objectid = qgroup->qgroupid;
++ key.type = BTRFS_ROOT_ITEM_KEY;
++ key.offset = -1ULL;
++ path = btrfs_alloc_path();
++ if (!path)
++ return -ENOMEM;
++
++ ret = btrfs_find_root(fs_info->tree_root, &key, path, NULL, NULL);
++ btrfs_free_path(path);
++ /*
++ * The @ret from btrfs_find_root() exactly matches our definition for
++ * the return value, thus can be returned directly.
++ */
++ return ret;
+ }
+
+ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
+@@ -1778,7 +1820,10 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
+ goto out;
+ }
+
+- if (is_fstree(qgroupid) && qgroup_has_usage(qgroup)) {
++ ret = can_delete_qgroup(fs_info, qgroup);
++ if (ret < 0)
++ goto out;
++ if (ret == 0) {
+ ret = -EBUSY;
+ goto out;
+ }
+@@ -1803,6 +1848,34 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
+ }
+
+ spin_lock(&fs_info->qgroup_lock);
++ /*
++ * Warn on reserved space. The subvolume should has no child nor
++ * corresponding subvolume.
++ * Thus its reserved space should all be zero, no matter if qgroup
++ * is consistent or the mode.
++ */
++ WARN_ON(qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA] ||
++ qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC] ||
++ qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS]);
++ /*
++ * The same for rfer/excl numbers, but that's only if our qgroup is
++ * consistent and if it's in regular qgroup mode.
++ * For simple mode it's not as accurate thus we can hit non-zero values
++ * very frequently.
++ */
++ if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL &&
++ !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT)) {
++ if (WARN_ON(qgroup->rfer || qgroup->excl ||
++ qgroup->rfer_cmpr || qgroup->excl_cmpr)) {
++ btrfs_warn_rl(fs_info,
++"to be deleted qgroup %u/%llu has non-zero numbers, rfer %llu rfer_cmpr %llu excl %llu excl_cmpr %llu",
++ btrfs_qgroup_level(qgroup->qgroupid),
++ btrfs_qgroup_subvolid(qgroup->qgroupid),
++ qgroup->rfer, qgroup->rfer_cmpr,
++ qgroup->excl, qgroup->excl_cmpr);
++ qgroup_mark_inconsistent(fs_info);
++ }
++ }
+ del_qgroup_rb(fs_info, qgroupid);
+ spin_unlock(&fs_info->qgroup_lock);
+
+--
+2.43.0
+
--- /dev/null
+From 666a991c852d3748315086d61c152b9ccf6e20b1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 19 Jul 2024 15:19:02 +0100
+Subject: cachefiles: Set the max subreq size for cache writes to MAX_RW_COUNT
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit 51d37982bbac3ea0ca21b2797a9cb0044272b3aa ]
+
+Set the maximum size of a subrequest that writes to cachefiles to be
+MAX_RW_COUNT so that we don't overrun the maximum write we can make to the
+backing filesystem.
+
+Signed-off-by: David Howells <dhowells@redhat.com>
+Link: https://lore.kernel.org/r/1599005.1721398742@warthog.procyon.org.uk
+cc: Jeff Layton <jlayton@kernel.org>
+cc: netfs@lists.linux.dev
+cc: linux-fsdevel@vger.kernel.org
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/cachefiles/io.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/cachefiles/io.c b/fs/cachefiles/io.c
+index e667dbcd20e8..a91acd03ee12 100644
+--- a/fs/cachefiles/io.c
++++ b/fs/cachefiles/io.c
+@@ -630,7 +630,7 @@ static void cachefiles_prepare_write_subreq(struct netfs_io_subrequest *subreq)
+
+ _enter("W=%x[%x] %llx", wreq->debug_id, subreq->debug_index, subreq->start);
+
+- subreq->max_len = ULONG_MAX;
++ subreq->max_len = MAX_RW_COUNT;
+ subreq->max_nr_segs = BIO_MAX_VECS;
+
+ if (!cachefiles_cres_file(cres)) {
+--
+2.43.0
+
--- /dev/null
+From e4e8cf347f2cf588119023357f91de977ecd5ac2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 22 Jul 2024 12:28:42 -0700
+Subject: can: bcm: Remove proc entry when dev is unregistered.
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit 76fe372ccb81b0c89b6cd2fec26e2f38c958be85 ]
+
+syzkaller reported a warning in bcm_connect() below. [0]
+
+The repro calls connect() to vxcan1, removes vxcan1, and calls
+connect() with ifindex == 0.
+
+Calling connect() for a BCM socket allocates a proc entry.
+Then, bcm_sk(sk)->bound is set to 1 to prevent further connect().
+
+However, removing the bound device resets bcm_sk(sk)->bound to 0
+in bcm_notify().
+
+The 2nd connect() tries to allocate a proc entry with the same
+name and sets bcm_sk(sk)->bcm_proc_read to NULL, leaking the
+original proc entry.
+
+Since the proc entry is available only for connect()ed sockets,
+let's clean up the entry when the bound netdev is unregistered.
+
+[0]:
+proc_dir_entry 'can-bcm/2456' already registered
+WARNING: CPU: 1 PID: 394 at fs/proc/generic.c:376 proc_register+0x645/0x8f0 fs/proc/generic.c:375
+Modules linked in:
+CPU: 1 PID: 394 Comm: syz-executor403 Not tainted 6.10.0-rc7-g852e42cc2dd4
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.16.3-0-ga6ed6b701f0a-prebuilt.qemu.org 04/01/2014
+RIP: 0010:proc_register+0x645/0x8f0 fs/proc/generic.c:375
+Code: 00 00 00 00 00 48 85 ed 0f 85 97 02 00 00 4d 85 f6 0f 85 9f 02 00 00 48 c7 c7 9b cb cf 87 48 89 de 4c 89 fa e8 1c 6f eb fe 90 <0f> 0b 90 90 48 c7 c7 98 37 99 89 e8 cb 7e 22 05 bb 00 00 00 10 48
+RSP: 0018:ffa0000000cd7c30 EFLAGS: 00010246
+RAX: 9e129be1950f0200 RBX: ff1100011b51582c RCX: ff1100011857cd80
+RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000002
+RBP: 0000000000000000 R08: ffd400000000000f R09: ff1100013e78cac0
+R10: ffac800000cd7980 R11: ff1100013e12b1f0 R12: 0000000000000000
+R13: 0000000000000000 R14: 0000000000000000 R15: ff1100011a99a2ec
+FS: 00007fbd7086f740(0000) GS:ff1100013fd00000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00000000200071c0 CR3: 0000000118556004 CR4: 0000000000771ef0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe07f0 DR7: 0000000000000400
+PKRU: 55555554
+Call Trace:
+ <TASK>
+ proc_create_net_single+0x144/0x210 fs/proc/proc_net.c:220
+ bcm_connect+0x472/0x840 net/can/bcm.c:1673
+ __sys_connect_file net/socket.c:2049 [inline]
+ __sys_connect+0x5d2/0x690 net/socket.c:2066
+ __do_sys_connect net/socket.c:2076 [inline]
+ __se_sys_connect net/socket.c:2073 [inline]
+ __x64_sys_connect+0x8f/0x100 net/socket.c:2073
+ do_syscall_x64 arch/x86/entry/common.c:52 [inline]
+ do_syscall_64+0xd9/0x1c0 arch/x86/entry/common.c:83
+ entry_SYSCALL_64_after_hwframe+0x4b/0x53
+RIP: 0033:0x7fbd708b0e5d
+Code: ff c3 66 2e 0f 1f 84 00 00 00 00 00 90 f3 0f 1e fa 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 73 9f 1b 00 f7 d8 64 89 01 48
+RSP: 002b:00007fff8cd33f08 EFLAGS: 00000246 ORIG_RAX: 000000000000002a
+RAX: ffffffffffffffda RBX: 0000000000000003 RCX: 00007fbd708b0e5d
+RDX: 0000000000000010 RSI: 0000000020000040 RDI: 0000000000000003
+RBP: 0000000000000000 R08: 0000000000000040 R09: 0000000000000040
+R10: 0000000000000040 R11: 0000000000000246 R12: 00007fff8cd34098
+R13: 0000000000401280 R14: 0000000000406de8 R15: 00007fbd70ab9000
+ </TASK>
+remove_proc_entry: removing non-empty directory 'net/can-bcm', leaking at least '2456'
+
+Fixes: ffd980f976e7 ("[CAN]: Add broadcast manager (bcm) protocol")
+Reported-by: syzkaller <syzkaller@googlegroups.com>
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://lore.kernel.org/all/20240722192842.37421-1-kuniyu@amazon.com
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/can/bcm.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/net/can/bcm.c b/net/can/bcm.c
+index 27d5fcf0eac9..46d3ec3aa44b 100644
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -1470,6 +1470,10 @@ static void bcm_notify(struct bcm_sock *bo, unsigned long msg,
+
+ /* remove device reference, if this is our bound device */
+ if (bo->bound && bo->ifindex == dev->ifindex) {
++#if IS_ENABLED(CONFIG_PROC_FS)
++ if (sock_net(sk)->can.bcmproc_dir && bo->bcm_proc_read)
++ remove_proc_entry(bo->procname, sock_net(sk)->can.bcmproc_dir);
++#endif
+ bo->bound = 0;
+ bo->ifindex = 0;
+ notify_enodev = 1;
+--
+2.43.0
+
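A hedged user-space sketch of the reported sequence (assumptions: a vxcan1 interface exists and is deleted out of band between the two connect() calls; error handling is omitted):

#include <linux/can.h>
#include <net/if.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        struct sockaddr_can addr = { .can_family = AF_CAN };
        int s = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);

        /* First connect() binds the BCM socket and registers its
         * per-socket entry under /proc/net/can-bcm.
         */
        addr.can_ifindex = if_nametoindex("vxcan1");
        connect(s, (struct sockaddr *)&addr, sizeof(addr));

        /* ... vxcan1 is deleted here, which resets bo->bound in
         * bcm_notify() ...
         */

        /* Second connect() with ifindex 0: before the fix this tried to
         * register a proc entry with the same name, leaking the old one.
         */
        addr.can_ifindex = 0;
        connect(s, (struct sockaddr *)&addr, sizeof(addr));

        close(s);
        return 0;
}
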
--- /dev/null
+From d1557b14bbccca1c11e73def7bddf7614c6c3cce Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Jun 2024 20:13:19 +0200
+Subject: can: kvaser_pciefd: Move reset of DMA RX buffers to the end of the
+ ISR
+
+From: Martin Jocic <martin.jocic@kvaser.com>
+
+[ Upstream commit 48f827d4f48f5243e37b9240029ce3f456d1f490 ]
+
+A new interrupt is triggered by resetting the DMA RX buffers.
+Since MSI interrupts are faster than legacy interrupts, the reset
+of the DMA buffers must be moved to the very end of the ISR,
+otherwise a new MSI interrupt will be masked by the current one.
+
+Signed-off-by: Martin Jocic <martin.jocic@kvaser.com>
+Link: https://lore.kernel.org/all/20240620181320.235465-2-martin.jocic@kvaser.com
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Stable-dep-of: dd885d90c047 ("can: kvaser_pciefd: Use a single write when releasing RX buffers")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/can/kvaser_pciefd.c | 30 ++++++++++++++++++------------
+ 1 file changed, 18 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
+index a026ea2f5b35..cc39befc9290 100644
+--- a/drivers/net/can/kvaser_pciefd.c
++++ b/drivers/net/can/kvaser_pciefd.c
+@@ -1640,23 +1640,15 @@ static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
+ return res;
+ }
+
+-static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
++static u32 kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
+ {
+ u32 irq = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
+
+- if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
++ if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0)
+ kvaser_pciefd_read_buffer(pcie, 0);
+- /* Reset DMA buffer 0 */
+- iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
+- KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
+- }
+
+- if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
++ if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1)
+ kvaser_pciefd_read_buffer(pcie, 1);
+- /* Reset DMA buffer 1 */
+- iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
+- KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
+- }
+
+ if (irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
+ irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
+@@ -1665,6 +1657,7 @@ static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
+ dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);
+
+ iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
++ return irq;
+ }
+
+ static void kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
+@@ -1692,19 +1685,32 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
+ struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
+ const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask;
+ u32 pci_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie));
++ u32 srb_irq = 0;
+ int i;
+
+ if (!(pci_irq & irq_mask->all))
+ return IRQ_NONE;
+
+ if (pci_irq & irq_mask->kcan_rx0)
+- kvaser_pciefd_receive_irq(pcie);
++ srb_irq = kvaser_pciefd_receive_irq(pcie);
+
+ for (i = 0; i < pcie->nr_channels; i++) {
+ if (pci_irq & irq_mask->kcan_tx[i])
+ kvaser_pciefd_transmit_irq(pcie->can[i]);
+ }
+
++ if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
++ /* Reset DMA buffer 0, may trigger new interrupt */
++ iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
++ KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
++ }
++
++ if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
++ /* Reset DMA buffer 1, may trigger new interrupt */
++ iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
++ KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
++ }
++
+ return IRQ_HANDLED;
+ }
+
+--
+2.43.0
+
--- /dev/null
+From 71c620518f08caddf862139c6940d8a2e588c2d9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 Jun 2024 17:15:20 +0200
+Subject: can: kvaser_pciefd: Remove unnecessary comment
+
+From: Martin Jocic <martin.jocic@kvaser.com>
+
+[ Upstream commit 11d186697ceb10b68c6a1fd505635346b1ccd055 ]
+
+The code speaks for itself.
+
+Signed-off-by: Martin Jocic <martin.jocic@kvaser.com>
+Link: https://lore.kernel.org/all/20240614151524.2718287-4-martin.jocic@kvaser.com
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Stable-dep-of: dd885d90c047 ("can: kvaser_pciefd: Use a single write when releasing RX buffers")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/can/kvaser_pciefd.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
+index aebc221b82c2..3ac18dd0a022 100644
+--- a/drivers/net/can/kvaser_pciefd.c
++++ b/drivers/net/can/kvaser_pciefd.c
+@@ -1701,7 +1701,6 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
+ kvaser_pciefd_receive_irq(pcie);
+
+ for (i = 0; i < pcie->nr_channels; i++) {
+- /* Check that mask matches channel (i) IRQ mask */
+ if (board_irq & irq_mask->kcan_tx[i])
+ kvaser_pciefd_transmit_irq(pcie->can[i]);
+ }
+--
+2.43.0
+
--- /dev/null
+From 7edce4499e4192cc599312e6baf30fcfe539b4e6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 Jun 2024 17:15:23 +0200
+Subject: can: kvaser_pciefd: Rename board_irq to pci_irq
+
+From: Martin Jocic <martin.jocic@kvaser.com>
+
+[ Upstream commit cbf88a6ba7bb6ce0d3131b119298f73bd7b18459 ]
+
+Rename the variable name board_irq in the ISR to pci_irq to
+be more specific and to match the macro by which it is read.
+
+Signed-off-by: Martin Jocic <martin.jocic@kvaser.com>
+Link: https://lore.kernel.org/all/20240614151524.2718287-7-martin.jocic@kvaser.com
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Stable-dep-of: dd885d90c047 ("can: kvaser_pciefd: Use a single write when releasing RX buffers")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/can/kvaser_pciefd.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
+index 3ac18dd0a022..a026ea2f5b35 100644
+--- a/drivers/net/can/kvaser_pciefd.c
++++ b/drivers/net/can/kvaser_pciefd.c
+@@ -1691,17 +1691,17 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
+ {
+ struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
+ const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask;
+- u32 board_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie));
++ u32 pci_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie));
+ int i;
+
+- if (!(board_irq & irq_mask->all))
++ if (!(pci_irq & irq_mask->all))
+ return IRQ_NONE;
+
+- if (board_irq & irq_mask->kcan_rx0)
++ if (pci_irq & irq_mask->kcan_rx0)
+ kvaser_pciefd_receive_irq(pcie);
+
+ for (i = 0; i < pcie->nr_channels; i++) {
+- if (board_irq & irq_mask->kcan_tx[i])
++ if (pci_irq & irq_mask->kcan_tx[i])
+ kvaser_pciefd_transmit_irq(pcie->can[i]);
+ }
+
+--
+2.43.0
+
--- /dev/null
+From 9008cac49ed55ba5a8714d0c626a0b504f052910 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 Jun 2024 17:15:19 +0200
+Subject: can: kvaser_pciefd: Skip redundant NULL pointer check in ISR
+
+From: Martin Jocic <martin.jocic@kvaser.com>
+
+[ Upstream commit ac765219c2c4e44f29063724c8d36435a3e61985 ]
+
+This check is already done at the creation of the net devices in
+kvaser_pciefd_setup_can_ctrls called from kvaser_pciefd_probe.
+
+If it fails, the driver won't load, so there should be no need to
+repeat the check inside the ISR. The number of channels is read
+from the FPGA and should be trusted.
+
+Signed-off-by: Martin Jocic <martin.jocic@kvaser.com>
+Link: https://lore.kernel.org/all/20240614151524.2718287-3-martin.jocic@kvaser.com
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Stable-dep-of: dd885d90c047 ("can: kvaser_pciefd: Use a single write when releasing RX buffers")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/can/kvaser_pciefd.c | 6 ------
+ 1 file changed, 6 deletions(-)
+
+diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
+index 7b5028b67cd5..aebc221b82c2 100644
+--- a/drivers/net/can/kvaser_pciefd.c
++++ b/drivers/net/can/kvaser_pciefd.c
+@@ -1701,12 +1701,6 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
+ kvaser_pciefd_receive_irq(pcie);
+
+ for (i = 0; i < pcie->nr_channels; i++) {
+- if (!pcie->can[i]) {
+- dev_err(&pcie->pci->dev,
+- "IRQ mask points to unallocated controller\n");
+- break;
+- }
+-
+ /* Check that mask matches channel (i) IRQ mask */
+ if (board_irq & irq_mask->kcan_tx[i])
+ kvaser_pciefd_transmit_irq(pcie->can[i]);
+--
+2.43.0
+
--- /dev/null
+From 755e803c93d0fbcebcca2a041a65ef384fa6448e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 30 Aug 2024 17:31:13 +0200
+Subject: can: kvaser_pciefd: Use a single write when releasing RX buffers
+
+From: Martin Jocic <martin.jocic@kvaser.com>
+
+[ Upstream commit dd885d90c047dbdd2773c1d33954cbd8747d81e2 ]
+
+Kvaser's PCIe cards use the KCAN FPGA IP block which has dual 4K
+buffers for incoming messages shared by all (currently up to eight)
+channels. While the driver processes messages in one buffer, new
+incoming messages are stored in the other and so on.
+
+The design of KCAN is such that a buffer must be fully read and then
+released. Releasing a buffer will make the FPGA switch buffers. If the
+other buffer contains at least one incoming message, the FPGA will also
+instantly issue a new interrupt; if not, the interrupt will be issued
+after receiving the first new message.
+
+With IRQx interrupts, it takes a little time for the interrupt to
+happen, enough for any previous ISR call to do its business and
+return, but MSI interrupts are way faster so this time is reduced to
+almost nothing.
+
+So with MSI, releasing the buffer HAS to be the very last action of
+the ISR before returning, otherwise the new interrupt might be
+"masked" by the kernel because the previous ISR call hasn't returned.
+And the interrupts are edge-triggered so we cannot lose one, or the
+ping-pong reading process will stop.
+
+This is why this patch modifies the driver to use a single write to
+the SRB_CMD register before returning.
+
+Signed-off-by: Martin Jocic <martin.jocic@kvaser.com>
+Reviewed-by: Vincent Mailhol <mailhol.vincent@wanadoo.fr>
+Link: https://patch.msgid.link/20240830153113.2081440-1-martin.jocic@kvaser.com
+Fixes: 26ad340e582d ("can: kvaser_pciefd: Add driver for Kvaser PCIEcan devices")
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/can/kvaser_pciefd.c | 18 ++++++++----------
+ 1 file changed, 8 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
+index cc39befc9290..ab15a2ae8a20 100644
+--- a/drivers/net/can/kvaser_pciefd.c
++++ b/drivers/net/can/kvaser_pciefd.c
+@@ -1686,6 +1686,7 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
+ const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask;
+ u32 pci_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie));
+ u32 srb_irq = 0;
++ u32 srb_release = 0;
+ int i;
+
+ if (!(pci_irq & irq_mask->all))
+@@ -1699,17 +1700,14 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
+ kvaser_pciefd_transmit_irq(pcie->can[i]);
+ }
+
+- if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
+- /* Reset DMA buffer 0, may trigger new interrupt */
+- iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
+- KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
+- }
++ if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD0)
++ srb_release |= KVASER_PCIEFD_SRB_CMD_RDB0;
+
+- if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
+- /* Reset DMA buffer 1, may trigger new interrupt */
+- iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
+- KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
+- }
++ if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD1)
++ srb_release |= KVASER_PCIEFD_SRB_CMD_RDB1;
++
++ if (srb_release)
++ iowrite32(srb_release, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
+
+ return IRQ_HANDLED;
+ }
+--
+2.43.0
+
--- /dev/null
+From 03b17ffeb07cf8c70edbc9ee548fa734e6d98d0f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Aug 2024 20:30:45 +0200
+Subject: can: m_can: disable_all_interrupts, not clear active_interrupts
+
+From: Markus Schneider-Pargmann <msp@baylibre.com>
+
+[ Upstream commit a572fea86c9b06cd3e6e89d79d565b52cb7e7cff ]
+
+active_interrupts is a cache for the enabled interrupts and not the
+global masking of interrupts. Do not clear this variable, otherwise we
+may lose the state of the interrupts.
+
+Fixes: 07f25091ca02 ("can: m_can: Implement receive coalescing")
+Signed-off-by: Markus Schneider-Pargmann <msp@baylibre.com>
+Link: https://lore.kernel.org/all/20240805183047.305630-6-msp@baylibre.com
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/can/m_can/m_can.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
+index d15655df6393..073842ab210d 100644
+--- a/drivers/net/can/m_can/m_can.c
++++ b/drivers/net/can/m_can/m_can.c
+@@ -449,7 +449,6 @@ static inline void m_can_disable_all_interrupts(struct m_can_classdev *cdev)
+ {
+ m_can_coalescing_disable(cdev);
+ m_can_write(cdev, M_CAN_ILE, 0x0);
+- cdev->active_interrupts = 0x0;
+
+ if (!cdev->net->irq) {
+ dev_dbg(cdev->dev, "Stop hrtimer\n");
+--
+2.43.0
+
--- /dev/null
+From 1c72427406e148c20ff1ecde5b5895f50e09d434 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Aug 2024 20:30:44 +0200
+Subject: can: m_can: Do not cancel timer from within timer
+
+From: Markus Schneider-Pargmann <msp@baylibre.com>
+
+[ Upstream commit 4d5159bfafa8d1a205d8213b7434e0402588b9ed ]
+
+On setups without interrupts, the interrupt handler is called from a
+timer callback. For non-peripheral receives napi is scheduled,
+interrupts are disabled and the timer is canceled with a blocking call.
+In case of an error this can happen as well.
+
+Check if napi is scheduled in the timer callback after the interrupt
+handler executed. If napi is scheduled, the timer is disabled. It will
+be reenabled by m_can_poll().
+
+Return error values from the interrupt handler so that interrupt threads
+and the timer callback can deal with them differently. In case of the timer
+we only disable the timer. The rest will be done when stopping the
+interface.
+
+Fixes: b382380c0d2d ("can: m_can: Add hrtimer to generate software interrupt")
+Fixes: a163c5761019 ("can: m_can: Start/Cancel polling timer together with interrupts")
+Signed-off-by: Markus Schneider-Pargmann <msp@baylibre.com>
+Link: https://lore.kernel.org/all/20240805183047.305630-5-msp@baylibre.com
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/can/m_can/m_can.c | 57 ++++++++++++++++++++++++++---------
+ 1 file changed, 42 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
+index 2d73fa7f8258..d15655df6393 100644
+--- a/drivers/net/can/m_can/m_can.c
++++ b/drivers/net/can/m_can/m_can.c
+@@ -453,7 +453,7 @@ static inline void m_can_disable_all_interrupts(struct m_can_classdev *cdev)
+
+ if (!cdev->net->irq) {
+ dev_dbg(cdev->dev, "Stop hrtimer\n");
+- hrtimer_cancel(&cdev->hrtimer);
++ hrtimer_try_to_cancel(&cdev->hrtimer);
+ }
+ }
+
+@@ -1167,11 +1167,15 @@ static void m_can_coalescing_update(struct m_can_classdev *cdev, u32 ir)
+ HRTIMER_MODE_REL);
+ }
+
+-static irqreturn_t m_can_isr(int irq, void *dev_id)
++/* This interrupt handler is called either from the interrupt thread or a
++ * hrtimer. One implication is that it must not block, e.g. it cannot
++ * cancel a timer with a blocking hrtimer_cancel().
++ */
++static int m_can_interrupt_handler(struct m_can_classdev *cdev)
+ {
+- struct net_device *dev = (struct net_device *)dev_id;
+- struct m_can_classdev *cdev = netdev_priv(dev);
++ struct net_device *dev = cdev->net;
+ u32 ir;
++ int ret;
+
+ if (pm_runtime_suspended(cdev->dev))
+ return IRQ_NONE;
+@@ -1198,11 +1202,9 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
+ m_can_disable_all_interrupts(cdev);
+ napi_schedule(&cdev->napi);
+ } else {
+- int pkts;
+-
+- pkts = m_can_rx_handler(dev, NAPI_POLL_WEIGHT, ir);
+- if (pkts < 0)
+- goto out_fail;
++ ret = m_can_rx_handler(dev, NAPI_POLL_WEIGHT, ir);
++ if (ret < 0)
++ return ret;
+ }
+ }
+
+@@ -1220,8 +1222,9 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
+ } else {
+ if (ir & (IR_TEFN | IR_TEFW)) {
+ /* New TX FIFO Element arrived */
+- if (m_can_echo_tx_event(dev) != 0)
+- goto out_fail;
++ ret = m_can_echo_tx_event(dev);
++ if (ret != 0)
++ return ret;
+ }
+ }
+
+@@ -1229,16 +1232,31 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
+ can_rx_offload_threaded_irq_finish(&cdev->offload);
+
+ return IRQ_HANDLED;
++}
+
+-out_fail:
+- m_can_disable_all_interrupts(cdev);
+- return IRQ_HANDLED;
++static irqreturn_t m_can_isr(int irq, void *dev_id)
++{
++ struct net_device *dev = (struct net_device *)dev_id;
++ struct m_can_classdev *cdev = netdev_priv(dev);
++ int ret;
++
++ ret = m_can_interrupt_handler(cdev);
++ if (ret < 0) {
++ m_can_disable_all_interrupts(cdev);
++ return IRQ_HANDLED;
++ }
++
++ return ret;
+ }
+
+ static enum hrtimer_restart m_can_coalescing_timer(struct hrtimer *timer)
+ {
+ struct m_can_classdev *cdev = container_of(timer, struct m_can_classdev, hrtimer);
+
++ if (cdev->can.state == CAN_STATE_BUS_OFF ||
++ cdev->can.state == CAN_STATE_STOPPED)
++ return HRTIMER_NORESTART;
++
+ irq_wake_thread(cdev->net->irq, cdev->net);
+
+ return HRTIMER_NORESTART;
+@@ -1930,8 +1948,17 @@ static enum hrtimer_restart hrtimer_callback(struct hrtimer *timer)
+ {
+ struct m_can_classdev *cdev = container_of(timer, struct
+ m_can_classdev, hrtimer);
++ int ret;
++
++ if (cdev->can.state == CAN_STATE_BUS_OFF ||
++ cdev->can.state == CAN_STATE_STOPPED)
++ return HRTIMER_NORESTART;
++
++ ret = m_can_interrupt_handler(cdev);
+
+- m_can_isr(0, cdev->net);
++ /* On error or if napi is scheduled to read, stop the timer */
++ if (ret < 0 || napi_is_scheduled(&cdev->napi))
++ return HRTIMER_NORESTART;
+
+ hrtimer_forward_now(timer, ms_to_ktime(HRTIMER_POLL_INTERVAL_MS));
+
+--
+2.43.0
+
--- /dev/null
+From 4de4db15e37244e0ab6d7aa146016dff067cf02d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Aug 2024 15:01:58 +0100
+Subject: can: m_can: Release irq on error in m_can_open
+
+From: Simon Horman <horms@kernel.org>
+
+[ Upstream commit 06d4ef3056a7ac31be331281bb7a6302ef5a7f8a ]
+
+It appears that the irq requested in m_can_open() may be leaked
+if an error subsequently occurs: if m_can_start() fails.
+
+Address this by calling free_irq in the unwind path for
+such cases.
+
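+The shape of the unwind, as a generic sketch (the handler and start
+helper names here are made up, not the driver's):
+
+  static int open_sketch(struct net_device *dev)
+  {
+          int err;
+
+          err = request_irq(dev->irq, sketch_isr, IRQF_SHARED, dev->name, dev);
+          if (err)
+                  return err;
+
+          err = sketch_start_hw(dev);     /* stands in for m_can_start() */
+          if (err)
+                  goto err_free_irq;      /* previously the irq was leaked here */
+
+          return 0;
+
+  err_free_irq:
+          free_irq(dev->irq, dev);
+          return err;
+  }
+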
+Flagged by Smatch.
+Compile tested only.
+
+Fixes: eaacfeaca7ad ("can: m_can: Call the RAM init directly from m_can_chip_config")
+Acked-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Simon Horman <horms@kernel.org>
+Link: https://lore.kernel.org/all/20240805-mcan-irq-v2-1-7154c0484819@kernel.org
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/can/m_can/m_can.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
+index 14b231c4d7ec..205a6cb4470f 100644
+--- a/drivers/net/can/m_can/m_can.c
++++ b/drivers/net/can/m_can/m_can.c
+@@ -2009,7 +2009,7 @@ static int m_can_open(struct net_device *dev)
+ /* start the m_can controller */
+ err = m_can_start(dev);
+ if (err)
+- goto exit_irq_fail;
++ goto exit_start_fail;
+
+ if (!cdev->is_peripheral)
+ napi_enable(&cdev->napi);
+@@ -2018,6 +2018,9 @@ static int m_can_open(struct net_device *dev)
+
+ return 0;
+
++exit_start_fail:
++ if (cdev->is_peripheral || dev->irq)
++ free_irq(dev->irq, dev);
+ exit_irq_fail:
+ if (cdev->is_peripheral)
+ destroy_workqueue(cdev->tx_wq);
+--
+2.43.0
+
--- /dev/null
+From 015ecd572da92031c31c92152d744826fb0bb339 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Aug 2024 20:30:42 +0200
+Subject: can: m_can: Remove coalesing disable in isr during suspend
+
+From: Markus Schneider-Pargmann <msp@baylibre.com>
+
+[ Upstream commit 6eff1cead75ff330bb33264424c1da6cc7179ab8 ]
+
+We don't need to disable coalescing when the interrupt handler executes
+while the chip is suspended. The coalescing is already reset during
+suspend.
+
+Fixes: 07f25091ca02 ("can: m_can: Implement receive coalescing")
+Signed-off-by: Markus Schneider-Pargmann <msp@baylibre.com>
+Link: https://lore.kernel.org/all/20240805183047.305630-3-msp@baylibre.com
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/can/m_can/m_can.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
+index 257d5bc0ae9e..dba1788f7fbb 100644
+--- a/drivers/net/can/m_can/m_can.c
++++ b/drivers/net/can/m_can/m_can.c
+@@ -1189,10 +1189,8 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
+ struct m_can_classdev *cdev = netdev_priv(dev);
+ u32 ir;
+
+- if (pm_runtime_suspended(cdev->dev)) {
+- m_can_coalescing_disable(cdev);
++ if (pm_runtime_suspended(cdev->dev))
+ return IRQ_NONE;
+- }
+
+ ir = m_can_read(cdev, M_CAN_IR);
+ m_can_coalescing_update(cdev, ir);
+--
+2.43.0
+
--- /dev/null
+From 97b2f3e9c73cb204f97e9e810228ff5e9bf081d5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Aug 2024 20:30:43 +0200
+Subject: can: m_can: Remove m_can_rx_peripheral indirection
+
+From: Markus Schneider-Pargmann <msp@baylibre.com>
+
+[ Upstream commit 40e4552eeef0e3090a5988de15889795936fd38f ]
+
+m_can_rx_peripheral() is a wrapper around m_can_rx_handler() that calls
+m_can_disable_all_interrupts() on error. The same handling for the same
+error path is done in m_can_isr() as well.
+
+So remove m_can_rx_peripheral() and do the call from m_can_isr()
+directly.
+
+Signed-off-by: Markus Schneider-Pargmann <msp@baylibre.com>
+Link: https://lore.kernel.org/all/20240805183047.305630-4-msp@baylibre.com
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Stable-dep-of: 4d5159bfafa8 ("can: m_can: Do not cancel timer from within timer")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/can/m_can/m_can.c | 18 +-----------------
+ 1 file changed, 1 insertion(+), 17 deletions(-)
+
+diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
+index dba1788f7fbb..2d73fa7f8258 100644
+--- a/drivers/net/can/m_can/m_can.c
++++ b/drivers/net/can/m_can/m_can.c
+@@ -1003,22 +1003,6 @@ static int m_can_rx_handler(struct net_device *dev, int quota, u32 irqstatus)
+ return work_done;
+ }
+
+-static int m_can_rx_peripheral(struct net_device *dev, u32 irqstatus)
+-{
+- struct m_can_classdev *cdev = netdev_priv(dev);
+- int work_done;
+-
+- work_done = m_can_rx_handler(dev, NAPI_POLL_WEIGHT, irqstatus);
+-
+- /* Don't re-enable interrupts if the driver had a fatal error
+- * (e.g., FIFO read failure).
+- */
+- if (work_done < 0)
+- m_can_disable_all_interrupts(cdev);
+-
+- return work_done;
+-}
+-
+ static int m_can_poll(struct napi_struct *napi, int quota)
+ {
+ struct net_device *dev = napi->dev;
+@@ -1216,7 +1200,7 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
+ } else {
+ int pkts;
+
+- pkts = m_can_rx_peripheral(dev, ir);
++ pkts = m_can_rx_handler(dev, NAPI_POLL_WEIGHT, ir);
+ if (pkts < 0)
+ goto out_fail;
+ }
+--
+2.43.0
+
--- /dev/null
+From 5446512c2f0c1cb2cc919922dda72c38076911b3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Aug 2024 20:30:46 +0200
+Subject: can: m_can: Reset cached active_interrupts on start
+
+From: Markus Schneider-Pargmann <msp@baylibre.com>
+
+[ Upstream commit 733dbf556cd5b71d5e6f6aa7a93f117b438ab785 ]
+
+To force writing the enabled interrupts, reset the active_interrupts
+cache.
+
+Fixes: 07f25091ca02 ("can: m_can: Implement receive coalescing")
+Signed-off-by: Markus Schneider-Pargmann <msp@baylibre.com>
+Link: https://lore.kernel.org/all/20240805183047.305630-7-msp@baylibre.com
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/can/m_can/m_can.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
+index 073842ab210d..e4f0a382c216 100644
+--- a/drivers/net/can/m_can/m_can.c
++++ b/drivers/net/can/m_can/m_can.c
+@@ -1505,6 +1505,7 @@ static int m_can_chip_config(struct net_device *dev)
+ else
+ interrupts &= ~(IR_ERR_LEC_31X);
+ }
++ cdev->active_interrupts = 0;
+ m_can_interrupt_enable(cdev, interrupts);
+
+ /* route all interrupts to INT0 */
+--
+2.43.0
+
--- /dev/null
+From a9c0836f4d2085bc61ab66683afe65ab5af40da0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Aug 2024 20:30:41 +0200
+Subject: can: m_can: Reset coalescing during suspend/resume
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Markus Schneider-Pargmann <msp@baylibre.com>
+
+[ Upstream commit a651261ac74298535f6d6316ebe27beceb6b17b1 ]
+
+During resume the interrupts are limited to IR_RF0N and the chip keeps
+running. In this case, if coalescing is enabled and active, we may miss
+waterlevel interrupts during suspend. It is safer to reset the
+coalescing by stopping the timer and adding IR_RF0N | IR_TEFN to the
+interrupts.
+
+This is a theoretical issue and probably extremely rare.
+
+Cc: Martin Hundebøll <martin@geanix.com>
+Fixes: 4a94d7e31cf5 ("can: m_can: allow keeping the transceiver running in suspend")
+Signed-off-by: Markus Schneider-Pargmann <msp@baylibre.com>
+Link: https://lore.kernel.org/all/20240805183047.305630-2-msp@baylibre.com
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/can/m_can/m_can.c | 16 +++++++++++++---
+ 1 file changed, 13 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
+index 205a6cb4470f..257d5bc0ae9e 100644
+--- a/drivers/net/can/m_can/m_can.c
++++ b/drivers/net/can/m_can/m_can.c
+@@ -2387,12 +2387,15 @@ int m_can_class_suspend(struct device *dev)
+ netif_device_detach(ndev);
+
+ /* leave the chip running with rx interrupt enabled if it is
+- * used as a wake-up source.
++ * used as a wake-up source. Coalescing needs to be reset then,
++ * the timer is cancelled here, interrupts are done in resume.
+ */
+- if (cdev->pm_wake_source)
++ if (cdev->pm_wake_source) {
++ hrtimer_cancel(&cdev->hrtimer);
+ m_can_write(cdev, M_CAN_IE, IR_RF0N);
+- else
++ } else {
+ m_can_stop(ndev);
++ }
+
+ m_can_clk_stop(cdev);
+ }
+@@ -2422,6 +2425,13 @@ int m_can_class_resume(struct device *dev)
+ return ret;
+
+ if (cdev->pm_wake_source) {
++ /* Restore active interrupts but disable coalescing as
++ * we may have missed important waterlevel interrupts
++ * between suspend and resume. Timers are already
++ * stopped in suspend. Here we enable all interrupts
++ * again.
++ */
++ cdev->active_interrupts |= IR_RF0N | IR_TEFN;
+ m_can_write(cdev, M_CAN_IE, cdev->active_interrupts);
+ } else {
+ ret = m_can_start(ndev);
+--
+2.43.0
+
--- /dev/null
+From be84cf8376ee17e3ecf410a9150a46a2e10120ff Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Jul 2024 17:28:27 +0200
+Subject: can: mcp251xfd: fix ring configuration when switching from CAN-CC to
+ CAN-FD mode
+
+From: Marc Kleine-Budde <mkl@pengutronix.de>
+
+[ Upstream commit 50ea5449c56310d2d31c28ba91a59232116d3c1e ]
+
+If the ring (rx, tx) and/or coalescing parameters (rx-frames-irq,
+tx-frames-irq) have been configured while the interface was in CAN-CC
+mode, but the interface is brought up in CAN-FD mode, the ring
+parameters might be too big.
+
+Use the default CAN-FD values in this case.
+
+Fixes: 9263c2e92be9 ("can: mcp251xfd: ring: add support for runtime configurable RX/TX ring parameters")
+Link: https://lore.kernel.org/all/20240805-mcp251xfd-fix-ringconfig-v1-1-72086f0ca5ee@pengutronix.de
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c | 11 +++++++++-
+ .../net/can/spi/mcp251xfd/mcp251xfd-ring.c | 20 ++++++++++++++++---
+ 2 files changed, 27 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c
+index 9e8e82cdba46..61b0d6fa52dd 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c
+@@ -97,7 +97,16 @@ void can_ram_get_layout(struct can_ram_layout *layout,
+ if (ring) {
+ u8 num_rx_coalesce = 0, num_tx_coalesce = 0;
+
+- num_rx = can_ram_rounddown_pow_of_two(config, &config->rx, 0, ring->rx_pending);
++ /* If the ring parameters have been configured in
++ * CAN-CC mode, but we are in CAN-FD mode now,
++ * they might be too big. Use the default CAN-FD values
++ * in this case.
++ */
++ num_rx = ring->rx_pending;
++ if (num_rx > layout->max_rx)
++ num_rx = layout->default_rx;
++
++ num_rx = can_ram_rounddown_pow_of_two(config, &config->rx, 0, num_rx);
+
+ /* The ethtool doc says:
+ * To disable coalescing, set usecs = 0 and max_frames = 1.
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
+index 4cb79a4f2461..3a941a71c78f 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
+@@ -468,11 +468,25 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
+
+ /* switching from CAN-2.0 to CAN-FD mode or vice versa */
+ if (fd_mode != test_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags)) {
++ const struct ethtool_ringparam ring = {
++ .rx_pending = priv->rx_obj_num,
++ .tx_pending = priv->tx->obj_num,
++ };
++ const struct ethtool_coalesce ec = {
++ .rx_coalesce_usecs_irq = priv->rx_coalesce_usecs_irq,
++ .rx_max_coalesced_frames_irq = priv->rx_obj_num_coalesce_irq,
++ .tx_coalesce_usecs_irq = priv->tx_coalesce_usecs_irq,
++ .tx_max_coalesced_frames_irq = priv->tx_obj_num_coalesce_irq,
++ };
+ struct can_ram_layout layout;
+
+- can_ram_get_layout(&layout, &mcp251xfd_ram_config, NULL, NULL, fd_mode);
+- priv->rx_obj_num = layout.default_rx;
+- tx_ring->obj_num = layout.default_tx;
++ can_ram_get_layout(&layout, &mcp251xfd_ram_config, &ring, &ec, fd_mode);
++
++ priv->rx_obj_num = layout.cur_rx;
++ priv->rx_obj_num_coalesce_irq = layout.rx_coalesce;
++
++ tx_ring->obj_num = layout.cur_tx;
++ priv->tx_obj_num_coalesce_irq = layout.tx_coalesce;
+ }
+
+ if (fd_mode) {
+--
+2.43.0
+
--- /dev/null
+From b2089ea40d871af4bb5a1106164a3266879ce8f2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 17 Jun 2024 10:39:43 -0400
+Subject: cgroup/cpuset: Delay setting of CS_CPU_EXCLUSIVE until valid
+ partition
+
+From: Waiman Long <longman@redhat.com>
+
+[ Upstream commit fe8cd2736e75c8ca3aed1ef181a834e41dc5310f ]
+
+The CS_CPU_EXCLUSIVE flag is currently set whenever cpuset.cpus.exclusive
+is set to make sure that the exclusivity test will be run to ensure its
+exclusiveness. At the same time, this flag can be changed whenever the
+partition root state is changed. For example, the CS_CPU_EXCLUSIVE flag
+will be reset whenever a partition root becomes invalid. This makes
+using CS_CPU_EXCLUSIVE to ensure exclusiveness a bit fragile.
+
+The current scheme also makes setting up a cpuset.cpus.exclusive
+hierarchy to enable a remote partition harder, as cpuset.cpus.exclusive
+cannot overlap with any cpuset.cpus of sibling cpusets if their
+cpuset.cpus.exclusive values aren't set.
+
+Solve these issues by deferring the setting of the CS_CPU_EXCLUSIVE flag
+until the cpuset becomes a valid partition root, while adding new checks
+in validate_change() to ensure that the cpuset.cpus.exclusive values of
+sibling cpusets cannot overlap.
+
+An additional check is also added to validate_change() to make sure that
+cpuset.cpus of one cpuset cannot be a subset of cpuset.cpus.exclusive
+of a sibling cpuset to avoid the problem that none of those CPUs will
+be available when these exclusive CPUs are extracted out to a newly
+enabled partition root. The Documentation/admin-guide/cgroup-v2.rst
+file is updated to document the new constraints.
+
+Signed-off-by: Waiman Long <longman@redhat.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ Documentation/admin-guide/cgroup-v2.rst | 8 ++++--
+ kernel/cgroup/cpuset.c | 36 ++++++++++++++++++++-----
+ 2 files changed, 35 insertions(+), 9 deletions(-)
+
+diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
+index 8fbb0519d556..b69f701b2485 100644
+--- a/Documentation/admin-guide/cgroup-v2.rst
++++ b/Documentation/admin-guide/cgroup-v2.rst
+@@ -2346,8 +2346,12 @@ Cpuset Interface Files
+ is always a subset of it.
+
+ Users can manually set it to a value that is different from
+- "cpuset.cpus". The only constraint in setting it is that the
+- list of CPUs must be exclusive with respect to its sibling.
++ "cpuset.cpus". One constraint in setting it is that the list of
++ CPUs must be exclusive with respect to "cpuset.cpus.exclusive"
++ of its sibling. If "cpuset.cpus.exclusive" of a sibling cgroup
++ isn't set, its "cpuset.cpus" value, if set, cannot be a subset
++ of it to leave at least one CPU available when the exclusive
++ CPUs are taken away.
+
+ For a parent cgroup, any one of its exclusive CPUs can only
+ be distributed to at most one of its child cgroups. Having an
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index fc1c6236460d..e8f24483e05f 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -826,17 +826,41 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
+
+ /*
+ * If either I or some sibling (!= me) is exclusive, we can't
+- * overlap
++ * overlap. exclusive_cpus cannot overlap with each other if set.
+ */
+ ret = -EINVAL;
+ cpuset_for_each_child(c, css, par) {
+- if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
+- c != cur) {
++ bool txset, cxset; /* Are exclusive_cpus set? */
++
++ if (c == cur)
++ continue;
++
++ txset = !cpumask_empty(trial->exclusive_cpus);
++ cxset = !cpumask_empty(c->exclusive_cpus);
++ if (is_cpu_exclusive(trial) || is_cpu_exclusive(c) ||
++ (txset && cxset)) {
+ if (!cpusets_are_exclusive(trial, c))
+ goto out;
++ } else if (txset || cxset) {
++ struct cpumask *xcpus, *acpus;
++
++ /*
++ * When just one of the exclusive_cpus's is set,
++ * cpus_allowed of the other cpuset, if set, cannot be
++ * a subset of it or none of those CPUs will be
++ * available if these exclusive CPUs are activated.
++ */
++ if (txset) {
++ xcpus = trial->exclusive_cpus;
++ acpus = c->cpus_allowed;
++ } else {
++ xcpus = c->exclusive_cpus;
++ acpus = trial->cpus_allowed;
++ }
++ if (!cpumask_empty(acpus) && cpumask_subset(acpus, xcpus))
++ goto out;
+ }
+ if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
+- c != cur &&
+ nodes_intersects(trial->mems_allowed, c->mems_allowed))
+ goto out;
+ }
+@@ -1376,7 +1400,7 @@ static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
+ */
+ static int update_partition_exclusive(struct cpuset *cs, int new_prs)
+ {
+- bool exclusive = (new_prs > 0);
++ bool exclusive = (new_prs > PRS_MEMBER);
+
+ if (exclusive && !is_cpu_exclusive(cs)) {
+ if (update_flag(CS_CPU_EXCLUSIVE, cs, 1))
+@@ -2624,8 +2648,6 @@ static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs,
+ retval = cpulist_parse(buf, trialcs->exclusive_cpus);
+ if (retval < 0)
+ return retval;
+- if (!is_cpu_exclusive(cs))
+- set_bit(CS_CPU_EXCLUSIVE, &trialcs->flags);
+ }
+
+ /* Nothing to do if the CPUs didn't change */
+--
+2.43.0
+
--- /dev/null
+From 4a422368a49757c18de0dd6197b9e12bf855bd3c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 3 Jul 2024 14:52:29 -0400
+Subject: cgroup: Protect css->cgroup write under css_set_lock
+
+From: Waiman Long <longman@redhat.com>
+
+[ Upstream commit 57b56d16800e8961278ecff0dc755d46c4575092 ]
+
+The writing of css->cgroup associated with the cgroup root in
+rebind_subsystems() is currently protected only by cgroup_mutex.
+However, the reading of css->cgroup in both proc_cpuset_show() and
+proc_cgroup_show() is protected just by css_set_lock. That makes the
+readers susceptible to racing problems like data tearing or caching.
+It is also a problem that can be reported by KCSAN.
+
+This can be fixed by using READ_ONCE() and WRITE_ONCE() to access
+css->cgroup. Alternatively, the writing of css->cgroup can be moved
+under css_set_lock as well which is done by this patch.
+
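+For reference, the annotation-based alternative mentioned above would
+look roughly like this; the patch takes the locking route instead:
+
+  /* writer side, in rebind_subsystems() */
+  WRITE_ONCE(css->cgroup, dcgrp);
+
+  /* reader side, in proc_cgroup_show()/proc_cpuset_show() */
+  struct cgroup *cgrp = READ_ONCE(css->cgroup);
+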
+Signed-off-by: Waiman Long <longman@redhat.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/cgroup/cgroup.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index e32b6972c478..278889170f94 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -1839,9 +1839,9 @@ int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
+ RCU_INIT_POINTER(scgrp->subsys[ssid], NULL);
+ rcu_assign_pointer(dcgrp->subsys[ssid], css);
+ ss->root = dst_root;
+- css->cgroup = dcgrp;
+
+ spin_lock_irq(&css_set_lock);
++ css->cgroup = dcgrp;
+ WARN_ON(!list_empty(&dcgrp->e_csets[ss->id]));
+ list_for_each_entry_safe(cset, cset_pos, &scgrp->e_csets[ss->id],
+ e_cset_node[ss->id]) {
+--
+2.43.0
+
--- /dev/null
+From e66af3593e3b9b7cb893188a521afb22c03ed907 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 27 Aug 2024 15:47:27 +0100
+Subject: cifs: Fix copy offload to flush destination region
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit 8101d6e112e2524e967368f920c404ae445a9757 ]
+
+Fix cifs_file_copychunk_range() to flush the destination region before
+invalidating it to avoid potential loss of data should the copy fail, in
+whole or in part, in some way.
+
+Fixes: 7b2404a886f8 ("cifs: Fix flushing, invalidation and file size with copy_file_range()")
+Signed-off-by: David Howells <dhowells@redhat.com>
+cc: Steve French <stfrench@microsoft.com>
+cc: Paulo Alcantara <pc@manguebit.com>
+cc: Shyam Prasad N <nspmangalore@gmail.com>
+cc: Rohith Surabattula <rohiths.msft@gmail.com>
+cc: Matthew Wilcox <willy@infradead.org>
+cc: Jeff Layton <jlayton@kernel.org>
+cc: linux-cifs@vger.kernel.org
+cc: linux-mm@kvack.org
+cc: linux-fsdevel@vger.kernel.org
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/cifsfs.c | 21 ++++-----------------
+ 1 file changed, 4 insertions(+), 17 deletions(-)
+
+diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
+index 2c4b357d85e2..a1acf5bd1e3a 100644
+--- a/fs/smb/client/cifsfs.c
++++ b/fs/smb/client/cifsfs.c
+@@ -1341,7 +1341,6 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
+ struct cifsFileInfo *smb_file_target;
+ struct cifs_tcon *src_tcon;
+ struct cifs_tcon *target_tcon;
+- unsigned long long destend, fstart, fend;
+ ssize_t rc;
+
+ cifs_dbg(FYI, "copychunk range\n");
+@@ -1391,25 +1390,13 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
+ goto unlock;
+ }
+
+- destend = destoff + len - 1;
+-
+- /* Flush the folios at either end of the destination range to prevent
+- * accidental loss of dirty data outside of the range.
++ /* Flush and invalidate all the folios in the destination region. If
++ * the copy was successful, then some of the flush is extra overhead,
++ * but we need to allow for the copy failing in some way (eg. ENOSPC).
+ */
+- fstart = destoff;
+- fend = destend;
+-
+- rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
++ rc = filemap_invalidate_inode(target_inode, true, destoff, destoff + len - 1);
+ if (rc)
+ goto unlock;
+- rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
+- if (rc)
+- goto unlock;
+- if (fend > target_cifsi->netfs.zero_point)
+- target_cifsi->netfs.zero_point = fend + 1;
+-
+- /* Discard all the folios that overlap the destination region. */
+- truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
+
+ fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
+ i_size_read(target_inode), 0);
+--
+2.43.0
+
--- /dev/null
+From 175ed474961329eb835de3fc7f06f20cf61ee38b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 Aug 2024 21:08:25 +0100
+Subject: cifs: Fix FALLOC_FL_ZERO_RANGE to preflush buffered part of target
+ region
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit 91d1dfae464987aaf6c79ff51d8674880fb3be77 ]
+
+Under certain conditions, the range to be cleared by FALLOC_FL_ZERO_RANGE
+may only be buffered locally and not yet have been flushed to the server.
+For example:
+
+ xfs_io -f -t -c "pwrite -S 0x41 0 4k" \
+ -c "pwrite -S 0x42 4k 4k" \
+ -c "fzero 0 4k" \
+ -c "pread -v 0 8k" /xfstest.test/foo
+
+will write two 4KiB blocks of data, which get buffered in the pagecache,
+and then fallocate() is used to clear the first 4KiB block on the server -
+but we don't flush the data first, which means the EOF position on the
+server is wrong, and so the FSCTL_SET_ZERO_DATA RPC fails (and xfs_io
+ignores the error), but then when we try to read it, we see the old data.
+
+Fix this by preflushing any part of the target region that is above the
+server's idea of the EOF position, to force the server to update its EOF
+position.
+
+Note, however, that we don't want to simply expand the file by moving the
+EOF before doing the FSCTL_SET_ZERO_DATA[*], because someone else might
+see the zeroed region, or, if the RPC fails, we then have to try to clean
+it up or risk getting corruption.
+
+[*] And we have to move the EOF first otherwise FSCTL_SET_ZERO_DATA won't
+do what we want.
+
+This fixes the generic/008 xfstest.
+
+[!] Note: A better way to do this might be to split the operation into two
+parts: we only do FSCTL_SET_ZERO_DATA for the part of the range below the
+server's EOF and then, if that worked, invalidate the buffered pages for the
+part above the range.
+
+Fixes: 6b69040247e1 ("cifs/smb3: Fix data inconsistent when zero file range")
+Signed-off-by: David Howells <dhowells@redhat.com>
+cc: Steve French <stfrench@microsoft.com>
+cc: Zhang Xiaoxu <zhangxiaoxu5@huawei.com>
+cc: Pavel Shilovsky <pshilov@microsoft.com>
+cc: Paulo Alcantara <pc@manguebit.com>
+cc: Shyam Prasad N <nspmangalore@gmail.com>
+cc: Rohith Surabattula <rohiths.msft@gmail.com>
+cc: Jeff Layton <jlayton@kernel.org>
+cc: linux-cifs@vger.kernel.org
+cc: linux-mm@kvack.org
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/smb2ops.c | 16 ++++++++++++++--
+ 1 file changed, 14 insertions(+), 2 deletions(-)
+
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index 42352f70b01c..1d6e8eacdd74 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -3219,13 +3219,15 @@ static long smb3_zero_data(struct file *file, struct cifs_tcon *tcon,
+ }
+
+ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
+- loff_t offset, loff_t len, bool keep_size)
++ unsigned long long offset, unsigned long long len,
++ bool keep_size)
+ {
+ struct cifs_ses *ses = tcon->ses;
+ struct inode *inode = file_inode(file);
+ struct cifsInodeInfo *cifsi = CIFS_I(inode);
+ struct cifsFileInfo *cfile = file->private_data;
+- unsigned long long new_size;
++ struct netfs_inode *ictx = netfs_inode(inode);
++ unsigned long long i_size, new_size, remote_size;
+ long rc;
+ unsigned int xid;
+
+@@ -3237,6 +3239,16 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
+ inode_lock(inode);
+ filemap_invalidate_lock(inode->i_mapping);
+
++ i_size = i_size_read(inode);
++ remote_size = ictx->remote_i_size;
++ if (offset + len >= remote_size && offset < i_size) {
++ unsigned long long top = umin(offset + len, i_size);
++
++ rc = filemap_write_and_wait_range(inode->i_mapping, offset, top - 1);
++ if (rc < 0)
++ goto zero_range_exit;
++ }
++
+ /*
+ * We zero the range through ioctl, so we need remove the page caches
+ * first, otherwise the data may be inconsistent with the server.
+--
+2.43.0
+
--- /dev/null
+From 88bb75fb51786918c1e0949d558f7a97562b3916 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Aug 2024 23:06:48 +0100
+Subject: cifs: Fix lack of credit renegotiation on read retry
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit 6a5dcd487791e0c2d86622064602a5c7459941ed ]
+
+When netfslib asks cifs to issue a read operation, it prefaces this with a
+call to ->clamp_length() which cifs uses to negotiate credits, providing
+receive capacity on the server; however, in the event that a read op needs
+reissuing, netfslib doesn't call ->clamp_length() again as that could
+shorten the subrequest, leaving a gap.
+
+This causes the retried read to be done with zero credits which causes the
+server to reject it with STATUS_INVALID_PARAMETER. This is a problem for a
+requested DIO read that would go over the EOF. The short read will
+be retried, causing EINVAL to be returned to the user when it fails.
+
+Fix this by making cifs_req_issue_read() negotiate new credits if retrying
+(NETFS_SREQ_RETRYING now gets set in the read side as well as the write
+side in this instance).
+
+This isn't sufficient, however: the new credits might not be sufficient to
+complete the remainder of the read, so also add an additional field,
+rreq->actual_len, that holds the actual size of the op we want to perform
+without having to alter subreq->len.
+
+We then rely on repeated short reads being retried until we finish the read
+or reach the end of file and make a zero-length read.
+
+Also fix a couple of places where the subrequest start and length need to
+be altered by the amount so far transferred when being used.
+
+Fixes: 69c3c023af25 ("cifs: Implement netfslib hooks")
+Signed-off-by: David Howells <dhowells@redhat.com>
+cc: Steve French <sfrench@samba.org>
+cc: Paulo Alcantara <pc@manguebit.com>
+cc: Jeff Layton <jlayton@kernel.org>
+cc: linux-cifs@vger.kernel.org
+cc: netfs@lists.linux.dev
+cc: linux-fsdevel@vger.kernel.org
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/netfs/io.c | 2 ++
+ fs/smb/client/cifsglob.h | 1 +
+ fs/smb/client/file.c | 37 +++++++++++++++++++++++++++++++++----
+ fs/smb/client/smb2ops.c | 2 +-
+ fs/smb/client/smb2pdu.c | 28 +++++++++++++++++-----------
+ fs/smb/client/trace.h | 1 +
+ 6 files changed, 55 insertions(+), 16 deletions(-)
+
+diff --git a/fs/netfs/io.c b/fs/netfs/io.c
+index c96431d3da6d..2a5c22606fb1 100644
+--- a/fs/netfs/io.c
++++ b/fs/netfs/io.c
+@@ -306,6 +306,7 @@ static bool netfs_rreq_perform_resubmissions(struct netfs_io_request *rreq)
+ break;
+ subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
+ subreq->error = 0;
++ __set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
+ netfs_stat(&netfs_n_rh_download_instead);
+ trace_netfs_sreq(subreq, netfs_sreq_trace_download_instead);
+ netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
+@@ -313,6 +314,7 @@ static bool netfs_rreq_perform_resubmissions(struct netfs_io_request *rreq)
+ netfs_reset_subreq_iter(rreq, subreq);
+ netfs_read_from_server(rreq, subreq);
+ } else if (test_bit(NETFS_SREQ_SHORT_IO, &subreq->flags)) {
++ __set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
+ netfs_reset_subreq_iter(rreq, subreq);
+ netfs_rreq_short_read(rreq, subreq);
+ }
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index 1e4da268de3b..552792f28122 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -1508,6 +1508,7 @@ struct cifs_io_subrequest {
+ struct cifs_io_request *req;
+ };
+ ssize_t got_bytes;
++ size_t actual_len;
+ unsigned int xid;
+ int result;
+ bool have_xid;
+diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
+index b202eac6584e..533f76118316 100644
+--- a/fs/smb/client/file.c
++++ b/fs/smb/client/file.c
+@@ -111,6 +111,7 @@ static void cifs_issue_write(struct netfs_io_subrequest *subreq)
+ goto fail;
+ }
+
++ wdata->actual_len = wdata->subreq.len;
+ rc = adjust_credits(wdata->server, wdata, cifs_trace_rw_credits_issue_write_adjust);
+ if (rc)
+ goto fail;
+@@ -153,7 +154,7 @@ static bool cifs_clamp_length(struct netfs_io_subrequest *subreq)
+ struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
+ struct TCP_Server_Info *server = req->server;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
+- size_t rsize = 0;
++ size_t rsize;
+ int rc;
+
+ rdata->xid = get_xid();
+@@ -166,8 +167,8 @@ static bool cifs_clamp_length(struct netfs_io_subrequest *subreq)
+ cifs_sb->ctx);
+
+
+- rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize, &rsize,
+- &rdata->credits);
++ rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
++ &rsize, &rdata->credits);
+ if (rc) {
+ subreq->error = rc;
+ return false;
+@@ -183,7 +184,8 @@ static bool cifs_clamp_length(struct netfs_io_subrequest *subreq)
+ server->credits, server->in_flight, 0,
+ cifs_trace_rw_credits_read_submit);
+
+- subreq->len = min_t(size_t, subreq->len, rsize);
++ subreq->len = umin(subreq->len, rsize);
++ rdata->actual_len = subreq->len;
+
+ #ifdef CONFIG_CIFS_SMB_DIRECT
+ if (server->smbd_conn)
+@@ -203,12 +205,39 @@ static void cifs_req_issue_read(struct netfs_io_subrequest *subreq)
+ struct netfs_io_request *rreq = subreq->rreq;
+ struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
+ struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
++ struct TCP_Server_Info *server = req->server;
++ struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
+ int rc = 0;
+
+ cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n",
+ __func__, rreq->debug_id, subreq->debug_index, rreq->mapping,
+ subreq->transferred, subreq->len);
+
++ if (test_bit(NETFS_SREQ_RETRYING, &subreq->flags)) {
++ /*
++ * As we're issuing a retry, we need to negotiate some new
++ * credits otherwise the server may reject the op with
++ * INVALID_PARAMETER. Note, however, we may get back less
++ * credit than we need to complete the op, in which case, we
++ * shorten the op and rely on additional rounds of retry.
++ */
++ size_t rsize = umin(subreq->len - subreq->transferred,
++ cifs_sb->ctx->rsize);
++
++ rc = server->ops->wait_mtu_credits(server, rsize, &rdata->actual_len,
++ &rdata->credits);
++ if (rc)
++ goto out;
++
++ rdata->credits.in_flight_check = 1;
++
++ trace_smb3_rw_credits(rdata->rreq->debug_id,
++ rdata->subreq.debug_index,
++ rdata->credits.value,
++ server->credits, server->in_flight, 0,
++ cifs_trace_rw_credits_read_resubmit);
++ }
++
+ if (req->cfile->invalidHandle) {
+ do {
+ rc = cifs_reopen_file(req->cfile, true);
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index f44f5f249400..42352f70b01c 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -301,7 +301,7 @@ smb2_adjust_credits(struct TCP_Server_Info *server,
+ unsigned int /*enum smb3_rw_credits_trace*/ trace)
+ {
+ struct cifs_credits *credits = &subreq->credits;
+- int new_val = DIV_ROUND_UP(subreq->subreq.len, SMB2_MAX_BUFFER_SIZE);
++ int new_val = DIV_ROUND_UP(subreq->actual_len, SMB2_MAX_BUFFER_SIZE);
+ int scredits, in_flight;
+
+ if (!credits->value || credits->value == new_val)
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+index d262e70100c9..5f5f51bf9850 100644
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -4523,9 +4523,9 @@ smb2_readv_callback(struct mid_q_entry *mid)
+ "rdata server %p != mid server %p",
+ rdata->server, mid->server);
+
+- cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%zu\n",
++ cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%zu/%zu\n",
+ __func__, mid->mid, mid->mid_state, rdata->result,
+- rdata->subreq.len);
++ rdata->actual_len, rdata->subreq.len - rdata->subreq.transferred);
+
+ switch (mid->mid_state) {
+ case MID_RESPONSE_RECEIVED:
+@@ -4579,15 +4579,18 @@ smb2_readv_callback(struct mid_q_entry *mid)
+ rdata->subreq.debug_index,
+ rdata->xid,
+ rdata->req->cfile->fid.persistent_fid,
+- tcon->tid, tcon->ses->Suid, rdata->subreq.start,
+- rdata->subreq.len, rdata->result);
++ tcon->tid, tcon->ses->Suid,
++ rdata->subreq.start + rdata->subreq.transferred,
++ rdata->actual_len,
++ rdata->result);
+ } else
+ trace_smb3_read_done(rdata->rreq->debug_id,
+ rdata->subreq.debug_index,
+ rdata->xid,
+ rdata->req->cfile->fid.persistent_fid,
+ tcon->tid, tcon->ses->Suid,
+- rdata->subreq.start, rdata->got_bytes);
++ rdata->subreq.start + rdata->subreq.transferred,
++ rdata->got_bytes);
+
+ if (rdata->result == -ENODATA) {
+ /* We may have got an EOF error because fallocate
+@@ -4615,6 +4618,7 @@ smb2_async_readv(struct cifs_io_subrequest *rdata)
+ {
+ int rc, flags = 0;
+ char *buf;
++ struct netfs_io_subrequest *subreq = &rdata->subreq;
+ struct smb2_hdr *shdr;
+ struct cifs_io_parms io_parms;
+ struct smb_rqst rqst = { .rq_iov = rdata->iov,
+@@ -4625,15 +4629,15 @@ smb2_async_readv(struct cifs_io_subrequest *rdata)
+ int credit_request;
+
+ cifs_dbg(FYI, "%s: offset=%llu bytes=%zu\n",
+- __func__, rdata->subreq.start, rdata->subreq.len);
++ __func__, subreq->start, subreq->len);
+
+ if (!rdata->server)
+ rdata->server = cifs_pick_channel(tcon->ses);
+
+ io_parms.tcon = tlink_tcon(rdata->req->cfile->tlink);
+ io_parms.server = server = rdata->server;
+- io_parms.offset = rdata->subreq.start;
+- io_parms.length = rdata->subreq.len;
++ io_parms.offset = subreq->start + subreq->transferred;
++ io_parms.length = rdata->actual_len;
+ io_parms.persistent_fid = rdata->req->cfile->fid.persistent_fid;
+ io_parms.volatile_fid = rdata->req->cfile->fid.volatile_fid;
+ io_parms.pid = rdata->req->pid;
+@@ -4648,11 +4652,13 @@ smb2_async_readv(struct cifs_io_subrequest *rdata)
+
+ rdata->iov[0].iov_base = buf;
+ rdata->iov[0].iov_len = total_len;
++ rdata->got_bytes = 0;
++ rdata->result = 0;
+
+ shdr = (struct smb2_hdr *)buf;
+
+ if (rdata->credits.value > 0) {
+- shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->subreq.len,
++ shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->actual_len,
+ SMB2_MAX_BUFFER_SIZE));
+ credit_request = le16_to_cpu(shdr->CreditCharge) + 8;
+ if (server->credits >= server->max_credits)
+@@ -4676,11 +4682,11 @@ smb2_async_readv(struct cifs_io_subrequest *rdata)
+ if (rc) {
+ cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE);
+ trace_smb3_read_err(rdata->rreq->debug_id,
+- rdata->subreq.debug_index,
++ subreq->debug_index,
+ rdata->xid, io_parms.persistent_fid,
+ io_parms.tcon->tid,
+ io_parms.tcon->ses->Suid,
+- io_parms.offset, io_parms.length, rc);
++ io_parms.offset, rdata->actual_len, rc);
+ }
+
+ async_readv_out:
+diff --git a/fs/smb/client/trace.h b/fs/smb/client/trace.h
+index 36d5295c2a6f..13adfe550b99 100644
+--- a/fs/smb/client/trace.h
++++ b/fs/smb/client/trace.h
+@@ -30,6 +30,7 @@
+ EM(cifs_trace_rw_credits_old_session, "old-session") \
+ EM(cifs_trace_rw_credits_read_response_add, "rd-resp-add") \
+ EM(cifs_trace_rw_credits_read_response_clear, "rd-resp-clr") \
++ EM(cifs_trace_rw_credits_read_resubmit, "rd-resubmit") \
+ EM(cifs_trace_rw_credits_read_submit, "rd-submit ") \
+ EM(cifs_trace_rw_credits_write_prepare, "wr-prepare ") \
+ EM(cifs_trace_rw_credits_write_response_add, "wr-resp-add") \
+--
+2.43.0
+
--- /dev/null
+From 83e880e603a2fd08657543d65a26bbe85839e196 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Jun 2024 15:41:19 +0100
+Subject: crypto: qat - fix unintentional re-enabling of error interrupts
+
+From: Hareshx Sankar Raj <hareshx.sankar.raj@intel.com>
+
+[ Upstream commit f0622894c59458fceb33c4197462bc2006f3fc6b ]
+
+The logic that detects pending VF2PF interrupts unintentionally clears
+the section of the error mask register(s) not related to VF2PF.
+This might cause interrupts unrelated to VF2PF, reported through
+errsou3 and errsou5, to be reported again after the execution
+of the function disable_pending_vf2pf_interrupts() in dh895xcc
+and GEN2 devices.
+
+Fix by updating only the sections of errmsk3 and errmsk5 related to VF2PF.
+
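+The underlying read-modify-write pattern, shown as a stand-alone sketch
+(the field-mask parameter is illustrative, not a driver symbol):
+
+  #include <stdint.h>
+
+  /* Update only the bits selected by field_mask; leave the rest alone. */
+  static inline uint32_t mask_update_field(uint32_t reg, uint32_t field_mask,
+                                           uint32_t new_bits)
+  {
+          reg &= ~field_mask;             /* clear just the VF2PF section */
+          reg |= new_bits & field_mask;   /* set its new value */
+          return reg;                     /* unrelated error bits preserved */
+  }
+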
+Signed-off-by: Hareshx Sankar Raj <hareshx.sankar.raj@intel.com>
+Reviewed-by: Damian Muszynski <damian.muszynski@intel.com>
+Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c | 4 +++-
+ .../crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c | 8 ++++++--
+ 2 files changed, 9 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c
+index 70ef11963938..43af81fcab86 100644
+--- a/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c
++++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c
+@@ -100,7 +100,9 @@ static u32 adf_gen2_disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr)
+ errmsk3 |= ADF_GEN2_ERR_MSK_VF2PF(ADF_GEN2_VF_MSK);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
+
+- errmsk3 &= ADF_GEN2_ERR_MSK_VF2PF(sources | disabled);
++ /* Update only section of errmsk3 related to VF2PF */
++ errmsk3 &= ~ADF_GEN2_ERR_MSK_VF2PF(ADF_GEN2_VF_MSK);
++ errmsk3 |= ADF_GEN2_ERR_MSK_VF2PF(sources | disabled);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
+
+ /* Return the sources of the (new) interrupt(s) */
+diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+index 6e24d57e6b98..c0661ff5e929 100644
+--- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
++++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+@@ -193,8 +193,12 @@ static u32 disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr)
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5);
+
+- errmsk3 &= ADF_DH895XCC_ERR_MSK_VF2PF_L(sources | disabled);
+- errmsk5 &= ADF_DH895XCC_ERR_MSK_VF2PF_U(sources | disabled);
++ /* Update only section of errmsk3 and errmsk5 related to VF2PF */
++ errmsk3 &= ~ADF_DH895XCC_ERR_MSK_VF2PF_L(ADF_DH895XCC_VF_MSK);
++ errmsk5 &= ~ADF_DH895XCC_ERR_MSK_VF2PF_U(ADF_DH895XCC_VF_MSK);
++
++ errmsk3 |= ADF_DH895XCC_ERR_MSK_VF2PF_L(sources | disabled);
++ errmsk5 |= ADF_DH895XCC_ERR_MSK_VF2PF_U(sources | disabled);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5);
+
+--
+2.43.0
+
--- /dev/null
+From 74e87e992e1c6c43a6a6ca44c5007f8be00bf9c3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Jun 2024 16:51:10 +0800
+Subject: crypto: qat - initialize user_input.lock for rate_limiting
+
+From: Jiwei Sun <sunjw10@lenovo.com>
+
+[ Upstream commit ccacbbc3176277bbfc324f85fa827d1a2656bedf ]
+
+If the following configurations are set,
+CONFIG_DEBUG_RWSEMS=y
+CONFIG_DEBUG_LOCK_ALLOC=y
+CONFIG_RWSEM_SPIN_ON_OWNER=y
+
+And run the following command,
+[root@localhost sys]# cat /sys/devices/pci0000:6b/0000:6b:00.0/qat_rl/pir
+The following warning log appears,
+
+------------[ cut here ]------------
+DEBUG_RWSEMS_WARN_ON(sem->magic != sem): count = 0x0, magic = 0x0, owner = 0x1, curr 0xff11000119288040, list not empty
+WARNING: CPU: 131 PID: 1254984 at kernel/locking/rwsem.c:1280 down_read+0x439/0x7f0
+CPU: 131 PID: 1254984 Comm: cat Kdump: loaded Tainted: G W 6.10.0-rc4+ #86 b2ae60c8ceabed15f4fd2dba03c1c5a5f7f4040c
+Hardware name: Lenovo ThinkServer SR660 V3/SR660 V3, BIOS T8E166X-2.54 05/30/2024
+RIP: 0010:down_read+0x439/0x7f0
+Code: 44 24 10 80 3c 02 00 0f 85 05 03 00 00 48 8b 13 41 54 48 c7 c6 a0 3e 0e b4 48 c7 c7 e0 3e 0e b4 4c 8b 4c 24 08 e8 77 d5 40 fd <0f> 0b 59 e9 bc fc ff ff 0f 1f 44 00 00 e9 e2 fd ff ff 4c 8d 7b 08
+RSP: 0018:ffa0000035f67a78 EFLAGS: 00010286
+RAX: 0000000000000000 RBX: ff1100012b03a658 RCX: 0000000000000000
+RDX: 0000000080000002 RSI: 0000000000000008 RDI: 0000000000000001
+RBP: 1ff4000006becf53 R08: fff3fc0006becf17 R09: fff3fc0006becf17
+R10: fff3fc0006becf16 R11: ffa0000035f678b7 R12: ffffffffb40e3e60
+R13: ffffffffb627d1f4 R14: ff1100012b03a6d0 R15: ff1100012b03a6c8
+FS: 00007fa9ff9a6740(0000) GS:ff1100081e600000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007fa9ff984000 CR3: 00000002118ae006 CR4: 0000000000771ef0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe07f0 DR7: 0000000000000400
+PKRU: 55555554
+Call Trace:
+ <TASK>
+ pir_show+0x5d/0xe0 [intel_qat 9e297e249ab040329cf58b657b06f418fd5c5855]
+ dev_attr_show+0x3f/0xc0
+ sysfs_kf_seq_show+0x1ce/0x400
+ seq_read_iter+0x3fa/0x10b0
+ vfs_read+0x6f5/0xb20
+ ksys_read+0xe9/0x1d0
+ do_syscall_64+0x8a/0x170
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+RIP: 0033:0x7fa9ff6fd9b2
+Code: c0 e9 b2 fe ff ff 50 48 8d 3d ea 1d 0c 00 e8 c5 fd 01 00 0f 1f 44 00 00 f3 0f 1e fa 64 8b 04 25 18 00 00 00 85 c0 75 10 0f 05 <48> 3d 00 f0 ff ff 77 56 c3 0f 1f 44 00 00 48 83 ec 28 48 89 54 24
+RSP: 002b:00007ffc0616b968 EFLAGS: 00000246 ORIG_RAX: 0000000000000000
+RAX: ffffffffffffffda RBX: 0000000000020000 RCX: 00007fa9ff6fd9b2
+RDX: 0000000000020000 RSI: 00007fa9ff985000 RDI: 0000000000000003
+RBP: 00007fa9ff985000 R08: 00007fa9ff984010 R09: 0000000000000000
+R10: 0000000000000022 R11: 0000000000000246 R12: 0000000000022000
+R13: 0000000000000003 R14: 0000000000020000 R15: 0000000000020000
+ </TASK>
+irq event stamp: 0
+hardirqs last enabled at (0): [<0000000000000000>] 0x0
+hardirqs last disabled at (0): [<ffffffffb102c126>] copy_process+0x21e6/0x6e70
+softirqs last enabled at (0): [<ffffffffb102c176>] copy_process+0x2236/0x6e70
+softirqs last disabled at (0): [<0000000000000000>] 0x0
+---[ end trace 0000000000000000 ]---
+
+The rate_limiting->user_input.lock rwsem lock is not initialized before
+use. Let's initialize it.
+
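+As a minimal illustration of the rule enforced here (generic structure,
+not the QAT one): an rwsem embedded in a dynamically allocated object
+must be initialized before its first down_read()/down_write().
+
+  struct sketch_obj {
+          struct rw_semaphore lock;
+  };
+
+  static struct sketch_obj *sketch_obj_alloc(gfp_t gfp)
+  {
+          struct sketch_obj *obj = kzalloc(sizeof(*obj), gfp);
+
+          if (!obj)
+                  return NULL;
+          init_rwsem(&obj->lock);         /* what adf_rl_init() was missing */
+          return obj;
+  }
+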
+Signed-off-by: Jiwei Sun <sunjw10@lenovo.com>
+Reviewed-by: Adrian Huang <ahuang12@lenovo.com>
+Reviewed-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/crypto/intel/qat/qat_common/adf_rl.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.c b/drivers/crypto/intel/qat/qat_common/adf_rl.c
+index 346ef8bee99d..e782c23fc1bf 100644
+--- a/drivers/crypto/intel/qat/qat_common/adf_rl.c
++++ b/drivers/crypto/intel/qat/qat_common/adf_rl.c
+@@ -1106,6 +1106,7 @@ int adf_rl_init(struct adf_accel_dev *accel_dev)
+ mutex_init(&rl->rl_lock);
+ rl->device_data = &accel_dev->hw_device->rl_data;
+ rl->accel_dev = accel_dev;
++ init_rwsem(&rl->user_input.lock);
+ accel_dev->rate_limiting = rl;
+
+ err_ret:
+--
+2.43.0
+
--- /dev/null
+From d2f219616e824f9be86b542585ed98f737162bd3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 18 Jun 2024 16:46:37 +0800
+Subject: cxl/region: Fix a race condition in memory hotplug notifier
+
+From: Huang Ying <ying.huang@intel.com>
+
+[ Upstream commit a3483ee7e6a7f2d12b5950246f4e0ef94f4a5df0 ]
+
+In the memory hotplug notifier function of the CXL region,
+cxl_region_perf_attrs_callback(), the node ID is obtained by checking
+the host address range of the region. However, the address range
+information is not available when the region is registered in
+devm_cxl_add_region(). Additionally, this information may be removed
+or added under the protection of cxl_region_rwsem during runtime. If
+the memory notifier is called for nodes other than the one backed by the
+region, a race condition may occur, potentially leading to a NULL
+dereference or an invalid address range.
+
+The race condition is addressed by checking the availability of the
+address range information under the protection of cxl_region_rwsem. To
+enhance code readability and use guard(), the relevant code has been
+moved into a newly added function: cxl_region_nid().
+
+Fixes: 067353a46d8c ("cxl/region: Add memory hotplug notifier for cxl region")
+Signed-off-by: Huang, Ying <ying.huang@intel.com>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Alison Schofield <alison.schofield@intel.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Cc: Dave Jiang <dave.jiang@intel.com>
+Cc: Bharata B Rao <bharata@amd.com>
+Cc: Alistair Popple <apopple@nvidia.com>
+Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
+Cc: Davidlohr Bueso <dave@stgolabs.net>
+Cc: Vishal Verma <vishal.l.verma@intel.com>
+Cc: Ira Weiny <ira.weiny@intel.com>
+Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Link: https://patch.msgid.link/20240618084639.1419629-2-ying.huang@intel.com
+Signed-off-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/cxl/core/region.c | 19 +++++++++++++++----
+ 1 file changed, 15 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
+index 538ebd5a64fd..cd9ccdc6bc81 100644
+--- a/drivers/cxl/core/region.c
++++ b/drivers/cxl/core/region.c
+@@ -2386,14 +2386,25 @@ static bool cxl_region_update_coordinates(struct cxl_region *cxlr, int nid)
+ return true;
+ }
+
++static int cxl_region_nid(struct cxl_region *cxlr)
++{
++ struct cxl_region_params *p = &cxlr->params;
++ struct cxl_endpoint_decoder *cxled;
++ struct cxl_decoder *cxld;
++
++ guard(rwsem_read)(&cxl_region_rwsem);
++ cxled = p->targets[0];
++ if (!cxled)
++ return NUMA_NO_NODE;
++ cxld = &cxled->cxld;
++ return phys_to_target_node(cxld->hpa_range.start);
++}
++
+ static int cxl_region_perf_attrs_callback(struct notifier_block *nb,
+ unsigned long action, void *arg)
+ {
+ struct cxl_region *cxlr = container_of(nb, struct cxl_region,
+ memory_notifier);
+- struct cxl_region_params *p = &cxlr->params;
+- struct cxl_endpoint_decoder *cxled = p->targets[0];
+- struct cxl_decoder *cxld = &cxled->cxld;
+ struct memory_notify *mnb = arg;
+ int nid = mnb->status_change_nid;
+ int region_nid;
+@@ -2401,7 +2412,7 @@ static int cxl_region_perf_attrs_callback(struct notifier_block *nb,
+ if (nid == NUMA_NO_NODE || action != MEM_ONLINE)
+ return NOTIFY_DONE;
+
+- region_nid = phys_to_target_node(cxld->hpa_range.start);
++ region_nid = cxl_region_nid(cxlr);
+ if (nid != region_nid)
+ return NOTIFY_DONE;
+
+--
+2.43.0
+
--- /dev/null
+From d4320002516f8f00070959eab49a99e24e3eab19 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 2 Jul 2024 22:29:51 -0700
+Subject: cxl/region: Verify target positions using the ordered target list
+
+From: Alison Schofield <alison.schofield@intel.com>
+
+[ Upstream commit 82a3e3a235633aa0575fac9507d648dd80f3437f ]
+
+When a root decoder is configured the interleave target list is read
+from the BIOS populated CFMWS structure. Per the CXL spec 3.1 Table
+9-22 the target list is in interleave order. The CXL driver populates
+its decoder target list in the same order and stores it in 'struct
+cxl_switch_decoder' field "@target: active ordered target list in
+current decoder configuration"
+
+Given the promise of an ordered list, the driver can stop duplicating
+the work of BIOS and simply check target positions against the ordered
+list during region configuration.
+
+The simplified check against the ordered list is presented here.
+A follow-on patch will remove the unused code.
+
+For Modulo arithmetic this is not a fix, only a simplification.
+For XOR arithmetic this is a fix for HB IW of 3,6,12.
+
+Fixes: f9db85bfec0d ("cxl/acpi: Support CXL XOR Interleave Math (CXIMS)")
+Signed-off-by: Alison Schofield <alison.schofield@intel.com>
+Reviewed-by: Dan Williams <dan.j.williams@intel.com>
+Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Link: https://patch.msgid.link/35d08d3aba08fee0f9b86ab1cef0c25116ca8a55.1719980933.git.alison.schofield@intel.com
+Signed-off-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/cxl/core/region.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
+index cd9ccdc6bc81..0e30e0a29d40 100644
+--- a/drivers/cxl/core/region.c
++++ b/drivers/cxl/core/region.c
+@@ -1632,10 +1632,13 @@ static int cxl_region_attach_position(struct cxl_region *cxlr,
+ const struct cxl_dport *dport, int pos)
+ {
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
++ struct cxl_switch_decoder *cxlsd = &cxlrd->cxlsd;
++ struct cxl_decoder *cxld = &cxlsd->cxld;
++ int iw = cxld->interleave_ways;
+ struct cxl_port *iter;
+ int rc;
+
+- if (cxlrd->calc_hb(cxlrd, pos) != dport) {
++ if (dport != cxlrd->cxlsd.target[pos % iw]) {
+ dev_dbg(&cxlr->dev, "%s:%s invalid target position for %s\n",
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
+ dev_name(&cxlrd->cxlsd.cxld.dev));
+--
+2.43.0
+
--- /dev/null
+From ecc8fb9d5c0f9b97fa3c199119a456479ea8f3d7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 2 Jul 2024 22:51:52 +0800
+Subject: devres: Initialize an uninitialized struct member
+
+From: Zijun Hu <quic_zijuhu@quicinc.com>
+
+[ Upstream commit 56a20ad349b5c51909cf8810f7c79b288864ad33 ]
+
+Initialize the uninitialized 'color' struct member in the driver API
+devres_open_group().
+
+Signed-off-by: Zijun Hu <quic_zijuhu@quicinc.com>
+Link: https://lore.kernel.org/r/1719931914-19035-4-git-send-email-quic_zijuhu@quicinc.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/base/devres.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/base/devres.c b/drivers/base/devres.c
+index 8d709dbd4e0c..e9b0d94aeabd 100644
+--- a/drivers/base/devres.c
++++ b/drivers/base/devres.c
+@@ -567,6 +567,7 @@ void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
+ grp->id = grp;
+ if (id)
+ grp->id = id;
++ grp->color = 0;
+
+ spin_lock_irqsave(&dev->devres_lock, flags);
+ add_dr(dev, &grp->node[0]);
+--
+2.43.0
+
--- /dev/null
+From 76a80e7ffc88a1c266c49ce8ea94967726da045a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 2 Jul 2024 12:13:24 +0200
+Subject: dm init: Handle minors larger than 255
+
+From: Benjamin Marzinski <bmarzins@redhat.com>
+
+[ Upstream commit 140ce37fd78a629105377e17842465258a5459ef ]
+
+dm_parse_device_entry() simply copies the minor number into dmi.dev, but
+the dev_t format splits the minor number between the lowest 8 bits and
+the highest 12 bits. If the minor number is larger than 255, part of it
+will end up getting treated as the major number.
+
+Fix this by checking that the minor number is valid and then encoding it
+as a dev_t.
+
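+For illustration, a small user-space model of the encoding involved (the
+bit layout mirrors new_encode_dev()/huge_encode_dev() from
+include/linux/kdev_t.h; the device numbers are made up):
+
+  #include <stdio.h>
+
+  #define MINORBITS 20
+  #define MKDEV(ma, mi)  (((ma) << MINORBITS) | (mi))  /* kernel-internal dev_t */
+
+  static unsigned int encode_dev(unsigned int dev)
+  {
+          unsigned int major = dev >> MINORBITS;
+          unsigned int minor = dev & ((1u << MINORBITS) - 1);
+
+          /* minor is split around the major: bits 0-7 and bits 20-31 */
+          return (minor & 0xff) | (major << 8) | ((minor & ~0xffu) << 12);
+  }
+
+  int main(void)
+  {
+          unsigned long long minor = 300;              /* larger than 255 */
+
+          if (minor >= (1ull << MINORBITS))            /* the new validity check */
+                  return 1;
+          printf("%#x\n", encode_dev(MKDEV(0u, (unsigned int)minor)));
+          return 0;
+  }
+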
+Signed-off-by: Benjamin Marzinski <bmarzins@redhat.com>
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/dm-init.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/md/dm-init.c b/drivers/md/dm-init.c
+index 2a71bcdba92d..b37bbe762500 100644
+--- a/drivers/md/dm-init.c
++++ b/drivers/md/dm-init.c
+@@ -212,8 +212,10 @@ static char __init *dm_parse_device_entry(struct dm_device *dev, char *str)
+ strscpy(dev->dmi.uuid, field[1], sizeof(dev->dmi.uuid));
+ /* minor */
+ if (strlen(field[2])) {
+- if (kstrtoull(field[2], 0, &dev->dmi.dev))
++ if (kstrtoull(field[2], 0, &dev->dmi.dev) ||
++ dev->dmi.dev >= (1 << MINORBITS))
+ return ERR_PTR(-EINVAL);
++ dev->dmi.dev = huge_encode_dev((dev_t)dev->dmi.dev);
+ dev->dmi.flags |= DM_PERSISTENT_DEV_FLAG;
+ }
+ /* flags */
+--
+2.43.0
+
--- /dev/null
+From 0ae82d9345b571dd009bc400074deeeb3eead481 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Jun 2024 17:28:55 +0800
+Subject: dma-mapping: benchmark: Don't starve others when doing the test
+
+From: Yicong Yang <yangyicong@hisilicon.com>
+
+[ Upstream commit 54624acf8843375a6de3717ac18df3b5104c39c5 ]
+
+The test thread starts N benchmark kthreads and then schedules out
+until the test time has elapsed, at which point it notifies the
+benchmark kthreads to stop. The benchmark kthreads keep running until
+they are notified to stop. There is a problem with the current
+implementation when the number of benchmark kthreads equals the number
+of CPUs on a non-preemptible kernel: the scheduler balances the
+kthreads across the CPUs, so when the test time runs out the test
+thread never gets a chance to be scheduled on any CPU and therefore
+cannot notify the benchmark kthreads to stop.
+
+This can be easily reproduced on a VM (simulated with 16 CPUs) with
+PREEMPT_VOLUNTARY:
+estuary:/mnt$ ./dma_map_benchmark -t 16 -s 1
+ rcu: INFO: rcu_sched self-detected stall on CPU
+ rcu: 10-...!: (5221 ticks this GP) idle=ed24/1/0x4000000000000000 softirq=142/142 fqs=0
+ rcu: (t=5254 jiffies g=-559 q=45 ncpus=16)
+ rcu: rcu_sched kthread starved for 5255 jiffies! g-559 f0x0 RCU_GP_WAIT_FQS(5) ->state=0x0 ->cpu=12
+ rcu: Unless rcu_sched kthread gets sufficient CPU time, OOM is now expected behavior.
+ rcu: RCU grace-period kthread stack dump:
+ task:rcu_sched state:R running task stack:0 pid:16 tgid:16 ppid:2 flags:0x00000008
+ Call trace
+ __switch_to+0xec/0x138
+ __schedule+0x2f8/0x1080
+ schedule+0x30/0x130
+ schedule_timeout+0xa0/0x188
+ rcu_gp_fqs_loop+0x128/0x528
+ rcu_gp_kthread+0x1c8/0x208
+ kthread+0xec/0xf8
+ ret_from_fork+0x10/0x20
+ Sending NMI from CPU 10 to CPUs 0:
+ NMI backtrace for cpu 0
+ CPU: 0 PID: 332 Comm: dma-map-benchma Not tainted 6.10.0-rc1-vanilla-LSE #8
+ Hardware name: QEMU KVM Virtual Machine, BIOS 0.0.0 02/06/2015
+ pstate: 20400005 (nzCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+ pc : arm_smmu_cmdq_issue_cmdlist+0x218/0x730
+ lr : arm_smmu_cmdq_issue_cmdlist+0x488/0x730
+ sp : ffff80008748b630
+ x29: ffff80008748b630 x28: 0000000000000000 x27: ffff80008748b780
+ x26: 0000000000000000 x25: 000000000000bc70 x24: 000000000001bc70
+ x23: ffff0000c12af080 x22: 0000000000010000 x21: 000000000000ffff
+ x20: ffff80008748b700 x19: ffff0000c12af0c0 x18: 0000000000010000
+ x17: 0000000000000001 x16: 0000000000000040 x15: ffffffffffffffff
+ x14: 0001ffffffffffff x13: 000000000000ffff x12: 00000000000002f1
+ x11: 000000000001ffff x10: 0000000000000031 x9 : ffff800080b6b0b8
+ x8 : ffff0000c2a48000 x7 : 000000000001bc71 x6 : 0001800000000000
+ x5 : 00000000000002f1 x4 : 01ffffffffffffff x3 : 000000000009aaf1
+ x2 : 0000000000000018 x1 : 000000000000000f x0 : ffff0000c12af18c
+ Call trace:
+ arm_smmu_cmdq_issue_cmdlist+0x218/0x730
+ __arm_smmu_tlb_inv_range+0xe0/0x1a8
+ arm_smmu_iotlb_sync+0xc0/0x128
+ __iommu_dma_unmap+0x248/0x320
+ iommu_dma_unmap_page+0x5c/0xe8
+ dma_unmap_page_attrs+0x38/0x1d0
+ map_benchmark_thread+0x118/0x2c0
+ kthread+0xec/0xf8
+ ret_from_fork+0x10/0x20
+
+Solve this by adding a scheduling point in the kthread loop, so that
+other threads in the system get a chance to run, especially the thread
+that notifies the end of the test. However, this may degrade the test
+concurrency, so it is recommended to run the benchmark on an idle
+system.
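+
+For reference, the shape of the resulting loop (a simplified
+kernel-style sketch, not the actual map_benchmark code;
+do_one_measurement() is a placeholder for the timed DMA map/unmap
+work):
+
+  #include <linux/kthread.h>
+  #include <linux/sched.h>
+
+  static void do_one_measurement(void *data)
+  {
+      /* stand-in for the timed dma_map_page()/dma_unmap_page() pair */
+  }
+
+  static int bench_thread(void *data)
+  {
+      while (!kthread_should_stop()) {
+          do_one_measurement(data);
+
+          /*
+           * Without this, N such threads spread across N CPUs never
+           * let the thread that calls kthread_stop() run on a
+           * non-preemptible kernel.
+           */
+          cond_resched();
+      }
+      return 0;
+  }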
+
+Signed-off-by: Yicong Yang <yangyicong@hisilicon.com>
+Acked-by: Barry Song <baohua@kernel.org>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/dma/map_benchmark.c | 16 ++++++++++++++++
+ 1 file changed, 16 insertions(+)
+
+diff --git a/kernel/dma/map_benchmark.c b/kernel/dma/map_benchmark.c
+index 4950e0b622b1..cc19a3efea89 100644
+--- a/kernel/dma/map_benchmark.c
++++ b/kernel/dma/map_benchmark.c
+@@ -89,6 +89,22 @@ static int map_benchmark_thread(void *data)
+ atomic64_add(map_sq, &map->sum_sq_map);
+ atomic64_add(unmap_sq, &map->sum_sq_unmap);
+ atomic64_inc(&map->loops);
++
++ /*
++ * We may test for a long time so periodically check whether
++ * we need to schedule to avoid starving the others. Otherwise
++ * we may hangup the kernel in a non-preemptible kernel when
++ * the test kthreads number >= CPU number, the test kthreads
++ * will run endless on every CPU since the thread resposible
++ * for notifying the kthread stop (in do_map_benchmark())
++ * could not be scheduled.
++ *
++ * Note this may degrade the test concurrency since the test
++ * threads may need to share the CPU time with other load
++ * in the system. So it's recommended to run this benchmark
++ * on an idle system.
++ */
++ cond_resched();
+ }
+
+ out:
+--
+2.43.0
+
--- /dev/null
+From dc9b867b5045a6c85d379a3ddf6674956be1d102 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 18 Jun 2024 16:19:48 -0600
+Subject: drm/amd/display: Check denominator crb_pipes before used
+
+From: Alex Hung <alex.hung@amd.com>
+
+[ Upstream commit ea79068d4073bf303f8203f2625af7d9185a1bc6 ]
+
+[WHAT & HOW]
+A denominator cannot be 0, so it is checked before being used.
+
+This fixes 2 DIVIDE_BY_ZERO issues reported by Coverity.
+
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Jerry Zuo <jerry.zuo@amd.com>
+Signed-off-by: Alex Hung <alex.hung@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
+index 4ce0f4bf1d9b..3329eaecfb15 100644
+--- a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
+@@ -1756,7 +1756,7 @@ static int dcn315_populate_dml_pipes_from_context(
+ bool split_required = pipe->stream->timing.pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc)
+ || (pipe->plane_state && pipe->plane_state->src_rect.width > 5120);
+
+- if (remaining_det_segs > MIN_RESERVED_DET_SEGS)
++ if (remaining_det_segs > MIN_RESERVED_DET_SEGS && crb_pipes != 0)
+ pipes[pipe_cnt].pipe.src.det_size_override += (remaining_det_segs - MIN_RESERVED_DET_SEGS) / crb_pipes +
+ (crb_idx < (remaining_det_segs - MIN_RESERVED_DET_SEGS) % crb_pipes ? 1 : 0);
+ if (pipes[pipe_cnt].pipe.src.det_size_override > 2 * DCN3_15_MAX_DET_SEGS) {
+--
+2.43.0
+
--- /dev/null
+From 728f10b24eee32f4c9c9ec0fab2d70b632c1bc5d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 18 Jun 2024 16:21:20 -0600
+Subject: drm/amd/display: Check denominator pbn_div before used
+
+From: Alex Hung <alex.hung@amd.com>
+
+[ Upstream commit 116a678f3a9abc24f5c9d2525b7393d18d9eb58e ]
+
+[WHAT & HOW]
+A denominator cannot be 0, so it is checked before being used.
+
+This fixes 1 DIVIDE_BY_ZERO issue reported by Coverity.
+
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Jerry Zuo <jerry.zuo@amd.com>
+Signed-off-by: Alex Hung <alex.hung@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 0627961b7115..27e641f17628 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -7302,7 +7302,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
+ }
+ }
+
+- if (j == dc_state->stream_count)
++ if (j == dc_state->stream_count || pbn_div == 0)
+ continue;
+
+ slot_num = DIV_ROUND_UP(pbn, pbn_div);
+--
+2.43.0
+
--- /dev/null
+From 1069ea3f1d23d7387af1cf0fb04deb6387e3d58c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 Jun 2024 10:36:49 -0600
+Subject: drm/amd/display: Check HDCP returned status
+
+From: Alex Hung <alex.hung@amd.com>
+
+[ Upstream commit 5d93060d430b359e16e7c555c8f151ead1ac614b ]
+
+[WHAT & HOW]
+Check mod_hdcp_execute_and_set() return values in authenticated_dp.
+
+This fixes 3 CHECKED_RETURN issues reported by Coverity.
+
+Reviewed-by: Rodrigo Siqueira <rodrigo.siqueira@amd.com>
+Signed-off-by: Alex Hung <alex.hung@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../amd/display/modules/hdcp/hdcp1_execution.c | 15 +++++++++------
+ 1 file changed, 9 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
+index 182e7532dda8..d77836cef563 100644
+--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
++++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
+@@ -433,17 +433,20 @@ static enum mod_hdcp_status authenticated_dp(struct mod_hdcp *hdcp,
+ }
+
+ if (status == MOD_HDCP_STATUS_SUCCESS)
+- mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
++ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
+ &input->bstatus_read, &status,
+- hdcp, "bstatus_read");
++ hdcp, "bstatus_read"))
++ goto out;
+ if (status == MOD_HDCP_STATUS_SUCCESS)
+- mod_hdcp_execute_and_set(check_link_integrity_dp,
++ if (!mod_hdcp_execute_and_set(check_link_integrity_dp,
+ &input->link_integrity_check, &status,
+- hdcp, "link_integrity_check");
++ hdcp, "link_integrity_check"))
++ goto out;
+ if (status == MOD_HDCP_STATUS_SUCCESS)
+- mod_hdcp_execute_and_set(check_no_reauthentication_request_dp,
++ if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp,
+ &input->reauth_request_check, &status,
+- hdcp, "reauth_request_check");
++ hdcp, "reauth_request_check"))
++ goto out;
+ out:
+ return status;
+ }
+--
+2.43.0
+
--- /dev/null
+From 5f708e98fcfbeb7b6dd3b73d8fe34afd92305050 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 6 Jun 2024 21:23:39 -0600
+Subject: drm/amd/display: Check UnboundedRequestEnabled's value
+
+From: Alex Hung <alex.hung@amd.com>
+
+[ Upstream commit a7b38c7852093385d0605aa3c8a2efd6edd1edfd ]
+
+CalculateSwathAndDETConfiguration_params_st's UnboundedRequestEnabled
+is a pointer (i.e. dml_bool_t *UnboundedRequestEnabled), and thus
+if (p->UnboundedRequestEnabled) checks its address, not its bool value.
+
+This fixes 1 REVERSE_INULL issue reported by Coverity.
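+
+A standalone sketch of this class of bug (the struct and field names
+here are illustrative, not the DML ones):
+
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  struct params {
+      bool *unbounded_request_enabled;   /* a pointer, like the DML field */
+  };
+
+  int main(void)
+  {
+      bool flag = false;
+      struct params p = { .unbounded_request_enabled = &flag };
+
+      if (p.unbounded_request_enabled)   /* checks the address: always true */
+          printf("buggy check taken although the flag is false\n");
+
+      if (*p.unbounded_request_enabled)  /* checks the value: correct */
+          printf("not printed while the flag is false\n");
+
+      return 0;
+  }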
+
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Acked-by: Hamza Mahfooz <hamza.mahfooz@amd.com>
+Signed-off-by: Alex Hung <alex.hung@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
+index 3e919f5c00ca..fee1df342f12 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
++++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
+@@ -4282,7 +4282,7 @@ static void CalculateSwathAndDETConfiguration(struct display_mode_lib_scratch_st
+ }
+
+ *p->compbuf_reserved_space_64b = 2 * p->PixelChunkSizeInKByte * 1024 / 64;
+- if (p->UnboundedRequestEnabled) {
++ if (*p->UnboundedRequestEnabled) {
+ *p->compbuf_reserved_space_64b = dml_max(*p->compbuf_reserved_space_64b,
+ (dml_float_t)(p->ROBBufferSizeInKByte * 1024/64)
+ - (dml_float_t)(RoundedUpSwathSizeBytesY[SurfaceDoingUnboundedRequest] * TTUFIFODEPTH / MAXIMUMCOMPRESSION/64));
+--
+2.43.0
+
--- /dev/null
+From ad40c94f523752fda278c5020c2f7fd23ba5126d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 7 Jun 2024 09:21:30 -0600
+Subject: drm/amd/display: Run DC_LOG_DC after checking link->link_enc
+
+From: Alex Hung <alex.hung@amd.com>
+
+[ Upstream commit 3a82f62b0d9d7687eac47603bb6cd14a50fa718b ]
+
+[WHAT]
+The DC_LOG_DC calls should be made after link->link_enc is checked, not before.
+
+This fixes 1 REVERSE_INULL issue reported by Coverity.
+
+Reviewed-by: Rodrigo Siqueira <rodrigo.siqueira@amd.com>
+Signed-off-by: Alex Hung <alex.hung@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/link/link_factory.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
+index cf22b8f28ba6..72df9bdfb23f 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c
++++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
+@@ -611,14 +611,14 @@ static bool construct_phy(struct dc_link *link,
+ link->link_enc =
+ link->dc->res_pool->funcs->link_enc_create(dc_ctx, &enc_init_data);
+
+- DC_LOG_DC("BIOS object table - DP_IS_USB_C: %d", link->link_enc->features.flags.bits.DP_IS_USB_C);
+- DC_LOG_DC("BIOS object table - IS_DP2_CAPABLE: %d", link->link_enc->features.flags.bits.IS_DP2_CAPABLE);
+-
+ if (!link->link_enc) {
+ DC_ERROR("Failed to create link encoder!\n");
+ goto link_enc_create_fail;
+ }
+
++ DC_LOG_DC("BIOS object table - DP_IS_USB_C: %d", link->link_enc->features.flags.bits.DP_IS_USB_C);
++ DC_LOG_DC("BIOS object table - IS_DP2_CAPABLE: %d", link->link_enc->features.flags.bits.IS_DP2_CAPABLE);
++
+ /* Update link encoder tracking variables. These are used for the dynamic
+ * assignment of link encoders to streams.
+ */
+--
+2.43.0
+
--- /dev/null
+From 0a4cb47677e06c840f31a735e42d72e099895860 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 Jun 2024 11:45:42 -0600
+Subject: drm/amd/display: Validate function returns
+
+From: Alex Hung <alex.hung@amd.com>
+
+[ Upstream commit 673f816b9e1e92d1f70e1bf5f21b531e0ff9ad6c ]
+
+[WHAT & HOW]
+Function return values must be checked before data can be used
+in subsequent functions.
+
+This fixes 4 CHECKED_RETURN issues reported by Coverity.
+
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Hung <alex.hung@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 7 +++++--
+ drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c | 3 ++-
+ .../drm/amd/display/dc/link/protocols/link_dp_training.c | 3 +--
+ 3 files changed, 8 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+index 2293a92df3be..22d2ab8ce7f8 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
++++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+@@ -245,7 +245,9 @@ bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int coun
+ if (status == DMUB_STATUS_POWER_STATE_D3)
+ return false;
+
+- dmub_srv_wait_for_idle(dmub, 100000);
++ status = dmub_srv_wait_for_idle(dmub, 100000);
++ if (status != DMUB_STATUS_OK)
++ return false;
+
+ /* Requeue the command. */
+ status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
+@@ -511,7 +513,8 @@ void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pi
+ union dmub_rb_cmd cmd = { 0 };
+ unsigned int panel_inst = 0;
+
+- dc_get_edp_link_panel_inst(dc, pipe_ctx->stream->link, &panel_inst);
++ if (!dc_get_edp_link_panel_inst(dc, pipe_ctx->stream->link, &panel_inst))
++ return;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
+index c6f859871d11..7e4ca2022d64 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
+@@ -595,7 +595,8 @@ static bool hubbub2_program_watermarks(
+ hubbub1->base.ctx->dc->clk_mgr->clks.p_state_change_support == false)
+ safe_to_lower = true;
+
+- hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
++ if (hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
++ wm_pending = true;
+
+ REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0,
+ DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
+diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+index b8e704dbe956..8c0dea6f75bf 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+@@ -1659,8 +1659,7 @@ bool perform_link_training_with_retries(
+ if (status == LINK_TRAINING_ABORT) {
+ enum dc_connection_type type = dc_connection_none;
+
+- link_detect_connection_type(link, &type);
+- if (type == dc_connection_none) {
++ if (link_detect_connection_type(link, &type) && type == dc_connection_none) {
+ DC_LOG_HW_LINK_TRAINING("%s: Aborting training because sink unplugged\n", __func__);
+ break;
+ }
+--
+2.43.0
+
--- /dev/null
+From 7895d7dff44af382c17bf39028d96ca27245e03b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Jun 2024 15:40:06 +0800
+Subject: drm/amdgpu: add missing error handling in function
+ amdgpu_gmc_flush_gpu_tlb_pasid
+
+From: Bob Zhou <bob.zhou@amd.com>
+
+[ Upstream commit 9ff2e14cf013fa887e269bdc5ea3cffacada8635 ]
+
+Fix the unchecked return value warning reported by Coverity by adding
+the missing error handling.
+
+Signed-off-by: Bob Zhou <bob.zhou@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+index 86b096ad0319..f4478f2d5305 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+@@ -720,7 +720,11 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
+ ndw += kiq->pmf->invalidate_tlbs_size;
+
+ spin_lock(&adev->gfx.kiq[inst].ring_lock);
+- amdgpu_ring_alloc(ring, ndw);
++ r = amdgpu_ring_alloc(ring, ndw);
++ if (r) {
++ spin_unlock(&adev->gfx.kiq[inst].ring_lock);
++ goto error_unlock_reset;
++ }
+ if (adev->gmc.flush_tlb_needs_extra_type_2)
+ kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 2, all_hub);
+
+--
+2.43.0
+
--- /dev/null
+From 8b5cbda83a66a322ec36212a1484cf99ce973e2b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Apr 2024 13:46:03 +0800
+Subject: drm/amdgpu: add mutex to protect ras shared memory
+
+From: YiPeng Chai <YiPeng.Chai@amd.com>
+
+[ Upstream commit b3fb79cda5688a44a423c27b791f5456d801e49c ]
+
+Add mutex to protect ras shared memory.
+
+v2:
+ Add TA_RAS_COMMAND__TRIGGER_ERROR command call
+ status check.
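+
+The core of the change is funnelling every command through one helper
+that holds a mutex around the shared TA buffer. A simplified sketch of
+that pattern follows (illustrative names; invoke_ta() stands in for
+psp_ras_invoke()):
+
+  #include <linux/mutex.h>
+  #include <linux/string.h>
+
+  struct ras_ctx {
+      struct mutex lock;       /* the new ras_context mutex */
+      void *shared_buf;        /* TA shared memory, one per device */
+      size_t shared_len;
+  };
+
+  static int invoke_ta(struct ras_ctx *ctx)
+  {
+      return 0;                /* stand-in for psp_ras_invoke() */
+  }
+
+  static int ras_send_cmd(struct ras_ctx *ctx, const void *in, size_t in_len,
+                          void *out, size_t out_len)
+  {
+      int ret;
+
+      mutex_lock(&ctx->lock);
+      memset(ctx->shared_buf, 0, ctx->shared_len);
+      memcpy(ctx->shared_buf, in, in_len);       /* build the request */
+      ret = invoke_ta(ctx);
+      if (!ret && out)
+          memcpy(out, ctx->shared_buf, out_len); /* copy the reply out */
+      mutex_unlock(&ctx->lock);
+
+      return ret;
+  }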
+
+Signed-off-by: YiPeng Chai <YiPeng.Chai@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 123 ++++++++++++++-------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 1 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c | 2 +
+ 3 files changed, 86 insertions(+), 40 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+index b3df27ce7663..ee19af2d20fb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+@@ -1584,6 +1584,68 @@ static void psp_ras_ta_check_status(struct psp_context *psp)
+ }
+ }
+
++static int psp_ras_send_cmd(struct psp_context *psp,
++ enum ras_command cmd_id, void *in, void *out)
++{
++ struct ta_ras_shared_memory *ras_cmd;
++ uint32_t cmd = cmd_id;
++ int ret = 0;
++
++ if (!in)
++ return -EINVAL;
++
++ mutex_lock(&psp->ras_context.mutex);
++ ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
++ memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
++
++ switch (cmd) {
++ case TA_RAS_COMMAND__ENABLE_FEATURES:
++ case TA_RAS_COMMAND__DISABLE_FEATURES:
++ memcpy(&ras_cmd->ras_in_message,
++ in, sizeof(ras_cmd->ras_in_message));
++ break;
++ case TA_RAS_COMMAND__TRIGGER_ERROR:
++ memcpy(&ras_cmd->ras_in_message.trigger_error,
++ in, sizeof(ras_cmd->ras_in_message.trigger_error));
++ break;
++ case TA_RAS_COMMAND__QUERY_ADDRESS:
++ memcpy(&ras_cmd->ras_in_message.address,
++ in, sizeof(ras_cmd->ras_in_message.address));
++ break;
++ default:
++ dev_err(psp->adev->dev, "Invalid ras cmd id: %u\n", cmd);
++ ret = -EINVAL;
++ goto err_out;
++ }
++
++ ras_cmd->cmd_id = cmd;
++ ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
++
++ switch (cmd) {
++ case TA_RAS_COMMAND__TRIGGER_ERROR:
++ if (ret || psp->cmd_buf_mem->resp.status)
++ ret = -EINVAL;
++ else if (out)
++ memcpy(out, &ras_cmd->ras_status, sizeof(ras_cmd->ras_status));
++ break;
++ case TA_RAS_COMMAND__QUERY_ADDRESS:
++ if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status)
++ ret = -EINVAL;
++ else if (out)
++ memcpy(out,
++ &ras_cmd->ras_out_message.address,
++ sizeof(ras_cmd->ras_out_message.address));
++ break;
++ default:
++ break;
++ }
++
++err_out:
++ mutex_unlock(&psp->ras_context.mutex);
++
++ return ret;
++}
++
+ int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
+ {
+ struct ta_ras_shared_memory *ras_cmd;
+@@ -1625,23 +1687,15 @@ int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
+ int psp_ras_enable_features(struct psp_context *psp,
+ union ta_ras_cmd_input *info, bool enable)
+ {
+- struct ta_ras_shared_memory *ras_cmd;
++ enum ras_command cmd_id;
+ int ret;
+
+- if (!psp->ras_context.context.initialized)
++ if (!psp->ras_context.context.initialized || !info)
+ return -EINVAL;
+
+- ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
+- memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
+-
+- if (enable)
+- ras_cmd->cmd_id = TA_RAS_COMMAND__ENABLE_FEATURES;
+- else
+- ras_cmd->cmd_id = TA_RAS_COMMAND__DISABLE_FEATURES;
+-
+- ras_cmd->ras_in_message = *info;
+-
+- ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
++ cmd_id = enable ?
++ TA_RAS_COMMAND__ENABLE_FEATURES : TA_RAS_COMMAND__DISABLE_FEATURES;
++ ret = psp_ras_send_cmd(psp, cmd_id, info, NULL);
+ if (ret)
+ return -EINVAL;
+
+@@ -1665,6 +1719,8 @@ int psp_ras_terminate(struct psp_context *psp)
+
+ psp->ras_context.context.initialized = false;
+
++ mutex_destroy(&psp->ras_context.mutex);
++
+ return ret;
+ }
+
+@@ -1749,9 +1805,10 @@ int psp_ras_initialize(struct psp_context *psp)
+
+ ret = psp_ta_load(psp, &psp->ras_context.context);
+
+- if (!ret && !ras_cmd->ras_status)
++ if (!ret && !ras_cmd->ras_status) {
+ psp->ras_context.context.initialized = true;
+- else {
++ mutex_init(&psp->ras_context.mutex);
++ } else {
+ if (ras_cmd->ras_status)
+ dev_warn(adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
+
+@@ -1765,12 +1822,12 @@ int psp_ras_initialize(struct psp_context *psp)
+ int psp_ras_trigger_error(struct psp_context *psp,
+ struct ta_ras_trigger_error_input *info, uint32_t instance_mask)
+ {
+- struct ta_ras_shared_memory *ras_cmd;
+ struct amdgpu_device *adev = psp->adev;
+ int ret;
+ uint32_t dev_mask;
++ uint32_t ras_status = 0;
+
+- if (!psp->ras_context.context.initialized)
++ if (!psp->ras_context.context.initialized || !info)
+ return -EINVAL;
+
+ switch (info->block_id) {
+@@ -1794,13 +1851,8 @@ int psp_ras_trigger_error(struct psp_context *psp,
+ dev_mask &= AMDGPU_RAS_INST_MASK;
+ info->sub_block_index |= dev_mask;
+
+- ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
+- memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
+-
+- ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
+- ras_cmd->ras_in_message.trigger_error = *info;
+-
+- ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
++ ret = psp_ras_send_cmd(psp,
++ TA_RAS_COMMAND__TRIGGER_ERROR, info, &ras_status);
+ if (ret)
+ return -EINVAL;
+
+@@ -1810,9 +1862,9 @@ int psp_ras_trigger_error(struct psp_context *psp,
+ if (amdgpu_ras_intr_triggered())
+ return 0;
+
+- if (ras_cmd->ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED)
++ if (ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED)
+ return -EACCES;
+- else if (ras_cmd->ras_status)
++ else if (ras_status)
+ return -EINVAL;
+
+ return 0;
+@@ -1822,25 +1874,16 @@ int psp_ras_query_address(struct psp_context *psp,
+ struct ta_ras_query_address_input *addr_in,
+ struct ta_ras_query_address_output *addr_out)
+ {
+- struct ta_ras_shared_memory *ras_cmd;
+ int ret;
+
+- if (!psp->ras_context.context.initialized)
+- return -EINVAL;
+-
+- ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
+- memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
+-
+- ras_cmd->cmd_id = TA_RAS_COMMAND__QUERY_ADDRESS;
+- ras_cmd->ras_in_message.address = *addr_in;
+-
+- ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
+- if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status)
++ if (!psp->ras_context.context.initialized ||
++ !addr_in || !addr_out)
+ return -EINVAL;
+
+- *addr_out = ras_cmd->ras_out_message.address;
++ ret = psp_ras_send_cmd(psp,
++ TA_RAS_COMMAND__QUERY_ADDRESS, addr_in, addr_out);
+
+- return 0;
++ return ret;
+ }
+ // ras end
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+index 3635303e6548..74a96516c913 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+@@ -200,6 +200,7 @@ struct psp_xgmi_context {
+ struct psp_ras_context {
+ struct ta_context context;
+ struct amdgpu_ras *ras;
++ struct mutex mutex;
+ };
+
+ #define MEM_TRAIN_SYSTEM_SIGNATURE 0x54534942
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
+index 9aff579c6abf..38face981c3e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
+@@ -351,6 +351,7 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size
+
+ context->session_id = ta_id;
+
++ mutex_lock(&psp->ras_context.mutex);
+ ret = prep_ta_mem_context(&context->mem_context, shared_buf, shared_buf_len);
+ if (ret)
+ goto err_free_shared_buf;
+@@ -369,6 +370,7 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size
+ ret = -EFAULT;
+
+ err_free_shared_buf:
++ mutex_unlock(&psp->ras_context.mutex);
+ kfree(shared_buf);
+
+ return ret;
+--
+2.43.0
+
--- /dev/null
+From 27cef7dc9ba9a2861305bec0d89e3b85721828cd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 22 Apr 2024 14:44:38 -0400
+Subject: drm/amdgpu: Add reset_context flag for host FLR
+
+From: Yunxiang Li <Yunxiang.Li@amd.com>
+
+[ Upstream commit 25c01191c2555351922e5515b6b6d31357975031 ]
+
+There are other reset sources that pass NULL as the job pointer, such as
+amdgpu_amdkfd_reset_work. Therefore, using the job pointer to check if
+the FLR comes from the host does not work.
+
+Add a flag in reset_context to explicitly mark a host-triggered reset,
+and set this flag when we receive the host reset notification.
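+
+A simplified sketch of the consumer side described above (the real code
+is in the diff below; this fragment only illustrates the shape of the
+change):
+
+  /* in amdgpu_device_reset_sriov(): decide how to request the GPU from
+   * the explicit flag instead of from whether a job pointer was passed */
+  if (test_bit(AMDGPU_HOST_FLR, &reset_context->flags)) {
+      /* consume the bit so a later retry does not re-enter this branch */
+      clear_bit(AMDGPU_HOST_FLR, &reset_context->flags);
+      r = amdgpu_virt_request_full_gpu(adev, true);
+  } else {
+      r = amdgpu_virt_reset_gpu(adev);
+  }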
+
+Signed-off-by: Yunxiang Li <Yunxiang.Li@amd.com>
+Reviewed-by: Emily Deng <Emily.Deng@amd.com>
+Reviewed-by: Zhigang Luo <zhigang.luo@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Stable-dep-of: 6e4aa08fa9c6 ("drm/amdgpu: Fix amdgpu_device_reset_sriov retry logic")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 13 ++++++++-----
+ drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h | 1 +
+ drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 1 +
+ drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c | 1 +
+ drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c | 1 +
+ 5 files changed, 12 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 9f7f96be1ac7..bd6f2aba0662 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -5057,13 +5057,13 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
+ * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
+ *
+ * @adev: amdgpu_device pointer
+- * @from_hypervisor: request from hypervisor
++ * @reset_context: amdgpu reset context pointer
+ *
+ * do VF FLR and reinitialize Asic
+ * return 0 means succeeded otherwise failed
+ */
+ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
+- bool from_hypervisor)
++ struct amdgpu_reset_context *reset_context)
+ {
+ int r;
+ struct amdgpu_hive_info *hive = NULL;
+@@ -5072,12 +5072,15 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
+ retry:
+ amdgpu_amdkfd_pre_reset(adev);
+
+- if (from_hypervisor)
++ if (test_bit(AMDGPU_HOST_FLR, &reset_context->flags)) {
++ clear_bit(AMDGPU_HOST_FLR, &reset_context->flags);
+ r = amdgpu_virt_request_full_gpu(adev, true);
+- else
++ } else {
+ r = amdgpu_virt_reset_gpu(adev);
++ }
+ if (r)
+ return r;
++
+ amdgpu_ras_set_fed(adev, false);
+ amdgpu_irq_gpu_reset_resume_helper(adev);
+
+@@ -5831,7 +5834,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ /* Actual ASIC resets if needed.*/
+ /* Host driver will handle XGMI hive reset for SRIOV */
+ if (amdgpu_sriov_vf(adev)) {
+- r = amdgpu_device_reset_sriov(adev, job ? false : true);
++ r = amdgpu_device_reset_sriov(adev, reset_context);
+ if (r)
+ adev->asic_reset_res = r;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
+index b11d190ece53..5a9cc043b858 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
+@@ -33,6 +33,7 @@ enum AMDGPU_RESET_FLAGS {
+ AMDGPU_NEED_FULL_RESET = 0,
+ AMDGPU_SKIP_HW_RESET = 1,
+ AMDGPU_SKIP_COREDUMP = 2,
++ AMDGPU_HOST_FLR = 3,
+ };
+
+ struct amdgpu_reset_context {
+diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+index c5ba9c4757a8..f4c47492e0cd 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
++++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+@@ -292,6 +292,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
+ reset_context.method = AMD_RESET_METHOD_NONE;
+ reset_context.reset_req_dev = adev;
+ clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
++ set_bit(AMDGPU_HOST_FLR, &reset_context.flags);
+
+ amdgpu_device_gpu_recover(adev, NULL, &reset_context);
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+index fa9d1b02f391..14cc7910e5cf 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
++++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+@@ -328,6 +328,7 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
+ reset_context.method = AMD_RESET_METHOD_NONE;
+ reset_context.reset_req_dev = adev;
+ clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
++ set_bit(AMDGPU_HOST_FLR, &reset_context.flags);
+
+ amdgpu_device_gpu_recover(adev, NULL, &reset_context);
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
+index 14a065516ae4..78cd07744ebe 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
++++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
+@@ -529,6 +529,7 @@ static void xgpu_vi_mailbox_flr_work(struct work_struct *work)
+ reset_context.method = AMD_RESET_METHOD_NONE;
+ reset_context.reset_req_dev = adev;
+ clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
++ set_bit(AMDGPU_HOST_FLR, &reset_context.flags);
+
+ amdgpu_device_gpu_recover(adev, NULL, &reset_context);
+ }
+--
+2.43.0
+
--- /dev/null
+From be31cfcf9b875400d6c9a336d2051a4fafc5f0b6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 1 Jun 2024 16:36:27 -0400
+Subject: drm/amdgpu: check for LINEAR_ALIGNED correctly in
+ check_tiling_flags_gfx6
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Marek Olšák <marek.olsak@amd.com>
+
+[ Upstream commit 11317d2963fa79767cd7c6231a00a9d77f2e0f54 ]
+
+Fix incorrect check.
+
+Signed-off-by: Marek Olšák <marek.olsak@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+index 3ecc7ef95172..4fcc227db00b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+@@ -917,8 +917,7 @@ static int check_tiling_flags_gfx6(struct amdgpu_framebuffer *afb)
+ {
+ u64 micro_tile_mode;
+
+- /* Zero swizzle mode means linear */
+- if (AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0)
++ if (AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) == 1) /* LINEAR_ALIGNED */
+ return 0;
+
+ micro_tile_mode = AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE);
+--
+2.43.0
+
--- /dev/null
+From 06d8449e5534813432d1eca18706a7923e443978 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Jun 2024 07:58:24 +0200
+Subject: drm/amdgpu: clear RB_OVERFLOW bit when enabling interrupts
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Danijel Slivka <danijel.slivka@amd.com>
+
+[ Upstream commit afbf7955ff01e952dbdd465fa25a2ba92d00291c ]
+
+Why:
+Setting the IH_RB_WPTR register to 0 will not clear the RB_OVERFLOW bit
+if RB_ENABLE is not set.
+
+How to fix:
+Set the WPTR_OVERFLOW_CLEAR bit after the RB_ENABLE bit is set.
+The RB_ENABLE bit must be set, together with the WPTR_OVERFLOW_ENABLE
+bit, so that setting the WPTR_OVERFLOW_CLEAR bit actually clears
+RB_OVERFLOW.
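+
+In short, the fix strobes WPTR_OVERFLOW_CLEAR while the ring is being
+enabled. A simplified sketch of the sequence in the diff below, using
+the same REG_SET_FIELD helper and a plain register write for brevity
+(the actual code also handles the SR-IOV indirect-programming path):
+
+  tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, 1);
+  tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0); /* start low */
+  WREG32(ih_regs->ih_rb_cntl, tmp);
+  tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); /* 0 -> 1 clears RB_OVERFLOW */
+  WREG32(ih_regs->ih_rb_cntl, tmp);
+  tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0); /* re-arm for the next overflow */
+  WREG32(ih_regs->ih_rb_cntl, tmp);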
+
+Signed-off-by: Danijel Slivka <danijel.slivka@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/ih_v6_0.c | 28 ++++++++++++++++++++++++++++
+ 1 file changed, 28 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
+index 3cb64c8f7175..18a761d6ef33 100644
+--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
+@@ -135,6 +135,34 @@ static int ih_v6_0_toggle_ring_interrupts(struct amdgpu_device *adev,
+
+ tmp = RREG32(ih_regs->ih_rb_cntl);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
++
++ if (enable) {
++ /* Unset the CLEAR_OVERFLOW bit to make sure the next step
++ * is switching the bit from 0 to 1
++ */
++ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
++ if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
++ if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
++ return -ETIMEDOUT;
++ } else {
++ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
++ }
++
++ /* Clear RB_OVERFLOW bit */
++ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
++ if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
++ if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
++ return -ETIMEDOUT;
++ } else {
++ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
++ }
++
++ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
++ * can be detected.
++ */
++ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
++ }
++
+ /* enable_intr field is only valid in ring0 */
+ if (ih == &adev->irq.ih)
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
+--
+2.43.0
+
--- /dev/null
+From c80ea1da8e6196180d7d9462e6110c92fff740e6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 28 Jun 2024 16:47:36 +0800
+Subject: drm/amdgpu: Correct register used to clear fault status
+
+From: Hawking Zhang <Hawking.Zhang@amd.com>
+
+[ Upstream commit c2fad7317441be573175c4d98b28347ddec7fe77 ]
+
+The driver should write to the fault_cntl registers to do a one-shot
+address/status clear.
+
+Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Reviewed-by: Tao Zhou <tao.zhou1@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
+index 8d7267a013d2..621761a17ac7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
+@@ -569,7 +569,7 @@ static bool mmhub_v1_8_query_utcl2_poison_status(struct amdgpu_device *adev,
+ if (!amdgpu_sriov_vf(adev)) {
+ /* clear page fault status and address */
+ WREG32_P(SOC15_REG_OFFSET(MMHUB, hub_inst,
+- regVM_L2_PROTECTION_FAULT_STATUS), 1, ~1);
++ regVM_L2_PROTECTION_FAULT_CNTL), 1, ~1);
+ }
+
+ return fed;
+--
+2.43.0
+
--- /dev/null
+From bc6f0056528f683623d978d31046c6377e35493f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 1 Jun 2024 19:59:34 -0400
+Subject: drm/amdgpu/display: handle gfx12 in
+ amdgpu_dm_plane_format_mod_supported
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Marek Olšák <marek.olsak@amd.com>
+
+[ Upstream commit ed17b63e7e25f03b40db66a8d5802b89aac40441 ]
+
+All this code has undefined behavior on GFX12 and shouldn't be executed.
+
+Signed-off-by: Marek Olšák <marek.olsak@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Aurabindo Pillai <aurabindo.pillai@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../amd/display/amdgpu_dm/amdgpu_dm_plane.c | 47 ++++++++++---------
+ 1 file changed, 25 insertions(+), 22 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+index 70e45d980bb9..7d47acdd11d5 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+@@ -1400,8 +1400,6 @@ static bool amdgpu_dm_plane_format_mod_supported(struct drm_plane *plane,
+ const struct drm_format_info *info = drm_format_info(format);
+ int i;
+
+- enum dm_micro_swizzle microtile = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier) & 3;
+-
+ if (!info)
+ return false;
+
+@@ -1423,29 +1421,34 @@ static bool amdgpu_dm_plane_format_mod_supported(struct drm_plane *plane,
+ if (i == plane->modifier_count)
+ return false;
+
+- /*
+- * For D swizzle the canonical modifier depends on the bpp, so check
+- * it here.
+- */
+- if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
+- adev->family >= AMDGPU_FAMILY_NV) {
+- if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
+- return false;
+- }
+-
+- if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
+- info->cpp[0] < 8)
+- return false;
++ /* GFX12 doesn't have these limitations. */
++ if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) <= AMD_FMT_MOD_TILE_VER_GFX11) {
++ enum dm_micro_swizzle microtile = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier) & 3;
+
+- if (amdgpu_dm_plane_modifier_has_dcc(modifier)) {
+- /* Per radeonsi comments 16/64 bpp are more complicated. */
+- if (info->cpp[0] != 4)
+- return false;
+- /* We support multi-planar formats, but not when combined with
+- * additional DCC metadata planes.
++ /*
++ * For D swizzle the canonical modifier depends on the bpp, so check
++ * it here.
+ */
+- if (info->num_planes > 1)
++ if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
++ adev->family >= AMDGPU_FAMILY_NV) {
++ if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
++ return false;
++ }
++
++ if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
++ info->cpp[0] < 8)
+ return false;
++
++ if (amdgpu_dm_plane_modifier_has_dcc(modifier)) {
++ /* Per radeonsi comments 16/64 bpp are more complicated. */
++ if (info->cpp[0] != 4)
++ return false;
++ /* We support multi-planar formats, but not when combined with
++ * additional DCC metadata planes.
++ */
++ if (info->num_planes > 1)
++ return false;
++ }
+ }
+
+ return true;
+--
+2.43.0
+
--- /dev/null
+From a830873dbe170cf60ca06d05f18b60ba52f5d28b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 22 Apr 2024 15:04:52 -0400
+Subject: drm/amdgpu: Fix amdgpu_device_reset_sriov retry logic
+
+From: Yunxiang Li <Yunxiang.Li@amd.com>
+
+[ Upstream commit 6e4aa08fa9c6c0c027fc86f242517c925d159393 ]
+
+The retry loop for SRIOV reset has refcount and memory leak issues.
+Depending on which function call fails, it can potentially call
+amdgpu_amdkfd_pre/post_reset a different number of times, causing the
+kfd_locked count to be wrong. This will block all future attempts at
+opening /dev/kfd. The retry loop also leaks resources by calling
+amdgpu_virt_init_data_exchange multiple times without calling the
+corresponding fini function.
+
+Align with the bare-metal reset path, which doesn't have these issues.
+This means taking the amdgpu_amdkfd_pre/post_reset functions out of the
+reset loop and calling amdgpu_device_pre_asic_reset on each retry, which
+properly frees the resources from the previous try by calling
+amdgpu_virt_fini_data_exchange.
+
+Signed-off-by: Yunxiang Li <Yunxiang.Li@amd.com>
+Reviewed-by: Emily Deng <Emily.Deng@amd.com>
+Reviewed-by: Zhigang Luo <zhigang.luo@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 47 ++++++++++------------
+ 1 file changed, 22 insertions(+), 25 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index bd6f2aba0662..e66546df0bc1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -5067,10 +5067,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
+ {
+ int r;
+ struct amdgpu_hive_info *hive = NULL;
+- int retry_limit = 0;
+-
+-retry:
+- amdgpu_amdkfd_pre_reset(adev);
+
+ if (test_bit(AMDGPU_HOST_FLR, &reset_context->flags)) {
+ clear_bit(AMDGPU_HOST_FLR, &reset_context->flags);
+@@ -5090,7 +5086,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
+ /* Resume IP prior to SMC */
+ r = amdgpu_device_ip_reinit_early_sriov(adev);
+ if (r)
+- goto error;
++ return r;
+
+ amdgpu_virt_init_data_exchange(adev);
+
+@@ -5101,38 +5097,35 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
+ /* now we are okay to resume SMC/CP/SDMA */
+ r = amdgpu_device_ip_reinit_late_sriov(adev);
+ if (r)
+- goto error;
++ return r;
+
+ hive = amdgpu_get_xgmi_hive(adev);
+ /* Update PSP FW topology after reset */
+ if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
+ r = amdgpu_xgmi_update_topology(hive, adev);
+-
+ if (hive)
+ amdgpu_put_xgmi_hive(hive);
++ if (r)
++ return r;
+
+- if (!r) {
+- r = amdgpu_ib_ring_tests(adev);
+-
+- amdgpu_amdkfd_post_reset(adev);
+- }
++ r = amdgpu_ib_ring_tests(adev);
++ if (r)
++ return r;
+
+-error:
+- if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
++ if (adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
+ amdgpu_inc_vram_lost(adev);
+ r = amdgpu_device_recover_vram(adev);
+ }
+- amdgpu_virt_release_full_gpu(adev, true);
++ if (r)
++ return r;
+
+- if (AMDGPU_RETRY_SRIOV_RESET(r)) {
+- if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) {
+- retry_limit++;
+- goto retry;
+- } else
+- DRM_ERROR("GPU reset retry is beyond the retry limit\n");
+- }
++ /* need to be called during full access so we can't do it later like
++ * bare-metal does.
++ */
++ amdgpu_amdkfd_post_reset(adev);
++ amdgpu_virt_release_full_gpu(adev, true);
+
+- return r;
++ return 0;
+ }
+
+ /**
+@@ -5694,6 +5687,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ int i, r = 0;
+ bool need_emergency_restart = false;
+ bool audio_suspended = false;
++ int retry_limit = AMDGPU_MAX_RETRY_LIMIT;
+
+ /*
+ * Special case: RAS triggered and full reset isn't supported
+@@ -5775,8 +5769,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+
+ cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
+
+- if (!amdgpu_sriov_vf(tmp_adev))
+- amdgpu_amdkfd_pre_reset(tmp_adev);
++ amdgpu_amdkfd_pre_reset(tmp_adev);
+
+ /*
+ * Mark these ASICs to be reseted as untracked first
+@@ -5835,6 +5828,10 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ /* Host driver will handle XGMI hive reset for SRIOV */
+ if (amdgpu_sriov_vf(adev)) {
+ r = amdgpu_device_reset_sriov(adev, reset_context);
++ if (AMDGPU_RETRY_SRIOV_RESET(r) && (retry_limit--) > 0) {
++ amdgpu_virt_release_full_gpu(adev, true);
++ goto retry;
++ }
+ if (r)
+ adev->asic_reset_res = r;
+
+--
+2.43.0
+
--- /dev/null
+From 5873d3e6bc5f72d6609afe3656cc3c4b9c32e21f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Jun 2024 11:25:30 +0800
+Subject: drm/amdgpu: Fix register access violation
+
+From: Hawking Zhang <Hawking.Zhang@amd.com>
+
+[ Upstream commit 9da0f7736763aa0fbf63bb15060c6827135f3f67 ]
+
+fault_status is read only register. fault_cntl
+is not accessible from guest environment.
+
+Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Reviewed-by: Tao Zhou <tao.zhou1@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c | 8 +++++---
+ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 3 ++-
+ drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 8 +++++---
+ 3 files changed, 12 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
+index 77df8c9cbad2..9e10e552952e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
+@@ -627,9 +627,11 @@ static bool gfxhub_v1_2_query_utcl2_poison_status(struct amdgpu_device *adev,
+
+ status = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regVM_L2_PROTECTION_FAULT_STATUS);
+ fed = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED);
+- /* reset page fault status */
+- WREG32_P(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id),
+- regVM_L2_PROTECTION_FAULT_STATUS), 1, ~1);
++ if (!amdgpu_sriov_vf(adev)) {
++ /* clear page fault status and address */
++ WREG32_P(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id),
++ regVM_L2_PROTECTION_FAULT_CNTL), 1, ~1);
++ }
+
+ return fed;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index f7f492475102..bd55a7e43f07 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -671,7 +671,8 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
+ (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2)))
+ return 0;
+
+- WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
++ if (!amdgpu_sriov_vf(adev))
++ WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
+
+ amdgpu_vm_update_fault_cache(adev, entry->pasid, addr, status, vmhub);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
+index 7a1ff298417a..8d7267a013d2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
+@@ -566,9 +566,11 @@ static bool mmhub_v1_8_query_utcl2_poison_status(struct amdgpu_device *adev,
+
+ status = RREG32_SOC15(MMHUB, hub_inst, regVM_L2_PROTECTION_FAULT_STATUS);
+ fed = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED);
+- /* reset page fault status */
+- WREG32_P(SOC15_REG_OFFSET(MMHUB, hub_inst,
+- regVM_L2_PROTECTION_FAULT_STATUS), 1, ~1);
++ if (!amdgpu_sriov_vf(adev)) {
++ /* clear page fault status and address */
++ WREG32_P(SOC15_REG_OFFSET(MMHUB, hub_inst,
++ regVM_L2_PROTECTION_FAULT_STATUS), 1, ~1);
++ }
+
+ return fed;
+ }
+--
+2.43.0
+
--- /dev/null
+From d462e7b89f20f2ee68ddf8e39da95f50cfccedd5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Jun 2024 17:53:30 +0800
+Subject: drm/amdgpu: Fix smatch static checker warning
+
+From: Hawking Zhang <Hawking.Zhang@amd.com>
+
+[ Upstream commit bdbdc7cecd00305dc844a361f9883d3a21022027 ]
+
+adev->gfx.imu.funcs could be NULL
+
+Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Reviewed-by: Likun Gao <Likun.Gao@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+index ad6431013c73..4ba8eb45ac17 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+@@ -4293,11 +4293,11 @@ static int gfx_v11_0_hw_init(void *handle)
+ /* RLC autoload sequence 1: Program rlc ram */
+ if (adev->gfx.imu.funcs->program_rlc_ram)
+ adev->gfx.imu.funcs->program_rlc_ram(adev);
++ /* rlc autoload firmware */
++ r = gfx_v11_0_rlc_backdoor_autoload_enable(adev);
++ if (r)
++ return r;
+ }
+- /* rlc autoload firmware */
+- r = gfx_v11_0_rlc_backdoor_autoload_enable(adev);
+- if (r)
+- return r;
+ } else {
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
+ if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) {
+--
+2.43.0
+
--- /dev/null
+From f10edfbeebf015d3efb54cb1c7b0c556fd52721e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 22 Apr 2024 14:59:02 -0400
+Subject: drm/amdgpu: Fix two reset triggered in a row
+
+From: Yunxiang Li <Yunxiang.Li@amd.com>
+
+[ Upstream commit f4322b9f8ad5f9f62add288c785d2e10bb6a5efe ]
+
+Sometimes a hung GPU causes multiple reset sources to schedule resets.
+A second source can trigger an unnecessary reset if it schedules one
+after we call amdgpu_device_stop_pending_resets.
+
+Move amdgpu_device_stop_pending_resets to after the reset is done. Since
+at this point the GPU is supposedly in a good state, any reset scheduled
+after this point would be a legitimate reset.
+
+Remove the unnecessary and incorrect checks for amdgpu_in_reset that
+were loosely serving this purpose.
+
+Signed-off-by: Yunxiang Li <Yunxiang.Li@amd.com>
+Reviewed-by: Lijo Lazar <lijo.lazar@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Stable-dep-of: 6e4aa08fa9c6 ("drm/amdgpu: Fix amdgpu_device_reset_sriov retry logic")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 19 ++++++++++---------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c | 2 +-
+ 5 files changed, 14 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index d24d7a108624..9f7f96be1ac7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -5072,8 +5072,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
+ retry:
+ amdgpu_amdkfd_pre_reset(adev);
+
+- amdgpu_device_stop_pending_resets(adev);
+-
+ if (from_hypervisor)
+ r = amdgpu_virt_request_full_gpu(adev, true);
+ else
+@@ -5828,13 +5826,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ r, adev_to_drm(tmp_adev)->unique);
+ tmp_adev->asic_reset_res = r;
+ }
+-
+- if (!amdgpu_sriov_vf(tmp_adev))
+- /*
+- * Drop all pending non scheduler resets. Scheduler resets
+- * were already dropped during drm_sched_stop
+- */
+- amdgpu_device_stop_pending_resets(tmp_adev);
+ }
+
+ /* Actual ASIC resets if needed.*/
+@@ -5856,6 +5847,16 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ goto retry;
+ }
+
++ list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
++ /*
++ * Drop any pending non scheduler resets queued before reset is done.
++ * Any reset scheduled after this point would be valid. Scheduler resets
++ * were already dropped during drm_sched_stop and no new ones can come
++ * in before drm_sched_start.
++ */
++ amdgpu_device_stop_pending_resets(tmp_adev);
++ }
++
+ skip_hw_reset:
+
+ /* Post ASIC reset for all devs .*/
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+index 26cea0076c9b..e12d179a451b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+@@ -601,7 +601,7 @@ static void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work)
+ if (ret) {
+ adev->virt.vf2pf_update_retry_cnt++;
+ if ((adev->virt.vf2pf_update_retry_cnt >= AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT) &&
+- amdgpu_sriov_runtime(adev) && !amdgpu_in_reset(adev)) {
++ amdgpu_sriov_runtime(adev)) {
+ amdgpu_ras_set_fed(adev, true);
+ if (amdgpu_reset_domain_schedule(adev->reset_domain,
+ &adev->kfd.reset_work))
+diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+index 0c7275bca8f7..c5ba9c4757a8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
++++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+@@ -319,7 +319,7 @@ static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
+
+ switch (event) {
+ case IDH_FLR_NOTIFICATION:
+- if (amdgpu_sriov_runtime(adev) && !amdgpu_in_reset(adev))
++ if (amdgpu_sriov_runtime(adev))
+ WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
+ &adev->virt.flr_work),
+ "Failed to queue work! at %s",
+diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+index aba00d961627..fa9d1b02f391 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
++++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+@@ -358,7 +358,7 @@ static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
+
+ switch (event) {
+ case IDH_FLR_NOTIFICATION:
+- if (amdgpu_sriov_runtime(adev) && !amdgpu_in_reset(adev))
++ if (amdgpu_sriov_runtime(adev))
+ WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
+ &adev->virt.flr_work),
+ "Failed to queue work! at %s",
+diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
+index 59f53c743362..14a065516ae4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
++++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
+@@ -560,7 +560,7 @@ static int xgpu_vi_mailbox_rcv_irq(struct amdgpu_device *adev,
+ r = xgpu_vi_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);
+
+ /* only handle FLR_NOTIFY now */
+- if (!r && !amdgpu_in_reset(adev))
++ if (!r)
+ WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
+ &adev->virt.flr_work),
+ "Failed to queue work! at %s",
+--
+2.43.0
+
--- /dev/null
+From 2a11cb6485f5507062a3ab9bf1ffb21ed7da30cb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 19 Jan 2024 14:57:29 +0100
+Subject: drm/amdgpu: reject gang submit on reserved VMIDs
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Christian König <christian.koenig@amd.com>
+
+[ Upstream commit 320debca1ba3a81c87247eac84eff976ead09ee0 ]
+
+A gang submit won't work if the VMID is reserved and we can't flush out
+VM changes from multiple engines at the same time.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 15 +++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 15 ++++++++++++++-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h | 1 +
+ 3 files changed, 30 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 936c98a13a24..6dfdff58bffd 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -1096,6 +1096,21 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
+ unsigned int i;
+ int r;
+
++ /*
++ * We can't use gang submit on with reserved VMIDs when the VM changes
++ * can't be invalidated by more than one engine at the same time.
++ */
++ if (p->gang_size > 1 && !p->adev->vm_manager.concurrent_flush) {
++ for (i = 0; i < p->gang_size; ++i) {
++ struct drm_sched_entity *entity = p->entities[i];
++ struct drm_gpu_scheduler *sched = entity->rq->sched;
++ struct amdgpu_ring *ring = to_amdgpu_ring(sched);
++
++ if (amdgpu_vmid_uses_reserved(vm, ring->vm_hub))
++ return -EINVAL;
++ }
++ }
++
+ r = amdgpu_vm_clear_freed(adev, vm, NULL);
+ if (r)
+ return r;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+index 3d7fcdeaf8cf..e8f6e4dbc5a4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+@@ -406,7 +406,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+ if (r || !idle)
+ goto error;
+
+- if (vm->reserved_vmid[vmhub] || (enforce_isolation && (vmhub == AMDGPU_GFXHUB(0)))) {
++ if (amdgpu_vmid_uses_reserved(vm, vmhub)) {
+ r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence);
+ if (r || !id)
+ goto error;
+@@ -456,6 +456,19 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+ return r;
+ }
+
++/*
++ * amdgpu_vmid_uses_reserved - check if a VM will use a reserved VMID
++ * @vm: the VM to check
++ * @vmhub: the VMHUB which will be used
++ *
++ * Returns: True if the VM will use a reserved VMID.
++ */
++bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub)
++{
++ return vm->reserved_vmid[vmhub] ||
++ (enforce_isolation && (vmhub == AMDGPU_GFXHUB(0)));
++}
++
+ int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
+ unsigned vmhub)
+ {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+index fa8c42c83d5d..240fa6751260 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+@@ -78,6 +78,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
+
+ bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
+ struct amdgpu_vmid *id);
++bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub);
+ int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
+ unsigned vmhub);
+ void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
+--
+2.43.0
+
--- /dev/null
+From 5fe6394d6484afff0209e069d68063eb3a38f996 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 27 Jun 2024 15:06:23 +0800
+Subject: drm/amdgpu: Set no_hw_access when VF request full GPU fails
+
+From: Yifan Zha <Yifan.Zha@amd.com>
+
+[ Upstream commit 33f23fc3155b13c4a96d94a0a22dc26db767440b ]
+
+[Why]
+If the VF requests full GPU access and the request fails,
+the VF driver can get stuck accessing registers for an extended period during
+the unload of KMS.
+
+[How]
+Set the no_hw_access flag when the VF's request for full GPU access fails.
+This prevents further hardware access attempts, avoiding the prolonged
+stuck state.
+
+Signed-off-by: Yifan Zha <Yifan.Zha@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+index 2359d1d60275..26cea0076c9b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+@@ -86,8 +86,10 @@ int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
+
+ if (virt->ops && virt->ops->req_full_gpu) {
+ r = virt->ops->req_full_gpu(adev, init);
+- if (r)
++ if (r) {
++ adev->no_hw_access = true;
+ return r;
++ }
+
+ adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
+ }
+--
+2.43.0
+
--- /dev/null
+From 6dcdb6c1b22826f1868d171bbb8b17ef333f046e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Jun 2024 21:54:50 +0300
+Subject: ELF: fix kernel.randomize_va_space double read
+
+From: Alexey Dobriyan <adobriyan@gmail.com>
+
+[ Upstream commit 2a97388a807b6ab5538aa8f8537b2463c6988bd2 ]
+
+The ELF loader reads "randomize_va_space" twice. It is a sysctl and can change
+at any moment, so the two loads could in theory observe two different values,
+with unpredictable consequences.
+
+Issue exactly one load so the value stays consistent across a single exec.
+
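+A minimal user-space sketch of the pattern (the names and the atomic type are
+illustrative, not the kernel implementation): take one snapshot of the tunable
+and base every later decision on that snapshot, which is what the READ_ONCE()
+change below does for randomize_va_space.
+
+  #include <stdatomic.h>
+  #include <stdbool.h>
+
+  /* Tunable that another thread (the "sysctl writer") may change at any time. */
+  static _Atomic int randomize_va_space_knob = 2;
+
+  struct layout {
+          bool randomize;      /* stands in for PF_RANDOMIZE */
+          bool randomize_brk;  /* must agree with the same snapshot */
+  };
+
+  static void setup_layout(struct layout *l, bool addr_no_randomize)
+  {
+          /* Read the knob exactly once; both decisions use the same value. */
+          int snapshot = atomic_load_explicit(&randomize_va_space_knob,
+                                              memory_order_relaxed);
+
+          l->randomize = !addr_no_randomize && snapshot != 0;
+          l->randomize_brk = l->randomize && snapshot > 1;
+  }
+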
+Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
+Link: https://lore.kernel.org/r/3329905c-7eb8-400a-8f0a-d87cff979b5b@p183
+Signed-off-by: Kees Cook <kees@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/binfmt_elf.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index a43897b03ce9..777405719de8 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -1003,7 +1003,8 @@ static int load_elf_binary(struct linux_binprm *bprm)
+ if (elf_read_implies_exec(*elf_ex, executable_stack))
+ current->personality |= READ_IMPLIES_EXEC;
+
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
++ const int snapshot_randomize_va_space = READ_ONCE(randomize_va_space);
++ if (!(current->personality & ADDR_NO_RANDOMIZE) && snapshot_randomize_va_space)
+ current->flags |= PF_RANDOMIZE;
+
+ setup_new_exec(bprm);
+@@ -1251,7 +1252,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
+ mm->end_data = end_data;
+ mm->start_stack = bprm->p;
+
+- if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
++ if ((current->flags & PF_RANDOMIZE) && (snapshot_randomize_va_space > 1)) {
+ /*
+ * For architectures with ELF randomization, when executing
+ * a loader directly (i.e. no interpreter listed in ELF
+--
+2.43.0
+
--- /dev/null
+From 50de10924c81e317f11454c07e26c3de7e70f72d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Jul 2024 10:40:42 -0700
+Subject: ethtool: fail closed if we can't get max channel used in indirection
+ tables
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 2899d58462ba868287d6ff3acad3675e7adf934f ]
+
+Commit 0d1b7d6c9274 ("bnxt: fix crashes when reducing ring count with
+active RSS contexts") proves that allowing the indirection table to contain
+channels with out-of-bounds IDs may lead to crashes. Currently the
+max channel check in the core gets skipped if the driver can't fetch
+the indirection table or when we can't allocate memory.
+
+Both of those conditions should be extremely rare, but if they do
+happen we should try to be safe and fail the channel change.
+Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
+Link: https://patch.msgid.link/20240710174043.754664-2-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ethtool/channels.c | 6 ++----
+ net/ethtool/common.c | 26 +++++++++++++++-----------
+ net/ethtool/common.h | 2 +-
+ net/ethtool/ioctl.c | 4 +---
+ 4 files changed, 19 insertions(+), 19 deletions(-)
+
+diff --git a/net/ethtool/channels.c b/net/ethtool/channels.c
+index 7b4bbd674bae..cee188da54f8 100644
+--- a/net/ethtool/channels.c
++++ b/net/ethtool/channels.c
+@@ -171,11 +171,9 @@ ethnl_set_channels(struct ethnl_req_info *req_info, struct genl_info *info)
+ */
+ if (ethtool_get_max_rxnfc_channel(dev, &max_rxnfc_in_use))
+ max_rxnfc_in_use = 0;
+- if (!netif_is_rxfh_configured(dev) ||
+- ethtool_get_max_rxfh_channel(dev, &max_rxfh_in_use))
+- max_rxfh_in_use = 0;
++ max_rxfh_in_use = ethtool_get_max_rxfh_channel(dev);
+ if (channels.combined_count + channels.rx_count <= max_rxfh_in_use) {
+- GENL_SET_ERR_MSG(info, "requested channel counts are too low for existing indirection table settings");
++ GENL_SET_ERR_MSG_FMT(info, "requested channel counts are too low for existing indirection table (%d)", max_rxfh_in_use);
+ return -EINVAL;
+ }
+ if (channels.combined_count + channels.rx_count <= max_rxnfc_in_use) {
+diff --git a/net/ethtool/common.c b/net/ethtool/common.c
+index 6b2a360dcdf0..8a62375ebd1f 100644
+--- a/net/ethtool/common.c
++++ b/net/ethtool/common.c
+@@ -587,35 +587,39 @@ int ethtool_get_max_rxnfc_channel(struct net_device *dev, u64 *max)
+ return err;
+ }
+
+-int ethtool_get_max_rxfh_channel(struct net_device *dev, u32 *max)
++u32 ethtool_get_max_rxfh_channel(struct net_device *dev)
+ {
+ struct ethtool_rxfh_param rxfh = {};
+- u32 dev_size, current_max = 0;
++ u32 dev_size, current_max;
+ int ret;
+
++ if (!netif_is_rxfh_configured(dev))
++ return 0;
++
+ if (!dev->ethtool_ops->get_rxfh_indir_size ||
+ !dev->ethtool_ops->get_rxfh)
+- return -EOPNOTSUPP;
++ return 0;
+ dev_size = dev->ethtool_ops->get_rxfh_indir_size(dev);
+ if (dev_size == 0)
+- return -EOPNOTSUPP;
++ return 0;
+
+ rxfh.indir = kcalloc(dev_size, sizeof(rxfh.indir[0]), GFP_USER);
+ if (!rxfh.indir)
+- return -ENOMEM;
++ return U32_MAX;
+
+ ret = dev->ethtool_ops->get_rxfh(dev, &rxfh);
+- if (ret)
+- goto out;
++ if (ret) {
++ current_max = U32_MAX;
++ goto out_free;
++ }
+
++ current_max = 0;
+ while (dev_size--)
+ current_max = max(current_max, rxfh.indir[dev_size]);
+
+- *max = current_max;
+-
+-out:
++out_free:
+ kfree(rxfh.indir);
+- return ret;
++ return current_max;
+ }
+
+ int ethtool_check_ops(const struct ethtool_ops *ops)
+diff --git a/net/ethtool/common.h b/net/ethtool/common.h
+index 28b8aaaf9bcb..b55705a9ad5a 100644
+--- a/net/ethtool/common.h
++++ b/net/ethtool/common.h
+@@ -42,7 +42,7 @@ int __ethtool_get_link(struct net_device *dev);
+ bool convert_legacy_settings_to_link_ksettings(
+ struct ethtool_link_ksettings *link_ksettings,
+ const struct ethtool_cmd *legacy_settings);
+-int ethtool_get_max_rxfh_channel(struct net_device *dev, u32 *max);
++u32 ethtool_get_max_rxfh_channel(struct net_device *dev);
+ int ethtool_get_max_rxnfc_channel(struct net_device *dev, u64 *max);
+ int __ethtool_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info);
+
+diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
+index f99fd564d0ee..2f5b69d5d4b0 100644
+--- a/net/ethtool/ioctl.c
++++ b/net/ethtool/ioctl.c
+@@ -1928,9 +1928,7 @@ static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
+ * indirection table/rxnfc settings */
+ if (ethtool_get_max_rxnfc_channel(dev, &max_rxnfc_in_use))
+ max_rxnfc_in_use = 0;
+- if (!netif_is_rxfh_configured(dev) ||
+- ethtool_get_max_rxfh_channel(dev, &max_rxfh_in_use))
+- max_rxfh_in_use = 0;
++ max_rxfh_in_use = ethtool_get_max_rxfh_channel(dev);
+ if (channels.combined_count + channels.rx_count <=
+ max_t(u64, max_rxnfc_in_use, max_rxfh_in_use))
+ return -EINVAL;
+--
+2.43.0
+
--- /dev/null
+From 88ede2e6682a6b77ddce85f27c901eaf4a61d153 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 May 2024 10:20:30 +0100
+Subject: ext4: fix possible tid_t sequence overflows
+
+From: Luis Henriques (SUSE) <luis.henriques@linux.dev>
+
+[ Upstream commit 63469662cc45d41705f14b4648481d5d29cf5999 ]
+
+In the fast commit code there are a few places where tid_t variables are
+being compared without taking into account the fact that these sequence
+numbers may wrap. Fix this issue by using the helper functions tid_gt()
+and tid_geq().
+
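+The jbd2 helpers compare transaction IDs the way time_after() compares
+jiffies: by the sign of the (wrapping) unsigned difference. A minimal sketch
+of the idea (simplified, not copied from the kernel headers):
+
+  #include <stdbool.h>
+  #include <stdint.h>
+
+  typedef uint32_t tid_t;
+
+  /* True if x is "after" y, even if the counter wrapped past UINT32_MAX. */
+  static bool tid_gt(tid_t x, tid_t y)
+  {
+          return (int32_t)(x - y) > 0;
+  }
+
+  static bool tid_geq(tid_t x, tid_t y)
+  {
+          return (int32_t)(x - y) >= 0;
+  }
+
+  /* e.g. tid_gt(5, 0xfffffffe) is true although 5 < 0xfffffffe numerically. */
+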
+Signed-off-by: Luis Henriques (SUSE) <luis.henriques@linux.dev>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Harshad Shirwadkar <harshadshirwadkar@gmail.com>
+Link: https://patch.msgid.link/20240529092030.9557-3-luis.henriques@linux.dev
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/fast_commit.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
+index d3a67bc06d10..3926a05eceee 100644
+--- a/fs/ext4/fast_commit.c
++++ b/fs/ext4/fast_commit.c
+@@ -353,7 +353,7 @@ void ext4_fc_mark_ineligible(struct super_block *sb, int reason, handle_t *handl
+ read_unlock(&sbi->s_journal->j_state_lock);
+ }
+ spin_lock(&sbi->s_fc_lock);
+- if (sbi->s_fc_ineligible_tid < tid)
++ if (tid_gt(tid, sbi->s_fc_ineligible_tid))
+ sbi->s_fc_ineligible_tid = tid;
+ spin_unlock(&sbi->s_fc_lock);
+ WARN_ON(reason >= EXT4_FC_REASON_MAX);
+@@ -1213,7 +1213,7 @@ int ext4_fc_commit(journal_t *journal, tid_t commit_tid)
+ if (ret == -EALREADY) {
+ /* There was an ongoing commit, check if we need to restart */
+ if (atomic_read(&sbi->s_fc_subtid) <= subtid &&
+- commit_tid > journal->j_commit_sequence)
++ tid_gt(commit_tid, journal->j_commit_sequence))
+ goto restart_fc;
+ ext4_fc_update_stats(sb, EXT4_FC_STATUS_SKIPPED, 0, 0,
+ commit_tid);
+@@ -1288,7 +1288,7 @@ static void ext4_fc_cleanup(journal_t *journal, int full, tid_t tid)
+ list_del_init(&iter->i_fc_list);
+ ext4_clear_inode_state(&iter->vfs_inode,
+ EXT4_STATE_FC_COMMITTING);
+- if (iter->i_sync_tid <= tid)
++ if (tid_geq(tid, iter->i_sync_tid))
+ ext4_fc_reset_inode(&iter->vfs_inode);
+ /* Make sure EXT4_STATE_FC_COMMITTING bit is clear */
+ smp_mb();
+@@ -1319,7 +1319,7 @@ static void ext4_fc_cleanup(journal_t *journal, int full, tid_t tid)
+ list_splice_init(&sbi->s_fc_q[FC_Q_STAGING],
+ &sbi->s_fc_q[FC_Q_MAIN]);
+
+- if (tid >= sbi->s_fc_ineligible_tid) {
++ if (tid_geq(tid, sbi->s_fc_ineligible_tid)) {
+ sbi->s_fc_ineligible_tid = 0;
+ ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
+ }
+--
+2.43.0
+
--- /dev/null
+From a7222bf8ea1f287818352eb9e1ac9efcca18eecb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 2 Jul 2024 12:08:09 +0100
+Subject: firmware: cs_dsp: Don't allow writes to read-only controls
+
+From: Richard Fitzgerald <rf@opensource.cirrus.com>
+
+[ Upstream commit 62412a9357b16a4e39dc582deb2e2a682b92524c ]
+
+Add a check to cs_dsp_coeff_write_ctrl() to abort if the control
+is not writeable.
+
+The cs_dsp code originated as an ASoC driver (wm_adsp) where all
+controls were exported as ALSA controls. It relied on ALSA to
+enforce the read-only permission. Now that the code has been
+separated from ALSA/ASoC it must perform its own permission check.
+
+This isn't currently causing any problems so there shouldn't be any
+need to backport this. If the client of cs_dsp exposes the control as
+an ALSA control, it should set permissions on that ALSA control to
+protect it. The few uses of cs_dsp_coeff_write_ctrl() inside drivers
+are for writable controls.
+
+Signed-off-by: Richard Fitzgerald <rf@opensource.cirrus.com>
+Link: https://patch.msgid.link/20240702110809.16836-1-rf@opensource.cirrus.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/firmware/cirrus/cs_dsp.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
+index 8a347b938406..89fd63205a6e 100644
+--- a/drivers/firmware/cirrus/cs_dsp.c
++++ b/drivers/firmware/cirrus/cs_dsp.c
+@@ -796,6 +796,9 @@ int cs_dsp_coeff_write_ctrl(struct cs_dsp_coeff_ctl *ctl,
+
+ lockdep_assert_held(&ctl->dsp->pwr_lock);
+
++ if (ctl->flags && !(ctl->flags & WMFW_CTL_FLAG_WRITEABLE))
++ return -EPERM;
++
+ if (len + off * sizeof(u32) > ctl->len)
+ return -EINVAL;
+
+--
+2.43.0
+
--- /dev/null
+From 50a22452903b5b838546e8ee8ce22d859108f941 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 Sep 2024 10:39:27 -0700
+Subject: fou: Fix null-ptr-deref in GRO.
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit 7e4196935069947d8b70b09c1660b67b067e75cb ]
+
+We observed a null-ptr-deref in fou_gro_receive() while shutting down
+a host. [0]
+
+The NULL pointer is sk->sk_user_data, and the offset 8 is of protocol
+in struct fou.
+
+When fou_release() is called due to netns dismantle or explicit tunnel
+teardown, udp_tunnel_sock_release() sets NULL to sk->sk_user_data.
+Then, the tunnel socket is destroyed after a single RCU grace period.
+
+So, in-flight udp4_gro_receive() could find the socket and execute the
+FOU GRO handler, where sk->sk_user_data could be NULL.
+
+Let's use rcu_dereference_sk_user_data() in fou_from_sock() and add NULL
+checks in FOU GRO handlers.
+
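+The shape of the fix, as an illustrative user-space sketch (not the kernel
+API): the teardown path publishes NULL before the object is freed, so every
+reader that picks up the pointer under RCU-like rules must tolerate NULL
+instead of dereferencing it unconditionally.
+
+  #include <stdatomic.h>
+  #include <stddef.h>
+
+  struct fou_state { unsigned char protocol; };
+
+  /* Cleared by the teardown path; freed only after readers are done. */
+  static _Atomic(struct fou_state *) sk_user_data;
+
+  static int gro_receive(void)
+  {
+          struct fou_state *fou =
+                  atomic_load_explicit(&sk_user_data, memory_order_consume);
+
+          if (!fou)               /* tunnel is being torn down: bail out */
+                  return -1;
+          /* In the kernel, RCU keeps fou valid for the rest of this read side. */
+          return fou->protocol;
+  }
+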
+[0]:
+BUG: kernel NULL pointer dereference, address: 0000000000000008
+#PF: supervisor read access in kernel mode
+#PF: error_code(0x0000) - not-present page
+PGD 80000001032f4067 P4D 80000001032f4067 PUD 103240067 PMD 0
+SMP PTI
+CPU: 0 PID: 0 Comm: swapper/0 Not tainted 5.10.216-204.855.amzn2.x86_64 #1
+Hardware name: Amazon EC2 c5.large/, BIOS 1.0 10/16/2017
+RIP: 0010:fou_gro_receive (net/ipv4/fou.c:233) [fou]
+Code: 41 5f c3 cc cc cc cc e8 e7 2e 69 f4 0f 1f 80 00 00 00 00 0f 1f 44 00 00 49 89 f8 41 54 48 89 f7 48 89 d6 49 8b 80 88 02 00 00 <0f> b6 48 08 0f b7 42 4a 66 25 fd fd 80 cc 02 66 89 42 4a 0f b6 42
+RSP: 0018:ffffa330c0003d08 EFLAGS: 00010297
+RAX: 0000000000000000 RBX: ffff93d9e3a6b900 RCX: 0000000000000010
+RDX: ffff93d9e3a6b900 RSI: ffff93d9e3a6b900 RDI: ffff93dac2e24d08
+RBP: ffff93d9e3a6b900 R08: ffff93dacbce6400 R09: 0000000000000002
+R10: 0000000000000000 R11: ffffffffb5f369b0 R12: ffff93dacbce6400
+R13: ffff93dac2e24d08 R14: 0000000000000000 R15: ffffffffb4edd1c0
+FS: 0000000000000000(0000) GS:ffff93daee800000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 0000000000000008 CR3: 0000000102140001 CR4: 00000000007706f0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+PKRU: 55555554
+Call Trace:
+ <IRQ>
+ ? show_trace_log_lvl (arch/x86/kernel/dumpstack.c:259)
+ ? __die_body.cold (arch/x86/kernel/dumpstack.c:478 arch/x86/kernel/dumpstack.c:420)
+ ? no_context (arch/x86/mm/fault.c:752)
+ ? exc_page_fault (arch/x86/include/asm/irqflags.h:49 arch/x86/include/asm/irqflags.h:89 arch/x86/mm/fault.c:1435 arch/x86/mm/fault.c:1483)
+ ? asm_exc_page_fault (arch/x86/include/asm/idtentry.h:571)
+ ? fou_gro_receive (net/ipv4/fou.c:233) [fou]
+ udp_gro_receive (include/linux/netdevice.h:2552 net/ipv4/udp_offload.c:559)
+ udp4_gro_receive (net/ipv4/udp_offload.c:604)
+ inet_gro_receive (net/ipv4/af_inet.c:1549 (discriminator 7))
+ dev_gro_receive (net/core/dev.c:6035 (discriminator 4))
+ napi_gro_receive (net/core/dev.c:6170)
+ ena_clean_rx_irq (drivers/amazon/net/ena/ena_netdev.c:1558) [ena]
+ ena_io_poll (drivers/amazon/net/ena/ena_netdev.c:1742) [ena]
+ napi_poll (net/core/dev.c:6847)
+ net_rx_action (net/core/dev.c:6917)
+ __do_softirq (arch/x86/include/asm/jump_label.h:25 include/linux/jump_label.h:200 include/trace/events/irq.h:142 kernel/softirq.c:299)
+ asm_call_irq_on_stack (arch/x86/entry/entry_64.S:809)
+</IRQ>
+ do_softirq_own_stack (arch/x86/include/asm/irq_stack.h:27 arch/x86/include/asm/irq_stack.h:77 arch/x86/kernel/irq_64.c:77)
+ irq_exit_rcu (kernel/softirq.c:393 kernel/softirq.c:423 kernel/softirq.c:435)
+ common_interrupt (arch/x86/kernel/irq.c:239)
+ asm_common_interrupt (arch/x86/include/asm/idtentry.h:626)
+RIP: 0010:acpi_idle_do_entry (arch/x86/include/asm/irqflags.h:49 arch/x86/include/asm/irqflags.h:89 drivers/acpi/processor_idle.c:114 drivers/acpi/processor_idle.c:575)
+Code: 8b 15 d1 3c c4 02 ed c3 cc cc cc cc 65 48 8b 04 25 40 ef 01 00 48 8b 00 a8 08 75 eb 0f 1f 44 00 00 0f 00 2d d5 09 55 00 fb f4 <fa> c3 cc cc cc cc e9 be fc ff ff 66 66 2e 0f 1f 84 00 00 00 00 00
+RSP: 0018:ffffffffb5603e58 EFLAGS: 00000246
+RAX: 0000000000004000 RBX: ffff93dac0929c00 RCX: ffff93daee833900
+RDX: ffff93daee800000 RSI: ffff93daee87dc00 RDI: ffff93daee87dc64
+RBP: 0000000000000001 R08: ffffffffb5e7b6c0 R09: 0000000000000044
+R10: ffff93daee831b04 R11: 00000000000001cd R12: 0000000000000001
+R13: ffffffffb5e7b740 R14: 0000000000000001 R15: 0000000000000000
+ ? sched_clock_cpu (kernel/sched/clock.c:371)
+ acpi_idle_enter (drivers/acpi/processor_idle.c:712 (discriminator 3))
+ cpuidle_enter_state (drivers/cpuidle/cpuidle.c:237)
+ cpuidle_enter (drivers/cpuidle/cpuidle.c:353)
+ cpuidle_idle_call (kernel/sched/idle.c:158 kernel/sched/idle.c:239)
+ do_idle (kernel/sched/idle.c:302)
+ cpu_startup_entry (kernel/sched/idle.c:395 (discriminator 1))
+ start_kernel (init/main.c:1048)
+ secondary_startup_64_no_verify (arch/x86/kernel/head_64.S:310)
+Modules linked in: udp_diag tcp_diag inet_diag nft_nat ipip tunnel4 dummy fou ip_tunnel nft_masq nft_chain_nat nf_nat wireguard nft_ct curve25519_x86_64 libcurve25519_generic nf_conntrack libchacha20poly1305 nf_defrag_ipv6 nf_defrag_ipv4 nft_objref chacha_x86_64 nft_counter nf_tables nfnetlink poly1305_x86_64 ip6_udp_tunnel udp_tunnel libchacha crc32_pclmul ghash_clmulni_intel aesni_intel crypto_simd cryptd glue_helper mousedev psmouse button ena ptp pps_core crc32c_intel
+CR2: 0000000000000008
+
+Fixes: d92283e338f6 ("fou: change to use UDP socket GRO")
+Reported-by: Alphonse Kurian <alkurian@amazon.com>
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Link: https://patch.msgid.link/20240902173927.62706-1-kuniyu@amazon.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/fou_core.c | 29 ++++++++++++++++++++++++-----
+ 1 file changed, 24 insertions(+), 5 deletions(-)
+
+diff --git a/net/ipv4/fou_core.c b/net/ipv4/fou_core.c
+index 0abbc413e0fe..78b869b31492 100644
+--- a/net/ipv4/fou_core.c
++++ b/net/ipv4/fou_core.c
+@@ -50,7 +50,7 @@ struct fou_net {
+
+ static inline struct fou *fou_from_sock(struct sock *sk)
+ {
+- return sk->sk_user_data;
++ return rcu_dereference_sk_user_data(sk);
+ }
+
+ static int fou_recv_pull(struct sk_buff *skb, struct fou *fou, size_t len)
+@@ -233,9 +233,15 @@ static struct sk_buff *fou_gro_receive(struct sock *sk,
+ struct sk_buff *skb)
+ {
+ const struct net_offload __rcu **offloads;
+- u8 proto = fou_from_sock(sk)->protocol;
++ struct fou *fou = fou_from_sock(sk);
+ const struct net_offload *ops;
+ struct sk_buff *pp = NULL;
++ u8 proto;
++
++ if (!fou)
++ goto out;
++
++ proto = fou->protocol;
+
+ /* We can clear the encap_mark for FOU as we are essentially doing
+ * one of two possible things. We are either adding an L4 tunnel
+@@ -263,14 +269,24 @@ static int fou_gro_complete(struct sock *sk, struct sk_buff *skb,
+ int nhoff)
+ {
+ const struct net_offload __rcu **offloads;
+- u8 proto = fou_from_sock(sk)->protocol;
++ struct fou *fou = fou_from_sock(sk);
+ const struct net_offload *ops;
+- int err = -ENOSYS;
++ u8 proto;
++ int err;
++
++ if (!fou) {
++ err = -ENOENT;
++ goto out;
++ }
++
++ proto = fou->protocol;
+
+ offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
+ ops = rcu_dereference(offloads[proto]);
+- if (WARN_ON(!ops || !ops->callbacks.gro_complete))
++ if (WARN_ON(!ops || !ops->callbacks.gro_complete)) {
++ err = -ENOSYS;
+ goto out;
++ }
+
+ err = ops->callbacks.gro_complete(skb, nhoff);
+
+@@ -320,6 +336,9 @@ static struct sk_buff *gue_gro_receive(struct sock *sk,
+ struct gro_remcsum grc;
+ u8 proto;
+
++ if (!fou)
++ goto out;
++
+ skb_gro_remcsum_init(&grc);
+
+ off = skb_gro_offset(skb);
+--
+2.43.0
+
--- /dev/null
+From 6c5205379ea5337f695c9b5a99871f6a6570776c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 10 Jun 2024 14:49:54 +0200
+Subject: fs: don't copy to userspace under namespace semaphore
+
+From: Christian Brauner <brauner@kernel.org>
+
+[ Upstream commit cb54ef4f050e7c504ed87114276a296d727e918a ]
+
+Don't copy mount ids to userspace while holding the namespace semaphore.
+We really shouldn't do that and I've gone to great lengths to avoid that
+in statmount() already.
+
+Limit the number of mounts that can be retrieved in one go to 1 million
+mount ids. That's effectively 10 times the default limit of 100000 mounts
+that we put on each mount namespace by default. Since listmount() is an
+iterator, limiting the number of mounts retrievable in one go isn't a
+problem as userspace can just pick up where it left off.
+
+Karel mentioned that libmount will probably be reading the mount table
+in "in small steps, 512 nodes per request. Nobody likes a tool that
+takes too long in the kernel, and huge servers are unusual use cases.
+Libmount will very probably provide API to define size of the step (IDs
+per request)."
+
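+The overall shape, as a hedged user-space sketch (pthread rwlock and memcpy
+standing in for namespace_sem and copy_to_user): gather the IDs into a
+kernel-side buffer while holding the lock, drop the lock, then copy out.
+
+  #include <pthread.h>
+  #include <stdint.h>
+  #include <stdlib.h>
+  #include <string.h>
+  #include <sys/types.h>
+
+  static pthread_rwlock_t ns_lock = PTHREAD_RWLOCK_INITIALIZER;
+
+  static ssize_t list_ids(uint64_t *user_buf, size_t nr)
+  {
+          uint64_t *kbuf;
+          ssize_t ret = 0;
+
+          if (nr > 1000000)               /* cap one call at 1M entries */
+                  return -1;              /* -EOVERFLOW in the real code */
+          kbuf = calloc(nr, sizeof(*kbuf));
+          if (!kbuf)
+                  return -1;              /* -ENOMEM in the real code */
+
+          pthread_rwlock_rdlock(&ns_lock);
+          /* ... walk the mount tree and fill kbuf[0..ret-1] ... */
+          pthread_rwlock_unlock(&ns_lock);
+
+          memcpy(user_buf, kbuf, ret * sizeof(*kbuf)); /* no lock held here */
+          free(kbuf);
+          return ret;
+  }
+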
+Reported-by: Mateusz Guzik <mjguzik@gmail.com>
+Link: https://lore.kernel.org/r/20240610-frettchen-liberal-a9a5c53865f8@brauner
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/namespace.c | 98 ++++++++++++++++++++++++++++----------------------
+ 1 file changed, 56 insertions(+), 42 deletions(-)
+
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 5a51315c6678..57311ecbdf5a 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -5047,55 +5047,81 @@ static struct mount *listmnt_next(struct mount *curr)
+ return node_to_mount(rb_next(&curr->mnt_node));
+ }
+
+-static ssize_t do_listmount(struct mount *first, struct path *orig,
+- u64 mnt_parent_id, u64 __user *mnt_ids,
+- size_t nr_mnt_ids, const struct path *root)
++static ssize_t do_listmount(u64 mnt_parent_id, u64 last_mnt_id, u64 *mnt_ids,
++ size_t nr_mnt_ids)
+ {
+- struct mount *r;
++ struct path root;
++ struct mnt_namespace *ns = current->nsproxy->mnt_ns;
++ struct path orig;
++ struct mount *r, *first;
+ ssize_t ret;
+
++ rwsem_assert_held(&namespace_sem);
++
++ get_fs_root(current->fs, &root);
++ if (mnt_parent_id == LSMT_ROOT) {
++ orig = root;
++ } else {
++ orig.mnt = lookup_mnt_in_ns(mnt_parent_id, ns);
++ if (!orig.mnt) {
++ ret = -ENOENT;
++ goto err;
++ }
++ orig.dentry = orig.mnt->mnt_root;
++ }
++
+ /*
+ * Don't trigger audit denials. We just want to determine what
+ * mounts to show users.
+ */
+- if (!is_path_reachable(real_mount(orig->mnt), orig->dentry, root) &&
+- !ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN))
+- return -EPERM;
++ if (!is_path_reachable(real_mount(orig.mnt), orig.dentry, &root) &&
++ !ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN)) {
++ ret = -EPERM;
++ goto err;
++ }
+
+- ret = security_sb_statfs(orig->dentry);
++ ret = security_sb_statfs(orig.dentry);
+ if (ret)
+- return ret;
++ goto err;
++
++ if (!last_mnt_id)
++ first = node_to_mount(rb_first(&ns->mounts));
++ else
++ first = mnt_find_id_at(ns, last_mnt_id + 1);
+
+ for (ret = 0, r = first; r && nr_mnt_ids; r = listmnt_next(r)) {
+ if (r->mnt_id_unique == mnt_parent_id)
+ continue;
+- if (!is_path_reachable(r, r->mnt.mnt_root, orig))
++ if (!is_path_reachable(r, r->mnt.mnt_root, &orig))
+ continue;
+- if (put_user(r->mnt_id_unique, mnt_ids))
+- return -EFAULT;
++ *mnt_ids = r->mnt_id_unique;
+ mnt_ids++;
+ nr_mnt_ids--;
+ ret++;
+ }
++err:
++ path_put(&root);
+ return ret;
+ }
+
+-SYSCALL_DEFINE4(listmount, const struct mnt_id_req __user *, req, u64 __user *,
+- mnt_ids, size_t, nr_mnt_ids, unsigned int, flags)
++SYSCALL_DEFINE4(listmount, const struct mnt_id_req __user *, req,
++ u64 __user *, mnt_ids, size_t, nr_mnt_ids, unsigned int, flags)
+ {
+- struct mnt_namespace *ns = current->nsproxy->mnt_ns;
++ u64 *kmnt_ids __free(kvfree) = NULL;
++ const size_t maxcount = 1000000;
+ struct mnt_id_req kreq;
+- struct mount *first;
+- struct path root, orig;
+- u64 mnt_parent_id, last_mnt_id;
+- const size_t maxcount = (size_t)-1 >> 3;
+ ssize_t ret;
+
+ if (flags)
+ return -EINVAL;
+
++ /*
++ * If the mount namespace really has more than 1 million mounts the
++ * caller must iterate over the mount namespace (and reconsider their
++ * system design...).
++ */
+ if (unlikely(nr_mnt_ids > maxcount))
+- return -EFAULT;
++ return -EOVERFLOW;
+
+ if (!access_ok(mnt_ids, nr_mnt_ids * sizeof(*mnt_ids)))
+ return -EFAULT;
+@@ -5103,33 +5129,21 @@ SYSCALL_DEFINE4(listmount, const struct mnt_id_req __user *, req, u64 __user *,
+ ret = copy_mnt_id_req(req, &kreq);
+ if (ret)
+ return ret;
+- mnt_parent_id = kreq.mnt_id;
+- last_mnt_id = kreq.param;
+
+- down_read(&namespace_sem);
+- get_fs_root(current->fs, &root);
+- if (mnt_parent_id == LSMT_ROOT) {
+- orig = root;
+- } else {
+- ret = -ENOENT;
+- orig.mnt = lookup_mnt_in_ns(mnt_parent_id, ns);
+- if (!orig.mnt)
+- goto err;
+- orig.dentry = orig.mnt->mnt_root;
+- }
+- if (!last_mnt_id)
+- first = node_to_mount(rb_first(&ns->mounts));
+- else
+- first = mnt_find_id_at(ns, last_mnt_id + 1);
++ kmnt_ids = kvmalloc_array(nr_mnt_ids, sizeof(*kmnt_ids),
++ GFP_KERNEL_ACCOUNT);
++ if (!kmnt_ids)
++ return -ENOMEM;
++
++ scoped_guard(rwsem_read, &namespace_sem)
++ ret = do_listmount(kreq.mnt_id, kreq.param, kmnt_ids, nr_mnt_ids);
++
++ if (copy_to_user(mnt_ids, kmnt_ids, ret * sizeof(*mnt_ids)))
++ return -EFAULT;
+
+- ret = do_listmount(first, &orig, mnt_parent_id, mnt_ids, nr_mnt_ids, &root);
+-err:
+- path_put(&root);
+- up_read(&namespace_sem);
+ return ret;
+ }
+
+-
+ static void __init init_mount_tree(void)
+ {
+ struct vfsmount *mnt;
+--
+2.43.0
+
--- /dev/null
+From b9d1bcc04d41f852bcd575c79f6fe926b85b918f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 17 Jun 2024 14:53:57 +0300
+Subject: fs/ntfs3: Check more cases when directory is corrupted
+
+From: Konstantin Komarov <almaz.alexandrovich@paragon-software.com>
+
+[ Upstream commit 744375343662058cbfda96d871786e5a5cbe1947 ]
+
+Mark the ntfs filesystem as dirty in this case.
+Rename ntfs_filldir to ntfs_dir_emit.
+
+Signed-off-by: Konstantin Komarov <almaz.alexandrovich@paragon-software.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ntfs3/dir.c | 52 +++++++++++++++++++++++++++++++-------------------
+ 1 file changed, 32 insertions(+), 20 deletions(-)
+
+diff --git a/fs/ntfs3/dir.c b/fs/ntfs3/dir.c
+index 858efe255f6f..1ec09f2fca64 100644
+--- a/fs/ntfs3/dir.c
++++ b/fs/ntfs3/dir.c
+@@ -272,9 +272,12 @@ struct inode *dir_search_u(struct inode *dir, const struct cpu_str *uni,
+ return err == -ENOENT ? NULL : err ? ERR_PTR(err) : inode;
+ }
+
+-static inline int ntfs_filldir(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
+- const struct NTFS_DE *e, u8 *name,
+- struct dir_context *ctx)
++/*
++ * returns false if 'ctx' is full
++ */
++static inline bool ntfs_dir_emit(struct ntfs_sb_info *sbi,
++ struct ntfs_inode *ni, const struct NTFS_DE *e,
++ u8 *name, struct dir_context *ctx)
+ {
+ const struct ATTR_FILE_NAME *fname;
+ unsigned long ino;
+@@ -284,29 +287,29 @@ static inline int ntfs_filldir(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
+ fname = Add2Ptr(e, sizeof(struct NTFS_DE));
+
+ if (fname->type == FILE_NAME_DOS)
+- return 0;
++ return true;
+
+ if (!mi_is_ref(&ni->mi, &fname->home))
+- return 0;
++ return true;
+
+ ino = ino_get(&e->ref);
+
+ if (ino == MFT_REC_ROOT)
+- return 0;
++ return true;
+
+ /* Skip meta files. Unless option to show metafiles is set. */
+ if (!sbi->options->showmeta && ntfs_is_meta_file(sbi, ino))
+- return 0;
++ return true;
+
+ if (sbi->options->nohidden && (fname->dup.fa & FILE_ATTRIBUTE_HIDDEN))
+- return 0;
++ return true;
+
+ name_len = ntfs_utf16_to_nls(sbi, fname->name, fname->name_len, name,
+ PATH_MAX);
+ if (name_len <= 0) {
+ ntfs_warn(sbi->sb, "failed to convert name for inode %lx.",
+ ino);
+- return 0;
++ return true;
+ }
+
+ /*
+@@ -336,17 +339,20 @@ static inline int ntfs_filldir(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
+ }
+ }
+
+- return !dir_emit(ctx, (s8 *)name, name_len, ino, dt_type);
++ return dir_emit(ctx, (s8 *)name, name_len, ino, dt_type);
+ }
+
+ /*
+ * ntfs_read_hdr - Helper function for ntfs_readdir().
++ *
++ * returns 0 if ok.
++ * returns -EINVAL if directory is corrupted.
++ * returns +1 if 'ctx' is full.
+ */
+ static int ntfs_read_hdr(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
+ const struct INDEX_HDR *hdr, u64 vbo, u64 pos,
+ u8 *name, struct dir_context *ctx)
+ {
+- int err;
+ const struct NTFS_DE *e;
+ u32 e_size;
+ u32 end = le32_to_cpu(hdr->used);
+@@ -354,12 +360,12 @@ static int ntfs_read_hdr(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
+
+ for (;; off += e_size) {
+ if (off + sizeof(struct NTFS_DE) > end)
+- return -1;
++ return -EINVAL;
+
+ e = Add2Ptr(hdr, off);
+ e_size = le16_to_cpu(e->size);
+ if (e_size < sizeof(struct NTFS_DE) || off + e_size > end)
+- return -1;
++ return -EINVAL;
+
+ if (de_is_last(e))
+ return 0;
+@@ -369,14 +375,15 @@ static int ntfs_read_hdr(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
+ continue;
+
+ if (le16_to_cpu(e->key_size) < SIZEOF_ATTRIBUTE_FILENAME)
+- return -1;
++ return -EINVAL;
+
+ ctx->pos = vbo + off;
+
+ /* Submit the name to the filldir callback. */
+- err = ntfs_filldir(sbi, ni, e, name, ctx);
+- if (err)
+- return err;
++ if (!ntfs_dir_emit(sbi, ni, e, name, ctx)) {
++ /* ctx is full. */
++ return +1;
++ }
+ }
+ }
+
+@@ -475,8 +482,6 @@ static int ntfs_readdir(struct file *file, struct dir_context *ctx)
+
+ vbo = (u64)bit << index_bits;
+ if (vbo >= i_size) {
+- ntfs_inode_err(dir, "Looks like your dir is corrupt");
+- ctx->pos = eod;
+ err = -EINVAL;
+ goto out;
+ }
+@@ -499,9 +504,16 @@ static int ntfs_readdir(struct file *file, struct dir_context *ctx)
+ __putname(name);
+ put_indx_node(node);
+
+- if (err == -ENOENT) {
++ if (err == 1) {
++ /* 'ctx' is full. */
++ err = 0;
++ } else if (err == -ENOENT) {
+ err = 0;
+ ctx->pos = pos;
++ } else if (err < 0) {
++ if (err == -EINVAL)
++ ntfs_inode_err(dir, "directory corrupted");
++ ctx->pos = eod;
+ }
+
+ return err;
+--
+2.43.0
+
--- /dev/null
+From 2c6727d1cc71dc74e3e1241309bcd48ba4457a51 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 30 May 2024 10:55:12 +0300
+Subject: fs/ntfs3: One more reason to mark inode bad
+
+From: Konstantin Komarov <almaz.alexandrovich@paragon-software.com>
+
+[ Upstream commit a0dde5d7a58b6bf9184ef3d8c6e62275c3645584 ]
+
+In addition to returning an error, mark the node as bad.
+
+Signed-off-by: Konstantin Komarov <almaz.alexandrovich@paragon-software.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ntfs3/frecord.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
+index ded451a84b77..7a73df871037 100644
+--- a/fs/ntfs3/frecord.c
++++ b/fs/ntfs3/frecord.c
+@@ -1601,8 +1601,10 @@ int ni_delete_all(struct ntfs_inode *ni)
+ asize = le32_to_cpu(attr->size);
+ roff = le16_to_cpu(attr->nres.run_off);
+
+- if (roff > asize)
++ if (roff > asize) {
++ _ntfs_bad_inode(&ni->vfs_inode);
+ return -EINVAL;
++ }
+
+ /* run==1 means unpack and deallocate. */
+ run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn, evcn, svcn,
+--
+2.43.0
+
--- /dev/null
+From 0f29f2dff8262b0c0c47e336411adae0278bd9e4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Jun 2024 11:49:45 -0400
+Subject: fs: relax permissions for statmount()
+
+From: Christian Brauner <brauner@kernel.org>
+
+[ Upstream commit f3107df39df123328a9d3c8f40c006834b37287d ]
+
+It is sufficient to have capabilities in the owning user namespace of
+the mount namespace to stat a mount regardless of whether it's reachable
+or not.
+
+Link: https://lore.kernel.org/r/bf5961d71ec479ba85806766b0d8d96043e67bba.1719243756.git.josef@toxicpanda.com
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/namespace.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 57311ecbdf5a..4494064205a6 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -4906,6 +4906,7 @@ static int copy_statmount_to_user(struct kstatmount *s)
+ static int do_statmount(struct kstatmount *s)
+ {
+ struct mount *m = real_mount(s->mnt);
++ struct mnt_namespace *ns = m->mnt_ns;
+ int err;
+
+ /*
+@@ -4913,7 +4914,7 @@ static int do_statmount(struct kstatmount *s)
+ * mounts to show users.
+ */
+ if (!is_path_reachable(m, m->mnt.mnt_root, &s->root) &&
+- !ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN))
++ !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN))
+ return -EPERM;
+
+ err = security_sb_statfs(s->mnt->mnt_root);
+--
+2.43.0
+
--- /dev/null
+From 1ed71db58e24f63b65c0fabe79f1384ebd807a0b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Jun 2024 00:12:27 +0000
+Subject: gve: Add adminq mutex lock
+
+From: Ziwei Xiao <ziweixiao@google.com>
+
+[ Upstream commit 1108566ca509e67aa8abfbf914b1cd31e9ff51f8 ]
+
+We were depending on the rtnl_lock to make sure there is only one adminq
+command running at a time. But some commands may take too long to hold
+the rtnl_lock, such as the upcoming flow steering operations. For such
+situations, the driver can temporarily drop the rtnl_lock and replace it
+for these operations with a new adminq lock, which ensures that adminq
+command execution stays thread-safe.
+
+Signed-off-by: Ziwei Xiao <ziweixiao@google.com>
+Reviewed-by: Praveen Kaligineedi <pkaligineedi@google.com>
+Reviewed-by: Harshitha Ramamurthy <hramamurthy@google.com>
+Reviewed-by: Willem de Bruijn <willemb@google.com>
+Link: https://patch.msgid.link/20240625001232.1476315-2-ziweixiao@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/google/gve/gve.h | 1 +
+ drivers/net/ethernet/google/gve/gve_adminq.c | 22 +++++++++++---------
+ 2 files changed, 13 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
+index ae1e21c9b0a5..ca7fce17f2c0 100644
+--- a/drivers/net/ethernet/google/gve/gve.h
++++ b/drivers/net/ethernet/google/gve/gve.h
+@@ -724,6 +724,7 @@ struct gve_priv {
+ union gve_adminq_command *adminq;
+ dma_addr_t adminq_bus_addr;
+ struct dma_pool *adminq_pool;
++ struct mutex adminq_lock; /* Protects adminq command execution */
+ u32 adminq_mask; /* masks prod_cnt to adminq size */
+ u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
+ u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
+diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
+index 8ca0def176ef..2e0c1eb87b11 100644
+--- a/drivers/net/ethernet/google/gve/gve_adminq.c
++++ b/drivers/net/ethernet/google/gve/gve_adminq.c
+@@ -284,6 +284,7 @@ int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
+ &priv->reg_bar0->adminq_base_address_lo);
+ iowrite32be(GVE_DRIVER_STATUS_RUN_MASK, &priv->reg_bar0->driver_status);
+ }
++ mutex_init(&priv->adminq_lock);
+ gve_set_admin_queue_ok(priv);
+ return 0;
+ }
+@@ -511,28 +512,29 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
+ return 0;
+ }
+
+-/* This function is not threadsafe - the caller is responsible for any
+- * necessary locks.
+- * The caller is also responsible for making sure there are no commands
+- * waiting to be executed.
+- */
+ static int gve_adminq_execute_cmd(struct gve_priv *priv,
+ union gve_adminq_command *cmd_orig)
+ {
+ u32 tail, head;
+ int err;
+
++ mutex_lock(&priv->adminq_lock);
+ tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
+ head = priv->adminq_prod_cnt;
+- if (tail != head)
+- // This is not a valid path
+- return -EINVAL;
++ if (tail != head) {
++ err = -EINVAL;
++ goto out;
++ }
+
+ err = gve_adminq_issue_cmd(priv, cmd_orig);
+ if (err)
+- return err;
++ goto out;
+
+- return gve_adminq_kick_and_wait(priv);
++ err = gve_adminq_kick_and_wait(priv);
++
++out:
++ mutex_unlock(&priv->adminq_lock);
++ return err;
+ }
+
+ /* The device specifies that the management vector can either be the first irq
+--
+2.43.0
+
--- /dev/null
+From ee877dcd73def6fb2e12b27e0e412dc1c2e8cdac Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Jul 2024 10:44:35 +0200
+Subject: HID: amd_sfh: free driver_data after destroying hid device
+
+From: Olivier Sobrie <olivier@sobrie.be>
+
+[ Upstream commit 97155021ae17b86985121b33cf8098bcde00d497 ]
+
+HID driver callbacks aren't called anymore once hid_destroy_device() has
+been called. Hence, hid driver_data should be freed only after the
+hid_destroy_device() function has returned, as driver_data is used in several
+callbacks.
+
+I observed a crash with kernel 6.10.0 on my T14s Gen 3; after enabling
+KASAN to debug memory allocation, I got this output:
+
+ [ 13.050438] ==================================================================
+ [ 13.054060] BUG: KASAN: slab-use-after-free in amd_sfh_get_report+0x3ec/0x530 [amd_sfh]
+ [ 13.054809] psmouse serio1: trackpoint: Synaptics TrackPoint firmware: 0x02, buttons: 3/3
+ [ 13.056432] Read of size 8 at addr ffff88813152f408 by task (udev-worker)/479
+
+ [ 13.060970] CPU: 5 PID: 479 Comm: (udev-worker) Not tainted 6.10.0-arch1-2 #1 893bb55d7f0073f25c46adbb49eb3785fefd74b0
+ [ 13.063978] Hardware name: LENOVO 21CQCTO1WW/21CQCTO1WW, BIOS R22ET70W (1.40 ) 03/21/2024
+ [ 13.067860] Call Trace:
+ [ 13.069383] input: TPPS/2 Synaptics TrackPoint as /devices/platform/i8042/serio1/input/input8
+ [ 13.071486] <TASK>
+ [ 13.071492] dump_stack_lvl+0x5d/0x80
+ [ 13.074870] snd_hda_intel 0000:33:00.6: enabling device (0000 -> 0002)
+ [ 13.078296] ? amd_sfh_get_report+0x3ec/0x530 [amd_sfh 05f43221435b5205f734cd9da29399130f398a38]
+ [ 13.082199] print_report+0x174/0x505
+ [ 13.085776] ? __pfx__raw_spin_lock_irqsave+0x10/0x10
+ [ 13.089367] ? srso_alias_return_thunk+0x5/0xfbef5
+ [ 13.093255] ? amd_sfh_get_report+0x3ec/0x530 [amd_sfh 05f43221435b5205f734cd9da29399130f398a38]
+ [ 13.097464] kasan_report+0xc8/0x150
+ [ 13.101461] ? amd_sfh_get_report+0x3ec/0x530 [amd_sfh 05f43221435b5205f734cd9da29399130f398a38]
+ [ 13.105802] amd_sfh_get_report+0x3ec/0x530 [amd_sfh 05f43221435b5205f734cd9da29399130f398a38]
+ [ 13.110303] amdtp_hid_request+0xb8/0x110 [amd_sfh 05f43221435b5205f734cd9da29399130f398a38]
+ [ 13.114879] ? srso_alias_return_thunk+0x5/0xfbef5
+ [ 13.119450] sensor_hub_get_feature+0x1d3/0x540 [hid_sensor_hub 3f13be3016ff415bea03008d45d99da837ee3082]
+ [ 13.124097] hid_sensor_parse_common_attributes+0x4d0/0xad0 [hid_sensor_iio_common c3a5cbe93969c28b122609768bbe23efe52eb8f5]
+ [ 13.127404] ? srso_alias_return_thunk+0x5/0xfbef5
+ [ 13.131925] ? __pfx_hid_sensor_parse_common_attributes+0x10/0x10 [hid_sensor_iio_common c3a5cbe93969c28b122609768bbe23efe52eb8f5]
+ [ 13.136455] ? _raw_spin_lock_irqsave+0x96/0xf0
+ [ 13.140197] ? __pfx__raw_spin_lock_irqsave+0x10/0x10
+ [ 13.143602] ? devm_iio_device_alloc+0x34/0x50 [industrialio 3d261d5e5765625d2b052be40e526d62b1d2123b]
+ [ 13.147234] ? srso_alias_return_thunk+0x5/0xfbef5
+ [ 13.150446] ? __devm_add_action+0x167/0x1d0
+ [ 13.155061] hid_gyro_3d_probe+0x120/0x7f0 [hid_sensor_gyro_3d 63da36a143b775846ab2dbb86c343b401b5e3172]
+ [ 13.158581] ? srso_alias_return_thunk+0x5/0xfbef5
+ [ 13.161814] platform_probe+0xa2/0x150
+ [ 13.165029] really_probe+0x1e3/0x8a0
+ [ 13.168243] __driver_probe_device+0x18c/0x370
+ [ 13.171500] driver_probe_device+0x4a/0x120
+ [ 13.175000] __driver_attach+0x190/0x4a0
+ [ 13.178521] ? __pfx___driver_attach+0x10/0x10
+ [ 13.181771] bus_for_each_dev+0x106/0x180
+ [ 13.185033] ? __pfx__raw_spin_lock+0x10/0x10
+ [ 13.188229] ? __pfx_bus_for_each_dev+0x10/0x10
+ [ 13.191446] ? srso_alias_return_thunk+0x5/0xfbef5
+ [ 13.194382] bus_add_driver+0x29e/0x4d0
+ [ 13.197328] driver_register+0x1a5/0x360
+ [ 13.200283] ? __pfx_hid_gyro_3d_platform_driver_init+0x10/0x10 [hid_sensor_gyro_3d 63da36a143b775846ab2dbb86c343b401b5e3172]
+ [ 13.203362] do_one_initcall+0xa7/0x380
+ [ 13.206432] ? __pfx_do_one_initcall+0x10/0x10
+ [ 13.210175] ? srso_alias_return_thunk+0x5/0xfbef5
+ [ 13.213211] ? kasan_unpoison+0x44/0x70
+ [ 13.216688] do_init_module+0x238/0x750
+ [ 13.219696] load_module+0x5011/0x6af0
+ [ 13.223096] ? kasan_save_stack+0x30/0x50
+ [ 13.226743] ? kasan_save_track+0x14/0x30
+ [ 13.230080] ? kasan_save_free_info+0x3b/0x60
+ [ 13.233323] ? poison_slab_object+0x109/0x180
+ [ 13.236778] ? __pfx_load_module+0x10/0x10
+ [ 13.239703] ? poison_slab_object+0x109/0x180
+ [ 13.243070] ? srso_alias_return_thunk+0x5/0xfbef5
+ [ 13.245924] ? init_module_from_file+0x13d/0x150
+ [ 13.248745] ? srso_alias_return_thunk+0x5/0xfbef5
+ [ 13.251503] ? init_module_from_file+0xdf/0x150
+ [ 13.254198] init_module_from_file+0xdf/0x150
+ [ 13.256826] ? __pfx_init_module_from_file+0x10/0x10
+ [ 13.259428] ? kasan_save_track+0x14/0x30
+ [ 13.261959] ? srso_alias_return_thunk+0x5/0xfbef5
+ [ 13.264471] ? kasan_save_free_info+0x3b/0x60
+ [ 13.267026] ? poison_slab_object+0x109/0x180
+ [ 13.269494] ? srso_alias_return_thunk+0x5/0xfbef5
+ [ 13.271949] ? srso_alias_return_thunk+0x5/0xfbef5
+ [ 13.274324] ? _raw_spin_lock+0x85/0xe0
+ [ 13.276671] ? __pfx__raw_spin_lock+0x10/0x10
+ [ 13.278963] ? __rseq_handle_notify_resume+0x1a6/0xad0
+ [ 13.281193] idempotent_init_module+0x23b/0x650
+ [ 13.283420] ? __pfx_idempotent_init_module+0x10/0x10
+ [ 13.285619] ? __pfx___seccomp_filter+0x10/0x10
+ [ 13.287714] ? srso_alias_return_thunk+0x5/0xfbef5
+ [ 13.289828] ? __fget_light+0x57/0x420
+ [ 13.291870] ? srso_alias_return_thunk+0x5/0xfbef5
+ [ 13.293880] ? security_capable+0x74/0xb0
+ [ 13.295820] __x64_sys_finit_module+0xbe/0x130
+ [ 13.297874] do_syscall_64+0x82/0x190
+ [ 13.299898] ? srso_alias_return_thunk+0x5/0xfbef5
+ [ 13.301905] ? irqtime_account_irq+0x3d/0x1f0
+ [ 13.303877] ? srso_alias_return_thunk+0x5/0xfbef5
+ [ 13.305753] ? __irq_exit_rcu+0x4e/0x130
+ [ 13.307577] ? srso_alias_return_thunk+0x5/0xfbef5
+ [ 13.309489] entry_SYSCALL_64_after_hwframe+0x76/0x7e
+ [ 13.311371] RIP: 0033:0x7a21f96ade9d
+ [ 13.313234] Code: ff c3 66 2e 0f 1f 84 00 00 00 00 00 90 f3 0f 1e fa 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 63 de 0c 00 f7 d8 64 89 01 48
+ [ 13.317051] RSP: 002b:00007ffeae934e78 EFLAGS: 00000246 ORIG_RAX: 0000000000000139
+ [ 13.319024] RAX: ffffffffffffffda RBX: 00005987276bfcf0 RCX: 00007a21f96ade9d
+ [ 13.321100] RDX: 0000000000000004 RSI: 00007a21f8eda376 RDI: 000000000000001c
+ [ 13.323314] RBP: 00007a21f8eda376 R08: 0000000000000001 R09: 00007ffeae934ec0
+ [ 13.325505] R10: 0000000000000050 R11: 0000000000000246 R12: 0000000000020000
+ [ 13.327637] R13: 00005987276c1250 R14: 0000000000000000 R15: 00005987276c4530
+ [ 13.329737] </TASK>
+
+ [ 13.333945] Allocated by task 139:
+ [ 13.336111] kasan_save_stack+0x30/0x50
+ [ 13.336121] kasan_save_track+0x14/0x30
+ [ 13.336125] __kasan_kmalloc+0xaa/0xb0
+ [ 13.336129] amdtp_hid_probe+0xb1/0x440 [amd_sfh]
+ [ 13.336138] amd_sfh_hid_client_init+0xb8a/0x10f0 [amd_sfh]
+ [ 13.336144] sfh_init_work+0x47/0x120 [amd_sfh]
+ [ 13.336150] process_one_work+0x673/0xeb0
+ [ 13.336155] worker_thread+0x795/0x1250
+ [ 13.336160] kthread+0x290/0x350
+ [ 13.336164] ret_from_fork+0x34/0x70
+ [ 13.336169] ret_from_fork_asm+0x1a/0x30
+
+ [ 13.338175] Freed by task 139:
+ [ 13.340064] kasan_save_stack+0x30/0x50
+ [ 13.340072] kasan_save_track+0x14/0x30
+ [ 13.340076] kasan_save_free_info+0x3b/0x60
+ [ 13.340081] poison_slab_object+0x109/0x180
+ [ 13.340085] __kasan_slab_free+0x32/0x50
+ [ 13.340089] kfree+0xe5/0x310
+ [ 13.340094] amdtp_hid_remove+0xb2/0x160 [amd_sfh]
+ [ 13.340102] amd_sfh_hid_client_deinit+0x324/0x640 [amd_sfh]
+ [ 13.340107] amd_sfh_hid_client_init+0x94a/0x10f0 [amd_sfh]
+ [ 13.340113] sfh_init_work+0x47/0x120 [amd_sfh]
+ [ 13.340118] process_one_work+0x673/0xeb0
+ [ 13.340123] worker_thread+0x795/0x1250
+ [ 13.340127] kthread+0x290/0x350
+ [ 13.340132] ret_from_fork+0x34/0x70
+ [ 13.340136] ret_from_fork_asm+0x1a/0x30
+
+ [ 13.342482] The buggy address belongs to the object at ffff88813152f400
+ which belongs to the cache kmalloc-64 of size 64
+ [ 13.347357] The buggy address is located 8 bytes inside of
+ freed 64-byte region [ffff88813152f400, ffff88813152f440)
+
+ [ 13.347367] The buggy address belongs to the physical page:
+ [ 13.355409] page: refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x13152f
+ [ 13.355416] anon flags: 0x2ffff8000000000(node=0|zone=2|lastcpupid=0x1ffff)
+ [ 13.355423] page_type: 0xffffefff(slab)
+ [ 13.355429] raw: 02ffff8000000000 ffff8881000428c0 ffffea0004c43a00 0000000000000005
+ [ 13.355435] raw: 0000000000000000 0000000000200020 00000001ffffefff 0000000000000000
+ [ 13.355439] page dumped because: kasan: bad access detected
+
+ [ 13.357295] Memory state around the buggy address:
+ [ 13.357299] ffff88813152f300: fa fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc
+ [ 13.357303] ffff88813152f380: fa fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc
+ [ 13.357306] >ffff88813152f400: fa fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc
+ [ 13.357309] ^
+ [ 13.357311] ffff88813152f480: 00 00 00 00 00 fc fc fc fc fc fc fc fc fc fc fc
+ [ 13.357315] ffff88813152f500: 00 00 00 00 00 00 00 06 fc fc fc fc fc fc fc fc
+ [ 13.357318] ==================================================================
+ [ 13.357405] Disabling lock debugging due to kernel taint
+ [ 13.383534] Oops: general protection fault, probably for non-canonical address 0xe0a1bc4140000013: 0000 [#1] PREEMPT SMP KASAN NOPTI
+ [ 13.383544] KASAN: maybe wild-memory-access in range [0x050e020a00000098-0x050e020a0000009f]
+ [ 13.383551] CPU: 3 PID: 479 Comm: (udev-worker) Tainted: G B 6.10.0-arch1-2 #1 893bb55d7f0073f25c46adbb49eb3785fefd74b0
+ [ 13.383561] Hardware name: LENOVO 21CQCTO1WW/21CQCTO1WW, BIOS R22ET70W (1.40 ) 03/21/2024
+ [ 13.383565] RIP: 0010:amd_sfh_get_report+0x81/0x530 [amd_sfh]
+ [ 13.383580] Code: 89 fa 48 c1 ea 03 80 3c 02 00 0f 85 78 03 00 00 48 b8 00 00 00 00 00 fc ff df 4c 8b 63 08 49 8d 7c 24 10 48 89 fa 48 c1 ea 03 <0f> b6 04 02 84 c0 74 08 3c 03 0f 8e 1a 03 00 00 45 8b 74 24 10 45
+ [ 13.383585] RSP: 0018:ffff8881261f7388 EFLAGS: 00010212
+ [ 13.383592] RAX: dffffc0000000000 RBX: ffff88813152f400 RCX: 0000000000000002
+ [ 13.383597] RDX: 00a1c04140000013 RSI: 0000000000000008 RDI: 050e020a0000009b
+ [ 13.383600] RBP: ffff88814d010000 R08: 0000000000000002 R09: fffffbfff3ddb8c0
+ [ 13.383604] R10: ffffffff9eedc607 R11: ffff88810ce98000 R12: 050e020a0000008b
+ [ 13.383607] R13: ffff88814d010000 R14: dffffc0000000000 R15: 0000000000000004
+ [ 13.383611] FS: 00007a21f94d0880(0000) GS:ffff8887e7d80000(0000) knlGS:0000000000000000
+ [ 13.383615] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ [ 13.383618] CR2: 00007e0014c438f0 CR3: 000000012614c000 CR4: 0000000000f50ef0
+ [ 13.383622] PKRU: 55555554
+ [ 13.383625] Call Trace:
+ [ 13.383629] <TASK>
+ [ 13.383632] ? __die_body.cold+0x19/0x27
+ [ 13.383644] ? die_addr+0x46/0x70
+ [ 13.383652] ? exc_general_protection+0x150/0x240
+ [ 13.383664] ? asm_exc_general_protection+0x26/0x30
+ [ 13.383674] ? amd_sfh_get_report+0x81/0x530 [amd_sfh 05f43221435b5205f734cd9da29399130f398a38]
+ [ 13.383686] ? amd_sfh_get_report+0x3ec/0x530 [amd_sfh 05f43221435b5205f734cd9da29399130f398a38]
+ [ 13.383697] amdtp_hid_request+0xb8/0x110 [amd_sfh 05f43221435b5205f734cd9da29399130f398a38]
+ [ 13.383706] ? srso_alias_return_thunk+0x5/0xfbef5
+ [ 13.383713] sensor_hub_get_feature+0x1d3/0x540 [hid_sensor_hub 3f13be3016ff415bea03008d45d99da837ee3082]
+ [ 13.383727] hid_sensor_parse_common_attributes+0x4d0/0xad0 [hid_sensor_iio_common c3a5cbe93969c28b122609768bbe23efe52eb8f5]
+ [ 13.383739] ? srso_alias_return_thunk+0x5/0xfbef5
+ [ 13.383745] ? __pfx_hid_sensor_parse_common_attributes+0x10/0x10 [hid_sensor_iio_common c3a5cbe93969c28b122609768bbe23efe52eb8f5]
+ [ 13.383753] ? _raw_spin_lock_irqsave+0x96/0xf0
+ [ 13.383762] ? __pfx__raw_spin_lock_irqsave+0x10/0x10
+ [ 13.383768] ? devm_iio_device_alloc+0x34/0x50 [industrialio 3d261d5e5765625d2b052be40e526d62b1d2123b]
+ [ 13.383790] ? srso_alias_return_thunk+0x5/0xfbef5
+ [ 13.383795] ? __devm_add_action+0x167/0x1d0
+ [ 13.383806] hid_gyro_3d_probe+0x120/0x7f0 [hid_sensor_gyro_3d 63da36a143b775846ab2dbb86c343b401b5e3172]
+ [ 13.383818] ? srso_alias_return_thunk+0x5/0xfbef5
+ [ 13.383826] platform_probe+0xa2/0x150
+ [ 13.383832] really_probe+0x1e3/0x8a0
+ [ 13.383838] __driver_probe_device+0x18c/0x370
+ [ 13.383844] driver_probe_device+0x4a/0x120
+ [ 13.383851] __driver_attach+0x190/0x4a0
+ [ 13.383857] ? __pfx___driver_attach+0x10/0x10
+ [ 13.383863] bus_for_each_dev+0x106/0x180
+ [ 13.383868] ? __pfx__raw_spin_lock+0x10/0x10
+ [ 13.383874] ? __pfx_bus_for_each_dev+0x10/0x10
+ [ 13.383880] ? srso_alias_return_thunk+0x5/0xfbef5
+ [ 13.383887] bus_add_driver+0x29e/0x4d0
+ [ 13.383895] driver_register+0x1a5/0x360
+ [ 13.383902] ? __pfx_hid_gyro_3d_platform_driver_init+0x10/0x10 [hid_sensor_gyro_3d 63da36a143b775846ab2dbb86c343b401b5e3172]
+ [ 13.383910] do_one_initcall+0xa7/0x380
+ [ 13.383919] ? __pfx_do_one_initcall+0x10/0x10
+ [ 13.383927] ? srso_alias_return_thunk+0x5/0xfbef5
+ [ 13.383933] ? kasan_unpoison+0x44/0x70
+ [ 13.383943] do_init_module+0x238/0x750
+ [ 13.383955] load_module+0x5011/0x6af0
+ [ 13.383962] ? kasan_save_stack+0x30/0x50
+ [ 13.383968] ? kasan_save_track+0x14/0x30
+ [ 13.383973] ? kasan_save_free_info+0x3b/0x60
+ [ 13.383980] ? poison_slab_object+0x109/0x180
+ [ 13.383993] ? __pfx_load_module+0x10/0x10
+ [ 13.384007] ? poison_slab_object+0x109/0x180
+ [ 13.384012] ? srso_alias_return_thunk+0x5/0xfbef5
+ [ 13.384018] ? init_module_from_file+0x13d/0x150
+ [ 13.384025] ? srso_alias_return_thunk+0x5/0xfbef5
+ [ 13.384032] ? init_module_from_file+0xdf/0x150
+ [ 13.384037] init_module_from_file+0xdf/0x150
+ [ 13.384044] ? __pfx_init_module_from_file+0x10/0x10
+ [ 13.384050] ? kasan_save_track+0x14/0x30
+ [ 13.384055] ? srso_alias_return_thunk+0x5/0xfbef5
+ [ 13.384060] ? kasan_save_free_info+0x3b/0x60
+ [ 13.384066] ? poison_slab_object+0x109/0x180
+ [ 13.384071] ? srso_alias_return_thunk+0x5/0xfbef5
+ [ 13.384080] ? srso_alias_return_thunk+0x5/0xfbef5
+ [ 13.384085] ? _raw_spin_lock+0x85/0xe0
+ [ 13.384091] ? __pfx__raw_spin_lock+0x10/0x10
+ [ 13.384096] ? __rseq_handle_notify_resume+0x1a6/0xad0
+ [ 13.384106] idempotent_init_module+0x23b/0x650
+ [ 13.384114] ? __pfx_idempotent_init_module+0x10/0x10
+ [ 13.384120] ? __pfx___seccomp_filter+0x10/0x10
+ [ 13.384129] ? srso_alias_return_thunk+0x5/0xfbef5
+ [ 13.384135] ? __fget_light+0x57/0x420
+ [ 13.384142] ? srso_alias_return_thunk+0x5/0xfbef5
+ [ 13.384147] ? security_capable+0x74/0xb0
+ [ 13.384157] __x64_sys_finit_module+0xbe/0x130
+ [ 13.384164] do_syscall_64+0x82/0x190
+ [ 13.384174] ? srso_alias_return_thunk+0x5/0xfbef5
+ [ 13.384179] ? irqtime_account_irq+0x3d/0x1f0
+ [ 13.384188] ? srso_alias_return_thunk+0x5/0xfbef5
+ [ 13.384193] ? __irq_exit_rcu+0x4e/0x130
+ [ 13.384201] ? srso_alias_return_thunk+0x5/0xfbef5
+ [ 13.384206] entry_SYSCALL_64_after_hwframe+0x76/0x7e
+ [ 13.384212] RIP: 0033:0x7a21f96ade9d
+ [ 13.384263] Code: ff c3 66 2e 0f 1f 84 00 00 00 00 00 90 f3 0f 1e fa 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 63 de 0c 00 f7 d8 64 89 01 48
+ [ 13.384267] RSP: 002b:00007ffeae934e78 EFLAGS: 00000246 ORIG_RAX: 0000000000000139
+ [ 13.384273] RAX: ffffffffffffffda RBX: 00005987276bfcf0 RCX: 00007a21f96ade9d
+ [ 13.384277] RDX: 0000000000000004 RSI: 00007a21f8eda376 RDI: 000000000000001c
+ [ 13.384280] RBP: 00007a21f8eda376 R08: 0000000000000001 R09: 00007ffeae934ec0
+ [ 13.384284] R10: 0000000000000050 R11: 0000000000000246 R12: 0000000000020000
+ [ 13.384288] R13: 00005987276c1250 R14: 0000000000000000 R15: 00005987276c4530
+ [ 13.384297] </TASK>
+ [ 13.384299] Modules linked in: soundwire_amd(+) hid_sensor_gyro_3d(+) hid_sensor_magn_3d hid_sensor_accel_3d soundwire_generic_allocation amdxcp hid_sensor_trigger drm_exec industrialio_triggered_buffer soundwire_bus gpu_sched kvm_amd kfifo_buf qmi_helpers joydev drm_buddy hid_sensor_iio_common mousedev snd_soc_core industrialio i2c_algo_bit mac80211 snd_compress drm_suballoc_helper kvm snd_hda_intel drm_ttm_helper ac97_bus snd_pcm_dmaengine snd_intel_dspcfg ttm thinkpad_acpi(+) snd_intel_sdw_acpi hid_sensor_hub snd_rpl_pci_acp6x drm_display_helper snd_hda_codec hid_multitouch libarc4 snd_acp_pci platform_profile think_lmi(+) hid_generic firmware_attributes_class wmi_bmof cec snd_acp_legacy_common sparse_keymap rapl snd_hda_core psmouse cfg80211 pcspkr snd_pci_acp6x snd_hwdep video snd_pcm snd_pci_acp5x snd_timer snd_rn_pci_acp3x ucsi_acpi snd_acp_config snd sp5100_tco rfkill snd_soc_acpi typec_ucsi thunderbolt amd_sfh k10temp mhi soundcore i2c_piix4 snd_pci_acp3x typec i2c_hid_acpi roles i2c_hid wmi acpi_tad amd_pmc
+ [ 13.384454] mac_hid i2c_dev crypto_user loop nfnetlink zram ip_tables x_tables dm_crypt cbc encrypted_keys trusted asn1_encoder tee dm_mod crct10dif_pclmul crc32_pclmul polyval_clmulni polyval_generic gf128mul ghash_clmulni_intel serio_raw sha512_ssse3 atkbd sha256_ssse3 libps2 sha1_ssse3 vivaldi_fmap nvme aesni_intel crypto_simd nvme_core cryptd ccp xhci_pci i8042 nvme_auth xhci_pci_renesas serio vfat fat btrfs blake2b_generic libcrc32c crc32c_generic crc32c_intel xor raid6_pq
+ [ 13.384552] ---[ end trace 0000000000000000 ]---
+
+KASAN reports a use-after-free of hid->driver_data in function
+amd_sfh_get_report(). The backtrace indicates that the function is called
+by amdtp_hid_request(), which is one of the callbacks of the hid device.
+The change makes sure that driver_data is freed only once
+hid_destroy_device() has returned.
+
+Note that I observed the crash both on v6.9.9 and v6.10.0. The
+code seems to be as it was from the early days of the driver.
+
+Signed-off-by: Olivier Sobrie <olivier@sobrie.be>
+Acked-by: Basavaraj Natikar <Basavaraj.Natikar@amd.com>
+Signed-off-by: Jiri Kosina <jkosina@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hid/amd-sfh-hid/amd_sfh_hid.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_hid.c b/drivers/hid/amd-sfh-hid/amd_sfh_hid.c
+index 705b52337068..81f3024b7b1b 100644
+--- a/drivers/hid/amd-sfh-hid/amd_sfh_hid.c
++++ b/drivers/hid/amd-sfh-hid/amd_sfh_hid.c
+@@ -171,11 +171,13 @@ int amdtp_hid_probe(u32 cur_hid_dev, struct amdtp_cl_data *cli_data)
+ void amdtp_hid_remove(struct amdtp_cl_data *cli_data)
+ {
+ int i;
++ struct amdtp_hid_data *hid_data;
+
+ for (i = 0; i < cli_data->num_hid_devices; ++i) {
+ if (cli_data->hid_sensor_hubs[i]) {
+- kfree(cli_data->hid_sensor_hubs[i]->driver_data);
++ hid_data = cli_data->hid_sensor_hubs[i]->driver_data;
+ hid_destroy_device(cli_data->hid_sensor_hubs[i]);
++ kfree(hid_data);
+ cli_data->hid_sensor_hubs[i] = NULL;
+ }
+ }
+--
+2.43.0
+
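The ordering issue fixed above can be reduced to a small stand-alone C
sketch; the structures and values below are invented for illustration and
are not taken from the driver:

#include <stdio.h>
#include <stdlib.h>

struct fake_hid_device {
        int *driver_data;       /* resource teardown callbacks may still use */
};

/* Stand-in for hid_destroy_device(): teardown may still call back into
 * the driver and dereference driver_data. */
static void destroy_device(struct fake_hid_device *hdev)
{
        printf("teardown reads driver_data = %d\n", *hdev->driver_data);
}

int main(void)
{
        struct fake_hid_device hdev;
        int *data = malloc(sizeof(*data));

        if (!data)
                return 1;
        *data = 42;
        hdev.driver_data = data;

        /* Buggy order (what the patch removes): freeing driver_data first
         * lets destroy_device() read freed memory:
         *
         *      free(data);
         *      destroy_device(&hdev);
         */

        /* Fixed order, mirroring the patch: destroy the device first, and
         * only free driver_data once teardown has returned. */
        destroy_device(&hdev);
        free(data);
        return 0;
}
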
--- /dev/null
+From 5b03fde88155074aa0c753716ef1fc25f734ea38 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Jul 2024 19:42:43 -0400
+Subject: HID: cougar: fix slab-out-of-bounds Read in cougar_report_fixup
+
+From: Camila Alvarez <cam.alvarez.i@gmail.com>
+
+[ Upstream commit a6e9c391d45b5865b61e569146304cff72821a5d ]
+
+report_fixup for the Cougar 500k Gaming Keyboard was not verifying
+that the report descriptor size was correct before accessing it.
+
+Reported-by: syzbot+24c0361074799d02c452@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=24c0361074799d02c452
+Signed-off-by: Camila Alvarez <cam.alvarez.i@gmail.com>
+Reviewed-by: Silvan Jegen <s.jegen@gmail.com>
+Signed-off-by: Jiri Kosina <jkosina@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hid/hid-cougar.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/hid/hid-cougar.c b/drivers/hid/hid-cougar.c
+index cb8bd8aae15b..0fa785f52707 100644
+--- a/drivers/hid/hid-cougar.c
++++ b/drivers/hid/hid-cougar.c
+@@ -106,7 +106,7 @@ static void cougar_fix_g6_mapping(void)
+ static __u8 *cougar_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+ {
+- if (rdesc[2] == 0x09 && rdesc[3] == 0x02 &&
++ if (*rsize >= 117 && rdesc[2] == 0x09 && rdesc[3] == 0x02 &&
+ (rdesc[115] | rdesc[116] << 8) >= HID_MAX_USAGES) {
+ hid_info(hdev,
+ "usage count exceeds max: fixing up report descriptor\n");
+--
+2.43.0
+
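A minimal user-space sketch of the bounds check the patch adds; the
HID_MAX_USAGES value and the descriptor contents are assumptions made for
illustration only:

#include <stdint.h>
#include <stdio.h>

#define HID_MAX_USAGES 12288    /* assumed value, for illustration */

/* The size check must run first: rdesc[115] and rdesc[116] may only be
 * read when the descriptor is at least 117 bytes long. */
static int needs_fixup(const uint8_t *rdesc, unsigned int rsize)
{
        return rsize >= 117 && rdesc[2] == 0x09 && rdesc[3] == 0x02 &&
               (rdesc[115] | rdesc[116] << 8) >= HID_MAX_USAGES;
}

int main(void)
{
        uint8_t short_desc[4] = { 0x05, 0x01, 0x09, 0x02 };

        /* Without the rsize check this call would read past the buffer. */
        printf("fixup needed: %d\n",
               needs_fixup(short_desc, sizeof(short_desc)));
        return 0;
}
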
--- /dev/null
+From bd9881730312550feb4fdddcc2aa5c54b6f9e388 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 6 Jul 2024 23:43:04 -0700
+Subject: hwmon: (adc128d818) Fix underflows seen when writing limit attributes
+
+From: Guenter Roeck <linux@roeck-us.net>
+
+[ Upstream commit 8cad724c8537fe3e0da8004646abc00290adae40 ]
+
+DIV_ROUND_CLOSEST() after kstrtol() results in an underflow if a large
+negative number such as -9223372036854775808 is provided by the user.
+Fix it by reordering clamp_val() and DIV_ROUND_CLOSEST() operations.
+
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/adc128d818.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/hwmon/adc128d818.c b/drivers/hwmon/adc128d818.c
+index 8ac6e735ec5c..5e805d4ee76a 100644
+--- a/drivers/hwmon/adc128d818.c
++++ b/drivers/hwmon/adc128d818.c
+@@ -175,7 +175,7 @@ static ssize_t adc128_in_store(struct device *dev,
+
+ mutex_lock(&data->update_lock);
+ /* 10 mV LSB on limit registers */
+- regval = clamp_val(DIV_ROUND_CLOSEST(val, 10), 0, 255);
++ regval = DIV_ROUND_CLOSEST(clamp_val(val, 0, 2550), 10);
+ data->in[index][nr] = regval << 4;
+ reg = index == 1 ? ADC128_REG_IN_MIN(nr) : ADC128_REG_IN_MAX(nr);
+ i2c_smbus_write_byte_data(data->client, reg, regval);
+@@ -213,7 +213,7 @@ static ssize_t adc128_temp_store(struct device *dev,
+ return err;
+
+ mutex_lock(&data->update_lock);
+- regval = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127);
++ regval = DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), 1000);
+ data->temp[index] = regval << 1;
+ i2c_smbus_write_byte_data(data->client,
+ index == 1 ? ADC128_REG_TEMP_MAX
+--
+2.43.0
+
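The underflow fixed here and in the following hwmon patches comes from
dividing before clamping. A rough user-space sketch of the reordering; the
macros are simplified stand-ins for the kernel ones:

#include <limits.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel macros, for illustration only. */
#define DIV_ROUND_CLOSEST(x, d) \
        (((x) > 0) ? ((x) + (d) / 2) / (d) : ((x) - (d) / 2) / (d))
#define clamp_val(v, lo, hi) \
        ((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))

int main(void)
{
        long val = LONG_MIN;    /* what a user may write to the sysfs file */

        /* Old order: DIV_ROUND_CLOSEST(val, 10) subtracts 5 from LONG_MIN
         * and underflows before clamp_val() ever sees the value. */

        /* New order, as in the patch: clamp to the supported range first,
         * then divide, so no overflow is possible. */
        long regval = DIV_ROUND_CLOSEST(clamp_val(val, 0L, 2550L), 10L);

        printf("regval = %ld\n", regval);       /* 0 */
        return 0;
}
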
--- /dev/null
+From 2ac7cdc133cab5338db1782fe34116120afcff46 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 1 Sep 2024 05:10:51 +0200
+Subject: hwmon: (hp-wmi-sensors) Check if WMI event data exists
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Armin Wolf <W_Armin@gmx.de>
+
+[ Upstream commit a54da9df75cd1b4b5028f6c60f9a211532680585 ]
+
+The BIOS can choose to return no event data in response to a
+WMI event, so the ACPI object passed to the WMI notify handler
+can be NULL.
+
+Check for this situation and ignore the event in that case.
+
+Fixes: 23902f98f8d4 ("hwmon: add HP WMI Sensors driver")
+Signed-off-by: Armin Wolf <W_Armin@gmx.de>
+Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Message-ID: <20240901031055.3030-2-W_Armin@gmx.de>
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/hp-wmi-sensors.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/hwmon/hp-wmi-sensors.c b/drivers/hwmon/hp-wmi-sensors.c
+index b5325d0e72b9..dfa1d6926dea 100644
+--- a/drivers/hwmon/hp-wmi-sensors.c
++++ b/drivers/hwmon/hp-wmi-sensors.c
+@@ -1637,6 +1637,8 @@ static void hp_wmi_notify(u32 value, void *context)
+ goto out_unlock;
+
+ wobj = out.pointer;
++ if (!wobj)
++ goto out_unlock;
+
+ err = populate_event_from_wobj(dev, &event, wobj);
+ if (err) {
+--
+2.43.0
+
--- /dev/null
+From 551c3fbb9430c97170848ea8a71b873fe4d58879 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 6 Jul 2024 23:48:42 -0700
+Subject: hwmon: (lm95234) Fix underflows seen when writing limit attributes
+
+From: Guenter Roeck <linux@roeck-us.net>
+
+[ Upstream commit af64e3e1537896337405f880c1e9ac1f8c0c6198 ]
+
+DIV_ROUND_CLOSEST() after kstrtol() results in an underflow if a large
+negative number such as -9223372036854775808 is provided by the user.
+Fix it by reordering clamp_val() and DIV_ROUND_CLOSEST() operations.
+
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/lm95234.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/hwmon/lm95234.c b/drivers/hwmon/lm95234.c
+index 67b9d7636ee4..37e8e9679aeb 100644
+--- a/drivers/hwmon/lm95234.c
++++ b/drivers/hwmon/lm95234.c
+@@ -301,7 +301,8 @@ static ssize_t tcrit2_store(struct device *dev, struct device_attribute *attr,
+ if (ret < 0)
+ return ret;
+
+- val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, index ? 255 : 127);
++ val = DIV_ROUND_CLOSEST(clamp_val(val, 0, (index ? 255 : 127) * 1000),
++ 1000);
+
+ mutex_lock(&data->update_lock);
+ data->tcrit2[index] = val;
+@@ -350,7 +351,7 @@ static ssize_t tcrit1_store(struct device *dev, struct device_attribute *attr,
+ if (ret < 0)
+ return ret;
+
+- val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 255);
++ val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 255000), 1000);
+
+ mutex_lock(&data->update_lock);
+ data->tcrit1[index] = val;
+@@ -391,7 +392,7 @@ static ssize_t tcrit1_hyst_store(struct device *dev,
+ if (ret < 0)
+ return ret;
+
+- val = DIV_ROUND_CLOSEST(val, 1000);
++ val = DIV_ROUND_CLOSEST(clamp_val(val, -255000, 255000), 1000);
+ val = clamp_val((int)data->tcrit1[index] - val, 0, 31);
+
+ mutex_lock(&data->update_lock);
+@@ -431,7 +432,7 @@ static ssize_t offset_store(struct device *dev, struct device_attribute *attr,
+ return ret;
+
+ /* Accuracy is 1/2 degrees C */
+- val = clamp_val(DIV_ROUND_CLOSEST(val, 500), -128, 127);
++ val = DIV_ROUND_CLOSEST(clamp_val(val, -64000, 63500), 500);
+
+ mutex_lock(&data->update_lock);
+ data->toffset[index] = val;
+--
+2.43.0
+
--- /dev/null
+From 2c0303476b42387b40b0786ef3e49c8cdcbb57f8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 30 Aug 2024 13:13:50 +0200
+Subject: hwmon: ltc2991: fix register bits defines
+
+From: Pawel Dembicki <paweldembicki@gmail.com>
+
+[ Upstream commit 6a422a96bc84cf9b9f0ff741f293a1f9059e0883 ]
+
+In the LTC2991, the V5 and V6 channels use the low nibble of the
+"V5, V6, V7, and V8 Control Register" for configuration, but the
+defines currently target the high nibble.
+
+This patch changes the defines to use the low nibble.
+
+Fixes: 2b9ea4262ae9 ("hwmon: Add driver for ltc2991")
+Signed-off-by: Pawel Dembicki <paweldembicki@gmail.com>
+Message-ID: <20240830111349.30531-1-paweldembicki@gmail.com>
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/ltc2991.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/hwmon/ltc2991.c b/drivers/hwmon/ltc2991.c
+index f74ce9c25bf7..d5e120dfd592 100644
+--- a/drivers/hwmon/ltc2991.c
++++ b/drivers/hwmon/ltc2991.c
+@@ -42,9 +42,9 @@
+ #define LTC2991_V7_V8_FILT_EN BIT(7)
+ #define LTC2991_V7_V8_TEMP_EN BIT(5)
+ #define LTC2991_V7_V8_DIFF_EN BIT(4)
+-#define LTC2991_V5_V6_FILT_EN BIT(7)
+-#define LTC2991_V5_V6_TEMP_EN BIT(5)
+-#define LTC2991_V5_V6_DIFF_EN BIT(4)
++#define LTC2991_V5_V6_FILT_EN BIT(3)
++#define LTC2991_V5_V6_TEMP_EN BIT(1)
++#define LTC2991_V5_V6_DIFF_EN BIT(0)
+
+ #define LTC2991_REPEAT_ACQ_EN BIT(4)
+ #define LTC2991_T_INT_FILT_EN BIT(3)
+--
+2.43.0
+
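A tiny sketch of why the new bit positions land in the low nibble of the
control register; the masks are computed purely for illustration:

#include <stdio.h>

#define BIT(n) (1U << (n))

int main(void)
{
        /* Old defines: bits 7/5/4, i.e. the high nibble of the register,
         * which is where the V7/V8 controls already live. */
        unsigned int old_mask = BIT(7) | BIT(5) | BIT(4);

        /* New defines: bits 3/1/0, i.e. the low nibble, where the V5/V6
         * controls actually sit. */
        unsigned int new_mask = BIT(3) | BIT(1) | BIT(0);

        printf("old mask 0x%02x, new mask 0x%02x\n", old_mask, new_mask);
        return 0;       /* prints: old mask 0xb0, new mask 0x0b */
}
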
--- /dev/null
+From e442d5ca910de872eee23768b076264b7987313b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 6 Jul 2024 23:50:08 -0700
+Subject: hwmon: (nct6775-core) Fix underflows seen when writing limit
+ attributes
+
+From: Guenter Roeck <linux@roeck-us.net>
+
+[ Upstream commit 0403e10bf0824bf0ec2bb135d4cf1c0cc3bf4bf0 ]
+
+DIV_ROUND_CLOSEST() after kstrtol() results in an underflow if a large
+negative number such as -9223372036854775808 is provided by the user.
+Fix it by reordering clamp_val() and DIV_ROUND_CLOSEST() operations.
+
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/nct6775-core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c
+index 9fbab8f02334..934fed3dd586 100644
+--- a/drivers/hwmon/nct6775-core.c
++++ b/drivers/hwmon/nct6775-core.c
+@@ -2262,7 +2262,7 @@ store_temp_offset(struct device *dev, struct device_attribute *attr,
+ if (err < 0)
+ return err;
+
+- val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127);
++ val = DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), 1000);
+
+ mutex_lock(&data->update_lock);
+ data->temp_offset[nr] = val;
+--
+2.43.0
+
--- /dev/null
+From d2c876120a629dfff260f5757124b4d3510f4bec Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 6 Jul 2024 23:51:34 -0700
+Subject: hwmon: (w83627ehf) Fix underflows seen when writing limit attributes
+
+From: Guenter Roeck <linux@roeck-us.net>
+
+[ Upstream commit 5c1de37969b7bc0abcb20b86e91e70caebbd4f89 ]
+
+DIV_ROUND_CLOSEST() after kstrtol() results in an underflow if a large
+negative number such as -9223372036854775808 is provided by the user.
+Fix it by reordering clamp_val() and DIV_ROUND_CLOSEST() operations.
+
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/w83627ehf.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
+index fe960c0a624f..7d7d70afde65 100644
+--- a/drivers/hwmon/w83627ehf.c
++++ b/drivers/hwmon/w83627ehf.c
+@@ -895,7 +895,7 @@ store_target_temp(struct device *dev, struct device_attribute *attr,
+ if (err < 0)
+ return err;
+
+- val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 127);
++ val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 127000), 1000);
+
+ mutex_lock(&data->update_lock);
+ data->target_temp[nr] = val;
+@@ -920,7 +920,7 @@ store_tolerance(struct device *dev, struct device_attribute *attr,
+ return err;
+
+ /* Limit the temp to 0C - 15C */
+- val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 15);
++ val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 15000), 1000);
+
+ mutex_lock(&data->update_lock);
+ reg = w83627ehf_read_value(data, W83627EHF_REG_TOLERANCE[nr]);
+--
+2.43.0
+
--- /dev/null
+From 5cd33e2252369a4586f2e8fb494b3e5c6b6ea841 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 3 Jun 2024 11:15:27 -0400
+Subject: i3c: master: svc: resend target address when get NACK
+
+From: Frank Li <Frank.Li@nxp.com>
+
+[ Upstream commit 9bc7501b0b90f4d0c34b97c14ff1f708ce7ad8f3 ]
+
+According to I3C Spec 1.1.1, 11-Jun-2021, section: 5.1.2.2.3:
+
+If the Controller chooses to start an I3C Message with an I3C Dynamic
+Address, then special provisions shall be made because that same I3C Target
+may be initiating an IBI or a Controller Role Request. So, one of three
+things may happen: (skip 1, 2)
+
+3. The Addresses match and the RnW bits also match, and so neither
+Controller nor Target will ACK since both are expecting the other side to
+provide ACK. As a result, each side might think it had "won" arbitration,
+but neither side would continue, as each would subsequently see that the
+other did not provide ACK.
+...
+For either value of RnW: Due to the NACK, the Controller shall defer the
+Private Write or Private Read, and should typically transmit the Target
+ ^^^^^^^^^^^^^^^^^^^
+Address again after a Repeated START (i.e., the next one or any one prior
+^^^^^^^^^^^^^
+to a STOP in the Frame). Since the Address Header following a Repeated
+START is not arbitrated, the Controller will always win (see Section
+5.1.2.2.4).
+
+Resend the target address if it is not 7E and the controller gets a NACK.
+
+Reviewed-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Signed-off-by: Frank Li <Frank.Li@nxp.com>
+Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/i3c/master/svc-i3c-master.c | 58 ++++++++++++++++++++++-------
+ 1 file changed, 44 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
+index bb299ce02ccc..f0362509319e 100644
+--- a/drivers/i3c/master/svc-i3c-master.c
++++ b/drivers/i3c/master/svc-i3c-master.c
+@@ -1052,29 +1052,59 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
+ u8 *in, const u8 *out, unsigned int xfer_len,
+ unsigned int *actual_len, bool continued)
+ {
++ int retry = 2;
+ u32 reg;
+ int ret;
+
+ /* clean SVC_I3C_MINT_IBIWON w1c bits */
+ writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
+
+- writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
+- xfer_type |
+- SVC_I3C_MCTRL_IBIRESP_NACK |
+- SVC_I3C_MCTRL_DIR(rnw) |
+- SVC_I3C_MCTRL_ADDR(addr) |
+- SVC_I3C_MCTRL_RDTERM(*actual_len),
+- master->regs + SVC_I3C_MCTRL);
+
+- ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
++ while (retry--) {
++ writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
++ xfer_type |
++ SVC_I3C_MCTRL_IBIRESP_NACK |
++ SVC_I3C_MCTRL_DIR(rnw) |
++ SVC_I3C_MCTRL_ADDR(addr) |
++ SVC_I3C_MCTRL_RDTERM(*actual_len),
++ master->regs + SVC_I3C_MCTRL);
++
++ ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
+ SVC_I3C_MSTATUS_MCTRLDONE(reg), 0, 1000);
+- if (ret)
+- goto emit_stop;
++ if (ret)
++ goto emit_stop;
+
+- if (readl(master->regs + SVC_I3C_MERRWARN) & SVC_I3C_MERRWARN_NACK) {
+- ret = -ENXIO;
+- *actual_len = 0;
+- goto emit_stop;
++ if (readl(master->regs + SVC_I3C_MERRWARN) & SVC_I3C_MERRWARN_NACK) {
++ /*
++ * According to I3C Spec 1.1.1, 11-Jun-2021, section: 5.1.2.2.3.
++ * If the Controller chooses to start an I3C Message with an I3C Dynamic
++ * Address, then special provisions shall be made because that same I3C
++ * Target may be initiating an IBI or a Controller Role Request. So, one of
++ * three things may happen: (skip 1, 2)
++ *
++ * 3. The Addresses match and the RnW bits also match, and so neither
++ * Controller nor Target will ACK since both are expecting the other side to
++ * provide ACK. As a result, each side might think it had "won" arbitration,
++ * but neither side would continue, as each would subsequently see that the
++ * other did not provide ACK.
++ * ...
++ * For either value of RnW: Due to the NACK, the Controller shall defer the
++ * Private Write or Private Read, and should typically transmit the Target
++ * Address again after a Repeated START (i.e., the next one or any one prior
++ * to a STOP in the Frame). Since the Address Header following a Repeated
++ * START is not arbitrated, the Controller will always win (see Section
++ * 5.1.2.2.4).
++ */
++ if (retry && addr != 0x7e) {
++ writel(SVC_I3C_MERRWARN_NACK, master->regs + SVC_I3C_MERRWARN);
++ } else {
++ ret = -ENXIO;
++ *actual_len = 0;
++ goto emit_stop;
++ }
++ } else {
++ break;
++ }
+ }
+
+ /*
+--
+2.43.0
+
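The retry policy added above can be reduced to the following user-space
sketch; the NACK behaviour is simulated, and 0x7E is the I3C broadcast
address:

#include <stdbool.h>
#include <stdio.h>

#define I3C_BROADCAST_ADDR 0x7e

/* Simulated address phase: the first attempt is NACKed, the resend is
 * ACKed (illustrative only). */
static bool address_nacked(int attempt)
{
        return attempt == 0;
}

/* Shape of the policy from the patch: on a NACK, resend the target
 * address once, unless it is the broadcast address. */
static int start_xfer(unsigned int addr)
{
        int retry = 2;
        int attempt = 0;

        while (retry--) {
                if (!address_nacked(attempt++))
                        return 0;               /* address phase ACKed */
                if (retry && addr != I3C_BROADCAST_ADDR)
                        continue;               /* clear the NACK, resend */
                return -1;                      /* give up: emit STOP */
        }
        return -1;
}

int main(void)
{
        printf("xfer to 0x40: %d\n", start_xfer(0x40)); /* 0: resend ACKed */
        printf("xfer to 0x7e: %d\n", start_xfer(0x7e)); /* -1: no resend   */
        return 0;
}
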
--- /dev/null
+From ff5af4d55918203b044cff428a446ebec4eb818a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 28 Jun 2024 16:15:58 +0300
+Subject: i3c: mipi-i3c-hci: Error out instead on BUG_ON() in IBI DMA setup
+
+From: Jarkko Nikula <jarkko.nikula@linux.intel.com>
+
+[ Upstream commit 8a2be2f1db268ec735419e53ef04ca039fc027dc ]
+
+The condition dma_get_cache_alignment() * IBI_CHUNK_CACHELINES > 256
+during driver initialization is definitely not a reason to BUG_ON().
+Turn it into a graceful error out with -EINVAL.
+
+Signed-off-by: Jarkko Nikula <jarkko.nikula@linux.intel.com>
+Link: https://lore.kernel.org/r/20240628131559.502822-3-jarkko.nikula@linux.intel.com
+Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/i3c/master/mipi-i3c-hci/dma.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/i3c/master/mipi-i3c-hci/dma.c b/drivers/i3c/master/mipi-i3c-hci/dma.c
+index 4e01a95cc4d0..1a96bf5a0bf8 100644
+--- a/drivers/i3c/master/mipi-i3c-hci/dma.c
++++ b/drivers/i3c/master/mipi-i3c-hci/dma.c
+@@ -294,7 +294,10 @@ static int hci_dma_init(struct i3c_hci *hci)
+
+ rh->ibi_chunk_sz = dma_get_cache_alignment();
+ rh->ibi_chunk_sz *= IBI_CHUNK_CACHELINES;
+- BUG_ON(rh->ibi_chunk_sz > 256);
++ if (rh->ibi_chunk_sz > 256) {
++ ret = -EINVAL;
++ goto err_out;
++ }
+
+ ibi_status_ring_sz = rh->ibi_status_sz * rh->ibi_status_entries;
+ ibi_data_ring_sz = rh->ibi_chunk_sz * rh->ibi_chunks_total;
+--
+2.43.0
+
--- /dev/null
+From 091b6e44781d3a933aaf7674be9ecf4178c60057 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Aug 2024 18:06:40 +0200
+Subject: ice: Add netif_device_attach/detach into PF reset flow
+
+From: Dawid Osuchowski <dawid.osuchowski@linux.intel.com>
+
+[ Upstream commit d11a67634227f9f9da51938af085fb41a733848f ]
+
+Ethtool callbacks can be executed while a reset is in progress and try
+to access deleted resources; e.g. getting coalesce settings can result
+in the NULL pointer dereference seen below.
+
+Reproduction steps:
+Once the driver is fully initialized, trigger reset:
+ # echo 1 > /sys/class/net/<interface>/device/reset
+when reset is in progress try to get coalesce settings using ethtool:
+ # ethtool -c <interface>
+
+BUG: kernel NULL pointer dereference, address: 0000000000000020
+PGD 0 P4D 0
+Oops: Oops: 0000 [#1] PREEMPT SMP PTI
+CPU: 11 PID: 19713 Comm: ethtool Tainted: G S 6.10.0-rc7+ #7
+RIP: 0010:ice_get_q_coalesce+0x2e/0xa0 [ice]
+RSP: 0018:ffffbab1e9bcf6a8 EFLAGS: 00010206
+RAX: 000000000000000c RBX: ffff94512305b028 RCX: 0000000000000000
+RDX: 0000000000000000 RSI: ffff9451c3f2e588 RDI: ffff9451c3f2e588
+RBP: 0000000000000000 R08: 0000000000000000 R09: 0000000000000000
+R10: ffff9451c3f2e580 R11: 000000000000001f R12: ffff945121fa9000
+R13: ffffbab1e9bcf760 R14: 0000000000000013 R15: ffffffff9e65dd40
+FS: 00007faee5fbe740(0000) GS:ffff94546fd80000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 0000000000000020 CR3: 0000000106c2e005 CR4: 00000000001706f0
+Call Trace:
+<TASK>
+ice_get_coalesce+0x17/0x30 [ice]
+coalesce_prepare_data+0x61/0x80
+ethnl_default_doit+0xde/0x340
+genl_family_rcv_msg_doit+0xf2/0x150
+genl_rcv_msg+0x1b3/0x2c0
+netlink_rcv_skb+0x5b/0x110
+genl_rcv+0x28/0x40
+netlink_unicast+0x19c/0x290
+netlink_sendmsg+0x222/0x490
+__sys_sendto+0x1df/0x1f0
+__x64_sys_sendto+0x24/0x30
+do_syscall_64+0x82/0x160
+entry_SYSCALL_64_after_hwframe+0x76/0x7e
+RIP: 0033:0x7faee60d8e27
+
+Calling netif_device_detach() before the reset makes the net core not
+call into the driver when an ethtool command is issued; an attempt to
+execute an ethtool command during the reset will result in the following
+message:
+
+ netlink error: No such device
+
+instead of a NULL pointer dereference. Once the reset is done and
+ice_rebuild() is executing, netif_device_attach() is called to allow
+ethtool operations to occur again in a safe manner.
+
+Fixes: fcea6f3da546 ("ice: Add stats and ethtool support")
+Suggested-by: Jakub Kicinski <kuba@kernel.org>
+Reviewed-by: Igor Bagnucki <igor.bagnucki@intel.com>
+Signed-off-by: Dawid Osuchowski <dawid.osuchowski@linux.intel.com>
+Tested-by: Pucha Himasekhar Reddy <himasekharx.reddy.pucha@intel.com> (A Contingent worker at Intel)
+Reviewed-by: Michal Schmidt <mschmidt@redhat.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_main.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 253689dbf6c3..209bfd70c430 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -609,6 +609,9 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
+ memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
+ }
+ }
++
++ if (vsi->netdev)
++ netif_device_detach(vsi->netdev);
+ skip:
+
+ /* clear SW filtering DB */
+@@ -7590,6 +7593,7 @@ static void ice_update_pf_netdev_link(struct ice_pf *pf)
+ */
+ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
+ {
++ struct ice_vsi *vsi = ice_get_main_vsi(pf);
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ bool dvm;
+@@ -7734,6 +7738,9 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
+ ice_rebuild_arfs(pf);
+ }
+
++ if (vsi && vsi->netdev)
++ netif_device_attach(vsi->netdev);
++
+ ice_update_pf_netdev_link(pf);
+
+ /* tell the firmware we are up */
+--
+2.43.0
+
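A rough user-space analogy of the detach/attach pattern used above; the
structure and the return handling are invented for illustration:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_netdev {
        bool present;   /* roughly what netif_device_detach()/attach() toggle */
};

/* Stand-in for an ethtool request: the core refuses to call into the
 * driver while the device is marked absent. */
static int get_coalesce(const struct fake_netdev *dev)
{
        if (!dev->present)
                return -ENODEV; /* user sees "netlink error: No such device" */
        /* ...would read queue/vector state that a reset tears down... */
        return 0;
}

int main(void)
{
        struct fake_netdev dev = { .present = true };

        dev.present = false;                            /* prepare for reset: detach */
        printf("during reset: %d\n", get_coalesce(&dev));       /* -19 */

        dev.present = true;                             /* rebuild done: attach */
        printf("after rebuild: %d\n", get_coalesce(&dev));      /* 0 */
        return 0;
}
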
--- /dev/null
+From 6bcea0207c2eb99b11ecdeeb3236b49a1ff2dab5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 17 Jun 2024 14:46:25 +0200
+Subject: ice: Check all ice_vsi_rebuild() errors in function
+
+From: Eric Joyner <eric.joyner@intel.com>
+
+[ Upstream commit d47bf9a495cf424fad674321d943123dc12b926d ]
+
+Check the return value from ice_vsi_rebuild() and prevent the use of an
+incorrectly configured VSI.
+
+Reviewed-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
+Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
+Signed-off-by: Eric Joyner <eric.joyner@intel.com>
+Signed-off-by: Karen Ostrowska <karen.ostrowska@intel.com>
+Tested-by: Pucha Himasekhar Reddy <himasekharx.reddy.pucha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_main.c | 13 +++++++++++--
+ 1 file changed, 11 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index f16d13e9ff6e..253689dbf6c3 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -4160,13 +4160,17 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
+
+ /* set for the next time the netdev is started */
+ if (!netif_running(vsi->netdev)) {
+- ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
++ err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
++ if (err)
++ goto rebuild_err;
+ dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
+ goto done;
+ }
+
+ ice_vsi_close(vsi);
+- ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
++ err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
++ if (err)
++ goto rebuild_err;
+
+ ice_for_each_traffic_class(i) {
+ if (vsi->tc_cfg.ena_tc & BIT(i))
+@@ -4177,6 +4181,11 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
+ }
+ ice_pf_dcb_recfg(pf, locked);
+ ice_vsi_open(vsi);
++ goto done;
++
++rebuild_err:
++ dev_err(ice_pf_to_dev(pf), "Error during VSI rebuild: %d. Unload and reload the driver.\n",
++ err);
+ done:
+ clear_bit(ICE_CFG_BUSY, pf->state);
+ return err;
+--
+2.43.0
+
--- /dev/null
+From a10b4fb29c1853d3f8ae4de910cd9682f749b8ef Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 23 Aug 2024 11:59:29 +0200
+Subject: ice: check ICE_VSI_DOWN under rtnl_lock when preparing for reset
+
+From: Larysa Zaremba <larysa.zaremba@intel.com>
+
+[ Upstream commit d8c40b9d3a6cef61eb5a0c58c34a3090ea938d89 ]
+
+Consider the following scenario:
+
+.ndo_bpf() | ice_prepare_for_reset() |
+________________________|_______________________________________|
+rtnl_lock() | |
+ice_down() | |
+ | test_bit(ICE_VSI_DOWN) - true |
+ | ice_dis_vsi() returns |
+ice_up() | |
+ | proceeds to rebuild a running VSI |
+
+.ndo_bpf() is not the only rtnl-locked callback that toggles the interface
+to apply new configuration. Another example is .set_channels().
+
+To avoid the race condition above, act only after reading ICE_VSI_DOWN
+under rtnl_lock.
+
+Fixes: 0f9d5027a749 ("ice: Refactor VSI allocation, deletion and rebuild flow")
+Reviewed-by: Wojciech Drewek <wojciech.drewek@intel.com>
+Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
+Tested-by: Chandan Kumar Rout <chandanx.rout@intel.com>
+Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Reviewed-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_lib.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
+index 3e772c014ae3..7076a7738864 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
+@@ -2672,8 +2672,7 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
+ */
+ void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
+ {
+- if (test_bit(ICE_VSI_DOWN, vsi->state))
+- return;
++ bool already_down = test_bit(ICE_VSI_DOWN, vsi->state);
+
+ set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
+
+@@ -2681,15 +2680,16 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
+ if (netif_running(vsi->netdev)) {
+ if (!locked)
+ rtnl_lock();
+-
+- ice_vsi_close(vsi);
++ already_down = test_bit(ICE_VSI_DOWN, vsi->state);
++ if (!already_down)
++ ice_vsi_close(vsi);
+
+ if (!locked)
+ rtnl_unlock();
+- } else {
++ } else if (!already_down) {
+ ice_vsi_close(vsi);
+ }
+- } else if (vsi->type == ICE_VSI_CTRL) {
++ } else if (vsi->type == ICE_VSI_CTRL && !already_down) {
+ ice_vsi_close(vsi);
+ }
+ }
+--
+2.43.0
+
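The re-check-under-the-lock pattern used in the patch, as a small pthread
sketch; the mutex plays the role of rtnl_lock, the flag plays ICE_VSI_DOWN,
and everything else is invented:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool vsi_down;

/* A value sampled before taking the lock may already be stale when we
 * act on it, so the authoritative read happens under the lock. */
static void dis_vsi(void)
{
        bool already_down = vsi_down;   /* early hint only */

        pthread_mutex_lock(&lock);
        already_down = vsi_down;        /* re-read under the lock */
        if (!already_down)
                printf("closing the VSI\n");
        else
                printf("VSI already down, nothing to close\n");
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        /* Pretend another rtnl-locked path (e.g. .ndo_bpf) brought the
         * interface down after our unlocked check would have run. */
        vsi_down = true;
        dis_vsi();
        return 0;
}
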
--- /dev/null
+From 4bd9f75d631b52b4e52621b603f4c9048032c567 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 23 Aug 2024 11:59:31 +0200
+Subject: ice: do not bring the VSI up, if it was down before the XDP setup
+
+From: Larysa Zaremba <larysa.zaremba@intel.com>
+
+[ Upstream commit 04c7e14e5b0b6227e7b00d7a96ca2f2426ab9171 ]
+
+After XDP configuration is completed, we bring the interface up
+unconditionally, regardless of its state before the call to .ndo_bpf().
+
+Preserve the information about whether the interface had to be brought
+down and later bring it up only in that case.
+
+Fixes: efc2214b6047 ("ice: Add support for XDP")
+Reviewed-by: Wojciech Drewek <wojciech.drewek@intel.com>
+Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
+Tested-by: Chandan Kumar Rout <chandanx.rout@intel.com>
+Acked-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_main.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 746cae5964fa..766f9a466bc3 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -3006,8 +3006,8 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+ {
+ unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
+- bool if_running = netif_running(vsi->netdev);
+ int ret = 0, xdp_ring_err = 0;
++ bool if_running;
+
+ if (prog && !prog->aux->xdp_has_frags) {
+ if (frame_size > ice_max_xdp_frame_size(vsi)) {
+@@ -3024,8 +3024,11 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
+ return 0;
+ }
+
++ if_running = netif_running(vsi->netdev) &&
++ !test_and_set_bit(ICE_VSI_DOWN, vsi->state);
++
+ /* need to stop netdev while setting up the program for Rx rings */
+- if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
++ if (if_running) {
+ ret = ice_down(vsi);
+ if (ret) {
+ NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
+--
+2.43.0
+
--- /dev/null
+From d4cd116e15a4bd69305911596b4583a012d2ae64 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 23 Aug 2024 11:59:26 +0200
+Subject: ice: move netif_queue_set_napi to rtnl-protected sections
+
+From: Larysa Zaremba <larysa.zaremba@intel.com>
+
+[ Upstream commit 2a5dc090b92cfa5270e20056074241c6db5c9cdd ]
+
+Currently, netif_queue_set_napi() is called from ice_vsi_rebuild(), which
+is not rtnl-locked when called from the reset. This creates the need to
+take the rtnl_lock just for a single function and complicates the
+synchronization with .ndo_bpf. At the same time, there is no actual need
+to fill napi-to-queue information at this exact point.
+
+Fill napi-to-queue information when opening the VSI and clear it when the
+VSI is being closed. Those routines are already rtnl-locked.
+
+Also, rewrite napi-to-queue assignment in a way that prevents inclusion of
+XDP queues, as this leads to out-of-bounds writes, such as the one below.
+
+[ +0.000004] BUG: KASAN: slab-out-of-bounds in netif_queue_set_napi+0x1c2/0x1e0
+[ +0.000012] Write of size 8 at addr ffff889881727c80 by task bash/7047
+[ +0.000006] CPU: 24 PID: 7047 Comm: bash Not tainted 6.10.0-rc2+ #2
+[ +0.000004] Hardware name: Intel Corporation S2600WFT/S2600WFT, BIOS SE5C620.86B.02.01.0014.082620210524 08/26/2021
+[ +0.000003] Call Trace:
+[ +0.000003] <TASK>
+[ +0.000002] dump_stack_lvl+0x60/0x80
+[ +0.000007] print_report+0xce/0x630
+[ +0.000007] ? __pfx__raw_spin_lock_irqsave+0x10/0x10
+[ +0.000007] ? __virt_addr_valid+0x1c9/0x2c0
+[ +0.000005] ? netif_queue_set_napi+0x1c2/0x1e0
+[ +0.000003] kasan_report+0xe9/0x120
+[ +0.000004] ? netif_queue_set_napi+0x1c2/0x1e0
+[ +0.000004] netif_queue_set_napi+0x1c2/0x1e0
+[ +0.000005] ice_vsi_close+0x161/0x670 [ice]
+[ +0.000114] ice_dis_vsi+0x22f/0x270 [ice]
+[ +0.000095] ice_pf_dis_all_vsi.constprop.0+0xae/0x1c0 [ice]
+[ +0.000086] ice_prepare_for_reset+0x299/0x750 [ice]
+[ +0.000087] pci_dev_save_and_disable+0x82/0xd0
+[ +0.000006] pci_reset_function+0x12d/0x230
+[ +0.000004] reset_store+0xa0/0x100
+[ +0.000006] ? __pfx_reset_store+0x10/0x10
+[ +0.000002] ? __pfx_mutex_lock+0x10/0x10
+[ +0.000004] ? __check_object_size+0x4c1/0x640
+[ +0.000007] kernfs_fop_write_iter+0x30b/0x4a0
+[ +0.000006] vfs_write+0x5d6/0xdf0
+[ +0.000005] ? fd_install+0x180/0x350
+[ +0.000005] ? __pfx_vfs_write+0x10/0xA10
+[ +0.000004] ? do_fcntl+0x52c/0xcd0
+[ +0.000004] ? kasan_save_track+0x13/0x60
+[ +0.000003] ? kasan_save_free_info+0x37/0x60
+[ +0.000006] ksys_write+0xfa/0x1d0
+[ +0.000003] ? __pfx_ksys_write+0x10/0x10
+[ +0.000002] ? __x64_sys_fcntl+0x121/0x180
+[ +0.000004] ? _raw_spin_lock+0x87/0xe0
+[ +0.000005] do_syscall_64+0x80/0x170
+[ +0.000007] ? _raw_spin_lock+0x87/0xe0
+[ +0.000004] ? __pfx__raw_spin_lock+0x10/0x10
+[ +0.000003] ? file_close_fd_locked+0x167/0x230
+[ +0.000005] ? syscall_exit_to_user_mode+0x7d/0x220
+[ +0.000005] ? do_syscall_64+0x8c/0x170
+[ +0.000004] ? do_syscall_64+0x8c/0x170
+[ +0.000003] ? do_syscall_64+0x8c/0x170
+[ +0.000003] ? fput+0x1a/0x2c0
+[ +0.000004] ? filp_close+0x19/0x30
+[ +0.000004] ? do_dup2+0x25a/0x4c0
+[ +0.000004] ? __x64_sys_dup2+0x6e/0x2e0
+[ +0.000002] ? syscall_exit_to_user_mode+0x7d/0x220
+[ +0.000004] ? do_syscall_64+0x8c/0x170
+[ +0.000003] ? __count_memcg_events+0x113/0x380
+[ +0.000005] ? handle_mm_fault+0x136/0x820
+[ +0.000005] ? do_user_addr_fault+0x444/0xa80
+[ +0.000004] ? clear_bhb_loop+0x25/0x80
+[ +0.000004] ? clear_bhb_loop+0x25/0x80
+[ +0.000002] entry_SYSCALL_64_after_hwframe+0x76/0x7e
+[ +0.000005] RIP: 0033:0x7f2033593154
+
+Fixes: 080b0c8d6d26 ("ice: Fix ASSERT_RTNL() warning during certain scenarios")
+Fixes: 91fdbce7e8d6 ("ice: Add support in the driver for associating queue with napi")
+Reviewed-by: Wojciech Drewek <wojciech.drewek@intel.com>
+Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
+Reviewed-by: Amritha Nambiar <amritha.nambiar@intel.com>
+Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Reviewed-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Tested-by: George Kuruvinakunnel <george.kuruvinakunnel@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_base.c | 11 +-
+ drivers/net/ethernet/intel/ice/ice_lib.c | 129 ++++++----------------
+ drivers/net/ethernet/intel/ice/ice_lib.h | 10 +-
+ drivers/net/ethernet/intel/ice/ice_main.c | 17 ++-
+ 4 files changed, 49 insertions(+), 118 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
+index f448d3a84564..c158749a80e0 100644
+--- a/drivers/net/ethernet/intel/ice/ice_base.c
++++ b/drivers/net/ethernet/intel/ice/ice_base.c
+@@ -190,16 +190,11 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
+ }
+ q_vector = vsi->q_vectors[v_idx];
+
+- ice_for_each_tx_ring(tx_ring, q_vector->tx) {
+- ice_queue_set_napi(vsi, tx_ring->q_index, NETDEV_QUEUE_TYPE_TX,
+- NULL);
++ ice_for_each_tx_ring(tx_ring, vsi->q_vectors[v_idx]->tx)
+ tx_ring->q_vector = NULL;
+- }
+- ice_for_each_rx_ring(rx_ring, q_vector->rx) {
+- ice_queue_set_napi(vsi, rx_ring->q_index, NETDEV_QUEUE_TYPE_RX,
+- NULL);
++
++ ice_for_each_rx_ring(rx_ring, vsi->q_vectors[v_idx]->rx)
+ rx_ring->q_vector = NULL;
+- }
+
+ /* only VSI with an associated netdev is set up with NAPI */
+ if (vsi->netdev)
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
+index 7629b0190578..cdf05e57499f 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
+@@ -2286,9 +2286,6 @@ static int ice_vsi_cfg_def(struct ice_vsi *vsi)
+
+ ice_vsi_map_rings_to_vectors(vsi);
+
+- /* Associate q_vector rings to napi */
+- ice_vsi_set_napi_queues(vsi);
+-
+ vsi->stat_offsets_loaded = false;
+
+ /* ICE_VSI_CTRL does not need RSS so skip RSS processing */
+@@ -2628,6 +2625,7 @@ void ice_vsi_close(struct ice_vsi *vsi)
+ if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state))
+ ice_down(vsi);
+
++ ice_vsi_clear_napi_queues(vsi);
+ ice_vsi_free_irq(vsi);
+ ice_vsi_free_tx_rings(vsi);
+ ice_vsi_free_rx_rings(vsi);
+@@ -2694,120 +2692,55 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
+ }
+
+ /**
+- * __ice_queue_set_napi - Set the napi instance for the queue
+- * @dev: device to which NAPI and queue belong
+- * @queue_index: Index of queue
+- * @type: queue type as RX or TX
+- * @napi: NAPI context
+- * @locked: is the rtnl_lock already held
+- *
+- * Set the napi instance for the queue. Caller indicates the lock status.
+- */
+-static void
+-__ice_queue_set_napi(struct net_device *dev, unsigned int queue_index,
+- enum netdev_queue_type type, struct napi_struct *napi,
+- bool locked)
+-{
+- if (!locked)
+- rtnl_lock();
+- netif_queue_set_napi(dev, queue_index, type, napi);
+- if (!locked)
+- rtnl_unlock();
+-}
+-
+-/**
+- * ice_queue_set_napi - Set the napi instance for the queue
+- * @vsi: VSI being configured
+- * @queue_index: Index of queue
+- * @type: queue type as RX or TX
+- * @napi: NAPI context
++ * ice_vsi_set_napi_queues - associate netdev queues with napi
++ * @vsi: VSI pointer
+ *
+- * Set the napi instance for the queue. The rtnl lock state is derived from the
+- * execution path.
++ * Associate queue[s] with napi for all vectors.
++ * The caller must hold rtnl_lock.
+ */
+-void
+-ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index,
+- enum netdev_queue_type type, struct napi_struct *napi)
++void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
+ {
+- struct ice_pf *pf = vsi->back;
++ struct net_device *netdev = vsi->netdev;
++ int q_idx, v_idx;
+
+- if (!vsi->netdev)
++ if (!netdev)
+ return;
+
+- if (current_work() == &pf->serv_task ||
+- test_bit(ICE_PREPARED_FOR_RESET, pf->state) ||
+- test_bit(ICE_DOWN, pf->state) ||
+- test_bit(ICE_SUSPENDED, pf->state))
+- __ice_queue_set_napi(vsi->netdev, queue_index, type, napi,
+- false);
+- else
+- __ice_queue_set_napi(vsi->netdev, queue_index, type, napi,
+- true);
+-}
++ ice_for_each_rxq(vsi, q_idx)
++ netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_RX,
++ &vsi->rx_rings[q_idx]->q_vector->napi);
+
+-/**
+- * __ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
+- * @q_vector: q_vector pointer
+- * @locked: is the rtnl_lock already held
+- *
+- * Associate the q_vector napi with all the queue[s] on the vector.
+- * Caller indicates the lock status.
+- */
+-void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked)
+-{
+- struct ice_rx_ring *rx_ring;
+- struct ice_tx_ring *tx_ring;
+-
+- ice_for_each_rx_ring(rx_ring, q_vector->rx)
+- __ice_queue_set_napi(q_vector->vsi->netdev, rx_ring->q_index,
+- NETDEV_QUEUE_TYPE_RX, &q_vector->napi,
+- locked);
+-
+- ice_for_each_tx_ring(tx_ring, q_vector->tx)
+- __ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index,
+- NETDEV_QUEUE_TYPE_TX, &q_vector->napi,
+- locked);
++ ice_for_each_txq(vsi, q_idx)
++ netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX,
++ &vsi->tx_rings[q_idx]->q_vector->napi);
+ /* Also set the interrupt number for the NAPI */
+- netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
+-}
++ ice_for_each_q_vector(vsi, v_idx) {
++ struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+
+-/**
+- * ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
+- * @q_vector: q_vector pointer
+- *
+- * Associate the q_vector napi with all the queue[s] on the vector
+- */
+-void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector)
+-{
+- struct ice_rx_ring *rx_ring;
+- struct ice_tx_ring *tx_ring;
+-
+- ice_for_each_rx_ring(rx_ring, q_vector->rx)
+- ice_queue_set_napi(q_vector->vsi, rx_ring->q_index,
+- NETDEV_QUEUE_TYPE_RX, &q_vector->napi);
+-
+- ice_for_each_tx_ring(tx_ring, q_vector->tx)
+- ice_queue_set_napi(q_vector->vsi, tx_ring->q_index,
+- NETDEV_QUEUE_TYPE_TX, &q_vector->napi);
+- /* Also set the interrupt number for the NAPI */
+- netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
++ netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
++ }
+ }
+
+ /**
+- * ice_vsi_set_napi_queues
++ * ice_vsi_clear_napi_queues - dissociate netdev queues from napi
+ * @vsi: VSI pointer
+ *
+- * Associate queue[s] with napi for all vectors
++ * Clear the association between all VSI queues queue[s] and napi.
++ * The caller must hold rtnl_lock.
+ */
+-void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
++void ice_vsi_clear_napi_queues(struct ice_vsi *vsi)
+ {
+- int i;
++ struct net_device *netdev = vsi->netdev;
++ int q_idx;
+
+- if (!vsi->netdev)
++ if (!netdev)
+ return;
+
+- ice_for_each_q_vector(vsi, i)
+- ice_q_vector_set_napi_queues(vsi->q_vectors[i]);
++ ice_for_each_txq(vsi, q_idx)
++ netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX, NULL);
++
++ ice_for_each_rxq(vsi, q_idx)
++ netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_RX, NULL);
+ }
+
+ /**
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
+index 94ce8964dda6..36d86535695d 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lib.h
++++ b/drivers/net/ethernet/intel/ice/ice_lib.h
+@@ -44,16 +44,10 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
+ struct ice_vsi *
+ ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params);
+
+-void
+-ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index,
+- enum netdev_queue_type type, struct napi_struct *napi);
+-
+-void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked);
+-
+-void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector);
+-
+ void ice_vsi_set_napi_queues(struct ice_vsi *vsi);
+
++void ice_vsi_clear_napi_queues(struct ice_vsi *vsi);
++
+ int ice_vsi_release(struct ice_vsi *vsi);
+
+ void ice_vsi_close(struct ice_vsi *vsi);
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 209bfd70c430..0e0086494a54 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -3559,11 +3559,9 @@ static void ice_napi_add(struct ice_vsi *vsi)
+ if (!vsi->netdev)
+ return;
+
+- ice_for_each_q_vector(vsi, v_idx) {
++ ice_for_each_q_vector(vsi, v_idx)
+ netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
+ ice_napi_poll);
+- __ice_q_vector_set_napi_queues(vsi->q_vectors[v_idx], false);
+- }
+ }
+
+ /**
+@@ -5541,7 +5539,9 @@ static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
+ if (ret)
+ goto err_reinit;
+ ice_vsi_map_rings_to_vectors(pf->vsi[v]);
++ rtnl_lock();
+ ice_vsi_set_napi_queues(pf->vsi[v]);
++ rtnl_unlock();
+ }
+
+ ret = ice_req_irq_msix_misc(pf);
+@@ -5555,8 +5555,12 @@ static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
+
+ err_reinit:
+ while (v--)
+- if (pf->vsi[v])
++ if (pf->vsi[v]) {
++ rtnl_lock();
++ ice_vsi_clear_napi_queues(pf->vsi[v]);
++ rtnl_unlock();
+ ice_vsi_free_q_vectors(pf->vsi[v]);
++ }
+
+ return ret;
+ }
+@@ -5621,6 +5625,9 @@ static int ice_suspend(struct device *dev)
+ ice_for_each_vsi(pf, v) {
+ if (!pf->vsi[v])
+ continue;
++ rtnl_lock();
++ ice_vsi_clear_napi_queues(pf->vsi[v]);
++ rtnl_unlock();
+ ice_vsi_free_q_vectors(pf->vsi[v]);
+ }
+ ice_clear_interrupt_scheme(pf);
+@@ -7456,6 +7463,8 @@ int ice_vsi_open(struct ice_vsi *vsi)
+ err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
+ if (err)
+ goto err_set_qs;
++
++ ice_vsi_set_napi_queues(vsi);
+ }
+
+ err = ice_up_complete(vsi);
+--
+2.43.0
+
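The out-of-bounds write described above boils down to indexing a
per-netdev-queue array with XDP ring indices. A self-contained sketch with
made-up sizes:

#include <stdio.h>

#define NUM_TXQ  4      /* Tx queues known to the netdev            */
#define NUM_XDPQ 4      /* XDP Tx rings use indices past NUM_TXQ - 1 */

int main(void)
{
        const void *napi_of_txq[NUM_TXQ] = { 0 };  /* sized for netdev queues only */
        int q;

        /* Iterating over the netdev queues, as the patch does, stays in
         * bounds. */
        for (q = 0; q < NUM_TXQ; q++)
                napi_of_txq[q] = &q;

        /* Iterating over every ring attached to the q_vectors would also
         * visit XDP rings whose q_index is NUM_TXQ..NUM_TXQ + NUM_XDPQ - 1
         * and write past the array:
         *
         *      for (q = 0; q < NUM_TXQ + NUM_XDPQ; q++)
         *              napi_of_txq[q] = &q;    // OOB for q >= NUM_TXQ
         */
        printf("mapped %d Tx queues safely\n", NUM_TXQ);
        return 0;
}
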
--- /dev/null
+From f9976636a0995cc27d877ddb2377a526a8f5ac23 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 23 Aug 2024 11:59:27 +0200
+Subject: ice: protect XDP configuration with a mutex
+
+From: Larysa Zaremba <larysa.zaremba@intel.com>
+
+[ Upstream commit 2504b8405768a57a71e660dbfd5abd59f679a03f ]
+
+The main threat to data consistency in ice_xdp() is a possible asynchronous
+PF reset. It can be triggered by a user or by the TX timeout handler.
+
+XDP setup and PF reset code access the same resources in the following
+sections:
+* ice_vsi_close() in ice_prepare_for_reset() - already rtnl-locked
+* ice_vsi_rebuild() for the PF VSI - not protected
+* ice_vsi_open() - already rtnl-locked
+
+With unfortunate timing, such accesses can result in a crash such as the
+one below:
+
+[ +1.999878] ice 0000:b1:00.0: Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring 14
+[ +2.002992] ice 0000:b1:00.0: Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring 18
+[Mar15 18:17] ice 0000:b1:00.0 ens801f0np0: NETDEV WATCHDOG: CPU: 38: transmit queue 14 timed out 80692736 ms
+[ +0.000093] ice 0000:b1:00.0 ens801f0np0: tx_timeout: VSI_num: 6, Q 14, NTC: 0x0, HW_HEAD: 0x0, NTU: 0x0, INT: 0x4000001
+[ +0.000012] ice 0000:b1:00.0 ens801f0np0: tx_timeout recovery level 1, txqueue 14
+[ +0.394718] ice 0000:b1:00.0: PTP reset successful
+[ +0.006184] BUG: kernel NULL pointer dereference, address: 0000000000000098
+[ +0.000045] #PF: supervisor read access in kernel mode
+[ +0.000023] #PF: error_code(0x0000) - not-present page
+[ +0.000023] PGD 0 P4D 0
+[ +0.000018] Oops: 0000 [#1] PREEMPT SMP NOPTI
+[ +0.000023] CPU: 38 PID: 7540 Comm: kworker/38:1 Not tainted 6.8.0-rc7 #1
+[ +0.000031] Hardware name: Intel Corporation S2600WFT/S2600WFT, BIOS SE5C620.86B.02.01.0014.082620210524 08/26/2021
+[ +0.000036] Workqueue: ice ice_service_task [ice]
+[ +0.000183] RIP: 0010:ice_clean_tx_ring+0xa/0xd0 [ice]
+[...]
+[ +0.000013] Call Trace:
+[ +0.000016] <TASK>
+[ +0.000014] ? __die+0x1f/0x70
+[ +0.000029] ? page_fault_oops+0x171/0x4f0
+[ +0.000029] ? schedule+0x3b/0xd0
+[ +0.000027] ? exc_page_fault+0x7b/0x180
+[ +0.000022] ? asm_exc_page_fault+0x22/0x30
+[ +0.000031] ? ice_clean_tx_ring+0xa/0xd0 [ice]
+[ +0.000194] ice_free_tx_ring+0xe/0x60 [ice]
+[ +0.000186] ice_destroy_xdp_rings+0x157/0x310 [ice]
+[ +0.000151] ice_vsi_decfg+0x53/0xe0 [ice]
+[ +0.000180] ice_vsi_rebuild+0x239/0x540 [ice]
+[ +0.000186] ice_vsi_rebuild_by_type+0x76/0x180 [ice]
+[ +0.000145] ice_rebuild+0x18c/0x840 [ice]
+[ +0.000145] ? delay_tsc+0x4a/0xc0
+[ +0.000022] ? delay_tsc+0x92/0xc0
+[ +0.000020] ice_do_reset+0x140/0x180 [ice]
+[ +0.000886] ice_service_task+0x404/0x1030 [ice]
+[ +0.000824] process_one_work+0x171/0x340
+[ +0.000685] worker_thread+0x277/0x3a0
+[ +0.000675] ? preempt_count_add+0x6a/0xa0
+[ +0.000677] ? _raw_spin_lock_irqsave+0x23/0x50
+[ +0.000679] ? __pfx_worker_thread+0x10/0x10
+[ +0.000653] kthread+0xf0/0x120
+[ +0.000635] ? __pfx_kthread+0x10/0x10
+[ +0.000616] ret_from_fork+0x2d/0x50
+[ +0.000612] ? __pfx_kthread+0x10/0x10
+[ +0.000604] ret_from_fork_asm+0x1b/0x30
+[ +0.000604] </TASK>
+
+The previous way of handling this through returning -EBUSY is not viable,
+particularly when destroying AF_XDP socket, because the kernel proceeds
+with removal anyway.
+
+There is plenty of code between those calls and there is no need to create
+a large critical section that covers all of them, just as there is no need
+to protect ice_vsi_rebuild() with rtnl_lock().
+
+Add xdp_state_lock mutex to protect ice_vsi_rebuild() and ice_xdp().
+
+Leaving unprotected sections in between would result in two states that
+have to be considered:
+1. when the VSI is closed, but not yet rebuilt
+2. when the VSI is already rebuilt, but not yet open
+
+The latter case is actually already handled through the !netif_running()
+case; we just need to adjust the flag checking a little. The former one is
+not as trivial, because between ice_vsi_close() and ice_vsi_rebuild() a
+lot of hardware interaction happens, which can make adding/deleting rings
+exit with an error. Luckily, a VSI rebuild is pending and can apply the
+new configuration for us in a managed fashion.
+
+Therefore, add an additional VSI state flag ICE_VSI_REBUILD_PENDING to
+indicate that ice_xdp() can just hot-swap the program.
+
+Also, as the ice_vsi_rebuild() flow is touched in this patch, make it more
+consistent by deconfiguring the VSI when coalesce allocation fails.
+
+Fixes: 2d4238f55697 ("ice: Add support for AF_XDP")
+Fixes: efc2214b6047 ("ice: Add support for XDP")
+Reviewed-by: Wojciech Drewek <wojciech.drewek@intel.com>
+Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
+Tested-by: Chandan Kumar Rout <chandanx.rout@intel.com>
+Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Reviewed-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice.h | 2 ++
+ drivers/net/ethernet/intel/ice/ice_lib.c | 34 ++++++++++++++---------
+ drivers/net/ethernet/intel/ice/ice_main.c | 19 +++++++++----
+ drivers/net/ethernet/intel/ice/ice_xsk.c | 3 +-
+ 4 files changed, 39 insertions(+), 19 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
+index caaa10157909..ce8b5505b16d 100644
+--- a/drivers/net/ethernet/intel/ice/ice.h
++++ b/drivers/net/ethernet/intel/ice/ice.h
+@@ -318,6 +318,7 @@ enum ice_vsi_state {
+ ICE_VSI_UMAC_FLTR_CHANGED,
+ ICE_VSI_MMAC_FLTR_CHANGED,
+ ICE_VSI_PROMISC_CHANGED,
++ ICE_VSI_REBUILD_PENDING,
+ ICE_VSI_STATE_NBITS /* must be last */
+ };
+
+@@ -411,6 +412,7 @@ struct ice_vsi {
+ struct ice_tx_ring **xdp_rings; /* XDP ring array */
+ u16 num_xdp_txq; /* Used XDP queues */
+ u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
++ struct mutex xdp_state_lock;
+
+ struct net_device **target_netdevs;
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
+index cdf05e57499f..3e772c014ae3 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
+@@ -447,6 +447,7 @@ static void ice_vsi_free(struct ice_vsi *vsi)
+
+ ice_vsi_free_stats(vsi);
+ ice_vsi_free_arrays(vsi);
++ mutex_destroy(&vsi->xdp_state_lock);
+ mutex_unlock(&pf->sw_mutex);
+ devm_kfree(dev, vsi);
+ }
+@@ -626,6 +627,8 @@ static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf)
+ pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
+ pf->next_vsi);
+
++ mutex_init(&vsi->xdp_state_lock);
++
+ unlock_pf:
+ mutex_unlock(&pf->sw_mutex);
+ return vsi;
+@@ -2972,19 +2975,23 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
+ if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf))
+ return -EINVAL;
+
++ mutex_lock(&vsi->xdp_state_lock);
++
+ ret = ice_vsi_realloc_stat_arrays(vsi);
+ if (ret)
+- goto err_vsi_cfg;
++ goto unlock;
+
+ ice_vsi_decfg(vsi);
+ ret = ice_vsi_cfg_def(vsi);
+ if (ret)
+- goto err_vsi_cfg;
++ goto unlock;
+
+ coalesce = kcalloc(vsi->num_q_vectors,
+ sizeof(struct ice_coalesce_stored), GFP_KERNEL);
+- if (!coalesce)
+- return -ENOMEM;
++ if (!coalesce) {
++ ret = -ENOMEM;
++ goto decfg;
++ }
+
+ prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
+
+@@ -2992,22 +2999,23 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
+ if (ret) {
+ if (vsi_flags & ICE_VSI_FLAG_INIT) {
+ ret = -EIO;
+- goto err_vsi_cfg_tc_lan;
++ goto free_coalesce;
+ }
+
+- kfree(coalesce);
+- return ice_schedule_reset(pf, ICE_RESET_PFR);
++ ret = ice_schedule_reset(pf, ICE_RESET_PFR);
++ goto free_coalesce;
+ }
+
+ ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);
+- kfree(coalesce);
++ clear_bit(ICE_VSI_REBUILD_PENDING, vsi->state);
+
+- return 0;
+-
+-err_vsi_cfg_tc_lan:
+- ice_vsi_decfg(vsi);
++free_coalesce:
+ kfree(coalesce);
+-err_vsi_cfg:
++decfg:
++ if (ret)
++ ice_vsi_decfg(vsi);
++unlock:
++ mutex_unlock(&vsi->xdp_state_lock);
+ return ret;
+ }
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 0e0086494a54..746cae5964fa 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -617,6 +617,7 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
+ /* clear SW filtering DB */
+ ice_clear_hw_tbls(hw);
+ /* disable the VSIs and their queues that are not already DOWN */
++ set_bit(ICE_VSI_REBUILD_PENDING, ice_get_main_vsi(pf)->state);
+ ice_pf_dis_all_vsi(pf, false);
+
+ if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
+@@ -3017,7 +3018,8 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
+ }
+
+ /* hot swap progs and avoid toggling link */
+- if (ice_is_xdp_ena_vsi(vsi) == !!prog) {
++ if (ice_is_xdp_ena_vsi(vsi) == !!prog ||
++ test_bit(ICE_VSI_REBUILD_PENDING, vsi->state)) {
+ ice_vsi_assign_bpf_prog(vsi, prog);
+ return 0;
+ }
+@@ -3089,21 +3091,28 @@ static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+ {
+ struct ice_netdev_priv *np = netdev_priv(dev);
+ struct ice_vsi *vsi = np->vsi;
++ int ret;
+
+ if (vsi->type != ICE_VSI_PF) {
+ NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
+ return -EINVAL;
+ }
+
++ mutex_lock(&vsi->xdp_state_lock);
++
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+- return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
++ ret = ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
++ break;
+ case XDP_SETUP_XSK_POOL:
+- return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
+- xdp->xsk.queue_id);
++ ret = ice_xsk_pool_setup(vsi, xdp->xsk.pool, xdp->xsk.queue_id);
++ break;
+ default:
+- return -EINVAL;
++ ret = -EINVAL;
+ }
++
++ mutex_unlock(&vsi->xdp_state_lock);
++ return ret;
+ }
+
+ /**
+diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
+index 240a7bec242b..a659951fa987 100644
+--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
++++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
+@@ -390,7 +390,8 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
+ goto failure;
+ }
+
+- if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
++ if_running = !test_bit(ICE_VSI_DOWN, vsi->state) &&
++ ice_is_xdp_ena_vsi(vsi);
+
+ if (if_running) {
+ struct ice_rx_ring *rx_ring = vsi->rx_rings[qid];
+--
+2.43.0
+
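A pthread-based sketch of the serialization scheme introduced above: a
mutex stands in for xdp_state_lock, a flag stands in for
ICE_VSI_REBUILD_PENDING, and the ring handling is reduced to comments:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t xdp_state_lock = PTHREAD_MUTEX_INITIALIZER;
static bool rebuild_pending;    /* plays ICE_VSI_REBUILD_PENDING */
static const void *bpf_prog;    /* currently attached program    */

/* .ndo_bpf-style entry: serialize against reset/rebuild and, if a rebuild
 * is pending, only hot-swap the program instead of toggling the rings. */
static int xdp_setup(const void *prog)
{
        pthread_mutex_lock(&xdp_state_lock);
        if (rebuild_pending) {
                bpf_prog = prog;        /* the rebuild sets up rings later */
        } else {
                /* down the rings, swap the program, bring the rings up */
                bpf_prog = prog;
        }
        pthread_mutex_unlock(&xdp_state_lock);
        return 0;
}

/* Reset path: rebuild the VSI with whatever program is attached now. */
static void pf_reset_rebuild(void)
{
        pthread_mutex_lock(&xdp_state_lock);
        /* ...re-create rings using bpf_prog... */
        rebuild_pending = false;
        pthread_mutex_unlock(&xdp_state_lock);
}

int main(void)
{
        rebuild_pending = true;         /* an asynchronous reset was queued  */
        xdp_setup((const void *)0x1);   /* user attaches a program meanwhile */
        pf_reset_rebuild();
        printf("prog %p attached, rebuild_pending=%d\n",
               (void *)bpf_prog, rebuild_pending);
        return 0;
}
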
--- /dev/null
+From 0efdcf631a675610c0d3fc5f506d39ced6c19ec7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 23 Aug 2024 11:59:30 +0200
+Subject: ice: remove ICE_CFG_BUSY locking from AF_XDP code
+
+From: Larysa Zaremba <larysa.zaremba@intel.com>
+
+[ Upstream commit 7e3b407ccbea3259b8583ccc34807622025e390f ]
+
+Locking used in ice_qp_ena() and ice_qp_dis() does pretty much nothing,
+because ICE_CFG_BUSY is a state flag that is supposed to be set in a PF
+state, not a VSI one. Therefore it does not protect the queue pair from,
+e.g., a reset.
+
+Remove ICE_CFG_BUSY locking from ice_qp_dis() and ice_qp_ena().
+
+Fixes: 2d4238f55697 ("ice: Add support for AF_XDP")
+Reviewed-by: Wojciech Drewek <wojciech.drewek@intel.com>
+Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
+Tested-by: Chandan Kumar Rout <chandanx.rout@intel.com>
+Reviewed-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_xsk.c | 9 ---------
+ 1 file changed, 9 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
+index a659951fa987..87a5427570d7 100644
+--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
++++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
+@@ -165,7 +165,6 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
+ struct ice_q_vector *q_vector;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
+- int timeout = 50;
+ int fail = 0;
+ int err;
+
+@@ -176,13 +175,6 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
+ rx_ring = vsi->rx_rings[q_idx];
+ q_vector = rx_ring->q_vector;
+
+- while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) {
+- timeout--;
+- if (!timeout)
+- return -EBUSY;
+- usleep_range(1000, 2000);
+- }
+-
+ synchronize_net();
+ netif_carrier_off(vsi->netdev);
+ netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+@@ -261,7 +253,6 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
+ netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+ netif_carrier_on(vsi->netdev);
+ }
+- clear_bit(ICE_CFG_BUSY, vsi->state);
+
+ return fail;
+ }
+--
+2.43.0
+
--- /dev/null
+From eb4297a9072c9f789af8bf5999ac4e0a6da8f0b6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 13 Aug 2024 21:55:53 -0700
+Subject: igb: Fix not clearing TimeSync interrupts for 82580
+
+From: Daiwei Li <daiweili@google.com>
+
+[ Upstream commit ba8cf80724dbc09825b52498e4efacb563935408 ]
+
+82580 NICs have a hardware bug that makes it
+necessary to write into the TSICR (TimeSync Interrupt Cause) register
+to clear it:
+https://lore.kernel.org/all/CDCB8BE0.1EC2C%25matthew.vick@intel.com/
+
+Add a conditional so only for 82580 we write into the TSICR register,
+so we don't risk losing events for other models.
+
+Without this change, when running ptp4l with an Intel 82580 card,
+I get the following output:
+
+> timed out while polling for tx timestamp increasing tx_timestamp_timeout or
+> increasing kworker priority may correct this issue, but a driver bug likely
+> causes it
+
+This goes away with this change.
+
+This (partially) reverts commit ee14cc9ea19b ("igb: Fix missing time sync events").
+
+Fixes: ee14cc9ea19b ("igb: Fix missing time sync events")
+Closes: https://lore.kernel.org/intel-wired-lan/CAN0jFd1kO0MMtOh8N2Ztxn6f7vvDKp2h507sMryobkBKe=xk=w@mail.gmail.com/
+Tested-by: Daiwei Li <daiweili@google.com>
+Suggested-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+Signed-off-by: Daiwei Li <daiweili@google.com>
+Acked-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+Reviewed-by: Kurt Kanzenbach <kurt@linutronix.de>
+Tested-by: Pucha Himasekhar Reddy <himasekharx.reddy.pucha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igb/igb_main.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index b6aa449aa56a..a27d0a4d3d9c 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -6961,10 +6961,20 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)
+
+ static void igb_tsync_interrupt(struct igb_adapter *adapter)
+ {
++ const u32 mask = (TSINTR_SYS_WRAP | E1000_TSICR_TXTS |
++ TSINTR_TT0 | TSINTR_TT1 |
++ TSINTR_AUTT0 | TSINTR_AUTT1);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 tsicr = rd32(E1000_TSICR);
+ struct ptp_clock_event event;
+
++ if (hw->mac.type == e1000_82580) {
++ /* 82580 has a hardware bug that requires an explicit
++ * write to clear the TimeSync interrupt cause.
++ */
++ wr32(E1000_TSICR, tsicr & mask);
++ }
++
+ if (tsicr & TSINTR_SYS_WRAP) {
+ event.type = PTP_CLOCK_PPS;
+ if (adapter->ptp_caps.pps)
+--
+2.43.0
+
--- /dev/null
+From fdda130d1f12cb491053f2adf56ec556d2483786 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 29 Aug 2024 22:22:45 +0300
+Subject: igc: Unlock on error in igc_io_resume()
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+[ Upstream commit ef4a99a0164e3972abb421cbb1b09ea6c61414df ]
+
+Call rtnl_unlock() on this error path, before returning.
+
+Fixes: bc23aa949aeb ("igc: Add pcie error handler support")
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Reviewed-by: Gerhard Engleder <gerhard@engleder-embedded.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igc/igc_main.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index 3041f8142324..773136925fd0 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -7417,6 +7417,7 @@ static void igc_io_resume(struct pci_dev *pdev)
+ rtnl_lock();
+ if (netif_running(netdev)) {
+ if (igc_open(netdev)) {
++ rtnl_unlock();
+ netdev_err(netdev, "igc_open failed after reset\n");
+ return;
+ }
+--
+2.43.0
+
--- /dev/null
+From a09422b38b035c9bc5e7825f75328157c3e8d78a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 9 Jun 2024 16:47:53 -0700
+Subject: Input: ili210x - use kvmalloc() to allocate buffer for firmware
+ update
+
+From: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+
+[ Upstream commit 17f5eebf6780eee50f887542e1833fda95f53e4d ]
+
+Allocating a contiguous buffer of 64K may fail if memory is sufficiently
+fragmented, and may cause an OOM kill of an unrelated process. However,
+we do not need contiguous memory. We also do not need to zero out the
+buffer, since it will be overwritten with firmware data.
+
+Switch to using kvmalloc() instead of kzalloc().
+
+Link: https://lore.kernel.org/r/20240609234757.610273-1-dmitry.torokhov@gmail.com
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/input/touchscreen/ili210x.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
+index 79bdb2b10949..f3c3ad70244f 100644
+--- a/drivers/input/touchscreen/ili210x.c
++++ b/drivers/input/touchscreen/ili210x.c
+@@ -597,7 +597,7 @@ static int ili251x_firmware_to_buffer(const struct firmware *fw,
+ * once, copy them all into this buffer at the right locations, and then
+ * do all operations on this linear buffer.
+ */
+- fw_buf = kzalloc(SZ_64K, GFP_KERNEL);
++ fw_buf = kvmalloc(SZ_64K, GFP_KERNEL);
+ if (!fw_buf)
+ return -ENOMEM;
+
+@@ -627,7 +627,7 @@ static int ili251x_firmware_to_buffer(const struct firmware *fw,
+ return 0;
+
+ err_big:
+- kfree(fw_buf);
++ kvfree(fw_buf);
+ return error;
+ }
+
+@@ -870,7 +870,7 @@ static ssize_t ili210x_firmware_update_store(struct device *dev,
+ ili210x_hardware_reset(priv->reset_gpio);
+ dev_dbg(dev, "Firmware update ended, error=%i\n", error);
+ enable_irq(client->irq);
+- kfree(fwbuf);
++ kvfree(fwbuf);
+ return error;
+ }
+
+--
+2.43.0
+
--- /dev/null
+From 340ab88fe41e938951e545359bc108d1914ab413 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 4 Aug 2024 17:50:25 -0700
+Subject: Input: uinput - reject requests with unreasonable number of slots
+
+From: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+
+[ Upstream commit 206f533a0a7c683982af473079c4111f4a0f9f5e ]
+
+When exercising the uinput interface, syzkaller may try setting up a
+device with a really large number of slots, which causes a memory
+allocation failure in input_mt_init_slots(). While this allocation
+failure is handled properly and the request is rejected, it results in
+syzkaller reports. Additionally, such a request may put an undue burden
+on the system, which will try to free a lot of memory for a bogus
+request.
+
+Fix it by limiting the allowed number of slots to 100. This can easily
+be extended if we see devices that can track more than 100 contacts.
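+
+As an illustration (a hypothetical userspace sketch, not part of this
+patch; it assumes <linux/uinput.h> and an already opened uinput fd),
+such a request is now rejected at ioctl time:
+
+  struct uinput_abs_setup abs_setup = {
+          .code = ABS_MT_SLOT,
+          /* made-up range asking for 201 slots, i.e. max > 99 */
+          .absinfo = { .minimum = 0, .maximum = 200 },
+  };
+  /* accepted before this patch, now fails with EINVAL */
+  if (ioctl(fd, UI_ABS_SETUP, &abs_setup))
+          perror("UI_ABS_SETUP");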
+
+Reported-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Reported-by: syzbot <syzbot+0122fa359a69694395d5@syzkaller.appspotmail.com>
+Closes: https://syzkaller.appspot.com/bug?extid=0122fa359a69694395d5
+Link: https://lore.kernel.org/r/Zqgi7NYEbpRsJfa2@google.com
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/input/misc/uinput.c | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
+index d98212d55108..2c973f15cab7 100644
+--- a/drivers/input/misc/uinput.c
++++ b/drivers/input/misc/uinput.c
+@@ -417,6 +417,20 @@ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
+ return -EINVAL;
+ }
+
++ /*
++ * Limit number of contacts to a reasonable value (100). This
++ * ensures that we need less than 2 pages for struct input_mt
++ * (we are not using in-kernel slot assignment so not going to
++ * allocate memory for the "red" table), and we should have no
++ * trouble getting this much memory.
++ */
++ if (code == ABS_MT_SLOT && max > 99) {
++ printk(KERN_DEBUG
++ "%s: unreasonably large number of slots requested: %d\n",
++ UINPUT_NAME, max);
++ return -EINVAL;
++ }
++
+ return 0;
+ }
+
+--
+2.43.0
+
--- /dev/null
+From e3ba95330c3666a759b7410bf3076dec125c4a2c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 16 Jun 2024 23:40:52 +0100
+Subject: iommu: sun50i: clear bypass register
+
+From: Jernej Skrabec <jernej.skrabec@gmail.com>
+
+[ Upstream commit 927c70c93d929f4c2dcaf72f51b31bb7d118a51a ]
+
+The Allwinner H6 IOMMU has a bypass register, which allows the page
+tables to be circumvented for each possible master. The reset value of
+this register is 0, which disables the bypass.
+The Allwinner H616 IOMMU resets this register to 0x7f, which activates
+the bypass for all masters - not what we want.
+
+Always clear this register to 0 to enforce the use of page tables and
+make this driver compatible with the H616 in this respect.
+
+Signed-off-by: Jernej Skrabec <jernej.skrabec@gmail.com>
+Signed-off-by: Andre Przywara <andre.przywara@arm.com>
+Reviewed-by: Chen-Yu Tsai <wens@csie.org>
+Link: https://lore.kernel.org/r/20240616224056.29159-2-andre.przywara@arm.com
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iommu/sun50i-iommu.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
+index c519b991749d..dd3f07384624 100644
+--- a/drivers/iommu/sun50i-iommu.c
++++ b/drivers/iommu/sun50i-iommu.c
+@@ -452,6 +452,7 @@ static int sun50i_iommu_enable(struct sun50i_iommu *iommu)
+ IOMMU_TLB_PREFETCH_MASTER_ENABLE(3) |
+ IOMMU_TLB_PREFETCH_MASTER_ENABLE(4) |
+ IOMMU_TLB_PREFETCH_MASTER_ENABLE(5));
++ iommu_write(iommu, IOMMU_BYPASS_REG, 0);
+ iommu_write(iommu, IOMMU_INT_ENABLE_REG, IOMMU_INT_MASK);
+ iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_NONE),
+ IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 0) |
+--
+2.43.0
+
--- /dev/null
+From 711bc365f1f8134224b82b152547b33c644f68e1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 2 Jul 2024 21:08:33 +0800
+Subject: iommu/vt-d: Handle volatile descriptor status read
+
+From: Jacob Pan <jacob.jun.pan@linux.intel.com>
+
+[ Upstream commit b5e86a95541cea737394a1da967df4cd4d8f7182 ]
+
+The queued invalidation wait descriptor status is volatile in that the
+IOMMU hardware writes the data upon completion.
+
+Use READ_ONCE() to prevent compiler optimizations and ensure the memory
+is re-read on every iteration. As a side effect, READ_ONCE() also
+enforces strict typing and may add an extra instruction. But it should
+not have a negative performance impact, since we use cpu_relax() anyway
+and the extra time (from the added instruction) may make it easier for
+the IOMMU HW to acquire cacheline ownership.
+
+e.g. gcc 12.3
+BEFORE:
+ 81 38 ad de 00 00 cmpl $0x2,(%rax)
+
+AFTER (with READ_ONCE())
+ 772f: 8b 00 mov (%rax),%eax
+ 7731: 3d ad de 00 00 cmp $0x2,%eax
+ //status data is 32 bit
+
+Signed-off-by: Jacob Pan <jacob.jun.pan@linux.intel.com>
+Reviewed-by: Kevin Tian <kevin.tian@intel.com>
+Reviewed-by: Yi Liu <yi.l.liu@intel.com>
+Link: https://lore.kernel.org/r/20240607173817.3914600-1-jacob.jun.pan@linux.intel.com
+Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
+Link: https://lore.kernel.org/r/20240702130839.108139-2-baolu.lu@linux.intel.com
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iommu/intel/dmar.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
+index 304e84949ca7..1c8d3141cb55 100644
+--- a/drivers/iommu/intel/dmar.c
++++ b/drivers/iommu/intel/dmar.c
+@@ -1446,7 +1446,7 @@ int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
+ */
+ writel(qi->free_head << shift, iommu->reg + DMAR_IQT_REG);
+
+- while (qi->desc_status[wait_index] != QI_DONE) {
++ while (READ_ONCE(qi->desc_status[wait_index]) != QI_DONE) {
+ /*
+ * We will leave the interrupts disabled, to prevent interrupt
+ * context to queue another cmd while a cmd is already submitted
+--
+2.43.0
+
--- /dev/null
+From 9755ac5d823e731428cdfbf60e643c3bdb60e4a6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 2 Jul 2024 21:08:35 +0800
+Subject: iommu/vt-d: Remove control over Execute-Requested requests
+
+From: Lu Baolu <baolu.lu@linux.intel.com>
+
+[ Upstream commit e995fcde6070f0981e083c1e2e17e401e6c17ad9 ]
+
+The VT-d specification has removed architectural support of the requests
+with pasid with a value of 1 for Execute-Requested (ER). And the NXE bit
+in the pasid table entry and XD bit in the first-stage paging Entries are
+deprecated accordingly.
+
+Remove the programming of these bits to make it consistent with the spec.
+
+Suggested-by: Jacob Pan <jacob.jun.pan@linux.intel.com>
+Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
+Reviewed-by: Kevin Tian <kevin.tian@intel.com>
+Link: https://lore.kernel.org/r/20240624032351.249858-1-baolu.lu@linux.intel.com
+Link: https://lore.kernel.org/r/20240702130839.108139-4-baolu.lu@linux.intel.com
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iommu/intel/iommu.c | 4 ++--
+ drivers/iommu/intel/iommu.h | 6 ++----
+ drivers/iommu/intel/pasid.c | 1 -
+ drivers/iommu/intel/pasid.h | 10 ----------
+ 4 files changed, 4 insertions(+), 17 deletions(-)
+
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index f55ec1fd7942..e9bea0305c26 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -854,7 +854,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
+ domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
+ pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
+ if (domain->use_first_level)
+- pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;
++ pteval |= DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;
+
+ tmp = 0ULL;
+ if (!try_cmpxchg64(&pte->val, &tmp, pteval))
+@@ -1872,7 +1872,7 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+ attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
+ attr |= DMA_FL_PTE_PRESENT;
+ if (domain->use_first_level) {
+- attr |= DMA_FL_PTE_XD | DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;
++ attr |= DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;
+ if (prot & DMA_PTE_WRITE)
+ attr |= DMA_FL_PTE_DIRTY;
+ }
+diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
+index eaf015b4353b..9a3b064126de 100644
+--- a/drivers/iommu/intel/iommu.h
++++ b/drivers/iommu/intel/iommu.h
+@@ -49,7 +49,6 @@
+ #define DMA_FL_PTE_US BIT_ULL(2)
+ #define DMA_FL_PTE_ACCESS BIT_ULL(5)
+ #define DMA_FL_PTE_DIRTY BIT_ULL(6)
+-#define DMA_FL_PTE_XD BIT_ULL(63)
+
+ #define DMA_SL_PTE_DIRTY_BIT 9
+ #define DMA_SL_PTE_DIRTY BIT_ULL(DMA_SL_PTE_DIRTY_BIT)
+@@ -831,11 +830,10 @@ static inline void dma_clear_pte(struct dma_pte *pte)
+ static inline u64 dma_pte_addr(struct dma_pte *pte)
+ {
+ #ifdef CONFIG_64BIT
+- return pte->val & VTD_PAGE_MASK & (~DMA_FL_PTE_XD);
++ return pte->val & VTD_PAGE_MASK;
+ #else
+ /* Must have a full atomic 64-bit read */
+- return __cmpxchg64(&pte->val, 0ULL, 0ULL) &
+- VTD_PAGE_MASK & (~DMA_FL_PTE_XD);
++ return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
+ #endif
+ }
+
+diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
+index abce19e2ad6f..aabcdf756581 100644
+--- a/drivers/iommu/intel/pasid.c
++++ b/drivers/iommu/intel/pasid.c
+@@ -333,7 +333,6 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
+ pasid_set_domain_id(pte, did);
+ pasid_set_address_width(pte, iommu->agaw);
+ pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
+- pasid_set_nxe(pte);
+
+ /* Setup Present and PASID Granular Transfer Type: */
+ pasid_set_translation_type(pte, PASID_ENTRY_PGTT_FL_ONLY);
+diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
+index da9978fef7ac..dde6d3ba5ae0 100644
+--- a/drivers/iommu/intel/pasid.h
++++ b/drivers/iommu/intel/pasid.h
+@@ -247,16 +247,6 @@ static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
+ pasid_set_bits(&pe->val[1], 1 << 23, value << 23);
+ }
+
+-/*
+- * Setup No Execute Enable bit (Bit 133) of a scalable mode PASID
+- * entry. It is required when XD bit of the first level page table
+- * entry is about to be set.
+- */
+-static inline void pasid_set_nxe(struct pasid_entry *pe)
+-{
+- pasid_set_bits(&pe->val[2], 1 << 5, 1 << 5);
+-}
+-
+ /*
+ * Setup the Page Snoop (PGSNP) field (Bit 88) of a scalable mode
+ * PASID entry.
+--
+2.43.0
+
--- /dev/null
+From 97ef83590c6312e35fb3aa338c74105a3624042b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 28 Jun 2024 13:11:11 -0300
+Subject: iommufd: Require drivers to supply the cache_invalidate_user ops
+
+From: Jason Gunthorpe <jgg@nvidia.com>
+
+[ Upstream commit a11dda723c6493bb1853bbc61c093377f96e2d47 ]
+
+If drivers don't do this then iommufd will oops invalidation ioctls with
+something like:
+
+ Unable to handle kernel NULL pointer dereference at virtual address 0000000000000000
+ Mem abort info:
+ ESR = 0x0000000086000004
+ EC = 0x21: IABT (current EL), IL = 32 bits
+ SET = 0, FnV = 0
+ EA = 0, S1PTW = 0
+ FSC = 0x04: level 0 translation fault
+ user pgtable: 4k pages, 48-bit VAs, pgdp=0000000101059000
+ [0000000000000000] pgd=0000000000000000, p4d=0000000000000000
+ Internal error: Oops: 0000000086000004 [#1] PREEMPT SMP
+ Modules linked in:
+ CPU: 2 PID: 371 Comm: qemu-system-aar Not tainted 6.8.0-rc7-gde77230ac23a #9
+ Hardware name: linux,dummy-virt (DT)
+ pstate: 81400809 (Nzcv daif +PAN -UAO -TCO +DIT -SSBS BTYPE=-c)
+ pc : 0x0
+ lr : iommufd_hwpt_invalidate+0xa4/0x204
+ sp : ffff800080f3bcc0
+ x29: ffff800080f3bcf0 x28: ffff0000c369b300 x27: 0000000000000000
+ x26: 0000000000000000 x25: 0000000000000000 x24: 0000000000000000
+ x23: 0000000000000000 x22: 00000000c1e334a0 x21: ffff0000c1e334a0
+ x20: ffff800080f3bd38 x19: ffff800080f3bd58 x18: 0000000000000000
+ x17: 0000000000000000 x16: 0000000000000000 x15: 0000ffff8240d6d8
+ x14: 0000000000000000 x13: 0000000000000000 x12: 0000000000000000
+ x11: 0000000000000000 x10: 0000000000000000 x9 : 0000000000000000
+ x8 : 0000001000000002 x7 : 0000fffeac1ec950 x6 : 0000000000000000
+ x5 : ffff800080f3bd78 x4 : 0000000000000003 x3 : 0000000000000002
+ x2 : 0000000000000000 x1 : ffff800080f3bcc8 x0 : ffff0000c6034d80
+ Call trace:
+ 0x0
+ iommufd_fops_ioctl+0x154/0x274
+ __arm64_sys_ioctl+0xac/0xf0
+ invoke_syscall+0x48/0x110
+ el0_svc_common.constprop.0+0x40/0xe0
+ do_el0_svc+0x1c/0x28
+ el0_svc+0x34/0xb4
+ el0t_64_sync_handler+0x120/0x12c
+ el0t_64_sync+0x190/0x194
+
+All existing drivers implement this op for nesting; this is mostly a
+bisection aid.
+
+Fixes: 8c6eabae3807 ("iommufd: Add IOMMU_HWPT_INVALIDATE")
+Link: https://lore.kernel.org/r/0-v1-e153859bd707+61-iommufd_check_ops_jgg@nvidia.com
+Reviewed-by: Nicolin Chen <nicolinc@nvidia.com>
+Reviewed-by: Yi Liu <yi.l.liu@intel.com>
+Reviewed-by: Kevin Tian <kevin.tian@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iommu/iommufd/hw_pagetable.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c
+index 33d142f8057d..a9f1fe44c4c0 100644
+--- a/drivers/iommu/iommufd/hw_pagetable.c
++++ b/drivers/iommu/iommufd/hw_pagetable.c
+@@ -236,7 +236,8 @@ iommufd_hwpt_nested_alloc(struct iommufd_ctx *ictx,
+ }
+ hwpt->domain->owner = ops;
+
+- if (WARN_ON_ONCE(hwpt->domain->type != IOMMU_DOMAIN_NESTED)) {
++ if (WARN_ON_ONCE(hwpt->domain->type != IOMMU_DOMAIN_NESTED ||
++ !hwpt->domain->ops->cache_invalidate_user)) {
+ rc = -EINVAL;
+ goto out_abort;
+ }
+--
+2.43.0
+
--- /dev/null
+From 72b3f4e5717ede246d5d5b52a6c6d7864554fa9b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Jun 2024 11:38:28 +0200
+Subject: irqchip/armada-370-xp: Do not allow mapping IRQ 0 and 1
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Pali Rohár <pali@kernel.org>
+
+[ Upstream commit 3cef738208e5c3cb7084e208caf9bbf684f24feb ]
+
+IRQs 0 (IPI) and 1 (MSI) are handled internally by this driver;
+generic_handle_domain_irq() is never called for these IRQs.
+
+Disallow mapping these IRQs.
+
+[ Marek: changed commit message ]
+
+Signed-off-by: Pali Rohár <pali@kernel.org>
+Signed-off-by: Marek Behún <kabel@kernel.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/irqchip/irq-armada-370-xp.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
+index 4b021a67bdfe..f488c35d9130 100644
+--- a/drivers/irqchip/irq-armada-370-xp.c
++++ b/drivers/irqchip/irq-armada-370-xp.c
+@@ -566,6 +566,10 @@ static struct irq_chip armada_370_xp_irq_chip = {
+ static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
+ unsigned int virq, irq_hw_number_t hw)
+ {
++ /* IRQs 0 and 1 cannot be mapped, they are handled internally */
++ if (hw <= 1)
++ return -EINVAL;
++
+ armada_370_xp_irq_mask(irq_get_irq_data(virq));
+ if (!is_percpu_irq(hw))
+ writel(hw, per_cpu_int_base +
+--
+2.43.0
+
--- /dev/null
+From c0ddf48dd19c4b48eacd886bde59c426bc9fcc0e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Jul 2024 10:31:53 +0100
+Subject: irqchip/gic-v4: Always configure affinity on VPE activation
+
+From: Marc Zyngier <maz@kernel.org>
+
+[ Upstream commit 7d2c2048a86477461f7bc75d064579ed349472bc ]
+
+There are currently two paths to set the initial affinity of a VPE:
+
+ - at activation time on GICv4 without the stupid VMOVP list, and
+ on GICv4.1
+
+ - at map time for GICv4 with VMOVP list
+
+The latter location may end up modifying the affinity of a VPE that is
+currently running, making the results unpredictable.
+
+Instead, unify the two paths, making sure to set the initial affinity only
+at activation time.
+
+Reported-by: Nianyao Tang <tangnianyao@huawei.com>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Nianyao Tang <tangnianyao@huawei.com>
+Link: https://lore.kernel.org/r/20240705093155.871070-2-maz@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/irqchip/irq-gic-v3-its.c | 13 ++++---------
+ 1 file changed, 4 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index 3c755d5dad6e..a00c5e8c4ea6 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -1809,13 +1809,9 @@ static void its_map_vm(struct its_node *its, struct its_vm *vm)
+
+ for (i = 0; i < vm->nr_vpes; i++) {
+ struct its_vpe *vpe = vm->vpes[i];
+- struct irq_data *d = irq_get_irq_data(vpe->irq);
+
+- /* Map the VPE to the first possible CPU */
+- vpe->col_idx = cpumask_first(cpu_online_mask);
+ its_send_vmapp(its, vpe, true);
+ its_send_vinvall(its, vpe);
+- irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
+ }
+ }
+
+@@ -4562,6 +4558,10 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+ struct its_node *its;
+
++ /* Map the VPE to the first possible CPU */
++ vpe->col_idx = cpumask_first(cpu_online_mask);
++ irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
++
+ /*
+ * If we use the list map, we issue VMAPP on demand... Unless
+ * we're on a GICv4.1 and we eagerly map the VPE on all ITSs
+@@ -4570,9 +4570,6 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
+ if (!gic_requires_eager_mapping())
+ return 0;
+
+- /* Map the VPE to the first possible CPU */
+- vpe->col_idx = cpumask_first(cpu_online_mask);
+-
+ list_for_each_entry(its, &its_nodes, entry) {
+ if (!is_v4(its))
+ continue;
+@@ -4581,8 +4578,6 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
+ its_send_vinvall(its, vpe);
+ }
+
+- irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
+-
+ return 0;
+ }
+
+--
+2.43.0
+
--- /dev/null
+From abc4c9e0864960d0d138a410ae1c57118bdeaa39 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Jul 2024 10:31:55 +0100
+Subject: irqchip/gic-v4: Make sure a VPE is locked when VMAPP is issued
+
+From: Marc Zyngier <maz@kernel.org>
+
+[ Upstream commit a84a07fa3100d7ad46a3d6882af25a3df9c9e7e3 ]
+
+In order to make sure that vpe->col_idx is correctly sampled when a VMAPP
+command is issued, the vpe_lock must be held for the VPE. This is now
+possible since the introduction of the per-VM vmapp_lock, which can be
+taken before vpe_lock in the correct locking order.
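+
+Background note (not part of this change): guard()/scoped_guard() come
+from <linux/cleanup.h> and release the lock automatically when the
+scope ends, so the calls in the hunks below need no explicit unlock:
+
+  scoped_guard(raw_spinlock, &vpe->vpe_lock)
+          its_send_vmapp(its, vpe, true); /* vpe_lock dropped here */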
+
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Nianyao Tang <tangnianyao@huawei.com>
+Link: https://lore.kernel.org/r/20240705093155.871070-4-maz@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/irqchip/irq-gic-v3-its.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index a00c5e8c4ea6..eb906d3789b1 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -1810,7 +1810,9 @@ static void its_map_vm(struct its_node *its, struct its_vm *vm)
+ for (i = 0; i < vm->nr_vpes; i++) {
+ struct its_vpe *vpe = vm->vpes[i];
+
+- its_send_vmapp(its, vpe, true);
++ scoped_guard(raw_spinlock, &vpe->vpe_lock)
++ its_send_vmapp(its, vpe, true);
++
+ its_send_vinvall(its, vpe);
+ }
+ }
+@@ -1831,8 +1833,10 @@ static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
+ if (!--vm->vlpi_count[its->list_nr]) {
+ int i;
+
+- for (i = 0; i < vm->nr_vpes; i++)
++ for (i = 0; i < vm->nr_vpes; i++) {
++ guard(raw_spinlock)(&vm->vpes[i]->vpe_lock);
+ its_send_vmapp(its, vm->vpes[i], false);
++ }
+ }
+
+ raw_spin_unlock_irqrestore(&vmovp_lock, flags);
+--
+2.43.0
+
--- /dev/null
+From ddf3920fc806c039dac80d1b16fd727f71782f17 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 6 Jun 2024 20:48:13 +0100
+Subject: irqchip/renesas-rzg2l: Reorder function calls in
+ rzg2l_irqc_irq_disable()
+
+From: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
+
+[ Upstream commit 492eee82574b163fbb3f099c74ce3b4322d0af28 ]
+
+The order of function calls in the disable operation should be the reverse
+of that in the enable operation. Thus, reorder the function calls to first
+disable the parent IRQ chip before disabling the TINT IRQ.
+
+Reported-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Signed-off-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com> # on RZ/G3S
+Link: https://lore.kernel.org/r/20240606194813.676823-1-prabhakar.mahadev-lad.rj@bp.renesas.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/irqchip/irq-renesas-rzg2l.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c
+index f6484bf15e0b..5a4521cf3ec6 100644
+--- a/drivers/irqchip/irq-renesas-rzg2l.c
++++ b/drivers/irqchip/irq-renesas-rzg2l.c
+@@ -162,8 +162,8 @@ static void rzg2l_tint_irq_endisable(struct irq_data *d, bool enable)
+
+ static void rzg2l_irqc_irq_disable(struct irq_data *d)
+ {
+- rzg2l_tint_irq_endisable(d, false);
+ irq_chip_disable_parent(d);
++ rzg2l_tint_irq_endisable(d, false);
+ }
+
+ static void rzg2l_irqc_irq_enable(struct irq_data *d)
+--
+2.43.0
+
--- /dev/null
+From 028df971eec7f04e89b583a6a29c900ff0572a9e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Jun 2024 15:24:05 +0800
+Subject: jbd2: avoid mount failed when commit block is partial submitted
+
+From: Ye Bin <yebin10@huawei.com>
+
+[ Upstream commit 0bab8db4152c4a2185a1367db09cc402bdc62d5e ]
+
+We encountered a problem where the file system could not be mounted
+after a power failure. Analysis of the file system image shows that
+only part of the data was written to the last commit block.
+The valid data of the commit block is concentrated in the first sector.
+However, the data of the entire block is involved in the checksum
+calculation, and the minimum atomic write unit may differ between
+hardware.
+If the checksum of a commit block is incorrect, clear the data except
+the 'commit_header' and then recalculate the checksum. If that checksum
+is correct, consider the block partially committed and continue
+replaying the journal.
+
+Signed-off-by: Ye Bin <yebin10@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Link: https://patch.msgid.link/20240620072405.3533701-1-yebin@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/jbd2/recovery.c | 30 ++++++++++++++++++++++++++++++
+ 1 file changed, 30 insertions(+)
+
+diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
+index 1f7664984d6e..0d14b5f39be6 100644
+--- a/fs/jbd2/recovery.c
++++ b/fs/jbd2/recovery.c
+@@ -443,6 +443,27 @@ static int jbd2_commit_block_csum_verify(journal_t *j, void *buf)
+ return provided == cpu_to_be32(calculated);
+ }
+
++static bool jbd2_commit_block_csum_verify_partial(journal_t *j, void *buf)
++{
++ struct commit_header *h;
++ __be32 provided;
++ __u32 calculated;
++ void *tmpbuf;
++
++ tmpbuf = kzalloc(j->j_blocksize, GFP_KERNEL);
++ if (!tmpbuf)
++ return false;
++
++ memcpy(tmpbuf, buf, sizeof(struct commit_header));
++ h = tmpbuf;
++ provided = h->h_chksum[0];
++ h->h_chksum[0] = 0;
++ calculated = jbd2_chksum(j, j->j_csum_seed, tmpbuf, j->j_blocksize);
++ kfree(tmpbuf);
++
++ return provided == cpu_to_be32(calculated);
++}
++
+ static int jbd2_block_tag_csum_verify(journal_t *j, journal_block_tag_t *tag,
+ journal_block_tag3_t *tag3,
+ void *buf, __u32 sequence)
+@@ -810,6 +831,13 @@ static int do_one_pass(journal_t *journal,
+ if (pass == PASS_SCAN &&
+ !jbd2_commit_block_csum_verify(journal,
+ bh->b_data)) {
++ if (jbd2_commit_block_csum_verify_partial(
++ journal,
++ bh->b_data)) {
++ pr_notice("JBD2: Find incomplete commit block in transaction %u block %lu\n",
++ next_commit_ID, next_log_block);
++ goto chksum_ok;
++ }
+ chksum_error:
+ if (commit_time < last_trans_commit_time)
+ goto ignore_crc_mismatch;
+@@ -824,6 +852,7 @@ static int do_one_pass(journal_t *journal,
+ }
+ }
+ if (pass == PASS_SCAN) {
++ chksum_ok:
+ last_trans_commit_time = commit_time;
+ head_block = next_log_block;
+ }
+@@ -843,6 +872,7 @@ static int do_one_pass(journal_t *journal,
+ next_log_block);
+ need_check_commit_time = true;
+ }
++
+ /* If we aren't in the REVOKE pass, then we can
+ * just skip over this block. */
+ if (pass != PASS_REVOKE) {
+--
+2.43.0
+
--- /dev/null
+From 66b5aa8fdb1411b737ae3f6485ebbecc596285cb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 29 Jul 2024 10:46:04 +0800
+Subject: kselftests: dmabuf-heaps: Ensure the driver name is null-terminated
+
+From: Zenghui Yu <yuzenghui@huawei.com>
+
+[ Upstream commit 291e4baf70019f17a81b7b47aeb186b27d222159 ]
+
+Even if a vgem device is configured in, we will skip the import_vgem_fd()
+test almost every time.
+
+ TAP version 13
+ 1..11
+ # Testing heap: system
+ # =======================================
+ # Testing allocation and importing:
+ ok 1 # SKIP Could not open vgem -1
+
+The problem is that we use the DRM_IOCTL_VERSION ioctl to query the driver
+version information but leave the name field a non-null-terminated string.
+Terminate it properly to actually test against the vgem device.
+
+While at it, let's check that the length of the driver name is exactly
+4 bytes and return early otherwise (in case there is a name like
+"vgemfoo" that gets converted to "vgem\0" unexpectedly).
+
+Signed-off-by: Zenghui Yu <yuzenghui@huawei.com>
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240729024604.2046-1-yuzenghui@huawei.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c b/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c
+index 5f541522364f..5d0a809dc2df 100644
+--- a/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c
++++ b/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c
+@@ -29,9 +29,11 @@ static int check_vgem(int fd)
+ version.name = name;
+
+ ret = ioctl(fd, DRM_IOCTL_VERSION, &version);
+- if (ret)
++ if (ret || version.name_len != 4)
+ return 0;
+
++ name[4] = '\0';
++
+ return !strcmp(name, "vgem");
+ }
+
+--
+2.43.0
+
--- /dev/null
+From cd07484564f3ef90273243f1c1211687b8fdadcc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 15 Aug 2024 01:04:31 +0100
+Subject: kunit/overflow: Fix UB in overflow_allocation_test
+
+From: Ivan Orlov <ivan.orlov0322@gmail.com>
+
+[ Upstream commit 92e9bac18124682c4b99ede9ee3bcdd68f121e92 ]
+
+The 'device_name' array doesn't exist outside of the
+'overflow_allocation_test' function scope. However, it is being used as
+a driver name when calling 'kunit_driver_create' from
+'kunit_device_register'. This produces a kernel panic with KASAN
+enabled.
+
+Since this variable is used in one place only, remove it and pass the
+device name into kunit_device_register directly as an ASCII string.
+
+Signed-off-by: Ivan Orlov <ivan.orlov0322@gmail.com>
+Reviewed-by: David Gow <davidgow@google.com>
+Link: https://lore.kernel.org/r/20240815000431.401869-1-ivan.orlov0322@gmail.com
+Signed-off-by: Kees Cook <kees@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ lib/overflow_kunit.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/lib/overflow_kunit.c b/lib/overflow_kunit.c
+index d305b0c054bb..9249181fff37 100644
+--- a/lib/overflow_kunit.c
++++ b/lib/overflow_kunit.c
+@@ -668,7 +668,6 @@ DEFINE_TEST_ALLOC(devm_kzalloc, devm_kfree, 1, 1, 0);
+
+ static void overflow_allocation_test(struct kunit *test)
+ {
+- const char device_name[] = "overflow-test";
+ struct device *dev;
+ int count = 0;
+
+@@ -678,7 +677,7 @@ static void overflow_allocation_test(struct kunit *test)
+ } while (0)
+
+ /* Create dummy device for devm_kmalloc()-family tests. */
+- dev = kunit_device_register(test, device_name);
++ dev = kunit_device_register(test, "overflow-test");
+ KUNIT_ASSERT_FALSE_MSG(test, IS_ERR(dev),
+ "Cannot register test device\n");
+
+--
+2.43.0
+
--- /dev/null
+From 77b23ae008d9e8336e3e9794c763a192d15f10e5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 6 Jun 2024 20:29:18 +0300
+Subject: leds: spi-byte: Call of_node_put() on error path
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+[ Upstream commit 7f9ab862e05c5bc755f65bf6db7edcffb3b49dfc ]
+
+Add a missing call to of_node_put(np) on error.
+
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Link: https://lore.kernel.org/r/20240606173037.3091598-2-andriy.shevchenko@linux.intel.com
+Signed-off-by: Lee Jones <lee@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/leds/leds-spi-byte.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/leds/leds-spi-byte.c b/drivers/leds/leds-spi-byte.c
+index 96296db5f410..b04cf502e603 100644
+--- a/drivers/leds/leds-spi-byte.c
++++ b/drivers/leds/leds-spi-byte.c
+@@ -91,7 +91,6 @@ static int spi_byte_probe(struct spi_device *spi)
+ dev_err(dev, "Device must have exactly one LED sub-node.");
+ return -EINVAL;
+ }
+- child = of_get_next_available_child(dev_of_node(dev), NULL);
+
+ led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL);
+ if (!led)
+@@ -104,11 +103,13 @@ static int spi_byte_probe(struct spi_device *spi)
+ led->ldev.max_brightness = led->cdef->max_value - led->cdef->off_value;
+ led->ldev.brightness_set_blocking = spi_byte_brightness_set_blocking;
+
++ child = of_get_next_available_child(dev_of_node(dev), NULL);
+ state = of_get_property(child, "default-state", NULL);
+ if (state) {
+ if (!strcmp(state, "on")) {
+ led->ldev.brightness = led->ldev.max_brightness;
+ } else if (strcmp(state, "off")) {
++ of_node_put(child);
+ /* all other cases except "off" */
+ dev_err(dev, "default-state can only be 'on' or 'off'");
+ return -EINVAL;
+@@ -123,9 +124,12 @@ static int spi_byte_probe(struct spi_device *spi)
+
+ ret = devm_led_classdev_register_ext(&spi->dev, &led->ldev, &init_data);
+ if (ret) {
++ of_node_put(child);
+ mutex_destroy(&led->mutex);
+ return ret;
+ }
++
++ of_node_put(child);
+ spi_set_drvdata(spi, led);
+
+ return 0;
+--
+2.43.0
+
--- /dev/null
+From 11a4c4bc56d3b000d512b901a207ab4343758853 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 10 Aug 2024 21:04:35 -0400
+Subject: lib/generic-radix-tree.c: Fix rare race in __genradix_ptr_alloc()
+
+From: Kent Overstreet <kent.overstreet@linux.dev>
+
+[ Upstream commit b2f11c6f3e1fc60742673b8675c95b78447f3dae ]
+
+If we need to increase the tree depth, allocate a new node, and then
+race with another thread that increased the tree depth before us, we'll
+still have a preallocated node that might be used later.
+
+If we then use that node for a new non-root node, it'll still have a
+pointer to the old root instead of being zeroed - fix this by zeroing it
+in the cmpxchg failure path.
+
+Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ lib/generic-radix-tree.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/lib/generic-radix-tree.c b/lib/generic-radix-tree.c
+index aaefb9b678c8..fa692c86f069 100644
+--- a/lib/generic-radix-tree.c
++++ b/lib/generic-radix-tree.c
+@@ -121,6 +121,8 @@ void *__genradix_ptr_alloc(struct __genradix *radix, size_t offset,
+ if ((v = cmpxchg_release(&radix->root, r, new_root)) == r) {
+ v = new_root;
+ new_node = NULL;
++ } else {
++ new_node->children[0] = NULL;
+ }
+ }
+
+--
+2.43.0
+
--- /dev/null
+From f6f72e2c46b57176cf43b0263672cbc9681c174a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 3 Jul 2024 10:34:36 +0200
+Subject: libbpf: Add NULL checks to bpf_object__{prev_map,next_map}
+
+From: Andreas Ziegler <ziegler.andreas@siemens.com>
+
+[ Upstream commit cedc12c5b57f7efa6dbebfb2b140e8675f5a2616 ]
+
+In the current state, an erroneous call to
+bpf_object__find_map_by_name(NULL, ...) leads to a segmentation
+fault through the following call chain:
+
+ bpf_object__find_map_by_name(obj = NULL, ...)
+ -> bpf_object__for_each_map(pos, obj = NULL)
+ -> bpf_object__next_map((obj = NULL), NULL)
+ -> return (obj = NULL)->maps
+
+While calling bpf_object__find_map_by_name with obj = NULL is
+obviously incorrect, this should not lead to a segmentation
+fault but rather be handled gracefully.
+
+As __bpf_map__iter already handles this situation correctly, we
+can delegate the check for the regular case there and only add
+a check in case the prev or next parameter is NULL.
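+
+As an illustration, a minimal caller sketch (hypothetical map name,
+not part of this patch) of the case that used to crash:
+
+  struct bpf_map *map;
+
+  /* obj is NULL here, e.g. after a failed bpf_object__open() */
+  map = bpf_object__find_map_by_name(NULL, "my_map");
+  /* before: NULL dereference in bpf_object__next_map();
+   * after:  map is simply NULL and can be handled gracefully
+   */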
+
+Signed-off-by: Andreas Ziegler <ziegler.andreas@siemens.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/bpf/20240703083436.505124-1-ziegler.andreas@siemens.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/lib/bpf/libbpf.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
+index 5401f2df463d..5edb71764784 100644
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -10336,7 +10336,7 @@ __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
+ struct bpf_map *
+ bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
+ {
+- if (prev == NULL)
++ if (prev == NULL && obj != NULL)
+ return obj->maps;
+
+ return __bpf_map__iter(prev, obj, 1);
+@@ -10345,7 +10345,7 @@ bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
+ struct bpf_map *
+ bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *next)
+ {
+- if (next == NULL) {
++ if (next == NULL && obj != NULL) {
+ if (!obj->nr_maps)
+ return NULL;
+ return obj->maps + obj->nr_maps - 1;
+--
+2.43.0
+
--- /dev/null
+From fff8fafc740b44300a030aa4727b1952d26823df Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 20 Jul 2024 22:41:07 +0800
+Subject: LoongArch: Use correct API to map cmdline in relocate_kernel()
+
+From: Huacai Chen <chenhuacai@loongson.cn>
+
+[ Upstream commit 0124fbb4c6dba23dbdf80c829be68adbccde2722 ]
+
+fw_arg1 is in memory space rather than I/O space, so we should use
+early_memremap_ro() instead of early_ioremap() to map the cmdline.
+Moreover, we should unmap it after use.
+
+Suggested-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
+Reviewed-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/loongarch/kernel/relocate.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/arch/loongarch/kernel/relocate.c b/arch/loongarch/kernel/relocate.c
+index 1acfa704c8d0..0eddd4a66b87 100644
+--- a/arch/loongarch/kernel/relocate.c
++++ b/arch/loongarch/kernel/relocate.c
+@@ -13,6 +13,7 @@
+ #include <asm/bootinfo.h>
+ #include <asm/early_ioremap.h>
+ #include <asm/inst.h>
++#include <asm/io.h>
+ #include <asm/sections.h>
+ #include <asm/setup.h>
+
+@@ -170,7 +171,7 @@ unsigned long __init relocate_kernel(void)
+ unsigned long kernel_length;
+ unsigned long random_offset = 0;
+ void *location_new = _text; /* Default to original kernel start */
+- char *cmdline = early_ioremap(fw_arg1, COMMAND_LINE_SIZE); /* Boot command line is passed in fw_arg1 */
++ char *cmdline = early_memremap_ro(fw_arg1, COMMAND_LINE_SIZE); /* Boot command line is passed in fw_arg1 */
+
+ strscpy(boot_command_line, cmdline, COMMAND_LINE_SIZE);
+
+@@ -182,6 +183,7 @@ unsigned long __init relocate_kernel(void)
+ random_offset = (unsigned long)location_new - (unsigned long)(_text);
+ #endif
+ reloc_offset = (unsigned long)_text - VMLINUX_LOAD_ADDRESS;
++ early_memunmap(cmdline, COMMAND_LINE_SIZE);
+
+ if (random_offset) {
+ kernel_length = (long)(_end) - (long)(_text);
+--
+2.43.0
+
--- /dev/null
+From c0cb32576f4dc2d63923e21462b14c6dc8c83373 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Jun 2024 09:52:26 +0200
+Subject: media: b2c2: flexcop-usb: fix flexcop_usb_memory_req
+
+From: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+
+[ Upstream commit b178aa6f333b07bda0548d7e45085660a112414d ]
+
+smatch generated this warning:
+
+drivers/media/usb/b2c2/flexcop-usb.c:199 flexcop_usb_memory_req() warn: iterator 'i' not incremented
+
+and indeed the function is not using i or updating buf.
+
+The reason this always worked is that this function is called to write just
+6 bytes (a MAC address) to the USB device, and so in practice there is only
+a single chunk written. If we ever needed to write more than one chunk,
+this function would fail, since each chunk would read from or write to
+the same buf address.
+
+Rewrite the function to properly handle this.
+
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/usb/b2c2/flexcop-usb.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
+index 90f1aea99dac..8033622543f2 100644
+--- a/drivers/media/usb/b2c2/flexcop-usb.c
++++ b/drivers/media/usb/b2c2/flexcop-usb.c
+@@ -179,7 +179,7 @@ static int flexcop_usb_memory_req(struct flexcop_usb *fc_usb,
+ flexcop_usb_request_t req, flexcop_usb_mem_page_t page_start,
+ u32 addr, int extended, u8 *buf, u32 len)
+ {
+- int i, ret = 0;
++ int ret = 0;
+ u16 wMax;
+ u32 pagechunk = 0;
+
+@@ -196,7 +196,7 @@ static int flexcop_usb_memory_req(struct flexcop_usb *fc_usb,
+ default:
+ return -EINVAL;
+ }
+- for (i = 0; i < len;) {
++ while (len) {
+ pagechunk = min(wMax, bytes_left_to_read_on_page(addr, len));
+ deb_info("%x\n",
+ (addr & V8_MEMORY_PAGE_MASK) |
+@@ -206,11 +206,12 @@ static int flexcop_usb_memory_req(struct flexcop_usb *fc_usb,
+ page_start + (addr / V8_MEMORY_PAGE_SIZE),
+ (addr & V8_MEMORY_PAGE_MASK) |
+ (V8_MEMORY_EXTENDED*extended),
+- &buf[i], pagechunk);
++ buf, pagechunk);
+
+ if (ret < 0)
+ return ret;
+ addr += pagechunk;
++ buf += pagechunk;
+ len -= pagechunk;
+ }
+ return 0;
+--
+2.43.0
+
--- /dev/null
+From 20d6d30037e3e637b5b65492a541fe76229d3a2c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Jun 2024 09:35:22 +0800
+Subject: media: qcom: camss: Add check for v4l2_fwnode_endpoint_parse
+
+From: Chen Ni <nichen@iscas.ac.cn>
+
+[ Upstream commit 4caf6d93d9f2c11d6441c64e1c549c445fa322ed ]
+
+Add a check for the return value of v4l2_fwnode_endpoint_parse() and
+return the error if it fails.
+
+Signed-off-by: Chen Ni <nichen@iscas.ac.cn>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/platform/qcom/camss/camss.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
+index 1923615f0eea..c90a28fa8891 100644
+--- a/drivers/media/platform/qcom/camss/camss.c
++++ b/drivers/media/platform/qcom/camss/camss.c
+@@ -1406,8 +1406,11 @@ static int camss_of_parse_endpoint_node(struct device *dev,
+ struct v4l2_mbus_config_mipi_csi2 *mipi_csi2;
+ struct v4l2_fwnode_endpoint vep = { { 0 } };
+ unsigned int i;
++ int ret;
+
+- v4l2_fwnode_endpoint_parse(of_fwnode_handle(node), &vep);
++ ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(node), &vep);
++ if (ret)
++ return ret;
+
+ csd->interface.csiphy_id = vep.base.port;
+
+--
+2.43.0
+
--- /dev/null
+From 5f01437d55dc4122607739cea802f1562739bb1f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Jun 2024 12:52:59 +0300
+Subject: media: vivid: don't set HDMI TX controls if there are no HDMI outputs
+
+From: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+
+[ Upstream commit 17763960b1784578e8fe915304b330922f646209 ]
+
+When setting the EDID, the driver would attempt to update two controls
+that are only present if an HDMI output is configured.
+
+If there are no outputs (e.g. when the vivid module is loaded with
+node_types=1), then calling VIDIOC_S_EDID would crash.
+
+Fix this by first checking whether outputs are present.
+
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/test-drivers/vivid/vivid-vid-cap.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/media/test-drivers/vivid/vivid-vid-cap.c b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
+index 3a3041a0378f..afa0dc5bcdae 100644
+--- a/drivers/media/test-drivers/vivid/vivid-vid-cap.c
++++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
+@@ -1554,8 +1554,10 @@ int vidioc_s_edid(struct file *file, void *_fh,
+ return -EINVAL;
+ if (edid->blocks == 0) {
+ dev->edid_blocks = 0;
+- v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, 0);
+- v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, 0);
++ if (dev->num_outputs) {
++ v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, 0);
++ v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, 0);
++ }
+ phys_addr = CEC_PHYS_ADDR_INVALID;
+ goto set_phys_addr;
+ }
+@@ -1579,8 +1581,10 @@ int vidioc_s_edid(struct file *file, void *_fh,
+ display_present |=
+ dev->display_present[i] << j++;
+
+- v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, display_present);
+- v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, display_present);
++ if (dev->num_outputs) {
++ v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, display_present);
++ v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, display_present);
++ }
+
+ set_phys_addr:
+ /* TODO: a proper hotplug detect cycle should be emulated here */
+--
+2.43.0
+
--- /dev/null
+From 332e0bf3ec471bc0845caa02ed02bf4c43cff0be Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 26 Jun 2024 12:59:13 +0200
+Subject: media: vivid: fix wrong sizeimage value for mplane
+
+From: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+
+[ Upstream commit 0fd7c0c2c156270dceb8c15fad3120cdce03e539 ]
+
+In several places a division by fmt->vdownsampling[p] was
+missing in the sizeimage[p] calculation, causing incorrect
+behavior for multiplanar formats where some planes are smaller
+than the first plane.
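+
+In other words, the expected per-plane size is (a sketch using the
+driver's own fields):
+
+  sizeimage[p] = bytesperline[p] * height / vdownsampling[p]
+               + data_offset[p];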
+
+Found by new v4l2-compliance tests.
+
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/test-drivers/vivid/vivid-vid-cap.c | 5 +++--
+ drivers/media/test-drivers/vivid/vivid-vid-out.c | 16 +++++++++-------
+ 2 files changed, 12 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/media/test-drivers/vivid/vivid-vid-cap.c b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
+index 2804975fe278..3a3041a0378f 100644
+--- a/drivers/media/test-drivers/vivid/vivid-vid-cap.c
++++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
+@@ -106,8 +106,9 @@ static int vid_cap_queue_setup(struct vb2_queue *vq,
+ if (*nplanes != buffers)
+ return -EINVAL;
+ for (p = 0; p < buffers; p++) {
+- if (sizes[p] < tpg_g_line_width(&dev->tpg, p) * h +
+- dev->fmt_cap->data_offset[p])
++ if (sizes[p] < tpg_g_line_width(&dev->tpg, p) * h /
++ dev->fmt_cap->vdownsampling[p] +
++ dev->fmt_cap->data_offset[p])
+ return -EINVAL;
+ }
+ } else {
+diff --git a/drivers/media/test-drivers/vivid/vivid-vid-out.c b/drivers/media/test-drivers/vivid/vivid-vid-out.c
+index 1653b2988f7e..7a0f4c61ac80 100644
+--- a/drivers/media/test-drivers/vivid/vivid-vid-out.c
++++ b/drivers/media/test-drivers/vivid/vivid-vid-out.c
+@@ -63,14 +63,16 @@ static int vid_out_queue_setup(struct vb2_queue *vq,
+ if (sizes[0] < size)
+ return -EINVAL;
+ for (p = 1; p < planes; p++) {
+- if (sizes[p] < dev->bytesperline_out[p] * h +
+- vfmt->data_offset[p])
++ if (sizes[p] < dev->bytesperline_out[p] * h /
++ vfmt->vdownsampling[p] +
++ vfmt->data_offset[p])
+ return -EINVAL;
+ }
+ } else {
+ for (p = 0; p < planes; p++)
+- sizes[p] = p ? dev->bytesperline_out[p] * h +
+- vfmt->data_offset[p] : size;
++ sizes[p] = p ? dev->bytesperline_out[p] * h /
++ vfmt->vdownsampling[p] +
++ vfmt->data_offset[p] : size;
+ }
+
+ *nplanes = planes;
+@@ -124,7 +126,7 @@ static int vid_out_buf_prepare(struct vb2_buffer *vb)
+
+ for (p = 0; p < planes; p++) {
+ if (p)
+- size = dev->bytesperline_out[p] * h;
++ size = dev->bytesperline_out[p] * h / vfmt->vdownsampling[p];
+ size += vb->planes[p].data_offset;
+
+ if (vb2_get_plane_payload(vb, p) < size) {
+@@ -331,8 +333,8 @@ int vivid_g_fmt_vid_out(struct file *file, void *priv,
+ for (p = 0; p < mp->num_planes; p++) {
+ mp->plane_fmt[p].bytesperline = dev->bytesperline_out[p];
+ mp->plane_fmt[p].sizeimage =
+- mp->plane_fmt[p].bytesperline * mp->height +
+- fmt->data_offset[p];
++ mp->plane_fmt[p].bytesperline * mp->height /
++ fmt->vdownsampling[p] + fmt->data_offset[p];
+ }
+ for (p = fmt->buffers; p < fmt->planes; p++) {
+ unsigned stride = dev->bytesperline_out[p];
+--
+2.43.0
+
--- /dev/null
+From 1eec3a03b1323e26ae178e4939a18d7c0b4d7d29 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 13 Aug 2024 10:59:08 +0100
+Subject: MIPS: cevt-r4k: Don't call get_c0_compare_int if timer irq is
+ installed
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jiaxun Yang <jiaxun.yang@flygoat.com>
+
+[ Upstream commit 50f2b98dc83de7809a5c5bf0ccf9af2e75c37c13 ]
+
+This avoids the warning:
+
+[ 0.118053] BUG: sleeping function called from invalid context at kernel/locking/mutex.c:283
+
+It is caused by calling get_c0_compare_int() on a secondary CPU.
+
+We also skip saving the IRQ number to struct clock_event_device *cd, as
+it's never used by the clockevent core; per the comments, it's only
+meant for "non CPU local devices".
+
+Reported-by: Serge Semin <fancer.lancer@gmail.com>
+Closes: https://lore.kernel.org/linux-mips/6szkkqxpsw26zajwysdrwplpjvhl5abpnmxgu2xuj3dkzjnvsf@4daqrz4mf44k/
+Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
+Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
+Reviewed-by: Serge Semin <fancer.lancer@gmail.com>
+Tested-by: Serge Semin <fancer.lancer@gmail.com>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/kernel/cevt-r4k.c | 15 +++++++--------
+ 1 file changed, 7 insertions(+), 8 deletions(-)
+
+diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
+index 368e8475870f..5f6e9e2ebbdb 100644
+--- a/arch/mips/kernel/cevt-r4k.c
++++ b/arch/mips/kernel/cevt-r4k.c
+@@ -303,13 +303,6 @@ int r4k_clockevent_init(void)
+ if (!c0_compare_int_usable())
+ return -ENXIO;
+
+- /*
+- * With vectored interrupts things are getting platform specific.
+- * get_c0_compare_int is a hook to allow a platform to return the
+- * interrupt number of its liking.
+- */
+- irq = get_c0_compare_int();
+-
+ cd = &per_cpu(mips_clockevent_device, cpu);
+
+ cd->name = "MIPS";
+@@ -320,7 +313,6 @@ int r4k_clockevent_init(void)
+ min_delta = calculate_min_delta();
+
+ cd->rating = 300;
+- cd->irq = irq;
+ cd->cpumask = cpumask_of(cpu);
+ cd->set_next_event = mips_next_event;
+ cd->event_handler = mips_event_handler;
+@@ -332,6 +324,13 @@ int r4k_clockevent_init(void)
+
+ cp0_timer_irq_installed = 1;
+
++ /*
++ * With vectored interrupts things are getting platform specific.
++ * get_c0_compare_int is a hook to allow a platform to return the
++ * interrupt number of its liking.
++ */
++ irq = get_c0_compare_int();
++
+ if (request_irq(irq, c0_compare_interrupt, flags, "timer",
+ c0_compare_interrupt))
+ pr_err("Failed to request irq %d (timer)\n", irq);
+--
+2.43.0
+
--- /dev/null
+From 789fb88eefd123c07d4279cf62685a407b6b09fb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Sep 2024 10:19:57 +0200
+Subject: net: bridge: br_fdb_external_learn_add(): always set EXT_LEARN
+
+From: Jonas Gorski <jonas.gorski@bisdn.de>
+
+[ Upstream commit bee2ef946d3184e99077be526567d791c473036f ]
+
+When userspace wants to take over an fdb entry by setting it as
+EXTERN_LEARNED, we set both the BR_FDB_ADDED_BY_EXT_LEARN and
+BR_FDB_ADDED_BY_USER flags in br_fdb_external_learn_add().
+
+If the bridge updates the entry later because its port changed, we clear
+the BR_FDB_ADDED_BY_EXT_LEARN flag, but leave the BR_FDB_ADDED_BY_USER
+flag set.
+
+If userspace then wants to take over the entry again,
+br_fdb_external_learn_add() sees that BR_FDB_ADDED_BY_USER is set and
+skips setting the BR_FDB_ADDED_BY_EXT_LEARN flag, thus silently
+ignoring the update.
+
+Fix this by always allowing BR_FDB_ADDED_BY_EXT_LEARN to be set,
+regardless of whether this was a user fdb entry or not.
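+
+For example (a hypothetical reproducer sketch, with made-up MAC and
+port names), userspace can take over an entry with iproute2:
+
+  bridge fdb replace 00:11:22:33:44:55 dev swp1 master extern_learn
+
+Repeating this after the bridge has updated the entry was silently
+ignored before this fix.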
+
+Fixes: 710ae7287737 ("net: bridge: Mark FDB entries that were added by user as such")
+Signed-off-by: Jonas Gorski <jonas.gorski@bisdn.de>
+Acked-by: Nikolay Aleksandrov <razor@blackwall.org>
+Reviewed-by: Ido Schimmel <idosch@nvidia.com>
+Link: https://patch.msgid.link/20240903081958.29951-1-jonas.gorski@bisdn.de
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bridge/br_fdb.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
+index c77591e63841..ad7a42b505ef 100644
+--- a/net/bridge/br_fdb.c
++++ b/net/bridge/br_fdb.c
+@@ -1469,12 +1469,10 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
+ modified = true;
+ }
+
+- if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) {
++ if (test_and_set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) {
+ /* Refresh entry */
+ fdb->used = jiffies;
+- } else if (!test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags)) {
+- /* Take over SW learned entry */
+- set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags);
++ } else {
+ modified = true;
+ }
+
+--
+2.43.0
+
--- /dev/null
+From 97b8097945eec5150dd9357c43756eae40094299 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 14 Jul 2024 01:53:32 +0300
+Subject: net: dpaa: avoid on-stack arrays of NR_CPUS elements
+
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+
+[ Upstream commit 555a05d84ca2c587e2d4777006e2c2fb3dfbd91d ]
+
+The dpaa-eth driver is written for PowerPC and Arm SoCs which have 1-24
+CPUs. It depends on CONFIG_NR_CPUS having a reasonably small value in
+Kconfig. Otherwise, there are 2 functions which allocate on-stack arrays
+of NR_CPUS elements, and these can quickly explode in size, leading to
+warnings such as:
+
+ drivers/net/ethernet/freescale/dpaa/dpaa_eth.c:3280:12: warning:
+ stack frame size (16664) exceeds limit (2048) in 'dpaa_eth_probe' [-Wframe-larger-than]
+
+The problem is twofold:
+- Reducing the array size to the boot-time num_possible_cpus() (rather
+ than the compile-time NR_CPUS) creates a variable-length array,
+ which should be avoided in the Linux kernel.
+- Using NR_CPUS as an array size makes the driver blow up in stack
+ consumption with generic, as opposed to hand-crafted, .config files.
+
+A simple solution is to use dynamic allocation for num_possible_cpus()
+elements (aka a small number determined at runtime).
+
+Link: https://lore.kernel.org/all/202406261920.l5pzM1rj-lkp@intel.com/
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Reviewed-by: Breno Leitao <leitao@debian.org>
+Acked-by: Madalin Bucur <madalin.bucur@oss.nxp.com>
+Link: https://patch.msgid.link/20240713225336.1746343-2-vladimir.oltean@nxp.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/freescale/dpaa/dpaa_eth.c | 20 ++++++++++++++-----
+ .../ethernet/freescale/dpaa/dpaa_ethtool.c | 10 +++++++++-
+ 2 files changed, 24 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+index baa0b3c2ce6f..946c3d3b69d9 100644
+--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+@@ -931,14 +931,18 @@ static inline void dpaa_setup_egress(const struct dpaa_priv *priv,
+ }
+ }
+
+-static void dpaa_fq_setup(struct dpaa_priv *priv,
+- const struct dpaa_fq_cbs *fq_cbs,
+- struct fman_port *tx_port)
++static int dpaa_fq_setup(struct dpaa_priv *priv,
++ const struct dpaa_fq_cbs *fq_cbs,
++ struct fman_port *tx_port)
+ {
+ int egress_cnt = 0, conf_cnt = 0, num_portals = 0, portal_cnt = 0, cpu;
+ const cpumask_t *affine_cpus = qman_affine_cpus();
+- u16 channels[NR_CPUS];
+ struct dpaa_fq *fq;
++ u16 *channels;
++
++ channels = kcalloc(num_possible_cpus(), sizeof(u16), GFP_KERNEL);
++ if (!channels)
++ return -ENOMEM;
+
+ for_each_cpu_and(cpu, affine_cpus, cpu_online_mask)
+ channels[num_portals++] = qman_affine_channel(cpu);
+@@ -997,6 +1001,10 @@ static void dpaa_fq_setup(struct dpaa_priv *priv,
+ break;
+ }
+ }
++
++ kfree(channels);
++
++ return 0;
+ }
+
+ static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv,
+@@ -3416,7 +3424,9 @@ static int dpaa_eth_probe(struct platform_device *pdev)
+ */
+ dpaa_eth_add_channel(priv->channel, &pdev->dev);
+
+- dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
++ err = dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
++ if (err)
++ goto free_dpaa_bps;
+
+ /* Create a congestion group for this netdev, with
+ * dynamically-allocated CGR ID.
+diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+index 5bd0b36d1feb..3f8cd4a7d845 100644
+--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+@@ -457,12 +457,16 @@ static int dpaa_set_coalesce(struct net_device *dev,
+ struct netlink_ext_ack *extack)
+ {
+ const cpumask_t *cpus = qman_affine_cpus();
+- bool needs_revert[NR_CPUS] = {false};
+ struct qman_portal *portal;
+ u32 period, prev_period;
+ u8 thresh, prev_thresh;
++ bool *needs_revert;
+ int cpu, res;
+
++ needs_revert = kcalloc(num_possible_cpus(), sizeof(bool), GFP_KERNEL);
++ if (!needs_revert)
++ return -ENOMEM;
++
+ period = c->rx_coalesce_usecs;
+ thresh = c->rx_max_coalesced_frames;
+
+@@ -485,6 +489,8 @@ static int dpaa_set_coalesce(struct net_device *dev,
+ needs_revert[cpu] = true;
+ }
+
++ kfree(needs_revert);
++
+ return 0;
+
+ revert_values:
+@@ -498,6 +504,8 @@ static int dpaa_set_coalesce(struct net_device *dev,
+ qman_dqrr_set_ithresh(portal, prev_thresh);
+ }
+
++ kfree(needs_revert);
++
+ return res;
+ }
+
+--
+2.43.0
+
--- /dev/null
+From ab5c90b12b1dcdf9ac5aec74c8400fd56a08ae96 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 Sep 2024 03:17:30 -0700
+Subject: net: dqs: Do not use extern for unused dql_group
+
+From: Breno Leitao <leitao@debian.org>
+
+[ Upstream commit 77461c10819103eaee7b33c744174b32a8c78b40 ]
+
+When CONFIG_BQL is not enabled, dql_group should be treated as a dead
+declaration. However, its current extern declaration assumes the linker
+will ignore it, which is generally true across most compiler and
+architecture combinations.
+
+But in certain cases, the linker still attempts to resolve the extern
+struct, even when the associated code is dead, resulting in a linking
+error. For instance, the following error on loongarch64:
+
+>> loongarch64-linux-ld: net-sysfs.c:(.text+0x589c): undefined reference to `dql_group'
+
+Modify the declaration of the dead object to be an empty declaration
+instead of an extern. This change will prevent the linker from
+attempting to resolve an undefined reference.
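+
+A condensed sketch of the pattern (names mirror the net-sysfs code; the
+actual attribute contents are omitted):
+
+  #include <linux/sysfs.h>
+
+  #ifdef CONFIG_BQL
+  static struct attribute *dql_attrs[] = {
+          /* real byte-queue-limit attributes go here */
+          NULL,
+  };
+
+  static const struct attribute_group dql_group = {
+          .name  = "byte_queue_limits",
+          .attrs = dql_attrs,
+  };
+  #else
+  /* Empty definition: even if a dead code path still references dql_group,
+   * there is no undefined symbol left for the linker to resolve.
+   */
+  static const struct attribute_group dql_group = {};
+  #endif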
+
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/oe-kbuild-all/202409012047.eCaOdfQJ-lkp@intel.com/
+Fixes: 74293ea1c4db ("net: sysfs: Do not create sysfs for non BQL device")
+Signed-off-by: Breno Leitao <leitao@debian.org>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Tested-by: Simon Horman <horms@kernel.org> # build-tested
+Link: https://patch.msgid.link/20240902101734.3260455-1-leitao@debian.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/net-sysfs.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
+index dc91921da4ea..15ad775ddd3c 100644
+--- a/net/core/net-sysfs.c
++++ b/net/core/net-sysfs.c
+@@ -1524,7 +1524,7 @@ static const struct attribute_group dql_group = {
+ };
+ #else
+ /* Fake declaration, all the code using it should be dead */
+-extern const struct attribute_group dql_group;
++static const struct attribute_group dql_group = {};
+ #endif /* CONFIG_BQL */
+
+ #ifdef CONFIG_XPS
+--
+2.43.0
+
--- /dev/null
+From 5ab0ec7be8e616f69ba12bc16bc1019071aa940a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Sep 2024 22:33:41 +0200
+Subject: net: dsa: vsc73xx: fix possible subblocks range of CAPT block
+
+From: Pawel Dembicki <paweldembicki@gmail.com>
+
+[ Upstream commit 8e69c96df771ab469cec278edb47009351de4da6 ]
+
+The CAPT block (CPU Capture Buffer) has 7 subblocks: 0-4, 6 and 7.
+The 'vsc73xx_is_addr_valid' function currently allows only subblock 0
+to be used.
+
+This patch fixes it.
+
+Fixes: 05bd97fc559d ("net: dsa: Add Vitesse VSC73xx DSA router driver")
+Signed-off-by: Pawel Dembicki <paweldembicki@gmail.com>
+Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Link: https://patch.msgid.link/20240903203340.1518789-1-paweldembicki@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/vitesse-vsc73xx-core.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
+index 56bb77dbd28a..cefddcf3cc6f 100644
+--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
++++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
+@@ -34,7 +34,7 @@
+ #define VSC73XX_BLOCK_ANALYZER 0x2 /* Only subblock 0 */
+ #define VSC73XX_BLOCK_MII 0x3 /* Subblocks 0 and 1 */
+ #define VSC73XX_BLOCK_MEMINIT 0x3 /* Only subblock 2 */
+-#define VSC73XX_BLOCK_CAPTURE 0x4 /* Only subblock 2 */
++#define VSC73XX_BLOCK_CAPTURE 0x4 /* Subblocks 0-4, 6, 7 */
+ #define VSC73XX_BLOCK_ARBITER 0x5 /* Only subblock 0 */
+ #define VSC73XX_BLOCK_SYSTEM 0x7 /* Only subblock 0 */
+
+@@ -370,13 +370,19 @@ int vsc73xx_is_addr_valid(u8 block, u8 subblock)
+ break;
+
+ case VSC73XX_BLOCK_MII:
+- case VSC73XX_BLOCK_CAPTURE:
+ case VSC73XX_BLOCK_ARBITER:
+ switch (subblock) {
+ case 0 ... 1:
+ return 1;
+ }
+ break;
++ case VSC73XX_BLOCK_CAPTURE:
++ switch (subblock) {
++ case 0 ... 4:
++ case 6 ... 7:
++ return 1;
++ }
++ break;
+ }
+
+ return 0;
+--
+2.43.0
+
--- /dev/null
+From 153c3e2ca9c01c41d94caec5429cc21afc0885d0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 29 Aug 2024 15:03:21 +0300
+Subject: net: ethernet: ti: am65-cpsw: Fix RX statistics for XDP_TX and
+ XDP_REDIRECT
+
+From: Roger Quadros <rogerq@kernel.org>
+
+[ Upstream commit 624d3291484f9cada10660f820db926c0bce7741 ]
+
+We are not using ndev->stats for rx_packets and rx_bytes anymore.
+Instead, we use per CPU stats which are collated in
+am65_cpsw_nuss_ndo_get_stats().
+
+Fix RX statistics for XDP_TX and XDP_REDIRECT cases.
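+
+For reference, a simplified sketch of the per-CPU stats update pattern used
+on these paths (generic struct and function names, not the driver's own):
+
+  #include <linux/u64_stats_sync.h>
+  #include <linux/percpu.h>
+  #include <linux/types.h>
+
+  struct rx_stats {
+          u64 rx_packets;
+          u64 rx_bytes;
+          struct u64_stats_sync syncp;
+  };
+
+  static void count_rx(struct rx_stats __percpu *pcpu_stats, unsigned int len)
+  {
+          struct rx_stats *stats = this_cpu_ptr(pcpu_stats);
+
+          u64_stats_update_begin(&stats->syncp);
+          stats->rx_bytes += len;
+          stats->rx_packets++;
+          u64_stats_update_end(&stats->syncp);
+  }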
+
+Fixes: 8acacc40f733 ("net: ethernet: ti: am65-cpsw: Add minimal XDP support")
+Signed-off-by: Roger Quadros <rogerq@kernel.org>
+Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
+Acked-by: Julien Panis <jpanis@baylibre.com>
+Reviewed-by: MD Danish Anwar <danishanwar@ti.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/ti/am65-cpsw-nuss.c | 17 +++++++++++++----
+ 1 file changed, 13 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+index 902b22de61d1..330eea349caa 100644
+--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+@@ -998,7 +998,9 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_common *common,
+ int desc_idx, int cpu, int *len)
+ {
+ struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
++ struct am65_cpsw_ndev_priv *ndev_priv;
+ struct net_device *ndev = port->ndev;
++ struct am65_cpsw_ndev_stats *stats;
+ int ret = AM65_CPSW_XDP_CONSUMED;
+ struct am65_cpsw_tx_chn *tx_chn;
+ struct netdev_queue *netif_txq;
+@@ -1016,6 +1018,9 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_common *common,
+ /* XDP prog might have changed packet data and boundaries */
+ *len = xdp->data_end - xdp->data;
+
++ ndev_priv = netdev_priv(ndev);
++ stats = this_cpu_ptr(ndev_priv->stats);
++
+ switch (act) {
+ case XDP_PASS:
+ ret = AM65_CPSW_XDP_PASS;
+@@ -1035,16 +1040,20 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_common *common,
+ if (err)
+ goto drop;
+
+- ndev->stats.rx_bytes += *len;
+- ndev->stats.rx_packets++;
++ u64_stats_update_begin(&stats->syncp);
++ stats->rx_bytes += *len;
++ stats->rx_packets++;
++ u64_stats_update_end(&stats->syncp);
+ ret = AM65_CPSW_XDP_CONSUMED;
+ goto out;
+ case XDP_REDIRECT:
+ if (unlikely(xdp_do_redirect(ndev, xdp, prog)))
+ goto drop;
+
+- ndev->stats.rx_bytes += *len;
+- ndev->stats.rx_packets++;
++ u64_stats_update_begin(&stats->syncp);
++ stats->rx_bytes += *len;
++ stats->rx_packets++;
++ u64_stats_update_end(&stats->syncp);
+ ret = AM65_CPSW_XDP_REDIRECT;
+ goto out;
+ default:
+--
+2.43.0
+
--- /dev/null
+From 0d6ab74453e179d67ac195fd4c244aadb760f3d1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 13 Aug 2024 22:10:23 +0800
+Subject: net: hns3: avoid array out of bounds when looping over tnl_num
+
+From: Peiyang Wang <wangpeiyang1@huawei.com>
+
+[ Upstream commit 86db7bfb06704ef17340eeae71c832f21cfce35c ]
+
+When querying the register info of the SSU, the code loops tnl_num times.
+However, tnl_num comes from hardware while the length of the array is a
+fixed value. To avoid going out of the array bounds, make sure the loop
+count is not greater than the length of the array.
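+
+The clamp pattern in isolation (generic sketch; PARA_MAX is a hypothetical
+stand-in for the driver's fixed array size):
+
+  #include <linux/minmax.h>
+  #include <linux/types.h>
+
+  #define PARA_MAX 8
+
+  static void fill_params(u32 para[PARA_MAX], u8 hw_count)
+  {
+          /* hw_count comes from hardware; clamp it before using it as a
+           * loop bound over the fixed-size array.
+           */
+          u8 i, n = min_t(u8, hw_count, PARA_MAX);
+
+          for (i = 0; i < n; i++)
+                  para[i] = i + 1;
+  }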
+
+Signed-off-by: Peiyang Wang <wangpeiyang1@huawei.com>
+Signed-off-by: Jijie Shao <shaojijie@huawei.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+index e132c2f09560..cc7f46c0b35f 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+@@ -1598,8 +1598,7 @@ static void hclge_query_reg_info_of_ssu(struct hclge_dev *hdev)
+ {
+ u32 loop_para[HCLGE_MOD_MSG_PARA_ARRAY_MAX_SIZE] = {0};
+ struct hclge_mod_reg_common_msg msg;
+- u8 i, j, num;
+- u32 loop_time;
++ u8 i, j, num, loop_time;
+
+ num = ARRAY_SIZE(hclge_ssu_reg_common_msg);
+ for (i = 0; i < num; i++) {
+@@ -1609,7 +1608,8 @@ static void hclge_query_reg_info_of_ssu(struct hclge_dev *hdev)
+ loop_time = 1;
+ loop_para[0] = 0;
+ if (msg.need_para) {
+- loop_time = hdev->ae_dev->dev_specs.tnl_num;
++ loop_time = min(hdev->ae_dev->dev_specs.tnl_num,
++ HCLGE_MOD_MSG_PARA_ARRAY_MAX_SIZE);
+ for (j = 0; j < loop_time; j++)
+ loop_para[j] = j + 1;
+ }
+--
+2.43.0
+
--- /dev/null
+From 5058bbf7e0250872b9824b42e1215c5841b9d93f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 30 Aug 2024 10:20:25 +0800
+Subject: net: phy: Fix missing of_node_put() for leds
+
+From: Jinjie Ruan <ruanjinjie@huawei.com>
+
+[ Upstream commit 2560db6ede1aaf162a73b2df43e0b6c5ed8819f7 ]
+
+A successful call to of_get_child_by_name() increments the refcount of
+the 'leds' node, so of_node_put() must be called on it once the node is
+no longer needed. Fix it by adding the missing of_node_put() calls.
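+
+A minimal sketch of the intended get/put balance (illustrative only;
+setup_one_led() is a hypothetical stand-in for of_phy_led()):
+
+  #include <linux/of.h>
+
+  /* hypothetical per-LED init */
+  static int setup_one_led(struct device_node *led)
+  {
+          return 0;
+  }
+
+  static int walk_leds(struct device_node *np)
+  {
+          struct device_node *leds, *led;
+          int err;
+
+          leds = of_get_child_by_name(np, "leds");   /* takes a reference */
+          if (!leds)
+                  return 0;
+
+          for_each_available_child_of_node(leds, led) {
+                  err = setup_one_led(led);
+                  if (err) {
+                          of_node_put(led);    /* drop the iterator's ref */
+                          of_node_put(leds);   /* and the parent ref on error */
+                          return err;
+                  }
+          }
+
+          of_node_put(leds);   /* balance of_get_child_by_name() on success */
+          return 0;
+  }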
+
+Fixes: 01e5b728e9e4 ("net: phy: Add a binding for PHY LEDs")
+Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Link: https://patch.msgid.link/20240830022025.610844-1-ruanjinjie@huawei.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/phy/phy_device.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index 6c6ec9475709..2c0ee5cf8b6e 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -3346,11 +3346,13 @@ static int of_phy_leds(struct phy_device *phydev)
+ err = of_phy_led(phydev, led);
+ if (err) {
+ of_node_put(led);
++ of_node_put(leds);
+ phy_leds_unregister(phydev);
+ return err;
+ }
+ }
+
++ of_node_put(leds);
+ return 0;
+ }
+
+--
+2.43.0
+
--- /dev/null
+From 9eff043d152339f04ef2b723adf1dc51dbc68c84 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Sep 2024 13:51:41 -0400
+Subject: net: xilinx: axienet: Fix race in axienet_stop
+
+From: Sean Anderson <sean.anderson@linux.dev>
+
+[ Upstream commit 858430db28a5f5a11f8faa3a6fa805438e6f0851 ]
+
+axienet_dma_err_handler can race with axienet_stop in the following
+manner:
+
+CPU 1 CPU 2
+====================== ==================
+axienet_stop()
+ napi_disable()
+ axienet_dma_stop()
+ axienet_dma_err_handler()
+ napi_disable()
+ axienet_dma_stop()
+ axienet_dma_start()
+ napi_enable()
+ cancel_work_sync()
+ free_irq()
+
+Fix this by setting a flag in axienet_stop telling
+axienet_dma_err_handler not to bother doing anything. I chose not to use
+disable_work_sync to allow for easier backporting.
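+
+Conceptually the fix follows this pattern (simplified sketch with generic
+names, not the driver code itself):
+
+  #include <linux/workqueue.h>
+  #include <linux/compiler.h>
+  #include <linux/container_of.h>
+  #include <linux/types.h>
+
+  struct dev_ctx {
+          struct work_struct err_task;
+          bool stopping;
+  };
+
+  static void err_handler(struct work_struct *work)
+  {
+          struct dev_ctx *ctx = container_of(work, struct dev_ctx, err_task);
+
+          /* Don't restart anything if a stop is in progress */
+          if (READ_ONCE(ctx->stopping))
+                  return;
+
+          /* ... reset and restart the DMA channels ... */
+  }
+
+  static void dev_stop(struct dev_ctx *ctx)
+  {
+          WRITE_ONCE(ctx->stopping, true);
+          flush_work(&ctx->err_task);   /* wait out any in-flight handler */
+          /* ... now safe to tear down DMA and free IRQs ... */
+  }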
+
+Signed-off-by: Sean Anderson <sean.anderson@linux.dev>
+Fixes: 8a3b7a252dca ("drivers/net/ethernet/xilinx: added Xilinx AXI Ethernet driver")
+Link: https://patch.msgid.link/20240903175141.4132898-1-sean.anderson@linux.dev
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/xilinx/xilinx_axienet.h | 3 +++
+ drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 8 ++++++++
+ 2 files changed, 11 insertions(+)
+
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
+index 09c9f9787180..1223fcc1a8da 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
+@@ -436,6 +436,8 @@ struct skbuf_dma_descriptor {
+ * @tx_bytes: TX byte count for statistics
+ * @tx_stat_sync: Synchronization object for TX stats
+ * @dma_err_task: Work structure to process Axi DMA errors
++ * @stopping: Set when @dma_err_task shouldn't do anything because we are
++ * about to stop the device.
+ * @tx_irq: Axidma TX IRQ number
+ * @rx_irq: Axidma RX IRQ number
+ * @eth_irq: Ethernet core IRQ number
+@@ -507,6 +509,7 @@ struct axienet_local {
+ struct u64_stats_sync tx_stat_sync;
+
+ struct work_struct dma_err_task;
++ bool stopping;
+
+ int tx_irq;
+ int rx_irq;
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index 559c0d60d948..88d7bc2ea713 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -1460,6 +1460,7 @@ static int axienet_init_legacy_dma(struct net_device *ndev)
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ /* Enable worker thread for Axi DMA error handling */
++ lp->stopping = false;
+ INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
+
+ napi_enable(&lp->napi_rx);
+@@ -1580,6 +1581,9 @@ static int axienet_stop(struct net_device *ndev)
+ dev_dbg(&ndev->dev, "axienet_close()\n");
+
+ if (!lp->use_dmaengine) {
++ WRITE_ONCE(lp->stopping, true);
++ flush_work(&lp->dma_err_task);
++
+ napi_disable(&lp->napi_tx);
+ napi_disable(&lp->napi_rx);
+ }
+@@ -2154,6 +2158,10 @@ static void axienet_dma_err_handler(struct work_struct *work)
+ dma_err_task);
+ struct net_device *ndev = lp->ndev;
+
++ /* Don't bother if we are going to stop anyway */
++ if (READ_ONCE(lp->stopping))
++ return;
++
+ napi_disable(&lp->napi_tx);
+ napi_disable(&lp->napi_rx);
+
+--
+2.43.0
+
--- /dev/null
+From 9ba3c53f34b8791fd1a0e6dd8c4a5118c3ff0751 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 31 May 2024 11:48:47 +0800
+Subject: netfilter: nf_conncount: fix wrong variable type
+
+From: Yunjian Wang <wangyunjian@huawei.com>
+
+[ Upstream commit 0b88d1654d556264bcd24a9cb6383f0888e30131 ]
+
+There is an issue where code checks report a warning: implicit narrowing
+conversion from type 'unsigned int' to small type 'u8' (the 'keylen'
+variable). Fix it by removing the 'keylen' variable.
+
+Signed-off-by: Yunjian Wang <wangyunjian@huawei.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_conncount.c | 8 +++-----
+ 1 file changed, 3 insertions(+), 5 deletions(-)
+
+diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c
+index 8715617b02fe..34ba14e59e95 100644
+--- a/net/netfilter/nf_conncount.c
++++ b/net/netfilter/nf_conncount.c
+@@ -321,7 +321,6 @@ insert_tree(struct net *net,
+ struct nf_conncount_rb *rbconn;
+ struct nf_conncount_tuple *conn;
+ unsigned int count = 0, gc_count = 0;
+- u8 keylen = data->keylen;
+ bool do_gc = true;
+
+ spin_lock_bh(&nf_conncount_locks[hash]);
+@@ -333,7 +332,7 @@ insert_tree(struct net *net,
+ rbconn = rb_entry(*rbnode, struct nf_conncount_rb, node);
+
+ parent = *rbnode;
+- diff = key_diff(key, rbconn->key, keylen);
++ diff = key_diff(key, rbconn->key, data->keylen);
+ if (diff < 0) {
+ rbnode = &((*rbnode)->rb_left);
+ } else if (diff > 0) {
+@@ -378,7 +377,7 @@ insert_tree(struct net *net,
+
+ conn->tuple = *tuple;
+ conn->zone = *zone;
+- memcpy(rbconn->key, key, sizeof(u32) * keylen);
++ memcpy(rbconn->key, key, sizeof(u32) * data->keylen);
+
+ nf_conncount_list_init(&rbconn->list);
+ list_add(&conn->node, &rbconn->list.head);
+@@ -403,7 +402,6 @@ count_tree(struct net *net,
+ struct rb_node *parent;
+ struct nf_conncount_rb *rbconn;
+ unsigned int hash;
+- u8 keylen = data->keylen;
+
+ hash = jhash2(key, data->keylen, conncount_rnd) % CONNCOUNT_SLOTS;
+ root = &data->root[hash];
+@@ -414,7 +412,7 @@ count_tree(struct net *net,
+
+ rbconn = rb_entry(parent, struct nf_conncount_rb, node);
+
+- diff = key_diff(key, rbconn->key, keylen);
++ diff = key_diff(key, rbconn->key, data->keylen);
+ if (diff < 0) {
+ parent = rcu_dereference_raw(parent->rb_left);
+ } else if (diff > 0) {
+--
+2.43.0
+
--- /dev/null
+From ea31ee7c8b47104a5fb93bd23e0b322bf4a7e492 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Aug 2024 23:06:49 +0100
+Subject: netfs, cifs: Fix handling of short DIO read
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit 1da29f2c39b67b846b74205c81bf0ccd96d34727 ]
+
+Short DIO reads, particularly in relation to cifs, are not being handled
+correctly by cifs and netfslib. This can be tested by doing a DIO read of
+a file where the size of read is larger than the size of the file. When it
+crosses the EOF, it gets a short read and this gets retried, and in the
+case of cifs, the retry read fails, with the failure being translated to
+ENODATA.
+
+Fix this by the following means:
+
+ (1) Add a flag, NETFS_SREQ_HIT_EOF, for the filesystem to set when it
+ detects that the read did hit the EOF.
+
+ (2) Make the netfslib read assessment stop processing subrequests when it
+ encounters one with that flag set.
+
+ (3) Return rreq->transferred, the accumulated contiguous amount read to
+ that point, to userspace for a DIO read.
+
+ (4) Make cifs set the flag and clear the error if the read RPC returned
+ ENODATA.
+
+ (5) Make cifs set the flag and clear the error if a short read occurred
+ without error and the read-to file position is now at the remote inode
+ size.
+
+Fixes: 69c3c023af25 ("cifs: Implement netfslib hooks")
+Signed-off-by: David Howells <dhowells@redhat.com>
+cc: Steve French <sfrench@samba.org>
+cc: Paulo Alcantara <pc@manguebit.com>
+cc: Jeff Layton <jlayton@kernel.org>
+cc: linux-cifs@vger.kernel.org
+cc: netfs@lists.linux.dev
+cc: linux-fsdevel@vger.kernel.org
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/netfs/io.c | 17 +++++++++++------
+ fs/smb/client/smb2pdu.c | 13 +++++++++----
+ include/linux/netfs.h | 1 +
+ 3 files changed, 21 insertions(+), 10 deletions(-)
+
+diff --git a/fs/netfs/io.c b/fs/netfs/io.c
+index 2a5c22606fb1..c91e7b12bbf1 100644
+--- a/fs/netfs/io.c
++++ b/fs/netfs/io.c
+@@ -368,7 +368,8 @@ static void netfs_rreq_assess_dio(struct netfs_io_request *rreq)
+ if (subreq->error || subreq->transferred == 0)
+ break;
+ transferred += subreq->transferred;
+- if (subreq->transferred < subreq->len)
++ if (subreq->transferred < subreq->len ||
++ test_bit(NETFS_SREQ_HIT_EOF, &subreq->flags))
+ break;
+ }
+
+@@ -503,7 +504,8 @@ void netfs_subreq_terminated(struct netfs_io_subrequest *subreq,
+
+ subreq->error = 0;
+ subreq->transferred += transferred_or_error;
+- if (subreq->transferred < subreq->len)
++ if (subreq->transferred < subreq->len &&
++ !test_bit(NETFS_SREQ_HIT_EOF, &subreq->flags))
+ goto incomplete;
+
+ complete:
+@@ -777,10 +779,13 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
+ TASK_UNINTERRUPTIBLE);
+
+ ret = rreq->error;
+- if (ret == 0 && rreq->submitted < rreq->len &&
+- rreq->origin != NETFS_DIO_READ) {
+- trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
+- ret = -EIO;
++ if (ret == 0) {
++ if (rreq->origin == NETFS_DIO_READ) {
++ ret = rreq->transferred;
++ } else if (rreq->submitted < rreq->len) {
++ trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
++ ret = -EIO;
++ }
+ }
+ } else {
+ /* If we decrement nr_outstanding to 0, the ref belongs to us. */
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+index 5f5f51bf9850..8e02e9f45e0e 100644
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -4501,6 +4501,7 @@ static void
+ smb2_readv_callback(struct mid_q_entry *mid)
+ {
+ struct cifs_io_subrequest *rdata = mid->callback_data;
++ struct netfs_inode *ictx = netfs_inode(rdata->rreq->inode);
+ struct cifs_tcon *tcon = tlink_tcon(rdata->req->cfile->tlink);
+ struct TCP_Server_Info *server = rdata->server;
+ struct smb2_hdr *shdr =
+@@ -4593,11 +4594,15 @@ smb2_readv_callback(struct mid_q_entry *mid)
+ rdata->got_bytes);
+
+ if (rdata->result == -ENODATA) {
+- /* We may have got an EOF error because fallocate
+- * failed to enlarge the file.
+- */
+- if (rdata->subreq.start < rdata->subreq.rreq->i_size)
++ __set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
++ rdata->result = 0;
++ } else {
++ if (rdata->got_bytes < rdata->actual_len &&
++ rdata->subreq.start + rdata->subreq.transferred + rdata->got_bytes ==
++ ictx->remote_i_size) {
++ __set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
+ rdata->result = 0;
++ }
+ }
+ trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, rdata->credits.value,
+ server->credits, server->in_flight,
+diff --git a/include/linux/netfs.h b/include/linux/netfs.h
+index 5d0288938cc2..d8892b1a2dd7 100644
+--- a/include/linux/netfs.h
++++ b/include/linux/netfs.h
+@@ -200,6 +200,7 @@ struct netfs_io_subrequest {
+ #define NETFS_SREQ_NEED_RETRY 9 /* Set if the filesystem requests a retry */
+ #define NETFS_SREQ_RETRYING 10 /* Set if we're retrying */
+ #define NETFS_SREQ_FAILED 11 /* Set if the subreq failed unretryably */
++#define NETFS_SREQ_HIT_EOF 12 /* Set if we hit the EOF */
+ };
+
+ enum netfs_io_origin {
+--
+2.43.0
+
--- /dev/null
+From cd3729e2fe73e3aaab748cf25e69aa1faea748f5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Aug 2024 14:05:00 -0400
+Subject: NFSv4: Add missing rescheduling points in
+ nfs_client_return_marked_delegations
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+[ Upstream commit a017ad1313fc91bdf235097fd0a02f673fc7bb11 ]
+
+We're seeing reports of soft lockups when iterating through the loops,
+so let's add rescheduling points.
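+
+The change simply yields the CPU inside the long-running iteration, along
+these lines (generic sketch, not the nfs code):
+
+  #include <linux/sched.h>
+  #include <linux/list.h>
+
+  static void process_many(struct list_head *head)
+  {
+          struct list_head *pos;
+
+          list_for_each(pos, head) {
+                  /* ... per-entry work ... */
+                  cond_resched();   /* let other runnable tasks make progress */
+          }
+  }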
+
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/super.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/fs/nfs/super.c b/fs/nfs/super.c
+index cbbd4866b0b7..97b386032b71 100644
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -47,6 +47,7 @@
+ #include <linux/vfs.h>
+ #include <linux/inet.h>
+ #include <linux/in6.h>
++#include <linux/sched.h>
+ #include <linux/slab.h>
+ #include <net/ipv6.h>
+ #include <linux/netdevice.h>
+@@ -228,6 +229,7 @@ static int __nfs_list_for_each_server(struct list_head *head,
+ ret = fn(server, data);
+ if (ret)
+ goto out;
++ cond_resched();
+ rcu_read_lock();
+ }
+ rcu_read_unlock();
+--
+2.43.0
+
--- /dev/null
+From 1516c9ecd7b2699bc25bbe4bf47b5adda24efbf0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 12 Aug 2024 12:06:51 +0200
+Subject: of/irq: Prevent device address out-of-bounds read in interrupt map
+ walk
+
+From: Stefan Wiehler <stefan.wiehler@nokia.com>
+
+[ Upstream commit b739dffa5d570b411d4bdf4bb9b8dfd6b7d72305 ]
+
+When of_irq_parse_raw() is invoked with a device address smaller than
+the interrupt parent node (from #address-cells property), KASAN detects
+the following out-of-bounds read when populating the initial match table
+(dyndbg="func of_irq_parse_* +p"):
+
+ OF: of_irq_parse_one: dev=/soc@0/picasso/watchdog, index=0
+ OF: parent=/soc@0/pci@878000000000/gpio0@17,0, intsize=2
+ OF: intspec=4
+ OF: of_irq_parse_raw: ipar=/soc@0/pci@878000000000/gpio0@17,0, size=2
+ OF: -> addrsize=3
+ ==================================================================
+ BUG: KASAN: slab-out-of-bounds in of_irq_parse_raw+0x2b8/0x8d0
+ Read of size 4 at addr ffffff81beca5608 by task bash/764
+
+ CPU: 1 PID: 764 Comm: bash Tainted: G O 6.1.67-484c613561-nokia_sm_arm64 #1
+ Hardware name: Unknown Unknown Product/Unknown Product, BIOS 2023.01-12.24.03-dirty 01/01/2023
+ Call trace:
+ dump_backtrace+0xdc/0x130
+ show_stack+0x1c/0x30
+ dump_stack_lvl+0x6c/0x84
+ print_report+0x150/0x448
+ kasan_report+0x98/0x140
+ __asan_load4+0x78/0xa0
+ of_irq_parse_raw+0x2b8/0x8d0
+ of_irq_parse_one+0x24c/0x270
+ parse_interrupts+0xc0/0x120
+ of_fwnode_add_links+0x100/0x2d0
+ fw_devlink_parse_fwtree+0x64/0xc0
+ device_add+0xb38/0xc30
+ of_device_add+0x64/0x90
+ of_platform_device_create_pdata+0xd0/0x170
+ of_platform_bus_create+0x244/0x600
+ of_platform_notify+0x1b0/0x254
+ blocking_notifier_call_chain+0x9c/0xd0
+ __of_changeset_entry_notify+0x1b8/0x230
+ __of_changeset_apply_notify+0x54/0xe4
+ of_overlay_fdt_apply+0xc04/0xd94
+ ...
+
+ The buggy address belongs to the object at ffffff81beca5600
+ which belongs to the cache kmalloc-128 of size 128
+ The buggy address is located 8 bytes inside of
+ 128-byte region [ffffff81beca5600, ffffff81beca5680)
+
+ The buggy address belongs to the physical page:
+ page:00000000230d3d03 refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x1beca4
+ head:00000000230d3d03 order:1 compound_mapcount:0 compound_pincount:0
+ flags: 0x8000000000010200(slab|head|zone=2)
+ raw: 8000000000010200 0000000000000000 dead000000000122 ffffff810000c300
+ raw: 0000000000000000 0000000000200020 00000001ffffffff 0000000000000000
+ page dumped because: kasan: bad access detected
+
+ Memory state around the buggy address:
+ ffffff81beca5500: 04 fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+ ffffff81beca5580: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+ >ffffff81beca5600: 00 fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+ ^
+ ffffff81beca5680: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+ ffffff81beca5700: 00 00 00 00 00 00 fc fc fc fc fc fc fc fc fc fc
+ ==================================================================
+ OF: -> got it !
+
+Prevent the out-of-bounds read by copying the device address into a
+buffer of sufficient size.
+
+Signed-off-by: Stefan Wiehler <stefan.wiehler@nokia.com>
+Link: https://lore.kernel.org/r/20240812100652.3800963-1-stefan.wiehler@nokia.com
+Signed-off-by: Rob Herring (Arm) <robh@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/of/irq.c | 15 +++++++++++----
+ 1 file changed, 11 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/of/irq.c b/drivers/of/irq.c
+index c94203ce65bb..8fd63100ba8f 100644
+--- a/drivers/of/irq.c
++++ b/drivers/of/irq.c
+@@ -344,7 +344,8 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
+ struct device_node *p;
+ const __be32 *addr;
+ u32 intsize;
+- int i, res;
++ int i, res, addr_len;
++ __be32 addr_buf[3] = { 0 };
+
+ pr_debug("of_irq_parse_one: dev=%pOF, index=%d\n", device, index);
+
+@@ -353,13 +354,19 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
+ return of_irq_parse_oldworld(device, index, out_irq);
+
+ /* Get the reg property (if any) */
+- addr = of_get_property(device, "reg", NULL);
++ addr = of_get_property(device, "reg", &addr_len);
++
++ /* Prevent out-of-bounds read in case of longer interrupt parent address size */
++ if (addr_len > (3 * sizeof(__be32)))
++ addr_len = 3 * sizeof(__be32);
++ if (addr)
++ memcpy(addr_buf, addr, addr_len);
+
+ /* Try the new-style interrupts-extended first */
+ res = of_parse_phandle_with_args(device, "interrupts-extended",
+ "#interrupt-cells", index, out_irq);
+ if (!res)
+- return of_irq_parse_raw(addr, out_irq);
++ return of_irq_parse_raw(addr_buf, out_irq);
+
+ /* Look for the interrupt parent. */
+ p = of_irq_find_parent(device);
+@@ -389,7 +396,7 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
+
+
+ /* Check if there are any interrupt-map translations to process */
+- res = of_irq_parse_raw(addr, out_irq);
++ res = of_irq_parse_raw(addr_buf, out_irq);
+ out:
+ of_node_put(p);
+ return res;
+--
+2.43.0
+
--- /dev/null
+From cf1a008c2d41af1b919e3fc4d8728ffe09ee95e1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 30 May 2024 18:04:35 -0700
+Subject: PCI: Add missing bridge lock to pci_bus_lock()
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+[ Upstream commit a4e772898f8bf2e7e1cf661a12c60a5612c4afab ]
+
+One of the true positives that the cfg_access_lock lockdep effort
+identified is this sequence:
+
+ WARNING: CPU: 14 PID: 1 at drivers/pci/pci.c:4886 pci_bridge_secondary_bus_reset+0x5d/0x70
+ RIP: 0010:pci_bridge_secondary_bus_reset+0x5d/0x70
+ Call Trace:
+ <TASK>
+ ? __warn+0x8c/0x190
+ ? pci_bridge_secondary_bus_reset+0x5d/0x70
+ ? report_bug+0x1f8/0x200
+ ? handle_bug+0x3c/0x70
+ ? exc_invalid_op+0x18/0x70
+ ? asm_exc_invalid_op+0x1a/0x20
+ ? pci_bridge_secondary_bus_reset+0x5d/0x70
+ pci_reset_bus+0x1d8/0x270
+ vmd_probe+0x778/0xa10
+ pci_device_probe+0x95/0x120
+
+Where pci_reset_bus() users are triggering unlocked secondary bus resets.
+Ironically pci_bus_reset(), several calls down from pci_reset_bus(), uses
+pci_bus_lock() before issuing the reset which locks everything *but* the
+bridge itself.
+
+For the same motivation as adding:
+
+ bridge = pci_upstream_bridge(dev);
+ if (bridge)
+ pci_dev_lock(bridge);
+
+to pci_reset_function() for the "bus" and "cxl_bus" reset cases, add
+pci_dev_lock() for @bus->self to pci_bus_lock().
+
+Link: https://lore.kernel.org/r/171711747501.1628941.15217746952476635316.stgit@dwillia2-xfh.jf.intel.com
+Reported-by: Imre Deak <imre.deak@intel.com>
+Closes: http://lore.kernel.org/r/6657833b3b5ae_14984b29437@dwillia2-xfh.jf.intel.com.notmuch
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+[bhelgaas: squash in recursive locking deadlock fix from Keith Busch:
+https://lore.kernel.org/r/20240711193650.701834-1-kbusch@meta.com]
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Tested-by: Hans de Goede <hdegoede@redhat.com>
+Tested-by: Kalle Valo <kvalo@kernel.org>
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/pci.c | 35 +++++++++++++++++++++--------------
+ 1 file changed, 21 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index dff09e4892d3..8db214d4b1d4 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -5441,10 +5441,12 @@ static void pci_bus_lock(struct pci_bus *bus)
+ {
+ struct pci_dev *dev;
+
++ pci_dev_lock(bus->self);
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+- pci_dev_lock(dev);
+ if (dev->subordinate)
+ pci_bus_lock(dev->subordinate);
++ else
++ pci_dev_lock(dev);
+ }
+ }
+
+@@ -5456,8 +5458,10 @@ static void pci_bus_unlock(struct pci_bus *bus)
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ if (dev->subordinate)
+ pci_bus_unlock(dev->subordinate);
+- pci_dev_unlock(dev);
++ else
++ pci_dev_unlock(dev);
+ }
++ pci_dev_unlock(bus->self);
+ }
+
+ /* Return 1 on successful lock, 0 on contention */
+@@ -5465,15 +5469,15 @@ static int pci_bus_trylock(struct pci_bus *bus)
+ {
+ struct pci_dev *dev;
+
++ if (!pci_dev_trylock(bus->self))
++ return 0;
++
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+- if (!pci_dev_trylock(dev))
+- goto unlock;
+ if (dev->subordinate) {
+- if (!pci_bus_trylock(dev->subordinate)) {
+- pci_dev_unlock(dev);
++ if (!pci_bus_trylock(dev->subordinate))
+ goto unlock;
+- }
+- }
++ } else if (!pci_dev_trylock(dev))
++ goto unlock;
+ }
+ return 1;
+
+@@ -5481,8 +5485,10 @@ static int pci_bus_trylock(struct pci_bus *bus)
+ list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
+ if (dev->subordinate)
+ pci_bus_unlock(dev->subordinate);
+- pci_dev_unlock(dev);
++ else
++ pci_dev_unlock(dev);
+ }
++ pci_dev_unlock(bus->self);
+ return 0;
+ }
+
+@@ -5514,9 +5520,10 @@ static void pci_slot_lock(struct pci_slot *slot)
+ list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+ if (!dev->slot || dev->slot != slot)
+ continue;
+- pci_dev_lock(dev);
+ if (dev->subordinate)
+ pci_bus_lock(dev->subordinate);
++ else
++ pci_dev_lock(dev);
+ }
+ }
+
+@@ -5542,14 +5549,13 @@ static int pci_slot_trylock(struct pci_slot *slot)
+ list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+ if (!dev->slot || dev->slot != slot)
+ continue;
+- if (!pci_dev_trylock(dev))
+- goto unlock;
+ if (dev->subordinate) {
+ if (!pci_bus_trylock(dev->subordinate)) {
+ pci_dev_unlock(dev);
+ goto unlock;
+ }
+- }
++ } else if (!pci_dev_trylock(dev))
++ goto unlock;
+ }
+ return 1;
+
+@@ -5560,7 +5566,8 @@ static int pci_slot_trylock(struct pci_slot *slot)
+ continue;
+ if (dev->subordinate)
+ pci_bus_unlock(dev->subordinate);
+- pci_dev_unlock(dev);
++ else
++ pci_dev_unlock(dev);
+ }
+ return 0;
+ }
+--
+2.43.0
+
--- /dev/null
+From f06defc5d8f8c6e4bac5ad0fea46b7dfe7f0923e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 1 Jul 2024 13:15:06 +0530
+Subject: pci/hotplug/pnv_php: Fix hotplug driver crash on Powernv
+
+From: Krishna Kumar <krishnak@linux.ibm.com>
+
+[ Upstream commit 335e35b748527f0c06ded9eebb65387f60647fda ]
+
+The hotplug driver for powerpc (pci/hotplug/pnv_php.c) causes a kernel
+crash when we try to hot-unplug/disable the PCIe switch/bridge from
+the PHB.
+
+The crash occurs because, although the MSI data structure has already
+been released during the disable/hot-unplug path and assigned NULL, the
+unregistration code was still trying to explicitly disable MSI, which
+causes the NULL pointer dereference and kernel crash.
+
+The patch fixes the check in the unregistration path to prevent invoking
+pci_disable_msi/msix(), since its data structure is already freed.
+
+Reported-by: Timothy Pearson <tpearson@raptorengineering.com>
+Closes: https://lore.kernel.org/all/1981605666.2142272.1703742465927.JavaMail.zimbra@raptorengineeringinc.com/
+Acked-by: Bjorn Helgaas <bhelgaas@google.com>
+Tested-by: Shawn Anastasio <sanastasio@raptorengineering.com>
+Signed-off-by: Krishna Kumar <krishnak@linux.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://msgid.link/20240701074513.94873-2-krishnak@linux.ibm.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/hotplug/pnv_php.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c
+index 694349be9d0a..573a41869c15 100644
+--- a/drivers/pci/hotplug/pnv_php.c
++++ b/drivers/pci/hotplug/pnv_php.c
+@@ -40,7 +40,6 @@ static void pnv_php_disable_irq(struct pnv_php_slot *php_slot,
+ bool disable_device)
+ {
+ struct pci_dev *pdev = php_slot->pdev;
+- int irq = php_slot->irq;
+ u16 ctrl;
+
+ if (php_slot->irq > 0) {
+@@ -59,7 +58,7 @@ static void pnv_php_disable_irq(struct pnv_php_slot *php_slot,
+ php_slot->wq = NULL;
+ }
+
+- if (disable_device || irq > 0) {
++ if (disable_device) {
+ if (pdev->msix_enabled)
+ pci_disable_msix(pdev);
+ else if (pdev->msi_enabled)
+--
+2.43.0
+
--- /dev/null
+From 5d5c2499d2839d40e43491208a878328cc48116e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 28 Jun 2024 13:45:29 +0200
+Subject: PCI: keystone: Add workaround for Errata #i2037 (AM65x SR 1.0)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Kishon Vijay Abraham I <kishon@ti.com>
+
+[ Upstream commit 86f271f22bbb6391410a07e08d6ca3757fda01fa ]
+
+Errata #i2037 in AM65x/DRA80xM Processors Silicon Revision 1.0
+(SPRZ452D_July 2018_Revised December 2019 [1]) mentions when an
+inbound PCIe TLP spans more than two internal AXI 128-byte bursts,
+the bus may corrupt the packet payload and the corrupt data may
+cause associated applications or the processor to hang.
+
+The workaround for Errata #i2037 is to limit the maximum read
+request size and maximum payload size to 128 bytes. Add workaround
+for Errata #i2037 here.
+
+The errata and workaround are applicable only to AM65x SR 1.0; later
+versions of the silicon have this fixed.
+
+[1] -> https://www.ti.com/lit/er/sprz452i/sprz452i.pdf
+
+Link: https://lore.kernel.org/linux-pci/16e1fcae-1ea7-46be-b157-096e05661b15@siemens.com
+Signed-off-by: Kishon Vijay Abraham I <kishon@ti.com>
+Signed-off-by: Achal Verma <a-verma1@ti.com>
+Signed-off-by: Vignesh Raghavendra <vigneshr@ti.com>
+Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
+Signed-off-by: Krzysztof Wilczyński <kwilczynski@kernel.org>
+Reviewed-by: Siddharth Vadapalli <s-vadapalli@ti.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/controller/dwc/pci-keystone.c | 44 ++++++++++++++++++++++-
+ 1 file changed, 43 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
+index cd0e0022f91d..483c95406513 100644
+--- a/drivers/pci/controller/dwc/pci-keystone.c
++++ b/drivers/pci/controller/dwc/pci-keystone.c
+@@ -34,6 +34,11 @@
+ #define PCIE_DEVICEID_SHIFT 16
+
+ /* Application registers */
++#define PID 0x000
++#define RTL GENMASK(15, 11)
++#define RTL_SHIFT 11
++#define AM6_PCI_PG1_RTL_VER 0x15
++
+ #define CMD_STATUS 0x004
+ #define LTSSM_EN_VAL BIT(0)
+ #define OB_XLAT_EN_VAL BIT(1)
+@@ -104,6 +109,8 @@
+
+ #define to_keystone_pcie(x) dev_get_drvdata((x)->dev)
+
++#define PCI_DEVICE_ID_TI_AM654X 0xb00c
++
+ struct ks_pcie_of_data {
+ enum dw_pcie_device_mode mode;
+ const struct dw_pcie_host_ops *host_ops;
+@@ -516,7 +523,11 @@ static int ks_pcie_start_link(struct dw_pcie *pci)
+ static void ks_pcie_quirk(struct pci_dev *dev)
+ {
+ struct pci_bus *bus = dev->bus;
++ struct keystone_pcie *ks_pcie;
++ struct device *bridge_dev;
+ struct pci_dev *bridge;
++ u32 val;
++
+ static const struct pci_device_id rc_pci_devids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
+ .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
+@@ -528,6 +539,11 @@ static void ks_pcie_quirk(struct pci_dev *dev)
+ .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
+ { 0, },
+ };
++ static const struct pci_device_id am6_pci_devids[] = {
++ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654X),
++ .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
++ { 0, },
++ };
+
+ if (pci_is_root_bus(bus))
+ bridge = dev;
+@@ -549,10 +565,36 @@ static void ks_pcie_quirk(struct pci_dev *dev)
+ */
+ if (pci_match_id(rc_pci_devids, bridge)) {
+ if (pcie_get_readrq(dev) > 256) {
+- dev_info(&dev->dev, "limiting MRRS to 256\n");
++ dev_info(&dev->dev, "limiting MRRS to 256 bytes\n");
+ pcie_set_readrq(dev, 256);
+ }
+ }
++
++ /*
++ * Memory transactions fail with PCI controller in AM654 PG1.0
++ * when MRRS is set to more than 128 bytes. Force the MRRS to
++ * 128 bytes in all downstream devices.
++ */
++ if (pci_match_id(am6_pci_devids, bridge)) {
++ bridge_dev = pci_get_host_bridge_device(dev);
++ if (!bridge_dev || !bridge_dev->parent)
++ return;
++
++ ks_pcie = dev_get_drvdata(bridge_dev->parent);
++ if (!ks_pcie)
++ return;
++
++ val = ks_pcie_app_readl(ks_pcie, PID);
++ val &= RTL;
++ val >>= RTL_SHIFT;
++ if (val != AM6_PCI_PG1_RTL_VER)
++ return;
++
++ if (pcie_get_readrq(dev) > 128) {
++ dev_info(&dev->dev, "limiting MRRS to 128 bytes\n");
++ pcie_set_readrq(dev, 128);
++ }
++ }
+ }
+ DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk);
+
+--
+2.43.0
+
--- /dev/null
+From a753677d5c1c468ac87a1f058b30fb88cb912b66 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 11 Mar 2024 19:41:35 +0530
+Subject: PCI: qcom: Override NO_SNOOP attribute for SA8775P RC
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Mrinmay Sarkar <quic_msarkar@quicinc.com>
+
+[ Upstream commit 1d648bf79d4dca909f242b1a0cdc458e4f9d0253 ]
+
+Due to some hardware changes, SA8775P sets the NO_SNOOP attribute in its
+TLPs for all the PCIe controllers. When the NO_SNOOP attribute is set,
+the requester indicates that no cache coherency issue exists for the
+addressed memory on the endpoint, i.e., the memory is not cached. But in
+reality, the requester cannot assume this unless it has complete
+control/visibility over the addressed memory on the endpoint.
+
+In the worst case, if the memory is cached on the endpoint, it may lead
+to memory corruption issues. It should be noted that the caching of
+memory on the endpoint does not depend solely on the NO_SNOOP attribute
+in the TLP.
+
+So to avoid the corruption, this patch overrides the NO_SNOOP attribute
+by setting the PCIE_PARF_NO_SNOOP_OVERIDE register. This patch is not
+needed for other upstream-supported platforms since they do not set the
+NO_SNOOP attribute by default.
+
+SA8775P has IP version 1.34.0, so introduce a new config (cfg_1_34_0) for
+this platform. Add an override_no_snoop flag to struct qcom_pcie_cfg, set
+it to true in cfg_1_34_0, and enable cache snooping if this flag is true.
+
+Link: https://lore.kernel.org/linux-pci/1710166298-27144-2-git-send-email-quic_msarkar@quicinc.com
+Signed-off-by: Mrinmay Sarkar <quic_msarkar@quicinc.com>
+Signed-off-by: Krzysztof Wilczyński <kwilczynski@kernel.org>
+[bhelgaas: wrap comments to fit in 80 columns]
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/controller/dwc/pcie-qcom.c | 25 ++++++++++++++++++++++++-
+ 1 file changed, 24 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
+index 14772edcf0d3..7fa1fe5a29e3 100644
+--- a/drivers/pci/controller/dwc/pcie-qcom.c
++++ b/drivers/pci/controller/dwc/pcie-qcom.c
+@@ -51,6 +51,7 @@
+ #define PARF_SID_OFFSET 0x234
+ #define PARF_BDF_TRANSLATE_CFG 0x24c
+ #define PARF_SLV_ADDR_SPACE_SIZE 0x358
++#define PARF_NO_SNOOP_OVERIDE 0x3d4
+ #define PARF_DEVICE_TYPE 0x1000
+ #define PARF_BDF_TO_SID_TABLE_N 0x2000
+ #define PARF_BDF_TO_SID_CFG 0x2c00
+@@ -118,6 +119,10 @@
+ /* PARF_LTSSM register fields */
+ #define LTSSM_EN BIT(8)
+
++/* PARF_NO_SNOOP_OVERIDE register fields */
++#define WR_NO_SNOOP_OVERIDE_EN BIT(1)
++#define RD_NO_SNOOP_OVERIDE_EN BIT(3)
++
+ /* PARF_DEVICE_TYPE register fields */
+ #define DEVICE_TYPE_RC 0x4
+
+@@ -231,8 +236,15 @@ struct qcom_pcie_ops {
+ int (*config_sid)(struct qcom_pcie *pcie);
+ };
+
++ /**
++ * struct qcom_pcie_cfg - Per SoC config struct
++ * @ops: qcom PCIe ops structure
++ * @override_no_snoop: Override NO_SNOOP attribute in TLP to enable cache
++ * snooping
++ */
+ struct qcom_pcie_cfg {
+ const struct qcom_pcie_ops *ops;
++ bool override_no_snoop;
+ bool no_l0s;
+ };
+
+@@ -986,6 +998,12 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
+
+ static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
+ {
++ const struct qcom_pcie_cfg *pcie_cfg = pcie->cfg;
++
++ if (pcie_cfg->override_no_snoop)
++ writel(WR_NO_SNOOP_OVERIDE_EN | RD_NO_SNOOP_OVERIDE_EN,
++ pcie->parf + PARF_NO_SNOOP_OVERIDE);
++
+ qcom_pcie_clear_aspm_l0s(pcie->pci);
+ qcom_pcie_clear_hpc(pcie->pci);
+
+@@ -1366,6 +1384,11 @@ static const struct qcom_pcie_cfg cfg_1_9_0 = {
+ .ops = &ops_1_9_0,
+ };
+
++static const struct qcom_pcie_cfg cfg_1_34_0 = {
++ .ops = &ops_1_9_0,
++ .override_no_snoop = true,
++};
++
+ static const struct qcom_pcie_cfg cfg_2_1_0 = {
+ .ops = &ops_2_1_0,
+ };
+@@ -1667,7 +1690,7 @@ static const struct of_device_id qcom_pcie_match[] = {
+ { .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 },
+ { .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 },
+ { .compatible = "qcom,pcie-sa8540p", .data = &cfg_sc8280xp },
+- { .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_9_0},
++ { .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_34_0},
+ { .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sc8280xp", .data = &cfg_sc8280xp },
+--
+2.43.0
+
--- /dev/null
+From 174639590814bd054234507a19ca5bf586096e91 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 12 May 2024 23:31:21 +0100
+Subject: pcmcia: Use resource_size function on resource object
+
+From: Jules Irenge <jbi.octave@gmail.com>
+
+[ Upstream commit 24a025497e7e883bd2adef5d0ece1e9b9268009f ]
+
+Coccinelle reports a warning:
+
+WARNING: Suspicious code. resource_size is maybe missing with root
+
+The root cause is that the resource_size() function is not used where it
+is needed.
+
+Use resource_size() on the variable "root" of type struct resource.
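+
+For context, resource_size() accounts for the inclusive end address
+(end - start + 1), which the open-coded subtraction did not. A tiny
+generic sketch (not the yenta code):
+
+  #include <linux/ioport.h>
+
+  static resource_size_t avail_bytes(const struct resource *root)
+  {
+          /* resource_size(root) == root->end - root->start + 1 */
+          return resource_size(root);
+  }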
+
+Signed-off-by: Jules Irenge <jbi.octave@gmail.com>
+Signed-off-by: Dominik Brodowski <linux@dominikbrodowski.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pcmcia/yenta_socket.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
+index 1365eaa20ff4..ff169124929c 100644
+--- a/drivers/pcmcia/yenta_socket.c
++++ b/drivers/pcmcia/yenta_socket.c
+@@ -638,11 +638,11 @@ static int yenta_search_one_res(struct resource *root, struct resource *res,
+ start = PCIBIOS_MIN_CARDBUS_IO;
+ end = ~0U;
+ } else {
+- unsigned long avail = root->end - root->start;
++ unsigned long avail = resource_size(root);
+ int i;
+ size = BRIDGE_MEM_MAX;
+- if (size > avail/8) {
+- size = (avail+1)/8;
++ if (size > (avail - 1) / 8) {
++ size = avail / 8;
+ /* round size down to next power of 2 */
+ i = 0;
+ while ((size /= 2) != 0)
+--
+2.43.0
+
--- /dev/null
+From 7f0c7b133eb3ad503cd2de6f62256824b8c09bc8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 27 Aug 2024 22:29:53 -0700
+Subject: perf lock contention: Fix spinlock and rwlock accounting
+
+From: Namhyung Kim <namhyung@kernel.org>
+
+[ Upstream commit 287bd5cf06e0f2c02293ce942777ad1f18059ed3 ]
+
+The spinlock and rwlock use a single-element per-cpu array to track
+current locks for performance reasons. But this means the key is always
+available, and lock stats cannot simply be accounted from the array
+because some of the entries are invalid.
+
+In fact, the contention_end() BPF program invalidates the entry by
+setting the 'lock' value to 0 instead of deleting the entry, as it does
+for the hashmap. So account_end_timestamp() should skip entries with a
+lock value of 0.
+
+Otherwise, it'd have spurious high contention on an idle machine:
+
+ $ sudo perf lock con -ab -Y spinlock sleep 3
+ contended total wait max wait avg wait type caller
+
+ 8 4.72 s 1.84 s 590.46 ms spinlock rcu_core+0xc7
+ 8 1.87 s 1.87 s 233.48 ms spinlock process_one_work+0x1b5
+ 2 1.87 s 1.87 s 933.92 ms spinlock worker_thread+0x1a2
+ 3 1.81 s 1.81 s 603.93 ms spinlock tmigr_update_events+0x13c
+ 2 1.72 s 1.72 s 861.98 ms spinlock tick_do_update_jiffies64+0x25
+ 6 42.48 us 13.02 us 7.08 us spinlock futex_q_lock+0x2a
+ 1 13.03 us 13.03 us 13.03 us spinlock futex_wake+0xce
+ 1 11.61 us 11.61 us 11.61 us spinlock rcu_core+0xc7
+
+I don't believe it has contention on a spinlock longer than 1 second.
+After this change, it only reports some small contentions.
+
+ $ sudo perf lock con -ab -Y spinlock sleep 3
+ contended total wait max wait avg wait type caller
+
+ 4 133.51 us 43.29 us 33.38 us spinlock tick_do_update_jiffies64+0x25
+ 4 69.06 us 31.82 us 17.27 us spinlock process_one_work+0x1b5
+ 2 50.66 us 25.77 us 25.33 us spinlock rcu_core+0xc7
+ 1 28.45 us 28.45 us 28.45 us spinlock rcu_core+0xc7
+ 1 24.77 us 24.77 us 24.77 us spinlock tmigr_update_events+0x13c
+ 1 23.34 us 23.34 us 23.34 us spinlock raw_spin_rq_lock_nested+0x15
+
+Fixes: b5711042a1c8 ("perf lock contention: Use per-cpu array map for spinlocks")
+Reported-by: Xi Wang <xii@google.com>
+Cc: Song Liu <song@kernel.org>
+Cc: bpf@vger.kernel.org
+Link: https://lore.kernel.org/r/20240828052953.1445862-1-namhyung@kernel.org
+Signed-off-by: Namhyung Kim <namhyung@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/util/bpf_lock_contention.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/tools/perf/util/bpf_lock_contention.c b/tools/perf/util/bpf_lock_contention.c
+index b4cb3fe5cc25..bc4e92c0c08b 100644
+--- a/tools/perf/util/bpf_lock_contention.c
++++ b/tools/perf/util/bpf_lock_contention.c
+@@ -286,6 +286,9 @@ static void account_end_timestamp(struct lock_contention *con)
+ goto next;
+
+ for (int i = 0; i < total_cpus; i++) {
++ if (cpu_data[i].lock == 0)
++ continue;
++
+ update_lock_stat(stat_fd, -1, end_ts, aggr_mode,
+ &cpu_data[i]);
+ }
+--
+2.43.0
+
--- /dev/null
+From fdb17f7a9a73de92ad790b7e15148eb94a65f194 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 8 Jul 2024 12:33:34 -0700
+Subject: perf/x86/intel: Hide Topdown metrics events if the feature is not
+ enumerated
+
+From: Kan Liang <kan.liang@linux.intel.com>
+
+[ Upstream commit 556a7c039a52c21da33eaae9269984a1ef59189b ]
+
+The below error is observed on Ice Lake VM.
+
+$ perf stat
+Error:
+The sys_perf_event_open() syscall returned with 22 (Invalid argument)
+for event (slots).
+/bin/dmesg | grep -i perf may provide additional information.
+
+In a virtualization env, the Topdown metrics and the slots event aren't
+supported yet. The guest CPUID doesn't enumerate them. However, the
+current kernel unconditionally exposes the slots event and the Topdown
+metrics events to sysfs, which misleads the perf tool and triggers the
+error.
+
+Hide the perf-metrics topdown events and the slots event if the
+perf-metrics feature is not enumerated.
+
+The big core of a hybrid platform can also support the perf-metrics
+feature. Fix the hybrid platform as well.
+
+Closes: https://lore.kernel.org/lkml/CAM9d7cj8z+ryyzUHR+P1Dcpot2jjW+Qcc4CPQpfafTXN=LEU0Q@mail.gmail.com/
+Reported-by: Dongli Zhang <dongli.zhang@oracle.com>
+Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Tested-by: Dongli Zhang <dongli.zhang@oracle.com>
+Link: https://lkml.kernel.org/r/20240708193336.1192217-2-kan.liang@linux.intel.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/events/intel/core.c | 34 +++++++++++++++++++++++++++++++++-
+ 1 file changed, 33 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index 05ec651663cb..dcac96133cb6 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -5733,8 +5733,22 @@ exra_is_visible(struct kobject *kobj, struct attribute *attr, int i)
+ return x86_pmu.version >= 2 ? attr->mode : 0;
+ }
+
++static umode_t
++td_is_visible(struct kobject *kobj, struct attribute *attr, int i)
++{
++ /*
++ * Hide the perf metrics topdown events
++ * if the feature is not enumerated.
++ */
++ if (x86_pmu.num_topdown_events)
++ return x86_pmu.intel_cap.perf_metrics ? attr->mode : 0;
++
++ return attr->mode;
++}
++
+ static struct attribute_group group_events_td = {
+ .name = "events",
++ .is_visible = td_is_visible,
+ };
+
+ static struct attribute_group group_events_mem = {
+@@ -5936,9 +5950,27 @@ static umode_t hybrid_format_is_visible(struct kobject *kobj,
+ return (cpu >= 0) && (pmu->pmu_type & pmu_attr->pmu_type) ? attr->mode : 0;
+ }
+
++static umode_t hybrid_td_is_visible(struct kobject *kobj,
++ struct attribute *attr, int i)
++{
++ struct device *dev = kobj_to_dev(kobj);
++ struct x86_hybrid_pmu *pmu =
++ container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
++
++ if (!is_attr_for_this_pmu(kobj, attr))
++ return 0;
++
++
++ /* Only the big core supports perf metrics */
++ if (pmu->pmu_type == hybrid_big)
++ return pmu->intel_cap.perf_metrics ? attr->mode : 0;
++
++ return attr->mode;
++}
++
+ static struct attribute_group hybrid_group_events_td = {
+ .name = "events",
+- .is_visible = hybrid_events_is_visible,
++ .is_visible = hybrid_td_is_visible,
+ };
+
+ static struct attribute_group hybrid_group_events_mem = {
+--
+2.43.0
+
--- /dev/null
+From 4b4ec64502b202097ac91cca9a9b67fad8a6807f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 28 Jun 2024 16:55:39 -0400
+Subject: phy: zynqmp: Take the phy mutex in xlate
+
+From: Sean Anderson <sean.anderson@linux.dev>
+
+[ Upstream commit d79c6840917097285e03a49f709321f5fb972750 ]
+
+Take the phy mutex in xlate to protect against concurrent
+modification/access to gtr_phy. This does not typically cause any
+issues, since in most systems the phys are only xlated once and
+thereafter accessed with the phy API (which takes the locks). However,
+we are about to allow userspace to access phys for debugging, so it's
+important to avoid any data races.
+
+Signed-off-by: Sean Anderson <sean.anderson@linux.dev>
+Link: https://lore.kernel.org/r/20240628205540.3098010-5-sean.anderson@linux.dev
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/phy/xilinx/phy-zynqmp.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/phy/xilinx/phy-zynqmp.c b/drivers/phy/xilinx/phy-zynqmp.c
+index d7d12cf3011a..9cf0007cfd64 100644
+--- a/drivers/phy/xilinx/phy-zynqmp.c
++++ b/drivers/phy/xilinx/phy-zynqmp.c
+@@ -846,6 +846,7 @@ static struct phy *xpsgtr_xlate(struct device *dev,
+ phy_type = args->args[1];
+ phy_instance = args->args[2];
+
++ guard(mutex)(&gtr_phy->phy->mutex);
+ ret = xpsgtr_set_lane_type(gtr_phy, phy_type, phy_instance);
+ if (ret < 0) {
+ dev_err(gtr_dev->dev, "Invalid PHY type and/or instance\n");
+--
+2.43.0
+
--- /dev/null
+From 73634e44e9e896d417d1cdcd9183bb9be67e03aa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 30 Aug 2024 09:54:28 +0300
+Subject: platform/x86: dell-smbios: Fix error path in dell_smbios_init()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Aleksandr Mishin <amishin@t-argos.ru>
+
+[ Upstream commit ffc17e1479e8e9459b7afa80e5d9d40d0dd78abb ]
+
+In case of an error in build_tokens_sysfs(), all the memory that has been
+allocated is freed at the end of that function. But then free_group() is
+called, which performs memory deallocation again.
+
+Also, instead of the free_group() call, there should be calls to
+exit_dell_smbios_smm() and exit_dell_smbios_wmi(), since there is
+initialization but no release of those resources in case of an error.
+
+Fix these issues by replacing free_group() call with
+exit_dell_smbios_wmi() and exit_dell_smbios_smm().
+
+Found by Linux Verification Center (linuxtesting.org) with SVACE.
+
+Fixes: 33b9ca1e53b4 ("platform/x86: dell-smbios: Add a sysfs interface for SMBIOS tokens")
+Signed-off-by: Aleksandr Mishin <amishin@t-argos.ru>
+Link: https://lore.kernel.org/r/20240830065428.9544-1-amishin@t-argos.ru
+Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/platform/x86/dell/dell-smbios-base.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/platform/x86/dell/dell-smbios-base.c b/drivers/platform/x86/dell/dell-smbios-base.c
+index b562ed99ec4e..4702669dbb60 100644
+--- a/drivers/platform/x86/dell/dell-smbios-base.c
++++ b/drivers/platform/x86/dell/dell-smbios-base.c
+@@ -587,7 +587,10 @@ static int __init dell_smbios_init(void)
+ return 0;
+
+ fail_sysfs:
+- free_group(platform_device);
++ if (!wmi)
++ exit_dell_smbios_wmi();
++ if (!smm)
++ exit_dell_smbios_smm();
+
+ fail_create_group:
+ platform_device_del(platform_device);
+--
+2.43.0
+
--- /dev/null
+From d61cfd990a36ab6a9e55d8c433c891b73cdd179a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 30 May 2024 19:44:12 -0500
+Subject: powerpc/rtas: Prevent Spectre v1 gadget construction in sys_rtas()
+
+From: Nathan Lynch <nathanl@linux.ibm.com>
+
+[ Upstream commit 0974d03eb479384466d828d65637814bee6b26d7 ]
+
+Smatch warns:
+
+ arch/powerpc/kernel/rtas.c:1932 __do_sys_rtas() warn: potential
+ spectre issue 'args.args' [r] (local cap)
+
+The 'nargs' and 'nret' locals come directly from a user-supplied
+buffer and are used as indexes into a small stack-based array and as
+inputs to copy_to_user() after they are subject to bounds checks.
+
+Use array_index_nospec() after the bounds checks to clamp these values
+for speculative execution.
+
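+The pattern is the standard Spectre v1 mitigation: validate first, then
+clamp the value that will be used as an index so it cannot exceed the
+array bounds under speculative execution. Minimal generic sketch with
+hypothetical buf/idx names:
+
+	if (idx >= ARRAY_SIZE(buf))
+		return -EINVAL;
+	idx = array_index_nospec(idx, ARRAY_SIZE(buf));
+	val = buf[idx];
+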
+Signed-off-by: Nathan Lynch <nathanl@linux.ibm.com>
+Reported-by: Breno Leitao <leitao@debian.org>
+Reviewed-by: Breno Leitao <leitao@debian.org>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://msgid.link/20240530-sys_rtas-nargs-nret-v1-1-129acddd4d89@linux.ibm.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/kernel/rtas.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
+index 8064d9c3de86..f7e86e09c49f 100644
+--- a/arch/powerpc/kernel/rtas.c
++++ b/arch/powerpc/kernel/rtas.c
+@@ -19,6 +19,7 @@
+ #include <linux/lockdep.h>
+ #include <linux/memblock.h>
+ #include <linux/mutex.h>
++#include <linux/nospec.h>
+ #include <linux/of.h>
+ #include <linux/of_fdt.h>
+ #include <linux/reboot.h>
+@@ -1916,6 +1917,9 @@ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
+ || nargs + nret > ARRAY_SIZE(args.args))
+ return -EINVAL;
+
++ nargs = array_index_nospec(nargs, ARRAY_SIZE(args.args));
++ nret = array_index_nospec(nret, ARRAY_SIZE(args.args) - nargs);
++
+ /* Copy in args. */
+ if (copy_from_user(args.args, uargs->args,
+ nargs * sizeof(rtas_arg_t)) != 0)
+--
+2.43.0
+
--- /dev/null
+From cceaaea302ded0ead7875969aa3bc162b8dceb62 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 29 Aug 2024 11:36:02 -0700
+Subject: ptp: ocp: adjust sysfs entries to expose tty information
+
+From: Vadim Fedorenko <vadfed@meta.com>
+
+[ Upstream commit 82ace0c8fe9b025eaa273365e27057402cdaeb02 ]
+
+Implement an additional attribute group to expose serial port information.
+The Fixes tag points to the commit which introduced the change in the
+serial port subsystem and made it impossible to use symlinks.
+
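+For example, on a system where the GNSS UART was registered as ttyS5
+(hypothetical instance and line numbers), reading
+/sys/class/timecard/ocp0/tty/ttyGNSS now returns "ttyS5", and ports whose
+line is -1 are hidden by the group's is_visible callback.
+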
+Fixes: b286f4e87e32 ("serial: core: Move tty and serdev to be children of serial core port device")
+Signed-off-by: Vadim Fedorenko <vadfed@meta.com>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ptp/ptp_ocp.c | 62 +++++++++++++++++++++++++++++++++++--------
+ 1 file changed, 51 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
+index 46369de8e30b..e7479b9b90cb 100644
+--- a/drivers/ptp/ptp_ocp.c
++++ b/drivers/ptp/ptp_ocp.c
+@@ -3361,6 +3361,54 @@ static EXT_ATTR_RO(freq, frequency, 1);
+ static EXT_ATTR_RO(freq, frequency, 2);
+ static EXT_ATTR_RO(freq, frequency, 3);
+
++static ssize_t
++ptp_ocp_tty_show(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ struct dev_ext_attribute *ea = to_ext_attr(attr);
++ struct ptp_ocp *bp = dev_get_drvdata(dev);
++
++ return sysfs_emit(buf, "ttyS%d", bp->port[(uintptr_t)ea->var].line);
++}
++
++static umode_t
++ptp_ocp_timecard_tty_is_visible(struct kobject *kobj, struct attribute *attr, int n)
++{
++ struct ptp_ocp *bp = dev_get_drvdata(kobj_to_dev(kobj));
++ struct ptp_ocp_serial_port *port;
++ struct device_attribute *dattr;
++ struct dev_ext_attribute *ea;
++
++ if (strncmp(attr->name, "tty", 3))
++ return attr->mode;
++
++ dattr = container_of(attr, struct device_attribute, attr);
++ ea = container_of(dattr, struct dev_ext_attribute, attr);
++ port = &bp->port[(uintptr_t)ea->var];
++ return port->line == -1 ? 0 : 0444;
++}
++
++#define EXT_TTY_ATTR_RO(_name, _val) \
++ struct dev_ext_attribute dev_attr_tty##_name = \
++ { __ATTR(tty##_name, 0444, ptp_ocp_tty_show, NULL), (void *)_val }
++
++static EXT_TTY_ATTR_RO(GNSS, PORT_GNSS);
++static EXT_TTY_ATTR_RO(GNSS2, PORT_GNSS2);
++static EXT_TTY_ATTR_RO(MAC, PORT_MAC);
++static EXT_TTY_ATTR_RO(NMEA, PORT_NMEA);
++static struct attribute *ptp_ocp_timecard_tty_attrs[] = {
++ &dev_attr_ttyGNSS.attr.attr,
++ &dev_attr_ttyGNSS2.attr.attr,
++ &dev_attr_ttyMAC.attr.attr,
++ &dev_attr_ttyNMEA.attr.attr,
++ NULL,
++};
++
++static const struct attribute_group ptp_ocp_timecard_tty_group = {
++ .name = "tty",
++ .attrs = ptp_ocp_timecard_tty_attrs,
++ .is_visible = ptp_ocp_timecard_tty_is_visible,
++};
++
+ static ssize_t
+ serialnum_show(struct device *dev, struct device_attribute *attr, char *buf)
+ {
+@@ -3790,6 +3838,7 @@ static const struct attribute_group fb_timecard_group = {
+
+ static const struct ocp_attr_group fb_timecard_groups[] = {
+ { .cap = OCP_CAP_BASIC, .group = &fb_timecard_group },
++ { .cap = OCP_CAP_BASIC, .group = &ptp_ocp_timecard_tty_group },
+ { .cap = OCP_CAP_SIGNAL, .group = &fb_timecard_signal0_group },
+ { .cap = OCP_CAP_SIGNAL, .group = &fb_timecard_signal1_group },
+ { .cap = OCP_CAP_SIGNAL, .group = &fb_timecard_signal2_group },
+@@ -3829,6 +3878,7 @@ static const struct attribute_group art_timecard_group = {
+
+ static const struct ocp_attr_group art_timecard_groups[] = {
+ { .cap = OCP_CAP_BASIC, .group = &art_timecard_group },
++ { .cap = OCP_CAP_BASIC, .group = &ptp_ocp_timecard_tty_group },
+ { },
+ };
+
+@@ -3856,6 +3906,7 @@ static const struct attribute_group adva_timecard_group = {
+
+ static const struct ocp_attr_group adva_timecard_groups[] = {
+ { .cap = OCP_CAP_BASIC, .group = &adva_timecard_group },
++ { .cap = OCP_CAP_BASIC, .group = &ptp_ocp_timecard_tty_group },
+ { .cap = OCP_CAP_SIGNAL, .group = &fb_timecard_signal0_group },
+ { .cap = OCP_CAP_SIGNAL, .group = &fb_timecard_signal1_group },
+ { .cap = OCP_CAP_FREQ, .group = &fb_timecard_freq0_group },
+@@ -4361,14 +4412,6 @@ ptp_ocp_complete(struct ptp_ocp *bp)
+ {
+ struct pps_device *pps;
+ char buf[32];
+- int i;
+-
+- for (i = 0; i < __PORT_COUNT; i++) {
+- if (bp->port[i].line != -1) {
+- sprintf(buf, "ttyS%d", bp->port[i].line);
+- ptp_ocp_link_child(bp, buf, ptp_ocp_tty_port_name(i));
+- }
+- }
+
+ sprintf(buf, "ptp%d", ptp_clock_index(bp->ptp));
+ ptp_ocp_link_child(bp, buf, "ptp");
+@@ -4440,9 +4483,6 @@ ptp_ocp_detach_sysfs(struct ptp_ocp *bp)
+ {
+ struct device *dev = &bp->dev;
+
+- sysfs_remove_link(&dev->kobj, "ttyGNSS");
+- sysfs_remove_link(&dev->kobj, "ttyGNSS2");
+- sysfs_remove_link(&dev->kobj, "ttyMAC");
+ sysfs_remove_link(&dev->kobj, "ptp");
+ sysfs_remove_link(&dev->kobj, "pps");
+ }
+--
+2.43.0
+
--- /dev/null
+From 2da41e40c3f766561c5fd794348e92a14209c744 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 29 Aug 2024 11:36:01 -0700
+Subject: ptp: ocp: convert serial ports to array
+
+From: Vadim Fedorenko <vadfed@meta.com>
+
+[ Upstream commit d7875b4b078f7e2d862e88aed99c3ea0381aa189 ]
+
+Simplify the serial port management code by using an array of ports and
+helpers to get the name of a port. This change is needed to make the
+next patch simpler.
+
+Signed-off-by: Vadim Fedorenko <vadfed@meta.com>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Stable-dep-of: 82ace0c8fe9b ("ptp: ocp: adjust sysfs entries to expose tty information")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ptp/ptp_ocp.c | 120 ++++++++++++++++++++----------------------
+ 1 file changed, 57 insertions(+), 63 deletions(-)
+
+diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
+index ee2ced88ab34..46369de8e30b 100644
+--- a/drivers/ptp/ptp_ocp.c
++++ b/drivers/ptp/ptp_ocp.c
+@@ -316,6 +316,15 @@ struct ptp_ocp_serial_port {
+ #define OCP_SERIAL_LEN 6
+ #define OCP_SMA_NUM 4
+
++enum {
++ PORT_GNSS,
++ PORT_GNSS2,
++ PORT_MAC, /* miniature atomic clock */
++ PORT_NMEA,
++
++ __PORT_COUNT,
++};
++
+ struct ptp_ocp {
+ struct pci_dev *pdev;
+ struct device dev;
+@@ -357,10 +366,7 @@ struct ptp_ocp {
+ struct delayed_work sync_work;
+ int id;
+ int n_irqs;
+- struct ptp_ocp_serial_port gnss_port;
+- struct ptp_ocp_serial_port gnss2_port;
+- struct ptp_ocp_serial_port mac_port; /* miniature atomic clock */
+- struct ptp_ocp_serial_port nmea_port;
++ struct ptp_ocp_serial_port port[__PORT_COUNT];
+ bool fw_loader;
+ u8 fw_tag;
+ u16 fw_version;
+@@ -655,28 +661,28 @@ static struct ocp_resource ocp_fb_resource[] = {
+ },
+ },
+ {
+- OCP_SERIAL_RESOURCE(gnss_port),
++ OCP_SERIAL_RESOURCE(port[PORT_GNSS]),
+ .offset = 0x00160000 + 0x1000, .irq_vec = 3,
+ .extra = &(struct ptp_ocp_serial_port) {
+ .baud = 115200,
+ },
+ },
+ {
+- OCP_SERIAL_RESOURCE(gnss2_port),
++ OCP_SERIAL_RESOURCE(port[PORT_GNSS2]),
+ .offset = 0x00170000 + 0x1000, .irq_vec = 4,
+ .extra = &(struct ptp_ocp_serial_port) {
+ .baud = 115200,
+ },
+ },
+ {
+- OCP_SERIAL_RESOURCE(mac_port),
++ OCP_SERIAL_RESOURCE(port[PORT_MAC]),
+ .offset = 0x00180000 + 0x1000, .irq_vec = 5,
+ .extra = &(struct ptp_ocp_serial_port) {
+ .baud = 57600,
+ },
+ },
+ {
+- OCP_SERIAL_RESOURCE(nmea_port),
++ OCP_SERIAL_RESOURCE(port[PORT_NMEA]),
+ .offset = 0x00190000 + 0x1000, .irq_vec = 10,
+ },
+ {
+@@ -740,7 +746,7 @@ static struct ocp_resource ocp_art_resource[] = {
+ .offset = 0x01000000, .size = 0x10000,
+ },
+ {
+- OCP_SERIAL_RESOURCE(gnss_port),
++ OCP_SERIAL_RESOURCE(port[PORT_GNSS]),
+ .offset = 0x00160000 + 0x1000, .irq_vec = 3,
+ .extra = &(struct ptp_ocp_serial_port) {
+ .baud = 115200,
+@@ -839,7 +845,7 @@ static struct ocp_resource ocp_art_resource[] = {
+ },
+ },
+ {
+- OCP_SERIAL_RESOURCE(mac_port),
++ OCP_SERIAL_RESOURCE(port[PORT_MAC]),
+ .offset = 0x00190000, .irq_vec = 7,
+ .extra = &(struct ptp_ocp_serial_port) {
+ .baud = 9600,
+@@ -950,14 +956,14 @@ static struct ocp_resource ocp_adva_resource[] = {
+ .offset = 0x00220000, .size = 0x1000,
+ },
+ {
+- OCP_SERIAL_RESOURCE(gnss_port),
++ OCP_SERIAL_RESOURCE(port[PORT_GNSS]),
+ .offset = 0x00160000 + 0x1000, .irq_vec = 3,
+ .extra = &(struct ptp_ocp_serial_port) {
+ .baud = 9600,
+ },
+ },
+ {
+- OCP_SERIAL_RESOURCE(mac_port),
++ OCP_SERIAL_RESOURCE(port[PORT_MAC]),
+ .offset = 0x00180000 + 0x1000, .irq_vec = 5,
+ .extra = &(struct ptp_ocp_serial_port) {
+ .baud = 115200,
+@@ -1649,6 +1655,15 @@ ptp_ocp_tod_gnss_name(int idx)
+ return gnss_name[idx];
+ }
+
++static const char *
++ptp_ocp_tty_port_name(int idx)
++{
++ static const char * const tty_name[] = {
++ "GNSS", "GNSS2", "MAC", "NMEA"
++ };
++ return tty_name[idx];
++}
++
+ struct ptp_ocp_nvmem_match_info {
+ struct ptp_ocp *bp;
+ const void * const tag;
+@@ -3960,16 +3975,11 @@ ptp_ocp_summary_show(struct seq_file *s, void *data)
+ bp = dev_get_drvdata(dev);
+
+ seq_printf(s, "%7s: /dev/ptp%d\n", "PTP", ptp_clock_index(bp->ptp));
+- if (bp->gnss_port.line != -1)
+- seq_printf(s, "%7s: /dev/ttyS%d\n", "GNSS1",
+- bp->gnss_port.line);
+- if (bp->gnss2_port.line != -1)
+- seq_printf(s, "%7s: /dev/ttyS%d\n", "GNSS2",
+- bp->gnss2_port.line);
+- if (bp->mac_port.line != -1)
+- seq_printf(s, "%7s: /dev/ttyS%d\n", "MAC", bp->mac_port.line);
+- if (bp->nmea_port.line != -1)
+- seq_printf(s, "%7s: /dev/ttyS%d\n", "NMEA", bp->nmea_port.line);
++ for (i = 0; i < __PORT_COUNT; i++) {
++ if (bp->port[i].line != -1)
++ seq_printf(s, "%7s: /dev/ttyS%d\n", ptp_ocp_tty_port_name(i),
++ bp->port[i].line);
++ }
+
+ memset(sma_val, 0xff, sizeof(sma_val));
+ if (bp->sma_map1) {
+@@ -4279,7 +4289,7 @@ ptp_ocp_dev_release(struct device *dev)
+ static int
+ ptp_ocp_device_init(struct ptp_ocp *bp, struct pci_dev *pdev)
+ {
+- int err;
++ int i, err;
+
+ mutex_lock(&ptp_ocp_lock);
+ err = idr_alloc(&ptp_ocp_idr, bp, 0, 0, GFP_KERNEL);
+@@ -4292,10 +4302,10 @@ ptp_ocp_device_init(struct ptp_ocp *bp, struct pci_dev *pdev)
+
+ bp->ptp_info = ptp_ocp_clock_info;
+ spin_lock_init(&bp->lock);
+- bp->gnss_port.line = -1;
+- bp->gnss2_port.line = -1;
+- bp->mac_port.line = -1;
+- bp->nmea_port.line = -1;
++
++ for (i = 0; i < __PORT_COUNT; i++)
++ bp->port[i].line = -1;
++
+ bp->pdev = pdev;
+
+ device_initialize(&bp->dev);
+@@ -4351,23 +4361,15 @@ ptp_ocp_complete(struct ptp_ocp *bp)
+ {
+ struct pps_device *pps;
+ char buf[32];
++ int i;
+
+- if (bp->gnss_port.line != -1) {
+- sprintf(buf, "ttyS%d", bp->gnss_port.line);
+- ptp_ocp_link_child(bp, buf, "ttyGNSS");
+- }
+- if (bp->gnss2_port.line != -1) {
+- sprintf(buf, "ttyS%d", bp->gnss2_port.line);
+- ptp_ocp_link_child(bp, buf, "ttyGNSS2");
+- }
+- if (bp->mac_port.line != -1) {
+- sprintf(buf, "ttyS%d", bp->mac_port.line);
+- ptp_ocp_link_child(bp, buf, "ttyMAC");
+- }
+- if (bp->nmea_port.line != -1) {
+- sprintf(buf, "ttyS%d", bp->nmea_port.line);
+- ptp_ocp_link_child(bp, buf, "ttyNMEA");
++ for (i = 0; i < __PORT_COUNT; i++) {
++ if (bp->port[i].line != -1) {
++ sprintf(buf, "ttyS%d", bp->port[i].line);
++ ptp_ocp_link_child(bp, buf, ptp_ocp_tty_port_name(i));
++ }
+ }
++
+ sprintf(buf, "ptp%d", ptp_clock_index(bp->ptp));
+ ptp_ocp_link_child(bp, buf, "ptp");
+
+@@ -4416,23 +4418,20 @@ ptp_ocp_info(struct ptp_ocp *bp)
+ };
+ struct device *dev = &bp->pdev->dev;
+ u32 reg;
++ int i;
+
+ ptp_ocp_phc_info(bp);
+
+- ptp_ocp_serial_info(dev, "GNSS", bp->gnss_port.line,
+- bp->gnss_port.baud);
+- ptp_ocp_serial_info(dev, "GNSS2", bp->gnss2_port.line,
+- bp->gnss2_port.baud);
+- ptp_ocp_serial_info(dev, "MAC", bp->mac_port.line, bp->mac_port.baud);
+- if (bp->nmea_out && bp->nmea_port.line != -1) {
+- bp->nmea_port.baud = -1;
++ for (i = 0; i < __PORT_COUNT; i++) {
++ if (i == PORT_NMEA && bp->nmea_out && bp->port[PORT_NMEA].line != -1) {
++ bp->port[PORT_NMEA].baud = -1;
+
+- reg = ioread32(&bp->nmea_out->uart_baud);
+- if (reg < ARRAY_SIZE(nmea_baud))
+- bp->nmea_port.baud = nmea_baud[reg];
+-
+- ptp_ocp_serial_info(dev, "NMEA", bp->nmea_port.line,
+- bp->nmea_port.baud);
++ reg = ioread32(&bp->nmea_out->uart_baud);
++ if (reg < ARRAY_SIZE(nmea_baud))
++ bp->port[PORT_NMEA].baud = nmea_baud[reg];
++ }
++ ptp_ocp_serial_info(dev, ptp_ocp_tty_port_name(i), bp->port[i].line,
++ bp->port[i].baud);
+ }
+ }
+
+@@ -4473,14 +4472,9 @@ ptp_ocp_detach(struct ptp_ocp *bp)
+ for (i = 0; i < 4; i++)
+ if (bp->signal_out[i])
+ ptp_ocp_unregister_ext(bp->signal_out[i]);
+- if (bp->gnss_port.line != -1)
+- serial8250_unregister_port(bp->gnss_port.line);
+- if (bp->gnss2_port.line != -1)
+- serial8250_unregister_port(bp->gnss2_port.line);
+- if (bp->mac_port.line != -1)
+- serial8250_unregister_port(bp->mac_port.line);
+- if (bp->nmea_port.line != -1)
+- serial8250_unregister_port(bp->nmea_port.line);
++ for (i = 0; i < __PORT_COUNT; i++)
++ if (bp->port[i].line != -1)
++ serial8250_unregister_port(bp->port[i].line);
+ platform_device_unregister(bp->spi_flash);
+ platform_device_unregister(bp->i2c_ctrl);
+ if (bp->i2c_clk)
+--
+2.43.0
+
--- /dev/null
+From 8973b3bf96fef2cea3a6fdbf72333bf8c4ca4880 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Sep 2024 14:33:33 +0800
+Subject: r8152: fix the firmware doesn't work
+
+From: Hayes Wang <hayeswang@realtek.com>
+
+[ Upstream commit 8487b4af59d4d7feda4b119dc2d92c67ca25c27e ]
+
+generic_ocp_write() requires the "size" parameter to be 4-byte aligned.
+Therefore, writing the bp would fail if mac->bp_num is odd. Align the
+size to 4 to fix it. This may write one extra bp, but
+rtl8152_is_fw_mac_ok() makes sure the value is 0 for any bp whose
+index is greater than mac->bp_num. That is, there is no influence on the
+firmware.
+
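+A worked example with illustrative numbers: each bp entry is 2 bytes, so
+an odd bp_num such as 5 gives a write size of 5 << 1 = 10 bytes, which
+generic_ocp_write() rejects. ALIGN(10, 4) = 12, i.e. one extra 2-byte bp
+is written, and that extra entry must be 0 per rtl8152_is_fw_mac_ok().
+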
+Besides, I check the return value of generic_ocp_write() to make sure
+everything is correct.
+
+Fixes: e5c266a61186 ("r8152: set bp in bulk")
+Signed-off-by: Hayes Wang <hayeswang@realtek.com>
+Link: https://patch.msgid.link/20240903063333.4502-1-hayeswang@realtek.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/usb/r8152.c | 17 +++++++++++++----
+ 1 file changed, 13 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index 19df1cd9f072..51d5d4f0a8f9 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -5177,14 +5177,23 @@ static void rtl8152_fw_mac_apply(struct r8152 *tp, struct fw_mac *mac)
+ data = (u8 *)mac;
+ data += __le16_to_cpu(mac->fw_offset);
+
+- generic_ocp_write(tp, __le16_to_cpu(mac->fw_reg), 0xff, length, data,
+- type);
++ if (generic_ocp_write(tp, __le16_to_cpu(mac->fw_reg), 0xff, length,
++ data, type) < 0) {
++ dev_err(&tp->intf->dev, "Write %s fw fail\n",
++ type ? "PLA" : "USB");
++ return;
++ }
+
+ ocp_write_word(tp, type, __le16_to_cpu(mac->bp_ba_addr),
+ __le16_to_cpu(mac->bp_ba_value));
+
+- generic_ocp_write(tp, __le16_to_cpu(mac->bp_start), BYTE_EN_DWORD,
+- __le16_to_cpu(mac->bp_num) << 1, mac->bp, type);
++ if (generic_ocp_write(tp, __le16_to_cpu(mac->bp_start), BYTE_EN_DWORD,
++ ALIGN(__le16_to_cpu(mac->bp_num) << 1, 4),
++ mac->bp, type) < 0) {
++ dev_err(&tp->intf->dev, "Write %s bp fail\n",
++ type ? "PLA" : "USB");
++ return;
++ }
+
+ bp_en_addr = __le16_to_cpu(mac->bp_en_addr);
+ if (bp_en_addr)
+--
+2.43.0
+
--- /dev/null
+From 820573b81ef55d4f08bb9ea6b405faa2bb2036e9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 19 Jul 2024 12:40:24 +0200
+Subject: regmap: maple: work around gcc-14.1 false-positive warning
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+[ Upstream commit 542440fd7b30983cae23e32bd22f69a076ec7ef4 ]
+
+With gcc-14.1, there is a false-positive -Wuninitialized warning in
+regcache_maple_drop:
+
+drivers/base/regmap/regcache-maple.c: In function 'regcache_maple_drop':
+drivers/base/regmap/regcache-maple.c:113:23: error: 'lower_index' is used uninitialized [-Werror=uninitialized]
+ 113 | unsigned long lower_index, lower_last;
+ | ^~~~~~~~~~~
+drivers/base/regmap/regcache-maple.c:113:36: error: 'lower_last' is used uninitialized [-Werror=uninitialized]
+ 113 | unsigned long lower_index, lower_last;
+ | ^~~~~~~~~~
+
+I've created a reduced test case to see if this needs to be reported
+as a gcc bug, but it appears that the gcc-14.x branch already has a change
+that turns this into a more sensible -Wmaybe-uninitialized warning, so
+I ended up not reporting it so far.
+
+The reduced test case also produces a warning for gcc-13 and gcc-12
+but I don't see that with the version in the kernel.
+
+Link: https://godbolt.org/z/oKbohKqd3
+Link: https://lore.kernel.org/all/CAMuHMdWj=FLmkazPbYKPevDrcym2_HDb_U7Mb9YE9ovrP0jJfA@mail.gmail.com/
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Link: https://patch.msgid.link/20240719104030.1382465-1-arnd@kernel.org
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/base/regmap/regcache-maple.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/base/regmap/regcache-maple.c b/drivers/base/regmap/regcache-maple.c
+index e42433404854..4c034c813126 100644
+--- a/drivers/base/regmap/regcache-maple.c
++++ b/drivers/base/regmap/regcache-maple.c
+@@ -110,7 +110,8 @@ static int regcache_maple_drop(struct regmap *map, unsigned int min,
+ struct maple_tree *mt = map->cache;
+ MA_STATE(mas, mt, min, max);
+ unsigned long *entry, *lower, *upper;
+- unsigned long lower_index, lower_last;
++ /* initialized to work around false-positive -Wuninitialized warning */
++ unsigned long lower_index = 0, lower_last = 0;
+ unsigned long upper_index, upper_last;
+ int ret = 0;
+
+--
+2.43.0
+
--- /dev/null
+From 1ae2a124d77d02678729e3247d456eb9641d2a54 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 30 Aug 2024 07:35:12 -0700
+Subject: regulator: core: Stub devm_regulator_bulk_get_const() if
+ !CONFIG_REGULATOR
+
+From: Douglas Anderson <dianders@chromium.org>
+
+[ Upstream commit 1a5caec7f80ca2e659c03f45378ee26915f4eda2 ]
+
+When adding devm_regulator_bulk_get_const() I missed adding a stub for
+when CONFIG_REGULATOR is not enabled. Under certain conditions (like
+randconfig testing) this can cause the compiler to report errors
+like:
+
+ error: implicit declaration of function 'devm_regulator_bulk_get_const';
+ did you mean 'devm_regulator_bulk_get_enable'?
+
+Add the stub.
+
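+With the stub in place a consumer builds the same way whether or not
+CONFIG_REGULATOR is enabled; minimal usage sketch (hypothetical device and
+supply names):
+
+	static const struct regulator_bulk_data supplies[] = {
+		{ .supply = "vdd" },
+		{ .supply = "vccio" },
+	};
+	struct regulator_bulk_data *consumers;
+	int ret;
+
+	ret = devm_regulator_bulk_get_const(dev, ARRAY_SIZE(supplies),
+					    supplies, &consumers);
+	if (ret)
+		return ret;
+
+Like the other !CONFIG_REGULATOR stubs in this header, the new stub simply
+reports success.
+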
+Fixes: 1de452a0edda ("regulator: core: Allow drivers to define their init data as const")
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/oe-kbuild-all/202408301813.TesFuSbh-lkp@intel.com/
+Cc: Neil Armstrong <neil.armstrong@linaro.org>
+Signed-off-by: Douglas Anderson <dianders@chromium.org>
+Link: https://patch.msgid.link/20240830073511.1.Ib733229a8a19fad8179213c05e1af01b51e42328@changeid
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/regulator/consumer.h | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
+index 59d0b9a79e6e..e6ad927bb4a8 100644
+--- a/include/linux/regulator/consumer.h
++++ b/include/linux/regulator/consumer.h
+@@ -451,6 +451,14 @@ static inline int of_regulator_bulk_get_all(struct device *dev, struct device_no
+ return 0;
+ }
+
++static inline int devm_regulator_bulk_get_const(
++ struct device *dev, int num_consumers,
++ const struct regulator_bulk_data *in_consumers,
++ struct regulator_bulk_data **out_consumers)
++{
++ return 0;
++}
++
+ static inline int regulator_bulk_enable(int num_consumers,
+ struct regulator_bulk_data *consumers)
+ {
+--
+2.43.0
+
--- /dev/null
+From 7386edf579b68c3098a394cd26765ca7f2098078 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 27 Mar 2024 09:04:42 -0700
+Subject: riscv: kprobes: Use patch_text_nosync() for insn slots
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Samuel Holland <samuel.holland@sifive.com>
+
+[ Upstream commit b1756750a397f36ddc857989d31887c3f5081fb0 ]
+
+These instructions are not yet visible to the rest of the system,
+so there is no need to do the whole stop_machine() dance.
+
+Reviewed-by: Björn Töpel <bjorn@rivosinc.com>
+Signed-off-by: Samuel Holland <samuel.holland@sifive.com>
+Link: https://lore.kernel.org/r/20240327160520.791322-4-samuel.holland@sifive.com
+Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/kernel/probes/kprobes.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c
+index dfb28e57d900..03cd103b8449 100644
+--- a/arch/riscv/kernel/probes/kprobes.c
++++ b/arch/riscv/kernel/probes/kprobes.c
+@@ -29,9 +29,8 @@ static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
+
+ p->ainsn.api.restore = (unsigned long)p->addr + offset;
+
+- patch_text(p->ainsn.api.insn, &p->opcode, 1);
+- patch_text((void *)((unsigned long)(p->ainsn.api.insn) + offset),
+- &insn, 1);
++ patch_text_nosync(p->ainsn.api.insn, &p->opcode, 1);
++ patch_text_nosync(p->ainsn.api.insn + offset, &insn, 1);
+ }
+
+ static void __kprobes arch_prepare_simulate(struct kprobe *p)
+--
+2.43.0
+
--- /dev/null
+From e7637d80b2d8066be6b608ae8992bb4013d93071 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 8 May 2024 10:24:45 +0800
+Subject: riscv: set trap vector earlier
+
+From: yang.zhang <yang.zhang@hexintek.com>
+
+[ Upstream commit 6ad8735994b854b23c824dd6b1dd2126e893a3b4 ]
+
+The exception vector of the booting hart is not set before enabling
+the mmu and so still points to the value left by the previous firmware,
+typically _start. That makes it hard to debug setup_vm() when bad
+things happen. So fix that by setting the exception vector earlier.
+
+Reviewed-by: Alexandre Ghiti <alexghiti@rivosinc.com>
+Signed-off-by: yang.zhang <yang.zhang@hexintek.com>
+Link: https://lore.kernel.org/r/20240508022445.6131-1-gaoshanliukou@163.com
+Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/kernel/head.S | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
+index a00f7523cb91..356d5397b2a2 100644
+--- a/arch/riscv/kernel/head.S
++++ b/arch/riscv/kernel/head.S
+@@ -305,6 +305,9 @@ SYM_CODE_START(_start_kernel)
+ #else
+ mv a0, a1
+ #endif /* CONFIG_BUILTIN_DTB */
++ /* Set trap vector to spin forever to help debug */
++ la a3, .Lsecondary_park
++ csrw CSR_TVEC, a3
+ call setup_vm
+ #ifdef CONFIG_MMU
+ la a0, early_pg_dir
+--
+2.43.0
+
--- /dev/null
+From 890aa8f9f8303b535f4cca16156b2c2e68f38887 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 15 Aug 2024 07:49:30 +0000
+Subject: rust: kbuild: fix export of bss symbols
+
+From: Andreas Hindborg <a.hindborg@samsung.com>
+
+[ Upstream commit b8673d56935c32a4e0a1a0b40951fdd313dbf340 ]
+
+Symbols in the bss segment are not currently exported. This is a problem
+for Rust modules that link against statics that are resident in the kernel
+image. Thus, export symbols in the bss segment.
+
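+For reference, nm -p --defined-only prints one "address type name" line
+per symbol, and the awk pattern keeps the global text (T), read-only data
+(R), initialized data (D) and, with this change, uninitialized data (B)
+symbols. Illustrative output (fake addresses and names):
+
+	0000000000012340 T some_exported_function
+	0000000000045678 B some_exported_static
+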
+Fixes: 2f7ab1267dc9 ("Kbuild: add Rust support")
+Signed-off-by: Andreas Hindborg <a.hindborg@samsung.com>
+Reviewed-by: Alice Ryhl <aliceryhl@google.com>
+Tested-by: Alice Ryhl <aliceryhl@google.com>
+Reviewed-by: Gary Guo <gary@garyguo.net>
+Link: https://lore.kernel.org/r/20240815074519.2684107-2-nmi@metaspace.dk
+[ Reworded slightly. - Miguel ]
+Signed-off-by: Miguel Ojeda <ojeda@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ rust/Makefile | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/rust/Makefile b/rust/Makefile
+index f70d5e244fee..47f9a9f1bdb3 100644
+--- a/rust/Makefile
++++ b/rust/Makefile
+@@ -359,7 +359,7 @@ $(obj)/bindings/bindings_helpers_generated.rs: $(src)/helpers.c FORCE
+ quiet_cmd_exports = EXPORTS $@
+ cmd_exports = \
+ $(NM) -p --defined-only $< \
+- | awk '/ (T|R|D) / {printf "EXPORT_SYMBOL_RUST_GPL(%s);\n",$$3}' > $@
++ | awk '/ (T|R|D|B) / {printf "EXPORT_SYMBOL_RUST_GPL(%s);\n",$$3}' > $@
+
+ $(obj)/exports_core_generated.h: $(obj)/core.o FORCE
+ $(call if_changed,exports)
+--
+2.43.0
+
--- /dev/null
+From dbaf1c88d68154621e027d0aab957ec0cdc1f029 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Jul 2024 21:43:22 +0200
+Subject: s390/boot: Do not assume the decompressor range is reserved
+
+From: Alexander Gordeev <agordeev@linux.ibm.com>
+
+[ Upstream commit b798b685b42c9dbe508e59a74250d97c41bec35e ]
+
+When allocating a random memory range for .amode31 sections
+the minimal randomization address is 0. That does not lead
+to a possible overlap with the decompressor image (which also
+starts from 0) since by that time the image range is already
+reserved.
+
+Do not assume the decompressor range is reserved and always
+provide the minimal randomization address for .amode31
+sections beyond the decompressor. That is a prerequisite
+for moving the lowcore memory address from NULL elsewhere.
+
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Sven Schnelle <svens@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/boot/startup.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
+index 6d88f241dd43..66ee97ac803d 100644
+--- a/arch/s390/boot/startup.c
++++ b/arch/s390/boot/startup.c
+@@ -476,8 +476,12 @@ void startup_kernel(void)
+ * before the kernel started. Therefore, in case the two sections
+ * overlap there is no risk of corrupting any data.
+ */
+- if (kaslr_enabled())
+- amode31_lma = randomize_within_range(vmlinux.amode31_size, PAGE_SIZE, 0, SZ_2G);
++ if (kaslr_enabled()) {
++ unsigned long amode31_min;
++
++ amode31_min = (unsigned long)_decompressor_end;
++ amode31_lma = randomize_within_range(vmlinux.amode31_size, PAGE_SIZE, amode31_min, SZ_2G);
++ }
+ if (!amode31_lma)
+ amode31_lma = text_lma - vmlinux.amode31_size;
+ physmem_reserve(RR_AMODE31, amode31_lma, vmlinux.amode31_size);
+--
+2.43.0
+
--- /dev/null
+From 9afdd56335ec703db21799c26d7c38e4a2acbd66 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 29 Jul 2024 13:06:43 +0200
+Subject: s390/vmlinux.lds.S: Move ro_after_init section behind rodata section
+
+From: Heiko Carstens <hca@linux.ibm.com>
+
+[ Upstream commit 75c10d5377d8821efafed32e4d72068d9c1f8ec0 ]
+
+The .data.rel.ro and .got sections were added between the rodata and
+ro_after_init data sections, which adds an RW mapping in between all the
+RO mappings of the kernel image:
+
+---[ Kernel Image Start ]---
+0x000003ffe0000000-0x000003ffe0e00000 14M PMD RO X
+0x000003ffe0e00000-0x000003ffe0ec7000 796K PTE RO X
+0x000003ffe0ec7000-0x000003ffe0f00000 228K PTE RO NX
+0x000003ffe0f00000-0x000003ffe1300000 4M PMD RO NX
+0x000003ffe1300000-0x000003ffe1331000 196K PTE RO NX
+0x000003ffe1331000-0x000003ffe13b3000 520K PTE RW NX <---
+0x000003ffe13b3000-0x000003ffe13d5000 136K PTE RO NX
+0x000003ffe13d5000-0x000003ffe1400000 172K PTE RW NX
+0x000003ffe1400000-0x000003ffe1500000 1M PMD RW NX
+0x000003ffe1500000-0x000003ffe1700000 2M PTE RW NX
+0x000003ffe1700000-0x000003ffe1800000 1M PMD RW NX
+0x000003ffe1800000-0x000003ffe187e000 504K PTE RW NX
+---[ Kernel Image End ]---
+
+Move the ro_after_init data section again right behind the rodata
+section to prevent interleaving RO and RW mappings:
+
+---[ Kernel Image Start ]---
+0x000003ffe0000000-0x000003ffe0e00000 14M PMD RO X
+0x000003ffe0e00000-0x000003ffe0ec7000 796K PTE RO X
+0x000003ffe0ec7000-0x000003ffe0f00000 228K PTE RO NX
+0x000003ffe0f00000-0x000003ffe1300000 4M PMD RO NX
+0x000003ffe1300000-0x000003ffe1353000 332K PTE RO NX
+0x000003ffe1353000-0x000003ffe1400000 692K PTE RW NX
+0x000003ffe1400000-0x000003ffe1500000 1M PMD RW NX
+0x000003ffe1500000-0x000003ffe1700000 2M PTE RW NX
+0x000003ffe1700000-0x000003ffe1800000 1M PMD RW NX
+0x000003ffe1800000-0x000003ffe187e000 504K PTE RW NX
+---[ Kernel Image End ]---
+
+Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/kernel/vmlinux.lds.S | 17 +++++++++--------
+ 1 file changed, 9 insertions(+), 8 deletions(-)
+
+diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
+index 52bd969b2828..779162c664c4 100644
+--- a/arch/s390/kernel/vmlinux.lds.S
++++ b/arch/s390/kernel/vmlinux.lds.S
+@@ -59,14 +59,6 @@ SECTIONS
+ } :text = 0x0700
+
+ RO_DATA(PAGE_SIZE)
+- .data.rel.ro : {
+- *(.data.rel.ro .data.rel.ro.*)
+- }
+- .got : {
+- __got_start = .;
+- *(.got)
+- __got_end = .;
+- }
+
+ . = ALIGN(PAGE_SIZE);
+ _sdata = .; /* Start of data section */
+@@ -80,6 +72,15 @@ SECTIONS
+ . = ALIGN(PAGE_SIZE);
+ __end_ro_after_init = .;
+
++ .data.rel.ro : {
++ *(.data.rel.ro .data.rel.ro.*)
++ }
++ .got : {
++ __got_start = .;
++ *(.got)
++ __got_end = .;
++ }
++
+ RW_DATA(0x100, PAGE_SIZE, THREAD_SIZE)
+ .data.rel : {
+ *(.data.rel*)
+--
+2.43.0
+
--- /dev/null
+From 8295e1a4700180f15cd6804e1476cd2a2b643413 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 23 Aug 2024 17:38:50 +0100
+Subject: scripts: fix gfp-translate after ___GFP_*_BITS conversion to an enum
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Marc Zyngier <maz@kernel.org>
+
+[ Upstream commit a3f6a89c834a4cba0f881da21307b26de3796133 ]
+
+Richard reports that since 772dd0342727c ("mm: enumerate all gfp flags"),
+gfp-translate is broken, as the bit numbers are implicit, leaving the
+shell script unable to extract them. Even more, some bits are now at a
+variable location, making it double extra hard to parse using a simple
+shell script.
+
+Use a brute-force approach to the problem by generating a small C stub
+that will use the enum to dump the interesting bits.
+
+As an added bonus, we are now able to identify invalid bits for a given
+configuration. As an added drawback, we cannot parse include files that
+predate this change anymore. Tough luck.
+
+Link: https://lkml.kernel.org/r/20240823163850.3791201-1-maz@kernel.org
+Fixes: 772dd0342727 ("mm: enumerate all gfp flags")
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Reported-by: Richard Weinberger <richard@nod.at>
+Cc: Petr Tesařík <petr@tesarici.cz>
+Cc: Suren Baghdasaryan <surenb@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ scripts/gfp-translate | 66 ++++++++++++++++++++++++++++++++-----------
+ 1 file changed, 49 insertions(+), 17 deletions(-)
+
+diff --git a/scripts/gfp-translate b/scripts/gfp-translate
+index 6c9aed17cf56..8385ae0d5af9 100755
+--- a/scripts/gfp-translate
++++ b/scripts/gfp-translate
+@@ -62,25 +62,57 @@ if [ "$GFPMASK" = "none" ]; then
+ fi
+
+ # Extract GFP flags from the kernel source
+-TMPFILE=`mktemp -t gfptranslate-XXXXXX` || exit 1
+-grep -q ___GFP $SOURCE/include/linux/gfp_types.h
+-if [ $? -eq 0 ]; then
+- grep "^#define ___GFP" $SOURCE/include/linux/gfp_types.h | sed -e 's/u$//' | grep -v GFP_BITS > $TMPFILE
+-else
+- grep "^#define __GFP" $SOURCE/include/linux/gfp_types.h | sed -e 's/(__force gfp_t)//' | sed -e 's/u)/)/' | grep -v GFP_BITS | sed -e 's/)\//) \//' > $TMPFILE
+-fi
++TMPFILE=`mktemp -t gfptranslate-XXXXXX.c` || exit 1
+
+-# Parse the flags
+-IFS="
+-"
+ echo Source: $SOURCE
+ echo Parsing: $GFPMASK
+-for LINE in `cat $TMPFILE`; do
+- MASK=`echo $LINE | awk '{print $3}'`
+- if [ $(($GFPMASK&$MASK)) -ne 0 ]; then
+- echo $LINE
+- fi
+-done
+
+-rm -f $TMPFILE
++(
++ cat <<EOF
++#include <stdint.h>
++#include <stdio.h>
++
++// Try to fool compiler.h into not including extra stuff
++#define __ASSEMBLY__ 1
++
++#include <generated/autoconf.h>
++#include <linux/gfp_types.h>
++
++static const char *masks[] = {
++EOF
++
++ sed -nEe 's/^[[:space:]]+(___GFP_.*)_BIT,.*$/\1/p' $SOURCE/include/linux/gfp_types.h |
++ while read b; do
++ cat <<EOF
++#if defined($b) && ($b > 0)
++ [${b}_BIT] = "$b",
++#endif
++EOF
++ done
++
++ cat <<EOF
++};
++
++int main(int argc, char *argv[])
++{
++ unsigned long long mask = $GFPMASK;
++
++ for (int i = 0; i < sizeof(mask) * 8; i++) {
++ unsigned long long bit = 1ULL << i;
++ if (mask & bit)
++ printf("\t%-25s0x%llx\n",
++ (i < ___GFP_LAST_BIT && masks[i]) ?
++ masks[i] : "*** INVALID ***",
++ bit);
++ }
++
++ return 0;
++}
++EOF
++) > $TMPFILE
++
++${CC:-gcc} -Wall -o ${TMPFILE}.bin -I $SOURCE/include $TMPFILE && ${TMPFILE}.bin
++
++rm -f $TMPFILE ${TMPFILE}.bin
++
+ exit 0
+--
+2.43.0
+
--- /dev/null
+From e2105b7970097dd92049056ac2f3a8cb39fb4a00 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 28 Jun 2024 10:20:08 -0700
+Subject: scsi: lpfc: Handle mailbox timeouts in lpfc_get_sfp_info
+
+From: Justin Tee <justin.tee@broadcom.com>
+
+[ Upstream commit ede596b1434b57c0b3fd5c02b326efe5c54f6e48 ]
+
+The MBX_TIMEOUT return code is not handled in lpfc_get_sfp_info and the
+routine unconditionally frees submitted mailbox commands regardless of
+return status. The issue is that for MBX_TIMEOUT cases, when firmware
+returns SFP information at a later time, that same mailbox memory region
+references previously freed memory in its cmpl routine.
+
+Fix by adding checks for the MBX_TIMEOUT return code. During mailbox
+resource cleanup, check the mbox flag to make sure that the wait did not
+timeout. If the MBOX_WAKE flag is not set, then do not free the resources
+because it will be freed when firmware completes the mailbox at a later
+time in its cmpl routine.
+
+Also, increase the timeout from 30 to 60 seconds to accommodate boot
+scripts requiring longer timeouts.
+
+Signed-off-by: Justin Tee <justin.tee@broadcom.com>
+Link: https://lore.kernel.org/r/20240628172011.25921-6-justintee8345@gmail.com
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/lpfc/lpfc_els.c | 17 +++++++++++------
+ 1 file changed, 11 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
+index c32bc773ab29..445cb6c2e80f 100644
+--- a/drivers/scsi/lpfc/lpfc_els.c
++++ b/drivers/scsi/lpfc/lpfc_els.c
+@@ -7302,13 +7302,13 @@ int lpfc_get_sfp_info_wait(struct lpfc_hba *phba,
+ mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);
+ }
+ mbox->vport = phba->pport;
+-
+- rc = lpfc_sli_issue_mbox_wait(phba, mbox, 30);
++ rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_SLI4_CONFIG_TMO);
+ if (rc == MBX_NOT_FINISHED) {
+ rc = 1;
+ goto error;
+ }
+-
++ if (rc == MBX_TIMEOUT)
++ goto error;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ mp = mbox->ctx_buf;
+ else
+@@ -7361,7 +7361,10 @@ int lpfc_get_sfp_info_wait(struct lpfc_hba *phba,
+ mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);
+ }
+
+- rc = lpfc_sli_issue_mbox_wait(phba, mbox, 30);
++ rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_SLI4_CONFIG_TMO);
++
++ if (rc == MBX_TIMEOUT)
++ goto error;
+ if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) {
+ rc = 1;
+ goto error;
+@@ -7372,8 +7375,10 @@ int lpfc_get_sfp_info_wait(struct lpfc_hba *phba,
+ DMP_SFF_PAGE_A2_SIZE);
+
+ error:
+- mbox->ctx_buf = mpsave;
+- lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
++ if (mbox->mbox_flag & LPFC_MBX_WAKE) {
++ mbox->ctx_buf = mpsave;
++ lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
++ }
+
+ return rc;
+
+--
+2.43.0
+
--- /dev/null
+From 70d15c38caf365409a7ae395c0e33566e5a18680 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 27 Jun 2024 15:59:23 +0000
+Subject: scsi: pm80xx: Set phy->enable_completion only when we wait for it
+
+From: Igor Pylypiv <ipylypiv@google.com>
+
+[ Upstream commit e4f949ef1516c0d74745ee54a0f4882c1f6c7aea ]
+
+pm8001_phy_control() populates the enable_completion pointer with a stack
+address, sends a PHY_LINK_RESET / PHY_HARD_RESET, waits 300 ms, and
+returns. The problem arises when a phy control response comes late. After
+300 ms the pm8001_phy_control() function returns and the passed
+enable_completion stack address is no longer valid. A late phy control
+response then invokes complete() on a dangling enable_completion pointer,
+which leads to a kernel crash.
+
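+The pattern behind the crash, reduced to a minimal sketch (hypothetical
+names, not the driver's actual code):
+
+	{
+		DECLARE_COMPLETION_ONSTACK(done);
+
+		dev->notify = &done;      /* publish a stack address     */
+		send_phy_reset(dev);      /* response may arrive late    */
+		msleep(300);
+		return 0;                 /* stack frame is gone         */
+	}
+
+	/* response handler, possibly running after the return above: */
+	complete(dev->notify);            /* dangling pointer, crash     */
+
+With the fix, the pointer is only published on the paths that immediately
+wait_for_completion(), so complete() can only run while the on-stack
+completion is still valid.
+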
+Signed-off-by: Igor Pylypiv <ipylypiv@google.com>
+Signed-off-by: Terrence Adams <tadamsjr@google.com>
+Link: https://lore.kernel.org/r/20240627155924.2361370-2-tadamsjr@google.com
+Acked-by: Jack Wang <jinpu.wang@ionos.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/pm8001/pm8001_sas.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
+index a5a31dfa4512..ee2da8e49d4c 100644
+--- a/drivers/scsi/pm8001/pm8001_sas.c
++++ b/drivers/scsi/pm8001/pm8001_sas.c
+@@ -166,7 +166,6 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
+ unsigned long flags;
+ pm8001_ha = sas_phy->ha->lldd_ha;
+ phy = &pm8001_ha->phy[phy_id];
+- pm8001_ha->phy[phy_id].enable_completion = &completion;
+
+ if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) {
+ /*
+@@ -190,6 +189,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
+ rates->maximum_linkrate;
+ }
+ if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
++ pm8001_ha->phy[phy_id].enable_completion = &completion;
+ PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
+ wait_for_completion(&completion);
+ }
+@@ -198,6 +198,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
+ break;
+ case PHY_FUNC_HARD_RESET:
+ if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
++ pm8001_ha->phy[phy_id].enable_completion = &completion;
+ PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
+ wait_for_completion(&completion);
+ }
+@@ -206,6 +207,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
+ break;
+ case PHY_FUNC_LINK_RESET:
+ if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
++ pm8001_ha->phy[phy_id].enable_completion = &completion;
+ PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
+ wait_for_completion(&completion);
+ }
+--
+2.43.0
+
--- /dev/null
+From 4c670b35f30618ad3208229dd31759cd64a19896 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 27 Jun 2024 17:51:04 +0900
+Subject: scsi: ufs: core: Remove SCSI host only if added
+
+From: Kyoungrul Kim <k831.kim@samsung.com>
+
+[ Upstream commit 7cbff570dbe8907e23bba06f6414899a0fbb2fcc ]
+
+If the host tries to remove the ufshcd driver from a UFS device, it can
+cause a kernel panic when ufshcd_async_scan fails during ufshcd_probe_hba
+before a SCSI host has been added with scsi_add_host while MCQ is enabled,
+since adding the SCSI host has been deferred until after the MCQ
+configuration introduced by commit 0cab4023ec7b
+("scsi: ufs: core: Defer adding host to SCSI if MCQ is supported").
+
+To guarantee that the SCSI host is removed only if it has been added, set
+the scsi_host_added flag to true after adding the SCSI host and check
+whether it is set before removing it.
+
+Signed-off-by: Kyoungrul Kim <k831.kim@samsung.com>
+Signed-off-by: Minwoo Im <minwoo.im@samsung.com>
+Link: https://lore.kernel.org/r/20240627085104epcms2p5897a3870ea5c6416aa44f94df6c543d7@epcms2p5
+Reviewed-by: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ufs/core/ufshcd.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index 91bfdc17eedb..b9c436a002a1 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -10196,7 +10196,8 @@ void ufshcd_remove(struct ufs_hba *hba)
+ blk_mq_destroy_queue(hba->tmf_queue);
+ blk_put_queue(hba->tmf_queue);
+ blk_mq_free_tag_set(&hba->tmf_tag_set);
+- scsi_remove_host(hba->host);
++ if (hba->scsi_host_added)
++ scsi_remove_host(hba->host);
+ /* disable interrupts */
+ ufshcd_disable_intr(hba, hba->intr_mask);
+ ufshcd_hba_stop(hba);
+@@ -10478,6 +10479,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
+ dev_err(hba->dev, "scsi_add_host failed\n");
+ goto out_disable;
+ }
++ hba->scsi_host_added = true;
+ }
+
+ hba->tmf_tag_set = (struct blk_mq_tag_set) {
+@@ -10560,7 +10562,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
+ free_tmf_tag_set:
+ blk_mq_free_tag_set(&hba->tmf_tag_set);
+ out_remove_scsi_host:
+- scsi_remove_host(hba->host);
++ if (hba->scsi_host_added)
++ scsi_remove_host(hba->host);
+ out_disable:
+ hba->is_irq_enabled = false;
+ ufshcd_hba_exit(hba);
+--
+2.43.0
+
--- /dev/null
+From c677d0b961788e38085572e47c620e5280858cb8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 28 Jun 2024 02:10:12 +0000
+Subject: seccomp: release task filters when the task exits
+
+From: Andrei Vagin <avagin@google.com>
+
+[ Upstream commit bfafe5efa9754ebc991750da0bcca2a6694f3ed3 ]
+
+Previously, seccomp filters were released in release_task(), which
+required the process to exit and its zombie to be collected. However,
+exited threads/processes can't trigger any seccomp events, making it
+more logical to release filters upon task exits.
+
+This adjustment simplifies scenarios where a parent is tracing its child
+process. The parent process can now handle all events from a seccomp
+listening descriptor and then call wait to collect a child zombie.
+
+seccomp_filter_release takes the siglock to avoid races with
+seccomp_sync_threads. There was an idea to bypass taking the lock by
+checking PF_EXITING, but it can be set without holding siglock if
+threads have SIGNAL_GROUP_EXIT. This means it can happen concurrently
+with seccomp_filter_release.
+
+This change also fixes another minor problem. Suppose that a group
+leader installs the new filter without SECCOMP_FILTER_FLAG_TSYNC, exits,
+and becomes a zombie. Without this change, SECCOMP_FILTER_FLAG_TSYNC
+from any other thread can never succeed: seccomp_can_sync_threads() will
+check the zombie leader and is_ancestor() will fail.
+
+Reviewed-by: Oleg Nesterov <oleg@redhat.com>
+Signed-off-by: Andrei Vagin <avagin@google.com>
+Link: https://lore.kernel.org/r/20240628021014.231976-3-avagin@google.com
+Reviewed-by: Tycho Andersen <tandersen@netflix.com>
+Signed-off-by: Kees Cook <kees@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/exit.c | 3 ++-
+ kernel/seccomp.c | 23 ++++++++++++++++++-----
+ 2 files changed, 20 insertions(+), 6 deletions(-)
+
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 81fcee45d630..be81342caf1b 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -277,7 +277,6 @@ void release_task(struct task_struct *p)
+ }
+
+ write_unlock_irq(&tasklist_lock);
+- seccomp_filter_release(p);
+ proc_flush_pid(thread_pid);
+ put_pid(thread_pid);
+ release_thread(p);
+@@ -834,6 +833,8 @@ void __noreturn do_exit(long code)
+ io_uring_files_cancel();
+ exit_signals(tsk); /* sets PF_EXITING */
+
++ seccomp_filter_release(tsk);
++
+ acct_update_integrals(tsk);
+ group_dead = atomic_dec_and_test(&tsk->signal->live);
+ if (group_dead) {
+diff --git a/kernel/seccomp.c b/kernel/seccomp.c
+index e30b60b57614..b02337e95664 100644
+--- a/kernel/seccomp.c
++++ b/kernel/seccomp.c
+@@ -502,6 +502,9 @@ static inline pid_t seccomp_can_sync_threads(void)
+ /* Skip current, since it is initiating the sync. */
+ if (thread == caller)
+ continue;
++ /* Skip exited threads. */
++ if (thread->flags & PF_EXITING)
++ continue;
+
+ if (thread->seccomp.mode == SECCOMP_MODE_DISABLED ||
+ (thread->seccomp.mode == SECCOMP_MODE_FILTER &&
+@@ -563,18 +566,21 @@ static void __seccomp_filter_release(struct seccomp_filter *orig)
+ * @tsk: task the filter should be released from.
+ *
+ * This function should only be called when the task is exiting as
+- * it detaches it from its filter tree. As such, READ_ONCE() and
+- * barriers are not needed here, as would normally be needed.
++ * it detaches it from its filter tree. PF_EXITING has to be set
++ * for the task.
+ */
+ void seccomp_filter_release(struct task_struct *tsk)
+ {
+- struct seccomp_filter *orig = tsk->seccomp.filter;
++ struct seccomp_filter *orig;
+
+- /* We are effectively holding the siglock by not having any sighand. */
+- WARN_ON(tsk->sighand != NULL);
++ if (WARN_ON((tsk->flags & PF_EXITING) == 0))
++ return;
+
++ spin_lock_irq(&tsk->sighand->siglock);
++ orig = tsk->seccomp.filter;
+ /* Detach task from its filter tree. */
+ tsk->seccomp.filter = NULL;
++ spin_unlock_irq(&tsk->sighand->siglock);
+ __seccomp_filter_release(orig);
+ }
+
+@@ -602,6 +608,13 @@ static inline void seccomp_sync_threads(unsigned long flags)
+ if (thread == caller)
+ continue;
+
++ /*
++ * Skip exited threads. seccomp_filter_release could have
++ * been already called for this task.
++ */
++ if (thread->flags & PF_EXITING)
++ continue;
++
+ /* Get a task reference for the new leaf node. */
+ get_seccomp_filter(caller);
+
+--
+2.43.0
+
--- /dev/null
+From 575316bad2521a163269fbc8e67aacc37500f4e4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Sep 2024 16:12:26 +1000
+Subject: selftests: net: enable bind tests
+
+From: Jamie Bainbridge <jamie.bainbridge@gmail.com>
+
+[ Upstream commit e4af74a53b7aa865e7fcc104630ebb7a9129b71f ]
+
+bind_wildcard is compiled but not run, bind_timewait is not compiled.
+
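+(In kselftest's lib.mk, binaries listed in TEST_GEN_PROGS are built and
+executed by the test runner, whereas TEST_GEN_FILES are only built as
+helper artifacts, which is why bind_wildcard never actually ran.)
+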
+These two tests complete in a very short time, use the test harness
+properly, and seem reasonable to enable.
+
+The author of the tests confirmed via email that these were
+intended to be run.
+
+Enable these two tests.
+
+Fixes: 13715acf8ab5 ("selftest: Add test for bind() conflicts.")
+Fixes: 2c042e8e54ef ("tcp: Add selftest for bind() and TIME_WAIT.")
+Signed-off-by: Jamie Bainbridge <jamie.bainbridge@gmail.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Link: https://patch.msgid.link/5a009b26cf5fb1ad1512d89c61b37e2fac702323.1725430322.git.jamie.bainbridge@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/net/Makefile | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
+index d9393569d03a..ec5377ffda31 100644
+--- a/tools/testing/selftests/net/Makefile
++++ b/tools/testing/selftests/net/Makefile
+@@ -84,7 +84,8 @@ TEST_GEN_PROGS += so_incoming_cpu
+ TEST_PROGS += sctp_vrf.sh
+ TEST_GEN_FILES += sctp_hello
+ TEST_GEN_FILES += ip_local_port_range
+-TEST_GEN_FILES += bind_wildcard
++TEST_GEN_PROGS += bind_wildcard
++TEST_GEN_PROGS += bind_timewait
+ TEST_PROGS += test_vxlan_mdb.sh
+ TEST_PROGS += test_bridge_neigh_suppress.sh
+ TEST_PROGS += test_vxlan_nolocalbypass.sh
+--
+2.43.0
+
drm-i915-do-not-attempt-to-load-the-gsc-multiple-times.patch
drm-amd-display-lock-dc-and-exit-ips-when-changing-backlight.patch
alsa-hda-realtek-extend-quirks-for-clevo-v50.patch
+alsa-control-apply-sanity-check-of-input-values-for-.patch
+alsa-hda-add-input-value-sanity-checks-to-hdmi-chann.patch
+wifi-ath12k-fix-uninitialize-symbol-error-on-ath12k_.patch
+wifi-ath12k-fix-firmware-crash-due-to-invalid-peer-n.patch
+smack-unix-sockets-fix-accept-ed-socket-label.patch
+drm-amd-display-check-unboundedrequestenabled-s-valu.patch
+cgroup-cpuset-delay-setting-of-cs_cpu_exclusive-unti.patch
+virt-sev-guest-mark-driver-struct-with-__refdata-to-.patch
+bpf-verifier-correct-tail_call_reachable-for-bpf-pro.patch
+elf-fix-kernel.randomize_va_space-double-read.patch
+accel-habanalabs-gaudi2-unsecure-edma-max-outstandin.patch
+irqchip-renesas-rzg2l-reorder-function-calls-in-rzg2.patch
+irqchip-armada-370-xp-do-not-allow-mapping-irq-0-and.patch
+media-b2c2-flexcop-usb-fix-flexcop_usb_memory_req.patch
+af_unix-remove-put_pid-put_cred-in-copy_peercred.patch
+x86-kmsan-fix-hook-for-unaligned-accesses.patch
+iommu-sun50i-clear-bypass-register.patch
+netfilter-nf_conncount-fix-wrong-variable-type.patch
+gve-add-adminq-mutex-lock.patch
+wifi-iwlwifi-mvm-use-iwl_fw_check-for-link-id-check.patch
+udf-avoid-excessive-partition-lengths.patch
+fs-ntfs3-one-more-reason-to-mark-inode-bad.patch
+riscv-kprobes-use-patch_text_nosync-for-insn-slots.patch
+media-vivid-fix-wrong-sizeimage-value-for-mplane.patch
+leds-spi-byte-call-of_node_put-on-error-path.patch
+wifi-brcmsmac-advertise-mfp_capable-to-enable-wpa3.patch
+wifi-rtw89-wow-prevent-to-send-unexpected-h2c-during.patch
+usb-uas-set-host-status-byte-on-data-completion-erro.patch
+usb-gadget-aspeed_udc-validate-endpoint-index-for-as.patch
+drm-amdgpu-fix-register-access-violation.patch
+drm-amd-display-run-dc_log_dc-after-checking-link-li.patch
+drm-amd-display-check-hdcp-returned-status.patch
+drm-amd-display-validate-function-returns.patch
+drm-amdgpu-add-missing-error-handling-in-function-am.patch
+drm-amdgpu-fix-smatch-static-checker-warning.patch
+drm-amdgpu-clear-rb_overflow-bit-when-enabling-inter.patch
+crypto-qat-initialize-user_input.lock-for-rate_limit.patch
+media-vivid-don-t-set-hdmi-tx-controls-if-there-are-.patch
+vfio-spapr-always-clear-tces-before-unsetting-the-wi.patch
+fs-don-t-copy-to-userspace-under-namespace-semaphore.patch
+fs-relax-permissions-for-statmount.patch
+powerpc-rtas-prevent-spectre-v1-gadget-construction-.patch
+seccomp-release-task-filters-when-the-task-exits.patch
+ice-check-all-ice_vsi_rebuild-errors-in-function.patch
+pci-keystone-add-workaround-for-errata-i2037-am65x-s.patch
+input-ili210x-use-kvmalloc-to-allocate-buffer-for-fi.patch
+media-qcom-camss-add-check-for-v4l2_fwnode_endpoint_.patch
+pcmcia-use-resource_size-function-on-resource-object.patch
+drm-amd-display-check-denominator-pbn_div-before-use.patch
+drm-amd-display-check-denominator-crb_pipes-before-u.patch
+drm-amdgpu-check-for-linear_aligned-correctly-in-che.patch
+drm-amdgpu-correct-register-used-to-clear-fault-stat.patch
+drm-amdgpu-display-handle-gfx12-in-amdgpu_dm_plane_f.patch
+can-bcm-remove-proc-entry-when-dev-is-unregistered.patch
+can-m_can-release-irq-on-error-in-m_can_open.patch
+can-m_can-reset-coalescing-during-suspend-resume.patch
+can-m_can-remove-coalesing-disable-in-isr-during-sus.patch
+can-m_can-remove-m_can_rx_peripheral-indirection.patch
+can-m_can-do-not-cancel-timer-from-within-timer.patch
+can-m_can-disable_all_interrupts-not-clear-active_in.patch
+can-m_can-reset-cached-active_interrupts-on-start.patch
+can-mcp251xfd-fix-ring-configuration-when-switching-.patch
+rust-kbuild-fix-export-of-bss-symbols.patch
+cifs-fix-lack-of-credit-renegotiation-on-read-retry.patch
+netfs-cifs-fix-handling-of-short-dio-read.patch
+cifs-fix-copy-offload-to-flush-destination-region.patch
+cifs-fix-falloc_fl_zero_range-to-preflush-buffered-p.patch
+igb-fix-not-clearing-timesync-interrupts-for-82580.patch
+ice-add-netif_device_attach-detach-into-pf-reset-flo.patch
+platform-x86-dell-smbios-fix-error-path-in-dell_smbi.patch
+spi-intel-add-check-devm_kasprintf-returned-value.patch
+regulator-core-stub-devm_regulator_bulk_get_const-if.patch
+can-kvaser_pciefd-skip-redundant-null-pointer-check-.patch
+can-kvaser_pciefd-remove-unnecessary-comment.patch
+can-kvaser_pciefd-rename-board_irq-to-pci_irq.patch
+can-kvaser_pciefd-move-reset-of-dma-rx-buffers-to-th.patch
+can-kvaser_pciefd-use-a-single-write-when-releasing-.patch
+bluetooth-qca-if-memdump-doesn-t-work-re-enable-ibs.patch
+bluetooth-hci_sync-introduce-hci_cmd_sync_run-hci_cm.patch
+bluetooth-mgmt-fix-not-generating-command-complete-f.patch
+bcachefs-add-printbuf-arg-to-bch2_parse_mount_opts.patch
+bcachefs-add-error-code-to-defer-option-parsing.patch
+bcachefs-revert-lockless-buffered-io-path.patch
+hwmon-ltc2991-fix-register-bits-defines.patch
+scripts-fix-gfp-translate-after-___gfp_-_bits-conver.patch
+igc-unlock-on-error-in-igc_io_resume.patch
+hwmon-hp-wmi-sensors-check-if-wmi-event-data-exists.patch
+perf-lock-contention-fix-spinlock-and-rwlock-account.patch
+net-ethernet-ti-am65-cpsw-fix-rx-statistics-for-xdp_.patch
+net-phy-fix-missing-of_node_put-for-leds.patch
+ptp-ocp-convert-serial-ports-to-array.patch
+ptp-ocp-adjust-sysfs-entries-to-expose-tty-informati.patch
+ice-move-netif_queue_set_napi-to-rtnl-protected-sect.patch
+ice-protect-xdp-configuration-with-a-mutex.patch
+ice-check-ice_vsi_down-under-rtnl_lock-when-preparin.patch
+ice-remove-ice_cfg_busy-locking-from-af_xdp-code.patch
+ice-do-not-bring-the-vsi-up-if-it-was-down-before-th.patch
+usbnet-modern-method-to-get-random-mac.patch
+net-dqs-do-not-use-extern-for-unused-dql_group.patch
+bpf-net-fix-a-potential-race-in-do_sock_getsockopt.patch
+bpf-add-check-for-invalid-name-in-btf_name_valid_sec.patch
+bareudp-fix-device-stats-updates.patch
+fou-fix-null-ptr-deref-in-gro.patch
+r8152-fix-the-firmware-doesn-t-work.patch
+net-bridge-br_fdb_external_learn_add-always-set-ext_.patch
+net-xilinx-axienet-fix-race-in-axienet_stop.patch
+net-dsa-vsc73xx-fix-possible-subblocks-range-of-capt.patch
+selftests-net-enable-bind-tests.patch
+tools-net-ynl-fix-cli.py-subscribe-feature.patch
+xen-privcmd-fix-possible-access-to-a-freed-kirqfd-in.patch
+firmware-cs_dsp-don-t-allow-writes-to-read-only-cont.patch
+phy-zynqmp-take-the-phy-mutex-in-xlate.patch
+asoc-topology-properly-initialize-soc_enum-values.patch
+dm-init-handle-minors-larger-than-255.patch
+cxl-region-fix-a-race-condition-in-memory-hotplug-no.patch
+iommu-vt-d-handle-volatile-descriptor-status-read.patch
+iommu-vt-d-remove-control-over-execute-requested-req.patch
+block-don-t-call-bio_uninit-from-bio_endio.patch
+cgroup-protect-css-cgroup-write-under-css_set_lock.patch
+um-line-always-fill-error_out-in-setup_one_line.patch
+devres-initialize-an-uninitialized-struct-member.patch
+pci-hotplug-pnv_php-fix-hotplug-driver-crash-on-powe.patch
+virtio_ring-fix-kmsan-error-for-premapped-mode.patch
+wifi-rtw88-usb-schedule-rx-work-after-everything-is-.patch
+scsi-ufs-core-remove-scsi-host-only-if-added.patch
+scsi-pm80xx-set-phy-enable_completion-only-when-we-w.patch
+scsi-lpfc-handle-mailbox-timeouts-in-lpfc_get_sfp_in.patch
+crypto-qat-fix-unintentional-re-enabling-of-error-in.patch
+tracing-kprobes-add-symbol-counting-check-when-modul.patch
+hwmon-adc128d818-fix-underflows-seen-when-writing-li.patch
+hwmon-lm95234-fix-underflows-seen-when-writing-limit.patch
+hwmon-nct6775-core-fix-underflows-seen-when-writing-.patch
+hwmon-w83627ehf-fix-underflows-seen-when-writing-lim.patch
+asoc-tas2781-replace-bexx_to_cpup-with-get_unaligned.patch
+libbpf-add-null-checks-to-bpf_object__-prev_map-next.patch
+drm-amdgpu-set-no_hw_access-when-vf-request-full-gpu.patch
+ext4-fix-possible-tid_t-sequence-overflows.patch
+jbd2-avoid-mount-failed-when-commit-block-is-partial.patch
+dma-mapping-benchmark-don-t-starve-others-when-doing.patch
+wifi-mwifiex-do-not-return-unused-priv-in-mwifiex_ge.patch
+perf-x86-intel-hide-topdown-metrics-events-if-the-fe.patch
+pci-qcom-override-no_snoop-attribute-for-sa8775p-rc.patch
+staging-vchiq_core-bubble-up-wait_event_interruptibl.patch
+iommufd-require-drivers-to-supply-the-cache_invalida.patch
+bpf-remove-tst_run-from-lwt_seg6local_prog_ops.patch
+watchdog-imx7ulp_wdt-keep-already-running-watchdog-e.patch
+drm-amdgpu-reject-gang-submit-on-reserved-vmids.patch
+smp-add-missing-destroy_work_on_stack-call-in-smp_ca.patch
+fs-ntfs3-check-more-cases-when-directory-is-corrupte.patch
+btrfs-slightly-loosen-the-requirement-for-qgroup-rem.patch
+btrfs-don-t-bug_on-on-enomem-from-btrfs_lookup_exten.patch
+btrfs-replace-bug_on-with-assert-in-walk_down_proc.patch
+btrfs-clean-up-our-handling-of-refs-0-in-snapshot-de.patch
+btrfs-handle-errors-from-btrfs_dec_ref-properly.patch
+btrfs-replace-bug_on-with-error-handling-at-update_r.patch
+btrfs-don-t-bug_on-when-0-reference-count-at-btrfs_l.patch
+ethtool-fail-closed-if-we-can-t-get-max-channel-used.patch
+cxl-region-verify-target-positions-using-the-ordered.patch
+riscv-set-trap-vector-earlier.patch
+pci-add-missing-bridge-lock-to-pci_bus_lock.patch
+tcp-don-t-drop-syn-ack-for-simultaneous-connect.patch
+bluetooth-btnxpuart-fix-null-pointer-dereference-in-.patch
+net-dpaa-avoid-on-stack-arrays-of-nr_cpus-elements.patch
+irqchip-gic-v4-always-configure-affinity-on-vpe-acti.patch
+irqchip-gic-v4-make-sure-a-vpe-is-locked-when-vmapp-.patch
+drm-amdgpu-add-mutex-to-protect-ras-shared-memory.patch
+loongarch-use-correct-api-to-map-cmdline-in-relocate.patch
+regmap-maple-work-around-gcc-14.1-false-positive-war.patch
+s390-boot-do-not-assume-the-decompressor-range-is-re.patch
+cachefiles-set-the-max-subreq-size-for-cache-writes-.patch
+vfs-fix-potential-circular-locking-through-setxattr-.patch
+i3c-master-svc-resend-target-address-when-get-nack.patch
+i3c-mipi-i3c-hci-error-out-instead-on-bug_on-in-ibi-.patch
+kselftests-dmabuf-heaps-ensure-the-driver-name-is-nu.patch
+spi-hisi-kunpeng-add-verification-for-the-max_freque.patch
+btrfs-initialize-location-to-fix-wmaybe-uninitialize.patch
+s390-vmlinux.lds.s-move-ro_after_init-section-behind.patch
+hid-cougar-fix-slab-out-of-bounds-read-in-cougar_rep.patch
+hid-amd_sfh-free-driver_data-after-destroying-hid-de.patch
+input-uinput-reject-requests-with-unreasonable-numbe.patch
+usbnet-ipheth-race-between-ipheth_close-and-error-ha.patch
+wifi-mt76-mt7921-fix-null-pointer-access-in-mt7921_i.patch
+squashfs-sanity-check-symbolic-link-size.patch
+of-irq-prevent-device-address-out-of-bounds-read-in-.patch
+lib-generic-radix-tree.c-fix-rare-race-in-__genradix.patch
+net-hns3-void-array-out-of-bound-when-loop-tnl_num.patch
+kunit-overflow-fix-ub-in-overflow_allocation_test.patch
+mips-cevt-r4k-don-t-call-get_c0_compare_int-if-timer.patch
+spi-spi-fsl-lpspi-limit-prescale-bit-in-tcr-register.patch
+ata-pata_macio-use-warn-instead-of-bug.patch
+smb-server-fix-potential-null-ptr-deref-of-lease_ctx.patch
+nfsv4-add-missing-rescheduling-points-in-nfs_client_.patch
+drm-amdgpu-fix-two-reset-triggered-in-a-row.patch
+drm-amdgpu-add-reset_context-flag-for-host-flr.patch
+drm-amdgpu-fix-amdgpu_device_reset_sriov-retry-logic.patch
--- /dev/null
+From 6adc456ec5f45dafd5c43b426cbcabd6f00bc2c0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 17 Jun 2024 01:44:30 +0300
+Subject: smack: unix sockets: fix accept()ed socket label
+
+From: Konstantin Andreev <andreev@swemel.ru>
+
+[ Upstream commit e86cac0acdb1a74f608bacefe702f2034133a047 ]
+
+When a process accept()s a connection from a unix socket
+(either stream or seqpacket),
+it gets the socket with the label of the connecting process.
+
+For example, if a connecting process has a label 'foo',
+the accept()ed socket will also have 'in' and 'out' labels 'foo',
+regardless of the label of the listener process.
+
+This is because kernel creates unix child sockets
+in the context of the connecting process.
+
+I do not see any obvious way for the listener to abuse
+alien labels coming with the new socket, but,
+to be on the safe side, it's better to fix the new socket's labels.
+
+Signed-off-by: Konstantin Andreev <andreev@swemel.ru>
+Signed-off-by: Casey Schaufler <casey@schaufler-ca.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ security/smack/smack_lsm.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
+index ab939e6449e4..002a1b9ed83a 100644
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -3871,12 +3871,18 @@ static int smack_unix_stream_connect(struct sock *sock,
+ }
+ }
+
+- /*
+- * Cross reference the peer labels for SO_PEERSEC.
+- */
+ if (rc == 0) {
++ /*
++ * Cross reference the peer labels for SO_PEERSEC.
++ */
+ nsp->smk_packet = ssp->smk_out;
+ ssp->smk_packet = osp->smk_out;
++
++ /*
++ * new/child/established socket must inherit listening socket labels
++ */
++ nsp->smk_out = osp->smk_out;
++ nsp->smk_in = osp->smk_in;
+ }
+
+ return rc;
+--
+2.43.0
+
--- /dev/null
+From f3fc44f3f7e14ca6036b36fded14be4b20c65e56 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Aug 2024 08:20:51 +0000
+Subject: smb/server: fix potential null-ptr-deref of lease_ctx_info in
+ smb2_open()
+
+From: ChenXiaoSong <chenxiaosong@kylinos.cn>
+
+[ Upstream commit 4e8771a3666c8f216eefd6bd2fd50121c6c437db ]
+
+A null-ptr-deref will occur when (req_op_level == SMB2_OPLOCK_LEVEL_LEASE)
+and parse_lease_state() returns NULL.
+
+Fix this by checking whether 'lease_ctx_info' is NULL.
+
+Additionally, remove the redundant parentheses in
+parse_durable_handle_context().
+
+Signed-off-by: ChenXiaoSong <chenxiaosong@kylinos.cn>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/server/oplock.c | 2 +-
+ fs/smb/server/smb2pdu.c | 10 +++++-----
+ 2 files changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c
+index a8f52c4ebbda..e546ffa57b55 100644
+--- a/fs/smb/server/oplock.c
++++ b/fs/smb/server/oplock.c
+@@ -1510,7 +1510,7 @@ void create_lease_buf(u8 *rbuf, struct lease *lease)
+ * parse_lease_state() - parse lease context containted in file open request
+ * @open_req: buffer containing smb2 file open(create) request
+ *
+- * Return: oplock state, -ENOENT if create lease context not found
++ * Return: allocated lease context object on success, otherwise NULL
+ */
+ struct lease_ctx_info *parse_lease_state(void *open_req)
+ {
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index bc69b94df40f..39dfecf082ba 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -2771,8 +2771,8 @@ static int parse_durable_handle_context(struct ksmbd_work *work,
+ }
+ }
+
+- if (((lc && (lc->req_state & SMB2_LEASE_HANDLE_CACHING_LE)) ||
+- req_op_level == SMB2_OPLOCK_LEVEL_BATCH)) {
++ if ((lc && (lc->req_state & SMB2_LEASE_HANDLE_CACHING_LE)) ||
++ req_op_level == SMB2_OPLOCK_LEVEL_BATCH) {
+ dh_info->CreateGuid =
+ durable_v2_blob->CreateGuid;
+ dh_info->persistent =
+@@ -2792,8 +2792,8 @@ static int parse_durable_handle_context(struct ksmbd_work *work,
+ goto out;
+ }
+
+- if (((lc && (lc->req_state & SMB2_LEASE_HANDLE_CACHING_LE)) ||
+- req_op_level == SMB2_OPLOCK_LEVEL_BATCH)) {
++ if ((lc && (lc->req_state & SMB2_LEASE_HANDLE_CACHING_LE)) ||
++ req_op_level == SMB2_OPLOCK_LEVEL_BATCH) {
+ ksmbd_debug(SMB, "Request for durable open\n");
+ dh_info->type = dh_idx;
+ }
+@@ -3415,7 +3415,7 @@ int smb2_open(struct ksmbd_work *work)
+ goto err_out1;
+ }
+ } else {
+- if (req_op_level == SMB2_OPLOCK_LEVEL_LEASE) {
++ if (req_op_level == SMB2_OPLOCK_LEVEL_LEASE && lc) {
+ if (S_ISDIR(file_inode(filp)->i_mode)) {
+ lc->req_state &= ~SMB2_LEASE_WRITE_CACHING_LE;
+ lc->is_dir = true;
+--
+2.43.0
+
--- /dev/null
+From 509fa3bddc7bfac9601ee69a439ec626f563fbf9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Jul 2024 14:52:13 +0800
+Subject: smp: Add missing destroy_work_on_stack() call in smp_call_on_cpu()
+
+From: Zqiang <qiang.zhang1211@gmail.com>
+
+[ Upstream commit 77aeb1b685f9db73d276bad4bb30d48505a6fd23 ]
+
+For CONFIG_DEBUG_OBJECTS_WORK=y kernels, sscs.work defined by
+INIT_WORK_ONSTACK() is initialized by debug_object_init_on_stack() so that
+the debug check in __init_work() works correctly.
+
+But this lacks the counterpart to remove the tracked object from debug
+objects again, which will cause a debug object warning once the stack is
+freed.
+
+Add the missing destroy_work_on_stack() invocation to cure that.
+
+[ tglx: Massaged changelog ]
+
+Signed-off-by: Zqiang <qiang.zhang1211@gmail.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Paul E. McKenney <paulmck@kernel.org>
+Link: https://lore.kernel.org/r/20240704065213.13559-1-qiang.zhang1211@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/smp.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/kernel/smp.c b/kernel/smp.c
+index f085ebcdf9e7..af9b2d0736c8 100644
+--- a/kernel/smp.c
++++ b/kernel/smp.c
+@@ -1119,6 +1119,7 @@ int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
+
+ queue_work_on(cpu, system_wq, &sscs.work);
+ wait_for_completion(&sscs.done);
++ destroy_work_on_stack(&sscs.work);
+
+ return sscs.ret;
+ }
+--
+2.43.0
+
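The fix follows the usual lifecycle rule for on-stack work items: with
CONFIG_DEBUG_OBJECTS_WORK=y, anything set up with INIT_WORK_ONSTACK() must be
torn down with destroy_work_on_stack() before its stack frame goes away. A
minimal sketch of that pattern (hypothetical caller, not the smp_call_on_cpu()
code itself):

    #include <linux/workqueue.h>

    static void example_fn(struct work_struct *work)
    {
            /* do the actual work here */
    }

    static void example_run_on_cpu(unsigned int cpu)
    {
            struct work_struct work;

            INIT_WORK_ONSTACK(&work, example_fn);   /* registers the on-stack object with debugobjects */
            queue_work_on(cpu, system_wq, &work);
            flush_work(&work);                      /* wait until example_fn() has completed */
            destroy_work_on_stack(&work);           /* drop debugobjects tracking before returning */
    }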
--- /dev/null
+From 9c92b9e892d3f24670adfcf03ef0634097a57089 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Jul 2024 11:20:40 +0800
+Subject: spi: hisi-kunpeng: Add verification for the max_frequency provided by
+ the firmware
+
+From: Devyn Liu <liudingyuan@huawei.com>
+
+[ Upstream commit 5127c42c77de18651aa9e8e0a3ced190103b449c ]
+
+If the value of max_speed_hz is 0, it may cause a division-by-zero
+error in hisi_calc_effective_speed().
+The value of max_speed_hz is provided by firmware.
+Firmware is generally considered a trusted domain. However, as
+division-by-zero errors can cause system failure, as a defensive measure
+the value of max_speed_hz is validated here: 0 is regarded as invalid
+and an error code is returned.
+
+Signed-off-by: Devyn Liu <liudingyuan@huawei.com>
+Reviewed-by: Jay Fang <f.fangjian@huawei.com>
+Link: https://patch.msgid.link/20240730032040.3156393-3-liudingyuan@huawei.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi-hisi-kunpeng.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/spi/spi-hisi-kunpeng.c b/drivers/spi/spi-hisi-kunpeng.c
+index 6910b4d4c427..16054695bdb0 100644
+--- a/drivers/spi/spi-hisi-kunpeng.c
++++ b/drivers/spi/spi-hisi-kunpeng.c
+@@ -481,6 +481,9 @@ static int hisi_spi_probe(struct platform_device *pdev)
+ return -EINVAL;
+ }
+
++ if (host->max_speed_hz == 0)
++ return dev_err_probe(dev, -EINVAL, "spi-max-frequency can't be 0\n");
++
+ ret = device_property_read_u16(dev, "num-cs",
+ &host->num_chipselect);
+ if (ret)
+--
+2.43.0
+
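The same defensive pattern, sketched generically with the common
device-property helpers (the function and property handling below are
illustrative, not taken from the driver):

    #include <linux/device.h>
    #include <linux/property.h>

    static int example_read_spi_max_freq(struct device *dev, u32 *max_hz)
    {
            int ret;

            ret = device_property_read_u32(dev, "spi-max-frequency", max_hz);
            if (ret)
                    return dev_err_probe(dev, ret, "missing spi-max-frequency\n");

            /* a later clock calculation divides by this value */
            if (*max_hz == 0)
                    return dev_err_probe(dev, -EINVAL, "spi-max-frequency can't be 0\n");

            return 0;
    }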
--- /dev/null
+From 5b3563e3fa325b6b51fb519a41f428cacc567c70 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 30 Aug 2024 15:41:06 +0800
+Subject: spi: intel: Add check devm_kasprintf() returned value
+
+From: Charles Han <hanchunchao@inspur.com>
+
+[ Upstream commit 2920294686ec23211637998f3ec386dfd3d784a6 ]
+
+intel_spi_populate_chip() uses devm_kasprintf() to set pdata->name.
+devm_kasprintf() can return a NULL pointer on failure, but this return
+value is not checked.
+
+Fixes: e58db3bcd93b ("spi: intel: Add default partition and name to the second chip")
+Signed-off-by: Charles Han <hanchunchao@inspur.com>
+Reviewed-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Link: https://patch.msgid.link/20240830074106.8744-1-hanchunchao@inspur.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi-intel.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/spi/spi-intel.c b/drivers/spi/spi-intel.c
+index 3e5dcf2b3c8a..795b7e72baea 100644
+--- a/drivers/spi/spi-intel.c
++++ b/drivers/spi/spi-intel.c
+@@ -1390,6 +1390,9 @@ static int intel_spi_populate_chip(struct intel_spi *ispi)
+
+ pdata->name = devm_kasprintf(ispi->dev, GFP_KERNEL, "%s-chip1",
+ dev_name(ispi->dev));
++ if (!pdata->name)
++ return -ENOMEM;
++
+ pdata->nr_parts = 1;
+ parts = devm_kcalloc(ispi->dev, pdata->nr_parts, sizeof(*parts),
+ GFP_KERNEL);
+--
+2.43.0
+
--- /dev/null
+From ea3fc0e3445708ab7b4679fca307eab26b1efcff Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 20 Aug 2024 15:06:58 +0800
+Subject: spi: spi-fsl-lpspi: limit PRESCALE bit in TCR register
+
+From: Carlos Song <carlos.song@nxp.com>
+
+[ Upstream commit 783bf5d09f86b9736605f3e01a3472e55ef98ff8 ]
+
+According to i.MX93 erratum ERR051608, LPSPI TCR[PRESCALE] can only
+be configured to 0 or 1; other values are not valid and will cause
+LPSPI to stop working.
+
+Add the prescale limitation for LPSPI on i.MX93. Other platforms
+are not affected.
+
+Signed-off-by: Carlos Song <carlos.song@nxp.com>
+Link: https://patch.msgid.link/20240820070658.672127-1-carlos.song@nxp.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi-fsl-lpspi.c | 31 +++++++++++++++++++++++++++++--
+ 1 file changed, 29 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
+index f2d7eedd324b..30d56f8775d7 100644
+--- a/drivers/spi/spi-fsl-lpspi.c
++++ b/drivers/spi/spi-fsl-lpspi.c
+@@ -82,6 +82,10 @@
+ #define TCR_RXMSK BIT(19)
+ #define TCR_TXMSK BIT(18)
+
++struct fsl_lpspi_devtype_data {
++ u8 prescale_max;
++};
++
+ struct lpspi_config {
+ u8 bpw;
+ u8 chip_select;
+@@ -119,10 +123,25 @@ struct fsl_lpspi_data {
+ bool usedma;
+ struct completion dma_rx_completion;
+ struct completion dma_tx_completion;
++
++ const struct fsl_lpspi_devtype_data *devtype_data;
++};
++
++/*
++ * ERR051608 fixed or not:
++ * https://www.nxp.com/docs/en/errata/i.MX93_1P87f.pdf
++ */
++static struct fsl_lpspi_devtype_data imx93_lpspi_devtype_data = {
++ .prescale_max = 1,
++};
++
++static struct fsl_lpspi_devtype_data imx7ulp_lpspi_devtype_data = {
++ .prescale_max = 8,
+ };
+
+ static const struct of_device_id fsl_lpspi_dt_ids[] = {
+- { .compatible = "fsl,imx7ulp-spi", },
++ { .compatible = "fsl,imx7ulp-spi", .data = &imx7ulp_lpspi_devtype_data,},
++ { .compatible = "fsl,imx93-spi", .data = &imx93_lpspi_devtype_data,},
+ { /* sentinel */ }
+ };
+ MODULE_DEVICE_TABLE(of, fsl_lpspi_dt_ids);
+@@ -297,9 +316,11 @@ static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
+ {
+ struct lpspi_config config = fsl_lpspi->config;
+ unsigned int perclk_rate, scldiv, div;
++ u8 prescale_max;
+ u8 prescale;
+
+ perclk_rate = clk_get_rate(fsl_lpspi->clk_per);
++ prescale_max = fsl_lpspi->devtype_data->prescale_max;
+
+ if (!config.speed_hz) {
+ dev_err(fsl_lpspi->dev,
+@@ -315,7 +336,7 @@ static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
+
+ div = DIV_ROUND_UP(perclk_rate, config.speed_hz);
+
+- for (prescale = 0; prescale < 8; prescale++) {
++ for (prescale = 0; prescale < prescale_max; prescale++) {
+ scldiv = div / (1 << prescale) - 2;
+ if (scldiv < 256) {
+ fsl_lpspi->config.prescale = prescale;
+@@ -822,6 +843,7 @@ static int fsl_lpspi_init_rpm(struct fsl_lpspi_data *fsl_lpspi)
+
+ static int fsl_lpspi_probe(struct platform_device *pdev)
+ {
++ const struct fsl_lpspi_devtype_data *devtype_data;
+ struct fsl_lpspi_data *fsl_lpspi;
+ struct spi_controller *controller;
+ struct resource *res;
+@@ -830,6 +852,10 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
+ u32 temp;
+ bool is_target;
+
++ devtype_data = of_device_get_match_data(&pdev->dev);
++ if (!devtype_data)
++ return -ENODEV;
++
+ is_target = of_property_read_bool((&pdev->dev)->of_node, "spi-slave");
+ if (is_target)
+ controller = devm_spi_alloc_target(&pdev->dev,
+@@ -848,6 +874,7 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
+ fsl_lpspi->is_target = is_target;
+ fsl_lpspi->is_only_cs1 = of_property_read_bool((&pdev->dev)->of_node,
+ "fsl,spi-only-use-cs1-sel");
++ fsl_lpspi->devtype_data = devtype_data;
+
+ init_completion(&fsl_lpspi->xfer_done);
+
+--
+2.43.0
+
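For reference, a worked example of the divider search in
fsl_lpspi_set_bitrate() under the new limit (the clock numbers below are
illustrative, not from the commit):

    /*
     * perclk_rate = 60 MHz, requested speed_hz = 100 kHz
     * div = DIV_ROUND_UP(60000000, 100000) = 600
     *
     * prescale 0: scldiv = 600/1 - 2 = 598  -> >= 256, rejected
     * prescale 1: scldiv = 600/2 - 2 = 298  -> >= 256, rejected
     * prescale 2: scldiv = 600/4 - 2 = 148  -> accepted,
     *             SCK ~= 60 MHz / (2^2 * (148 + 2)) = 100 kHz
     *
     * With prescale_max = 1 on i.MX93 (ERR051608) the search stops after
     * prescale 1, so a request this slow can no longer be satisfied,
     * instead of programming a PRESCALE value the erratum forbids.
     */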
--- /dev/null
+From bdc2b3a467236f579180e159a1b8be73136a7142 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 12 Aug 2024 00:28:21 +0100
+Subject: Squashfs: sanity check symbolic link size
+
+From: Phillip Lougher <phillip@squashfs.org.uk>
+
+[ Upstream commit 810ee43d9cd245d138a2733d87a24858a23f577d ]
+
+Syzkaller reports a "KMSAN: uninit-value in pick_link" bug.
+
+This is caused by an uninitialised page, which is ultimately caused
+by a corrupted symbolic link size read from disk.
+
+The reason why the corrupted symlink size causes an uninitialised
+page is due to the following sequence of events:
+
+1. squashfs_read_inode() is called to read the symbolic
+ link from disk. This assigns the corrupted value
+ 3875536935 to inode->i_size.
+
+2. Later squashfs_symlink_read_folio() is called, which assigns
+ this corrupted value to the length variable, which being a
+ signed int, overflows producing a negative number.
+
+3. The following loop that fills in the page contents checks that
+   the number of copied bytes is less than length; length being negative
+   means the loop is skipped, producing an uninitialised page.
+
+This patch adds a sanity check which checks that the symbolic
+link size is not larger than expected.
+
+--
+
+Signed-off-by: Phillip Lougher <phillip@squashfs.org.uk>
+Link: https://lore.kernel.org/r/20240811232821.13903-1-phillip@squashfs.org.uk
+Reported-by: Lizhi Xu <lizhi.xu@windriver.com>
+Reported-by: syzbot+24ac24ff58dc5b0d26b9@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/000000000000a90e8c061e86a76b@google.com/
+V2: fix spelling mistake.
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/squashfs/inode.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/fs/squashfs/inode.c b/fs/squashfs/inode.c
+index 16bd693d0b3a..d5918eba27e3 100644
+--- a/fs/squashfs/inode.c
++++ b/fs/squashfs/inode.c
+@@ -279,8 +279,13 @@ int squashfs_read_inode(struct inode *inode, long long ino)
+ if (err < 0)
+ goto failed_read;
+
+- set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
+ inode->i_size = le32_to_cpu(sqsh_ino->symlink_size);
++ if (inode->i_size > PAGE_SIZE) {
++ ERROR("Corrupted symlink\n");
++ return -EINVAL;
++ }
++
++ set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
+ inode->i_op = &squashfs_symlink_inode_ops;
+ inode_nohighmem(inode);
+ inode->i_data.a_ops = &squashfs_symlink_aops;
+--
+2.43.0
+
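The truncation in step 2 is ordinary C conversion of an out-of-range value to
a signed int; a stand-alone illustration using the corrupted size quoted
above (the behaviour shown is what happens on the usual 32-bit-int platforms):

    #include <stdio.h>

    int main(void)
    {
            long long i_size = 3875536935LL;  /* corrupted symlink size read from disk */
            int length = (int)i_size;         /* the signed int used by the read_folio path */
            int copied = 0;

            printf("length = %d\n", length);  /* negative on common platforms */
            if (!(copied < length))
                    printf("fill loop never executes, page stays uninitialised\n");
            return 0;
    }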
--- /dev/null
+From d5f6f9b2356ac21b2aeda9730b83b2253ede8ee3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 3 Jul 2024 18:40:51 +0530
+Subject: staging: vchiq_core: Bubble up wait_event_interruptible() return
+ value
+
+From: Umang Jain <umang.jain@ideasonboard.com>
+
+[ Upstream commit c22502cb84d4c963f754e6d943d3133cfa80ba97 ]
+
+wait_event_interruptible() returns when the condition evaluates to true
+or when it receives a signal. However, the current code always assumes
+that wait_event_interruptible() returns only when the event is fired.
+This is not the case, as wait_event_interruptible() can also return on
+receiving a signal (with -ERESTARTSYS as the return value).
+
+We should consider this and bubble up the return value of
+wait_event_interruptible() to exactly know if the wait has failed
+and error out. This will also help to properly stop kthreads in the
+subsequent patch.
+
+While at it, remote_event_wait() is modified to return 0 on success,
+and an error code (from wait_event_interruptible()) on failure. The
+return value is now checked for all remote_event_wait() calls.
+
+Signed-off-by: Umang Jain <umang.jain@ideasonboard.com>
+Tested-by: Stefan Wahren <wahrenst@gmx.net>
+Link: https://lore.kernel.org/r/20240703131052.597443-2-umang.jain@ideasonboard.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../interface/vchiq_arm/vchiq_core.c | 31 ++++++++++++++-----
+ 1 file changed, 24 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+index df3af821f218..fb1907414cc1 100644
+--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
++++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+@@ -501,16 +501,21 @@ remote_event_create(wait_queue_head_t *wq, struct remote_event *event)
+ * routines where switched to the "interruptible" family of functions, as the
+ * former was deemed unjustified and the use "killable" set all VCHIQ's
+ * threads in D state.
++ *
++ * Returns: 0 on success, a negative error code on failure
+ */
+ static inline int
+ remote_event_wait(wait_queue_head_t *wq, struct remote_event *event)
+ {
++ int ret = 0;
++
+ if (!event->fired) {
+ event->armed = 1;
+ dsb(sy);
+- if (wait_event_interruptible(*wq, event->fired)) {
++ ret = wait_event_interruptible(*wq, event->fired);
++ if (ret) {
+ event->armed = 0;
+- return 0;
++ return ret;
+ }
+ event->armed = 0;
+ /* Ensure that the peer sees that we are not waiting (armed == 0). */
+@@ -518,7 +523,7 @@ remote_event_wait(wait_queue_head_t *wq, struct remote_event *event)
+ }
+
+ event->fired = 0;
+- return 1;
++ return ret;
+ }
+
+ /*
+@@ -1140,6 +1145,7 @@ queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
+ struct vchiq_header *header;
+ ssize_t callback_result;
+ int svc_fourcc;
++ int ret;
+
+ local = state->local;
+
+@@ -1147,7 +1153,9 @@ queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
+ mutex_lock_killable(&state->sync_mutex))
+ return -EAGAIN;
+
+- remote_event_wait(&state->sync_release_event, &local->sync_release);
++ ret = remote_event_wait(&state->sync_release_event, &local->sync_release);
++ if (ret)
++ return ret;
+
+ /* Ensure that reads don't overtake the remote_event_wait. */
+ rmb();
+@@ -1929,13 +1937,16 @@ slot_handler_func(void *v)
+ {
+ struct vchiq_state *state = v;
+ struct vchiq_shared_state *local = state->local;
++ int ret;
+
+ DEBUG_INITIALISE(local);
+
+ while (1) {
+ DEBUG_COUNT(SLOT_HANDLER_COUNT);
+ DEBUG_TRACE(SLOT_HANDLER_LINE);
+- remote_event_wait(&state->trigger_event, &local->trigger);
++ ret = remote_event_wait(&state->trigger_event, &local->trigger);
++ if (ret)
++ return ret;
+
+ /* Ensure that reads don't overtake the remote_event_wait. */
+ rmb();
+@@ -1966,6 +1977,7 @@ recycle_func(void *v)
+ struct vchiq_shared_state *local = state->local;
+ u32 *found;
+ size_t length;
++ int ret;
+
+ length = sizeof(*found) * BITSET_SIZE(VCHIQ_MAX_SERVICES);
+
+@@ -1975,7 +1987,9 @@ recycle_func(void *v)
+ return -ENOMEM;
+
+ while (1) {
+- remote_event_wait(&state->recycle_event, &local->recycle);
++ ret = remote_event_wait(&state->recycle_event, &local->recycle);
++ if (ret)
++ return ret;
+
+ process_free_queue(state, found, length);
+ }
+@@ -1992,6 +2006,7 @@ sync_func(void *v)
+ (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
+ state->remote->slot_sync);
+ int svc_fourcc;
++ int ret;
+
+ while (1) {
+ struct vchiq_service *service;
+@@ -1999,7 +2014,9 @@ sync_func(void *v)
+ int type;
+ unsigned int localport, remoteport;
+
+- remote_event_wait(&state->sync_trigger_event, &local->sync_trigger);
++ ret = remote_event_wait(&state->sync_trigger_event, &local->sync_trigger);
++ if (ret)
++ return ret;
+
+ /* Ensure that reads don't overtake the remote_event_wait. */
+ rmb();
+--
+2.43.0
+
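For context, the return-value convention being propagated here is that of
wait_event_interruptible() itself; a minimal sketch (the helper name and the
boolean flag are invented for illustration):

    #include <linux/sched.h>
    #include <linux/wait.h>

    static int example_wait_for_flag(wait_queue_head_t *wq, bool *fired)
    {
            /* 0 when *fired became true, -ERESTARTSYS when a signal arrived first */
            int ret = wait_event_interruptible(*wq, READ_ONCE(*fired));

            if (ret)
                    return ret;     /* caller must bubble this up, e.g. stop its kthread loop */

            WRITE_ONCE(*fired, false);
            return 0;
    }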
--- /dev/null
+From 61b0d6bc7760fbdd52888f7973eeae9aefb160ab Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Jul 2024 10:12:45 -0700
+Subject: tcp: Don't drop SYN+ACK for simultaneous connect().
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit 23e89e8ee7be73e21200947885a6d3a109a2c58d ]
+
+RFC 9293 states that in the case of simultaneous connect(), the connection
+gets established when SYN+ACK is received. [0]
+
+ TCP Peer A TCP Peer B
+
+ 1. CLOSED CLOSED
+ 2. SYN-SENT --> <SEQ=100><CTL=SYN> ...
+ 3. SYN-RECEIVED <-- <SEQ=300><CTL=SYN> <-- SYN-SENT
+ 4. ... <SEQ=100><CTL=SYN> --> SYN-RECEIVED
+ 5. SYN-RECEIVED --> <SEQ=100><ACK=301><CTL=SYN,ACK> ...
+ 6. ESTABLISHED <-- <SEQ=300><ACK=101><CTL=SYN,ACK> <-- SYN-RECEIVED
+ 7. ... <SEQ=100><ACK=301><CTL=SYN,ACK> --> ESTABLISHED
+
+However, since commit 0c24604b68fc ("tcp: implement RFC 5961 4.2"), such a
+SYN+ACK is dropped in tcp_validate_incoming() and responded to with a
+Challenge ACK.
+
+For example, the write() syscall in the following packetdrill script fails
+with -EAGAIN, and wrong SNMP stats get incremented.
+
+ 0 socket(..., SOCK_STREAM|SOCK_NONBLOCK, IPPROTO_TCP) = 3
+ +0 connect(3, ..., ...) = -1 EINPROGRESS (Operation now in progress)
+
+ +0 > S 0:0(0) <mss 1460,sackOK,TS val 1000 ecr 0,nop,wscale 8>
+ +0 < S 0:0(0) win 1000 <mss 1000>
+ +0 > S. 0:0(0) ack 1 <mss 1460,sackOK,TS val 3308134035 ecr 0,nop,wscale 8>
+ +0 < S. 0:0(0) ack 1 win 1000
+
+ +0 write(3, ..., 100) = 100
+ +0 > P. 1:101(100) ack 1
+
+ --
+
+ # packetdrill cross-synack.pkt
+ cross-synack.pkt:13: runtime error in write call: Expected result 100 but got -1 with errno 11 (Resource temporarily unavailable)
+ # nstat
+ ...
+ TcpExtTCPChallengeACK 1 0.0
+ TcpExtTCPSYNChallenge 1 0.0
+
+The problem is that bpf_skops_established() is triggered by the Challenge
+ACK instead of SYN+ACK. This causes the bpf prog to miss the chance to
+check if the peer supports a TCP option that is expected to be exchanged
+in SYN and SYN+ACK.
+
+Let's accept a bare SYN+ACK for active-open TCP_SYN_RECV sockets to avoid
+such a situation.
+
+Note that tcp_ack_snd_check() in tcp_rcv_state_process() is skipped so as
+not to send an unnecessary ACK, but this could be a bit risky for net.git,
+so this change targets net-next.
+
+Link: https://www.rfc-editor.org/rfc/rfc9293.html#section-3.5-7 [0]
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Link: https://patch.msgid.link/20240710171246.87533-2-kuniyu@amazon.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/tcp_input.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 2c52f6dcbd29..e0d870d3c9b8 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -6004,6 +6004,11 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
+ * RFC 5961 4.2 : Send a challenge ack
+ */
+ if (th->syn) {
++ if (sk->sk_state == TCP_SYN_RECV && sk->sk_socket && th->ack &&
++ TCP_SKB_CB(skb)->seq + 1 == TCP_SKB_CB(skb)->end_seq &&
++ TCP_SKB_CB(skb)->seq + 1 == tp->rcv_nxt &&
++ TCP_SKB_CB(skb)->ack_seq == tp->snd_nxt)
++ goto pass;
+ syn_challenge:
+ if (syn_inerr)
+ TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
+@@ -6013,6 +6018,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
+ goto discard;
+ }
+
++pass:
+ bpf_skops_parse_hdr(sk, skb);
+
+ return true;
+@@ -6819,6 +6825,9 @@ tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
+ tcp_fast_path_on(tp);
+ if (sk->sk_shutdown & SEND_SHUTDOWN)
+ tcp_shutdown(sk, SEND_SHUTDOWN);
++
++ if (sk->sk_socket)
++ goto consume;
+ break;
+
+ case TCP_FIN_WAIT1: {
+--
+2.43.0
+
--- /dev/null
+From 4423d29308bf5b7fbf481ade723a0571fb1b30cd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Sep 2024 15:50:34 +0200
+Subject: tools/net/ynl: fix cli.py --subscribe feature
+
+From: Arkadiusz Kubalewski <arkadiusz.kubalewski@intel.com>
+
+[ Upstream commit 6fda63c45fe8a0870226c13dcce1cc21b7c4d508 ]
+
+Execution of command:
+./tools/net/ynl/cli.py --spec Documentation/netlink/specs/dpll.yaml \
+ --subscribe "monitor" --sleep 10
+fails with:
+ File "/repo/./tools/net/ynl/cli.py", line 109, in main
+ ynl.check_ntf()
+ File "/repo/tools/net/ynl/lib/ynl.py", line 924, in check_ntf
+ op = self.rsp_by_value[nl_msg.cmd()]
+KeyError: 19
+
+Parsing Generic Netlink notification messages performs a lookup for the op
+in the message. The message has not yet been decoded and is not yet
+considered a GenlMsg, thus msg.cmd() returns the Generic Netlink family id
+(19) instead of the proper notification command id (i.e.
+DPLL_CMD_PIN_CHANGE_NTF=13).
+
+Allow the op to be obtained within NetlinkProtocol.decode(..) itself if the
+op was not passed to the decode function, thus allowing Generic Netlink
+notifications to be parsed without causing the failure.
+
+Suggested-by: Donald Hunter <donald.hunter@gmail.com>
+Link: https://lore.kernel.org/netdev/m2le0n5xpn.fsf@gmail.com/
+Fixes: 0a966d606c68 ("tools/net/ynl: Fix extack decoding for directional ops")
+Signed-off-by: Arkadiusz Kubalewski <arkadiusz.kubalewski@intel.com>
+Reviewed-by: Donald Hunter <donald.hunter@gmail.com>
+Link: https://patch.msgid.link/20240904135034.316033-1-arkadiusz.kubalewski@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/net/ynl/lib/ynl.py | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/tools/net/ynl/lib/ynl.py b/tools/net/ynl/lib/ynl.py
+index 35e666928119..ed7b6fff6999 100644
+--- a/tools/net/ynl/lib/ynl.py
++++ b/tools/net/ynl/lib/ynl.py
+@@ -388,6 +388,8 @@ class NetlinkProtocol:
+
+ def decode(self, ynl, nl_msg, op):
+ msg = self._decode(nl_msg)
++ if op is None:
++ op = ynl.rsp_by_value[msg.cmd()]
+ fixed_header_size = ynl._struct_size(op.fixed_header)
+ msg.raw_attrs = NlAttrs(msg.raw, fixed_header_size)
+ return msg
+@@ -919,8 +921,7 @@ class YnlFamily(SpecFamily):
+ print("Netlink done while checking for ntf!?")
+ continue
+
+- op = self.rsp_by_value[nl_msg.cmd()]
+- decoded = self.nlproto.decode(self, nl_msg, op)
++ decoded = self.nlproto.decode(self, nl_msg, None)
+ if decoded.cmd() not in self.async_msg_ids:
+ print("Unexpected msg id done while checking for ntf", decoded)
+ continue
+@@ -978,7 +979,7 @@ class YnlFamily(SpecFamily):
+ if nl_msg.extack:
+ self._decode_extack(req_msg, op, nl_msg.extack)
+ else:
+- op = self.rsp_by_value[nl_msg.cmd()]
++ op = None
+ req_flags = []
+
+ if nl_msg.error:
+--
+2.43.0
+
--- /dev/null
+From 418688c98f83cf6bf335475f120b7c50dc80658a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Jul 2024 16:11:25 +0900
+Subject: tracing/kprobes: Add symbol counting check when module loads
+
+From: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+
+[ Upstream commit 9d8616034f161222a4ac166c1b42b6d79961c005 ]
+
+Currently, the kprobe event checks whether the target symbol name is unique
+or not, so that it does not put a probe in an unexpected place. But this
+check is skipped if the target is in a module, because the module may not
+be loaded yet.
+
+To fix this issue, this patch checks the number of probe target symbols
+in a target module when the module is loaded. If the probe is not on the
+unique name symbols in the module, it will be rejected at that point.
+
+Note that a symbol which has a unique name in the target module will be
+accepted even if there are symbols with the same name in the kernel or in
+other modules.
+
+Link: https://lore.kernel.org/all/172016348553.99543.2834679315611882137.stgit@devnote2/
+
+Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Reviewed-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/trace_kprobe.c | 125 +++++++++++++++++++++++-------------
+ 1 file changed, 81 insertions(+), 44 deletions(-)
+
+diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
+index 16383247bdbf..0d88922f8763 100644
+--- a/kernel/trace/trace_kprobe.c
++++ b/kernel/trace/trace_kprobe.c
+@@ -678,6 +678,21 @@ static int register_trace_kprobe(struct trace_kprobe *tk)
+ }
+
+ #ifdef CONFIG_MODULES
++static int validate_module_probe_symbol(const char *modname, const char *symbol);
++
++static int register_module_trace_kprobe(struct module *mod, struct trace_kprobe *tk)
++{
++ const char *p;
++ int ret = 0;
++
++ p = strchr(trace_kprobe_symbol(tk), ':');
++ if (p)
++ ret = validate_module_probe_symbol(module_name(mod), p + 1);
++ if (!ret)
++ ret = __register_trace_kprobe(tk);
++ return ret;
++}
++
+ /* Module notifier call back, checking event on the module */
+ static int trace_kprobe_module_callback(struct notifier_block *nb,
+ unsigned long val, void *data)
+@@ -696,7 +711,7 @@ static int trace_kprobe_module_callback(struct notifier_block *nb,
+ if (trace_kprobe_within_module(tk, mod)) {
+ /* Don't need to check busy - this should have gone. */
+ __unregister_trace_kprobe(tk);
+- ret = __register_trace_kprobe(tk);
++ ret = register_module_trace_kprobe(mod, tk);
+ if (ret)
+ pr_warn("Failed to re-register probe %s on %s: %d\n",
+ trace_probe_name(&tk->tp),
+@@ -747,17 +762,68 @@ static int count_mod_symbols(void *data, const char *name, unsigned long unused)
+ return 0;
+ }
+
+-static unsigned int number_of_same_symbols(char *func_name)
++static unsigned int number_of_same_symbols(const char *mod, const char *func_name)
+ {
+ struct sym_count_ctx ctx = { .count = 0, .name = func_name };
+
+- kallsyms_on_each_match_symbol(count_symbols, func_name, &ctx.count);
++ if (!mod)
++ kallsyms_on_each_match_symbol(count_symbols, func_name, &ctx.count);
+
+- module_kallsyms_on_each_symbol(NULL, count_mod_symbols, &ctx);
++ module_kallsyms_on_each_symbol(mod, count_mod_symbols, &ctx);
+
+ return ctx.count;
+ }
+
++static int validate_module_probe_symbol(const char *modname, const char *symbol)
++{
++ unsigned int count = number_of_same_symbols(modname, symbol);
++
++ if (count > 1) {
++ /*
++ * Users should use ADDR to remove the ambiguity of
++ * using KSYM only.
++ */
++ return -EADDRNOTAVAIL;
++ } else if (count == 0) {
++ /*
++ * We can return ENOENT earlier than when register the
++ * kprobe.
++ */
++ return -ENOENT;
++ }
++ return 0;
++}
++
++static int validate_probe_symbol(char *symbol)
++{
++ struct module *mod = NULL;
++ char *modname = NULL, *p;
++ int ret = 0;
++
++ p = strchr(symbol, ':');
++ if (p) {
++ modname = symbol;
++ symbol = p + 1;
++ *p = '\0';
++ /* Return 0 (defer) if the module does not exist yet. */
++ rcu_read_lock_sched();
++ mod = find_module(modname);
++ if (mod && !try_module_get(mod))
++ mod = NULL;
++ rcu_read_unlock_sched();
++ if (!mod)
++ goto out;
++ }
++
++ ret = validate_module_probe_symbol(modname, symbol);
++out:
++ if (p)
++ *p = ':';
++ if (mod)
++ module_put(mod);
++ return ret;
++}
++
+ static int trace_kprobe_entry_handler(struct kretprobe_instance *ri,
+ struct pt_regs *regs);
+
+@@ -881,6 +947,14 @@ static int __trace_kprobe_create(int argc, const char *argv[])
+ trace_probe_log_err(0, BAD_PROBE_ADDR);
+ goto parse_error;
+ }
++ ret = validate_probe_symbol(symbol);
++ if (ret) {
++ if (ret == -EADDRNOTAVAIL)
++ trace_probe_log_err(0, NON_UNIQ_SYMBOL);
++ else
++ trace_probe_log_err(0, BAD_PROBE_ADDR);
++ goto parse_error;
++ }
+ if (is_return)
+ ctx.flags |= TPARG_FL_RETURN;
+ ret = kprobe_on_func_entry(NULL, symbol, offset);
+@@ -893,31 +967,6 @@ static int __trace_kprobe_create(int argc, const char *argv[])
+ }
+ }
+
+- if (symbol && !strchr(symbol, ':')) {
+- unsigned int count;
+-
+- count = number_of_same_symbols(symbol);
+- if (count > 1) {
+- /*
+- * Users should use ADDR to remove the ambiguity of
+- * using KSYM only.
+- */
+- trace_probe_log_err(0, NON_UNIQ_SYMBOL);
+- ret = -EADDRNOTAVAIL;
+-
+- goto error;
+- } else if (count == 0) {
+- /*
+- * We can return ENOENT earlier than when register the
+- * kprobe.
+- */
+- trace_probe_log_err(0, BAD_PROBE_ADDR);
+- ret = -ENOENT;
+-
+- goto error;
+- }
+- }
+-
+ trace_probe_log_set_index(0);
+ if (event) {
+ ret = traceprobe_parse_event_name(&event, &group, gbuf,
+@@ -1835,21 +1884,9 @@ create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
+ char *event;
+
+ if (func) {
+- unsigned int count;
+-
+- count = number_of_same_symbols(func);
+- if (count > 1)
+- /*
+- * Users should use addr to remove the ambiguity of
+- * using func only.
+- */
+- return ERR_PTR(-EADDRNOTAVAIL);
+- else if (count == 0)
+- /*
+- * We can return ENOENT earlier than when register the
+- * kprobe.
+- */
+- return ERR_PTR(-ENOENT);
++ ret = validate_probe_symbol(func);
++ if (ret)
++ return ERR_PTR(ret);
+ }
+
+ /*
+--
+2.43.0
+
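The user-visible effect, sketched from user space (the module and symbol
names below are made up, and the tracefs path assumes the usual mount point):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            /* probe a symbol inside a module; "mymod" and "my_func" are placeholders */
            const char *cmd = "p:test_probe mymod:my_func\n";
            int fd = open("/sys/kernel/tracing/kprobe_events", O_WRONLY | O_APPEND);

            if (fd < 0 || write(fd, cmd, strlen(cmd)) < 0)
                    /* with the target module already loaded, a name that is not
                       unique inside it is now rejected with EADDRNOTAVAIL */
                    perror("kprobe_events");
            if (fd >= 0)
                    close(fd);
            return 0;
    }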
--- /dev/null
+From 3681e3f9ad0022086185ed68d35524a137f6a1f6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Jun 2024 12:52:17 +0200
+Subject: udf: Avoid excessive partition lengths
+
+From: Jan Kara <jack@suse.cz>
+
+[ Upstream commit ebbe26fd54a9621994bc16b14f2ba8f84c089693 ]
+
+Avoid mounting filesystems where the partition would overflow the
+32-bits used for block number. Also refuse to mount filesystems where
+the partition length is so large we cannot safely index bits in a
+block bitmap.
+
+Link: https://patch.msgid.link/20240620130403.14731-1-jack@suse.cz
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/udf/super.c | 15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+diff --git a/fs/udf/super.c b/fs/udf/super.c
+index 92d477053905..3460ecc826d1 100644
+--- a/fs/udf/super.c
++++ b/fs/udf/super.c
+@@ -1111,12 +1111,19 @@ static int udf_fill_partdesc_info(struct super_block *sb,
+ struct udf_part_map *map;
+ struct udf_sb_info *sbi = UDF_SB(sb);
+ struct partitionHeaderDesc *phd;
++ u32 sum;
+ int err;
+
+ map = &sbi->s_partmaps[p_index];
+
+ map->s_partition_len = le32_to_cpu(p->partitionLength); /* blocks */
+ map->s_partition_root = le32_to_cpu(p->partitionStartingLocation);
++ if (check_add_overflow(map->s_partition_root, map->s_partition_len,
++ &sum)) {
++ udf_err(sb, "Partition %d has invalid location %u + %u\n",
++ p_index, map->s_partition_root, map->s_partition_len);
++ return -EFSCORRUPTED;
++ }
+
+ if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_READ_ONLY))
+ map->s_partition_flags |= UDF_PART_FLAG_READ_ONLY;
+@@ -1172,6 +1179,14 @@ static int udf_fill_partdesc_info(struct super_block *sb,
+ bitmap->s_extPosition = le32_to_cpu(
+ phd->unallocSpaceBitmap.extPosition);
+ map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_BITMAP;
++ /* Check whether math over bitmap won't overflow. */
++ if (check_add_overflow(map->s_partition_len,
++ sizeof(struct spaceBitmapDesc) << 3,
++ &sum)) {
++ udf_err(sb, "Partition %d is too long (%u)\n", p_index,
++ map->s_partition_len);
++ return -EFSCORRUPTED;
++ }
+ udf_debug("unallocSpaceBitmap (part %d) @ %u\n",
+ p_index, bitmap->s_extPosition);
+ }
+--
+2.43.0
+
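Both new checks rely on check_add_overflow() from linux/overflow.h; the
second one additionally converts the space bitmap descriptor size from bytes
to bits (<< 3) before adding. A minimal sketch of the helper's semantics
(function name invented for illustration):

    #include <linux/overflow.h>
    #include <linux/types.h>

    static bool example_partition_fits(u32 start, u32 len)
    {
            u32 end;

            /* a true return means start + len wrapped past U32_MAX */
            if (check_add_overflow(start, len, &end))
                    return false;

            return true;
    }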
--- /dev/null
+From 879cbbf075589d9d94c2c5c403ae3fd9f56580a8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 3 Jul 2024 17:22:36 +0200
+Subject: um: line: always fill *error_out in setup_one_line()
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+[ Upstream commit 824ac4a5edd3f7494ab1996826c4f47f8ef0f63d ]
+
+The pointer isn't initialized by callers, but I have
+encountered cases where it's still printed; initialize
+it in all possible cases in setup_one_line().
+
+Link: https://patch.msgid.link/20240703172235.ad863568b55f.Iaa1eba4db8265d7715ba71d5f6bb8c7ff63d27e9@changeid
+Acked-By: Anton Ivanov <anton.ivanov@cambridgegreys.com>
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/um/drivers/line.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
+index d82bc3fdb86e..43d8959cc746 100644
+--- a/arch/um/drivers/line.c
++++ b/arch/um/drivers/line.c
+@@ -383,6 +383,7 @@ int setup_one_line(struct line *lines, int n, char *init,
+ parse_chan_pair(NULL, line, n, opts, error_out);
+ err = 0;
+ }
++ *error_out = "configured as 'none'";
+ } else {
+ char *new = kstrdup(init, GFP_KERNEL);
+ if (!new) {
+@@ -406,6 +407,7 @@ int setup_one_line(struct line *lines, int n, char *init,
+ }
+ }
+ if (err) {
++ *error_out = "failed to parse channel pair";
+ line->init_str = NULL;
+ line->valid = 0;
+ kfree(new);
+--
+2.43.0
+
--- /dev/null
+From 43e964f20671edd9bccf922786ff4fa45f6ae1e1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Jun 2024 10:23:06 +0800
+Subject: usb: gadget: aspeed_udc: validate endpoint index for ast udc
+
+From: Ma Ke <make24@iscas.ac.cn>
+
+[ Upstream commit ee0d382feb44ec0f445e2ad63786cd7f3f6a8199 ]
+
+We should verify the bounds of the array to ensure that the host
+cannot manipulate the index to point past the end of the endpoint array.
+
+Found by static analysis.
+
+Signed-off-by: Ma Ke <make24@iscas.ac.cn>
+Reviewed-by: Andrew Jeffery <andrew@codeconstruct.com.au>
+Acked-by: Andrew Jeffery <andrew@codeconstruct.com.au>
+Link: https://lore.kernel.org/r/20240625022306.2568122-1-make24@iscas.ac.cn
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/gadget/udc/aspeed_udc.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/usb/gadget/udc/aspeed_udc.c b/drivers/usb/gadget/udc/aspeed_udc.c
+index 821a6ab5da56..f4781e611aaa 100644
+--- a/drivers/usb/gadget/udc/aspeed_udc.c
++++ b/drivers/usb/gadget/udc/aspeed_udc.c
+@@ -1009,6 +1009,8 @@ static void ast_udc_getstatus(struct ast_udc_dev *udc)
+ break;
+ case USB_RECIP_ENDPOINT:
+ epnum = crq.wIndex & USB_ENDPOINT_NUMBER_MASK;
++ if (epnum >= AST_UDC_NUM_ENDPOINTS)
++ goto stall;
+ status = udc->ep[epnum].stopped;
+ break;
+ default:
+--
+2.43.0
+
--- /dev/null
+From 9fe71481857af6cd3e0c91a0500f6b6fb6251f4a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 6 Jun 2024 23:32:57 -0400
+Subject: usb: uas: set host status byte on data completion error
+
+From: Shantanu Goel <sgoel01@yahoo.com>
+
+[ Upstream commit 9d32685a251a754f1823d287df233716aa23bcb9 ]
+
+Set the host status byte when a data completion error is encountered
+otherwise the upper layer may end up using the invalid zero'ed data.
+The following output was observed from scsi/sd.c prior to this fix.
+
+[ 11.872824] sd 0:0:0:1: [sdf] tag#9 data cmplt err -75 uas-tag 1 inflight:
+[ 11.872826] sd 0:0:0:1: [sdf] tag#9 CDB: Read capacity(16) 9e 10 00 00 00 00 00 00 00 00 00 00 00 20 00 00
+[ 11.872830] sd 0:0:0:1: [sdf] Sector size 0 reported, assuming 512.
+
+Signed-off-by: Shantanu Goel <sgoel01@yahoo.com>
+Acked-by: Oliver Neukum <oneukum@suse.com>
+Link: https://lore.kernel.org/r/87msnx4ec6.fsf@yahoo.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/storage/uas.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
+index b610a2de4ae5..a04b4cb1382d 100644
+--- a/drivers/usb/storage/uas.c
++++ b/drivers/usb/storage/uas.c
+@@ -423,6 +423,7 @@ static void uas_data_cmplt(struct urb *urb)
+ uas_log_cmd_state(cmnd, "data cmplt err", status);
+ /* error: no data transfered */
+ scsi_set_resid(cmnd, sdb->length);
++ set_host_byte(cmnd, DID_ERROR);
+ } else {
+ scsi_set_resid(cmnd, sdb->length - urb->actual_length);
+ }
+--
+2.43.0
+
--- /dev/null
+From 98fa6490489bdaaef06204fa9bdc1c06760e8553 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Aug 2024 19:28:05 +0200
+Subject: usbnet: ipheth: race between ipheth_close and error handling
+
+From: Oliver Neukum <oneukum@suse.com>
+
+[ Upstream commit e5876b088ba03a62124266fa20d00e65533c7269 ]
+
+ipheth_sndbulk_callback() can submit carrier_work
+as a part of its error handling. That means that
+the driver must make sure that the work is cancelled
+only after it has made sure that no more URBs can
+terminate with an error condition.
+
+Hence the order of actions in ipheth_close() needs
+to be inverted.
+
+Signed-off-by: Oliver Neukum <oneukum@suse.com>
+Signed-off-by: Foster Snowhill <forst@pen.gy>
+Tested-by: Georgi Valkov <gvalkov@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/usb/ipheth.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
+index 687d70cfc556..6eeef10edada 100644
+--- a/drivers/net/usb/ipheth.c
++++ b/drivers/net/usb/ipheth.c
+@@ -475,8 +475,8 @@ static int ipheth_close(struct net_device *net)
+ {
+ struct ipheth_device *dev = netdev_priv(net);
+
+- cancel_delayed_work_sync(&dev->carrier_work);
+ netif_stop_queue(net);
++ cancel_delayed_work_sync(&dev->carrier_work);
+ return 0;
+ }
+
+--
+2.43.0
+
--- /dev/null
+From e97486ac5bedf2f48d07a1d8d260f5a2f4cf3af5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 29 Aug 2024 19:50:55 +0200
+Subject: usbnet: modern method to get random MAC
+
+From: Oliver Neukum <oneukum@suse.com>
+
+[ Upstream commit bab8eb0dd4cb995caa4a0529d5655531c2ec5e8e ]
+
+The driver generates a random MAC once on load
+and uses it over and over, including on two devices
+needing a random MAC at the same time.
+
+Jakub suggested revamping the driver to the modern
+API for setting a random MAC rather than fixing
+the old stuff.
+
+The bug is as old as the driver.
+
+Signed-off-by: Oliver Neukum <oneukum@suse.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Link: https://patch.msgid.link/20240829175201.670718-1-oneukum@suse.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/usb/usbnet.c | 11 +++--------
+ 1 file changed, 3 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+index 9fd516e8bb10..18eb5ba436df 100644
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -61,9 +61,6 @@
+
+ /*-------------------------------------------------------------------------*/
+
+-// randomly generated ethernet address
+-static u8 node_id [ETH_ALEN];
+-
+ /* use ethtool to change the level for any given device */
+ static int msg_level = -1;
+ module_param (msg_level, int, 0);
+@@ -1725,7 +1722,6 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
+
+ dev->net = net;
+ strscpy(net->name, "usb%d", sizeof(net->name));
+- eth_hw_addr_set(net, node_id);
+
+ /* rx and tx sides can use different message sizes;
+ * bind() should set rx_urb_size in that case.
+@@ -1801,9 +1797,9 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
+ goto out4;
+ }
+
+- /* let userspace know we have a random address */
+- if (ether_addr_equal(net->dev_addr, node_id))
+- net->addr_assign_type = NET_ADDR_RANDOM;
++ /* this flags the device for user space */
++ if (!is_valid_ether_addr(net->dev_addr))
++ eth_hw_addr_random(net);
+
+ if ((dev->driver_info->flags & FLAG_WLAN) != 0)
+ SET_NETDEV_DEVTYPE(net, &wlan_type);
+@@ -2211,7 +2207,6 @@ static int __init usbnet_init(void)
+ BUILD_BUG_ON(
+ sizeof_field(struct sk_buff, cb) < sizeof(struct skb_data));
+
+- eth_random_addr(node_id);
+ return 0;
+ }
+ module_init(usbnet_init);
+--
+2.43.0
+
--- /dev/null
+From e4ebdd7c2505102fb5a04eeb5b83f46dcb59a9d7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Jun 2024 12:38:58 +0000
+Subject: vfio/spapr: Always clear TCEs before unsetting the window
+
+From: Shivaprasad G Bhat <sbhat@linux.ibm.com>
+
+[ Upstream commit 4ba2fdff2eb174114786784926d0efb6903c88a6 ]
+
+The PAPR expects the TCE table to have no entries at the time of
+unset window (i.e. remove-pe). The TCE clear right now is done
+before freeing the iommu table. On pSeries, the unset window
+makes those entries inaccessible to the OS and the H_PUT/GET calls
+fail on them with H_CONSTRAINED.
+
+On PowerNV, this has no side effect as the TCE clear can be done
+before the DMA window removal as well.
+
+Signed-off-by: Shivaprasad G Bhat <sbhat@linux.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://msgid.link/171923273535.1397.1236742071894414895.stgit@linux.ibm.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/vfio/vfio_iommu_spapr_tce.c | 13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
+index a94ec6225d31..5f9e7e477078 100644
+--- a/drivers/vfio/vfio_iommu_spapr_tce.c
++++ b/drivers/vfio/vfio_iommu_spapr_tce.c
+@@ -364,7 +364,6 @@ static void tce_iommu_release(void *iommu_data)
+ if (!tbl)
+ continue;
+
+- tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
+ tce_iommu_free_table(container, tbl);
+ }
+
+@@ -720,6 +719,8 @@ static long tce_iommu_remove_window(struct tce_container *container,
+
+ BUG_ON(!tbl->it_size);
+
++ tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
++
+ /* Detach groups from IOMMUs */
+ list_for_each_entry(tcegrp, &container->group_list, next) {
+ table_group = iommu_group_get_iommudata(tcegrp->grp);
+@@ -738,7 +739,6 @@ static long tce_iommu_remove_window(struct tce_container *container,
+ }
+
+ /* Free table */
+- tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
+ tce_iommu_free_table(container, tbl);
+ container->tables[num] = NULL;
+
+@@ -1197,9 +1197,14 @@ static void tce_iommu_release_ownership(struct tce_container *container,
+ return;
+ }
+
+- for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
+- if (container->tables[i])
++ for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
++ if (container->tables[i]) {
++ tce_iommu_clear(container, container->tables[i],
++ container->tables[i]->it_offset,
++ container->tables[i]->it_size);
+ table_group->ops->unset_window(table_group, i);
++ }
++ }
+ }
+
+ static long tce_iommu_take_ownership(struct tce_container *container,
+--
+2.43.0
+
--- /dev/null
+From 4d888ed19d7c3d322575a0157c6b951a7dd4c626 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Jul 2024 09:59:54 +0100
+Subject: vfs: Fix potential circular locking through setxattr() and
+ removexattr()
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit c3a5e3e872f3688ae0dc57bb78ca633921d96a91 ]
+
+When using cachefiles, lockdep may emit something similar to the circular
+locking dependency notice below. The problem appears to stem from the
+following:
+
+ (1) Cachefiles manipulates xattrs on the files in its cache when called
+ from ->writepages().
+
+ (2) The setxattr() and removexattr() system call handlers get the name
+ (and value) from userspace after taking the sb_writers lock, putting
+ accesses of the vma->vm_lock and mm->mmap_lock inside of that.
+
+ (3) The afs filesystem uses a per-inode lock to prevent multiple
+ revalidation RPCs and in writeback vs truncate to prevent parallel
+ operations from deadlocking against the server on one side and local
+ page locks on the other.
+
+Fix this by moving the getting of the name and value in {set,remove}xattr()
+outside of the sb_writers lock. This also has the minor benefits that we
+don't need to reget these in the event of a retry and we never try to take
+the sb_writers lock in the event we can't pull the name and value into the
+kernel.
+
+Alternative approaches that might fix this include moving the dispatch of a
+write to the cache off to a workqueue or trying to do without the
+validation lock in afs. Note that this might also affect other filesystems
+that use netfslib and/or cachefiles.
+
+ ======================================================
+ WARNING: possible circular locking dependency detected
+ 6.10.0-build2+ #956 Not tainted
+ ------------------------------------------------------
+ fsstress/6050 is trying to acquire lock:
+ ffff888138fd82f0 (mapping.invalidate_lock#3){++++}-{3:3}, at: filemap_fault+0x26e/0x8b0
+
+ but task is already holding lock:
+ ffff888113f26d18 (&vma->vm_lock->lock){++++}-{3:3}, at: lock_vma_under_rcu+0x165/0x250
+
+ which lock already depends on the new lock.
+
+ the existing dependency chain (in reverse order) is:
+
+ -> #4 (&vma->vm_lock->lock){++++}-{3:3}:
+ __lock_acquire+0xaf0/0xd80
+ lock_acquire.part.0+0x103/0x280
+ down_write+0x3b/0x50
+ vma_start_write+0x6b/0xa0
+ vma_link+0xcc/0x140
+ insert_vm_struct+0xb7/0xf0
+ alloc_bprm+0x2c1/0x390
+ kernel_execve+0x65/0x1a0
+ call_usermodehelper_exec_async+0x14d/0x190
+ ret_from_fork+0x24/0x40
+ ret_from_fork_asm+0x1a/0x30
+
+ -> #3 (&mm->mmap_lock){++++}-{3:3}:
+ __lock_acquire+0xaf0/0xd80
+ lock_acquire.part.0+0x103/0x280
+ __might_fault+0x7c/0xb0
+ strncpy_from_user+0x25/0x160
+ removexattr+0x7f/0x100
+ __do_sys_fremovexattr+0x7e/0xb0
+ do_syscall_64+0x9f/0x100
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+ -> #2 (sb_writers#14){.+.+}-{0:0}:
+ __lock_acquire+0xaf0/0xd80
+ lock_acquire.part.0+0x103/0x280
+ percpu_down_read+0x3c/0x90
+ vfs_iocb_iter_write+0xe9/0x1d0
+ __cachefiles_write+0x367/0x430
+ cachefiles_issue_write+0x299/0x2f0
+ netfs_advance_write+0x117/0x140
+ netfs_write_folio.isra.0+0x5ca/0x6e0
+ netfs_writepages+0x230/0x2f0
+ afs_writepages+0x4d/0x70
+ do_writepages+0x1e8/0x3e0
+ filemap_fdatawrite_wbc+0x84/0xa0
+ __filemap_fdatawrite_range+0xa8/0xf0
+ file_write_and_wait_range+0x59/0x90
+ afs_release+0x10f/0x270
+ __fput+0x25f/0x3d0
+ __do_sys_close+0x43/0x70
+ do_syscall_64+0x9f/0x100
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+ -> #1 (&vnode->validate_lock){++++}-{3:3}:
+ __lock_acquire+0xaf0/0xd80
+ lock_acquire.part.0+0x103/0x280
+ down_read+0x95/0x200
+ afs_writepages+0x37/0x70
+ do_writepages+0x1e8/0x3e0
+ filemap_fdatawrite_wbc+0x84/0xa0
+ filemap_invalidate_inode+0x167/0x1e0
+ netfs_unbuffered_write_iter+0x1bd/0x2d0
+ vfs_write+0x22e/0x320
+ ksys_write+0xbc/0x130
+ do_syscall_64+0x9f/0x100
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+ -> #0 (mapping.invalidate_lock#3){++++}-{3:3}:
+ check_noncircular+0x119/0x160
+ check_prev_add+0x195/0x430
+ __lock_acquire+0xaf0/0xd80
+ lock_acquire.part.0+0x103/0x280
+ down_read+0x95/0x200
+ filemap_fault+0x26e/0x8b0
+ __do_fault+0x57/0xd0
+ do_pte_missing+0x23b/0x320
+ __handle_mm_fault+0x2d4/0x320
+ handle_mm_fault+0x14f/0x260
+ do_user_addr_fault+0x2a2/0x500
+ exc_page_fault+0x71/0x90
+ asm_exc_page_fault+0x22/0x30
+
+ other info that might help us debug this:
+
+ Chain exists of:
+ mapping.invalidate_lock#3 --> &mm->mmap_lock --> &vma->vm_lock->lock
+
+ Possible unsafe locking scenario:
+
+ CPU0 CPU1
+ ---- ----
+ rlock(&vma->vm_lock->lock);
+ lock(&mm->mmap_lock);
+ lock(&vma->vm_lock->lock);
+ rlock(mapping.invalidate_lock#3);
+
+ *** DEADLOCK ***
+
+ 1 lock held by fsstress/6050:
+ #0: ffff888113f26d18 (&vma->vm_lock->lock){++++}-{3:3}, at: lock_vma_under_rcu+0x165/0x250
+
+ stack backtrace:
+ CPU: 0 PID: 6050 Comm: fsstress Not tainted 6.10.0-build2+ #956
+ Hardware name: ASUS All Series/H97-PLUS, BIOS 2306 10/09/2014
+ Call Trace:
+ <TASK>
+ dump_stack_lvl+0x57/0x80
+ check_noncircular+0x119/0x160
+ ? queued_spin_lock_slowpath+0x4be/0x510
+ ? __pfx_check_noncircular+0x10/0x10
+ ? __pfx_queued_spin_lock_slowpath+0x10/0x10
+ ? mark_lock+0x47/0x160
+ ? init_chain_block+0x9c/0xc0
+ ? add_chain_block+0x84/0xf0
+ check_prev_add+0x195/0x430
+ __lock_acquire+0xaf0/0xd80
+ ? __pfx___lock_acquire+0x10/0x10
+ ? __lock_release.isra.0+0x13b/0x230
+ lock_acquire.part.0+0x103/0x280
+ ? filemap_fault+0x26e/0x8b0
+ ? __pfx_lock_acquire.part.0+0x10/0x10
+ ? rcu_is_watching+0x34/0x60
+ ? lock_acquire+0xd7/0x120
+ down_read+0x95/0x200
+ ? filemap_fault+0x26e/0x8b0
+ ? __pfx_down_read+0x10/0x10
+ ? __filemap_get_folio+0x25/0x1a0
+ filemap_fault+0x26e/0x8b0
+ ? __pfx_filemap_fault+0x10/0x10
+ ? find_held_lock+0x7c/0x90
+ ? __pfx___lock_release.isra.0+0x10/0x10
+ ? __pte_offset_map+0x99/0x110
+ __do_fault+0x57/0xd0
+ do_pte_missing+0x23b/0x320
+ __handle_mm_fault+0x2d4/0x320
+ ? __pfx___handle_mm_fault+0x10/0x10
+ handle_mm_fault+0x14f/0x260
+ do_user_addr_fault+0x2a2/0x500
+ exc_page_fault+0x71/0x90
+ asm_exc_page_fault+0x22/0x30
+
+Signed-off-by: David Howells <dhowells@redhat.com>
+Link: https://lore.kernel.org/r/2136178.1721725194@warthog.procyon.org.uk
+cc: Alexander Viro <viro@zeniv.linux.org.uk>
+cc: Christian Brauner <brauner@kernel.org>
+cc: Jan Kara <jack@suse.cz>
+cc: Jeff Layton <jlayton@kernel.org>
+cc: Gao Xiang <xiang@kernel.org>
+cc: Matthew Wilcox <willy@infradead.org>
+cc: netfs@lists.linux.dev
+cc: linux-erofs@lists.ozlabs.org
+cc: linux-fsdevel@vger.kernel.org
+[brauner: fix minor issues]
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/xattr.c | 91 ++++++++++++++++++++++++++++--------------------------
+ 1 file changed, 48 insertions(+), 43 deletions(-)
+
+diff --git a/fs/xattr.c b/fs/xattr.c
+index f8b643f91a98..7672ce5486c5 100644
+--- a/fs/xattr.c
++++ b/fs/xattr.c
+@@ -630,10 +630,9 @@ int do_setxattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ ctx->kvalue, ctx->size, ctx->flags);
+ }
+
+-static long
+-setxattr(struct mnt_idmap *idmap, struct dentry *d,
+- const char __user *name, const void __user *value, size_t size,
+- int flags)
++static int path_setxattr(const char __user *pathname,
++ const char __user *name, const void __user *value,
++ size_t size, int flags, unsigned int lookup_flags)
+ {
+ struct xattr_name kname;
+ struct xattr_ctx ctx = {
+@@ -643,33 +642,20 @@ setxattr(struct mnt_idmap *idmap, struct dentry *d,
+ .kname = &kname,
+ .flags = flags,
+ };
++ struct path path;
+ int error;
+
+ error = setxattr_copy(name, &ctx);
+ if (error)
+ return error;
+
+- error = do_setxattr(idmap, d, &ctx);
+-
+- kvfree(ctx.kvalue);
+- return error;
+-}
+-
+-static int path_setxattr(const char __user *pathname,
+- const char __user *name, const void __user *value,
+- size_t size, int flags, unsigned int lookup_flags)
+-{
+- struct path path;
+- int error;
+-
+ retry:
+ error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path);
+ if (error)
+- return error;
++ goto out;
+ error = mnt_want_write(path.mnt);
+ if (!error) {
+- error = setxattr(mnt_idmap(path.mnt), path.dentry, name,
+- value, size, flags);
++ error = do_setxattr(mnt_idmap(path.mnt), path.dentry, &ctx);
+ mnt_drop_write(path.mnt);
+ }
+ path_put(&path);
+@@ -677,6 +663,9 @@ static int path_setxattr(const char __user *pathname,
+ lookup_flags |= LOOKUP_REVAL;
+ goto retry;
+ }
++
++out:
++ kvfree(ctx.kvalue);
+ return error;
+ }
+
+@@ -697,20 +686,32 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
+ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
+ const void __user *,value, size_t, size, int, flags)
+ {
+- struct fd f = fdget(fd);
+- int error = -EBADF;
++ struct xattr_name kname;
++ struct xattr_ctx ctx = {
++ .cvalue = value,
++ .kvalue = NULL,
++ .size = size,
++ .kname = &kname,
++ .flags = flags,
++ };
++ int error;
+
++ CLASS(fd, f)(fd);
+ if (!f.file)
+- return error;
++ return -EBADF;
++
+ audit_file(f.file);
++ error = setxattr_copy(name, &ctx);
++ if (error)
++ return error;
++
+ error = mnt_want_write_file(f.file);
+ if (!error) {
+- error = setxattr(file_mnt_idmap(f.file),
+- f.file->f_path.dentry, name,
+- value, size, flags);
++ error = do_setxattr(file_mnt_idmap(f.file),
++ f.file->f_path.dentry, &ctx);
+ mnt_drop_write_file(f.file);
+ }
+- fdput(f);
++ kvfree(ctx.kvalue);
+ return error;
+ }
+
+@@ -899,9 +900,17 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
+ * Extended attribute REMOVE operations
+ */
+ static long
+-removexattr(struct mnt_idmap *idmap, struct dentry *d,
+- const char __user *name)
++removexattr(struct mnt_idmap *idmap, struct dentry *d, const char *name)
+ {
++ if (is_posix_acl_xattr(name))
++ return vfs_remove_acl(idmap, d, name);
++ return vfs_removexattr(idmap, d, name);
++}
++
++static int path_removexattr(const char __user *pathname,
++ const char __user *name, unsigned int lookup_flags)
++{
++ struct path path;
+ int error;
+ char kname[XATTR_NAME_MAX + 1];
+
+@@ -910,25 +919,13 @@ removexattr(struct mnt_idmap *idmap, struct dentry *d,
+ error = -ERANGE;
+ if (error < 0)
+ return error;
+-
+- if (is_posix_acl_xattr(kname))
+- return vfs_remove_acl(idmap, d, kname);
+-
+- return vfs_removexattr(idmap, d, kname);
+-}
+-
+-static int path_removexattr(const char __user *pathname,
+- const char __user *name, unsigned int lookup_flags)
+-{
+- struct path path;
+- int error;
+ retry:
+ error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path);
+ if (error)
+ return error;
+ error = mnt_want_write(path.mnt);
+ if (!error) {
+- error = removexattr(mnt_idmap(path.mnt), path.dentry, name);
++ error = removexattr(mnt_idmap(path.mnt), path.dentry, kname);
+ mnt_drop_write(path.mnt);
+ }
+ path_put(&path);
+@@ -954,15 +951,23 @@ SYSCALL_DEFINE2(lremovexattr, const char __user *, pathname,
+ SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
+ {
+ struct fd f = fdget(fd);
++ char kname[XATTR_NAME_MAX + 1];
+ int error = -EBADF;
+
+ if (!f.file)
+ return error;
+ audit_file(f.file);
++
++ error = strncpy_from_user(kname, name, sizeof(kname));
++ if (error == 0 || error == sizeof(kname))
++ error = -ERANGE;
++ if (error < 0)
++ return error;
++
+ error = mnt_want_write_file(f.file);
+ if (!error) {
+ error = removexattr(file_mnt_idmap(f.file),
+- f.file->f_path.dentry, name);
++ f.file->f_path.dentry, kname);
+ mnt_drop_write_file(f.file);
+ }
+ fdput(f);
+--
+2.43.0
+
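+With this reorganisation the user-supplied attribute name (and, for
+setxattr, the value) is copied from userspace before
+mnt_want_write()/mnt_want_write_file() is called, so the faultable copy
+no longer runs with sb_writers held. A minimal sketch of that
+copy-before-lock shape follows; it is illustrative only, not the
+upstream code: my_removexattr_example() is a hypothetical helper and
+the ESTALE retry loop is omitted.
+
+#include <linux/fcntl.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <linux/uaccess.h>
+#include <linux/xattr.h>
+
+/* Hypothetical helper: copy the attribute name from userspace first,
+ * with no filesystem locks held, and only then take sb_writers via
+ * mnt_want_write().
+ */
+static int my_removexattr_example(const char __user *pathname,
+				  const char __user *name)
+{
+	char kname[XATTR_NAME_MAX + 1];
+	struct path path;
+	int error;
+
+	/* Faultable copy: may take mmap_lock, but no fs locks are held. */
+	error = strncpy_from_user(kname, name, sizeof(kname));
+	if (error == 0 || error == sizeof(kname))
+		error = -ERANGE;
+	if (error < 0)
+		return error;
+
+	error = user_path_at(AT_FDCWD, pathname, LOOKUP_FOLLOW, &path);
+	if (error)
+		return error;
+
+	error = mnt_want_write(path.mnt);	/* takes sb_writers */
+	if (!error) {
+		error = vfs_removexattr(mnt_idmap(path.mnt), path.dentry,
+					kname);
+		mnt_drop_write(path.mnt);
+	}
+	path_put(&path);
+	return error;
+}
+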
--- /dev/null
+From 8f1b0c91e4c6ca2f737e500466530fd596afd085 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 29 Mar 2024 22:54:41 +0100
+Subject: virt: sev-guest: Mark driver struct with __refdata to prevent section
+ mismatch
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+
+[ Upstream commit 3991b04d4870fd334b77b859a8642ca7fb592603 ]
+
+As described in the added code comment, a reference to .exit.text is ok for
+drivers registered via module_platform_driver_probe(). Make this explicit to
+prevent the following section mismatch warning:
+
+ WARNING: modpost: drivers/virt/coco/sev-guest/sev-guest: section mismatch in reference: \
+ sev_guest_driver+0x10 (section: .data) -> sev_guest_remove (section: .exit.text)
+
+that triggers on an allmodconfig W=1 build.
+
+Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Reviewed-by: Kuppuswamy Sathyanarayanan <sathyanarayanan.kuppuswamy@linux.intel.com>
+Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
+Link: https://lore.kernel.org/r/4a81b0e87728a58904283e2d1f18f73abc69c2a1.1711748999.git.u.kleine-koenig@pengutronix.de
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/virt/coco/sev-guest/sev-guest.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c
+index 654290a8e1ba..a100d6241992 100644
+--- a/drivers/virt/coco/sev-guest/sev-guest.c
++++ b/drivers/virt/coco/sev-guest/sev-guest.c
+@@ -1009,8 +1009,13 @@ static void __exit sev_guest_remove(struct platform_device *pdev)
+ * This driver is meant to be a common SEV guest interface driver and to
+ * support any SEV guest API. As such, even though it has been introduced
+ * with the SEV-SNP support, it is named "sev-guest".
++ *
++ * sev_guest_remove() lives in .exit.text. For drivers registered via
++ * module_platform_driver_probe() this is ok because they cannot get unbound
++ * at runtime. So mark the driver struct with __refdata to prevent modpost
++ * triggering a section mismatch warning.
+ */
+-static struct platform_driver sev_guest_driver = {
++static struct platform_driver sev_guest_driver __refdata = {
+ .remove_new = __exit_p(sev_guest_remove),
+ .driver = {
+ .name = "sev-guest",
+--
+2.43.0
+
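+The only functional content of the change is the __refdata annotation:
+it tells modpost that the .data -> .exit.text reference from the driver
+struct is intentional, which is safe here because a driver registered
+with module_platform_driver_probe() can never be unbound at runtime. A
+minimal sketch of the same pattern with a made-up "foo" driver
+(foo_probe()/foo_remove() are hypothetical names):
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+static int __init foo_probe(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static void __exit foo_remove(struct platform_device *pdev)
+{
+	/* teardown */
+}
+
+/*
+ * foo_remove() lives in .exit.text, which a plain .data reference would
+ * make modpost complain about; __refdata documents that the reference
+ * is intentional and safe because a module_platform_driver_probe()
+ * driver cannot be unbound at runtime.
+ */
+static struct platform_driver foo_driver __refdata = {
+	.remove_new = __exit_p(foo_remove),
+	.driver = {
+		.name = "foo",
+	},
+};
+module_platform_driver_probe(foo_driver, foo_probe);
+
+MODULE_DESCRIPTION("__refdata example");
+MODULE_LICENSE("GPL");
+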
--- /dev/null
+From 9c93b8db4e8f1e677b27df4d7d286ef6dac11e29 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 6 Jun 2024 19:13:45 +0800
+Subject: virtio_ring: fix KMSAN error for premapped mode
+
+From: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
+
+[ Upstream commit 840b2d39a2dc1b96deb3f5c7fef76c9b24f08f51 ]
+
+Add a KMSAN annotation to virtqueue_dma_map_single_attrs() to fix:
+
+BUG: KMSAN: uninit-value in receive_buf+0x45ca/0x6990
+ receive_buf+0x45ca/0x6990
+ virtnet_poll+0x17e0/0x3130
+ net_rx_action+0x832/0x26e0
+ handle_softirqs+0x330/0x10f0
+ [...]
+
+Uninit was created at:
+ __alloc_pages_noprof+0x62a/0xe60
+ alloc_pages_noprof+0x392/0x830
+ skb_page_frag_refill+0x21a/0x5c0
+ virtnet_rq_alloc+0x50/0x1500
+ try_fill_recv+0x372/0x54c0
+ virtnet_open+0x210/0xbe0
+ __dev_open+0x56e/0x920
+ __dev_change_flags+0x39c/0x2000
+ dev_change_flags+0xaa/0x200
+ do_setlink+0x197a/0x7420
+ rtnl_setlink+0x77c/0x860
+ [...]
+
+Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
+Tested-by: Alexander Potapenko <glider@google.com>
+Message-Id: <20240606111345.93600-1-xuanzhuo@linux.alibaba.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Tested-by: Ilya Leoshkevich <iii@linux.ibm.com> # s390x
+Acked-by: Jason Wang <jasowang@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/virtio/virtio_ring.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
+index 2a972752ff1b..9d3a9942c8c8 100644
+--- a/drivers/virtio/virtio_ring.c
++++ b/drivers/virtio/virtio_ring.c
+@@ -3121,8 +3121,10 @@ dma_addr_t virtqueue_dma_map_single_attrs(struct virtqueue *_vq, void *ptr,
+ {
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+- if (!vq->use_dma_api)
++ if (!vq->use_dma_api) {
++ kmsan_handle_dma(virt_to_page(ptr), offset_in_page(ptr), size, dir);
+ return (dma_addr_t)virt_to_phys(ptr);
++ }
+
+ return dma_map_single_attrs(vring_dma_dev(vq), ptr, size, dir, attrs);
+ }
+--
+2.43.0
+
--- /dev/null
+From ead407374398a1c96812ccc8f1ebc8861624baab Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 3 Jul 2024 13:16:03 +0200
+Subject: watchdog: imx7ulp_wdt: keep already running watchdog enabled
+
+From: Sascha Hauer <s.hauer@pengutronix.de>
+
+[ Upstream commit b771d14f417e9d8030ab000b3341cf71266be90e ]
+
+When the bootloader has enabled the watchdog before the kernel starts,
+keep it enabled during initialization. Otherwise the time between the
+watchdog being probed and userspace taking over the watchdog is not
+covered by the watchdog. When keeping the watchdog enabled, inform the
+kernel about this by setting the WDOG_HW_RUNNING flag so that the
+periodic watchdog feeder is started when needed.
+
+Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
+Reviewed-by: Guenter Roeck <linux@roeck-us.net>
+Link: https://lore.kernel.org/r/20240703111603.1096424-1-s.hauer@pengutronix.de
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Wim Van Sebroeck <wim@linux-watchdog.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/watchdog/imx7ulp_wdt.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/watchdog/imx7ulp_wdt.c b/drivers/watchdog/imx7ulp_wdt.c
+index b21d7a74a42d..94914a22daff 100644
+--- a/drivers/watchdog/imx7ulp_wdt.c
++++ b/drivers/watchdog/imx7ulp_wdt.c
+@@ -290,6 +290,11 @@ static int imx7ulp_wdt_init(struct imx7ulp_wdt_device *wdt, unsigned int timeout
+ if (wdt->ext_reset)
+ val |= WDOG_CS_INT_EN;
+
++ if (readl(wdt->base + WDOG_CS) & WDOG_CS_EN) {
++ set_bit(WDOG_HW_RUNNING, &wdt->wdd.status);
++ val |= WDOG_CS_EN;
++ }
++
+ do {
+ ret = _imx7ulp_wdt_init(wdt, timeout, val);
+ toval = readl(wdt->base + WDOG_TOVAL);
+--
+2.43.0
+
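+WDOG_HW_RUNNING is the watchdog core's way of learning that the
+hardware timer is already ticking; with the bit set, the core feeds the
+watchdog itself until userspace opens the device. A minimal sketch of
+the detection step, using made-up MYWDT_* register definitions rather
+than the real i.MX7ULP layout:
+
+#include <linux/bits.h>
+#include <linux/io.h>
+#include <linux/watchdog.h>
+
+/* MYWDT_* are hypothetical register definitions for illustration. */
+#define MYWDT_CS	0x00
+#define MYWDT_CS_EN	BIT(7)
+
+static void mywdt_detect_running(struct watchdog_device *wdd,
+				 void __iomem *base)
+{
+	/* If the bootloader left the watchdog running, tell the core so
+	 * it keeps feeding the hardware until userspace takes over.
+	 */
+	if (readl(base + MYWDT_CS) & MYWDT_CS_EN)
+		set_bit(WDOG_HW_RUNNING, &wdd->status);
+}
+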
--- /dev/null
+From 461bab67ef0d6d721f206bdb84dfa4d36ffeec43 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 13 Jun 2024 11:05:28 +0530
+Subject: wifi: ath12k: fix firmware crash due to invalid peer nss
+
+From: Ajith C <quic_ajithc@quicinc.com>
+
+[ Upstream commit db163a463bb93cd3e37e1e7b10b9726fb6f95857 ]
+
+Currently, if the access point receives an association
+request containing an Extended HE Capabilities Information
+Element with an invalid MCS-NSS, it triggers a firmware
+crash.
+
+This issue arises when the EHT-PHY capabilities indicate support
+for a bandwidth while the MCS-NSS set for that particular
+bandwidth is filled with zeros. The driver then derives a
+peer_nss of 0, and sending this value to the firmware causes the
+crash.
+
+Address this issue by implementing a validation step for
+the peer_nss value before passing it to the firmware. If
+the value is greater than zero, proceed with forwarding
+it to the firmware. However, if the value is invalid,
+reject the association request to prevent potential
+firmware crashes.
+
+Tested-on: QCN9274 hw2.0 PCI WLAN.WBE.1.0.1-00029-QCAHKSWPL_SILICONZ-1
+
+Signed-off-by: Ajith C <quic_ajithc@quicinc.com>
+Acked-by: Jeff Johnson <quic_jjohnson@quicinc.com>
+Signed-off-by: Kalle Valo <quic_kvalo@quicinc.com>
+Link: https://patch.msgid.link/20240613053528.2541645-1-quic_ajithc@quicinc.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/ath/ath12k/mac.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
+index 71b4ec7717d5..7037004ce977 100644
+--- a/drivers/net/wireless/ath/ath12k/mac.c
++++ b/drivers/net/wireless/ath/ath12k/mac.c
+@@ -3847,6 +3847,11 @@ static int ath12k_station_assoc(struct ath12k *ar,
+
+ ath12k_peer_assoc_prepare(ar, vif, sta, &peer_arg, reassoc);
+
++ if (peer_arg.peer_nss < 1) {
++ ath12k_warn(ar->ab,
++ "invalid peer NSS %d\n", peer_arg.peer_nss);
++ return -EINVAL;
++ }
+ ret = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
+--
+2.43.0
+
--- /dev/null
+From 1697c2d045618c2f2c543d9ab5b05937d1450045 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 Jun 2024 08:40:17 +0530
+Subject: wifi: ath12k: fix uninitialized symbol error on
+ ath12k_peer_assoc_h_he()
+
+From: Aaradhana Sahu <quic_aarasahu@quicinc.com>
+
+[ Upstream commit 19b77e7c656a3e125319cc3ef347b397cf042bf6 ]
+
+Smatch throws following errors
+
+drivers/net/wireless/ath/ath12k/mac.c:1922 ath12k_peer_assoc_h_he() error: uninitialized symbol 'rx_mcs_80'.
+drivers/net/wireless/ath/ath12k/mac.c:1922 ath12k_peer_assoc_h_he() error: uninitialized symbol 'rx_mcs_160'.
+drivers/net/wireless/ath/ath12k/mac.c:1924 ath12k_peer_assoc_h_he() error: uninitialized symbol 'rx_mcs_80'.
+
+In ath12k_peer_assoc_h_he() the rx_mcs_80 and rx_mcs_160 variables
+remain uninitialized under the following conditions:
+1. Whenever mcs_80 is equal to IEEE80211_HE_MCS_NOT_SUPPORTED,
+   rx_mcs_80 remains uninitialized.
+2. Whenever the PHY capability does not support 160 MHz channel width
+   and mcs_160 is equal to IEEE80211_HE_MCS_NOT_SUPPORTED,
+   rx_mcs_160 remains uninitialized.
+
+Initialize these variables during declaration.
+
+Tested-on: QCN9274 hw2.0 PCI WLAN.WBE.1.1.1-00188-QCAHKSWPL_SILICONZ-1
+
+Signed-off-by: Aaradhana Sahu <quic_aarasahu@quicinc.com>
+Acked-by: Jeff Johnson <quic_jjohnson@quicinc.com>
+Signed-off-by: Kalle Valo <quic_kvalo@quicinc.com>
+Link: https://patch.msgid.link/20240611031017.297927-3-quic_aarasahu@quicinc.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/ath/ath12k/mac.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
+index 8474e25d2ac6..71b4ec7717d5 100644
+--- a/drivers/net/wireless/ath/ath12k/mac.c
++++ b/drivers/net/wireless/ath/ath12k/mac.c
+@@ -1881,7 +1881,9 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar,
+ {
+ const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
+ int i;
+- u8 ampdu_factor, rx_mcs_80, rx_mcs_160, max_nss;
++ u8 ampdu_factor, max_nss;
++ u8 rx_mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED;
++ u8 rx_mcs_160 = IEEE80211_HE_MCS_NOT_SUPPORTED;
+ u16 mcs_160_map, mcs_80_map;
+ bool support_160;
+ u16 v;
+--
+2.43.0
+
--- /dev/null
+From 46a7728f57339310bfefc48ef6d255c0406a6eb1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 17 Jun 2024 14:26:09 +0200
+Subject: wifi: brcmsmac: advertise MFP_CAPABLE to enable WPA3
+
+From: Arend van Spriel <arend.vanspriel@broadcom.com>
+
+[ Upstream commit dbb5265a5d7cca1cdba7736dba313ab7d07bc19d ]
+
+After being asked about WPA3 support for the BCM43224 chipset, it
+was found that all it takes is setting the MFP_CAPABLE flag;
+mac80211 will take care of everything else that is needed [1].
+
+Link: https://lore.kernel.org/linux-wireless/20200526155909.5807-2-Larry.Finger@lwfinger.net/ [1]
+Signed-off-by: Arend van Spriel <arend.vanspriel@broadcom.com>
+Tested-by: Reijer Boekhoff <reijerboekhoff@protonmail.com>
+Signed-off-by: Kalle Valo <kvalo@kernel.org>
+Link: https://patch.msgid.link/20240617122609.349582-1-arend.vanspriel@broadcom.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+index 92860dc0a92e..676604cb5a22 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+@@ -1090,6 +1090,7 @@ static int ieee_hw_init(struct ieee80211_hw *hw)
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
++ ieee80211_hw_set(hw, MFP_CAPABLE);
+
+ hw->extra_tx_headroom = brcms_c_get_header_len();
+ hw->queues = N_TX_QUEUES;
+--
+2.43.0
+
--- /dev/null
+From 9ecd02e2202c521fa81fc74a9c68e5394a15e025 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Jun 2024 19:51:09 +0300
+Subject: wifi: iwlwifi: mvm: use IWL_FW_CHECK for link ID check
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+[ Upstream commit 9215152677d4b321801a92b06f6d5248b2b4465f ]
+
+The lookup function iwl_mvm_rcu_fw_link_id_to_link_conf() is
+normally called with input from the firmware, so it should use
+IWL_FW_CHECK() instead of WARN_ON().
+
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Miri Korenblit <miriam.rachel.korenblit@intel.com>
+Link: https://patch.msgid.link/20240625194805.4ea8fb7c47d4.I1c22af213f97f69bfc14674502511c1bc504adfb@changeid
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+index ded094b6b63d..bc40242aaadd 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+@@ -1442,7 +1442,8 @@ iwl_mvm_rcu_dereference_vif_id(struct iwl_mvm *mvm, u8 vif_id, bool rcu)
+ static inline struct ieee80211_bss_conf *
+ iwl_mvm_rcu_fw_link_id_to_link_conf(struct iwl_mvm *mvm, u8 link_id, bool rcu)
+ {
+- if (WARN_ON(link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf)))
++ if (IWL_FW_CHECK(mvm, link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf),
++ "erroneous FW link ID: %d\n", link_id))
+ return NULL;
+
+ if (rcu)
+--
+2.43.0
+
--- /dev/null
+From c641fc7157397fce5f8a3e6468862f11cb36bcbb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 12 Aug 2024 12:45:41 +0200
+Subject: wifi: mt76: mt7921: fix NULL pointer access in
+ mt7921_ipv6_addr_change
+
+From: Bert Karwatzki <spasswolf@web.de>
+
+[ Upstream commit 479ffee68d59c599f8aed8fa2dcc8e13e7bd13c3 ]
+
+When wifi is disabled, mt7921_ipv6_addr_change() is called as a notifier.
+At this point mvif->phy is already NULL, so it cannot be used here.
+
+Signed-off-by: Bert Karwatzki <spasswolf@web.de>
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+Signed-off-by: Kalle Valo <kvalo@kernel.org>
+Link: https://patch.msgid.link/20240812104542.80760-1-spasswolf@web.de
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/mediatek/mt76/mt7921/main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+index 3e3ad3518d85..cca7132ed6ab 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+@@ -1182,7 +1182,7 @@ static void mt7921_ipv6_addr_change(struct ieee80211_hw *hw,
+ struct inet6_dev *idev)
+ {
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+- struct mt792x_dev *dev = mvif->phy->dev;
++ struct mt792x_dev *dev = mt792x_hw_dev(hw);
+ struct inet6_ifaddr *ifa;
+ struct in6_addr ns_addrs[IEEE80211_BSS_ARP_ADDR_LIST_LEN];
+ struct sk_buff *skb;
+--
+2.43.0
+
--- /dev/null
+From 44d4b3d89f87af11520dc59bb829978c5dd2f941 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 3 Jul 2024 09:24:09 +0200
+Subject: wifi: mwifiex: Do not return unused priv in mwifiex_get_priv_by_id()
+
+From: Sascha Hauer <s.hauer@pengutronix.de>
+
+[ Upstream commit c145eea2f75ff7949392aebecf7ef0a81c1f6c14 ]
+
+mwifiex_get_priv_by_id() returns the priv pointer corresponding to
+the bss_num and bss_type, but without checking whether that priv is
+actually currently in use.
+Unused priv pointers do not have a wiphy attached to them, which can
+lead to NULL pointer dereferences further down the call stack. Fix
+this by returning only in-use priv pointers, i.e. those whose
+priv->bss_mode is set to something other than NL80211_IFTYPE_UNSPECIFIED.
+
+Said NULL pointer dereference happened when an access point was started
+with wpa_supplicant -i mlan0 with this config:
+
+network={
+ ssid="somessid"
+ mode=2
+ frequency=2412
+ key_mgmt=WPA-PSK WPA-PSK-SHA256
+ proto=RSN
+ group=CCMP
+ pairwise=CCMP
+ psk="12345678"
+}
+
+When waiting for the AP to be established, interrupting wpa_supplicant
+with <ctrl-c> and starting it again this happens:
+
+| Unable to handle kernel NULL pointer dereference at virtual address 0000000000000140
+| Mem abort info:
+| ESR = 0x0000000096000004
+| EC = 0x25: DABT (current EL), IL = 32 bits
+| SET = 0, FnV = 0
+| EA = 0, S1PTW = 0
+| FSC = 0x04: level 0 translation fault
+| Data abort info:
+| ISV = 0, ISS = 0x00000004, ISS2 = 0x00000000
+| CM = 0, WnR = 0, TnD = 0, TagAccess = 0
+| GCS = 0, Overlay = 0, DirtyBit = 0, Xs = 0
+| user pgtable: 4k pages, 48-bit VAs, pgdp=0000000046d96000
+| [0000000000000140] pgd=0000000000000000, p4d=0000000000000000
+| Internal error: Oops: 0000000096000004 [#1] PREEMPT SMP
+| Modules linked in: caam_jr caamhash_desc spidev caamalg_desc crypto_engine authenc libdes mwifiex_sdio
++mwifiex crct10dif_ce cdc_acm onboard_usb_hub fsl_imx8_ddr_perf imx8m_ddrc rtc_ds1307 lm75 rtc_snvs
++imx_sdma caam imx8mm_thermal spi_imx error imx_cpufreq_dt fuse ip_tables x_tables ipv6
+| CPU: 0 PID: 8 Comm: kworker/0:1 Not tainted 6.9.0-00007-g937242013fce-dirty #18
+| Hardware name: somemachine (DT)
+| Workqueue: events sdio_irq_work
+| pstate: 00000005 (nzcv daif -PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+| pc : mwifiex_get_cfp+0xd8/0x15c [mwifiex]
+| lr : mwifiex_get_cfp+0x34/0x15c [mwifiex]
+| sp : ffff8000818b3a70
+| x29: ffff8000818b3a70 x28: ffff000006bfd8a5 x27: 0000000000000004
+| x26: 000000000000002c x25: 0000000000001511 x24: 0000000002e86bc9
+| x23: ffff000006bfd996 x22: 0000000000000004 x21: ffff000007bec000
+| x20: 000000000000002c x19: 0000000000000000 x18: 0000000000000000
+| x17: 000000040044ffff x16: 00500072b5503510 x15: ccc283740681e517
+| x14: 0201000101006d15 x13: 0000000002e8ff43 x12: 002c01000000ffb1
+| x11: 0100000000000000 x10: 02e8ff43002c0100 x9 : 0000ffb100100157
+| x8 : ffff000003d20000 x7 : 00000000000002f1 x6 : 00000000ffffe124
+| x5 : 0000000000000001 x4 : 0000000000000003 x3 : 0000000000000000
+| x2 : 0000000000000000 x1 : 0001000000011001 x0 : 0000000000000000
+| Call trace:
+| mwifiex_get_cfp+0xd8/0x15c [mwifiex]
+| mwifiex_parse_single_response_buf+0x1d0/0x504 [mwifiex]
+| mwifiex_handle_event_ext_scan_report+0x19c/0x2f8 [mwifiex]
+| mwifiex_process_sta_event+0x298/0xf0c [mwifiex]
+| mwifiex_process_event+0x110/0x238 [mwifiex]
+| mwifiex_main_process+0x428/0xa44 [mwifiex]
+| mwifiex_sdio_interrupt+0x64/0x12c [mwifiex_sdio]
+| process_sdio_pending_irqs+0x64/0x1b8
+| sdio_irq_work+0x4c/0x7c
+| process_one_work+0x148/0x2a0
+| worker_thread+0x2fc/0x40c
+| kthread+0x110/0x114
+| ret_from_fork+0x10/0x20
+| Code: a94153f3 a8c37bfd d50323bf d65f03c0 (f940a000)
+| ---[ end trace 0000000000000000 ]---
+
+Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
+Acked-by: Brian Norris <briannorris@chromium.org>
+Reviewed-by: Francesco Dolcini <francesco.dolcini@toradex.com>
+Signed-off-by: Kalle Valo <kvalo@kernel.org>
+Link: https://patch.msgid.link/20240703072409.556618-1-s.hauer@pengutronix.de
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/marvell/mwifiex/main.h | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
+index 175882485a19..c5164ae41b54 100644
+--- a/drivers/net/wireless/marvell/mwifiex/main.h
++++ b/drivers/net/wireless/marvell/mwifiex/main.h
+@@ -1287,6 +1287,9 @@ mwifiex_get_priv_by_id(struct mwifiex_adapter *adapter,
+
+ for (i = 0; i < adapter->priv_num; i++) {
+ if (adapter->priv[i]) {
++ if (adapter->priv[i]->bss_mode == NL80211_IFTYPE_UNSPECIFIED)
++ continue;
++
+ if ((adapter->priv[i]->bss_num == bss_num) &&
+ (adapter->priv[i]->bss_type == bss_type))
+ break;
+--
+2.43.0
+
--- /dev/null
+From aef10d062a72d445a75472ee5e489d50d4b96048 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 May 2024 13:02:46 +0200
+Subject: wifi: rtw88: usb: schedule rx work after everything is set up
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Marcin Ślusarz <mslusarz@renau.com>
+
+[ Upstream commit adc539784c98a7cc602cbf557debfc2e7b9be8b3 ]
+
+Right now it is possible to hit a NULL pointer dereference in
+rtw_rx_fill_rx_status() on the hw object and/or its fields because
+the initialization routine can start receiving USB replies before
+rtw_dev is fully set up.
+
+The stack trace looks like this:
+
+rtw_rx_fill_rx_status
+rtw8821c_query_rx_desc
+rtw_usb_rx_handler
+...
+queue_work
+rtw_usb_read_port_complete
+...
+usb_submit_urb
+rtw_usb_rx_resubmit
+rtw_usb_init_rx
+rtw_usb_probe
+
+So while the asynchronous RX path is already running, rtw_usb_probe
+continues and calls rtw_register_hw, which does all kinds of
+initialization (e.g. via ieee80211_register_hw) that
+rtw_rx_fill_rx_status relies on.
+
+Fix this by moving the first usb_submit_urb after everything
+is set up.
+
+For me, this bug manifested as:
+[ 8.893177] rtw_8821cu 1-1:1.2: band wrong, packet dropped
+[ 8.910904] rtw_8821cu 1-1:1.2: hw->conf.chandef.chan NULL in rtw_rx_fill_rx_status
+because I'm using Larry's backport of rtw88 driver with the NULL
+checks in rtw_rx_fill_rx_status.
+
+Link: https://lore.kernel.org/linux-wireless/CA+shoWQ7P49jhQasofDcTdQhiuarPTjYEDa--NiVVx494WcuQw@mail.gmail.com/
+Signed-off-by: Marcin Ślusarz <mslusarz@renau.com>
+Cc: Tim K <tpkuester@gmail.com>
+Cc: Ping-Ke Shih <pkshih@realtek.com>
+Cc: Larry Finger <Larry.Finger@lwfinger.net>
+Cc: Kalle Valo <kvalo@kernel.org>
+Cc: linux-wireless@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Ping-Ke Shih <pkshih@realtek.com>
+Link: https://patch.msgid.link/20240528110246.477321-1-marcin.slusarz@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/realtek/rtw88/usb.c | 13 ++++++++++---
+ 1 file changed, 10 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c
+index 0001a1ab6f38..edc1507514f6 100644
+--- a/drivers/net/wireless/realtek/rtw88/usb.c
++++ b/drivers/net/wireless/realtek/rtw88/usb.c
+@@ -744,7 +744,6 @@ static struct rtw_hci_ops rtw_usb_ops = {
+ static int rtw_usb_init_rx(struct rtw_dev *rtwdev)
+ {
+ struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
+- int i;
+
+ rtwusb->rxwq = create_singlethread_workqueue("rtw88_usb: rx wq");
+ if (!rtwusb->rxwq) {
+@@ -756,13 +755,19 @@ static int rtw_usb_init_rx(struct rtw_dev *rtwdev)
+
+ INIT_WORK(&rtwusb->rx_work, rtw_usb_rx_handler);
+
++ return 0;
++}
++
++static void rtw_usb_setup_rx(struct rtw_dev *rtwdev)
++{
++ struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
++ int i;
++
+ for (i = 0; i < RTW_USB_RXCB_NUM; i++) {
+ struct rx_usb_ctrl_block *rxcb = &rtwusb->rx_cb[i];
+
+ rtw_usb_rx_resubmit(rtwusb, rxcb);
+ }
+-
+- return 0;
+ }
+
+ static void rtw_usb_deinit_rx(struct rtw_dev *rtwdev)
+@@ -899,6 +904,8 @@ int rtw_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
+ goto err_destroy_rxwq;
+ }
+
++ rtw_usb_setup_rx(rtwdev);
++
+ return 0;
+
+ err_destroy_rxwq:
+--
+2.43.0
+
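+The fix is purely an ordering change in probe: RX bookkeeping is set up
+first, the hardware is registered, and only then are the RX URBs
+submitted, so no completion can run before registration has finished. A
+compact sketch of that ordering with hypothetical mydrv_* helpers
+standing in for the real rtw88 functions:
+
+struct mydrv_dev;			/* stand-in for struct rtw_dev */
+
+static int mydrv_init_rx_workqueue(struct mydrv_dev *dev) { return 0; }
+static void mydrv_destroy_rx_workqueue(struct mydrv_dev *dev) { }
+static int mydrv_register_hw(struct mydrv_dev *dev) { return 0; }
+static void mydrv_submit_rx_urbs(struct mydrv_dev *dev) { }
+
+static int mydrv_probe_order_example(struct mydrv_dev *dev)
+{
+	int ret;
+
+	/* 1. Allocate RX bookkeeping (workqueue, INIT_WORK of handler). */
+	ret = mydrv_init_rx_workqueue(dev);
+	if (ret)
+		return ret;
+
+	/* 2. Register with mac80211; the RX handler depends on this. */
+	ret = mydrv_register_hw(dev);
+	if (ret)
+		goto err_destroy_rxwq;
+
+	/* 3. Only now submit the RX URBs, so no completion can run
+	 * against a partially initialized device.
+	 */
+	mydrv_submit_rx_urbs(dev);
+
+	return 0;
+
+err_destroy_rxwq:
+	mydrv_destroy_rx_workqueue(dev);
+	return ret;
+}
+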
--- /dev/null
+From 3356cd71e30bc1dbe25b7e1e09732abaa792fd30 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Jun 2024 13:58:23 +0800
+Subject: wifi: rtw89: wow: prevent sending unexpected H2C during firmware
+ download
+
+From: Chih-Kang Chang <gary.chang@realtek.com>
+
+[ Upstream commit 60757f28408bcc63c4c0676b2a69a38adce30fc7 ]
+
+While downloading firmware in the resume flow, it is possible to receive
+a beacon and send an H2C command to the firmware. However, if the firmware
+receives an unexpected H2C command during the download process, it will
+fail. Therefore, prevent sending unexpected H2C commands while firmware is
+being downloaded in WoWLAN mode.
+
+Signed-off-by: Chih-Kang Chang <gary.chang@realtek.com>
+Signed-off-by: Ping-Ke Shih <pkshih@realtek.com>
+Link: https://patch.msgid.link/20240620055825.17592-6-pkshih@realtek.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/realtek/rtw89/core.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
+index ddc390d24ec1..ddf45828086d 100644
+--- a/drivers/net/wireless/realtek/rtw89/core.c
++++ b/drivers/net/wireless/realtek/rtw89/core.c
+@@ -1917,7 +1917,8 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
+ return;
+
+ if (ieee80211_is_beacon(hdr->frame_control)) {
+- if (vif->type == NL80211_IFTYPE_STATION) {
++ if (vif->type == NL80211_IFTYPE_STATION &&
++ !test_bit(RTW89_FLAG_WOWLAN, rtwdev->flags)) {
+ rtw89_vif_sync_bcn_tsf(rtwvif, hdr, skb->len);
+ rtw89_fw_h2c_rssi_offload(rtwdev, phy_ppdu);
+ }
+--
+2.43.0
+
--- /dev/null
+From 5a51df34bf65956cc36ce1ec0a456d42653c377c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 May 2024 23:50:29 +0200
+Subject: x86/kmsan: Fix hook for unaligned accesses
+
+From: Brian Johannesmeyer <bjohannesmeyer@gmail.com>
+
+[ Upstream commit bf6ab33d8487f5e2a0998ce75286eae65bb0a6d6 ]
+
+When called with a 'from' that is not 4-byte-aligned, string_memcpy_fromio()
+calls the movs() macro to copy the first few bytes, so that 'from' becomes
+4-byte-aligned before calling rep_movs(). This movs() macro modifies 'to', and
+the subsequent line modifies 'n'.
+
+As a result, on unaligned accesses, kmsan_unpoison_memory() uses the updated
+(aligned) values of 'to' and 'n'. Hence, it does not unpoison the entire
+region.
+
+Save the original values of 'to' and 'n', and pass those to
+kmsan_unpoison_memory(), so that the entire region is unpoisoned.
+
+Signed-off-by: Brian Johannesmeyer <bjohannesmeyer@gmail.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Reviewed-by: Alexander Potapenko <glider@google.com>
+Link: https://lore.kernel.org/r/20240523215029.4160518-1-bjohannesmeyer@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/lib/iomem.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/lib/iomem.c b/arch/x86/lib/iomem.c
+index e0411a3774d4..5eecb45d05d5 100644
+--- a/arch/x86/lib/iomem.c
++++ b/arch/x86/lib/iomem.c
+@@ -25,6 +25,9 @@ static __always_inline void rep_movs(void *to, const void *from, size_t n)
+
+ static void string_memcpy_fromio(void *to, const volatile void __iomem *from, size_t n)
+ {
++ const void *orig_to = to;
++ const size_t orig_n = n;
++
+ if (unlikely(!n))
+ return;
+
+@@ -39,7 +42,7 @@ static void string_memcpy_fromio(void *to, const volatile void __iomem *from, si
+ }
+ rep_movs(to, (const void *)from, n);
+ /* KMSAN must treat values read from devices as initialized. */
+- kmsan_unpoison_memory(to, n);
++ kmsan_unpoison_memory(orig_to, orig_n);
+ }
+
+ static void string_memcpy_toio(volatile void __iomem *to, const void *from, size_t n)
+--
+2.43.0
+
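+The key detail is that the alignment prologue advances 'to' and shrinks
+'n', so the KMSAN annotation has to be handed the caller's original
+buffer and length. A simplified stand-in (not the actual x86 iomem
+helper; copy_from_device_example() is hypothetical and the alignment
+handling is elided):
+
+#include <linux/kmsan-checks.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+/* Simplified stand-in for a copy helper whose alignment handling
+ * advances 'to' and shrinks 'n' before the bulk copy.
+ */
+static void copy_from_device_example(void *to, const void *from, size_t n)
+{
+	void *orig_to = to;
+	size_t orig_n = n;
+
+	/* ... an alignment prologue here may modify 'to', 'from', 'n' ... */
+	memcpy(to, from, n);
+
+	/* Unpoison the caller's whole buffer, not just the aligned tail. */
+	kmsan_unpoison_memory(orig_to, orig_n);
+}
+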
--- /dev/null
+From 35d0e38f645e457cab753396c4913d5ee7c98bb7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 18 Jun 2024 15:12:29 +0530
+Subject: xen: privcmd: Fix possible access to a freed kirqfd instance
+
+From: Viresh Kumar <viresh.kumar@linaro.org>
+
+[ Upstream commit 611ff1b1ae989a7bcce3e2a8e132ee30e968c557 ]
+
+Nothing prevents simultaneous ioctl calls to privcmd_irqfd_assign() and
+privcmd_irqfd_deassign(). If that happens, it is possible that a kirqfd
+created and added to the irqfds_list by privcmd_irqfd_assign() may get
+removed by another thread executing privcmd_irqfd_deassign(), while the
+former is still using it after dropping the locks.
+
+This can lead to a situation where an already freed kirqfd instance may
+be accessed and cause kernel oops.
+
+Use SRCU locking to prevent the same, as is done for the KVM
+implementation for irqfds.
+
+Reported-by: Al Viro <viro@zeniv.linux.org.uk>
+Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
+Reviewed-by: Juergen Gross <jgross@suse.com>
+Link: https://lore.kernel.org/r/9e884af1f1f842eacbb7afc5672c8feb4dea7f3f.1718703669.git.viresh.kumar@linaro.org
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/xen/privcmd.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
+index c9c620e32fa8..39e726d7280e 100644
+--- a/drivers/xen/privcmd.c
++++ b/drivers/xen/privcmd.c
+@@ -17,6 +17,7 @@
+ #include <linux/poll.h>
+ #include <linux/sched.h>
+ #include <linux/slab.h>
++#include <linux/srcu.h>
+ #include <linux/string.h>
+ #include <linux/workqueue.h>
+ #include <linux/errno.h>
+@@ -846,6 +847,7 @@ static long privcmd_ioctl_mmap_resource(struct file *file,
+ /* Irqfd support */
+ static struct workqueue_struct *irqfd_cleanup_wq;
+ static DEFINE_SPINLOCK(irqfds_lock);
++DEFINE_STATIC_SRCU(irqfds_srcu);
+ static LIST_HEAD(irqfds_list);
+
+ struct privcmd_kernel_irqfd {
+@@ -873,6 +875,9 @@ static void irqfd_shutdown(struct work_struct *work)
+ container_of(work, struct privcmd_kernel_irqfd, shutdown);
+ u64 cnt;
+
++ /* Make sure irqfd has been initialized in assign path */
++ synchronize_srcu(&irqfds_srcu);
++
+ eventfd_ctx_remove_wait_queue(kirqfd->eventfd, &kirqfd->wait, &cnt);
+ eventfd_ctx_put(kirqfd->eventfd);
+ kfree(kirqfd);
+@@ -935,7 +940,7 @@ static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
+ __poll_t events;
+ struct fd f;
+ void *dm_op;
+- int ret;
++ int ret, idx;
+
+ kirqfd = kzalloc(sizeof(*kirqfd) + irqfd->size, GFP_KERNEL);
+ if (!kirqfd)
+@@ -981,6 +986,7 @@ static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
+ }
+ }
+
++ idx = srcu_read_lock(&irqfds_srcu);
+ list_add_tail(&kirqfd->list, &irqfds_list);
+ spin_unlock_irqrestore(&irqfds_lock, flags);
+
+@@ -992,6 +998,8 @@ static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
+ if (events & EPOLLIN)
+ irqfd_inject(kirqfd);
+
++ srcu_read_unlock(&irqfds_srcu, idx);
++
+ /*
+ * Do not drop the file until the kirqfd is fully initialized, otherwise
+ * we might race against the EPOLLHUP.
+--
+2.43.0
+
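+The locking scheme mirrors what KVM does for its irqfds: the assign
+path stays inside an SRCU read-side section until the kirqfd is fully
+initialized, and the shutdown path calls synchronize_srcu() before
+freeing, so a racing deassign can never free an instance that is still
+being set up. A generic sketch of this SRCU lifetime pattern, using a
+hypothetical 'struct item' instead of the privcmd types:
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/srcu.h>
+
+struct item {
+	struct list_head list;
+};
+
+static DEFINE_SPINLOCK(items_lock);
+static LIST_HEAD(items_list);
+DEFINE_STATIC_SRCU(items_srcu);
+
+static void item_add_and_use(struct item *it)
+{
+	int idx;
+
+	idx = srcu_read_lock(&items_srcu);
+
+	spin_lock(&items_lock);
+	list_add_tail(&it->list, &items_list);
+	spin_unlock(&items_lock);
+
+	/* Safe to keep using 'it' here: a concurrent remover blocks in
+	 * synchronize_srcu() until srcu_read_unlock() below.
+	 */
+
+	srcu_read_unlock(&items_srcu, idx);
+}
+
+static void item_remove_and_free(struct item *it)
+{
+	spin_lock(&items_lock);
+	list_del(&it->list);
+	spin_unlock(&items_lock);
+
+	/* Wait for any reader still inside its SRCU section. */
+	synchronize_srcu(&items_srcu);
+	kfree(it);
+}
+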