--- /dev/null
+From a9489aa9313b05cbde6c3b6221ee27f8eef5ce2b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Mar 2023 15:40:54 -0600
+Subject: ALSA: hda: Match only Intel devices with CONTROLLER_IN_GPU()
+
+From: Bjorn Helgaas <bhelgaas@google.com>
+
+[ Upstream commit ff447886e675979d66b2bc01810035d3baea1b3a ]
+
+CONTROLLER_IN_GPU() is clearly intended to match only Intel devices, but
+previously it checked only the PCI Device ID, not the Vendor ID, so it
+could match devices from other vendors that happened to use the same Device
+ID.
+
+Update CONTROLLER_IN_GPU() so it matches only Intel devices.
+
+Fixes: 535115b5ff51 ("ALSA: hda - Abort the probe without i915 binding for HSW/B")
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Link: https://lore.kernel.org/r/20230307214054.886721-1-helgaas@kernel.org
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/pci/hda/hda_intel.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 81c4a45254ff2..77a592f219472 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -328,14 +328,15 @@ enum {
+ #define needs_eld_notify_link(chip) false
+ #endif
+
+-#define CONTROLLER_IN_GPU(pci) (((pci)->device == 0x0a0c) || \
++#define CONTROLLER_IN_GPU(pci) (((pci)->vendor == 0x8086) && \
++ (((pci)->device == 0x0a0c) || \
+ ((pci)->device == 0x0c0c) || \
+ ((pci)->device == 0x0d0c) || \
+ ((pci)->device == 0x160c) || \
+ ((pci)->device == 0x490d) || \
+ ((pci)->device == 0x4f90) || \
+ ((pci)->device == 0x4f91) || \
+- ((pci)->device == 0x4f92))
++ ((pci)->device == 0x4f92)))
+
+ #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
+
+--
+2.39.2
+
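A standalone sketch (hypothetical helper names and an arbitrary non-Intel vendor ID, not kernel code) of the behaviour change above: a device-ID-only test matches any vendor's part that happens to reuse one of the listed IDs, while the fixed check also requires Intel's vendor ID 0x8086.

  #include <stdbool.h>
  #include <stdio.h>

  struct pci_ids { unsigned short vendor, device; };

  #define PCI_VENDOR_ID_INTEL 0x8086

  /* Old behaviour: match on the device ID alone. */
  static bool controller_in_gpu_old(const struct pci_ids *pci)
  {
          return pci->device == 0x0a0c;   /* first ID from the list above */
  }

  /* Fixed behaviour: also require the Intel vendor ID. */
  static bool controller_in_gpu_new(const struct pci_ids *pci)
  {
          return pci->vendor == PCI_VENDOR_ID_INTEL && pci->device == 0x0a0c;
  }

  int main(void)
  {
          struct pci_ids other = { .vendor = 0x1af4, .device = 0x0a0c };

          printf("old: %d new: %d\n", controller_in_gpu_old(&other),
                 controller_in_gpu_new(&other));  /* old: 1 new: 0 */
          return 0;
  }
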
--- /dev/null
+From 467d41a1ca4f23045baf378d2e6d07b2bdedb487 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Mar 2023 11:39:12 +0200
+Subject: ASoC: SOF: Intel: HDA: Fix device description
+
+From: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+
+[ Upstream commit 9eb2b4cac223095d2079a6d52b8bbddc6e064288 ]
+
+Add the missing ops_free callback for APL/CNL/CML/JSL/TGL/EHL platforms.
+
+Fixes: 1da51943725f ("ASoC: SOF: Intel: hda: init NHLT for IPC4")
+
+Signed-off-by: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+Reviewed-by: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+Reviewed-by: Bard Liao <yung-chuan.liao@linux.intel.com>
+Signed-off-by: Peter Ujfalusi <peter.ujfalusi@linux.intel.com>
+Link: https://lore.kernel.org/r/20230307093914.25409-3-peter.ujfalusi@linux.intel.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/sof/intel/pci-apl.c | 1 +
+ sound/soc/sof/intel/pci-cnl.c | 2 ++
+ sound/soc/sof/intel/pci-icl.c | 1 +
+ sound/soc/sof/intel/pci-tgl.c | 5 +++++
+ 4 files changed, 9 insertions(+)
+
+diff --git a/sound/soc/sof/intel/pci-apl.c b/sound/soc/sof/intel/pci-apl.c
+index 998e219011f01..ad8431b13125d 100644
+--- a/sound/soc/sof/intel/pci-apl.c
++++ b/sound/soc/sof/intel/pci-apl.c
+@@ -72,6 +72,7 @@ static const struct sof_dev_desc glk_desc = {
+ .nocodec_tplg_filename = "sof-glk-nocodec.tplg",
+ .ops = &sof_apl_ops,
+ .ops_init = sof_apl_ops_init,
++ .ops_free = hda_ops_free,
+ };
+
+ /* PCI IDs */
+diff --git a/sound/soc/sof/intel/pci-cnl.c b/sound/soc/sof/intel/pci-cnl.c
+index c797356f7028b..33677ce8de41d 100644
+--- a/sound/soc/sof/intel/pci-cnl.c
++++ b/sound/soc/sof/intel/pci-cnl.c
+@@ -45,6 +45,7 @@ static const struct sof_dev_desc cnl_desc = {
+ .nocodec_tplg_filename = "sof-cnl-nocodec.tplg",
+ .ops = &sof_cnl_ops,
+ .ops_init = sof_cnl_ops_init,
++ .ops_free = hda_ops_free,
+ };
+
+ static const struct sof_dev_desc cfl_desc = {
+@@ -102,6 +103,7 @@ static const struct sof_dev_desc cml_desc = {
+ .nocodec_tplg_filename = "sof-cnl-nocodec.tplg",
+ .ops = &sof_cnl_ops,
+ .ops_init = sof_cnl_ops_init,
++ .ops_free = hda_ops_free,
+ };
+
+ /* PCI IDs */
+diff --git a/sound/soc/sof/intel/pci-icl.c b/sound/soc/sof/intel/pci-icl.c
+index 48f24f8ace261..9a42a4ea1a5ea 100644
+--- a/sound/soc/sof/intel/pci-icl.c
++++ b/sound/soc/sof/intel/pci-icl.c
+@@ -73,6 +73,7 @@ static const struct sof_dev_desc jsl_desc = {
+ .nocodec_tplg_filename = "sof-jsl-nocodec.tplg",
+ .ops = &sof_cnl_ops,
+ .ops_init = sof_cnl_ops_init,
++ .ops_free = hda_ops_free,
+ };
+
+ /* PCI IDs */
+diff --git a/sound/soc/sof/intel/pci-tgl.c b/sound/soc/sof/intel/pci-tgl.c
+index 4cfe4f242fc5e..19e2d68dcb20a 100644
+--- a/sound/soc/sof/intel/pci-tgl.c
++++ b/sound/soc/sof/intel/pci-tgl.c
+@@ -45,6 +45,7 @@ static const struct sof_dev_desc tgl_desc = {
+ .nocodec_tplg_filename = "sof-tgl-nocodec.tplg",
+ .ops = &sof_tgl_ops,
+ .ops_init = sof_tgl_ops_init,
++ .ops_free = hda_ops_free,
+ };
+
+ static const struct sof_dev_desc tglh_desc = {
+@@ -101,6 +102,7 @@ static const struct sof_dev_desc ehl_desc = {
+ .nocodec_tplg_filename = "sof-ehl-nocodec.tplg",
+ .ops = &sof_tgl_ops,
+ .ops_init = sof_tgl_ops_init,
++ .ops_free = hda_ops_free,
+ };
+
+ static const struct sof_dev_desc adls_desc = {
+@@ -129,6 +131,7 @@ static const struct sof_dev_desc adls_desc = {
+ .nocodec_tplg_filename = "sof-adl-nocodec.tplg",
+ .ops = &sof_tgl_ops,
+ .ops_init = sof_tgl_ops_init,
++ .ops_free = hda_ops_free,
+ };
+
+ static const struct sof_dev_desc adl_desc = {
+@@ -157,6 +160,7 @@ static const struct sof_dev_desc adl_desc = {
+ .nocodec_tplg_filename = "sof-adl-nocodec.tplg",
+ .ops = &sof_tgl_ops,
+ .ops_init = sof_tgl_ops_init,
++ .ops_free = hda_ops_free,
+ };
+
+ static const struct sof_dev_desc adl_n_desc = {
+@@ -185,6 +189,7 @@ static const struct sof_dev_desc adl_n_desc = {
+ .nocodec_tplg_filename = "sof-adl-nocodec.tplg",
+ .ops = &sof_tgl_ops,
+ .ops_init = sof_tgl_ops_init,
++ .ops_free = hda_ops_free,
+ };
+
+ static const struct sof_dev_desc rpls_desc = {
+--
+2.39.2
+
--- /dev/null
+From fb847f78ccb02fe50acb6d99b93ffed034fabb51 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Mar 2023 11:39:11 +0200
+Subject: ASoC: SOF: Intel: MTL: Fix the device description
+
+From: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+
+[ Upstream commit a659e35ca0af2765f567bdfdccfa247eff0cdab8 ]
+
+Add the missing ops_free callback.
+
+Fixes: 064520e8aeaa ("ASoC: SOF: Intel: Add support for MeteorLake (MTL)")
+
+Signed-off-by: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+Reviewed-by: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+Reviewed-by: Bard Liao <yung-chuan.liao@linux.intel.com>
+Signed-off-by: Peter Ujfalusi <peter.ujfalusi@linux.intel.com>
+Link: https://lore.kernel.org/r/20230307093914.25409-2-peter.ujfalusi@linux.intel.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/sof/intel/pci-mtl.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/sound/soc/sof/intel/pci-mtl.c b/sound/soc/sof/intel/pci-mtl.c
+index 9f39da984e9fa..4dae256536bf4 100644
+--- a/sound/soc/sof/intel/pci-mtl.c
++++ b/sound/soc/sof/intel/pci-mtl.c
+@@ -43,6 +43,7 @@ static const struct sof_dev_desc mtl_desc = {
+ .nocodec_tplg_filename = "sof-mtl-nocodec.tplg",
+ .ops = &sof_mtl_ops,
+ .ops_init = sof_mtl_ops_init,
++ .ops_free = hda_ops_free,
+ };
+
+ /* PCI IDs */
+--
+2.39.2
+
--- /dev/null
+From 2a729808615d58805acd561bd6003a9bc6d1ed3a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Mar 2023 11:39:14 +0200
+Subject: ASoC: SOF: Intel: pci-tgl: Fix device description
+
+From: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+
+[ Upstream commit 376f79bbf521fc37b871b536276319951b5bef3a ]
+
+Add the missing ops_free callback.
+
+Fixes: 63d375b9f2a9 ("ASoC: SOF: Intel: pci-tgl: use RPL specific firmware definitions")
+
+Signed-off-by: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+Reviewed-by: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+Reviewed-by: Bard Liao <yung-chuan.liao@linux.intel.com>
+Signed-off-by: Peter Ujfalusi <peter.ujfalusi@linux.intel.com>
+Link: https://lore.kernel.org/r/20230307093914.25409-5-peter.ujfalusi@linux.intel.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/sof/intel/pci-tgl.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/sound/soc/sof/intel/pci-tgl.c b/sound/soc/sof/intel/pci-tgl.c
+index 19e2d68dcb20a..ccaf0ff9eb1c3 100644
+--- a/sound/soc/sof/intel/pci-tgl.c
++++ b/sound/soc/sof/intel/pci-tgl.c
+@@ -218,6 +218,7 @@ static const struct sof_dev_desc rpls_desc = {
+ .nocodec_tplg_filename = "sof-rpl-nocodec.tplg",
+ .ops = &sof_tgl_ops,
+ .ops_init = sof_tgl_ops_init,
++ .ops_free = hda_ops_free,
+ };
+
+ static const struct sof_dev_desc rpl_desc = {
+@@ -246,6 +247,7 @@ static const struct sof_dev_desc rpl_desc = {
+ .nocodec_tplg_filename = "sof-rpl-nocodec.tplg",
+ .ops = &sof_tgl_ops,
+ .ops_init = sof_tgl_ops_init,
++ .ops_free = hda_ops_free,
+ };
+
+ /* PCI IDs */
+--
+2.39.2
+
--- /dev/null
+From c64ca0d99e6b4f363c5a6db35e7c1469d8632fcc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Mar 2023 11:39:13 +0200
+Subject: ASoC: SOF: Intel: SKL: Fix device description
+
+From: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+
+[ Upstream commit 1f320bdb29b644a2c9fb301a6fb2d6170e6417e9 ]
+
+Add missing ops_free callback for SKL/KBL platforms.
+
+Fixes: 52d7939d10f2 ("ASoC: SOF: Intel: add ops for SKL/KBL")
+
+Signed-off-by: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+Reviewed-by: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+Reviewed-by: Bard Liao <yung-chuan.liao@linux.intel.com>
+Signed-off-by: Peter Ujfalusi <peter.ujfalusi@linux.intel.com>
+Link: https://lore.kernel.org/r/20230307093914.25409-4-peter.ujfalusi@linux.intel.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/sof/intel/pci-skl.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/sound/soc/sof/intel/pci-skl.c b/sound/soc/sof/intel/pci-skl.c
+index 3a99dc444f92e..5b4bccf819658 100644
+--- a/sound/soc/sof/intel/pci-skl.c
++++ b/sound/soc/sof/intel/pci-skl.c
+@@ -38,6 +38,7 @@ static struct sof_dev_desc skl_desc = {
+ .nocodec_tplg_filename = "sof-skl-nocodec.tplg",
+ .ops = &sof_skl_ops,
+ .ops_init = sof_skl_ops_init,
++ .ops_free = hda_ops_free,
+ };
+
+ static struct sof_dev_desc kbl_desc = {
+@@ -61,6 +62,7 @@ static struct sof_dev_desc kbl_desc = {
+ .nocodec_tplg_filename = "sof-kbl-nocodec.tplg",
+ .ops = &sof_skl_ops,
+ .ops_init = sof_skl_ops_init,
++ .ops_free = hda_ops_free,
+ };
+
+ /* PCI IDs */
+--
+2.39.2
+
--- /dev/null
+From 92038f506a6dba0cab955905694d9cd67e93950b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Mar 2023 13:07:30 +0200
+Subject: ASoC: SOF: ipc4-topology: set dmic dai index from copier
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jaska Uimonen <jaska.uimonen@linux.intel.com>
+
+[ Upstream commit c99e48f4ce9b986ab7992ec7283a06dae875f668 ]
+
+The DMIC DAI index was incorrectly placed in bits 5-7, when it actually
+uses just the lowest 3 bits. Fix the macro that sets these bits.
+
+Fixes: aa84ffb72158 ("ASoC: SOF: ipc4-topology: Add support for SSP/DMIC DAI's")
+Signed-off-by: Jaska Uimonen <jaska.uimonen@linux.intel.com>
+Reviewed-by: Adrian Bonislawski <adrian.bonislawski@intel.com>
+Reviewed-by: Péter Ujfalusi <peter.ujfalusi@linux.intel.com>
+Reviewed-by: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+Reviewed-by: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+Signed-off-by: Peter Ujfalusi <peter.ujfalusi@linux.intel.com>
+Link: https://lore.kernel.org/r/20230307110730.1995-1-peter.ujfalusi@linux.intel.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/sof/ipc4-topology.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/soc/sof/ipc4-topology.h b/sound/soc/sof/ipc4-topology.h
+index 0aa87a8add5d3..2363a7cc0b57d 100644
+--- a/sound/soc/sof/ipc4-topology.h
++++ b/sound/soc/sof/ipc4-topology.h
+@@ -46,7 +46,7 @@
+ #define SOF_IPC4_NODE_INDEX_INTEL_SSP(x) (((x) & 0xf) << 4)
+
+ /* Node ID for DMIC type DAI copiers */
+-#define SOF_IPC4_NODE_INDEX_INTEL_DMIC(x) (((x) & 0x7) << 5)
++#define SOF_IPC4_NODE_INDEX_INTEL_DMIC(x) ((x) & 0x7)
+
+ #define SOF_IPC4_GAIN_ALL_CHANNELS_MASK 0xffffffff
+ #define SOF_IPC4_VOL_ZERO_DB 0x7fffffff
+--
+2.39.2
+
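A small standalone sketch (macro names shortened; bit layouts taken from the header hunk above) of why the old shift was wrong: the SSP instance field occupies bits 4-7, so shifting the DMIC index into bits 5-7 lands it on top of that field, while the fixed macro keeps it in the low 3 bits.

  #include <stdio.h>

  #define NODE_INDEX_SSP(x)       (((x) & 0xf) << 4)   /* bits 4-7 */
  #define NODE_INDEX_DMIC_OLD(x)  (((x) & 0x7) << 5)   /* buggy: bits 5-7 */
  #define NODE_INDEX_DMIC_NEW(x)  ((x) & 0x7)          /* fixed: bits 0-2 */

  int main(void)
  {
          printf("SSP(2)      = 0x%02x\n", NODE_INDEX_SSP(2));       /* 0x20 */
          printf("DMIC_OLD(1) = 0x%02x\n", NODE_INDEX_DMIC_OLD(1));  /* 0x20: collides */
          printf("DMIC_NEW(1) = 0x%02x\n", NODE_INDEX_DMIC_NEW(1));  /* 0x01 */
          return 0;
  }
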
--- /dev/null
+From b90b6af4620106c95516a7ad6ea08d795f28ab52 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Mar 2023 09:09:13 +0800
+Subject: blk-mq: fix "bad unlock balance detected" on q->srcu in
+ __blk_mq_run_dispatch_ops
+
+From: Chris Leech <cleech@redhat.com>
+
+[ Upstream commit 00e885efcfbb8712d3e1bfc1ae30639c15ca1d3b ]
+
+The 'q' parameter of the macro __blk_mq_run_dispatch_ops may not be a
+local variable; for example, it may be rq->q, in which case the request
+queue pointed to by this variable can have been changed to another queue
+(in the BLK_MQ_F_TAG_QUEUE_SHARED case) by the time 'dispatch_ops'
+returns, and then 'bad unlock balance' is triggered.
+
+Fix the issue by adding a local variable for doing the srcu lock/unlock.
+
+Fixes: 2a904d00855f ("blk-mq: remove hctx_lock and hctx_unlock")
+Cc: Marco Patalano <mpatalan@redhat.com>
+Signed-off-by: Chris Leech <cleech@redhat.com>
+Signed-off-by: Ming Lei <ming.lei@redhat.com>
+Link: https://lore.kernel.org/r/20230310010913.1014789-1-ming.lei@redhat.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/blk-mq.h | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/block/blk-mq.h b/block/blk-mq.h
+index ef59fee62780d..a7482d2cc82e7 100644
+--- a/block/blk-mq.h
++++ b/block/blk-mq.h
+@@ -378,12 +378,13 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
+ #define __blk_mq_run_dispatch_ops(q, check_sleep, dispatch_ops) \
+ do { \
+ if ((q)->tag_set->flags & BLK_MQ_F_BLOCKING) { \
++ struct blk_mq_tag_set *__tag_set = (q)->tag_set; \
+ int srcu_idx; \
+ \
+ might_sleep_if(check_sleep); \
+- srcu_idx = srcu_read_lock((q)->tag_set->srcu); \
++ srcu_idx = srcu_read_lock(__tag_set->srcu); \
+ (dispatch_ops); \
+- srcu_read_unlock((q)->tag_set->srcu, srcu_idx); \
++ srcu_read_unlock(__tag_set->srcu, srcu_idx); \
+ } else { \
+ rcu_read_lock(); \
+ (dispatch_ops); \
+--
+2.39.2
+
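A self-contained userspace sketch (toy types, not the kernel macros) of the double-evaluation hazard described above: when the macro argument is something like rq->q and 'ops' changes it, the unlock re-evaluates the argument and pairs with a different object; snapshotting it in a local, as the patch does with __tag_set, keeps the lock and unlock on the same object.

  #include <stdio.h>

  struct queue { int id; };
  struct request { struct queue *q; };

  /* Buggy shape: 'q' is evaluated again for the unlock. */
  #define RUN_BUGGY(q, ops) do { \
                  printf("lock   queue %d\n", (q)->id); \
                  (ops); \
                  printf("unlock queue %d\n", (q)->id); \
          } while (0)

  /* Fixed shape: snapshot the pointer once, like the __tag_set local. */
  #define RUN_FIXED(q, ops) do { \
                  struct queue *__q = (q); \
                  printf("lock   queue %d\n", __q->id); \
                  (ops); \
                  printf("unlock queue %d\n", __q->id); \
          } while (0)

  int main(void)
  {
          struct queue a = { 1 }, b = { 2 };
          struct request rq = { &a };

          RUN_BUGGY(rq.q, rq.q = &b);  /* lock 1 / unlock 2: unbalanced */
          rq.q = &a;
          RUN_FIXED(rq.q, rq.q = &b);  /* lock 1 / unlock 1: balanced */
          return 0;
  }
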
--- /dev/null
+From 8cf394369e02f13d99be3fef10eae6f3f9f7e9bc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 1 Nov 2022 16:00:47 +0100
+Subject: blk-mq: move the srcu_struct used for quiescing to the tagset
+
+From: Christoph Hellwig <hch@lst.de>
+
+[ Upstream commit 80bd4a7aab4c9ce59bf5e35fdf52aa23d8a3c9f5 ]
+
+All I/O submissions have fairly similar latencies, and a tagset-wide
+quiesce is a fairly common operation.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Keith Busch <kbusch@kernel.org>
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Reviewed-by: Chao Leng <lengchao@huawei.com>
+Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
+Link: https://lore.kernel.org/r/20221101150050.3510-12-hch@lst.de
+[axboe: fix whitespace]
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Stable-dep-of: 00e885efcfbb ("blk-mq: fix "bad unlock balance detected" on q->srcu in __blk_mq_run_dispatch_ops")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/blk-core.c | 27 +++++----------------------
+ block/blk-mq.c | 33 +++++++++++++++++++++++++--------
+ block/blk-mq.h | 14 +++++++-------
+ block/blk-sysfs.c | 9 ++-------
+ block/blk.h | 9 +--------
+ block/genhd.c | 2 +-
+ include/linux/blk-mq.h | 4 ++++
+ include/linux/blkdev.h | 9 ---------
+ 8 files changed, 45 insertions(+), 62 deletions(-)
+
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 24ee7785a5ad5..d5da62bb4bc06 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -65,7 +65,6 @@ DEFINE_IDA(blk_queue_ida);
+ * For queue allocation
+ */
+ struct kmem_cache *blk_requestq_cachep;
+-struct kmem_cache *blk_requestq_srcu_cachep;
+
+ /*
+ * Controlling structure to kblockd
+@@ -373,26 +372,20 @@ static void blk_timeout_work(struct work_struct *work)
+ {
+ }
+
+-struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)
++struct request_queue *blk_alloc_queue(int node_id)
+ {
+ struct request_queue *q;
+
+- q = kmem_cache_alloc_node(blk_get_queue_kmem_cache(alloc_srcu),
+- GFP_KERNEL | __GFP_ZERO, node_id);
++ q = kmem_cache_alloc_node(blk_requestq_cachep, GFP_KERNEL | __GFP_ZERO,
++ node_id);
+ if (!q)
+ return NULL;
+
+- if (alloc_srcu) {
+- blk_queue_flag_set(QUEUE_FLAG_HAS_SRCU, q);
+- if (init_srcu_struct(q->srcu) != 0)
+- goto fail_q;
+- }
+-
+ q->last_merge = NULL;
+
+ q->id = ida_alloc(&blk_queue_ida, GFP_KERNEL);
+ if (q->id < 0)
+- goto fail_srcu;
++ goto fail_q;
+
+ q->stats = blk_alloc_queue_stats();
+ if (!q->stats)
+@@ -434,11 +427,8 @@ struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)
+ blk_free_queue_stats(q->stats);
+ fail_id:
+ ida_free(&blk_queue_ida, q->id);
+-fail_srcu:
+- if (alloc_srcu)
+- cleanup_srcu_struct(q->srcu);
+ fail_q:
+- kmem_cache_free(blk_get_queue_kmem_cache(alloc_srcu), q);
++ kmem_cache_free(blk_requestq_cachep, q);
+ return NULL;
+ }
+
+@@ -1190,9 +1180,6 @@ int __init blk_dev_init(void)
+ sizeof_field(struct request, cmd_flags));
+ BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
+ sizeof_field(struct bio, bi_opf));
+- BUILD_BUG_ON(ALIGN(offsetof(struct request_queue, srcu),
+- __alignof__(struct request_queue)) !=
+- sizeof(struct request_queue));
+
+ /* used for unplugging and affects IO latency/throughput - HIGHPRI */
+ kblockd_workqueue = alloc_workqueue("kblockd",
+@@ -1203,10 +1190,6 @@ int __init blk_dev_init(void)
+ blk_requestq_cachep = kmem_cache_create("request_queue",
+ sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
+
+- blk_requestq_srcu_cachep = kmem_cache_create("request_queue_srcu",
+- sizeof(struct request_queue) +
+- sizeof(struct srcu_struct), 0, SLAB_PANIC, NULL);
+-
+ blk_debugfs_root = debugfs_create_dir("block", NULL);
+
+ return 0;
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index aa67a52c5a069..f8c97d75b8d1a 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -261,8 +261,8 @@ EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
+ */
+ void blk_mq_wait_quiesce_done(struct request_queue *q)
+ {
+- if (blk_queue_has_srcu(q))
+- synchronize_srcu(q->srcu);
++ if (q->tag_set->flags & BLK_MQ_F_BLOCKING)
++ synchronize_srcu(q->tag_set->srcu);
+ else
+ synchronize_rcu();
+ }
+@@ -4022,7 +4022,7 @@ static struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
+ struct request_queue *q;
+ int ret;
+
+- q = blk_alloc_queue(set->numa_node, set->flags & BLK_MQ_F_BLOCKING);
++ q = blk_alloc_queue(set->numa_node);
+ if (!q)
+ return ERR_PTR(-ENOMEM);
+ q->queuedata = queuedata;
+@@ -4194,9 +4194,6 @@ static void blk_mq_update_poll_flag(struct request_queue *q)
+ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+ struct request_queue *q)
+ {
+- WARN_ON_ONCE(blk_queue_has_srcu(q) !=
+- !!(set->flags & BLK_MQ_F_BLOCKING));
+-
+ /* mark the queue as mq asap */
+ q->mq_ops = set->ops;
+
+@@ -4453,8 +4450,18 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
+ if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
+ set->nr_hw_queues = nr_cpu_ids;
+
+- if (blk_mq_alloc_tag_set_tags(set, set->nr_hw_queues) < 0)
+- return -ENOMEM;
++ if (set->flags & BLK_MQ_F_BLOCKING) {
++ set->srcu = kmalloc(sizeof(*set->srcu), GFP_KERNEL);
++ if (!set->srcu)
++ return -ENOMEM;
++ ret = init_srcu_struct(set->srcu);
++ if (ret)
++ goto out_free_srcu;
++ }
++
++ ret = blk_mq_alloc_tag_set_tags(set, set->nr_hw_queues);
++ if (ret)
++ goto out_cleanup_srcu;
+
+ ret = -ENOMEM;
+ for (i = 0; i < set->nr_maps; i++) {
+@@ -4484,6 +4491,12 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
+ }
+ kfree(set->tags);
+ set->tags = NULL;
++out_cleanup_srcu:
++ if (set->flags & BLK_MQ_F_BLOCKING)
++ cleanup_srcu_struct(set->srcu);
++out_free_srcu:
++ if (set->flags & BLK_MQ_F_BLOCKING)
++ kfree(set->srcu);
+ return ret;
+ }
+ EXPORT_SYMBOL(blk_mq_alloc_tag_set);
+@@ -4523,6 +4536,10 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
+
+ kfree(set->tags);
+ set->tags = NULL;
++ if (set->flags & BLK_MQ_F_BLOCKING) {
++ cleanup_srcu_struct(set->srcu);
++ kfree(set->srcu);
++ }
+ }
+ EXPORT_SYMBOL(blk_mq_free_tag_set);
+
+diff --git a/block/blk-mq.h b/block/blk-mq.h
+index 0b2870839cdd6..ef59fee62780d 100644
+--- a/block/blk-mq.h
++++ b/block/blk-mq.h
+@@ -377,17 +377,17 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
+ /* run the code block in @dispatch_ops with rcu/srcu read lock held */
+ #define __blk_mq_run_dispatch_ops(q, check_sleep, dispatch_ops) \
+ do { \
+- if (!blk_queue_has_srcu(q)) { \
+- rcu_read_lock(); \
+- (dispatch_ops); \
+- rcu_read_unlock(); \
+- } else { \
++ if ((q)->tag_set->flags & BLK_MQ_F_BLOCKING) { \
+ int srcu_idx; \
+ \
+ might_sleep_if(check_sleep); \
+- srcu_idx = srcu_read_lock((q)->srcu); \
++ srcu_idx = srcu_read_lock((q)->tag_set->srcu); \
+ (dispatch_ops); \
+- srcu_read_unlock((q)->srcu, srcu_idx); \
++ srcu_read_unlock((q)->tag_set->srcu, srcu_idx); \
++ } else { \
++ rcu_read_lock(); \
++ (dispatch_ops); \
++ rcu_read_unlock(); \
+ } \
+ } while (0)
+
+diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
+index e71b3b43927c0..e7871665825a3 100644
+--- a/block/blk-sysfs.c
++++ b/block/blk-sysfs.c
+@@ -739,10 +739,8 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
+
+ static void blk_free_queue_rcu(struct rcu_head *rcu_head)
+ {
+- struct request_queue *q = container_of(rcu_head, struct request_queue,
+- rcu_head);
+-
+- kmem_cache_free(blk_get_queue_kmem_cache(blk_queue_has_srcu(q)), q);
++ kmem_cache_free(blk_requestq_cachep,
++ container_of(rcu_head, struct request_queue, rcu_head));
+ }
+
+ /**
+@@ -779,9 +777,6 @@ static void blk_release_queue(struct kobject *kobj)
+ if (queue_is_mq(q))
+ blk_mq_release(q);
+
+- if (blk_queue_has_srcu(q))
+- cleanup_srcu_struct(q->srcu);
+-
+ ida_free(&blk_queue_ida, q->id);
+ call_rcu(&q->rcu_head, blk_free_queue_rcu);
+ }
+diff --git a/block/blk.h b/block/blk.h
+index a186ea20f39d8..4849a2efa4c50 100644
+--- a/block/blk.h
++++ b/block/blk.h
+@@ -27,7 +27,6 @@ struct blk_flush_queue {
+ };
+
+ extern struct kmem_cache *blk_requestq_cachep;
+-extern struct kmem_cache *blk_requestq_srcu_cachep;
+ extern struct kobj_type blk_queue_ktype;
+ extern struct ida blk_queue_ida;
+
+@@ -428,13 +427,7 @@ int bio_add_hw_page(struct request_queue *q, struct bio *bio,
+ struct page *page, unsigned int len, unsigned int offset,
+ unsigned int max_sectors, bool *same_page);
+
+-static inline struct kmem_cache *blk_get_queue_kmem_cache(bool srcu)
+-{
+- if (srcu)
+- return blk_requestq_srcu_cachep;
+- return blk_requestq_cachep;
+-}
+-struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu);
++struct request_queue *blk_alloc_queue(int node_id);
+
+ int disk_scan_partitions(struct gendisk *disk, fmode_t mode);
+
+diff --git a/block/genhd.c b/block/genhd.c
+index 0b6928e948f31..4db1f905514c5 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -1436,7 +1436,7 @@ struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass)
+ struct request_queue *q;
+ struct gendisk *disk;
+
+- q = blk_alloc_queue(node, false);
++ q = blk_alloc_queue(node);
+ if (!q)
+ return NULL;
+
+diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
+index a9764cbf7f8d2..8e942e36f1c48 100644
+--- a/include/linux/blk-mq.h
++++ b/include/linux/blk-mq.h
+@@ -7,6 +7,7 @@
+ #include <linux/lockdep.h>
+ #include <linux/scatterlist.h>
+ #include <linux/prefetch.h>
++#include <linux/srcu.h>
+
+ struct blk_mq_tags;
+ struct blk_flush_queue;
+@@ -507,6 +508,8 @@ enum hctx_type {
+ * @tag_list_lock: Serializes tag_list accesses.
+ * @tag_list: List of the request queues that use this tag set. See also
+ * request_queue.tag_set_list.
++ * @srcu: Use as lock when type of the request queue is blocking
++ * (BLK_MQ_F_BLOCKING).
+ */
+ struct blk_mq_tag_set {
+ struct blk_mq_queue_map map[HCTX_MAX_TYPES];
+@@ -527,6 +530,7 @@ struct blk_mq_tag_set {
+
+ struct mutex tag_list_lock;
+ struct list_head tag_list;
++ struct srcu_struct *srcu;
+ };
+
+ /**
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 891f8cbcd0436..36c286d22fb23 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -22,7 +22,6 @@
+ #include <linux/blkzoned.h>
+ #include <linux/sched.h>
+ #include <linux/sbitmap.h>
+-#include <linux/srcu.h>
+ #include <linux/uuid.h>
+ #include <linux/xarray.h>
+
+@@ -544,18 +543,11 @@ struct request_queue {
+ struct mutex debugfs_mutex;
+
+ bool mq_sysfs_init_done;
+-
+- /**
+- * @srcu: Sleepable RCU. Use as lock when type of the request queue
+- * is blocking (BLK_MQ_F_BLOCKING). Must be the last member
+- */
+- struct srcu_struct srcu[];
+ };
+
+ /* Keep blk_queue_flag_name[] in sync with the definitions below */
+ #define QUEUE_FLAG_STOPPED 0 /* queue is stopped */
+ #define QUEUE_FLAG_DYING 1 /* queue being torn down */
+-#define QUEUE_FLAG_HAS_SRCU 2 /* SRCU is allocated */
+ #define QUEUE_FLAG_NOMERGES 3 /* disable merge attempts */
+ #define QUEUE_FLAG_SAME_COMP 4 /* complete on same CPU-group */
+ #define QUEUE_FLAG_FAIL_IO 5 /* fake timeout */
+@@ -591,7 +583,6 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
+
+ #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
+ #define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
+-#define blk_queue_has_srcu(q) test_bit(QUEUE_FLAG_HAS_SRCU, &(q)->queue_flags)
+ #define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
+ #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
+ #define blk_queue_noxmerges(q) \
+--
+2.39.2
+
--- /dev/null
+From e9280b98420d2bf4c97dde84058d362610beb048 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Mar 2023 10:30:02 +0100
+Subject: block: do not reverse request order when flushing plug list
+
+From: Jan Kara <jack@suse.cz>
+
+[ Upstream commit 34e0a279a993debaff03158fc2fbf6a00c093643 ]
+
+Commit 26fed4ac4eab ("block: flush plug based on hardware and software
+queue order") changed flushing of plug list to submit requests one
+device at a time. However while doing that it also started using
+list_add_tail() instead of list_add() used previously thus effectively
+submitting requests in reverse order. Also when forming a rq_list with
+remaining requests (in case two or more devices are used), we
+effectively reverse the ordering of the plug list for each device we
+process. Submitting requests in reverse order has negative impact on
+performance for rotational disks (when BFQ is not in use). We observe
+10-25% regression in random 4k write throughput, as well as ~20%
+regression in MariaDB OLTP benchmark on rotational storage on btrfs
+filesystem.
+
+Fix the problem by preserving ordering of the plug list when inserting
+requests into the queuelist as well as by appending to requeue_list
+instead of prepending to it.
+
+Fixes: 26fed4ac4eab ("block: flush plug based on hardware and software queue order")
+Signed-off-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Link: https://lore.kernel.org/r/20230313093002.11756-1-jack@suse.cz
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/blk-mq.c | 5 +++--
+ include/linux/blk-mq.h | 6 ++++++
+ 2 files changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index fe0a3a882f465..aa67a52c5a069 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -2711,6 +2711,7 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
+ struct blk_mq_hw_ctx *this_hctx = NULL;
+ struct blk_mq_ctx *this_ctx = NULL;
+ struct request *requeue_list = NULL;
++ struct request **requeue_lastp = &requeue_list;
+ unsigned int depth = 0;
+ LIST_HEAD(list);
+
+@@ -2721,10 +2722,10 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
+ this_hctx = rq->mq_hctx;
+ this_ctx = rq->mq_ctx;
+ } else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) {
+- rq_list_add(&requeue_list, rq);
++ rq_list_add_tail(&requeue_lastp, rq);
+ continue;
+ }
+- list_add_tail(&rq->queuelist, &list);
++ list_add(&rq->queuelist, &list);
+ depth++;
+ } while (!rq_list_empty(plug->mq_list));
+
+diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
+index d6119c5d1069b..a9764cbf7f8d2 100644
+--- a/include/linux/blk-mq.h
++++ b/include/linux/blk-mq.h
+@@ -228,6 +228,12 @@ static inline unsigned short req_get_ioprio(struct request *req)
+ *(listptr) = rq; \
+ } while (0)
+
++#define rq_list_add_tail(lastpptr, rq) do { \
++ (rq)->rq_next = NULL; \
++ **(lastpptr) = rq; \
++ *(lastpptr) = &rq->rq_next; \
++} while (0)
++
+ #define rq_list_pop(listptr) \
+ ({ \
+ struct request *__req = NULL; \
+--
+2.39.2
+
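A self-contained userspace sketch (toy request struct; the macro bodies mirror rq_list_add() and the rq_list_add_tail() added above) contrasting head-insert, which reverses submission order, with the last-pointer tail-append that preserves it.

  #include <stdio.h>

  struct request { int id; struct request *rq_next; };

  #define rq_list_add(listptr, rq) do { \
                  (rq)->rq_next = *(listptr); \
                  *(listptr) = (rq); \
          } while (0)

  #define rq_list_add_tail(lastpptr, rq) do { \
                  (rq)->rq_next = NULL; \
                  **(lastpptr) = (rq); \
                  *(lastpptr) = &(rq)->rq_next; \
          } while (0)

  static void print_list(const char *tag, struct request *rq)
  {
          printf("%s:", tag);
          for (; rq; rq = rq->rq_next)
                  printf(" %d", rq->id);
          printf("\n");
  }

  int main(void)
  {
          struct request a[3] = { { 1 }, { 2 }, { 3 } };
          struct request b[3] = { { 1 }, { 2 }, { 3 } };
          struct request *rev = NULL, *ord = NULL, **lastp = &ord;

          for (int i = 0; i < 3; i++) {
                  rq_list_add(&rev, &a[i]);        /* prepend */
                  rq_list_add_tail(&lastp, &b[i]); /* append  */
          }
          print_list("head-insert", rev);  /* 3 2 1 */
          print_list("tail-append", ord);  /* 1 2 3 */
          return 0;
  }
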
--- /dev/null
+From f6f18e7f248f9fce5439949fe6a5b53e609b3cae Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Mar 2023 13:11:05 +0900
+Subject: block: null_blk: Fix handling of fake timeout request
+
+From: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+
+[ Upstream commit 63f886597085f346276e3b3c8974de0100d65f32 ]
+
+When injecting a fake timeout into the null_blk driver using
+fail_io_timeout, the request timeout handler does not execute
+blk_mq_complete_request(), so the complete callback is never executed
+for a timed-out request.
+
+The null_blk driver also has a driver-specific fake timeout mechanism
+which does not have this problem. Fix the problem with fail_io_timeout
+by using the same mechanism as the null_blk internal timeout feature,
+using the fake_timeout field of null_blk commands.
+
+Reported-by: Akinobu Mita <akinobu.mita@gmail.com>
+Fixes: de3510e52b0a ("null_blk: fix command timeout completion handling")
+Signed-off-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Link: https://lore.kernel.org/r/20230314041106.19173-2-damien.lemoal@opensource.wdc.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/block/null_blk/main.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
+index 1f154f92f4c27..af419af9a0f4a 100644
+--- a/drivers/block/null_blk/main.c
++++ b/drivers/block/null_blk/main.c
+@@ -1393,8 +1393,7 @@ static inline void nullb_complete_cmd(struct nullb_cmd *cmd)
+ case NULL_IRQ_SOFTIRQ:
+ switch (cmd->nq->dev->queue_mode) {
+ case NULL_Q_MQ:
+- if (likely(!blk_should_fake_timeout(cmd->rq->q)))
+- blk_mq_complete_request(cmd->rq);
++ blk_mq_complete_request(cmd->rq);
+ break;
+ case NULL_Q_BIO:
+ /*
+@@ -1655,7 +1654,8 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
+ cmd->rq = bd->rq;
+ cmd->error = BLK_STS_OK;
+ cmd->nq = nq;
+- cmd->fake_timeout = should_timeout_request(bd->rq);
++ cmd->fake_timeout = should_timeout_request(bd->rq) ||
++ blk_should_fake_timeout(bd->rq->q);
+
+ blk_mq_start_request(bd->rq);
+
+--
+2.39.2
+
--- /dev/null
+From af013ec587aa790c2ee9a04ea13d89b438f9df81 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 Mar 2023 14:20:32 +0800
+Subject: block: sunvdc: add check for mdesc_grab() returning NULL
+
+From: Liang He <windhl@126.com>
+
+[ Upstream commit 6030363199e3a6341afb467ddddbed56640cbf6a ]
+
+In vdc_port_probe(), we should check the return value of mdesc_grab(), as
+it may return NULL, which can cause a potential NULL pointer dereference
+(NPD) bug.
+
+Fixes: 43fdf27470b2 ("[SPARC64]: Abstract out mdesc accesses for better MD update handling.")
+Signed-off-by: Liang He <windhl@126.com>
+Link: https://lore.kernel.org/r/20230315062032.1741692-1-windhl@126.com
+[axboe: style cleanup]
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/block/sunvdc.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
+index fb855da971ee7..9fa821fa76b07 100644
+--- a/drivers/block/sunvdc.c
++++ b/drivers/block/sunvdc.c
+@@ -972,6 +972,8 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
+ print_version();
+
+ hp = mdesc_grab();
++ if (!hp)
++ return -ENODEV;
+
+ err = -ENODEV;
+ if ((vdev->dev_no << PARTITION_SHIFT) & ~(u64)MINORMASK) {
+--
+2.39.2
+
--- /dev/null
+From 65d775a609b0e4b959929f47f6f1f4af30747045 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 Mar 2023 13:18:41 +0200
+Subject: bonding: restore bond's IFF_SLAVE flag if a non-eth dev enslave fails
+
+From: Nikolay Aleksandrov <razor@blackwall.org>
+
+[ Upstream commit e667d469098671261d558be0cd93dca4d285ce1e ]
+
+syzbot reported a warning[1] where the bond device itself is a slave and
+we try to enslave a non-ethernet device as the first slave, which fails;
+then, in the error path, when ether_setup() restores the bond device it
+also clears all flags. In my previous fix[2] I restored the IFF_MASTER
+flag, but I didn't consider the case where the bond device itself might
+also be a slave with IFF_SLAVE set, so we need to restore that flag as
+well. Use the bond_ether_setup helper, which does the right thing and
+restores the bond's flags properly.
+
+Steps to reproduce using a nlmon dev:
+ $ ip l add nlmon0 type nlmon
+ $ ip l add bond1 type bond
+ $ ip l add bond2 type bond
+ $ ip l set bond1 master bond2
+ $ ip l set dev nlmon0 master bond1
+ $ ip -d l sh dev bond1
+ 22: bond1: <BROADCAST,MULTICAST,MASTER> mtu 1500 qdisc noqueue master bond2 state DOWN mode DEFAULT group default qlen 1000
+ (now bond1's IFF_SLAVE flag is gone and we'll hit a warning[3] if we
+ try to delete it)
+
+[1] https://syzkaller.appspot.com/bug?id=391c7b1f6522182899efba27d891f1743e8eb3ef
+[2] commit 7d5cd2ce5292 ("bonding: correctly handle bonding type change on enslave failure")
+[3] example warning:
+ [ 27.008664] bond1: (slave nlmon0): The slave device specified does not support setting the MAC address
+ [ 27.008692] bond1: (slave nlmon0): Error -95 calling set_mac_address
+ [ 32.464639] bond1 (unregistering): Released all slaves
+ [ 32.464685] ------------[ cut here ]------------
+ [ 32.464686] WARNING: CPU: 1 PID: 2004 at net/core/dev.c:10829 unregister_netdevice_many+0x72a/0x780
+ [ 32.464694] Modules linked in: br_netfilter bridge bonding virtio_net
+ [ 32.464699] CPU: 1 PID: 2004 Comm: ip Kdump: loaded Not tainted 5.18.0-rc3+ #47
+ [ 32.464703] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.1-2.fc37 04/01/2014
+ [ 32.464704] RIP: 0010:unregister_netdevice_many+0x72a/0x780
+ [ 32.464707] Code: 99 fd ff ff ba 90 1a 00 00 48 c7 c6 f4 02 66 96 48 c7 c7 20 4d 35 96 c6 05 fa c7 2b 02 01 e8 be 6f 4a 00 0f 0b e9 73 fd ff ff <0f> 0b e9 5f fd ff ff 80 3d e3 c7 2b 02 00 0f 85 3b fd ff ff ba 59
+ [ 32.464710] RSP: 0018:ffffa006422d7820 EFLAGS: 00010206
+ [ 32.464712] RAX: ffff8f6e077140a0 RBX: ffffa006422d7888 RCX: 0000000000000000
+ [ 32.464714] RDX: ffff8f6e12edbe58 RSI: 0000000000000296 RDI: ffffffff96d4a520
+ [ 32.464716] RBP: ffff8f6e07714000 R08: ffffffff96d63600 R09: ffffa006422d7728
+ [ 32.464717] R10: 0000000000000ec0 R11: ffffffff9698c988 R12: ffff8f6e12edb140
+ [ 32.464719] R13: dead000000000122 R14: dead000000000100 R15: ffff8f6e12edb140
+ [ 32.464723] FS: 00007f297c2f1740(0000) GS:ffff8f6e5d900000(0000) knlGS:0000000000000000
+ [ 32.464725] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ [ 32.464726] CR2: 00007f297bf1c800 CR3: 00000000115e8000 CR4: 0000000000350ee0
+ [ 32.464730] Call Trace:
+ [ 32.464763] <TASK>
+ [ 32.464767] rtnl_dellink+0x13e/0x380
+ [ 32.464776] ? cred_has_capability.isra.0+0x68/0x100
+ [ 32.464780] ? __rtnl_unlock+0x33/0x60
+ [ 32.464783] ? bpf_lsm_capset+0x10/0x10
+ [ 32.464786] ? security_capable+0x36/0x50
+ [ 32.464790] rtnetlink_rcv_msg+0x14e/0x3b0
+ [ 32.464792] ? _copy_to_iter+0xb1/0x790
+ [ 32.464796] ? post_alloc_hook+0xa0/0x160
+ [ 32.464799] ? rtnl_calcit.isra.0+0x110/0x110
+ [ 32.464802] netlink_rcv_skb+0x50/0xf0
+ [ 32.464806] netlink_unicast+0x216/0x340
+ [ 32.464809] netlink_sendmsg+0x23f/0x480
+ [ 32.464812] sock_sendmsg+0x5e/0x60
+ [ 32.464815] ____sys_sendmsg+0x22c/0x270
+ [ 32.464818] ? import_iovec+0x17/0x20
+ [ 32.464821] ? sendmsg_copy_msghdr+0x59/0x90
+ [ 32.464823] ? do_set_pte+0xa0/0xe0
+ [ 32.464828] ___sys_sendmsg+0x81/0xc0
+ [ 32.464832] ? mod_objcg_state+0xc6/0x300
+ [ 32.464835] ? refill_obj_stock+0xa9/0x160
+ [ 32.464838] ? memcg_slab_free_hook+0x1a5/0x1f0
+ [ 32.464842] __sys_sendmsg+0x49/0x80
+ [ 32.464847] do_syscall_64+0x3b/0x90
+ [ 32.464851] entry_SYSCALL_64_after_hwframe+0x44/0xae
+ [ 32.464865] RIP: 0033:0x7f297bf2e5e7
+ [ 32.464868] Code: 64 89 02 48 c7 c0 ff ff ff ff eb bb 0f 1f 80 00 00 00 00 f3 0f 1e fa 64 8b 04 25 18 00 00 00 85 c0 75 10 b8 2e 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 51 c3 48 83 ec 28 89 54 24 1c 48 89 74 24 10
+ [ 32.464869] RSP: 002b:00007ffd96c824c8 EFLAGS: 00000246 ORIG_RAX: 000000000000002e
+ [ 32.464872] RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007f297bf2e5e7
+ [ 32.464874] RDX: 0000000000000000 RSI: 00007ffd96c82540 RDI: 0000000000000003
+ [ 32.464875] RBP: 00000000640f19de R08: 0000000000000001 R09: 000000000000007c
+ [ 32.464876] R10: 00007f297bffabe0 R11: 0000000000000246 R12: 0000000000000001
+ [ 32.464877] R13: 00007ffd96c82d20 R14: 00007ffd96c82610 R15: 000055bfe38a7020
+ [ 32.464881] </TASK>
+ [ 32.464882] ---[ end trace 0000000000000000 ]---
+
+Fixes: 7d5cd2ce5292 ("bonding: correctly handle bonding type change on enslave failure")
+Reported-by: syzbot+9dfc3f3348729cc82277@syzkaller.appspotmail.com
+Link: https://syzkaller.appspot.com/bug?id=391c7b1f6522182899efba27d891f1743e8eb3ef
+Signed-off-by: Nikolay Aleksandrov <razor@blackwall.org>
+Reviewed-by: Michal Kubiak <michal.kubiak@intel.com>
+Acked-by: Jonathan Toppins <jtoppins@redhat.com>
+Acked-by: Jay Vosburgh <jay.vosburgh@canonical.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/bonding/bond_main.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 091c430547e7c..45d3cb557de73 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -2299,9 +2299,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
+ eth_hw_addr_random(bond_dev);
+ if (bond_dev->type != ARPHRD_ETHER) {
+ dev_close(bond_dev);
+- ether_setup(bond_dev);
+- bond_dev->flags |= IFF_MASTER;
+- bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
++ bond_ether_setup(bond_dev);
+ }
+ }
+
+--
+2.39.2
+
--- /dev/null
+From 5516bf9f023aa7ca2c47c9a30bcab53d80284954 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 Mar 2023 13:18:40 +0200
+Subject: bonding: restore IFF_MASTER/SLAVE flags on bond enslave ether type
+ change
+
+From: Nikolay Aleksandrov <razor@blackwall.org>
+
+[ Upstream commit 9ec7eb60dcbcb6c41076defbc5df7bbd95ceaba5 ]
+
+Add the bond_ether_setup helper, which is used to fix ether_setup() calls
+in the bonding driver. It takes care of both the IFF_MASTER and IFF_SLAVE
+flags: the former is always restored, and the latter only if it was set.
+If the bond enslaves a non-ARPHRD_ETHER device (changing its type), then
+releases it and enslaves an ARPHRD_ETHER device (changing back), we use
+ether_setup() to restore the bond device type, but it also resets its
+flags and removes IFF_MASTER and IFF_SLAVE[1]. Use the bond_ether_setup
+helper to restore both after such a transition.
+
+[1] reproduce (nlmon is non-ARPHRD_ETHER):
+ $ ip l add nlmon0 type nlmon
+ $ ip l add bond2 type bond mode active-backup
+ $ ip l set nlmon0 master bond2
+ $ ip l set nlmon0 nomaster
+ $ ip l add bond1 type bond
+ (we use bond1 as ARPHRD_ETHER device to restore bond2's mode)
+ $ ip l set bond1 master bond2
+ $ ip l sh dev bond2
+ 37: bond2: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
+ link/ether be:d7:c5:40:5b:cc brd ff:ff:ff:ff:ff:ff promiscuity 0 minmtu 68 maxmtu 1500
+ (notice bond2's IFF_MASTER is missing)
+
+Fixes: e36b9d16c6a6 ("bonding: clean muticast addresses when device changes type")
+Signed-off-by: Nikolay Aleksandrov <razor@blackwall.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/bonding/bond_main.c | 19 +++++++++++++++----
+ 1 file changed, 15 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index fce9301c8ebbc..091c430547e7c 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1774,6 +1774,19 @@ void bond_lower_state_changed(struct slave *slave)
+ slave_err(bond_dev, slave_dev, "Error: %s\n", errmsg); \
+ } while (0)
+
++/* The bonding driver uses ether_setup() to convert a master bond device
++ * to ARPHRD_ETHER, that resets the target netdevice's flags so we always
++ * have to restore the IFF_MASTER flag, and only restore IFF_SLAVE if it was set
++ */
++static void bond_ether_setup(struct net_device *bond_dev)
++{
++ unsigned int slave_flag = bond_dev->flags & IFF_SLAVE;
++
++ ether_setup(bond_dev);
++ bond_dev->flags |= IFF_MASTER | slave_flag;
++ bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
++}
++
+ /* enslave device <slave> to bond device <master> */
+ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
+ struct netlink_ext_ack *extack)
+@@ -1865,10 +1878,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
+
+ if (slave_dev->type != ARPHRD_ETHER)
+ bond_setup_by_slave(bond_dev, slave_dev);
+- else {
+- ether_setup(bond_dev);
+- bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+- }
++ else
++ bond_ether_setup(bond_dev);
+
+ call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
+ bond_dev);
+--
+2.39.2
+
--- /dev/null
+From c08163e4d14eabe27d3299a19e5aecb4aa41f4d7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 16 Nov 2022 11:11:36 +0800
+Subject: cifs: Move the in_send statistic to __smb_send_rqst()
+
+From: Zhang Xiaoxu <zhangxiaoxu5@huawei.com>
+
+[ Upstream commit d0dc41119905f740e8d5594adce277f7c0de8c92 ]
+
+When sending SMB_COM_NT_CANCEL and RFC1002_SESSION_REQUEST, the
+in_send statistic was lost.
+
+Let's move the in_send statistic to the send function to avoid
+this scenario.
+
+Fixes: 7ee1af765dfa ("[CIFS]")
+Signed-off-by: Zhang Xiaoxu <zhangxiaoxu5@huawei.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/cifs/transport.c | 21 +++++++++------------
+ 1 file changed, 9 insertions(+), 12 deletions(-)
+
+diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
+index 3851d0aaa2886..c961b90f92b9f 100644
+--- a/fs/cifs/transport.c
++++ b/fs/cifs/transport.c
+@@ -297,7 +297,7 @@ static int
+ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
+ struct smb_rqst *rqst)
+ {
+- int rc = 0;
++ int rc;
+ struct kvec *iov;
+ int n_vec;
+ unsigned int send_length = 0;
+@@ -308,6 +308,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
+ struct msghdr smb_msg = {};
+ __be32 rfc1002_marker;
+
++ cifs_in_send_inc(server);
+ if (cifs_rdma_enabled(server)) {
+ /* return -EAGAIN when connecting or reconnecting */
+ rc = -EAGAIN;
+@@ -316,14 +317,17 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
+ goto smbd_done;
+ }
+
++ rc = -EAGAIN;
+ if (ssocket == NULL)
+- return -EAGAIN;
++ goto out;
+
++ rc = -ERESTARTSYS;
+ if (fatal_signal_pending(current)) {
+ cifs_dbg(FYI, "signal pending before send request\n");
+- return -ERESTARTSYS;
++ goto out;
+ }
+
++ rc = 0;
+ /* cork the socket */
+ tcp_sock_set_cork(ssocket->sk, true);
+
+@@ -434,7 +438,8 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
+ rc);
+ else if (rc > 0)
+ rc = 0;
+-
++out:
++ cifs_in_send_dec(server);
+ return rc;
+ }
+
+@@ -853,9 +858,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
+ * I/O response may come back and free the mid entry on another thread.
+ */
+ cifs_save_when_sent(mid);
+- cifs_in_send_inc(server);
+ rc = smb_send_rqst(server, 1, rqst, flags);
+- cifs_in_send_dec(server);
+
+ if (rc < 0) {
+ revert_current_mid(server, mid->credits);
+@@ -1146,9 +1149,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
+ else
+ midQ[i]->callback = cifs_compound_last_callback;
+ }
+- cifs_in_send_inc(server);
+ rc = smb_send_rqst(server, num_rqst, rqst, flags);
+- cifs_in_send_dec(server);
+
+ for (i = 0; i < num_rqst; i++)
+ cifs_save_when_sent(midQ[i]);
+@@ -1398,9 +1399,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
+
+ midQ->mid_state = MID_REQUEST_SUBMITTED;
+
+- cifs_in_send_inc(server);
+ rc = smb_send(server, in_buf, len);
+- cifs_in_send_dec(server);
+ cifs_save_when_sent(midQ);
+
+ if (rc < 0)
+@@ -1541,9 +1540,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
+ }
+
+ midQ->mid_state = MID_REQUEST_SUBMITTED;
+- cifs_in_send_inc(server);
+ rc = smb_send(server, in_buf, len);
+- cifs_in_send_dec(server);
+ cifs_save_when_sent(midQ);
+
+ if (rc < 0)
+--
+2.39.2
+
--- /dev/null
+From 9d515cbaf7810b77a2ebdab3284689e482463de6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 25 Feb 2023 21:39:47 -0800
+Subject: clk: HI655X: select REGMAP instead of depending on it
+
+From: Randy Dunlap <rdunlap@infradead.org>
+
+[ Upstream commit 0ffad67784a097beccf34d297ddd1b0773b3b8a3 ]
+
+REGMAP is a hidden (not user visible) symbol. Users cannot set it
+directly thru "make *config", so drivers should select it instead of
+depending on it if they need it.
+
+Consistently using "select" or "depends on" can also help reduce
+Kconfig circular dependency issues.
+
+Therefore, change the use of "depends on REGMAP" to "select REGMAP".
+
+Fixes: 3a49afb84ca0 ("clk: enable hi655x common clk automatically")
+Signed-off-by: Randy Dunlap <rdunlap@infradead.org>
+Cc: Riku Voipio <riku.voipio@linaro.org>
+Cc: Stephen Boyd <sboyd@kernel.org>
+Cc: Michael Turquette <mturquette@baylibre.com>
+Cc: linux-clk@vger.kernel.org
+Link: https://lore.kernel.org/r/20230226053953.4681-3-rdunlap@infradead.org
+Signed-off-by: Stephen Boyd <sboyd@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/clk/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
+index d79905f3e1744..5da82f2bdd211 100644
+--- a/drivers/clk/Kconfig
++++ b/drivers/clk/Kconfig
+@@ -92,7 +92,7 @@ config COMMON_CLK_RK808
+ config COMMON_CLK_HI655X
+ tristate "Clock driver for Hi655x" if EXPERT
+ depends on (MFD_HI655X_PMIC || COMPILE_TEST)
+- depends on REGMAP
++ select REGMAP
+ default MFD_HI655X_PMIC
+ help
+ This driver supports the hi655x PMIC clock. This
+--
+2.39.2
+
--- /dev/null
+From 1a35fabd4baef429845f3aa390a1b83c0248ff1f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Feb 2023 12:40:42 -0600
+Subject: docs: Correct missing "d_" prefix for dentry_operations member
+ d_weak_revalidate
+
+From: Glenn Washburn <development@efficientek.com>
+
+[ Upstream commit 74596085796fae0cfce3e42ee46bf4f8acbdac55 ]
+
+The documentation for the struct dentry_operations member
+d_weak_revalidate is missing its "d_" prefix.
+
+Fixes: af96c1e304f7 ("docs: filesystems: vfs: Convert vfs.txt to RST")
+Signed-off-by: Glenn Washburn <development@efficientek.com>
+Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Link: https://lore.kernel.org/r/20230227184042.2375235-1-development@efficientek.com
+Signed-off-by: Jonathan Corbet <corbet@lwn.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ Documentation/filesystems/vfs.rst | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/Documentation/filesystems/vfs.rst b/Documentation/filesystems/vfs.rst
+index 2b55f71e2ae19..b5e8b8af8afbb 100644
+--- a/Documentation/filesystems/vfs.rst
++++ b/Documentation/filesystems/vfs.rst
+@@ -1221,7 +1221,7 @@ defined:
+ return
+ -ECHILD and it will be called again in ref-walk mode.
+
+-``_weak_revalidate``
++``d_weak_revalidate``
+ called when the VFS needs to revalidate a "jumped" dentry. This
+ is called when a path-walk ends at dentry that was not acquired
+ by doing a lookup in the parent directory. This includes "/",
+--
+2.39.2
+
--- /dev/null
+From de0e63934711ac7459643b321f7bce73447350d3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Mar 2023 13:50:35 +0800
+Subject: drm/bridge: Fix returned array size name for
+ atomic_get_input_bus_fmts kdoc
+
+From: Liu Ying <victor.liu@nxp.com>
+
+[ Upstream commit 0d3c9333d976af41d7dbc6bf4d9d2e95fbdf9c89 ]
+
+The returned array size for input formats is set through
+atomic_get_input_bus_fmts()'s 'num_input_fmts' argument, so use
+'num_input_fmts' to represent the array size in the function's kdoc,
+not 'num_output_fmts'.
+
+Fixes: 91ea83306bfa ("drm/bridge: Fix the bridge kernel doc")
+Fixes: f32df58acc68 ("drm/bridge: Add the necessary bits to support bus format negotiation")
+Signed-off-by: Liu Ying <victor.liu@nxp.com>
+Reviewed-by: Robert Foss <rfoss@kernel.org>
+Signed-off-by: Neil Armstrong <neil.armstrong@linaro.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230314055035.3731179-1-victor.liu@nxp.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/drm/drm_bridge.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
+index 6b65b0dfb4fb4..288c6feda5de2 100644
+--- a/include/drm/drm_bridge.h
++++ b/include/drm/drm_bridge.h
+@@ -447,11 +447,11 @@ struct drm_bridge_funcs {
+ *
+ * The returned array must be allocated with kmalloc() and will be
+ * freed by the caller. If the allocation fails, NULL should be
+- * returned. num_output_fmts must be set to the returned array size.
++ * returned. num_input_fmts must be set to the returned array size.
+ * Formats listed in the returned array should be listed in decreasing
+ * preference order (the core will try all formats until it finds one
+ * that works). When the format is not supported NULL should be
+- * returned and num_output_fmts should be set to 0.
++ * returned and num_input_fmts should be set to 0.
+ *
+ * This method is called on all elements of the bridge chain as part of
+ * the bus format negotiation process that happens in
+--
+2.39.2
+
--- /dev/null
+From 1deef59d6ed25b5e92ff66326eac4e5e6ac1be80 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 Feb 2023 10:53:04 +0200
+Subject: drm/i915/psr: Use calculated io and fast wake lines
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jouni Högander <jouni.hogander@intel.com>
+
+[ Upstream commit 71c602103c74b277bef3d20a308874a33ec8326d ]
+
+Currently we are using a hardcoded value of 7 for the io and fast wake
+lines.
+
+According to Bspec, the io and fast wake times are both 42us for
+DISPLAY_VER >= 12, and 50us and 32us respectively for older platforms.
+
+Calculate line counts for these and configure them into PSR2_CTL
+accordingly.
+
+Use 45us for the fast wake calculation, as 42us seems to be too tight
+based on testing.
+
+Bspec: 49274, 4289
+
+Cc: Mika Kahola <mika.kahola@intel.com>
+Cc: José Roberto de Souza <jose.souza@intel.com>
+Fixes: 64cf40a125ff ("drm/i915/psr: Program default IO buffer Wake and Fast Wake")
+Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/7725
+Signed-off-by: Jouni Högander <jouni.hogander@intel.com>
+Reviewed-by: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230221085304.3382297-1-jouni.hogander@intel.com
+(cherry picked from commit cb42e8ede5b475c096e473b86c356b1158b4bc3b)
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../drm/i915/display/intel_display_types.h | 2 +
+ drivers/gpu/drm/i915/display/intel_psr.c | 78 +++++++++++++++----
+ 2 files changed, 63 insertions(+), 17 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
+index 135dbcab62b28..63b7105e818a6 100644
+--- a/drivers/gpu/drm/i915/display/intel_display_types.h
++++ b/drivers/gpu/drm/i915/display/intel_display_types.h
+@@ -1604,6 +1604,8 @@ struct intel_psr {
+ bool psr2_sel_fetch_cff_enabled;
+ bool req_psr2_sdp_prior_scanline;
+ u8 sink_sync_latency;
++ u8 io_wake_lines;
++ u8 fast_wake_lines;
+ ktime_t last_entry_attempt;
+ ktime_t last_exit;
+ bool sink_not_reliable;
+diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
+index 15c3e448aa0e6..bf18423c7a005 100644
+--- a/drivers/gpu/drm/i915/display/intel_psr.c
++++ b/drivers/gpu/drm/i915/display/intel_psr.c
+@@ -542,6 +542,14 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
+ val |= EDP_PSR2_FRAME_BEFORE_SU(max_t(u8, intel_dp->psr.sink_sync_latency + 1, 2));
+ val |= intel_psr2_get_tp_time(intel_dp);
+
++ if (DISPLAY_VER(dev_priv) >= 12) {
++ if (intel_dp->psr.io_wake_lines < 9 &&
++ intel_dp->psr.fast_wake_lines < 9)
++ val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
++ else
++ val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
++ }
++
+ /* Wa_22012278275:adl-p */
+ if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
+ static const u8 map[] = {
+@@ -558,31 +566,21 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
+ * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
+ * comments bellow for more information
+ */
+- u32 tmp, lines = 7;
+-
+- val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
++ u32 tmp;
+
+- tmp = map[lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
++ tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
+ tmp = tmp << TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT;
+ val |= tmp;
+
+- tmp = map[lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
++ tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
+ tmp = tmp << TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT;
+ val |= tmp;
+ } else if (DISPLAY_VER(dev_priv) >= 12) {
+- /*
+- * TODO: 7 lines of IO_BUFFER_WAKE and FAST_WAKE are default
+- * values from BSpec. In order to setting an optimal power
+- * consumption, lower than 4k resolution mode needs to decrease
+- * IO_BUFFER_WAKE and FAST_WAKE. And higher than 4K resolution
+- * mode needs to increase IO_BUFFER_WAKE and FAST_WAKE.
+- */
+- val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
+- val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(7);
+- val |= TGL_EDP_PSR2_FAST_WAKE(7);
++ val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
++ val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
+ } else if (DISPLAY_VER(dev_priv) >= 9) {
+- val |= EDP_PSR2_IO_BUFFER_WAKE(7);
+- val |= EDP_PSR2_FAST_WAKE(7);
++ val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
++ val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
+ }
+
+ if (intel_dp->psr.req_psr2_sdp_prior_scanline)
+@@ -837,6 +835,46 @@ static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_d
+ return true;
+ }
+
++static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
++ struct intel_crtc_state *crtc_state)
++{
++ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
++ int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
++ u8 max_wake_lines;
++
++ if (DISPLAY_VER(i915) >= 12) {
++ io_wake_time = 42;
++ /*
++ * According to Bspec it's 42us, but based on testing
++ * it is not enough -> use 45 us.
++ */
++ fast_wake_time = 45;
++ max_wake_lines = 12;
++ } else {
++ io_wake_time = 50;
++ fast_wake_time = 32;
++ max_wake_lines = 8;
++ }
++
++ io_wake_lines = intel_usecs_to_scanlines(
++ &crtc_state->uapi.adjusted_mode, io_wake_time);
++ fast_wake_lines = intel_usecs_to_scanlines(
++ &crtc_state->uapi.adjusted_mode, fast_wake_time);
++
++ if (io_wake_lines > max_wake_lines ||
++ fast_wake_lines > max_wake_lines)
++ return false;
++
++ if (i915->params.psr_safest_params)
++ io_wake_lines = fast_wake_lines = max_wake_lines;
++
++ /* According to Bspec lower limit should be set as 7 lines. */
++ intel_dp->psr.io_wake_lines = max(io_wake_lines, 7);
++ intel_dp->psr.fast_wake_lines = max(fast_wake_lines, 7);
++
++ return true;
++}
++
+ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+ {
+@@ -930,6 +968,12 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
+ return false;
+ }
+
++ if (!_compute_psr2_wake_times(intel_dp, crtc_state)) {
++ drm_dbg_kms(&dev_priv->drm,
++ "PSR2 not enabled, Unable to use long enough wake times\n");
++ return false;
++ }
++
+ if (HAS_PSR2_SEL_FETCH(dev_priv)) {
+ if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
+ !HAS_PSR_HW_TRACKING(dev_priv)) {
+--
+2.39.2
+
--- /dev/null
+From e27eac81cf2b6c997f1e619afa8fa010ec418320 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Feb 2023 18:18:58 +0100
+Subject: drm/i915/sseu: fix max_subslices array-index-out-of-bounds access
+
+From: Andrea Righi <andrea.righi@canonical.com>
+
+[ Upstream commit 193c41926d152761764894f46e23b53c00186a82 ]
+
+It seems that commit bc3c5e0809ae ("drm/i915/sseu: Don't try to store EU
+mask internally in UAPI format") exposed a potential out-of-bounds
+access, reported by UBSAN as follows on a laptop with a gen 11 i915
+card:
+
+ UBSAN: array-index-out-of-bounds in drivers/gpu/drm/i915/gt/intel_sseu.c:65:27
+ index 6 is out of range for type 'u16 [6]'
+ CPU: 2 PID: 165 Comm: systemd-udevd Not tainted 6.2.0-9-generic #9-Ubuntu
+ Hardware name: Dell Inc. XPS 13 9300/077Y9N, BIOS 1.11.0 03/22/2022
+ Call Trace:
+ <TASK>
+ show_stack+0x4e/0x61
+ dump_stack_lvl+0x4a/0x6f
+ dump_stack+0x10/0x18
+ ubsan_epilogue+0x9/0x3a
+ __ubsan_handle_out_of_bounds.cold+0x42/0x47
+ gen11_compute_sseu_info+0x121/0x130 [i915]
+ intel_sseu_info_init+0x15d/0x2b0 [i915]
+ intel_gt_init_mmio+0x23/0x40 [i915]
+ i915_driver_mmio_probe+0x129/0x400 [i915]
+ ? intel_gt_probe_all+0x91/0x2e0 [i915]
+ i915_driver_probe+0xe1/0x3f0 [i915]
+ ? drm_privacy_screen_get+0x16d/0x190 [drm]
+ ? acpi_dev_found+0x64/0x80
+ i915_pci_probe+0xac/0x1b0 [i915]
+ ...
+
+According to the definition of sseu_dev_info, eu_mask->hsw is limited to
+a maximum of GEN_MAX_SS_PER_HSW_SLICE (6) sub-slices, but
+gen11_sseu_info_init() can potentially set 8 sub-slices, in the
+!IS_JSL_EHL(gt->i915) case.
+
+Fix this by reserving up to 8 slots for max_subslices in the eu_mask
+struct.
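+
+A compile-only sketch of the sizing rule (toy names, not the real i915
+structures; the counts mirror the values quoted above):
+
+  /* Size the per-slice EU mask array for the largest sub-slice count
+   * any covered platform can report, so writes for sub-slice indices
+   * 0..7 stay in bounds. */
+  #define TOY_MAX_SS_PER_SLICE  8       /* was 6 before the fix */
+  #define TOY_GEN11_SS_COUNT    8       /* !IS_JSL_EHL(gt->i915) case */
+
+  struct toy_sseu {
+          unsigned short eu_mask[TOY_MAX_SS_PER_SLICE];
+  };
+
+  _Static_assert(TOY_GEN11_SS_COUNT <= TOY_MAX_SS_PER_SLICE,
+                 "sub-slice count must fit the eu_mask array");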
+
+Reported-by: Emil Renner Berthing <emil.renner.berthing@canonical.com>
+Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
+Fixes: bc3c5e0809ae ("drm/i915/sseu: Don't try to store EU mask internally in UAPI format")
+Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
+Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230220171858.131416-1-andrea.righi@canonical.com
+(cherry picked from commit 3cba09a6ac86ea1d456909626eb2685596c07822)
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/gt/intel_sseu.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h
+index aa87d3832d60d..d7e8c374f153e 100644
+--- a/drivers/gpu/drm/i915/gt/intel_sseu.h
++++ b/drivers/gpu/drm/i915/gt/intel_sseu.h
+@@ -27,7 +27,7 @@ struct drm_printer;
+ * is only relevant to pre-Xe_HP platforms (Xe_HP and beyond use the
+ * I915_MAX_SS_FUSE_BITS value below).
+ */
+-#define GEN_MAX_SS_PER_HSW_SLICE 6
++#define GEN_MAX_SS_PER_HSW_SLICE 8
+
+ /*
+ * Maximum number of 32-bit registers used by hardware to express the
+--
+2.39.2
+
--- /dev/null
+From 067fa9756a13693a23540c075e45ed1fb30a9ec3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 3 Mar 2023 12:33:12 +0000
+Subject: drm/meson: fix 1px pink line on GXM when scaling video overlay
+
+From: Christian Hewitt <christianshewitt@gmail.com>
+
+[ Upstream commit 5c8cf1664f288098a971a1d1e65716a2b6a279e1 ]
+
+Playing media with a resolution smaller than the crtc size requires the
+video overlay to be scaled for output and GXM boards display a 1px pink
+line on the bottom of the scaled overlay. Comparing with the downstream
+vendor driver revealed VPP_DUMMY_DATA not being set [0].
+
+Setting VPP_DUMMY_DATA prevents the 1px pink line from being seen.
+
+[0] https://github.com/endlessm/linux-s905x/blob/master/drivers/amlogic/amports/video.c#L7869
+
+Fixes: bbbe775ec5b5 ("drm: Add support for Amlogic Meson Graphic Controller")
+Suggested-by: Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+Signed-off-by: Christian Hewitt <christianshewitt@gmail.com>
+Acked-by: Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+Signed-off-by: Neil Armstrong <neil.armstrong@linaro.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230303123312.155164-1-christianshewitt@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/meson/meson_vpp.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/meson/meson_vpp.c b/drivers/gpu/drm/meson/meson_vpp.c
+index 154837688ab0d..5df1957c8e41f 100644
+--- a/drivers/gpu/drm/meson/meson_vpp.c
++++ b/drivers/gpu/drm/meson/meson_vpp.c
+@@ -100,6 +100,8 @@ void meson_vpp_init(struct meson_drm *priv)
+ priv->io_base + _REG(VPP_DOLBY_CTRL));
+ writel_relaxed(0x1020080,
+ priv->io_base + _REG(VPP_DUMMY_DATA1));
++ writel_relaxed(0x42020,
++ priv->io_base + _REG(VPP_DUMMY_DATA));
+ } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
+ writel_relaxed(0xf, priv->io_base + _REG(DOLBY_PATH_CTRL));
+
+--
+2.39.2
+
--- /dev/null
+From 1fccb2cc5eab177e09d615c6396b9695184dde64 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 4 Nov 2022 19:04:59 +0300
+Subject: drm/msm/gem: Prevent blocking within shrinker loop
+
+From: Dmitry Osipenko <dmitry.osipenko@collabora.com>
+
+[ Upstream commit 9630b585b607bd26f505d34620b14d75b9a5af7d ]
+
+Consider this scenario:
+
+1. APP1 continuously creates lots of small GEMs
+2. APP2 triggers `drop_caches`
+3. Shrinker starts to evict APP1 GEMs, while APP1 produces new purgeable
+ GEMs
+4. msm_gem_shrinker_scan() returns non-zero number of freed pages
+ and causes shrinker to try shrink more
+5. msm_gem_shrinker_scan() returns non-zero number of freed pages again,
+ goto 4
+6. The APP2 is blocked in `drop_caches` until APP1 stops producing
+ purgeable GEMs
+
+To prevent this blocking scenario, check the number of remaining pages
+that the GPU shrinker couldn't release due to GEM locking contention or
+shrinking rejection. If no pages remain to shrink, there is no need to
+free up more pages and the shrinker may break out of the loop.
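+
+As a rough illustration, here is a small user-space model of that retry
+dynamic (toy code: shrink_scan(), the pool/locked counters and the
+SHRINK_STOP value below are stand-ins, not the real drm/msm code):
+
+  #include <stdio.h>
+
+  #define SHRINK_STOP (~0UL)
+
+  /* Free everything currently in the pool; '*locked' models pages that
+   * could not be shrunk (e.g. lock contention).  Returning SHRINK_STOP
+   * once nothing had to be skipped lets the caller's retry loop
+   * terminate even while new purgeable objects keep appearing. */
+  static unsigned long shrink_scan(unsigned long *pool, unsigned long *locked)
+  {
+          unsigned long freed = *pool;
+          unsigned long remaining = *locked;
+
+          *pool = 0;
+          return (freed > 0 && remaining > 0) ? freed : SHRINK_STOP;
+  }
+
+  int main(void)
+  {
+          unsigned long pool = 32, locked = 0, ret;
+          int passes = 0;
+
+          do {                    /* model of the drop_caches retry loop */
+                  ret = shrink_scan(&pool, &locked);
+                  pool += 8;      /* the producer keeps adding GEMs */
+                  passes++;
+          } while (ret != SHRINK_STOP && passes < 1000);
+
+          printf("stopped after %d pass(es)\n", passes);
+          return 0;
+  }
+
+With the pre-fix convention (a non-zero 'freed' alone deciding the return
+value), the loop above would only stop at the artificial 1000-pass guard.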
+
+This problem was found during shrinker/madvise IOCTL testing of
+virtio-gpu driver. The MSM driver is affected in the same way.
+
+Reviewed-by: Rob Clark <robdclark@gmail.com>
+Reviewed-by: Thomas Zimmermann <tzimmermann@suse.de>
+Fixes: b352ba54a820 ("drm/msm/gem: Convert to using drm_gem_lru")
+Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
+Link: https://lore.kernel.org/all/20230108210445.3948344-2-dmitry.osipenko@collabora.com/
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/drm_gem.c | 9 +++++++--
+ drivers/gpu/drm/msm/msm_gem_shrinker.c | 11 +++++++++--
+ include/drm/drm_gem.h | 4 +++-
+ 3 files changed, 19 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
+index 8b68a3c1e6ab6..b87ed4238fc83 100644
+--- a/drivers/gpu/drm/drm_gem.c
++++ b/drivers/gpu/drm/drm_gem.c
+@@ -1351,10 +1351,13 @@ EXPORT_SYMBOL(drm_gem_lru_move_tail);
+ *
+ * @lru: The LRU to scan
+ * @nr_to_scan: The number of pages to try to reclaim
++ * @remaining: The number of pages left to reclaim, should be initialized by caller
+ * @shrink: Callback to try to shrink/reclaim the object.
+ */
+ unsigned long
+-drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
++drm_gem_lru_scan(struct drm_gem_lru *lru,
++ unsigned int nr_to_scan,
++ unsigned long *remaining,
+ bool (*shrink)(struct drm_gem_object *obj))
+ {
+ struct drm_gem_lru still_in_lru;
+@@ -1393,8 +1396,10 @@ drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
+ * hit shrinker in response to trying to get backing pages
+ * for this obj (ie. while it's lock is already held)
+ */
+- if (!dma_resv_trylock(obj->resv))
++ if (!dma_resv_trylock(obj->resv)) {
++ *remaining += obj->size >> PAGE_SHIFT;
+ goto tail;
++ }
+
+ if (shrink(obj)) {
+ freed += obj->size >> PAGE_SHIFT;
+diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
+index 1de14e67f96b0..31f054c903a43 100644
+--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
++++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
+@@ -107,6 +107,7 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
+ bool (*shrink)(struct drm_gem_object *obj);
+ bool cond;
+ unsigned long freed;
++ unsigned long remaining;
+ } stages[] = {
+ /* Stages of progressively more aggressive/expensive reclaim: */
+ { &priv->lru.dontneed, purge, true },
+@@ -116,14 +117,18 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
+ };
+ long nr = sc->nr_to_scan;
+ unsigned long freed = 0;
++ unsigned long remaining = 0;
+
+ for (unsigned i = 0; (nr > 0) && (i < ARRAY_SIZE(stages)); i++) {
+ if (!stages[i].cond)
+ continue;
+ stages[i].freed =
+- drm_gem_lru_scan(stages[i].lru, nr, stages[i].shrink);
++ drm_gem_lru_scan(stages[i].lru, nr,
++ &stages[i].remaining,
++ stages[i].shrink);
+ nr -= stages[i].freed;
+ freed += stages[i].freed;
++ remaining += stages[i].remaining;
+ }
+
+ if (freed) {
+@@ -132,7 +137,7 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
+ stages[3].freed);
+ }
+
+- return (freed > 0) ? freed : SHRINK_STOP;
++ return (freed > 0 && remaining > 0) ? freed : SHRINK_STOP;
+ }
+
+ #ifdef CONFIG_DEBUG_FS
+@@ -182,10 +187,12 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
+ NULL,
+ };
+ unsigned idx, unmapped = 0;
++ unsigned long remaining = 0;
+
+ for (idx = 0; lrus[idx] && unmapped < vmap_shrink_limit; idx++) {
+ unmapped += drm_gem_lru_scan(lrus[idx],
+ vmap_shrink_limit - unmapped,
++ &remaining,
+ vmap_shrink);
+ }
+
+diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
+index bd42f25e449c2..60b2dda8d964b 100644
+--- a/include/drm/drm_gem.h
++++ b/include/drm/drm_gem.h
+@@ -472,7 +472,9 @@ int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+ void drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock);
+ void drm_gem_lru_remove(struct drm_gem_object *obj);
+ void drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj);
+-unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
++unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru,
++ unsigned int nr_to_scan,
++ unsigned long *remaining,
+ bool (*shrink)(struct drm_gem_object *obj));
+
+ #endif /* __DRM_GEM_H__ */
+--
+2.39.2
+
--- /dev/null
+From 6fd844935e78275ac11a95ccebe8917ee0a5fa3f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 17 Nov 2022 04:40:38 +0300
+Subject: drm/panfrost: Don't sync rpm suspension after mmu flushing
+
+From: Dmitry Osipenko <dmitry.osipenko@collabora.com>
+
+[ Upstream commit ba3be66f11c3c49afaa9f49b99e21d88756229ef ]
+
+Lockdep warns about a potential circular locking dependency between devfreq
+and fs_reclaim, caused by immediate device suspension when a mapping is
+released by the shrinker. Fix it by doing the suspension asynchronously.
+
+Reviewed-by: Steven Price <steven.price@arm.com>
+Fixes: ec7eba47da86 ("drm/panfrost: Rework page table flushing and runtime PM interaction")
+Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
+Link: https://lore.kernel.org/all/20230108210445.3948344-3-dmitry.osipenko@collabora.com/
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/panfrost/panfrost_mmu.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
+index 4e83a1891f3ed..666a5e53fe193 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
++++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
+@@ -282,7 +282,7 @@ static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
+ if (pm_runtime_active(pfdev->dev))
+ mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);
+
+- pm_runtime_put_sync_autosuspend(pfdev->dev);
++ pm_runtime_put_autosuspend(pfdev->dev);
+ }
+
+ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
+--
+2.39.2
+
--- /dev/null
+From a508fa1b1818ae8652255ae0c6b6813402b7f05b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 24 Feb 2023 17:34:50 +0200
+Subject: drm/virtio: Pass correct device to dma_sync_sgtable_for_device()
+
+From: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
+
+[ Upstream commit a54bace095d00e9222161495649688bc43de4dde ]
+
+The "vdev->dev.parent" should be used instead of "vdev->dev" as a device
+for which to perform the DMA operation in both
+virtio_gpu_cmd_transfer_to_host_2d(3d).
+
+Because the virtio-gpu device "vdev->dev" doesn't really have DMA OPS
+assigned to it, but parent (virtio-pci or virtio-mmio) device
+"vdev->dev.parent" has. The more, the sgtable in question the code is
+trying to sync here was mapped for the parent device (by using its DMA OPS)
+previously at:
+virtio_gpu_object_shmem_init()->drm_gem_shmem_get_pages_sgt()->
+dma_map_sgtable(), so should be synced here for the same parent device.
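+
+The rule being applied, as a minimal kernel-style sketch
+(toy_sync_for_device() is a made-up helper for illustration, not part of
+the driver):
+
+  #include <linux/dma-mapping.h>
+  #include <linux/virtio.h>
+
+  /* Sync with the same struct device the sgtable was mapped for, i.e.
+   * the transport's parent device that owns the DMA ops. */
+  static void toy_sync_for_device(struct virtio_device *vdev,
+                                  struct sg_table *sgt)
+  {
+          struct device *dma_dev = vdev->dev.parent;
+
+          dma_sync_sgtable_for_device(dma_dev, sgt, DMA_TO_DEVICE);
+  }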
+
+Fixes: b5c9ed70d1a9 ("drm/virtio: Improve DMA API usage for shmem BOs")
+Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
+Reviewed-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
+Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230224153450.526222-1-olekstysh@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/virtio/virtgpu_vq.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
+index 9ff8660b50ade..208e9434cb28d 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
++++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
+@@ -597,7 +597,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
+ bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
+
+ if (virtio_gpu_is_shmem(bo) && use_dma_api)
+- dma_sync_sgtable_for_device(&vgdev->vdev->dev,
++ dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
+ bo->base.sgt, DMA_TO_DEVICE);
+
+ cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+@@ -1019,7 +1019,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
+ bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
+
+ if (virtio_gpu_is_shmem(bo) && use_dma_api)
+- dma_sync_sgtable_for_device(&vgdev->vdev->dev,
++ dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
+ bo->base.sgt, DMA_TO_DEVICE);
+
+ cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+--
+2.39.2
+
--- /dev/null
+From 92599715cf9fcdfad9863a173af2581ac077d8a5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 Mar 2023 14:00:21 +0800
+Subject: ethernet: sun: add check for the mdesc_grab()
+
+From: Liang He <windhl@126.com>
+
+[ Upstream commit 90de546d9a0b3c771667af18bb3f80567eabb89b ]
+
+In vnet_port_probe() and vsw_port_probe(), we should
+check the return value of mdesc_grab() as it may
+return NULL, which can cause NULL pointer dereference bugs.
+
+Fixes: 5d01fa0c6bd8 ("ldmvsw: Add ldmvsw.c driver code")
+Fixes: 43fdf27470b2 ("[SPARC64]: Abstract out mdesc accesses for better MD update handling.")
+Signed-off-by: Liang He <windhl@126.com>
+Reviewed-by: Piotr Raczynski <piotr.raczynski@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/sun/ldmvsw.c | 3 +++
+ drivers/net/ethernet/sun/sunvnet.c | 3 +++
+ 2 files changed, 6 insertions(+)
+
+diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
+index 8addee6d04bd8..734a817d3c945 100644
+--- a/drivers/net/ethernet/sun/ldmvsw.c
++++ b/drivers/net/ethernet/sun/ldmvsw.c
+@@ -287,6 +287,9 @@ static int vsw_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
+
+ hp = mdesc_grab();
+
++ if (!hp)
++ return -ENODEV;
++
+ rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len);
+ err = -ENODEV;
+ if (!rmac) {
+diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
+index acda6cbd0238d..bdf4c8be2d536 100644
+--- a/drivers/net/ethernet/sun/sunvnet.c
++++ b/drivers/net/ethernet/sun/sunvnet.c
+@@ -433,6 +433,9 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
+
+ hp = mdesc_grab();
+
++ if (!hp)
++ return -ENODEV;
++
+ vp = vnet_find_parent(hp, vdev->mp, vdev);
+ if (IS_ERR(vp)) {
+ pr_err("Cannot find port parent vnet\n");
+--
+2.39.2
+
--- /dev/null
+From d9e60a2e852018926670bd25538a6aa3822e3ca6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Feb 2023 13:33:30 +0300
+Subject: fbdev: chipsfb: Fix error codes in chipsfb_pci_init()
+
+From: Dan Carpenter <error27@gmail.com>
+
+[ Upstream commit 77bc762451c2dc72bdbea07b857c916c9e7f4952 ]
+
+The error codes are not set on these error paths.
+
+Fixes: 145eed48de27 ("fbdev: Remove conflicting devices on PCI bus")
+Signed-off-by: Dan Carpenter <error27@gmail.com>
+Reviewed-by: Thomas Zimmermann <tzimmermann@suse.de>
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Link: https://patchwork.freedesktop.org/patch/msgid/Y/yG+sm2mhdJeTZW@kili
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/video/fbdev/chipsfb.c | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/video/fbdev/chipsfb.c b/drivers/video/fbdev/chipsfb.c
+index f1c1c95c1fdf0..2ecb97c619b7c 100644
+--- a/drivers/video/fbdev/chipsfb.c
++++ b/drivers/video/fbdev/chipsfb.c
+@@ -358,16 +358,21 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
+ if (rc)
+ return rc;
+
+- if (pci_enable_device(dp) < 0) {
++ rc = pci_enable_device(dp);
++ if (rc < 0) {
+ dev_err(&dp->dev, "Cannot enable PCI device\n");
+ goto err_out;
+ }
+
+- if ((dp->resource[0].flags & IORESOURCE_MEM) == 0)
++ if ((dp->resource[0].flags & IORESOURCE_MEM) == 0) {
++ rc = -ENODEV;
+ goto err_disable;
++ }
+ addr = pci_resource_start(dp, 0);
+- if (addr == 0)
++ if (addr == 0) {
++ rc = -ENODEV;
+ goto err_disable;
++ }
+
+ p = framebuffer_alloc(0, &dp->dev);
+ if (p == NULL) {
+@@ -417,7 +422,8 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
+
+ init_chips(p, addr);
+
+- if (register_framebuffer(p) < 0) {
++ rc = register_framebuffer(p);
++ if (rc < 0) {
+ dev_err(&dp->dev,"C&T 65550 framebuffer failed to register\n");
+ goto err_unmap;
+ }
+--
+2.39.2
+
--- /dev/null
+From 4525deab5706f0917a55e175d49d652b5b5352f4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 31 Jan 2023 10:36:30 +0100
+Subject: ftrace,kcfi: Define ftrace_stub_graph conditionally
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+[ Upstream commit aa69f814920d85a2d4cfd5c294757c3d59d2fba6 ]
+
+When CONFIG_FUNCTION_GRAPH_TRACER is disabled, __kcfi_typeid_ftrace_stub_graph
+is missing, causing a link failure:
+
+ ld.lld: error: undefined symbol: __kcfi_typeid_ftrace_stub_graph
+ referenced by arch/x86/kernel/ftrace_64.o:(__cfi_ftrace_stub_graph) in archive vmlinux.a
+
+Mark the reference to it as conditional on the same symbol, as
+is done on arm64.
+
+Link: https://lore.kernel.org/linux-trace-kernel/20230131093643.3850272-1-arnd@kernel.org
+
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Josh Poimboeuf <jpoimboe@kernel.org>
+Fixes: 883bbbffa5a4 ("ftrace,kcfi: Separate ftrace_stub() and ftrace_stub_graph()")
+See-also: 2598ac6ec493 ("arm64: ftrace: Define ftrace_stub_graph only with FUNCTION_GRAPH_TRACER")
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kernel/ftrace_64.S | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
+index 2a4be92fd1444..6233c5b4c10b2 100644
+--- a/arch/x86/kernel/ftrace_64.S
++++ b/arch/x86/kernel/ftrace_64.S
+@@ -134,9 +134,11 @@ SYM_TYPED_FUNC_START(ftrace_stub)
+ RET
+ SYM_FUNC_END(ftrace_stub)
+
++#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ SYM_TYPED_FUNC_START(ftrace_stub_graph)
+ RET
+ SYM_FUNC_END(ftrace_stub_graph)
++#endif
+
+ #ifdef CONFIG_DYNAMIC_FTRACE
+
+--
+2.39.2
+
--- /dev/null
+From 5cc42f723f85d322e4ecd55659eec26360b08f70 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 9 Mar 2023 10:45:09 -0800
+Subject: i40e: Fix kernel crash during reboot when adapter is in recovery mode
+
+From: Ivan Vecera <ivecera@redhat.com>
+
+[ Upstream commit 7e4f8a0c495413a50413e8c9f1032ce1bc633bae ]
+
+If the driver detects during probe that the firmware is in recovery
+mode, then i40e_init_recovery_mode() is called and the rest of the
+probe function is skipped, including pci_set_drvdata(). A subsequent
+i40e_shutdown() called during shutdown/reboot then dereferences a NULL
+pointer, as pci_get_drvdata() returns NULL.
+
+To fix this, call pci_set_drvdata() also when entering recovery mode.
+
+Reproducer:
+1) Lets have i40e NIC with firmware in recovery mode
+2) Run reboot
+
+Result:
+[ 139.084698] i40e: Intel(R) Ethernet Connection XL710 Network Driver
+[ 139.090959] i40e: Copyright (c) 2013 - 2019 Intel Corporation.
+[ 139.108438] i40e 0000:02:00.0: Firmware recovery mode detected. Limiting functionality.
+[ 139.116439] i40e 0000:02:00.0: Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.
+[ 139.129499] i40e 0000:02:00.0: fw 8.3.64775 api 1.13 nvm 8.30 0x8000b78d 1.3106.0 [8086:1583] [15d9:084a]
+[ 139.215932] i40e 0000:02:00.0 enp2s0f0: renamed from eth0
+[ 139.223292] i40e 0000:02:00.1: Firmware recovery mode detected. Limiting functionality.
+[ 139.231292] i40e 0000:02:00.1: Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.
+[ 139.244406] i40e 0000:02:00.1: fw 8.3.64775 api 1.13 nvm 8.30 0x8000b78d 1.3106.0 [8086:1583] [15d9:084a]
+[ 139.329209] i40e 0000:02:00.1 enp2s0f1: renamed from eth0
+...
+[ 156.311376] BUG: kernel NULL pointer dereference, address: 00000000000006c2
+[ 156.318330] #PF: supervisor write access in kernel mode
+[ 156.323546] #PF: error_code(0x0002) - not-present page
+[ 156.328679] PGD 0 P4D 0
+[ 156.331210] Oops: 0002 [#1] PREEMPT SMP NOPTI
+[ 156.335567] CPU: 26 PID: 15119 Comm: reboot Tainted: G E 6.2.0+ #1
+[ 156.343126] Hardware name: Abacus electric, s.r.o. - servis@abacus.cz Super Server/H12SSW-iN, BIOS 2.4 04/13/2022
+[ 156.353369] RIP: 0010:i40e_shutdown+0x15/0x130 [i40e]
+[ 156.358430] Code: c1 fc ff ff 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 f3 0f 1e fa 0f 1f 44 00 00 55 48 89 fd 53 48 8b 9f 48 01 00 00 <f0> 80 8b c2 06 00 00 04 f0 80 8b c0 06 00 00 08 48 8d bb 08 08 00
+[ 156.377168] RSP: 0018:ffffb223c8447d90 EFLAGS: 00010282
+[ 156.382384] RAX: ffffffffc073ee70 RBX: 0000000000000000 RCX: 0000000000000001
+[ 156.389510] RDX: 0000000080000001 RSI: 0000000000000246 RDI: ffff95db49988000
+[ 156.396634] RBP: ffff95db49988000 R08: ffffffffffffffff R09: ffffffff8bd17d40
+[ 156.403759] R10: 0000000000000001 R11: ffffffff8a5e3d28 R12: ffff95db49988000
+[ 156.410882] R13: ffffffff89a6fe17 R14: ffff95db49988150 R15: 0000000000000000
+[ 156.418007] FS: 00007fe7c0cc3980(0000) GS:ffff95ea8ee80000(0000) knlGS:0000000000000000
+[ 156.426083] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 156.431819] CR2: 00000000000006c2 CR3: 00000003092fc005 CR4: 0000000000770ee0
+[ 156.438944] PKRU: 55555554
+[ 156.441647] Call Trace:
+[ 156.444096] <TASK>
+[ 156.446199] pci_device_shutdown+0x38/0x60
+[ 156.450297] device_shutdown+0x163/0x210
+[ 156.454215] kernel_restart+0x12/0x70
+[ 156.457872] __do_sys_reboot+0x1ab/0x230
+[ 156.461789] ? vfs_writev+0xa6/0x1a0
+[ 156.465362] ? __pfx_file_free_rcu+0x10/0x10
+[ 156.469635] ? __call_rcu_common.constprop.85+0x109/0x5a0
+[ 156.475034] do_syscall_64+0x3e/0x90
+[ 156.478611] entry_SYSCALL_64_after_hwframe+0x72/0xdc
+[ 156.483658] RIP: 0033:0x7fe7bff37ab7
+
+Fixes: 4ff0ee1af016 ("i40e: Introduce recovery mode support")
+Signed-off-by: Ivan Vecera <ivecera@redhat.com>
+Tested-by: Arpana Arland <arpanax.arland@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Link: https://lore.kernel.org/r/20230309184509.984639-1-anthony.l.nguyen@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_main.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index d30bc38725e97..da0cf87d3a1ca 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -15491,6 +15491,7 @@ static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)
+ int err;
+ int v_idx;
+
++ pci_set_drvdata(pf->pdev, pf);
+ pci_save_state(pf->pdev);
+
+ /* set up periodic task facility */
+--
+2.39.2
+
--- /dev/null
+From 5efa658657da4baefecea0de93e263057e3b6dc5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 Mar 2023 14:41:17 +0100
+Subject: i825xx: sni_82596: use eth_hw_addr_set()
+
+From: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+
+[ Upstream commit f38373345c65529639a01fba3675eb8cb4c579c3 ]
+
+netdev->dev_addr is now const, so we can't write to it directly.
+Copy the scrambled MAC address octets into an array, then call
+eth_hw_addr_set().
+
+Fixes: adeef3e32146 ("net: constify netdev->dev_addr")
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Reviewed-by: Michal Kubiak <michal.kubiak@intel.com>
+Link: https://lore.kernel.org/r/20230315134117.79511-1-tsbogend@alpha.franken.de
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/i825xx/sni_82596.c | 14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c
+index daec9ce04531b..54bb4d9a0d1ea 100644
+--- a/drivers/net/ethernet/i825xx/sni_82596.c
++++ b/drivers/net/ethernet/i825xx/sni_82596.c
+@@ -78,6 +78,7 @@ static int sni_82596_probe(struct platform_device *dev)
+ void __iomem *mpu_addr;
+ void __iomem *ca_addr;
+ u8 __iomem *eth_addr;
++ u8 mac[ETH_ALEN];
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ ca = platform_get_resource(dev, IORESOURCE_MEM, 1);
+@@ -109,12 +110,13 @@ static int sni_82596_probe(struct platform_device *dev)
+ goto probe_failed;
+
+ /* someone seems to like messed up stuff */
+- netdevice->dev_addr[0] = readb(eth_addr + 0x0b);
+- netdevice->dev_addr[1] = readb(eth_addr + 0x0a);
+- netdevice->dev_addr[2] = readb(eth_addr + 0x09);
+- netdevice->dev_addr[3] = readb(eth_addr + 0x08);
+- netdevice->dev_addr[4] = readb(eth_addr + 0x07);
+- netdevice->dev_addr[5] = readb(eth_addr + 0x06);
++ mac[0] = readb(eth_addr + 0x0b);
++ mac[1] = readb(eth_addr + 0x0a);
++ mac[2] = readb(eth_addr + 0x09);
++ mac[3] = readb(eth_addr + 0x08);
++ mac[4] = readb(eth_addr + 0x07);
++ mac[5] = readb(eth_addr + 0x06);
++ eth_hw_addr_set(netdevice, mac);
+ iounmap(eth_addr);
+
+ if (netdevice->irq < 0) {
+--
+2.39.2
+
--- /dev/null
+From 0baa7ce6c2ca4b53e1d5fd44a0593e365c899f97 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Mar 2023 10:45:43 -0700
+Subject: ice: xsk: disable txq irq before flushing hw
+
+From: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+
+[ Upstream commit b830c9642386867863ac64295185f896ff2928ac ]
+
+ice_qp_dis() intends to stop a given queue pair that is a target of xsk
+pool attach/detach. One of the steps is to disable interrupts on these
+queues. This is currently broken in that the Tx queue IRQ is turned off
+*after* the HW flush, which therefore has no effect.
+
+ice_qp_dis():
+-> ice_qvec_dis_irq()
+--> disable rxq irq
+--> flush hw
+-> ice_vsi_stop_tx_ring()
+-->disable txq irq
+
+Below splat can be triggered by following steps:
+- start xdpsock WITHOUT loading xdp prog
+- run xdp_rxq_info with XDP_TX action on this interface
+- start traffic
+- terminate xdpsock
+
+[ 256.312485] BUG: kernel NULL pointer dereference, address: 0000000000000018
+[ 256.319560] #PF: supervisor read access in kernel mode
+[ 256.324775] #PF: error_code(0x0000) - not-present page
+[ 256.329994] PGD 0 P4D 0
+[ 256.332574] Oops: 0000 [#1] PREEMPT SMP NOPTI
+[ 256.337006] CPU: 3 PID: 32 Comm: ksoftirqd/3 Tainted: G OE 6.2.0-rc5+ #51
+[ 256.345218] Hardware name: Intel Corporation S2600WFT/S2600WFT, BIOS SE5C620.86B.02.01.0008.031920191559 03/19/2019
+[ 256.355807] RIP: 0010:ice_clean_rx_irq_zc+0x9c/0x7d0 [ice]
+[ 256.361423] Code: b7 8f 8a 00 00 00 66 39 ca 0f 84 f1 04 00 00 49 8b 47 40 4c 8b 24 d0 41 0f b7 45 04 66 25 ff 3f 66 89 04 24 0f 84 85 02 00 00 <49> 8b 44 24 18 0f b7 14 24 48 05 00 01 00 00 49 89 04 24 49 89 44
+[ 256.380463] RSP: 0018:ffffc900088bfd20 EFLAGS: 00010206
+[ 256.385765] RAX: 000000000000003c RBX: 0000000000000035 RCX: 000000000000067f
+[ 256.393012] RDX: 0000000000000775 RSI: 0000000000000000 RDI: ffff8881deb3ac80
+[ 256.400256] RBP: 000000000000003c R08: ffff889847982710 R09: 0000000000010000
+[ 256.407500] R10: ffffffff82c060c0 R11: 0000000000000004 R12: 0000000000000000
+[ 256.414746] R13: ffff88811165eea0 R14: ffffc9000d255000 R15: ffff888119b37600
+[ 256.421990] FS: 0000000000000000(0000) GS:ffff8897e0cc0000(0000) knlGS:0000000000000000
+[ 256.430207] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 256.436036] CR2: 0000000000000018 CR3: 0000000005c0a006 CR4: 00000000007706e0
+[ 256.443283] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[ 256.450527] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+[ 256.457770] PKRU: 55555554
+[ 256.460529] Call Trace:
+[ 256.463015] <TASK>
+[ 256.465157] ? ice_xmit_zc+0x6e/0x150 [ice]
+[ 256.469437] ice_napi_poll+0x46d/0x680 [ice]
+[ 256.473815] ? _raw_spin_unlock_irqrestore+0x1b/0x40
+[ 256.478863] __napi_poll+0x29/0x160
+[ 256.482409] net_rx_action+0x136/0x260
+[ 256.486222] __do_softirq+0xe8/0x2e5
+[ 256.489853] ? smpboot_thread_fn+0x2c/0x270
+[ 256.494108] run_ksoftirqd+0x2a/0x50
+[ 256.497747] smpboot_thread_fn+0x1c1/0x270
+[ 256.501907] ? __pfx_smpboot_thread_fn+0x10/0x10
+[ 256.506594] kthread+0xea/0x120
+[ 256.509785] ? __pfx_kthread+0x10/0x10
+[ 256.513597] ret_from_fork+0x29/0x50
+[ 256.517238] </TASK>
+
+In fact, IRQs were not disabled and NAPI managed to be scheduled and run
+while the xsk_pool pointer was still valid, but the SW ring of xdp_buff
+pointers was already freed.
+
+To fix this, call ice_qvec_dis_irq() after ice_vsi_stop_tx_ring(). Also
+while at it, remove redundant ice_clean_rx_ring() call - this is handled
+in ice_qp_clean_rings().
+
+Fixes: 2d4238f55697 ("ice: Add support for AF_XDP")
+Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Reviewed-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Tested-by: Chandan Kumar Rout <chandanx.rout@intel.com> (A Contingent Worker at Intel)
+Acked-by: John Fastabend <john.fastabend@gmail.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_xsk.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
+index 65468cdc25870..41ee081eb8875 100644
+--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
++++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
+@@ -173,8 +173,6 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
+ }
+ netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+
+- ice_qvec_dis_irq(vsi, rx_ring, q_vector);
+-
+ ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
+ err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
+ if (err)
+@@ -189,10 +187,11 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
+ if (err)
+ return err;
+ }
++ ice_qvec_dis_irq(vsi, rx_ring, q_vector);
++
+ err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
+ if (err)
+ return err;
+- ice_clean_rx_ring(rx_ring);
+
+ ice_qvec_toggle_napi(vsi, q_vector, false);
+ ice_qp_clean_rings(vsi, q_idx);
+--
+2.39.2
+
--- /dev/null
+From a56e1ef351c5c72583bd03b52efc7854db44f6f9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 Mar 2023 14:40:09 +0200
+Subject: ipv4: Fix incorrect table ID in IOCTL path
+
+From: Ido Schimmel <idosch@nvidia.com>
+
+[ Upstream commit 8a2618e14f81604a9b6ad305d57e0c8da939cd65 ]
+
+Commit f96a3d74554d ("ipv4: Fix incorrect route flushing when source
+address is deleted") started to take the table ID field in the FIB info
+structure into account when determining if two structures are identical
+or not. This field is initialized using the 'fc_table' field in the
+route configuration structure, which is not set when adding a route via
+IOCTL.
+
+The above can result in user space being able to install two identical
+routes that only differ in the table ID field of their associated FIB
+info.
+
+Fix by initializing the table ID field in the route configuration
+structure in the IOCTL path.
+
+Before the fix:
+
+ # ip route add default via 192.0.2.2
+ # route add default gw 192.0.2.2
+ # ip -4 r show default
+ # default via 192.0.2.2 dev dummy10
+ # default via 192.0.2.2 dev dummy10
+
+After the fix:
+
+ # ip route add default via 192.0.2.2
+ # route add default gw 192.0.2.2
+ SIOCADDRT: File exists
+ # ip -4 r show default
+ default via 192.0.2.2 dev dummy10
+
+Audited the code paths to ensure there are no other paths that do not
+properly initialize the route configuration structure when installing a
+route.
+
+Fixes: 5a56a0b3a45d ("net: Don't delete routes in different VRFs")
+Fixes: f96a3d74554d ("ipv4: Fix incorrect route flushing when source address is deleted")
+Reported-by: gaoxingwang <gaoxingwang1@huawei.com>
+Link: https://lore.kernel.org/netdev/20230314144159.2354729-1-gaoxingwang1@huawei.com/
+Tested-by: gaoxingwang <gaoxingwang1@huawei.com>
+Signed-off-by: Ido Schimmel <idosch@nvidia.com>
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Link: https://lore.kernel.org/r/20230315124009.4015212-1-idosch@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/fib_frontend.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index b5736ef16ed2d..390f4be7f7bec 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -576,6 +576,9 @@ static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
+ cfg->fc_scope = RT_SCOPE_UNIVERSE;
+ }
+
++ if (!cfg->fc_table)
++ cfg->fc_table = RT_TABLE_MAIN;
++
+ if (cmd == SIOCDELRT)
+ return 0;
+
+--
+2.39.2
+
--- /dev/null
+From 14b96b841cb85c43320db524fcb89dc01fecf3ec Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 9 Mar 2023 10:03:36 +0800
+Subject: ipvlan: Make skb->skb_iif track skb->dev for l3s mode
+
+From: Jianguo Wu <wujianguo@chinatelecom.cn>
+
+[ Upstream commit 59a0b022aa249e3f5735d93de0849341722c4754 ]
+
+For l3s mode, skb->dev is set to ipvlan interface in ipvlan_nf_input():
+ skb->dev = addr->master->dev
+but skb->skb_iif remains unchanged. This will cause socket lookup to
+fail if the target socket is bound to an interface, as in the following
+example:
+
+ ip link add ipvlan0 link eth0 type ipvlan mode l3s
+ ip addr add dev ipvlan0 192.168.124.111/24
+ ip link set ipvlan0 up
+
+ ping -c 1 -I ipvlan0 8.8.8.8
+ 100% packet loss
+
+This is because there is no matching sk in __raw_v4_lookup(), as
+sk->sk_bound_dev_if != dif (skb->skb_iif).
+Fix this by making skb->skb_iif track skb->dev in ipvlan_nf_input().
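+
+For reference, a simplified sketch of the bound-device test involved
+(toy code, not the actual __raw_v4_lookup() body):
+
+  /* 'dif' comes from skb->skb_iif.  If skb->dev was rewritten to the
+   * ipvlan device but skb_iif still holds the lower device's ifindex,
+   * a socket bound to the ipvlan interface never matches. */
+  int toy_bound_dev_match(int sk_bound_dev_if, int dif)
+  {
+          return !sk_bound_dev_if || sk_bound_dev_if == dif;
+  }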
+
+Fixes: c675e06a98a4 ("ipvlan: decouple l3s mode dependencies from other modes")
+Signed-off-by: Jianguo Wu <wujianguo@chinatelecom.cn>
+Reviewed-by: Jiri Pirko <jiri@nvidia.com>
+Link: https://lore.kernel.org/r/29865b1f-6db7-c07a-de89-949d3721ea30@163.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ipvlan/ipvlan_l3s.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/ipvlan/ipvlan_l3s.c b/drivers/net/ipvlan/ipvlan_l3s.c
+index 943d26cbf39f5..71712ea25403d 100644
+--- a/drivers/net/ipvlan/ipvlan_l3s.c
++++ b/drivers/net/ipvlan/ipvlan_l3s.c
+@@ -101,6 +101,7 @@ static unsigned int ipvlan_nf_input(void *priv, struct sk_buff *skb,
+ goto out;
+
+ skb->dev = addr->master->dev;
++ skb->skb_iif = skb->dev->ifindex;
+ len = skb->len + ETH_HLEN;
+ ipvlan_count_rx(addr->master, len, true, false);
+ out:
+--
+2.39.2
+
--- /dev/null
+From 099d006e1e54832aba68bacd6f7ff1d17e859930 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Mar 2023 11:21:54 -0700
+Subject: loop: Fix use-after-free issues
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+[ Upstream commit 9b0cb770f5d7b1ff40bea7ca385438ee94570eec ]
+
+do_req_filebacked() calls blk_mq_complete_request() synchronously, or
+asynchronously when using asynchronous I/O, unless memory allocation
+fails. Hence, modify loop_handle_cmd() such that it does not dereference
+'cmd' or 'rq' after do_req_filebacked() has finished, unless we are sure
+that the request has not yet been completed. This patch fixes the
+following kernel crash (the snapshot pattern is sketched after the trace):
+
+Unable to handle kernel NULL pointer dereference at virtual address 0000000000000054
+Call trace:
+ css_put.42938+0x1c/0x1ac
+ loop_process_work+0xc8c/0xfd4
+ loop_rootcg_workfn+0x24/0x34
+ process_one_work+0x244/0x558
+ worker_thread+0x400/0x8fc
+ kthread+0x16c/0x1e0
+ ret_from_fork+0x10/0x20
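+
+The snapshot pattern, reduced to a toy example (none of the names below
+are the real loop driver structures):
+
+  struct toy_cmd {
+          void *blkcg_css;
+          void *memcg_css;
+          int use_aio;
+  };
+
+  /* submit() may complete the request and free 'cmd' before returning,
+   * so anything needed afterwards is copied out first. */
+  int toy_handle_cmd(struct toy_cmd *cmd, int (*submit)(struct toy_cmd *))
+  {
+          void *blkcg_css = cmd->blkcg_css;       /* snapshot */
+          void *memcg_css = cmd->memcg_css;       /* snapshot */
+          int use_aio = cmd->use_aio;             /* snapshot */
+          int ret;
+
+          ret = submit(cmd);
+
+          /* only the local snapshots are safe to touch from here on */
+          (void)blkcg_css;
+          (void)memcg_css;
+
+          /* decide whether to complete here (non-aio or error) without
+           * touching 'cmd' again */
+          return (!use_aio || ret) ? ret : 0;
+  }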
+
+Cc: Christoph Hellwig <hch@lst.de>
+Cc: Ming Lei <ming.lei@redhat.com>
+Cc: Jan Kara <jack@suse.cz>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Dan Schatzberg <schatzberg.dan@gmail.com>
+Fixes: c74d40e8b5e2 ("loop: charge i/o to mem and blk cg")
+Fixes: bc07c10a3603 ("block: loop: support DIO & AIO")
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Link: https://lore.kernel.org/r/20230314182155.80625-1-bvanassche@acm.org
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/block/loop.c | 25 +++++++++++++++++--------
+ 1 file changed, 17 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 981464e561df1..793ae876918ce 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -1853,35 +1853,44 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
+
+ static void loop_handle_cmd(struct loop_cmd *cmd)
+ {
++ struct cgroup_subsys_state *cmd_blkcg_css = cmd->blkcg_css;
++ struct cgroup_subsys_state *cmd_memcg_css = cmd->memcg_css;
+ struct request *rq = blk_mq_rq_from_pdu(cmd);
+ const bool write = op_is_write(req_op(rq));
+ struct loop_device *lo = rq->q->queuedata;
+ int ret = 0;
+ struct mem_cgroup *old_memcg = NULL;
++ const bool use_aio = cmd->use_aio;
+
+ if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
+ ret = -EIO;
+ goto failed;
+ }
+
+- if (cmd->blkcg_css)
+- kthread_associate_blkcg(cmd->blkcg_css);
+- if (cmd->memcg_css)
++ if (cmd_blkcg_css)
++ kthread_associate_blkcg(cmd_blkcg_css);
++ if (cmd_memcg_css)
+ old_memcg = set_active_memcg(
+- mem_cgroup_from_css(cmd->memcg_css));
++ mem_cgroup_from_css(cmd_memcg_css));
+
++ /*
++ * do_req_filebacked() may call blk_mq_complete_request() synchronously
++ * or asynchronously if using aio. Hence, do not touch 'cmd' after
++ * do_req_filebacked() has returned unless we are sure that 'cmd' has
++ * not yet been completed.
++ */
+ ret = do_req_filebacked(lo, rq);
+
+- if (cmd->blkcg_css)
++ if (cmd_blkcg_css)
+ kthread_associate_blkcg(NULL);
+
+- if (cmd->memcg_css) {
++ if (cmd_memcg_css) {
+ set_active_memcg(old_memcg);
+- css_put(cmd->memcg_css);
++ css_put(cmd_memcg_css);
+ }
+ failed:
+ /* complete non-aio request */
+- if (!cmd->use_aio || ret) {
++ if (!use_aio || ret) {
+ if (ret == -EOPNOTSUPP)
+ cmd->ret = ret;
+ else
+--
+2.39.2
+
--- /dev/null
+From abb93b39aad197285e1f113b8cbce86ed5a08463 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Mar 2023 18:21:24 +0100
+Subject: mlxsw: spectrum: Fix incorrect parsing depth after reload
+
+From: Ido Schimmel <idosch@nvidia.com>
+
+[ Upstream commit 35c356924fe3669dfbb1185607ce3b37f70bfa80 ]
+
+Spectrum ASICs have a configurable limit on how deep into the packet
+they parse. By default, the limit is 96 bytes.
+
+There are several cases where this parsing depth is not enough and there
+is a need to increase it. For example, timestamping of PTP packets and a
+FIB multipath hash policy that requires hashing on inner fields. The
+driver therefore maintains a reference count that reflects the number of
+consumers that require an increased parsing depth.
+
+During reload_down() the parsing depth reference count does not
+necessarily drop to zero, but the parsing depth itself is restored to
+the default during reload_up() when the firmware is reset. It is
+therefore possible to end up in situations where the driver thinks that
+the parsing depth was increased (reference count is non-zero), when it
+is not.
+
+Fix by making sure that all the consumers that increase the parsing
+depth reference count also decrease it during reload_down().
+Specifically, make sure that when the routing code is de-initialized it
+drops the reference count if it was increased because of a FIB multipath
+hash policy that requires hashing on inner fields.
+
+Add a warning if the reference count is not zero after the driver was
+de-initialized and explicitly reset it to zero during initialization for
+good measure.
+
+Fixes: 2d91f0803b84 ("mlxsw: spectrum: Add infrastructure for parsing configuration")
+Reported-by: Maksym Yaremchuk <maksymy@nvidia.com>
+Signed-off-by: Ido Schimmel <idosch@nvidia.com>
+Signed-off-by: Petr Machata <petrm@nvidia.com>
+Link: https://lore.kernel.org/r/9c35e1b3e6c1d8f319a2449d14e2b86373f3b3ba.1678727526.git.petrm@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 2 ++
+ .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 14 ++++++++++++++
+ 2 files changed, 16 insertions(+)
+
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+index 5bcf5bceff710..67ecdb9e708f9 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+@@ -2931,6 +2931,7 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
+
+ static void mlxsw_sp_parsing_init(struct mlxsw_sp *mlxsw_sp)
+ {
++ refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 0);
+ mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH;
+ mlxsw_sp->parsing.vxlan_udp_dport = MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT;
+ mutex_init(&mlxsw_sp->parsing.lock);
+@@ -2939,6 +2940,7 @@ static void mlxsw_sp_parsing_init(struct mlxsw_sp *mlxsw_sp)
+ static void mlxsw_sp_parsing_fini(struct mlxsw_sp *mlxsw_sp)
+ {
+ mutex_destroy(&mlxsw_sp->parsing.lock);
++ WARN_ON_ONCE(refcount_read(&mlxsw_sp->parsing.parsing_depth_ref));
+ }
+
+ struct mlxsw_sp_ipv6_addr_node {
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+index 48f1fa62a4fd4..ab0aa1a61d4aa 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+@@ -10313,11 +10313,23 @@ static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
+ old_inc_parsing_depth);
+ return err;
+ }
++
++static void mlxsw_sp_mp_hash_fini(struct mlxsw_sp *mlxsw_sp)
++{
++ bool old_inc_parsing_depth = mlxsw_sp->router->inc_parsing_depth;
++
++ mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp, old_inc_parsing_depth,
++ false);
++}
+ #else
+ static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
+ {
+ return 0;
+ }
++
++static void mlxsw_sp_mp_hash_fini(struct mlxsw_sp *mlxsw_sp)
++{
++}
+ #endif
+
+ static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
+@@ -10547,6 +10559,7 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
+ err_register_inetaddr_notifier:
+ mlxsw_core_flush_owq();
+ err_dscp_init:
++ mlxsw_sp_mp_hash_fini(mlxsw_sp);
+ err_mp_hash_init:
+ mlxsw_sp_neigh_fini(mlxsw_sp);
+ err_neigh_init:
+@@ -10587,6 +10600,7 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
+ unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
+ unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
+ mlxsw_core_flush_owq();
++ mlxsw_sp_mp_hash_fini(mlxsw_sp);
+ mlxsw_sp_neigh_fini(mlxsw_sp);
+ mlxsw_sp_lb_rif_fini(mlxsw_sp);
+ mlxsw_sp_vrs_fini(mlxsw_sp);
+--
+2.39.2
+
--- /dev/null
+From c7c9beaf1791a6b2821dba97440104be5703be41 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 Mar 2023 13:55:38 +0100
+Subject: net: atlantic: Fix crash when XDP is enabled but no program is loaded
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Toke Høiland-Jørgensen <toke@redhat.com>
+
+[ Upstream commit 37d010399f7552add2b68e2b347901c83562dab8 ]
+
+The aq_xdp_run_prog() function falls back to the XDP_ABORTED action
+handler (using a goto) if the operations for any of the other actions fail.
+The XDP_ABORTED handler in turn calls the bpf_warn_invalid_xdp_action()
+tracepoint. However, the function also jumps into the XDP_PASS helper if no
+XDP program is loaded on the device, which means the XDP_ABORTED handler
+can be run with a NULL program pointer. This results in a NULL pointer
+deref because the tracepoint dereferences the 'prog' pointer passed to it.
+
+This situation can happen in multiple ways:
+- If a packet arrives between the removal of the program from the interface
+ and the static_branch_dec() in aq_xdp_setup()
+- If there are multiple devices using the same driver in the system and
+ one of them has an XDP program loaded and the other does not.
+
+Fix this by refactoring the aq_xdp_run_prog() function to remove the 'goto
+pass' handling if there is no XDP program loaded. Instead, factor out the
+skb building into a separate small helper function.
+
+Fixes: 26efaef759a1 ("net: atlantic: Implement xdp data plane")
+Reported-by: Freysteinn Alfredsson <Freysteinn.Alfredsson@kau.se>
+Tested-by: Freysteinn Alfredsson <Freysteinn.Alfredsson@kau.se>
+Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
+Link: https://lore.kernel.org/r/20230315125539.103319-1-toke@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/aquantia/atlantic/aq_ring.c | 28 ++++++++++++++-----
+ 1 file changed, 21 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+index 25129e723b575..2dc8d215a5918 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+@@ -412,6 +412,25 @@ int aq_xdp_xmit(struct net_device *dev, int num_frames,
+ return num_frames - drop;
+ }
+
++static struct sk_buff *aq_xdp_build_skb(struct xdp_buff *xdp,
++ struct net_device *dev,
++ struct aq_ring_buff_s *buff)
++{
++ struct xdp_frame *xdpf;
++ struct sk_buff *skb;
++
++ xdpf = xdp_convert_buff_to_frame(xdp);
++ if (unlikely(!xdpf))
++ return NULL;
++
++ skb = xdp_build_skb_from_frame(xdpf, dev);
++ if (!skb)
++ return NULL;
++
++ aq_get_rxpages_xdp(buff, xdp);
++ return skb;
++}
++
+ static struct sk_buff *aq_xdp_run_prog(struct aq_nic_s *aq_nic,
+ struct xdp_buff *xdp,
+ struct aq_ring_s *rx_ring,
+@@ -431,7 +450,7 @@ static struct sk_buff *aq_xdp_run_prog(struct aq_nic_s *aq_nic,
+
+ prog = READ_ONCE(rx_ring->xdp_prog);
+ if (!prog)
+- goto pass;
++ return aq_xdp_build_skb(xdp, aq_nic->ndev, buff);
+
+ prefetchw(xdp->data_hard_start); /* xdp_frame write */
+
+@@ -442,17 +461,12 @@ static struct sk_buff *aq_xdp_run_prog(struct aq_nic_s *aq_nic,
+ act = bpf_prog_run_xdp(prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+-pass:
+- xdpf = xdp_convert_buff_to_frame(xdp);
+- if (unlikely(!xdpf))
+- goto out_aborted;
+- skb = xdp_build_skb_from_frame(xdpf, aq_nic->ndev);
++ skb = aq_xdp_build_skb(xdp, aq_nic->ndev, buff);
+ if (!skb)
+ goto out_aborted;
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.xdp_pass;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+- aq_get_rxpages_xdp(buff, xdp);
+ return skb;
+ case XDP_TX:
+ xdpf = xdp_convert_buff_to_frame(xdp);
+--
+2.39.2
+
--- /dev/null
+From a7a716ce3b3f82da9091f52b9a769acf089dcb79 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Mar 2023 20:24:04 +0200
+Subject: net: dsa: don't error out when drivers return ETH_DATA_LEN in
+ .port_max_mtu()
+
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+
+[ Upstream commit 636e8adf7878eab3614250234341bde45537f47a ]
+
+Currently, when dsa_slave_change_mtu() is called on a user port where
+dev->max_mtu is 1500 (as returned by ds->ops->port_max_mtu()), the code
+will stumble upon this check:
+
+ if (new_master_mtu > mtu_limit)
+ return -ERANGE;
+
+because new_master_mtu is adjusted for the tagger overhead but mtu_limit
+is not.
+
+But it would be good if the logic went through, for example if the DSA
+master really depends on an MTU adjustment to accept DSA-tagged frames.
+
+To make the code pass through the check, we need to adjust mtu_limit for
+the overhead as well, if the minimum restriction was caused by the DSA
+user port's MTU (dev->max_mtu). A DSA user port MTU and a DSA master MTU
+are always offset by the protocol overhead.
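+
+A small worked example with made-up numbers (a 1500-byte user port limit,
+8 bytes of tagger overhead and a 9000-byte master limit; none of these
+come from a particular driver):
+
+  #include <stdio.h>
+
+  #define MIN(a, b)       ((a) < (b) ? (a) : (b))
+
+  int main(void)
+  {
+          int user_max_mtu = 1500;        /* hypothetical .port_max_mtu() */
+          int master_max_mtu = 9000;      /* hypothetical DSA master limit */
+          int overhead = 8;               /* hypothetical tagger overhead */
+          int new_master_mtu = user_max_mtu + overhead;                 /* 1508 */
+          int old_limit = MIN(master_max_mtu, user_max_mtu);            /* 1500 */
+          int new_limit = MIN(master_max_mtu, user_max_mtu + overhead); /* 1508 */
+
+          printf("old check: %d > %d -> %s\n", new_master_mtu, old_limit,
+                 new_master_mtu > old_limit ? "-ERANGE" : "ok");
+          printf("new check: %d > %d -> %s\n", new_master_mtu, new_limit,
+                 new_master_mtu > new_limit ? "-ERANGE" : "ok");
+          return 0;
+  }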
+
+Currently no drivers return 1500 from .port_max_mtu(), but this is only
+temporary and a bug in itself: mv88e6xxx should have done that, but
+since commit b9c587fed61c ("dsa: mv88e6xxx: Include tagger overhead when
+setting MTU for DSA and CPU ports") it no longer does. This is a
+preparation for fixing that.
+
+Fixes: bfcb813203e6 ("net: dsa: configure the MTU for switch ports")
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Reviewed-by: Simon Horman <simon.horman@corigine.com>
+Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/dsa/slave.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/net/dsa/slave.c b/net/dsa/slave.c
+index a9fde48cffd43..5fe075bf479ec 100644
+--- a/net/dsa/slave.c
++++ b/net/dsa/slave.c
+@@ -1852,6 +1852,7 @@ int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
+ int new_master_mtu;
+ int old_master_mtu;
+ int mtu_limit;
++ int overhead;
+ int cpu_mtu;
+ int err;
+
+@@ -1880,9 +1881,10 @@ int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
+ largest_mtu = slave_mtu;
+ }
+
+- mtu_limit = min_t(int, master->max_mtu, dev->max_mtu);
++ overhead = dsa_tag_protocol_overhead(cpu_dp->tag_ops);
++ mtu_limit = min_t(int, master->max_mtu, dev->max_mtu + overhead);
+ old_master_mtu = master->mtu;
+- new_master_mtu = largest_mtu + dsa_tag_protocol_overhead(cpu_dp->tag_ops);
++ new_master_mtu = largest_mtu + overhead;
+ if (new_master_mtu > mtu_limit)
+ return -ERANGE;
+
+@@ -1917,8 +1919,7 @@ int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
+
+ out_port_failed:
+ if (new_master_mtu != old_master_mtu)
+- dsa_port_mtu_change(cpu_dp, old_master_mtu -
+- dsa_tag_protocol_overhead(cpu_dp->tag_ops));
++ dsa_port_mtu_change(cpu_dp, old_master_mtu - overhead);
+ out_cpu_failed:
+ if (new_master_mtu != old_master_mtu)
+ dev_set_mtu(master, old_master_mtu);
+--
+2.39.2
+
--- /dev/null
+From fe5aab7b0b3642569fe27e1fb545a0155c4e88d1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Mar 2023 01:19:16 +0200
+Subject: net: dsa: microchip: fix RGMII delay configuration on
+ KSZ8765/KSZ8794/KSZ8795
+
+From: Marek Vasut <marex@denx.de>
+
+[ Upstream commit 5ae06327a3a5bad4ee246d81df203b1b00a7b390 ]
+
+The blamed commit has replaced a ksz_write8() call to address
+REG_PORT_5_CTRL_6 (0x56) with a ksz_set_xmii() -> ksz_pwrite8() call to
+regs[P_XMII_CTRL_1], which is also defined as 0x56 for ksz8795_regs[].
+
+The trouble is that, when compared to ksz_write8(), ksz_pwrite8() also
+adjusts the register offset with the port base address. So in reality,
+ksz_pwrite8(offset=0x56) accesses register 0x56 + 0x50 = 0xa6, which in
+this switch appears to be unmapped, and the RGMII delay configuration on
+the CPU port does nothing.
+
+So if the switch wasn't fine with the RGMII delay configuration done
+through pin strapping and relied on Linux to apply a different one in
+order to pass traffic, this is now broken.
+
+Using the offset translation logic imposed by ksz_pwrite8(), the correct
+value for regs[P_XMII_CTRL_1] should have been 0x6 on ksz8795_regs[], in
+order to really end up accessing register 0x56.
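+
+The arithmetic, spelled out (the 0x50 port base is inferred from the
+numbers quoted above rather than taken from the driver sources):
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+          int port5_base = 0x50;  /* inferred: base + 0x56 landed on 0xa6 */
+
+          /* old table value 0x56: pwrite adds the port base on top */
+          printf("old: 0x%02x\n", port5_base + 0x56);     /* 0xa6, unmapped */
+          /* new table value 0x06: lands on the intended register */
+          printf("new: 0x%02x\n", port5_base + 0x06);     /* 0x56 */
+          return 0;
+  }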
+
+Static code analysis shows that, despite there being multiple other
+accesses to regs[P_XMII_CTRL_1] in this driver, the only code path that
+is applicable to ksz8795_regs[] and ksz8_dev_ops is ksz_set_xmii().
+Therefore, the problem is isolated to RGMII delays.
+
+In its current form, ksz8795_regs[] contains the same value for
+P_XMII_CTRL_0 and for P_XMII_CTRL_1, and this raises valid suspicions
+that writes made by the driver to regs[P_XMII_CTRL_0] might overwrite
+writes made to regs[P_XMII_CTRL_1] or vice versa.
+
+Again, static analysis shows that the only accesses to P_XMII_CTRL_0
+from the driver are made from code paths which are not reachable with
+ksz8_dev_ops. So the accesses made by ksz_set_xmii() are safe for this
+switch family.
+
+[ vladimiroltean: rewrote commit message ]
+
+Fixes: c476bede4b0f ("net: dsa: microchip: ksz8795: use common xmii function")
+Signed-off-by: Marek Vasut <marex@denx.de>
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Acked-by: Arun Ramadoss <arun.ramadoss@microchip.com>
+Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
+Link: https://lore.kernel.org/r/20230315231916.2998480-1-vladimir.oltean@nxp.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/microchip/ksz_common.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
+index c68f48cd1ec08..07f6776bba12b 100644
+--- a/drivers/net/dsa/microchip/ksz_common.c
++++ b/drivers/net/dsa/microchip/ksz_common.c
+@@ -272,7 +272,7 @@ static const u16 ksz8795_regs[] = {
+ [S_BROADCAST_CTRL] = 0x06,
+ [S_MULTICAST_CTRL] = 0x04,
+ [P_XMII_CTRL_0] = 0x06,
+- [P_XMII_CTRL_1] = 0x56,
++ [P_XMII_CTRL_1] = 0x06,
+ };
+
+ static const u32 ksz8795_masks[] = {
+--
+2.39.2
+
--- /dev/null
+From fe22378129a743b8b59212b27a96b77c0bff33d9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Mar 2023 10:33:37 +0300
+Subject: net: dsa: mt7530: remove now incorrect comment regarding port 5
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Arınç ÜNAL <arinc.unal@arinc9.com>
+
+[ Upstream commit feb03fd11c5616f3a47e4714d2f9917d0f1a2edd ]
+
+Remove the now-incorrect comment regarding port 5 as GMAC5. This has
+been supported since commit 38f790a80560 ("net: dsa: mt7530: Add support
+for port 5") via mt7530_setup_port5().
+
+Fixes: 38f790a80560 ("net: dsa: mt7530: Add support for port 5")
+Signed-off-by: Arınç ÜNAL <arinc.unal@arinc9.com>
+Link: https://lore.kernel.org/r/20230310073338.5836-1-arinc.unal@arinc9.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/mt7530.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
+index 1e0b8bcd59e6c..4306782bf2257 100644
+--- a/drivers/net/dsa/mt7530.c
++++ b/drivers/net/dsa/mt7530.c
+@@ -2206,7 +2206,7 @@ mt7530_setup(struct dsa_switch *ds)
+
+ mt7530_pll_setup(priv);
+
+- /* Enable Port 6 only; P5 as GMAC5 which currently is not supported */
++ /* Enable port 6 */
+ val = mt7530_read(priv, MT7530_MHWTRAP);
+ val &= ~MHWTRAP_P6_DIS & ~MHWTRAP_PHY_ACCESS;
+ val |= MHWTRAP_MANUAL;
+--
+2.39.2
+
--- /dev/null
+From 5467a8ed1b68ca88f8d4068c8d4665ac8b9246be Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Mar 2023 10:33:38 +0300
+Subject: net: dsa: mt7530: set PLL frequency and trgmii only when trgmii is
+ used
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Arınç ÜNAL <arinc.unal@arinc9.com>
+
+[ Upstream commit 0b086d76e7b011772b0ac214c6e5fd5816eff2df ]
+
+As my testing on the MCM MT7530 switch on MT7621 SoC shows, setting the PLL
+frequency does not affect MII modes other than trgmii on port 5 and port 6.
+So the assumption is that the operation here called "setting the PLL
+frequency" actually sets the frequency of the TRGMII TX clock.
+
+Make it so that it and the rest of the trgmii setup run only when the
+trgmii mode is used.
+
+Tested rgmii and trgmii modes of port 6 on MCM MT7530 on MT7621AT Unielec
+U7621-06 and standalone MT7530 on MT7623NI Bananapi BPI-R2.
+
+Fixes: b8f126a8d543 ("net-next: dsa: add dsa support for Mediatek MT7530 switch")
+Tested-by: Arınç ÜNAL <arinc.unal@arinc9.com>
+Signed-off-by: Arınç ÜNAL <arinc.unal@arinc9.com>
+Link: https://lore.kernel.org/r/20230310073338.5836-2-arinc.unal@arinc9.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/mt7530.c | 62 ++++++++++++++++++++--------------------
+ 1 file changed, 31 insertions(+), 31 deletions(-)
+
+diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
+index 4306782bf2257..1757d6a2c72ae 100644
+--- a/drivers/net/dsa/mt7530.c
++++ b/drivers/net/dsa/mt7530.c
+@@ -430,8 +430,6 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
+ switch (interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ trgint = 0;
+- /* PLL frequency: 125MHz */
+- ncpo1 = 0x0c80;
+ break;
+ case PHY_INTERFACE_MODE_TRGMII:
+ trgint = 1;
+@@ -462,38 +460,40 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
+ mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK,
+ P6_INTF_MODE(trgint));
+
+- /* Lower Tx Driving for TRGMII path */
+- for (i = 0 ; i < NUM_TRGMII_CTRL ; i++)
+- mt7530_write(priv, MT7530_TRGMII_TD_ODT(i),
+- TD_DM_DRVP(8) | TD_DM_DRVN(8));
+-
+- /* Disable MT7530 core and TRGMII Tx clocks */
+- core_clear(priv, CORE_TRGMII_GSW_CLK_CG,
+- REG_GSWCK_EN | REG_TRGMIICK_EN);
+-
+- /* Setup the MT7530 TRGMII Tx Clock */
+- core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1));
+- core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0));
+- core_write(priv, CORE_PLL_GROUP10, RG_LCDDS_SSC_DELTA(ssc_delta));
+- core_write(priv, CORE_PLL_GROUP11, RG_LCDDS_SSC_DELTA1(ssc_delta));
+- core_write(priv, CORE_PLL_GROUP4,
+- RG_SYSPLL_DDSFBK_EN | RG_SYSPLL_BIAS_EN |
+- RG_SYSPLL_BIAS_LPF_EN);
+- core_write(priv, CORE_PLL_GROUP2,
+- RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN |
+- RG_SYSPLL_POSDIV(1));
+- core_write(priv, CORE_PLL_GROUP7,
+- RG_LCDDS_PCW_NCPO_CHG | RG_LCCDS_C(3) |
+- RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);
+-
+- /* Enable MT7530 core and TRGMII Tx clocks */
+- core_set(priv, CORE_TRGMII_GSW_CLK_CG,
+- REG_GSWCK_EN | REG_TRGMIICK_EN);
+-
+- if (!trgint)
++ if (trgint) {
++ /* Lower Tx Driving for TRGMII path */
++ for (i = 0 ; i < NUM_TRGMII_CTRL ; i++)
++ mt7530_write(priv, MT7530_TRGMII_TD_ODT(i),
++ TD_DM_DRVP(8) | TD_DM_DRVN(8));
++
++ /* Disable MT7530 core and TRGMII Tx clocks */
++ core_clear(priv, CORE_TRGMII_GSW_CLK_CG,
++ REG_GSWCK_EN | REG_TRGMIICK_EN);
++
++ /* Setup the MT7530 TRGMII Tx Clock */
++ core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1));
++ core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0));
++ core_write(priv, CORE_PLL_GROUP10, RG_LCDDS_SSC_DELTA(ssc_delta));
++ core_write(priv, CORE_PLL_GROUP11, RG_LCDDS_SSC_DELTA1(ssc_delta));
++ core_write(priv, CORE_PLL_GROUP4,
++ RG_SYSPLL_DDSFBK_EN | RG_SYSPLL_BIAS_EN |
++ RG_SYSPLL_BIAS_LPF_EN);
++ core_write(priv, CORE_PLL_GROUP2,
++ RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN |
++ RG_SYSPLL_POSDIV(1));
++ core_write(priv, CORE_PLL_GROUP7,
++ RG_LCDDS_PCW_NCPO_CHG | RG_LCCDS_C(3) |
++ RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);
++
++ /* Enable MT7530 core and TRGMII Tx clocks */
++ core_set(priv, CORE_TRGMII_GSW_CLK_CG,
++ REG_GSWCK_EN | REG_TRGMIICK_EN);
++ } else {
+ for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
+ mt7530_rmw(priv, MT7530_TRGMII_RD(i),
+ RD_TAP_MASK, RD_TAP(16));
++ }
++
+ return 0;
+ }
+
+--
+2.39.2
+
--- /dev/null
+From d2fefefcb9843a19e8e42026e086f4a39e6d8a5b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Mar 2023 20:24:05 +0200
+Subject: net: dsa: mv88e6xxx: fix max_mtu of 1492 on 6165, 6191, 6220, 6250,
+ 6290
+
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+
+[ Upstream commit 7e9517375a14f44ee830ca1c3278076dd65fcc8f ]
+
+There are 3 classes of switch families that the driver is aware of, as
+far as mv88e6xxx_change_mtu() is concerned:
+
+- MTU configuration is available per port. Here, the
+ chip->info->ops->port_set_jumbo_size() method will be present.
+
+- MTU configuration is global to the switch. Here, the
+ chip->info->ops->set_max_frame_size() method will be present.
+
+- We don't know how to change the MTU. Here, none of the above methods
+ will be present.
+
+Switch families MV88E6165, MV88E6191, MV88E6220, MV88E6250 and MV88E6290
+fall in category 3.
+
+The blamed commit has adjusted the MTU for all 3 categories by EDSA_HLEN
+(8 bytes), resulting in a new maximum MTU of 1492 being reported by the
+driver for these switches.
+
+I don't have the hardware to test, but I do have a MV88E6390 switch on
+which I can simulate this by commenting out its .port_set_jumbo_size
+definition from mv88e6390_ops. The result is this set of messages at
+probe time:
+
+mv88e6085 d0032004.mdio-mii:10: nonfatal error -34 setting MTU to 1500 on port 1
+mv88e6085 d0032004.mdio-mii:10: nonfatal error -34 setting MTU to 1500 on port 2
+mv88e6085 d0032004.mdio-mii:10: nonfatal error -34 setting MTU to 1500 on port 3
+mv88e6085 d0032004.mdio-mii:10: nonfatal error -34 setting MTU to 1500 on port 4
+mv88e6085 d0032004.mdio-mii:10: nonfatal error -34 setting MTU to 1500 on port 5
+mv88e6085 d0032004.mdio-mii:10: nonfatal error -34 setting MTU to 1500 on port 6
+mv88e6085 d0032004.mdio-mii:10: nonfatal error -34 setting MTU to 1500 on port 7
+mv88e6085 d0032004.mdio-mii:10: nonfatal error -34 setting MTU to 1500 on port 8
+
+It is highly implausible that there exist Ethernet switches which don't
+support the standard MTU of 1500 octets, and this is what the DSA
+framework says as well - the error comes from dsa_slave_create() ->
+dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN).
+
+But the error messages are alarming, and it would be good to suppress
+them.
+
+As a consequence of this unlikeliness, we reimplement mv88e6xxx_get_max_mtu()
+and mv88e6xxx_change_mtu() on switches from the 3rd category as follows:
+the maximum supported MTU is 1500, and any request to set the MTU to a
+value larger than that fails in dev_validate_mtu().
+
+Fixes: b9c587fed61c ("dsa: mv88e6xxx: Include tagger overhead when setting MTU for DSA and CPU ports")
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Reviewed-by: Simon Horman <simon.horman@corigine.com>
+Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
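+
+For reference, the 1492 in the subject comes straight from the old fallback
+arithmetic (header sizes as defined in the kernel: VLAN_ETH_HLEN = 18,
+EDSA_HLEN = 8, ETH_FCS_LEN = 4):
+
+	1522 - 18 - 8 - 4 = 1492
+
+which is smaller than ETH_DATA_LEN (1500), so the standard default MTU was
+rejected on these families. After this patch the fallback reports
+ETH_DATA_LEN directly and accepts anything up to it.
+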
+ drivers/net/dsa/mv88e6xxx/chip.c | 16 ++++++++++++----
+ 1 file changed, 12 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index 3b8b2d0fbafaf..3a6db36574ad7 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -3549,7 +3549,7 @@ static int mv88e6xxx_get_max_mtu(struct dsa_switch *ds, int port)
+ return 10240 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
+ else if (chip->info->ops->set_max_frame_size)
+ return 1632 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
+- return 1522 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
++ return ETH_DATA_LEN;
+ }
+
+ static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+@@ -3557,6 +3557,17 @@ static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int ret = 0;
+
++ /* For families where we don't know how to alter the MTU,
++ * just accept any value up to ETH_DATA_LEN
++ */
++ if (!chip->info->ops->port_set_jumbo_size &&
++ !chip->info->ops->set_max_frame_size) {
++ if (new_mtu > ETH_DATA_LEN)
++ return -EINVAL;
++
++ return 0;
++ }
++
+ if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
+ new_mtu += EDSA_HLEN;
+
+@@ -3565,9 +3576,6 @@ static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+ ret = chip->info->ops->port_set_jumbo_size(chip, port, new_mtu);
+ else if (chip->info->ops->set_max_frame_size)
+ ret = chip->info->ops->set_max_frame_size(chip, new_mtu);
+- else
+- if (new_mtu > 1522)
+- ret = -EINVAL;
+ mv88e6xxx_reg_unlock(chip);
+
+ return ret;
+--
+2.39.2
+
--- /dev/null
+From d7fceecbf452c529caee244a227816d689264681 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 Mar 2023 14:14:35 +0100
+Subject: net/iucv: Fix size of interrupt data
+
+From: Alexandra Winter <wintera@linux.ibm.com>
+
+[ Upstream commit 3d87debb8ed2649608ff432699e7c961c0c6f03b ]
+
+iucv_irq_data needs to be 4 bytes larger.
+These bytes are not used by the iucv module, but written by
+the z/VM hypervisor in case a CPU is deconfigured.
+
+Reported as:
+BUG dma-kmalloc-64 (Not tainted): kmalloc Redzone overwritten
+-----------------------------------------------------------------------------
+0x0000000000400564-0x0000000000400567 @offset=1380. First byte 0x80 instead of 0xcc
+Allocated in iucv_cpu_prepare+0x44/0xd0 age=167839 cpu=2 pid=1
+__kmem_cache_alloc_node+0x166/0x450
+kmalloc_node_trace+0x3a/0x70
+iucv_cpu_prepare+0x44/0xd0
+cpuhp_invoke_callback+0x156/0x2f0
+cpuhp_issue_call+0xf0/0x298
+__cpuhp_setup_state_cpuslocked+0x136/0x338
+__cpuhp_setup_state+0xf4/0x288
+iucv_init+0xf4/0x280
+do_one_initcall+0x78/0x390
+do_initcalls+0x11a/0x140
+kernel_init_freeable+0x25e/0x2a0
+kernel_init+0x2e/0x170
+__ret_from_fork+0x3c/0x58
+ret_from_fork+0xa/0x40
+Freed in iucv_init+0x92/0x280 age=167839 cpu=2 pid=1
+__kmem_cache_free+0x308/0x358
+iucv_init+0x92/0x280
+do_one_initcall+0x78/0x390
+do_initcalls+0x11a/0x140
+kernel_init_freeable+0x25e/0x2a0
+kernel_init+0x2e/0x170
+__ret_from_fork+0x3c/0x58
+ret_from_fork+0xa/0x40
+Slab 0x0000037200010000 objects=32 used=30 fp=0x0000000000400640 flags=0x1ffff00000010200(slab|head|node=0|zone=0|
+Object 0x0000000000400540 @offset=1344 fp=0x0000000000000000
+Redzone 0000000000400500: cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc ................
+Redzone 0000000000400510: cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc ................
+Redzone 0000000000400520: cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc ................
+Redzone 0000000000400530: cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc ................
+Object 0000000000400540: 00 01 00 03 00 00 00 00 00 00 00 00 00 00 00 00 ................
+Object 0000000000400550: f3 86 81 f2 f4 82 f8 82 f0 f0 f0 f0 f0 f0 f0 f2 ................
+Object 0000000000400560: 00 00 00 00 80 00 00 00 cc cc cc cc cc cc cc cc ................
+Object 0000000000400570: cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc ................
+Redzone 0000000000400580: cc cc cc cc cc cc cc cc ........
+Padding 00000000004005d4: 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZZZZZZZZZ
+Padding 00000000004005e4: 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZZZZZZZZZ
+Padding 00000000004005f4: 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZZZZZ
+CPU: 6 PID: 121030 Comm: 116-pai-crypto. Not tainted 6.3.0-20230221.rc0.git4.99b8246b2d71.300.fc37.s390x+debug #1
+Hardware name: IBM 3931 A01 704 (z/VM 7.3.0)
+Call Trace:
+[<000000032aa034ec>] dump_stack_lvl+0xac/0x100
+[<0000000329f5a6cc>] check_bytes_and_report+0x104/0x140
+[<0000000329f5aa78>] check_object+0x370/0x3c0
+[<0000000329f5ede6>] free_debug_processing+0x15e/0x348
+[<0000000329f5f06a>] free_to_partial_list+0x9a/0x2f0
+[<0000000329f5f4a4>] __slab_free+0x1e4/0x3a8
+[<0000000329f61768>] __kmem_cache_free+0x308/0x358
+[<000000032a91465c>] iucv_cpu_dead+0x6c/0x88
+[<0000000329c2fc66>] cpuhp_invoke_callback+0x156/0x2f0
+[<000000032aa062da>] _cpu_down.constprop.0+0x22a/0x5e0
+[<0000000329c3243e>] cpu_device_down+0x4e/0x78
+[<000000032a61dee0>] device_offline+0xc8/0x118
+[<000000032a61e048>] online_store+0x60/0xe0
+[<000000032a08b6b0>] kernfs_fop_write_iter+0x150/0x1e8
+[<0000000329fab65c>] vfs_write+0x174/0x360
+[<0000000329fab9fc>] ksys_write+0x74/0x100
+[<000000032aa03a5a>] __do_syscall+0x1da/0x208
+[<000000032aa177b2>] system_call+0x82/0xb0
+INFO: lockdep is turned off.
+FIX dma-kmalloc-64: Restoring kmalloc Redzone 0x0000000000400564-0x0000000000400567=0xcc
+FIX dma-kmalloc-64: Object at 0x0000000000400540 not freed
+
+Fixes: 2356f4cb1911 ("[S390]: Rewrite of the IUCV base code, part 2")
+Signed-off-by: Alexandra Winter <wintera@linux.ibm.com>
+Link: https://lore.kernel.org/r/20230315131435.4113889-1-wintera@linux.ibm.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
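+
+A quick size check (assuming the structure holds only the fields visible in
+the hunk below): the old layout is 2 (ippathid) + 1 (ipflags1) + 1 (iptype)
+plus 4 * 8 (res2), i.e. 36 bytes. The 4 extra bytes written by z/VM land at
+object offsets 36..39, which is exactly the overwritten redzone reported
+above (0x0000000000400564-0x0000000000400567 for the object at
+0x0000000000400540). With res2[9] the structure grows to 40 bytes and
+covers that write.
+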
+ net/iucv/iucv.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
+index eb0295d900395..fc3fddeb6f36d 100644
+--- a/net/iucv/iucv.c
++++ b/net/iucv/iucv.c
+@@ -83,7 +83,7 @@ struct iucv_irq_data {
+ u16 ippathid;
+ u8 ipflags1;
+ u8 iptype;
+- u32 res2[8];
++ u32 res2[9];
+ };
+
+ struct iucv_irq_list {
+--
+2.39.2
+
--- /dev/null
+From 9efbdfe86d4a772e882ae04535aba7102205dec0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Oct 2022 00:13:50 +0300
+Subject: net/mlx5: Disable eswitch before waiting for VF pages
+
+From: Daniel Jurgens <danielj@nvidia.com>
+
+[ Upstream commit 7ba930fc25def6fd736abcdfa224272948a65cf7 ]
+
+The offending commit changed the ordering of moving to legacy mode and
+waiting for the VF pages. Moving to legacy mode is important on
+BlueField, because it sends the host driver into an error state and frees
+its pages. Without this transition we end up waiting 2 minutes for pages
+that aren't coming before carrying on with the unload process.
+
+Fixes: f019679ea5f2 ("net/mlx5: E-switch, Remove dependency between sriov and eswitch mode")
+Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index f07175549a87d..51c3e86f71a94 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -1339,8 +1339,8 @@ static int mlx5_load(struct mlx5_core_dev *dev)
+ static void mlx5_unload(struct mlx5_core_dev *dev)
+ {
+ mlx5_sf_dev_table_destroy(dev);
+- mlx5_sriov_detach(dev);
+ mlx5_eswitch_disable(dev->priv.eswitch);
++ mlx5_sriov_detach(dev);
+ mlx5_lag_remove_mdev(dev);
+ mlx5_ec_cleanup(dev);
+ mlx5_sf_hw_table_destroy(dev);
+--
+2.39.2
+
--- /dev/null
+From f430402689a3de002141c10d432d1356d8635052 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 8 Feb 2023 11:37:41 +0200
+Subject: net/mlx5: E-switch, Fix missing set of split_count when forward to
+ ovs internal port
+
+From: Maor Dickman <maord@nvidia.com>
+
+[ Upstream commit 28d3815a629cbdee660dd1c9de28d77cb3d77917 ]
+
+Rules with mirror actions are split into two FTEs when the actions after
+the mirror action contain pedit, vlan push/pop or ct. Forwarding to an
+ovs internal port adds an implicit header rewrite (pedit), but the
+trigger to do the split was missing.
+
+Fix this by setting split_count when forwarding to an ovs internal port,
+which triggers the split in mirror rules.
+
+Fixes: 27484f7170ed ("net/mlx5e: Offload tc rules that redirect to ovs internal port")
+Signed-off-by: Maor Dickman <maord@nvidia.com>
+Reviewed-by: Roi Dayan <roid@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index 53b7d3775e8dc..a71eaa0601149 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -4054,6 +4054,7 @@ int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv,
+
+ esw_attr->dest_int_port = dest_int_port;
+ esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE;
++ esw_attr->split_count = out_index;
+
+ /* Forward to root fdb for matching against the new source vport */
+ attr->dest_chain = 0;
+--
+2.39.2
+
--- /dev/null
+From d4cba42fd15fff65aedc1061a10c6a6dcf10e943 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Feb 2023 15:07:00 +0200
+Subject: net/mlx5: E-switch, Fix wrong usage of source port rewrite in split
+ rules
+
+From: Maor Dickman <maord@nvidia.com>
+
+[ Upstream commit 1313d78ac0c1cfcff7bdece8da54b080e71487c4 ]
+
+In a few cases, rules with a mirror use case are split into two FTEs: one
+that does the mirror action and forwards to a second FTE, which does the
+rest of the rule actions and the second redirect action.
+For mirror rules that are split and forward to an ovs internal port or a
+VF stack device, source port rewrite should be used only in the second
+FTE, but it is wrongly also set in the first FTE, which breaks the
+offload.
+
+Fix this by removing the wrong check for whether source port rewrite is
+needed on the first FTE of the split, and instead return EOPNOTSUPP,
+which blocks offload of rules that mirror to an ovs internal port or a
+VF stack device, since that is not supported.
+
+Fixes: 10742efc20a4 ("net/mlx5e: VF tunnel TX traffic offloading")
+Fixes: a508728a4c8b ("net/mlx5e: VF tunnel RX traffic offloading")
+Signed-off-by: Maor Dickman <maord@nvidia.com>
+Reviewed-by: Roi Dayan <roid@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+index 5b6c54bde97a2..34790a82a0976 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+@@ -696,11 +696,11 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ for (i = 0; i < esw_attr->split_count; i++) {
+- if (esw_is_indir_table(esw, attr))
+- err = esw_setup_indir_table(dest, &flow_act, esw, attr, false, &i);
+- else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
+- err = esw_setup_chain_src_port_rewrite(dest, &flow_act, esw, chains, attr,
+- &i);
++ if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
++ /* Source port rewrite (forward to ovs internal port or statck device) isn't
++ * supported in the rule of split action.
++ */
++ err = -EOPNOTSUPP;
+ else
+ esw_setup_vport_dest(dest, &flow_act, esw, esw_attr, i, i, false);
+
+--
+2.39.2
+
--- /dev/null
+From 4ef16b30607d0f75a042cb64051e766fe5483b9d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 24 Jun 2021 18:22:57 +0300
+Subject: net/mlx5: Fix setting ec_function bit in MANAGE_PAGES
+
+From: Parav Pandit <parav@nvidia.com>
+
+[ Upstream commit ba5d8f72b82cc197355c9340ef89dab813815865 ]
+
+When the ECPF is a page supplier, the reclaim pages flow failed to honor
+the ec_function bit provided by the firmware: it always set ec_function
+to true during the driver unload flow for the ECPF. This is incorrect.
+
+Honor the ec_function bit that the device provided in the page
+allocation request event.
+
+Fixes: d6945242f45d ("net/mlx5: Hold pages RB tree per VF")
+Signed-off-by: Parav Pandit <parav@nvidia.com>
+Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
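+
+As the helpers added below show, the per-function page tree key packs the
+owner into a single u32: the function id sits in bits 15:0 and the
+ec_function value in the bits above. A small usage sketch (the key value
+is made up for illustration):
+
+	u32 function = (1U << 16) | 0x0005;	/* ec_function = 1, func_id = 5 */
+
+	mlx5_get_ec_function(function);		/* returns 1 */
+	mlx5_get_func_id(function);		/* returns 0x0005 */
+
+Reclaiming with these decoded values, instead of hardcoding
+mlx5_core_is_ecpf(dev), is what restores the ec_function bit the firmware
+provided.
+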
+ .../ethernet/mellanox/mlx5/core/pagealloc.c | 22 ++++++++++++++-----
+ 1 file changed, 17 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+index 64d4e7125e9bb..95dc67fb30015 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+@@ -82,6 +82,16 @@ static u16 func_id_to_type(struct mlx5_core_dev *dev, u16 func_id, bool ec_funct
+ return func_id <= mlx5_core_max_vfs(dev) ? MLX5_VF : MLX5_SF;
+ }
+
++static u32 mlx5_get_ec_function(u32 function)
++{
++ return function >> 16;
++}
++
++static u32 mlx5_get_func_id(u32 function)
++{
++ return function & 0xffff;
++}
++
+ static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
+ {
+ struct rb_root *root;
+@@ -665,20 +675,22 @@ static int optimal_reclaimed_pages(void)
+ }
+
+ static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev,
+- struct rb_root *root, u16 func_id)
++ struct rb_root *root, u32 function)
+ {
+ u64 recl_pages_to_jiffies = msecs_to_jiffies(mlx5_tout_ms(dev, RECLAIM_PAGES));
+ unsigned long end = jiffies + recl_pages_to_jiffies;
+
+ while (!RB_EMPTY_ROOT(root)) {
++ u32 ec_function = mlx5_get_ec_function(function);
++ u32 function_id = mlx5_get_func_id(function);
+ int nclaimed;
+ int err;
+
+- err = reclaim_pages(dev, func_id, optimal_reclaimed_pages(),
+- &nclaimed, false, mlx5_core_is_ecpf(dev));
++ err = reclaim_pages(dev, function_id, optimal_reclaimed_pages(),
++ &nclaimed, false, ec_function);
+ if (err) {
+- mlx5_core_warn(dev, "failed reclaiming pages (%d) for func id 0x%x\n",
+- err, func_id);
++ mlx5_core_warn(dev, "reclaim_pages err (%d) func_id=0x%x ec_func=0x%x\n",
++ err, function_id, ec_function);
+ return err;
+ }
+
+--
+2.39.2
+
--- /dev/null
+From ebe9e232166b7a9a1bcdd008e503d61687592a32 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 Feb 2023 10:36:19 +0200
+Subject: net/mlx5: Set BREAK_FW_WAIT flag first when removing driver
+
+From: Shay Drory <shayd@nvidia.com>
+
+[ Upstream commit 031a163f2c476adcb2c01e27a7d323e66174ac11 ]
+
+Currently, the BREAK_FW_WAIT flag is set after syncing with fw_reset.
+However, fw_reset can call mlx5_load_one(), which waits for the FW init
+bit, and that is exactly the wait BREAK_FW_WAIT is intended to break,
+so the driver might keep waiting in a loop it should exit.
+Fix this by setting the flag before syncing with fw_reset.
+
+Fixes: 8324a02c342a ("net/mlx5: Add exit route when waiting for FW")
+Signed-off-by: Shay Drory <shayd@nvidia.com>
+Reviewed-by: Moshe Shemesh <moshe@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index 51c3e86f71a94..59914f66857da 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -1752,11 +1752,11 @@ static void remove_one(struct pci_dev *pdev)
+ struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ struct devlink *devlink = priv_to_devlink(dev);
+
++ set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
+ /* mlx5_drain_fw_reset() is using devlink APIs. Hence, we must drain
+ * fw_reset before unregistering the devlink.
+ */
+ mlx5_drain_fw_reset(dev);
+- set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
+ devlink_unregister(devlink);
+ mlx5_sriov_disable(pdev);
+ mlx5_crdump_disable(dev);
+--
+2.39.2
+
--- /dev/null
+From afed1d2720beaa8832fa19f864ecebde0699de12 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Mar 2021 07:21:29 -0600
+Subject: net/mlx5e: Don't cache tunnel offloads capability
+
+From: Parav Pandit <parav@nvidia.com>
+
+[ Upstream commit 9a92fe1db9e57ea94388a1d768e8ee42af858377 ]
+
+When mlx5e attaches again after device health recovery, the device
+capabilities might have been changed by the eswitch manager.
+
+For example, in one flow, when the ECPF changes the eswitch mode between
+legacy and switchdev, it updates the flow table tunnel capability.
+
+The cached value is only used in one place, so just check the capability
+there instead.
+
+Fixes: 5bef709d76a2 ("net/mlx5: Enable host PF HCA after eswitch is initialized")
+Signed-off-by: Parav Pandit <parav@nvidia.com>
+Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en.h | 1 -
+ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 4 +---
+ drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 1 -
+ drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 1 -
+ 4 files changed, 1 insertion(+), 6 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+index 26a23047f1f3b..bc76fe6b06230 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+@@ -313,7 +313,6 @@ struct mlx5e_params {
+ } channel;
+ } mqprio;
+ bool rx_cqe_compress_def;
+- bool tunneled_offload_en;
+ struct dim_cq_moder rx_cq_moderation;
+ struct dim_cq_moder tx_cq_moderation;
+ struct mlx5e_packet_merge_param packet_merge;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 142ed2d98cd5d..609a49c1e09e6 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -4911,8 +4911,6 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
+ /* TX inline */
+ mlx5_query_min_inline(mdev, ¶ms->tx_min_inline_mode);
+
+- params->tunneled_offload_en = mlx5_tunnel_inner_ft_supported(mdev);
+-
+ /* AF_XDP */
+ params->xsk = xsk;
+
+@@ -5216,7 +5214,7 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
+ }
+
+ features = MLX5E_RX_RES_FEATURE_PTP;
+- if (priv->channels.params.tunneled_offload_en)
++ if (mlx5_tunnel_inner_ft_supported(mdev))
+ features |= MLX5E_RX_RES_FEATURE_INNER_FT;
+ err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, features,
+ priv->max_nch, priv->drop_rq.rqn,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+index 794cd8dfe9c91..0f744131c6869 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+@@ -707,7 +707,6 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
+ mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
+
+ params->mqprio.num_tc = 1;
+- params->tunneled_offload_en = false;
+ if (rep->vport != MLX5_VPORT_UPLINK)
+ params->vlan_strip_disable = true;
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+index 038ae0fcf9d45..aed4e896179a3 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+@@ -70,7 +70,6 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
+
+ params->packet_merge.type = MLX5E_PACKET_MERGE_NONE;
+ params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN;
+- params->tunneled_offload_en = false;
+
+ /* CQE compression is not supported for IPoIB */
+ params->rx_cqe_compress_def = false;
+--
+2.39.2
+
--- /dev/null
+From 8fb9aca65da8fa6eb8c56b92009ef79b9131b809 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 12 Feb 2023 11:01:43 +0200
+Subject: net/mlx5e: Fix cleanup null-ptr deref on encap lock
+
+From: Paul Blakey <paulb@nvidia.com>
+
+[ Upstream commit c9668f0b1d28570327dbba189f2c61f6f9e43ae7 ]
+
+When the module is unloaded while a peer tc flow is still offloaded,
+first the peer uplink rep profile is changed to a nic profile, and so
+the neigh encap lock is destroyed. Next, during unload, the VF rep
+netdevs are unregistered, which causes the original non-peer tc flow to
+be deleted, which in turn deletes the peer flow. The peer flow deletion
+detaches the encap entry and tries to take the already destroyed encap
+lock, causing the trace below.
+
+Fix this by clearing peer flows during tc eswitch cleanup
+(mlx5e_tc_esw_cleanup()).
+
+Relevant trace:
+[ 4316.837128] BUG: kernel NULL pointer dereference, address: 00000000000001d8
+[ 4316.842239] RIP: 0010:__mutex_lock+0xb5/0xc40
+[ 4316.851897] Call Trace:
+[ 4316.852481] <TASK>
+[ 4316.857214] mlx5e_rep_neigh_entry_release+0x93/0x790 [mlx5_core]
+[ 4316.858258] mlx5e_rep_encap_entry_detach+0xa7/0xf0 [mlx5_core]
+[ 4316.859134] mlx5e_encap_dealloc+0xa3/0xf0 [mlx5_core]
+[ 4316.859867] clean_encap_dests.part.0+0x5c/0xe0 [mlx5_core]
+[ 4316.860605] mlx5e_tc_del_fdb_flow+0x32a/0x810 [mlx5_core]
+[ 4316.862609] __mlx5e_tc_del_fdb_peer_flow+0x1a2/0x250 [mlx5_core]
+[ 4316.863394] mlx5e_tc_del_flow+0x(/0x630 [mlx5_core]
+[ 4316.864090] mlx5e_flow_put+0x5f/0x100 [mlx5_core]
+[ 4316.864771] mlx5e_delete_flower+0x4de/0xa40 [mlx5_core]
+[ 4316.865486] tc_setup_cb_reoffload+0x20/0x80
+[ 4316.865905] fl_reoffload+0x47c/0x510 [cls_flower]
+[ 4316.869181] tcf_block_playback_offloads+0x91/0x1d0
+[ 4316.869649] tcf_block_unbind+0xe7/0x1b0
+[ 4316.870049] tcf_block_offload_cmd.isra.0+0x1ee/0x270
+[ 4316.879266] tcf_block_offload_unbind+0x61/0xa0
+[ 4316.879711] __tcf_block_put+0xa4/0x310
+
+Fixes: 04de7dda7394 ("net/mlx5e: Infrastructure for duplicated offloading of TC flows")
+Fixes: 1418ddd96afd ("net/mlx5e: Duplicate offloaded TC eswitch rules under uplink LAG")
+Signed-off-by: Paul Blakey <paulb@nvidia.com>
+Reviewed-by: Chris Mi <cmi@nvidia.com>
+Reviewed-by: Roi Dayan <roid@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index a71eaa0601149..73af062a87830 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -5160,6 +5160,16 @@ int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
+
+ void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv)
+ {
++ struct mlx5e_rep_priv *rpriv;
++ struct mlx5_eswitch *esw;
++ struct mlx5e_priv *priv;
++
++ rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
++ priv = netdev_priv(rpriv->netdev);
++ esw = priv->mdev->priv.eswitch;
++
++ mlx5e_tc_clean_fdb_peer_flows(esw);
++
+ mlx5e_tc_tun_cleanup(uplink_priv->encap);
+
+ mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
+--
+2.39.2
+
--- /dev/null
+From 7f48b9fe8a54194d861074f54a0f8bf49d3c9349 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 8 Feb 2023 14:25:54 +0200
+Subject: net/mlx5e: Fix macsec ASO context alignment
+
+From: Emeel Hakim <ehakim@nvidia.com>
+
+[ Upstream commit 37beabe9a891b92174cd1aafbfa881fe9e05aa87 ]
+
+Currently the mlx5e_macsec_umr struct does not satisfy the hardware
+memory alignment requirement. Hence the result of querying the advanced
+steering operation (ASO) is not copied to the memory region as expected.
+
+Fix this by satisfying the hardware memory alignment requirement and
+moving the context to be the first field of the struct for better
+readability.
+
+Fixes: 1f53da676439 ("net/mlx5e: Create advanced steering operation (ASO) object for MACsec")
+Signed-off-by: Emeel Hakim <ehakim@nvidia.com>
+Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
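+
+A minimal standalone sketch of why the fix works (the 64-byte requirement
+is taken from the __aligned(64) annotation in the hunk below; the context
+size is a stand-in, not the real MLX5_ST_SZ_BYTES(macsec_aso)):
+
+#include <stddef.h>
+#include <stdint.h>
+
+#define ASO_CTX_SZ 64	/* stand-in for MLX5_ST_SZ_BYTES(macsec_aso) */
+
+struct umr_old {	/* ctx starts at offset 8 and is only 8-byte aligned */
+	uint64_t dma_addr;
+	uint8_t ctx[ASO_CTX_SZ];
+	uint32_t mkey;
+};
+
+struct umr_new {
+	/* ctx first and over-aligned: the attribute raises the alignment of
+	 * the whole struct to 64, so ctx itself is 64-byte aligned.
+	 */
+	uint8_t __attribute__((aligned(64))) ctx[ASO_CTX_SZ];
+	uint64_t dma_addr;
+	uint32_t mkey;
+};
+
+_Static_assert(offsetof(struct umr_new, ctx) == 0, "ctx must be first");
+_Static_assert(_Alignof(struct umr_new) == 64, "context needs 64-byte alignment");
+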
+ drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+index b92d541b5286e..0c23340bfcc75 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+@@ -89,8 +89,8 @@ struct mlx5e_macsec_rx_sc {
+ };
+
+ struct mlx5e_macsec_umr {
++ u8 __aligned(64) ctx[MLX5_ST_SZ_BYTES(macsec_aso)];
+ dma_addr_t dma_addr;
+- u8 ctx[MLX5_ST_SZ_BYTES(macsec_aso)];
+ u32 mkey;
+ };
+
+--
+2.39.2
+
--- /dev/null
+From 7dd5ebfecb8caff2f19d37949c314204ce955d6e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 20 Dec 2022 11:21:22 +0200
+Subject: net/mlx5e: Support Geneve and GRE with VF tunnel offload
+
+From: Maor Dickman <maord@nvidia.com>
+
+[ Upstream commit 521933cdc4aad133b410d8f64b03f60345021138 ]
+
+Today VF tunnel offload (tunnel endpoint is on the VF) is implemented
+by an indirect table which uses rules that match on the VXLAN VNI to
+recirculate to the root table; this limits the support to VXLAN
+tunnels only.
+
+This patch changes the indirect table to use one single match-all rule
+to recirculate to the root table, which is added whenever a tunnel
+decap rule with a VF tunnel endpoint is added. This allows supporting
+Geneve and GRE with this configuration.
+
+Signed-off-by: Maor Dickman <maord@nvidia.com>
+Reviewed-by: Paul Blakey <paulb@nvidia.com>
+Reviewed-by: Roi Dayan <roid@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Stable-dep-of: 1313d78ac0c1 ("net/mlx5: E-switch, Fix wrong usage of source port rewrite in split rules")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../ethernet/mellanox/mlx5/core/en/tc_tun.c | 2 -
+ .../net/ethernet/mellanox/mlx5/core/en_tc.c | 9 +-
+ .../net/ethernet/mellanox/mlx5/core/en_tc.h | 2 -
+ .../mellanox/mlx5/core/esw/indir_table.c | 203 +++---------------
+ .../mellanox/mlx5/core/esw/indir_table.h | 4 -
+ .../mellanox/mlx5/core/eswitch_offloads.c | 23 +-
+ 6 files changed, 48 insertions(+), 195 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+index e6f64d890fb34..83bb0811e7741 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+@@ -745,8 +745,6 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
+ if (err)
+ goto out;
+
+- esw_attr->rx_tun_attr->vni = MLX5_GET(fte_match_param, spec->match_value,
+- misc_parameters.vxlan_vni);
+ esw_attr->rx_tun_attr->decap_vport = vport_num;
+ } else if (netif_is_ovs_master(attr.route_dev) && mlx5e_tc_int_port_supported(esw)) {
+ int_port = mlx5e_tc_int_port_get(mlx5e_get_int_port_priv(priv),
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index c1cf3917baa43..53b7d3775e8dc 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -2401,13 +2401,13 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
+ err = mlx5e_tc_set_attr_rx_tun(flow, spec);
+ if (err)
+ return err;
+- } else if (tunnel && tunnel->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
++ } else if (tunnel) {
+ struct mlx5_flow_spec *tmp_spec;
+
+ tmp_spec = kvzalloc(sizeof(*tmp_spec), GFP_KERNEL);
+ if (!tmp_spec) {
+- NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory for vxlan tmp spec");
+- netdev_warn(priv->netdev, "Failed to allocate memory for vxlan tmp spec");
++ NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory for tunnel tmp spec");
++ netdev_warn(priv->netdev, "Failed to allocate memory for tunnel tmp spec");
+ return -ENOMEM;
+ }
+ memcpy(tmp_spec, spec, sizeof(*tmp_spec));
+@@ -4311,9 +4311,6 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
+ if (err)
+ goto err_free;
+
+- /* always set IP version for indirect table handling */
+- flow->attr->ip_version = mlx5e_tc_get_ip_version(&parse_attr->spec, true);
+-
+ err = parse_tc_fdb_actions(priv, &rule->action, flow, extack);
+ if (err)
+ goto err_free;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+index 48241317a5354..edd5f09440f9f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+@@ -82,7 +82,6 @@ struct mlx5_flow_attr {
+ struct mlx5_flow_table *dest_ft;
+ u8 inner_match_level;
+ u8 outer_match_level;
+- u8 ip_version;
+ u8 tun_ip_version;
+ int tunnel_id; /* mapped tunnel id */
+ u32 flags;
+@@ -129,7 +128,6 @@ struct mlx5_rx_tun_attr {
+ __be32 v4;
+ struct in6_addr v6;
+ } dst_ip; /* Valid if decap_vport is not zero */
+- u32 vni;
+ };
+
+ #define MLX5E_TC_TABLE_CHAIN_TAG_BITS 16
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
+index c9a91158e99c9..8a94870c5b43c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
+@@ -16,18 +16,12 @@
+ #include "lib/fs_chains.h"
+ #include "en/mod_hdr.h"
+
+-#define MLX5_ESW_INDIR_TABLE_SIZE 128
+-#define MLX5_ESW_INDIR_TABLE_RECIRC_IDX_MAX (MLX5_ESW_INDIR_TABLE_SIZE - 2)
++#define MLX5_ESW_INDIR_TABLE_SIZE 2
++#define MLX5_ESW_INDIR_TABLE_RECIRC_IDX (MLX5_ESW_INDIR_TABLE_SIZE - 2)
+ #define MLX5_ESW_INDIR_TABLE_FWD_IDX (MLX5_ESW_INDIR_TABLE_SIZE - 1)
+
+ struct mlx5_esw_indir_table_rule {
+- struct list_head list;
+ struct mlx5_flow_handle *handle;
+- union {
+- __be32 v4;
+- struct in6_addr v6;
+- } dst_ip;
+- u32 vni;
+ struct mlx5_modify_hdr *mh;
+ refcount_t refcnt;
+ };
+@@ -38,12 +32,10 @@ struct mlx5_esw_indir_table_entry {
+ struct mlx5_flow_group *recirc_grp;
+ struct mlx5_flow_group *fwd_grp;
+ struct mlx5_flow_handle *fwd_rule;
+- struct list_head recirc_rules;
+- int recirc_cnt;
++ struct mlx5_esw_indir_table_rule *recirc_rule;
+ int fwd_ref;
+
+ u16 vport;
+- u8 ip_version;
+ };
+
+ struct mlx5_esw_indir_table {
+@@ -89,7 +81,6 @@ mlx5_esw_indir_table_needed(struct mlx5_eswitch *esw,
+ return esw_attr->in_rep->vport == MLX5_VPORT_UPLINK &&
+ vf_sf_vport &&
+ esw->dev == dest_mdev &&
+- attr->ip_version &&
+ attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE;
+ }
+
+@@ -101,27 +92,8 @@ mlx5_esw_indir_table_decap_vport(struct mlx5_flow_attr *attr)
+ return esw_attr->rx_tun_attr ? esw_attr->rx_tun_attr->decap_vport : 0;
+ }
+
+-static struct mlx5_esw_indir_table_rule *
+-mlx5_esw_indir_table_rule_lookup(struct mlx5_esw_indir_table_entry *e,
+- struct mlx5_esw_flow_attr *attr)
+-{
+- struct mlx5_esw_indir_table_rule *rule;
+-
+- list_for_each_entry(rule, &e->recirc_rules, list)
+- if (rule->vni == attr->rx_tun_attr->vni &&
+- !memcmp(&rule->dst_ip, &attr->rx_tun_attr->dst_ip,
+- sizeof(attr->rx_tun_attr->dst_ip)))
+- goto found;
+- return NULL;
+-
+-found:
+- refcount_inc(&rule->refcnt);
+- return rule;
+-}
+-
+ static int mlx5_esw_indir_table_rule_get(struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+- struct mlx5_flow_spec *spec,
+ struct mlx5_esw_indir_table_entry *e)
+ {
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+@@ -130,73 +102,18 @@ static int mlx5_esw_indir_table_rule_get(struct mlx5_eswitch *esw,
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_esw_indir_table_rule *rule;
+ struct mlx5_flow_act flow_act = {};
+- struct mlx5_flow_spec *rule_spec;
+ struct mlx5_flow_handle *handle;
+ int err = 0;
+ u32 data;
+
+- rule = mlx5_esw_indir_table_rule_lookup(e, esw_attr);
+- if (rule)
++ if (e->recirc_rule) {
++ refcount_inc(&e->recirc_rule->refcnt);
+ return 0;
+-
+- if (e->recirc_cnt == MLX5_ESW_INDIR_TABLE_RECIRC_IDX_MAX)
+- return -EINVAL;
+-
+- rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
+- if (!rule_spec)
+- return -ENOMEM;
+-
+- rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+- if (!rule) {
+- err = -ENOMEM;
+- goto out;
+ }
+
+- rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
+- MLX5_MATCH_MISC_PARAMETERS |
+- MLX5_MATCH_MISC_PARAMETERS_2;
+- if (MLX5_CAP_FLOWTABLE_NIC_RX(esw->dev, ft_field_support.outer_ip_version)) {
+- MLX5_SET(fte_match_param, rule_spec->match_criteria,
+- outer_headers.ip_version, 0xf);
+- MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ip_version,
+- attr->ip_version);
+- } else if (attr->ip_version) {
+- MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+- outer_headers.ethertype);
+- MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ethertype,
+- (attr->ip_version == 4 ? ETH_P_IP : ETH_P_IPV6));
+- } else {
+- err = -EOPNOTSUPP;
+- goto err_ethertype;
+- }
+-
+- if (attr->ip_version == 4) {
+- MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+- outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+- MLX5_SET(fte_match_param, rule_spec->match_value,
+- outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
+- ntohl(esw_attr->rx_tun_attr->dst_ip.v4));
+- } else if (attr->ip_version == 6) {
+- int len = sizeof(struct in6_addr);
+-
+- memset(MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
+- outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+- 0xff, len);
+- memcpy(MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
+- outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+- &esw_attr->rx_tun_attr->dst_ip.v6, len);
+- }
+-
+- MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+- misc_parameters.vxlan_vni);
+- MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters.vxlan_vni,
+- MLX5_GET(fte_match_param, spec->match_value, misc_parameters.vxlan_vni));
+-
+- MLX5_SET(fte_match_param, rule_spec->match_criteria,
+- misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
+- MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
+- mlx5_eswitch_get_vport_metadata_for_match(esw_attr->in_mdev->priv.eswitch,
+- MLX5_VPORT_UPLINK));
++ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
++ if (!rule)
++ return -ENOMEM;
+
+ /* Modify flow source to recirculate packet */
+ data = mlx5_eswitch_get_vport_metadata_for_set(esw, esw_attr->rx_tun_attr->decap_vport);
+@@ -219,13 +136,14 @@ static int mlx5_esw_indir_table_rule_get(struct mlx5_eswitch *esw,
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ flow_act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL | FLOW_ACT_NO_APPEND;
++ flow_act.fg = e->recirc_grp;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = mlx5_chains_get_table(chains, 0, 1, 0);
+ if (IS_ERR(dest.ft)) {
+ err = PTR_ERR(dest.ft);
+ goto err_table;
+ }
+- handle = mlx5_add_flow_rules(e->ft, rule_spec, &flow_act, &dest, 1);
++ handle = mlx5_add_flow_rules(e->ft, NULL, &flow_act, &dest, 1);
+ if (IS_ERR(handle)) {
+ err = PTR_ERR(handle);
+ goto err_handle;
+@@ -233,14 +151,10 @@ static int mlx5_esw_indir_table_rule_get(struct mlx5_eswitch *esw,
+
+ mlx5e_mod_hdr_dealloc(&mod_acts);
+ rule->handle = handle;
+- rule->vni = esw_attr->rx_tun_attr->vni;
+ rule->mh = flow_act.modify_hdr;
+- memcpy(&rule->dst_ip, &esw_attr->rx_tun_attr->dst_ip,
+- sizeof(esw_attr->rx_tun_attr->dst_ip));
+ refcount_set(&rule->refcnt, 1);
+- list_add(&rule->list, &e->recirc_rules);
+- e->recirc_cnt++;
+- goto out;
++ e->recirc_rule = rule;
++ return 0;
+
+ err_handle:
+ mlx5_chains_put_table(chains, 0, 1, 0);
+@@ -250,89 +164,44 @@ static int mlx5_esw_indir_table_rule_get(struct mlx5_eswitch *esw,
+ err_mod_hdr_regc1:
+ mlx5e_mod_hdr_dealloc(&mod_acts);
+ err_mod_hdr_regc0:
+-err_ethertype:
+ kfree(rule);
+-out:
+- kvfree(rule_spec);
+ return err;
+ }
+
+ static void mlx5_esw_indir_table_rule_put(struct mlx5_eswitch *esw,
+- struct mlx5_flow_attr *attr,
+ struct mlx5_esw_indir_table_entry *e)
+ {
+- struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
++ struct mlx5_esw_indir_table_rule *rule = e->recirc_rule;
+ struct mlx5_fs_chains *chains = esw_chains(esw);
+- struct mlx5_esw_indir_table_rule *rule;
+
+- list_for_each_entry(rule, &e->recirc_rules, list)
+- if (rule->vni == esw_attr->rx_tun_attr->vni &&
+- !memcmp(&rule->dst_ip, &esw_attr->rx_tun_attr->dst_ip,
+- sizeof(esw_attr->rx_tun_attr->dst_ip)))
+- goto found;
+-
+- return;
++ if (!rule)
++ return;
+
+-found:
+ if (!refcount_dec_and_test(&rule->refcnt))
+ return;
+
+ mlx5_del_flow_rules(rule->handle);
+ mlx5_chains_put_table(chains, 0, 1, 0);
+ mlx5_modify_header_dealloc(esw->dev, rule->mh);
+- list_del(&rule->list);
+ kfree(rule);
+- e->recirc_cnt--;
++ e->recirc_rule = NULL;
+ }
+
+-static int mlx5_create_indir_recirc_group(struct mlx5_eswitch *esw,
+- struct mlx5_flow_attr *attr,
+- struct mlx5_flow_spec *spec,
+- struct mlx5_esw_indir_table_entry *e)
++static int mlx5_create_indir_recirc_group(struct mlx5_esw_indir_table_entry *e)
+ {
+ int err = 0, inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+- u32 *in, *match;
++ u32 *in;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+- MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS |
+- MLX5_MATCH_MISC_PARAMETERS | MLX5_MATCH_MISC_PARAMETERS_2);
+- match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+-
+- if (MLX5_CAP_FLOWTABLE_NIC_RX(esw->dev, ft_field_support.outer_ip_version))
+- MLX5_SET(fte_match_param, match, outer_headers.ip_version, 0xf);
+- else
+- MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.ethertype);
+-
+- if (attr->ip_version == 4) {
+- MLX5_SET_TO_ONES(fte_match_param, match,
+- outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+- } else if (attr->ip_version == 6) {
+- memset(MLX5_ADDR_OF(fte_match_param, match,
+- outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+- 0xff, sizeof(struct in6_addr));
+- } else {
+- err = -EOPNOTSUPP;
+- goto out;
+- }
+-
+- MLX5_SET_TO_ONES(fte_match_param, match, misc_parameters.vxlan_vni);
+- MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
+- mlx5_eswitch_get_vport_metadata_mask());
+ MLX5_SET(create_flow_group_in, in, start_flow_index, 0);
+- MLX5_SET(create_flow_group_in, in, end_flow_index, MLX5_ESW_INDIR_TABLE_RECIRC_IDX_MAX);
++ MLX5_SET(create_flow_group_in, in, end_flow_index, MLX5_ESW_INDIR_TABLE_RECIRC_IDX);
+ e->recirc_grp = mlx5_create_flow_group(e->ft, in);
+- if (IS_ERR(e->recirc_grp)) {
++ if (IS_ERR(e->recirc_grp))
+ err = PTR_ERR(e->recirc_grp);
+- goto out;
+- }
+
+- INIT_LIST_HEAD(&e->recirc_rules);
+- e->recirc_cnt = 0;
+-
+-out:
+ kvfree(in);
+ return err;
+ }
+@@ -366,6 +235,7 @@ static int mlx5_create_indir_fwd_group(struct mlx5_eswitch *esw,
+ }
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
++ flow_act.fg = e->fwd_grp;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest.vport.num = e->vport;
+ dest.vport.vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
+@@ -384,7 +254,7 @@ static int mlx5_create_indir_fwd_group(struct mlx5_eswitch *esw,
+
+ static struct mlx5_esw_indir_table_entry *
+ mlx5_esw_indir_table_entry_create(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr,
+- struct mlx5_flow_spec *spec, u16 vport, bool decap)
++ u16 vport, bool decap)
+ {
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_namespace *root_ns;
+@@ -412,15 +282,14 @@ mlx5_esw_indir_table_entry_create(struct mlx5_eswitch *esw, struct mlx5_flow_att
+ }
+ e->ft = ft;
+ e->vport = vport;
+- e->ip_version = attr->ip_version;
+ e->fwd_ref = !decap;
+
+- err = mlx5_create_indir_recirc_group(esw, attr, spec, e);
++ err = mlx5_create_indir_recirc_group(e);
+ if (err)
+ goto recirc_grp_err;
+
+ if (decap) {
+- err = mlx5_esw_indir_table_rule_get(esw, attr, spec, e);
++ err = mlx5_esw_indir_table_rule_get(esw, attr, e);
+ if (err)
+ goto recirc_rule_err;
+ }
+@@ -430,13 +299,13 @@ mlx5_esw_indir_table_entry_create(struct mlx5_eswitch *esw, struct mlx5_flow_att
+ goto fwd_grp_err;
+
+ hash_add(esw->fdb_table.offloads.indir->table, &e->hlist,
+- vport << 16 | attr->ip_version);
++ vport << 16);
+
+ return e;
+
+ fwd_grp_err:
+ if (decap)
+- mlx5_esw_indir_table_rule_put(esw, attr, e);
++ mlx5_esw_indir_table_rule_put(esw, e);
+ recirc_rule_err:
+ mlx5_destroy_flow_group(e->recirc_grp);
+ recirc_grp_err:
+@@ -447,13 +316,13 @@ mlx5_esw_indir_table_entry_create(struct mlx5_eswitch *esw, struct mlx5_flow_att
+ }
+
+ static struct mlx5_esw_indir_table_entry *
+-mlx5_esw_indir_table_entry_lookup(struct mlx5_eswitch *esw, u16 vport, u8 ip_version)
++mlx5_esw_indir_table_entry_lookup(struct mlx5_eswitch *esw, u16 vport)
+ {
+ struct mlx5_esw_indir_table_entry *e;
+- u32 key = vport << 16 | ip_version;
++ u32 key = vport << 16;
+
+ hash_for_each_possible(esw->fdb_table.offloads.indir->table, e, hlist, key)
+- if (e->vport == vport && e->ip_version == ip_version)
++ if (e->vport == vport)
+ return e;
+
+ return NULL;
+@@ -461,24 +330,23 @@ mlx5_esw_indir_table_entry_lookup(struct mlx5_eswitch *esw, u16 vport, u8 ip_ver
+
+ struct mlx5_flow_table *mlx5_esw_indir_table_get(struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+- struct mlx5_flow_spec *spec,
+ u16 vport, bool decap)
+ {
+ struct mlx5_esw_indir_table_entry *e;
+ int err;
+
+ mutex_lock(&esw->fdb_table.offloads.indir->lock);
+- e = mlx5_esw_indir_table_entry_lookup(esw, vport, attr->ip_version);
++ e = mlx5_esw_indir_table_entry_lookup(esw, vport);
+ if (e) {
+ if (!decap) {
+ e->fwd_ref++;
+ } else {
+- err = mlx5_esw_indir_table_rule_get(esw, attr, spec, e);
++ err = mlx5_esw_indir_table_rule_get(esw, attr, e);
+ if (err)
+ goto out_err;
+ }
+ } else {
+- e = mlx5_esw_indir_table_entry_create(esw, attr, spec, vport, decap);
++ e = mlx5_esw_indir_table_entry_create(esw, attr, vport, decap);
+ if (IS_ERR(e)) {
+ err = PTR_ERR(e);
+ esw_warn(esw->dev, "Failed to create indirection table, err %d.\n", err);
+@@ -494,22 +362,21 @@ struct mlx5_flow_table *mlx5_esw_indir_table_get(struct mlx5_eswitch *esw,
+ }
+
+ void mlx5_esw_indir_table_put(struct mlx5_eswitch *esw,
+- struct mlx5_flow_attr *attr,
+ u16 vport, bool decap)
+ {
+ struct mlx5_esw_indir_table_entry *e;
+
+ mutex_lock(&esw->fdb_table.offloads.indir->lock);
+- e = mlx5_esw_indir_table_entry_lookup(esw, vport, attr->ip_version);
++ e = mlx5_esw_indir_table_entry_lookup(esw, vport);
+ if (!e)
+ goto out;
+
+ if (!decap)
+ e->fwd_ref--;
+ else
+- mlx5_esw_indir_table_rule_put(esw, attr, e);
++ mlx5_esw_indir_table_rule_put(esw, e);
+
+- if (e->fwd_ref || e->recirc_cnt)
++ if (e->fwd_ref || e->recirc_rule)
+ goto out;
+
+ hash_del(&e->hlist);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h
+index 21d56b49d14bc..036f5b3a341b9 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h
+@@ -13,10 +13,8 @@ mlx5_esw_indir_table_destroy(struct mlx5_esw_indir_table *indir);
+
+ struct mlx5_flow_table *mlx5_esw_indir_table_get(struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+- struct mlx5_flow_spec *spec,
+ u16 vport, bool decap);
+ void mlx5_esw_indir_table_put(struct mlx5_eswitch *esw,
+- struct mlx5_flow_attr *attr,
+ u16 vport, bool decap);
+
+ bool
+@@ -44,7 +42,6 @@ mlx5_esw_indir_table_destroy(struct mlx5_esw_indir_table *indir)
+ static inline struct mlx5_flow_table *
+ mlx5_esw_indir_table_get(struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+- struct mlx5_flow_spec *spec,
+ u16 vport, bool decap)
+ {
+ return ERR_PTR(-EOPNOTSUPP);
+@@ -52,7 +49,6 @@ mlx5_esw_indir_table_get(struct mlx5_eswitch *esw,
+
+ static inline void
+ mlx5_esw_indir_table_put(struct mlx5_eswitch *esw,
+- struct mlx5_flow_attr *attr,
+ u16 vport, bool decap)
+ {
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+index 235f6f0a70523..5b6c54bde97a2 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+@@ -178,15 +178,14 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
+
+ static int
+ esw_setup_decap_indir(struct mlx5_eswitch *esw,
+- struct mlx5_flow_attr *attr,
+- struct mlx5_flow_spec *spec)
++ struct mlx5_flow_attr *attr)
+ {
+ struct mlx5_flow_table *ft;
+
+ if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
+ return -EOPNOTSUPP;
+
+- ft = mlx5_esw_indir_table_get(esw, attr, spec,
++ ft = mlx5_esw_indir_table_get(esw, attr,
+ mlx5_esw_indir_table_decap_vport(attr), true);
+ return PTR_ERR_OR_ZERO(ft);
+ }
+@@ -196,7 +195,7 @@ esw_cleanup_decap_indir(struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr)
+ {
+ if (mlx5_esw_indir_table_decap_vport(attr))
+- mlx5_esw_indir_table_put(esw, attr,
++ mlx5_esw_indir_table_put(esw,
+ mlx5_esw_indir_table_decap_vport(attr),
+ true);
+ }
+@@ -219,7 +218,6 @@ esw_setup_ft_dest(struct mlx5_flow_destination *dest,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+- struct mlx5_flow_spec *spec,
+ int i)
+ {
+ flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+@@ -227,7 +225,7 @@ esw_setup_ft_dest(struct mlx5_flow_destination *dest,
+ dest[i].ft = attr->dest_ft;
+
+ if (mlx5_esw_indir_table_decap_vport(attr))
+- return esw_setup_decap_indir(esw, attr, spec);
++ return esw_setup_decap_indir(esw, attr);
+ return 0;
+ }
+
+@@ -282,7 +280,7 @@ static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_
+ mlx5_chains_put_table(chains, 0, 1, 0);
+ else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
+ esw_attr->dests[i].mdev))
+- mlx5_esw_indir_table_put(esw, attr, esw_attr->dests[i].rep->vport,
++ mlx5_esw_indir_table_put(esw, esw_attr->dests[i].rep->vport,
+ false);
+ }
+
+@@ -368,7 +366,6 @@ esw_setup_indir_table(struct mlx5_flow_destination *dest,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+- struct mlx5_flow_spec *spec,
+ bool ignore_flow_lvl,
+ int *i)
+ {
+@@ -383,7 +380,7 @@ esw_setup_indir_table(struct mlx5_flow_destination *dest,
+ flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+
+- dest[*i].ft = mlx5_esw_indir_table_get(esw, attr, spec,
++ dest[*i].ft = mlx5_esw_indir_table_get(esw, attr,
+ esw_attr->dests[j].rep->vport, false);
+ if (IS_ERR(dest[*i].ft)) {
+ err = PTR_ERR(dest[*i].ft);
+@@ -392,7 +389,7 @@ esw_setup_indir_table(struct mlx5_flow_destination *dest,
+ }
+
+ if (mlx5_esw_indir_table_decap_vport(attr)) {
+- err = esw_setup_decap_indir(esw, attr, spec);
++ err = esw_setup_decap_indir(esw, attr);
+ if (err)
+ goto err_indir_tbl_get;
+ }
+@@ -490,14 +487,14 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
+ esw_setup_accept_dest(dest, flow_act, chains, *i);
+ (*i)++;
+ } else if (esw_is_indir_table(esw, attr)) {
+- err = esw_setup_indir_table(dest, flow_act, esw, attr, spec, true, i);
++ err = esw_setup_indir_table(dest, flow_act, esw, attr, true, i);
+ } else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) {
+ err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i);
+ } else {
+ *i = esw_setup_vport_dests(dest, flow_act, esw, esw_attr, *i);
+
+ if (attr->dest_ft) {
+- err = esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
++ err = esw_setup_ft_dest(dest, flow_act, esw, attr, *i);
+ (*i)++;
+ } else if (attr->dest_chain) {
+ err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,
+@@ -700,7 +697,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ for (i = 0; i < esw_attr->split_count; i++) {
+ if (esw_is_indir_table(esw, attr))
+- err = esw_setup_indir_table(dest, &flow_act, esw, attr, spec, false, &i);
++ err = esw_setup_indir_table(dest, &flow_act, esw, attr, false, &i);
+ else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
+ err = esw_setup_chain_src_port_rewrite(dest, &flow_act, esw, chains, attr,
+ &i);
+--
+2.39.2
+
--- /dev/null
+From 171950f6ac7e5814d5a887a7fb9df42161154049 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 11 Mar 2023 19:34:45 +0100
+Subject: net: phy: smsc: bail out in lan87xx_read_status if genphy_read_status
+ fails
+
+From: Heiner Kallweit <hkallweit1@gmail.com>
+
+[ Upstream commit c22c3bbf351e4ce905f082649cffa1ff893ea8c1 ]
+
+If genphy_read_status fails then further access to the PHY may result
+in unpredictable behavior. To prevent this, bail out immediately if
+genphy_read_status fails.
+
+Fixes: 4223dbffed9f ("net: phy: smsc: Re-enable EDPD mode for LAN87xx")
+Signed-off-by: Heiner Kallweit <hkallweit1@gmail.com>
+Reviewed-by: Simon Horman <simon.horman@corigine.com>
+Link: https://lore.kernel.org/r/026aa4f2-36f5-1c10-ab9f-cdb17dda6ac4@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/phy/smsc.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
+index 00d9eff91dcfa..df2c5435c5c49 100644
+--- a/drivers/net/phy/smsc.c
++++ b/drivers/net/phy/smsc.c
+@@ -199,8 +199,11 @@ static int lan95xx_config_aneg_ext(struct phy_device *phydev)
+ static int lan87xx_read_status(struct phy_device *phydev)
+ {
+ struct smsc_phy_priv *priv = phydev->priv;
++ int err;
+
+- int err = genphy_read_status(phydev);
++ err = genphy_read_status(phydev);
++ if (err)
++ return err;
+
+ if (!phydev->link && priv->energy_enable && phydev->irq == PHY_POLL) {
+ /* Disable EDPD to wake up PHY */
+--
+2.39.2
+
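The pattern behind this fix is generic C error handling: once the status read fails, the error must be propagated before the code touches the device again. The sketch below reproduces just that control flow in user space; read_status() and wake_device() are made-up stand-ins, not the phylib API, and -EIO merely simulates a failed MDIO read.

#include <errno.h>
#include <stdio.h>

/* hypothetical stand-ins for genphy_read_status() and the EDPD wake-up */
static int read_status(int *link_up)
{
        *link_up = 0;
        return -EIO;            /* simulate a failed MDIO read */
}

static void wake_device(void)
{
        puts("poking the hardware");
}

static int read_status_with_bailout(void)
{
        int link_up;
        int err = read_status(&link_up);

        if (err)
                return err;     /* bail out: never touch the device after a failed read */

        if (!link_up)
                wake_device();  /* only reached when the status read succeeded */
        return 0;
}

int main(void)
{
        printf("read_status_with_bailout() -> %d\n", read_status_with_bailout());
        return 0;
}
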
--- /dev/null
+From a466d740fc7a3e9dfe719e02ed26791942457b21 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Mar 2023 11:08:28 +0100
+Subject: net/smc: fix deadlock triggered by cancel_delayed_work_sync()
+
+From: Wenjia Zhang <wenjia@linux.ibm.com>
+
+[ Upstream commit 13085e1b5cab8ad802904d72e6a6dae85ae0cd20 ]
+
+The following lockdep splat was detected:
+ Workqueue: events smc_lgr_free_work [smc]
+ WARNING: possible circular locking dependency detected
+ 6.1.0-20221027.rc2.git8.56bc5b569087.300.fc36.s390x+debug #1 Not tainted
+ ------------------------------------------------------
+ kworker/3:0/176251 is trying to acquire lock:
+ 00000000f1467148 ((wq_completion)smc_tx_wq-00000000#2){+.+.}-{0:0},
+ at: __flush_workqueue+0x7a/0x4f0
+ but task is already holding lock:
+ 0000037fffe97dc8 ((work_completion)(&(&lgr->free_work)->work)){+.+.}-{0:0},
+ at: process_one_work+0x232/0x730
+ which lock already depends on the new lock.
+ the existing dependency chain (in reverse order) is:
+ -> #4 ((work_completion)(&(&lgr->free_work)->work)){+.+.}-{0:0}:
+ __lock_acquire+0x58e/0xbd8
+ lock_acquire.part.0+0xe2/0x248
+ lock_acquire+0xac/0x1c8
+ __flush_work+0x76/0xf0
+ __cancel_work_timer+0x170/0x220
+ __smc_lgr_terminate.part.0+0x34/0x1c0 [smc]
+ smc_connect_rdma+0x15e/0x418 [smc]
+ __smc_connect+0x234/0x480 [smc]
+ smc_connect+0x1d6/0x230 [smc]
+ __sys_connect+0x90/0xc0
+ __do_sys_socketcall+0x186/0x370
+ __do_syscall+0x1da/0x208
+ system_call+0x82/0xb0
+ -> #3 (smc_client_lgr_pending){+.+.}-{3:3}:
+ __lock_acquire+0x58e/0xbd8
+ lock_acquire.part.0+0xe2/0x248
+ lock_acquire+0xac/0x1c8
+ __mutex_lock+0x96/0x8e8
+ mutex_lock_nested+0x32/0x40
+ smc_connect_rdma+0xa4/0x418 [smc]
+ __smc_connect+0x234/0x480 [smc]
+ smc_connect+0x1d6/0x230 [smc]
+ __sys_connect+0x90/0xc0
+ __do_sys_socketcall+0x186/0x370
+ __do_syscall+0x1da/0x208
+ system_call+0x82/0xb0
+ -> #2 (sk_lock-AF_SMC){+.+.}-{0:0}:
+ __lock_acquire+0x58e/0xbd8
+ lock_acquire.part.0+0xe2/0x248
+ lock_acquire+0xac/0x1c8
+ lock_sock_nested+0x46/0xa8
+ smc_tx_work+0x34/0x50 [smc]
+ process_one_work+0x30c/0x730
+ worker_thread+0x62/0x420
+ kthread+0x138/0x150
+ __ret_from_fork+0x3c/0x58
+ ret_from_fork+0xa/0x40
+ -> #1 ((work_completion)(&(&smc->conn.tx_work)->work)){+.+.}-{0:0}:
+ __lock_acquire+0x58e/0xbd8
+ lock_acquire.part.0+0xe2/0x248
+ lock_acquire+0xac/0x1c8
+ process_one_work+0x2bc/0x730
+ worker_thread+0x62/0x420
+ kthread+0x138/0x150
+ __ret_from_fork+0x3c/0x58
+ ret_from_fork+0xa/0x40
+ -> #0 ((wq_completion)smc_tx_wq-00000000#2){+.+.}-{0:0}:
+ check_prev_add+0xd8/0xe88
+ validate_chain+0x70c/0xb20
+ __lock_acquire+0x58e/0xbd8
+ lock_acquire.part.0+0xe2/0x248
+ lock_acquire+0xac/0x1c8
+ __flush_workqueue+0xaa/0x4f0
+ drain_workqueue+0xaa/0x158
+ destroy_workqueue+0x44/0x2d8
+ smc_lgr_free+0x9e/0xf8 [smc]
+ process_one_work+0x30c/0x730
+ worker_thread+0x62/0x420
+ kthread+0x138/0x150
+ __ret_from_fork+0x3c/0x58
+ ret_from_fork+0xa/0x40
+ other info that might help us debug this:
+ Chain exists of:
+ (wq_completion)smc_tx_wq-00000000#2
+ --> smc_client_lgr_pending
+ --> (work_completion)(&(&lgr->free_work)->work)
+ Possible unsafe locking scenario:
+        CPU0                           CPU1
+        ----                           ----
+   lock((work_completion)(&(&lgr->free_work)->work));
+                                       lock(smc_client_lgr_pending);
+                                       lock((work_completion)(&(&lgr->free_work)->work));
+   lock((wq_completion)smc_tx_wq-00000000#2);
+ *** DEADLOCK ***
+ 2 locks held by kworker/3:0/176251:
+ #0: 0000000080183548
+ ((wq_completion)events){+.+.}-{0:0},
+ at: process_one_work+0x232/0x730
+ #1: 0000037fffe97dc8
+ ((work_completion)
+ (&(&lgr->free_work)->work)){+.+.}-{0:0},
+ at: process_one_work+0x232/0x730
+ stack backtrace:
+ CPU: 3 PID: 176251 Comm: kworker/3:0 Not tainted
+ Hardware name: IBM 8561 T01 701 (z/VM 7.2.0)
+ Call Trace:
+ [<000000002983c3e4>] dump_stack_lvl+0xac/0x100
+ [<0000000028b477ae>] check_noncircular+0x13e/0x160
+ [<0000000028b48808>] check_prev_add+0xd8/0xe88
+ [<0000000028b49cc4>] validate_chain+0x70c/0xb20
+ [<0000000028b4bd26>] __lock_acquire+0x58e/0xbd8
+ [<0000000028b4cf6a>] lock_acquire.part.0+0xe2/0x248
+ [<0000000028b4d17c>] lock_acquire+0xac/0x1c8
+ [<0000000028addaaa>] __flush_workqueue+0xaa/0x4f0
+ [<0000000028addf9a>] drain_workqueue+0xaa/0x158
+ [<0000000028ae303c>] destroy_workqueue+0x44/0x2d8
+ [<000003ff8029af26>] smc_lgr_free+0x9e/0xf8 [smc]
+ [<0000000028adf3d4>] process_one_work+0x30c/0x730
+ [<0000000028adf85a>] worker_thread+0x62/0x420
+ [<0000000028aeac50>] kthread+0x138/0x150
+ [<0000000028a63914>] __ret_from_fork+0x3c/0x58
+ [<00000000298503da>] ret_from_fork+0xa/0x40
+ INFO: lockdep is turned off.
+===================================================================
+
+This deadlock occurs because cancel_delayed_work_sync() waits for
+the work item (&lgr->free_work) to finish, while &lgr->free_work
+itself waits for the work on lgr->tx_wq, which needs the
+sk_lock-AF_SMC, which is already held under the mutex_lock.
+
+The solution is to use cancel_delayed_work() instead, which only
+kills off a pending work item without waiting for a running one.
+
+Fixes: a52bcc919b14 ("net/smc: improve termination processing")
+Signed-off-by: Wenjia Zhang <wenjia@linux.ibm.com>
+Reviewed-by: Jan Karcher <jaka@linux.ibm.com>
+Reviewed-by: Karsten Graul <kgraul@linux.ibm.com>
+Reviewed-by: Tony Lu <tonylu@linux.alibaba.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/smc/smc_core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
+index c19d4b7c1f28a..0208dfb353456 100644
+--- a/net/smc/smc_core.c
++++ b/net/smc/smc_core.c
+@@ -1459,7 +1459,7 @@ static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
+ if (lgr->terminating)
+ return; /* lgr already terminating */
+ /* cancel free_work sync, will terminate when lgr->freeing is set */
+- cancel_delayed_work_sync(&lgr->free_work);
++ cancel_delayed_work(&lgr->free_work);
+ lgr->terminating = 1;
+
+ /* kill remaining link group connections */
+--
+2.39.2
+
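For context on why switching the call breaks the cycle: cancel_delayed_work() only removes a pending (not yet running) work item and returns immediately, whereas cancel_delayed_work_sync() additionally waits for a running instance, and that wait is what closes the lock chain above. The pthread sketch below mirrors the shape of the fix in user space; group, free_worker and terminate are illustrative names, not the SMC code, and the atomic flag stands in for lgr->freeing.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

struct group {
        pthread_mutex_t lock;   /* plays the role of the locks held in the terminate path */
        atomic_bool freeing;    /* plays the role of lgr->freeing */
};

static void *free_worker(void *arg)
{
        struct group *g = arg;

        usleep(1000);                           /* stand-in for the work's delay */
        if (atomic_load(&g->freeing))
                return NULL;                    /* terminates on its own once freeing is set */
        pthread_mutex_lock(&g->lock);           /* would deadlock if terminate() joined us while holding the lock */
        puts("freeing group resources");
        pthread_mutex_unlock(&g->lock);
        return NULL;
}

static void terminate(struct group *g)
{
        pthread_mutex_lock(&g->lock);
        atomic_store(&g->freeing, true);        /* cancel_delayed_work(): no waiting here */
        pthread_mutex_unlock(&g->lock);
}

int main(void)
{
        struct group g = { .freeing = false };
        pthread_t t;

        pthread_mutex_init(&g.lock, NULL);
        pthread_create(&t, NULL, free_worker, &g);
        terminate(&g);
        pthread_join(&t, NULL);                 /* safe: no locks are held while waiting */
        pthread_mutex_destroy(&g.lock);
        return 0;
}
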
--- /dev/null
+From d13cace9d0d81b9bad1d6993e6acbd85dc87f82d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 8 Mar 2023 16:17:12 +0800
+Subject: net/smc: fix NULL sndbuf_desc in smc_cdc_tx_handler()
+
+From: D. Wythe <alibuda@linux.alibaba.com>
+
+[ Upstream commit 22a825c541d775c1dbe7b2402786025acad6727b ]
+
+When performing a stress test on SMC-R by removing the mlx5_ib driver
+(rmmod) during a wrk/nginx test, we found that there is a probability
+of triggering a panic while terminating all link groups.
+
+This issue is due to the race between smc_smcr_terminate_all()
+and smc_buf_create().
+
+smc_smcr_terminate_all              smc_buf_create
+                                    /* init */
+                                    conn->sndbuf_desc = NULL;
+                                    ...
+
+__smc_lgr_terminate
+  smc_conn_kill
+    smc_close_abort
+      smc_cdc_get_slot_and_msg_send
+
+__softirqentry_text_start
+  smc_wr_tx_process_cqe
+    smc_cdc_tx_handler
+      READ(conn->sndbuf_desc->len);
+      /* panic due to NULL sndbuf_desc */
+
+                                    conn->sndbuf_desc = xxx;
+
+This patch tries to fix the issue by always checking the sndbuf_desc
+before sending any CDC msg, to make sure that no NULL pointer is
+seen during CQE processing.
+
+Fixes: 0b29ec643613 ("net/smc: immediate termination for SMCR link groups")
+Signed-off-by: D. Wythe <alibuda@linux.alibaba.com>
+Reviewed-by: Tony Lu <tonylu@linux.alibaba.com>
+Reviewed-by: Wenjia Zhang <wenjia@linux.ibm.com>
+Link: https://lore.kernel.org/r/1678263432-17329-1-git-send-email-alibuda@linux.alibaba.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/smc/smc_cdc.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
+index 53f63bfbaf5f9..89105e95b4523 100644
+--- a/net/smc/smc_cdc.c
++++ b/net/smc/smc_cdc.c
+@@ -114,6 +114,9 @@ int smc_cdc_msg_send(struct smc_connection *conn,
+ union smc_host_cursor cfed;
+ int rc;
+
++ if (unlikely(!READ_ONCE(conn->sndbuf_desc)))
++ return -ENOBUFS;
++
+ smc_cdc_add_pending_send(conn, pend);
+
+ conn->tx_cdc_seq++;
+--
+2.39.2
+
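The added check is essentially "do not send while the buffer descriptor has not been published yet". A stand-alone C sketch of that guard, with a C11 atomic pointer standing in for the READ_ONCE()-style access (buf_desc, connection and cdc_msg_send are illustrative names, not the SMC structures):

#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

struct buf_desc {
        int len;
};

struct connection {
        _Atomic(struct buf_desc *) sndbuf_desc; /* NULL until the buffer is published */
};

static int cdc_msg_send(struct connection *conn)
{
        struct buf_desc *desc = atomic_load_explicit(&conn->sndbuf_desc,
                                                     memory_order_acquire);

        if (!desc)
                return -ENOBUFS;        /* bail out instead of letting the completion path crash */

        printf("sending CDC message, sndbuf len=%d\n", desc->len);
        return 0;
}

int main(void)
{
        static struct buf_desc desc = { .len = 65536 };
        struct connection conn = { .sndbuf_desc = NULL };

        printf("before publish: %d\n", cdc_msg_send(&conn));    /* -ENOBUFS */
        atomic_store_explicit(&conn.sndbuf_desc, &desc, memory_order_release);
        printf("after publish:  %d\n", cdc_msg_send(&conn));    /* 0 */
        return 0;
}
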
--- /dev/null
+From 456fdd1cce64ed7bdf904f57bb7effe4201cbcd5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Mar 2023 19:11:09 +0000
+Subject: net: tunnels: annotate lockless accesses to dev->needed_headroom
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 4b397c06cb987935b1b097336532aa6b4210e091 ]
+
+IP tunnels can apparently update dev->needed_headroom
+in their xmit path.
+
+This patch takes care of the xmit path of three tunnels, and also of
+the core LL_RESERVED_SPACE() and LL_RESERVED_SPACE_EXTRA()
+helpers.
+
+More changes might be needed for completeness.
+
+BUG: KCSAN: data-race in ip_tunnel_xmit / ip_tunnel_xmit
+
+read to 0xffff88815b9da0ec of 2 bytes by task 888 on cpu 1:
+ip_tunnel_xmit+0x1270/0x1730 net/ipv4/ip_tunnel.c:803
+__gre_xmit net/ipv4/ip_gre.c:469 [inline]
+ipgre_xmit+0x516/0x570 net/ipv4/ip_gre.c:661
+__netdev_start_xmit include/linux/netdevice.h:4881 [inline]
+netdev_start_xmit include/linux/netdevice.h:4895 [inline]
+xmit_one net/core/dev.c:3580 [inline]
+dev_hard_start_xmit+0x127/0x400 net/core/dev.c:3596
+__dev_queue_xmit+0x1007/0x1eb0 net/core/dev.c:4246
+dev_queue_xmit include/linux/netdevice.h:3051 [inline]
+neigh_direct_output+0x17/0x20 net/core/neighbour.c:1623
+neigh_output include/net/neighbour.h:546 [inline]
+ip_finish_output2+0x740/0x840 net/ipv4/ip_output.c:228
+ip_finish_output+0xf4/0x240 net/ipv4/ip_output.c:316
+NF_HOOK_COND include/linux/netfilter.h:291 [inline]
+ip_output+0xe5/0x1b0 net/ipv4/ip_output.c:430
+dst_output include/net/dst.h:444 [inline]
+ip_local_out+0x64/0x80 net/ipv4/ip_output.c:126
+iptunnel_xmit+0x34a/0x4b0 net/ipv4/ip_tunnel_core.c:82
+ip_tunnel_xmit+0x1451/0x1730 net/ipv4/ip_tunnel.c:813
+__gre_xmit net/ipv4/ip_gre.c:469 [inline]
+ipgre_xmit+0x516/0x570 net/ipv4/ip_gre.c:661
+__netdev_start_xmit include/linux/netdevice.h:4881 [inline]
+netdev_start_xmit include/linux/netdevice.h:4895 [inline]
+xmit_one net/core/dev.c:3580 [inline]
+dev_hard_start_xmit+0x127/0x400 net/core/dev.c:3596
+__dev_queue_xmit+0x1007/0x1eb0 net/core/dev.c:4246
+dev_queue_xmit include/linux/netdevice.h:3051 [inline]
+neigh_direct_output+0x17/0x20 net/core/neighbour.c:1623
+neigh_output include/net/neighbour.h:546 [inline]
+ip_finish_output2+0x740/0x840 net/ipv4/ip_output.c:228
+ip_finish_output+0xf4/0x240 net/ipv4/ip_output.c:316
+NF_HOOK_COND include/linux/netfilter.h:291 [inline]
+ip_output+0xe5/0x1b0 net/ipv4/ip_output.c:430
+dst_output include/net/dst.h:444 [inline]
+ip_local_out+0x64/0x80 net/ipv4/ip_output.c:126
+iptunnel_xmit+0x34a/0x4b0 net/ipv4/ip_tunnel_core.c:82
+ip_tunnel_xmit+0x1451/0x1730 net/ipv4/ip_tunnel.c:813
+__gre_xmit net/ipv4/ip_gre.c:469 [inline]
+ipgre_xmit+0x516/0x570 net/ipv4/ip_gre.c:661
+__netdev_start_xmit include/linux/netdevice.h:4881 [inline]
+netdev_start_xmit include/linux/netdevice.h:4895 [inline]
+xmit_one net/core/dev.c:3580 [inline]
+dev_hard_start_xmit+0x127/0x400 net/core/dev.c:3596
+__dev_queue_xmit+0x1007/0x1eb0 net/core/dev.c:4246
+dev_queue_xmit include/linux/netdevice.h:3051 [inline]
+neigh_direct_output+0x17/0x20 net/core/neighbour.c:1623
+neigh_output include/net/neighbour.h:546 [inline]
+ip_finish_output2+0x740/0x840 net/ipv4/ip_output.c:228
+ip_finish_output+0xf4/0x240 net/ipv4/ip_output.c:316
+NF_HOOK_COND include/linux/netfilter.h:291 [inline]
+ip_output+0xe5/0x1b0 net/ipv4/ip_output.c:430
+dst_output include/net/dst.h:444 [inline]
+ip_local_out+0x64/0x80 net/ipv4/ip_output.c:126
+iptunnel_xmit+0x34a/0x4b0 net/ipv4/ip_tunnel_core.c:82
+ip_tunnel_xmit+0x1451/0x1730 net/ipv4/ip_tunnel.c:813
+__gre_xmit net/ipv4/ip_gre.c:469 [inline]
+ipgre_xmit+0x516/0x570 net/ipv4/ip_gre.c:661
+__netdev_start_xmit include/linux/netdevice.h:4881 [inline]
+netdev_start_xmit include/linux/netdevice.h:4895 [inline]
+xmit_one net/core/dev.c:3580 [inline]
+dev_hard_start_xmit+0x127/0x400 net/core/dev.c:3596
+__dev_queue_xmit+0x1007/0x1eb0 net/core/dev.c:4246
+dev_queue_xmit include/linux/netdevice.h:3051 [inline]
+neigh_direct_output+0x17/0x20 net/core/neighbour.c:1623
+neigh_output include/net/neighbour.h:546 [inline]
+ip_finish_output2+0x740/0x840 net/ipv4/ip_output.c:228
+ip_finish_output+0xf4/0x240 net/ipv4/ip_output.c:316
+NF_HOOK_COND include/linux/netfilter.h:291 [inline]
+ip_output+0xe5/0x1b0 net/ipv4/ip_output.c:430
+dst_output include/net/dst.h:444 [inline]
+ip_local_out+0x64/0x80 net/ipv4/ip_output.c:126
+iptunnel_xmit+0x34a/0x4b0 net/ipv4/ip_tunnel_core.c:82
+ip_tunnel_xmit+0x1451/0x1730 net/ipv4/ip_tunnel.c:813
+__gre_xmit net/ipv4/ip_gre.c:469 [inline]
+ipgre_xmit+0x516/0x570 net/ipv4/ip_gre.c:661
+__netdev_start_xmit include/linux/netdevice.h:4881 [inline]
+netdev_start_xmit include/linux/netdevice.h:4895 [inline]
+xmit_one net/core/dev.c:3580 [inline]
+dev_hard_start_xmit+0x127/0x400 net/core/dev.c:3596
+__dev_queue_xmit+0x1007/0x1eb0 net/core/dev.c:4246
+dev_queue_xmit include/linux/netdevice.h:3051 [inline]
+neigh_direct_output+0x17/0x20 net/core/neighbour.c:1623
+neigh_output include/net/neighbour.h:546 [inline]
+ip_finish_output2+0x740/0x840 net/ipv4/ip_output.c:228
+ip_finish_output+0xf4/0x240 net/ipv4/ip_output.c:316
+NF_HOOK_COND include/linux/netfilter.h:291 [inline]
+ip_output+0xe5/0x1b0 net/ipv4/ip_output.c:430
+dst_output include/net/dst.h:444 [inline]
+ip_local_out+0x64/0x80 net/ipv4/ip_output.c:126
+iptunnel_xmit+0x34a/0x4b0 net/ipv4/ip_tunnel_core.c:82
+ip_tunnel_xmit+0x1451/0x1730 net/ipv4/ip_tunnel.c:813
+__gre_xmit net/ipv4/ip_gre.c:469 [inline]
+ipgre_xmit+0x516/0x570 net/ipv4/ip_gre.c:661
+__netdev_start_xmit include/linux/netdevice.h:4881 [inline]
+netdev_start_xmit include/linux/netdevice.h:4895 [inline]
+xmit_one net/core/dev.c:3580 [inline]
+dev_hard_start_xmit+0x127/0x400 net/core/dev.c:3596
+__dev_queue_xmit+0x1007/0x1eb0 net/core/dev.c:4246
+dev_queue_xmit include/linux/netdevice.h:3051 [inline]
+neigh_direct_output+0x17/0x20 net/core/neighbour.c:1623
+neigh_output include/net/neighbour.h:546 [inline]
+ip_finish_output2+0x740/0x840 net/ipv4/ip_output.c:228
+ip_finish_output+0xf4/0x240 net/ipv4/ip_output.c:316
+NF_HOOK_COND include/linux/netfilter.h:291 [inline]
+ip_output+0xe5/0x1b0 net/ipv4/ip_output.c:430
+dst_output include/net/dst.h:444 [inline]
+ip_local_out+0x64/0x80 net/ipv4/ip_output.c:126
+iptunnel_xmit+0x34a/0x4b0 net/ipv4/ip_tunnel_core.c:82
+ip_tunnel_xmit+0x1451/0x1730 net/ipv4/ip_tunnel.c:813
+__gre_xmit net/ipv4/ip_gre.c:469 [inline]
+ipgre_xmit+0x516/0x570 net/ipv4/ip_gre.c:661
+__netdev_start_xmit include/linux/netdevice.h:4881 [inline]
+netdev_start_xmit include/linux/netdevice.h:4895 [inline]
+xmit_one net/core/dev.c:3580 [inline]
+dev_hard_start_xmit+0x127/0x400 net/core/dev.c:3596
+__dev_queue_xmit+0x1007/0x1eb0 net/core/dev.c:4246
+
+write to 0xffff88815b9da0ec of 2 bytes by task 2379 on cpu 0:
+ip_tunnel_xmit+0x1294/0x1730 net/ipv4/ip_tunnel.c:804
+__gre_xmit net/ipv4/ip_gre.c:469 [inline]
+ipgre_xmit+0x516/0x570 net/ipv4/ip_gre.c:661
+__netdev_start_xmit include/linux/netdevice.h:4881 [inline]
+netdev_start_xmit include/linux/netdevice.h:4895 [inline]
+xmit_one net/core/dev.c:3580 [inline]
+dev_hard_start_xmit+0x127/0x400 net/core/dev.c:3596
+__dev_queue_xmit+0x1007/0x1eb0 net/core/dev.c:4246
+dev_queue_xmit include/linux/netdevice.h:3051 [inline]
+neigh_direct_output+0x17/0x20 net/core/neighbour.c:1623
+neigh_output include/net/neighbour.h:546 [inline]
+ip6_finish_output2+0x9bc/0xc50 net/ipv6/ip6_output.c:134
+__ip6_finish_output net/ipv6/ip6_output.c:195 [inline]
+ip6_finish_output+0x39a/0x4e0 net/ipv6/ip6_output.c:206
+NF_HOOK_COND include/linux/netfilter.h:291 [inline]
+ip6_output+0xeb/0x220 net/ipv6/ip6_output.c:227
+dst_output include/net/dst.h:444 [inline]
+NF_HOOK include/linux/netfilter.h:302 [inline]
+mld_sendpack+0x438/0x6a0 net/ipv6/mcast.c:1820
+mld_send_cr net/ipv6/mcast.c:2121 [inline]
+mld_ifc_work+0x519/0x7b0 net/ipv6/mcast.c:2653
+process_one_work+0x3e6/0x750 kernel/workqueue.c:2390
+worker_thread+0x5f2/0xa10 kernel/workqueue.c:2537
+kthread+0x1ac/0x1e0 kernel/kthread.c:376
+ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:308
+
+value changed: 0x0dd4 -> 0x0e14
+
+Reported by Kernel Concurrency Sanitizer on:
+CPU: 0 PID: 2379 Comm: kworker/0:0 Not tainted 6.3.0-rc1-syzkaller-00002-g8ca09d5fa354-dirty #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 03/02/2023
+Workqueue: mld mld_ifc_work
+
+Fixes: 8eb30be0352d ("ipv6: Create ip6_tnl_xmit")
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Link: https://lore.kernel.org/r/20230310191109.2384387-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/netdevice.h | 6 ++++--
+ net/ipv4/ip_tunnel.c | 12 ++++++------
+ net/ipv6/ip6_tunnel.c | 4 ++--
+ 3 files changed, 12 insertions(+), 10 deletions(-)
+
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index ba2bd604359d4..b072449b0f1ac 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -294,9 +294,11 @@ struct hh_cache {
+ * relationship HH alignment <= LL alignment.
+ */
+ #define LL_RESERVED_SPACE(dev) \
+- ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
++ ((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom)) \
++ & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
+ #define LL_RESERVED_SPACE_EXTRA(dev,extra) \
+- ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
++ ((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom) + (extra)) \
++ & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
+
+ struct header_ops {
+ int (*create) (struct sk_buff *skb, struct net_device *dev,
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index 019f3b0839c52..24961b304dad0 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -614,10 +614,10 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+ }
+
+ headroom += LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len;
+- if (headroom > dev->needed_headroom)
+- dev->needed_headroom = headroom;
++ if (headroom > READ_ONCE(dev->needed_headroom))
++ WRITE_ONCE(dev->needed_headroom, headroom);
+
+- if (skb_cow_head(skb, dev->needed_headroom)) {
++ if (skb_cow_head(skb, READ_ONCE(dev->needed_headroom))) {
+ ip_rt_put(rt);
+ goto tx_dropped;
+ }
+@@ -800,10 +800,10 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+
+ max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
+ + rt->dst.header_len + ip_encap_hlen(&tunnel->encap);
+- if (max_headroom > dev->needed_headroom)
+- dev->needed_headroom = max_headroom;
++ if (max_headroom > READ_ONCE(dev->needed_headroom))
++ WRITE_ONCE(dev->needed_headroom, max_headroom);
+
+- if (skb_cow_head(skb, dev->needed_headroom)) {
++ if (skb_cow_head(skb, READ_ONCE(dev->needed_headroom))) {
+ ip_rt_put(rt);
+ dev->stats.tx_dropped++;
+ kfree_skb(skb);
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 2fb4c6ad72432..afc922c88d179 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -1241,8 +1241,8 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
+ */
+ max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr)
+ + dst->header_len + t->hlen;
+- if (max_headroom > dev->needed_headroom)
+- dev->needed_headroom = max_headroom;
++ if (max_headroom > READ_ONCE(dev->needed_headroom))
++ WRITE_ONCE(dev->needed_headroom, max_headroom);
+
+ err = ip6_tnl_encap(skb, t, &proto, fl6);
+ if (err)
+--
+2.39.2
+
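For reference, the LL_RESERVED_SPACE() macros that now use READ_ONCE() round the sum of hard_header_len and needed_headroom up to the 16-byte HH_DATA_MOD alignment. The small program below only reproduces that arithmetic with a typical 14-byte Ethernet header and the two needed_headroom values from the KCSAN report (0x0dd4 and 0x0e14); it is an illustration of the computation the racy field feeds into, not the kernel macro itself.

#include <stdio.h>

#define HH_DATA_MOD 16

static unsigned int ll_reserved_space(unsigned int hard_header_len,
                                      unsigned int needed_headroom)
{
        /* round the sum down to a multiple of HH_DATA_MOD, then add one full block */
        return ((hard_header_len + needed_headroom) & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD;
}

int main(void)
{
        /* 0x0dd4 -> 0x0e14 is the racy needed_headroom change KCSAN reported */
        printf("headroom 0x0dd4 -> reserve %u bytes\n", ll_reserved_space(14, 0x0dd4)); /* 3568 */
        printf("headroom 0x0e14 -> reserve %u bytes\n", ll_reserved_space(14, 0x0e14)); /* 3632 */
        return 0;
}
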
--- /dev/null
+From ef3d520d018bd59604b83606688951bd3ad21627 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Mar 2023 23:00:45 +0100
+Subject: net: usb: smsc75xx: Limit packet length to skb->len
+
+From: Szymon Heidrich <szymon.heidrich@gmail.com>
+
+[ Upstream commit d8b228318935044dafe3a5bc07ee71a1f1424b8d ]
+
+The packet length retrieved from the skb data may be larger than
+the actual socket buffer length (up to 9026 bytes). In such a
+case the cloned skb passed up the network stack will leak
+kernel memory contents.
+
+Fixes: d0cad871703b ("smsc75xx: SMSC LAN75xx USB gigabit ethernet adapter driver")
+Signed-off-by: Szymon Heidrich <szymon.heidrich@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/usb/smsc75xx.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
+index 95de452ff4dad..db34f8d1d6051 100644
+--- a/drivers/net/usb/smsc75xx.c
++++ b/drivers/net/usb/smsc75xx.c
+@@ -2212,7 +2212,8 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ dev->net->stats.rx_frame_errors++;
+ } else {
+ /* MAX_SINGLE_PACKET_SIZE + 4(CRC) + 2(COE) + 4(Vlan) */
+- if (unlikely(size > (MAX_SINGLE_PACKET_SIZE + ETH_HLEN + 12))) {
++ if (unlikely(size > (MAX_SINGLE_PACKET_SIZE + ETH_HLEN + 12) ||
++ size > skb->len)) {
+ netif_dbg(dev, rx_err, dev->net,
+ "size err rx_cmd_a=0x%08x\n",
+ rx_cmd_a);
+--
+2.39.2
+
--- /dev/null
+From c48b4f619a6f9fd078fda0652b57428886013a63 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Mar 2023 12:05:40 +0100
+Subject: net: usb: smsc75xx: Move packet length check to prevent kernel panic
+ in skb_pull
+
+From: Szymon Heidrich <szymon.heidrich@gmail.com>
+
+[ Upstream commit 43ffe6caccc7a1bb9d7442fbab521efbf6c1378c ]
+
+The packet length check needs to be located after the size and align_count
+calculation to prevent a kernel panic in skb_pull() in case
+rx_cmd_a & RX_CMD_A_RED evaluates to true.
+
+Fixes: d8b228318935 ("net: usb: smsc75xx: Limit packet length to skb->len")
+Signed-off-by: Szymon Heidrich <szymon.heidrich@gmail.com>
+Link: https://lore.kernel.org/r/20230316110540.77531-1-szymon.heidrich@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/usb/smsc75xx.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
+index db34f8d1d6051..5d6454fedb3f1 100644
+--- a/drivers/net/usb/smsc75xx.c
++++ b/drivers/net/usb/smsc75xx.c
+@@ -2200,6 +2200,13 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ size = (rx_cmd_a & RX_CMD_A_LEN) - RXW_PADDING;
+ align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
+
++ if (unlikely(size > skb->len)) {
++ netif_dbg(dev, rx_err, dev->net,
++ "size err rx_cmd_a=0x%08x\n",
++ rx_cmd_a);
++ return 0;
++ }
++
+ if (unlikely(rx_cmd_a & RX_CMD_A_RED)) {
+ netif_dbg(dev, rx_err, dev->net,
+ "Error rx_cmd_a=0x%08x\n", rx_cmd_a);
+@@ -2212,8 +2219,7 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ dev->net->stats.rx_frame_errors++;
+ } else {
+ /* MAX_SINGLE_PACKET_SIZE + 4(CRC) + 2(COE) + 4(Vlan) */
+- if (unlikely(size > (MAX_SINGLE_PACKET_SIZE + ETH_HLEN + 12) ||
+- size > skb->len)) {
++ if (unlikely(size > (MAX_SINGLE_PACKET_SIZE + ETH_HLEN + 12))) {
+ netif_dbg(dev, rx_err, dev->net,
+ "size err rx_cmd_a=0x%08x\n",
+ rx_cmd_a);
+--
+2.39.2
+
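Taken together, the two smsc75xx fixes above ensure that the length claimed by the RX command word is validated against the bytes actually present before anything is consumed. The following user-space parser shows the same idea; the 4-byte header, the 0x3fff length mask and MAX_PACKET_SIZE are simplified stand-ins, not the driver's real register layout.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_PACKET_SIZE 9000                    /* illustrative cap, not the real register limit */

static void rx_fixup(const uint8_t *buf, size_t len)
{
        while (len >= 4) {
                uint32_t rx_cmd_a;
                size_t size;

                memcpy(&rx_cmd_a, buf, 4);
                size = rx_cmd_a & 0x3fff;       /* length claimed by the frame header */

                /* validate before consuming anything, as the second fix does */
                if (size > len - 4 || size > MAX_PACKET_SIZE) {
                        fprintf(stderr, "size err rx_cmd_a=0x%08x\n", (unsigned int)rx_cmd_a);
                        return;                 /* drop: the header lies about the length */
                }

                printf("frame of %zu bytes\n", size);
                buf += 4 + size;                /* safe: size was already validated */
                len -= 4 + size;
        }
}

int main(void)
{
        uint8_t buf[64] = { 0 };
        uint32_t bogus = 9026;                  /* claims more data than the buffer holds */

        memcpy(buf, &bogus, 4);
        rx_fixup(buf, sizeof(buf));
        return 0;
}
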
--- /dev/null
+From 58e611e488ffa469c66d09a9d0334c483396879d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Mar 2023 23:22:57 +0000
+Subject: netfilter: nft_masq: correct length for loading protocol registers
+
+From: Jeremy Sowden <jeremy@azazel.net>
+
+[ Upstream commit ec2c5917eb858428b2083d1c74f445aabbe8316b ]
+
+The values in the protocol registers are two bytes wide. However, when
+parsing the register loads, the code currently uses the larger 16-byte
+size of a `union nf_inet_addr`. Change it to use the (correct) size of
+a `union nf_conntrack_man_proto` instead.
+
+Fixes: 8a6bf5da1aef ("netfilter: nft_masq: support port range")
+Signed-off-by: Jeremy Sowden <jeremy@azazel.net>
+Reviewed-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nft_masq.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/netfilter/nft_masq.c b/net/netfilter/nft_masq.c
+index 2a0adc497bbb4..026b4f87d96cc 100644
+--- a/net/netfilter/nft_masq.c
++++ b/net/netfilter/nft_masq.c
+@@ -43,7 +43,7 @@ static int nft_masq_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+ {
+- u32 plen = sizeof_field(struct nf_nat_range, min_addr.all);
++ u32 plen = sizeof_field(struct nf_nat_range, min_proto.all);
+ struct nft_masq *priv = nft_expr_priv(expr);
+ int err;
+
+--
+2.39.2
+
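This patch and the following nft_nat and nft_redir patches apply the same one-line change. The difference between the two sizeof_field() expressions is easy to demonstrate with trimmed-down copies of the structures (reduced here to the members of interest; not the full kernel definitions):

#include <stdint.h>
#include <stdio.h>

#define sizeof_field(TYPE, MEMBER) sizeof(((TYPE *)0)->MEMBER)

union nf_inet_addr {                    /* IPv4 or IPv6 address */
        uint32_t all[4];
        uint32_t ip;
        uint32_t ip6[4];
};

union nf_conntrack_man_proto {          /* per-protocol part: ports, icmp id, ... */
        uint16_t all;
        uint16_t port;
        uint16_t id;
};

struct nf_nat_range {                   /* reduced to the two members of interest */
        union nf_inet_addr min_addr;
        union nf_conntrack_man_proto min_proto;
};

int main(void)
{
        printf("old plen = %zu bytes (min_addr.all)\n",
               sizeof_field(struct nf_nat_range, min_addr.all));       /* 16 */
        printf("new plen = %zu bytes (min_proto.all)\n",
               sizeof_field(struct nf_nat_range, min_proto.all));      /* 2 */
        return 0;
}

With the old expression the register load was validated against a 16-byte width even though the protocol register only carries a 2-byte port value.
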
--- /dev/null
+From 31d39f5f070082f2fc99b8cd345406fc1e948843 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Mar 2023 23:22:56 +0000
+Subject: netfilter: nft_nat: correct length for loading protocol registers
+
+From: Jeremy Sowden <jeremy@azazel.net>
+
+[ Upstream commit 068d82e75d537b444303b8c449a11e51ea659565 ]
+
+The values in the protocol registers are two bytes wide. However, when
+parsing the register loads, the code currently uses the larger 16-byte
+size of a `union nf_inet_addr`. Change it to use the (correct) size of
+a `union nf_conntrack_man_proto` instead.
+
+Fixes: d07db9884a5f ("netfilter: nf_tables: introduce nft_validate_register_load()")
+Signed-off-by: Jeremy Sowden <jeremy@azazel.net>
+Reviewed-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nft_nat.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
+index e5fd6995e4bf3..353c090f88917 100644
+--- a/net/netfilter/nft_nat.c
++++ b/net/netfilter/nft_nat.c
+@@ -226,7 +226,7 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+ priv->flags |= NF_NAT_RANGE_MAP_IPS;
+ }
+
+- plen = sizeof_field(struct nf_nat_range, min_addr.all);
++ plen = sizeof_field(struct nf_nat_range, min_proto.all);
+ if (tb[NFTA_NAT_REG_PROTO_MIN]) {
+ err = nft_parse_register_load(tb[NFTA_NAT_REG_PROTO_MIN],
+ &priv->sreg_proto_min, plen);
+--
+2.39.2
+
--- /dev/null
+From f20a7a02ea16d531e7f49d557b6beb48ab117c01 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Mar 2023 23:22:58 +0000
+Subject: netfilter: nft_redir: correct length for loading protocol registers
+
+From: Jeremy Sowden <jeremy@azazel.net>
+
+[ Upstream commit 1f617b6b4c7a3d5ea7a56abb83a4c27733b60c2f ]
+
+The values in the protocol registers are two bytes wide. However, when
+parsing the register loads, the code currently uses the larger 16-byte
+size of a `union nf_inet_addr`. Change it to use the (correct) size of
+a `union nf_conntrack_man_proto` instead.
+
+Fixes: d07db9884a5f ("netfilter: nf_tables: introduce nft_validate_register_load()")
+Signed-off-by: Jeremy Sowden <jeremy@azazel.net>
+Reviewed-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nft_redir.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/netfilter/nft_redir.c b/net/netfilter/nft_redir.c
+index 5086adfe731cb..7ae330d75ac7b 100644
+--- a/net/netfilter/nft_redir.c
++++ b/net/netfilter/nft_redir.c
+@@ -48,7 +48,7 @@ static int nft_redir_init(const struct nft_ctx *ctx,
+ unsigned int plen;
+ int err;
+
+- plen = sizeof_field(struct nf_nat_range, min_addr.all);
++ plen = sizeof_field(struct nf_nat_range, min_proto.all);
+ if (tb[NFTA_REDIR_REG_PROTO_MIN]) {
+ err = nft_parse_register_load(tb[NFTA_REDIR_REG_PROTO_MIN],
+ &priv->sreg_proto_min, plen);
+--
+2.39.2
+
--- /dev/null
+From cf0613a26e9c6eb4a73975a1bfc96c129786b97a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Mar 2023 23:22:59 +0000
+Subject: netfilter: nft_redir: correct value of inet type `.maxattrs`
+
+From: Jeremy Sowden <jeremy@azazel.net>
+
+[ Upstream commit 493924519b1fe3faab13ee621a43b0d0939abab1 ]
+
+`nft_redir_inet_type.maxattrs` was being set, presumably because of a
+cut-and-paste error, to `NFTA_MASQ_MAX`, instead of `NFTA_REDIR_MAX`.
+
+Fixes: 63ce3940f3ab ("netfilter: nft_redir: add inet support")
+Signed-off-by: Jeremy Sowden <jeremy@azazel.net>
+Reviewed-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nft_redir.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/netfilter/nft_redir.c b/net/netfilter/nft_redir.c
+index 7ae330d75ac7b..5ed64b2bd15e8 100644
+--- a/net/netfilter/nft_redir.c
++++ b/net/netfilter/nft_redir.c
+@@ -235,7 +235,7 @@ static struct nft_expr_type nft_redir_inet_type __read_mostly = {
+ .name = "redir",
+ .ops = &nft_redir_inet_ops,
+ .policy = nft_redir_policy,
+- .maxattr = NFTA_MASQ_MAX,
++ .maxattr = NFTA_REDIR_MAX,
+ .owner = THIS_MODULE,
+ };
+
+--
+2.39.2
+
--- /dev/null
+From e4fbe5a38a092c99c486d1ea75ee84c4606866d5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 9 Mar 2023 19:50:50 +0300
+Subject: nfc: pn533: initialize struct pn533_out_arg properly
+
+From: Fedor Pchelkin <pchelkin@ispras.ru>
+
+[ Upstream commit 484b7059796e3bc1cb527caa61dfc60da649b4f6 ]
+
+struct pn533_out_arg used as a temporary context for out_urb is not
+initialized properly. Its uninitialized 'phy' field can be dereferenced in
+error cases inside pn533_out_complete() callback function. It causes the
+following failure:
+
+general protection fault, probably for non-canonical address 0xdffffc0000000000: 0000 [#1] PREEMPT SMP KASAN
+KASAN: null-ptr-deref in range [0x0000000000000000-0x0000000000000007]
+CPU: 1 PID: 0 Comm: swapper/1 Not tainted 6.2.0-rc3-next-20230110-syzkaller #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 10/26/2022
+RIP: 0010:pn533_out_complete.cold+0x15/0x44 drivers/nfc/pn533/usb.c:441
+Call Trace:
+ <IRQ>
+ __usb_hcd_giveback_urb+0x2b6/0x5c0 drivers/usb/core/hcd.c:1671
+ usb_hcd_giveback_urb+0x384/0x430 drivers/usb/core/hcd.c:1754
+ dummy_timer+0x1203/0x32d0 drivers/usb/gadget/udc/dummy_hcd.c:1988
+ call_timer_fn+0x1da/0x800 kernel/time/timer.c:1700
+ expire_timers+0x234/0x330 kernel/time/timer.c:1751
+ __run_timers kernel/time/timer.c:2022 [inline]
+ __run_timers kernel/time/timer.c:1995 [inline]
+ run_timer_softirq+0x326/0x910 kernel/time/timer.c:2035
+ __do_softirq+0x1fb/0xaf6 kernel/softirq.c:571
+ invoke_softirq kernel/softirq.c:445 [inline]
+ __irq_exit_rcu+0x123/0x180 kernel/softirq.c:650
+ irq_exit_rcu+0x9/0x20 kernel/softirq.c:662
+ sysvec_apic_timer_interrupt+0x97/0xc0 arch/x86/kernel/apic/apic.c:1107
+
+Initialize the field with the pn533_usb_phy currently used.
+
+Found by Linux Verification Center (linuxtesting.org) with Syzkaller.
+
+Fixes: 9dab880d675b ("nfc: pn533: Wait for out_urb's completion in pn533_usb_send_frame()")
+Reported-by: syzbot+1e608ba4217c96d1952f@syzkaller.appspotmail.com
+Signed-off-by: Fedor Pchelkin <pchelkin@ispras.ru>
+Reviewed-by: Simon Horman <simon.horman@corigine.com>
+Link: https://lore.kernel.org/r/20230309165050.207390-1-pchelkin@ispras.ru
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nfc/pn533/usb.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c
+index ed9c5e2cf3ad4..a187f0e0b0f7d 100644
+--- a/drivers/nfc/pn533/usb.c
++++ b/drivers/nfc/pn533/usb.c
+@@ -175,6 +175,7 @@ static int pn533_usb_send_frame(struct pn533 *dev,
+ print_hex_dump_debug("PN533 TX: ", DUMP_PREFIX_NONE, 16, 1,
+ out->data, out->len, false);
+
++ arg.phy = phy;
+ init_completion(&arg.done);
+ cntx = phy->out_urb->context;
+ phy->out_urb->context = &arg;
+--
+2.39.2
+
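The fix is a reminder that an on-stack context handed to an asynchronous completion routine must have every field the handler may dereference filled in before the request is submitted. A minimal sketch of that shape (phy, out_arg and out_complete_error are hypothetical names, not the pn533 structures):

#include <stdio.h>

struct phy {
        const char *name;
};

struct out_arg {
        struct phy *phy;        /* the field that was left uninitialized */
        int done;
};

/* error path of the completion handler: dereferences ctx->phy */
static void out_complete_error(struct out_arg *ctx, int status)
{
        fprintf(stderr, "%s: urb failed, status %d\n", ctx->phy->name, status);
}

int main(void)
{
        struct phy phy = { .name = "pn533-usb-phy" };
        struct out_arg arg = { .phy = &phy, .done = 0 };        /* every field set before submission */

        out_complete_error(&arg, -2);   /* e.g. -ENOENT */
        return 0;
}
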
--- /dev/null
+From adea77778829e04bcbd3aa3dd73ff30adb875792 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Mar 2023 00:08:37 +0800
+Subject: nfc: st-nci: Fix use after free bug in ndlc_remove due to race
+ condition
+
+From: Zheng Wang <zyytlz.wz@163.com>
+
+[ Upstream commit 5000fe6c27827a61d8250a7e4a1d26c3298ef4f6 ]
+
+This bug affects both st_nci_i2c_remove and st_nci_spi_remove.
+Take st_nci_i2c_remove as an example.
+
+st_nci_i2c_probe calls ndlc_probe, which binds &ndlc->sm_work
+to llt_ndlc_sm_work.
+
+When ndlc_recv or a timeout handler runs, it eventually calls
+schedule_work to start that work.
+
+When st_nci_i2c_remove is called to remove the driver, the
+following sequence is possible:
+
+CPU0                     CPU1
+
+                         |llt_ndlc_sm_work
+st_nci_i2c_remove        |
+  ndlc_remove            |
+    st_nci_remove        |
+      nci_free_device    |
+        kfree(ndev)      |
+        //free ndlc->ndev|
+                         |llt_ndlc_rcv_queue
+                         |  nci_recv_frame
+                         |    //use ndlc->ndev
+
+Fix it by finishing the work before the cleanup in ndlc_remove.
+
+Fixes: 35630df68d60 ("NFC: st21nfcb: Add driver for STMicroelectronics ST21NFCB NFC chip")
+Signed-off-by: Zheng Wang <zyytlz.wz@163.com>
+Reviewed-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Link: https://lore.kernel.org/r/20230312160837.2040857-1-zyytlz.wz@163.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nfc/st-nci/ndlc.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/nfc/st-nci/ndlc.c b/drivers/nfc/st-nci/ndlc.c
+index 755460a73c0dc..d2aa9f766738e 100644
+--- a/drivers/nfc/st-nci/ndlc.c
++++ b/drivers/nfc/st-nci/ndlc.c
+@@ -282,13 +282,15 @@ EXPORT_SYMBOL(ndlc_probe);
+
+ void ndlc_remove(struct llt_ndlc *ndlc)
+ {
+- st_nci_remove(ndlc->ndev);
+-
+ /* cancel timers */
+ del_timer_sync(&ndlc->t1_timer);
+ del_timer_sync(&ndlc->t2_timer);
+ ndlc->t2_active = false;
+ ndlc->t1_active = false;
++ /* cancel work */
++ cancel_work_sync(&ndlc->sm_work);
++
++ st_nci_remove(ndlc->ndev);
+
+ skb_queue_purge(&ndlc->rcv_q);
+ skb_queue_purge(&ndlc->send_q);
+--
+2.39.2
+
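The reordering follows a general teardown rule: stop and wait for the asynchronous worker before freeing the objects it dereferences. A small pthread sketch of the corrected ndlc_remove() ordering, assuming made-up ndlc/ndev types and an atomic stop flag in place of cancel_work_sync():

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct ndev {
        int id;
};

struct ndlc {
        struct ndev *ndev;
        atomic_bool stop;
        pthread_t worker;
};

static void *sm_work(void *arg)
{
        struct ndlc *ndlc = arg;

        while (!atomic_load(&ndlc->stop)) {
                printf("processing frame for ndev %d\n", ndlc->ndev->id);
                usleep(1000);
        }
        return NULL;
}

static void ndlc_remove(struct ndlc *ndlc)
{
        /* cancel the work first ... */
        atomic_store(&ndlc->stop, true);
        pthread_join(ndlc->worker, NULL);       /* the cancel_work_sync() equivalent */

        /* ... and only then free what the work was using */
        free(ndlc->ndev);
        ndlc->ndev = NULL;
}

int main(void)
{
        struct ndlc ndlc = { .ndev = malloc(sizeof(*ndlc.ndev)), .stop = false };

        if (!ndlc.ndev)
                return 1;
        ndlc.ndev->id = 1;
        pthread_create(&ndlc.worker, NULL, sm_work, &ndlc);
        usleep(3000);
        ndlc_remove(&ndlc);
        return 0;
}
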
--- /dev/null
+From e104f9ebc5088a6df55d6c7c52204ef4bb83be55 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 4 Mar 2023 07:13:45 +0800
+Subject: nvme: fix handling single range discard request
+
+From: Ming Lei <ming.lei@redhat.com>
+
+[ Upstream commit 37f0dc2ec78af0c3f35dd05578763de059f6fe77 ]
+
+When investigating a customer report of a warning in nvme_setup_discard,
+we observed that the controller (nvme/tcp) actually exposes
+queue_max_discard_segments(req->q) == 1.
+
+Obviously the current code can't handle this situation, since the bios
+are merged for contiguity like a normal read/write request.
+
+Fix the issue by building the range from the request's sector/nr_sectors
+directly.
+
+Fixes: b35ba01ea697 ("nvme: support ranged discard requests")
+Signed-off-by: Ming Lei <ming.lei@redhat.com>
+Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/core.c | 28 +++++++++++++++++++---------
+ 1 file changed, 19 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 2031fd960549c..a95e48b51da66 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -779,16 +779,26 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
+ range = page_address(ns->ctrl->discard_page);
+ }
+
+- __rq_for_each_bio(bio, req) {
+- u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
+- u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
+-
+- if (n < segments) {
+- range[n].cattr = cpu_to_le32(0);
+- range[n].nlb = cpu_to_le32(nlb);
+- range[n].slba = cpu_to_le64(slba);
++ if (queue_max_discard_segments(req->q) == 1) {
++ u64 slba = nvme_sect_to_lba(ns, blk_rq_pos(req));
++ u32 nlb = blk_rq_sectors(req) >> (ns->lba_shift - 9);
++
++ range[0].cattr = cpu_to_le32(0);
++ range[0].nlb = cpu_to_le32(nlb);
++ range[0].slba = cpu_to_le64(slba);
++ n = 1;
++ } else {
++ __rq_for_each_bio(bio, req) {
++ u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
++ u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
++
++ if (n < segments) {
++ range[n].cattr = cpu_to_le32(0);
++ range[n].nlb = cpu_to_le32(nlb);
++ range[n].slba = cpu_to_le64(slba);
++ }
++ n++;
+ }
+- n++;
+ }
+
+ if (WARN_ON_ONCE(n != segments)) {
+--
+2.39.2
+
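The single-range branch added above converts the request geometry directly: blk_rq_pos()/blk_rq_sectors() are expressed in 512-byte sectors, and the device LBA size is 1 << lba_shift, so the block count is sectors >> (lba_shift - 9). A plain-C illustration of just that arithmetic, with no NVMe headers involved:

#include <stdint.h>
#include <stdio.h>

static uint32_t sectors_to_nlb(uint32_t nr_sectors, unsigned int lba_shift)
{
        return nr_sectors >> (lba_shift - 9);   /* 9 == log2(512) */
}

int main(void)
{
        uint32_t nr_sectors = 2048;             /* a 1 MiB discard */

        printf("512B LBAs: nlb=%u\n", sectors_to_nlb(nr_sectors, 9));   /* 2048 */
        printf("4KiB LBAs: nlb=%u\n", sectors_to_nlb(nr_sectors, 12));  /* 256  */
        return 0;
}
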
--- /dev/null
+From 53b15e3a77b32b670daea0daafa4682a5d1982f0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 6 Mar 2023 10:13:13 +0900
+Subject: nvmet: avoid potential UAF in nvmet_req_complete()
+
+From: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+
+[ Upstream commit 6173a77b7e9d3e202bdb9897b23f2a8afe7bf286 ]
+
+An nvme target ->queue_response() operation implementation may free the
+request passed as argument. Such an implementation could potentially result
+in a use-after-free of the request pointer when percpu_ref_put() is
+called in nvmet_req_complete().
+
+Avoid such problem by using a local variable to save the sq pointer
+before calling __nvmet_req_complete(), thus avoiding dereferencing the
+req pointer after that function call.
+
+Fixes: a07b4970f464 ("nvmet: add a generic NVMe target")
+Signed-off-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/target/core.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
+index 683b75a992b3d..3235baf7cc6b1 100644
+--- a/drivers/nvme/target/core.c
++++ b/drivers/nvme/target/core.c
+@@ -755,8 +755,10 @@ static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
+
+ void nvmet_req_complete(struct nvmet_req *req, u16 status)
+ {
++ struct nvmet_sq *sq = req->sq;
++
+ __nvmet_req_complete(req, status);
+- percpu_ref_put(&req->sq->ref);
++ percpu_ref_put(&sq->ref);
+ }
+ EXPORT_SYMBOL_GPL(nvmet_req_complete);
+
+--
+2.39.2
+
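The fix is the classic "snapshot before the call that may free it" pattern: anything needed after __nvmet_req_complete() must be loaded into a local first. A user-space miniature of the same idea, with hypothetical sq/req structures standing in for nvmet_sq/nvmet_req:

#include <stdio.h>
#include <stdlib.h>

struct sq {
        int refcount;
};

struct req {
        struct sq *sq;
};

/* may hand the request to a backend that frees it */
static void complete_and_maybe_free(struct req *req)
{
        free(req);
}

static void req_complete(struct req *req)
{
        struct sq *sq = req->sq;        /* snapshot before req can disappear */

        complete_and_maybe_free(req);
        sq->refcount--;                 /* safe: req is not touched anymore */
        printf("sq refcount now %d\n", sq->refcount);
}

int main(void)
{
        static struct sq sq = { .refcount = 1 };
        struct req *req = malloc(sizeof(*req));

        if (!req)
                return 1;
        req->sq = &sq;
        req_complete(req);
        return 0;
}
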
--- /dev/null
+From d9a2aa045f2d03265f4acbb0f1e4e059ff6bb566 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 6 Mar 2023 16:10:11 +0100
+Subject: PCI: s390: Fix use-after-free of PCI resources with per-function
+ hotplug
+
+From: Niklas Schnelle <schnelle@linux.ibm.com>
+
+[ Upstream commit ab909509850b27fd39b8ba99e44cda39dbc3858c ]
+
+On s390 PCI functions may be hotplugged individually even when they
+belong to a multi-function device. In particular on an SR-IOV device VFs
+may be removed and later re-added.
+
+In commit a50297cf8235 ("s390/pci: separate zbus creation from
+scanning") it was missed however that struct pci_bus and struct
+zpci_bus's resource list retained a reference to the PCI functions MMIO
+resources even though those resources are released and freed on
+hot-unplug. These stale resources may subsequently be claimed when the
+PCI function re-appears resulting in use-after-free.
+
+One idea for fixing this use-after-free in s390-specific code that was
+investigated was to simply keep resources around from the moment a PCI
+function first appeared until the whole virtual PCI bus created for
+a multi-function device disappears. The problem with this, however, is
+that due to the requirement of artificial MMIO addresses (address
+cookies) extra logic is then needed to keep the address cookies
+compatible on re-plug. At the same time the MMIO resources semantically
+belong to the PCI function, so tying their lifecycle to the function
+seems more logical.
+
+Instead a simpler approach is to remove the resources of an individually
+hot-unplugged PCI function from the PCI bus's resource list while
+keeping the resources of other PCI functions on the PCI bus untouched.
+
+This is done by introducing pci_bus_remove_resource() to remove an
+individual resource. Similarly the resource also needs to be removed
+from the struct zpci_bus's resource list. It turns out however, that
+there is really no need to add the MMIO resources to the struct
+zpci_bus's resource list at all and instead we can simply use the
+zpci_bar_struct's resource pointer directly.
+
+Fixes: a50297cf8235 ("s390/pci: separate zbus creation from scanning")
+Signed-off-by: Niklas Schnelle <schnelle@linux.ibm.com>
+Reviewed-by: Matthew Rosato <mjrosato@linux.ibm.com>
+Acked-by: Bjorn Helgaas <bhelgaas@google.com>
+Link: https://lore.kernel.org/r/20230306151014.60913-2-schnelle@linux.ibm.com
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/pci/pci.c | 16 ++++++++++------
+ arch/s390/pci/pci_bus.c | 12 +++++-------
+ arch/s390/pci/pci_bus.h | 3 +--
+ drivers/pci/bus.c | 21 +++++++++++++++++++++
+ include/linux/pci.h | 1 +
+ 5 files changed, 38 insertions(+), 15 deletions(-)
+
+diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
+index 73cdc55393847..2c99f9552b2f5 100644
+--- a/arch/s390/pci/pci.c
++++ b/arch/s390/pci/pci.c
+@@ -544,8 +544,7 @@ static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start,
+ return r;
+ }
+
+-int zpci_setup_bus_resources(struct zpci_dev *zdev,
+- struct list_head *resources)
++int zpci_setup_bus_resources(struct zpci_dev *zdev)
+ {
+ unsigned long addr, size, flags;
+ struct resource *res;
+@@ -581,7 +580,6 @@ int zpci_setup_bus_resources(struct zpci_dev *zdev,
+ return -ENOMEM;
+ }
+ zdev->bars[i].res = res;
+- pci_add_resource(resources, res);
+ }
+ zdev->has_resources = 1;
+
+@@ -590,17 +588,23 @@ int zpci_setup_bus_resources(struct zpci_dev *zdev,
+
+ static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
+ {
++ struct resource *res;
+ int i;
+
++ pci_lock_rescan_remove();
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+- if (!zdev->bars[i].size || !zdev->bars[i].res)
++ res = zdev->bars[i].res;
++ if (!res)
+ continue;
+
++ release_resource(res);
++ pci_bus_remove_resource(zdev->zbus->bus, res);
+ zpci_free_iomap(zdev, zdev->bars[i].map_idx);
+- release_resource(zdev->bars[i].res);
+- kfree(zdev->bars[i].res);
++ zdev->bars[i].res = NULL;
++ kfree(res);
+ }
+ zdev->has_resources = 0;
++ pci_unlock_rescan_remove();
+ }
+
+ int pcibios_device_add(struct pci_dev *pdev)
+diff --git a/arch/s390/pci/pci_bus.c b/arch/s390/pci/pci_bus.c
+index 6a8da1b742ae5..a99926af2b69a 100644
+--- a/arch/s390/pci/pci_bus.c
++++ b/arch/s390/pci/pci_bus.c
+@@ -41,9 +41,7 @@ static int zpci_nb_devices;
+ */
+ static int zpci_bus_prepare_device(struct zpci_dev *zdev)
+ {
+- struct resource_entry *window, *n;
+- struct resource *res;
+- int rc;
++ int rc, i;
+
+ if (!zdev_enabled(zdev)) {
+ rc = zpci_enable_device(zdev);
+@@ -57,10 +55,10 @@ static int zpci_bus_prepare_device(struct zpci_dev *zdev)
+ }
+
+ if (!zdev->has_resources) {
+- zpci_setup_bus_resources(zdev, &zdev->zbus->resources);
+- resource_list_for_each_entry_safe(window, n, &zdev->zbus->resources) {
+- res = window->res;
+- pci_bus_add_resource(zdev->zbus->bus, res, 0);
++ zpci_setup_bus_resources(zdev);
++ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
++ if (zdev->bars[i].res)
++ pci_bus_add_resource(zdev->zbus->bus, zdev->bars[i].res, 0);
+ }
+ }
+
+diff --git a/arch/s390/pci/pci_bus.h b/arch/s390/pci/pci_bus.h
+index e96c9860e0644..af9f0ac79a1b1 100644
+--- a/arch/s390/pci/pci_bus.h
++++ b/arch/s390/pci/pci_bus.h
+@@ -30,8 +30,7 @@ static inline void zpci_zdev_get(struct zpci_dev *zdev)
+
+ int zpci_alloc_domain(int domain);
+ void zpci_free_domain(int domain);
+-int zpci_setup_bus_resources(struct zpci_dev *zdev,
+- struct list_head *resources);
++int zpci_setup_bus_resources(struct zpci_dev *zdev);
+
+ static inline struct zpci_dev *zdev_from_bus(struct pci_bus *bus,
+ unsigned int devfn)
+diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
+index 3cef835b375fd..feafa378bf8ea 100644
+--- a/drivers/pci/bus.c
++++ b/drivers/pci/bus.c
+@@ -76,6 +76,27 @@ struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n)
+ }
+ EXPORT_SYMBOL_GPL(pci_bus_resource_n);
+
++void pci_bus_remove_resource(struct pci_bus *bus, struct resource *res)
++{
++ struct pci_bus_resource *bus_res, *tmp;
++ int i;
++
++ for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
++ if (bus->resource[i] == res) {
++ bus->resource[i] = NULL;
++ return;
++ }
++ }
++
++ list_for_each_entry_safe(bus_res, tmp, &bus->resources, list) {
++ if (bus_res->res == res) {
++ list_del(&bus_res->list);
++ kfree(bus_res);
++ return;
++ }
++ }
++}
++
+ void pci_bus_remove_resources(struct pci_bus *bus)
+ {
+ int i;
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index cb538bc579710..d20695184e0b9 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -1417,6 +1417,7 @@ void pci_bus_add_resource(struct pci_bus *bus, struct resource *res,
+ unsigned int flags);
+ struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n);
+ void pci_bus_remove_resources(struct pci_bus *bus);
++void pci_bus_remove_resource(struct pci_bus *bus, struct resource *res);
+ int devm_request_pci_bus_resources(struct device *dev,
+ struct list_head *resources);
+
+--
+2.39.2
+
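The new pci_bus_remove_resource() walks the small fixed window array first and then the overflow list, removing only the one matching entry. The sketch below shows that shape with a generic user-space container (fixed slots plus a singly linked list); it is not the PCI core code.

#include <stdio.h>
#include <stdlib.h>

#define NUM_FIXED 4

struct res_node {
        void *res;
        struct res_node *next;
};

struct bus {
        void *fixed[NUM_FIXED];         /* the always-present slots */
        struct res_node *extra;         /* overflow entries */
};

static void bus_remove_resource(struct bus *bus, void *res)
{
        struct res_node **pp;
        int i;

        for (i = 0; i < NUM_FIXED; i++) {
                if (bus->fixed[i] == res) {
                        bus->fixed[i] = NULL;
                        return;
                }
        }

        for (pp = &bus->extra; *pp; pp = &(*pp)->next) {
                if ((*pp)->res == res) {
                        struct res_node *victim = *pp;

                        *pp = victim->next;     /* unlink just this entry */
                        free(victim);
                        return;
                }
        }
}

int main(void)
{
        struct bus bus = { { NULL } };
        int bar0, bar1;

        bus.fixed[0] = &bar0;
        bus.extra = malloc(sizeof(*bus.extra));
        if (!bus.extra)
                return 1;
        bus.extra->res = &bar1;
        bus.extra->next = NULL;

        bus_remove_resource(&bus, &bar1);       /* list entry gone, bar0 untouched */
        printf("fixed[0]=%p extra=%p\n", bus.fixed[0], (void *)bus.extra);
        return 0;
}
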
--- /dev/null
+From 716f963e097b7c4b95c78e7a8bdd6f87703a3456 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Mar 2023 16:08:34 +1100
+Subject: powerpc/mm: Fix false detection of read faults
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Russell Currey <ruscur@russell.cc>
+
+[ Upstream commit f2c7e3562b4c4f1699acc1538ebf3e75f5cced35 ]
+
+To support detection of read faults with Radix execute-only memory, the
+vma_is_accessible() check in access_error() (which checks for PROT_NONE)
+was replaced with a check for a missing VM_READ flag, returning true in
+that case to assert that the fault was caused by a bad read.
+
+This is incorrect, as it ignores that both VM_WRITE and VM_EXEC imply
+read on powerpc, as defined in protection_map[]. This causes mappings
+containing VM_WRITE or VM_EXEC without VM_READ to misreport the cause of
+page faults, since the MMU is still allowing reads.
+
+Correct this by restoring the original vma_is_accessible() check for
+PROT_NONE mappings, and adding a separate check for Radix PROT_EXEC-only
+mappings.
+
+Fixes: 395cac7752b9 ("powerpc/mm: Support execute-only memory on the Radix MMU")
+Reported-by: Michal Suchánek <msuchanek@suse.de>
+Link: https://lore.kernel.org/r/20230308152702.GR19419@kitsune.suse.cz
+Tested-by: Benjamin Gray <bgray@linux.ibm.com>
+Signed-off-by: Russell Currey <ruscur@russell.cc>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://msgid.link/20230310050834.63105-1-ruscur@russell.cc
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/mm/fault.c | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
+index 2bef19cc1b98c..af46aa88422bf 100644
+--- a/arch/powerpc/mm/fault.c
++++ b/arch/powerpc/mm/fault.c
+@@ -271,11 +271,16 @@ static bool access_error(bool is_write, bool is_exec, struct vm_area_struct *vma
+ }
+
+ /*
+- * Check for a read fault. This could be caused by a read on an
+- * inaccessible page (i.e. PROT_NONE), or a Radix MMU execute-only page.
++ * VM_READ, VM_WRITE and VM_EXEC all imply read permissions, as
++ * defined in protection_map[]. Read faults can only be caused by
++ * a PROT_NONE mapping, or with a PROT_EXEC-only mapping on Radix.
+ */
+- if (unlikely(!(vma->vm_flags & VM_READ)))
++ if (unlikely(!vma_is_accessible(vma)))
+ return true;
++
++ if (unlikely(radix_enabled() && ((vma->vm_flags & VM_ACCESS_FLAGS) == VM_EXEC)))
++ return true;
++
+ /*
+ * We should ideally do the vma pkey access check here. But in the
+ * fault path, handle_mm_fault() also does the same check. To avoid
+--
+2.39.2
+
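The corrected logic can be read as a small truth table: a read fault is only bogus for a PROT_NONE vma, or on Radix for a vma whose access flags are exactly VM_EXEC; the old VM_READ-only test wrongly flagged write-only mappings too. The table-driven check below uses illustrative single-bit flag values, not the real powerpc vm_flags encoding.

#include <stdbool.h>
#include <stdio.h>

#define VM_READ  0x1UL
#define VM_WRITE 0x2UL
#define VM_EXEC  0x4UL
#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)

static bool bad_read_fault(unsigned long vm_flags, bool radix_enabled)
{
        if (!(vm_flags & VM_ACCESS_FLAGS))      /* PROT_NONE: never readable */
                return true;
        if (radix_enabled && (vm_flags & VM_ACCESS_FLAGS) == VM_EXEC)
                return true;                    /* execute-only, Radix only */
        return false;                           /* VM_WRITE and VM_EXEC imply read */
}

int main(void)
{
        printf("PROT_NONE        -> %d\n", bad_read_fault(0, true));            /* 1 */
        printf("write-only       -> %d\n", bad_read_fault(VM_WRITE, true));     /* 0: read allowed */
        printf("exec-only, radix -> %d\n", bad_read_fault(VM_EXEC, true));      /* 1 */
        printf("exec-only, hash  -> %d\n", bad_read_fault(VM_EXEC, false));     /* 0 */
        return 0;
}
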
--- /dev/null
+From 38a87e6d48c5b66f8d559f2b2d84d36deb38de4f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 9 Mar 2023 23:15:56 +0300
+Subject: qed/qed_dev: guard against a possible division by zero
+
+From: Daniil Tatianin <d-tatianin@yandex-team.ru>
+
+[ Upstream commit 1a9dc5610ef89d807acdcfbff93a558f341a44da ]
+
+Previously we would divide total_left_rate by zero if num_vports
+happened to be 1 because non_requested_count is calculated as
+num_vports - req_count. Guard against this by validating num_vports at
+the beginning and returning an error otherwise.
+
+Found by Linux Verification Center (linuxtesting.org) with the SVACE
+static analysis tool.
+
+Fixes: bcd197c81f63 ("qed: Add vport WFQ configuration APIs")
+Signed-off-by: Daniil Tatianin <d-tatianin@yandex-team.ru>
+Reviewed-by: Simon Horman <simon.horman@corigine.com>
+Link: https://lore.kernel.org/r/20230309201556.191392-1-d-tatianin@yandex-team.ru
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/qlogic/qed/qed_dev.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
+index d61cd32ec3b65..86a93cac26470 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
+@@ -5083,6 +5083,11 @@ static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
+
+ num_vports = p_hwfn->qm_info.num_vports;
+
++ if (num_vports < 2) {
++ DP_NOTICE(p_hwfn, "Unexpected num_vports: %d\n", num_vports);
++ return -EINVAL;
++ }
++
+ /* Accounting for the vports which are configured for WFQ explicitly */
+ for (i = 0; i < num_vports; i++) {
+ u32 tmp_speed;
+--
+2.39.2
+
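The arithmetic behind the guard: with a single vport that is itself WFQ-configured, non_requested_count = num_vports - req_count becomes 0 and the later division of the remaining rate blows up. A simplified numeric sketch (made-up rates, not the qed structures):

#include <stdio.h>

static int init_wfq_param(int num_vports, int req_count, int total_left_rate)
{
        int non_requested_count;

        if (num_vports < 2)
                return -22;     /* -EINVAL: nothing left to spread the remaining rate over */

        non_requested_count = num_vports - req_count;
        printf("each non-requested vport gets %d\n",
               total_left_rate / non_requested_count);
        return 0;
}

int main(void)
{
        printf("num_vports=1 -> %d\n", init_wfq_param(1, 1, 1000));     /* rejected */
        printf("num_vports=4 -> %d\n", init_wfq_param(4, 1, 1000));     /* 333 each */
        return 0;
}
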
--- /dev/null
+From 2bb5cdd548045560baff59824f3b756471720cd9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 Mar 2023 22:46:18 +0300
+Subject: qed/qed_mng_tlv: correctly zero out ->min instead of ->hour
+
+From: Daniil Tatianin <d-tatianin@yandex-team.ru>
+
+[ Upstream commit 470efd68a4653d9819d391489886432cd31bcd0b ]
+
+This fixes an issue where ->hour would erroneously get zeroed out
+instead of ->min because of a bad copy paste.
+
+Found by Linux Verification Center (linuxtesting.org) with the SVACE
+static analysis tool.
+
+Fixes: f240b6882211 ("qed: Add support for processing fcoe tlv request.")
+Signed-off-by: Daniil Tatianin <d-tatianin@yandex-team.ru>
+Link: https://lore.kernel.org/r/20230315194618.579286-1-d-tatianin@yandex-team.ru
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
+index 6190adf965bca..f55eed092f25d 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
+@@ -422,7 +422,7 @@ qed_mfw_get_tlv_time_value(struct qed_mfw_tlv_time *p_time,
+ if (p_time->hour > 23)
+ p_time->hour = 0;
+ if (p_time->min > 59)
+- p_time->hour = 0;
++ p_time->min = 0;
+ if (p_time->msec > 999)
+ p_time->msec = 0;
+ if (p_time->usec > 999)
+--
+2.39.2
+
--- /dev/null
+From 1b353453f277bd9d5ad9cae195c546256c5ad22a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 Mar 2023 08:41:14 +0100
+Subject: ravb: avoid PHY being resumed when interface is not up
+
+From: Wolfram Sang <wsa+renesas@sang-engineering.com>
+
+[ Upstream commit 7f5ebf5dae42e710162f1c481ebcf28ab7b741c7 ]
+
+RAVB doesn't need mdiobus suspend/resume; that's why it sets
+'mac_managed_pm'. However, setting it needs to be moved from init to
+probe, so that the mdiobus PM functions are really never called (e.g. when
+the interface is not up yet during suspend/resume).
+
+Fixes: 4924c0cdce75 ("net: ravb: Fix PHY state warning splat during system resume")
+Suggested-by: Heiner Kallweit <hkallweit1@gmail.com>
+Signed-off-by: Wolfram Sang <wsa+renesas@sang-engineering.com>
+Reviewed-by: Michal Kubiak <michal.kubiak@intel.com>
+Reviewed-by: Sergey Shtylyov <s.shtylyov@omp.ru>
+Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/renesas/ravb_main.c | 12 ++++++++++--
+ 1 file changed, 10 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index 0f54849a38235..894e2690c6437 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -1455,8 +1455,6 @@ static int ravb_phy_init(struct net_device *ndev)
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ }
+
+- /* Indicate that the MAC is responsible for managing PHY PM */
+- phydev->mac_managed_pm = true;
+ phy_attached_info(phydev);
+
+ return 0;
+@@ -2379,6 +2377,8 @@ static int ravb_mdio_init(struct ravb_private *priv)
+ {
+ struct platform_device *pdev = priv->pdev;
+ struct device *dev = &pdev->dev;
++ struct phy_device *phydev;
++ struct device_node *pn;
+ int error;
+
+ /* Bitbang init */
+@@ -2400,6 +2400,14 @@ static int ravb_mdio_init(struct ravb_private *priv)
+ if (error)
+ goto out_free_bus;
+
++ pn = of_parse_phandle(dev->of_node, "phy-handle", 0);
++ phydev = of_phy_find_device(pn);
++ if (phydev) {
++ phydev->mac_managed_pm = true;
++ put_device(&phydev->mdio.dev);
++ }
++ of_node_put(pn);
++
+ return 0;
+
+ out_free_bus:
+--
+2.39.2
+
--- /dev/null
+From 68fcab99f97d6fcd4f77d91d29fca085f9f3b65b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 Sep 2022 11:13:50 -0700
+Subject: scsi: core: Add BLIST_NO_VPD_SIZE for some VDASD
+
+From: Lee Duncan <lduncan@suse.com>
+
+[ Upstream commit 4b1a2c2a8e0ddcb89c5f6c5003bd9b53142f69e3 ]
+
+Some storage devices, such as AIX VDASD (virtual storage) and IBM 2076
+(front end), fail as a result of commit c92a6b5d6335 ("scsi: core: Query
+VPD size before getting full page").
+
+That commit changed the retrieval of SCSI VPD pages so that we now read
+just enough of the page to get the actual page size, then read the whole
+page in a second read. The problem is that the above-mentioned
+hardware returns zero for the page size because of a firmware
+error. In such cases, until the firmware is fixed, this new blacklist
+flag says to revert to the original method of reading the VPD pages,
+i.e. try to read a whole buffer's worth on the first try.
+
+[mkp: reworked somewhat]
+
+Fixes: c92a6b5d6335 ("scsi: core: Query VPD size before getting full page")
+Reported-by: Martin Wilck <mwilck@suse.com>
+Suggested-by: Hannes Reinecke <hare@suse.de>
+Signed-off-by: Lee Duncan <lduncan@suse.com>
+Link: https://lore.kernel.org/r/20220928181350.9948-1-leeman.duncan@gmail.com
+Tested-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/scsi.c | 3 +++
+ drivers/scsi/scsi_devinfo.c | 3 ++-
+ drivers/scsi/scsi_scan.c | 3 +++
+ include/scsi/scsi_device.h | 2 ++
+ include/scsi/scsi_devinfo.h | 6 +++---
+ 5 files changed, 13 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
+index c59eac7a32f2a..24c4c92543599 100644
+--- a/drivers/scsi/scsi.c
++++ b/drivers/scsi/scsi.c
+@@ -326,6 +326,9 @@ static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page)
+ unsigned char vpd_header[SCSI_VPD_HEADER_SIZE] __aligned(4);
+ int result;
+
++ if (sdev->no_vpd_size)
++ return SCSI_DEFAULT_VPD_LEN;
++
+ /*
+ * Fetch the VPD page header to find out how big the page
+ * is. This is done to prevent problems on legacy devices
+diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
+index c7080454aea99..bc9d280417f6a 100644
+--- a/drivers/scsi/scsi_devinfo.c
++++ b/drivers/scsi/scsi_devinfo.c
+@@ -134,7 +134,7 @@ static struct {
+ {"3PARdata", "VV", NULL, BLIST_REPORTLUN2},
+ {"ADAPTEC", "AACRAID", NULL, BLIST_FORCELUN},
+ {"ADAPTEC", "Adaptec 5400S", NULL, BLIST_FORCELUN},
+- {"AIX", "VDASD", NULL, BLIST_TRY_VPD_PAGES},
++ {"AIX", "VDASD", NULL, BLIST_TRY_VPD_PAGES | BLIST_NO_VPD_SIZE},
+ {"AFT PRO", "-IX CF", "0.0>", BLIST_FORCELUN},
+ {"BELKIN", "USB 2 HS-CF", "1.95", BLIST_FORCELUN | BLIST_INQUIRY_36},
+ {"BROWNIE", "1200U3P", NULL, BLIST_NOREPORTLUN},
+@@ -188,6 +188,7 @@ static struct {
+ {"HPE", "OPEN-", "*", BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES},
+ {"IBM", "AuSaV1S2", NULL, BLIST_FORCELUN},
+ {"IBM", "ProFibre 4000R", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
++ {"IBM", "2076", NULL, BLIST_NO_VPD_SIZE},
+ {"IBM", "2105", NULL, BLIST_RETRY_HWERROR},
+ {"iomega", "jaz 1GB", "J.86", BLIST_NOTQ | BLIST_NOLUN},
+ {"IOMEGA", "ZIP", NULL, BLIST_NOTQ | BLIST_NOLUN},
+diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
+index d149b218715e5..d12f2dcb4040a 100644
+--- a/drivers/scsi/scsi_scan.c
++++ b/drivers/scsi/scsi_scan.c
+@@ -1056,6 +1056,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
+ else if (*bflags & BLIST_SKIP_VPD_PAGES)
+ sdev->skip_vpd_pages = 1;
+
++ if (*bflags & BLIST_NO_VPD_SIZE)
++ sdev->no_vpd_size = 1;
++
+ transport_configure_device(&sdev->sdev_gendev);
+
+ if (sdev->host->hostt->slave_configure) {
+diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
+index c36656d8ac6c7..006858ed04e8c 100644
+--- a/include/scsi/scsi_device.h
++++ b/include/scsi/scsi_device.h
+@@ -145,6 +145,7 @@ struct scsi_device {
+ const char * model; /* ... after scan; point to static string */
+ const char * rev; /* ... "nullnullnullnull" before scan */
+
++#define SCSI_DEFAULT_VPD_LEN 255 /* default SCSI VPD page size (max) */
+ struct scsi_vpd __rcu *vpd_pg0;
+ struct scsi_vpd __rcu *vpd_pg83;
+ struct scsi_vpd __rcu *vpd_pg80;
+@@ -214,6 +215,7 @@ struct scsi_device {
+ * creation time */
+ unsigned ignore_media_change:1; /* Ignore MEDIA CHANGE on resume */
+ unsigned silence_suspend:1; /* Do not print runtime PM related messages */
++ unsigned no_vpd_size:1; /* No VPD size reported in header */
+
+ unsigned int queue_stopped; /* request queue is quiesced */
+ bool offline_already; /* Device offline message logged */
+diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h
+index 5d14adae21c78..6b548dc2c4965 100644
+--- a/include/scsi/scsi_devinfo.h
++++ b/include/scsi/scsi_devinfo.h
+@@ -32,7 +32,8 @@
+ #define BLIST_IGN_MEDIA_CHANGE ((__force blist_flags_t)(1ULL << 11))
+ /* do not do automatic start on add */
+ #define BLIST_NOSTARTONADD ((__force blist_flags_t)(1ULL << 12))
+-#define __BLIST_UNUSED_13 ((__force blist_flags_t)(1ULL << 13))
++/* do not ask for VPD page size first on some broken targets */
++#define BLIST_NO_VPD_SIZE ((__force blist_flags_t)(1ULL << 13))
+ #define __BLIST_UNUSED_14 ((__force blist_flags_t)(1ULL << 14))
+ #define __BLIST_UNUSED_15 ((__force blist_flags_t)(1ULL << 15))
+ #define __BLIST_UNUSED_16 ((__force blist_flags_t)(1ULL << 16))
+@@ -74,8 +75,7 @@
+ #define __BLIST_HIGH_UNUSED (~(__BLIST_LAST_USED | \
+ (__force blist_flags_t) \
+ ((__force __u64)__BLIST_LAST_USED - 1ULL)))
+-#define __BLIST_UNUSED_MASK (__BLIST_UNUSED_13 | \
+- __BLIST_UNUSED_14 | \
++#define __BLIST_UNUSED_MASK (__BLIST_UNUSED_14 | \
+ __BLIST_UNUSED_15 | \
+ __BLIST_UNUSED_16 | \
+ __BLIST_UNUSED_24 | \
+--
+2.39.2
+
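+As a rough userspace sketch of the behaviour the new BLIST_NO_VPD_SIZE flag
+restores (the 4-byte header layout and the 255-byte default only mirror the
+idea; the function and constant names here are illustrative, not the
+kernel's):
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdio.h>
+
+#define DEFAULT_VPD_LEN 255  /* fall back to a full buffer's worth */
+
+/* A VPD header stores the page length in bytes 2..3 (big endian).
+ * Broken firmware reports 0 there, so a flagged device skips the
+ * size probe entirely and just asks for the default length. */
+static size_t vpd_request_len(const unsigned char hdr[4], bool no_vpd_size)
+{
+        if (no_vpd_size)
+                return DEFAULT_VPD_LEN;
+        return (((size_t)hdr[2] << 8) | hdr[3]) + 4;  /* payload + header */
+}
+
+int main(void)
+{
+        unsigned char good[4]   = { 0x00, 0x83, 0x00, 0x3c };  /* 60-byte page */
+        unsigned char broken[4] = { 0x00, 0x83, 0x00, 0x00 };  /* size 0 */
+
+        printf("good device:      read %zu bytes\n", vpd_request_len(good, false));
+        printf("broken, unflagged: read %zu bytes\n", vpd_request_len(broken, false));
+        printf("broken, flagged:   read %zu bytes\n", vpd_request_len(broken, true));
+        return 0;
+}
+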
--- /dev/null
+From 60375d0063d32d39efdf0514ce4f5e4554333480 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Mar 2023 13:44:28 -0800
+Subject: scsi: core: Fix a procfs host directory removal regression
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+[ Upstream commit be03df3d4bfe7e8866d4aa43d62e648ffe884f5f ]
+
+scsi_proc_hostdir_rm() decreases a reference counter and hence must only be
+called once per host that is removed. This change does not require a
+scsi_add_host_with_dma() change since scsi_add_host_with_dma() will return
+0 (success) if scsi_proc_host_add() is called.
+
+Fixes: fc663711b944 ("scsi: core: Remove the /proc/scsi/${proc_name} directory earlier")
+Cc: John Garry <john.g.garry@oracle.com>
+Reported-by: John Garry <john.g.garry@oracle.com>
+Link: https://lore.kernel.org/all/ed6b8027-a9d9-1b45-be8e-df4e8c6c4605@oracle.com/
+Reported-by: syzbot+645a4616b87a2f10e398@syzkaller.appspotmail.com
+Link: https://lore.kernel.org/linux-scsi/000000000000890fab05f65342b6@google.com/
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Link: https://lore.kernel.org/r/20230307214428.3703498-1-bvanassche@acm.org
+Tested-by: John Garry <john.g.garry@oracle.com>
+Tested-by: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/hosts.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
+index 85e66574ec414..45a2fd6584d16 100644
+--- a/drivers/scsi/hosts.c
++++ b/drivers/scsi/hosts.c
+@@ -341,9 +341,6 @@ static void scsi_host_dev_release(struct device *dev)
+ struct Scsi_Host *shost = dev_to_shost(dev);
+ struct device *parent = dev->parent;
+
+- /* In case scsi_remove_host() has not been called. */
+- scsi_proc_hostdir_rm(shost->hostt);
+-
+ /* Wait for functions invoked through call_rcu(&scmd->rcu, ...) */
+ rcu_barrier();
+
+--
+2.39.2
+
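+A minimal userspace analog of why the extra call was a problem (the counter
+and names are illustrative; the real code refcounts the shared
+/proc/scsi/<proc_name> directory per host template):
+
+#include <stdio.h>
+
+struct proc_dir {
+        int present;  /* directory currently exists */
+        int users;    /* hosts still referencing it */
+};
+
+static void hostdir_add(struct proc_dir *d)
+{
+        if (!d->present) {
+                d->present = 1;          /* "mkdir" on first user */
+                printf("created dir\n");
+        }
+        d->users++;
+}
+
+static void hostdir_rm(struct proc_dir *d)
+{
+        if (--d->users == 0) {
+                d->present = 0;          /* "rmdir" when the last user leaves */
+                printf("removed dir\n");
+        } else if (d->users < 0) {
+                printf("BUG: unbalanced removal (users=%d)\n", d->users);
+        }
+}
+
+int main(void)
+{
+        struct proc_dir d = { 0, 0 };
+
+        hostdir_add(&d);
+        hostdir_rm(&d);  /* balanced: the directory goes away here            */
+        hostdir_rm(&d);  /* a second removal, as in the regression, underflows */
+        return 0;
+}
+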
--- /dev/null
+From c1aee5ba15bf2b559f9f4d7dd922f74743df9b22 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 3 Mar 2023 00:43:32 +0100
+Subject: scsi: mpi3mr: Fix config page DMA memory leak
+
+From: Tomas Henzl <thenzl@redhat.com>
+
+[ Upstream commit 7d2b02172b6a2ae6aecd7ef6480b9c4bf3dc59f4 ]
+
+A fix for:
+
+DMA-API: pci 0000:83:00.0: device driver has pending DMA allocations while released from device [count=1]
+
+Fixes: 32d457d5a2af ("scsi: mpi3mr: Add framework to issue config requests")
+Signed-off-by: Tomas Henzl <thenzl@redhat.com>
+Link: https://lore.kernel.org/r/20230302234336.25456-3-thenzl@redhat.com
+Acked-by: Sathya Prakash Veerichetty <sathya.prakash@broadcom.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/mpi3mr/mpi3mr_fw.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+index 2d46a0b04f345..f6b726359a1cc 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+@@ -4351,7 +4351,11 @@ void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
+ mrioc->admin_req_base, mrioc->admin_req_dma);
+ mrioc->admin_req_base = NULL;
+ }
+-
++ if (mrioc->cfg_page) {
++ dma_free_coherent(&mrioc->pdev->dev, mrioc->cfg_page_sz,
++ mrioc->cfg_page, mrioc->cfg_page_dma);
++ mrioc->cfg_page = NULL;
++ }
+ if (mrioc->pel_seqnum_virt) {
+ dma_free_coherent(&mrioc->pdev->dev, mrioc->pel_seqnum_sz,
+ mrioc->pel_seqnum_virt, mrioc->pel_seqnum_dma);
+--
+2.39.2
+
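+The shape of the fix, reduced to a userspace sketch (calloc/free stand in for
+dma_alloc_coherent()/dma_free_coherent(); the struct and names are
+illustrative): every buffer set up during init gets a matching, NULL-checked
+release in the teardown path, and the pointer is cleared so a repeated
+teardown stays harmless.
+
+#include <stdlib.h>
+
+struct ctx {
+        void *cfg_page;
+        size_t cfg_page_sz;
+};
+
+static int ctx_init(struct ctx *c)
+{
+        c->cfg_page_sz = 4096;
+        c->cfg_page = calloc(1, c->cfg_page_sz);   /* ~ dma_alloc_coherent() */
+        return c->cfg_page ? 0 : -1;
+}
+
+static void ctx_free(struct ctx *c)
+{
+        if (c->cfg_page) {
+                free(c->cfg_page);                 /* ~ dma_free_coherent() */
+                c->cfg_page = NULL;
+        }
+}
+
+int main(void)
+{
+        struct ctx c = { 0 };
+
+        if (ctx_init(&c))
+                return 1;
+        ctx_free(&c);
+        ctx_free(&c);   /* second call is a no-op thanks to the NULL check */
+        return 0;
+}
+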
--- /dev/null
+From 79eae1647b29bb2d1cb9761adbb2066a6de4742a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 3 Mar 2023 00:43:36 +0100
+Subject: scsi: mpi3mr: Fix expander node leak in mpi3mr_remove()
+
+From: Tomas Henzl <thenzl@redhat.com>
+
+[ Upstream commit ce756daa36e1ba271bb3334267295e447aa57a5c ]
+
+Add a missing resource cleanup in .remove.
+
+Fixes: e22bae30667a ("scsi: mpi3mr: Add expander devices to STL")
+Signed-off-by: Tomas Henzl <thenzl@redhat.com>
+Link: https://lore.kernel.org/r/20230302234336.25456-7-thenzl@redhat.com
+Acked-by: Sathya Prakash Veerichetty <sathya.prakash@broadcom.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/mpi3mr/mpi3mr.h | 2 ++
+ drivers/scsi/mpi3mr/mpi3mr_os.c | 7 +++++++
+ drivers/scsi/mpi3mr/mpi3mr_transport.c | 5 +----
+ 3 files changed, 10 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
+index 68f29ffb05b82..de6914d57402c 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr.h
++++ b/drivers/scsi/mpi3mr/mpi3mr.h
+@@ -1394,4 +1394,6 @@ void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc);
+ void mpi3mr_flush_cmds_for_unrecovered_controller(struct mpi3mr_ioc *mrioc);
+ void mpi3mr_free_enclosure_list(struct mpi3mr_ioc *mrioc);
+ int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc);
++void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc,
++ struct mpi3mr_sas_node *sas_expander);
+ #endif /*MPI3MR_H_INCLUDED*/
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
+index 2e546c80d98ce..6d55698ea4d16 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
+@@ -5079,6 +5079,7 @@ static void mpi3mr_remove(struct pci_dev *pdev)
+ unsigned long flags;
+ struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
+ struct mpi3mr_hba_port *port, *hba_port_next;
++ struct mpi3mr_sas_node *sas_expander, *sas_expander_next;
+
+ if (!shost)
+ return;
+@@ -5119,6 +5120,12 @@ static void mpi3mr_remove(struct pci_dev *pdev)
+ mpi3mr_cleanup_resources(mrioc);
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
++ list_for_each_entry_safe_reverse(sas_expander, sas_expander_next,
++ &mrioc->sas_expander_list, list) {
++ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
++ mpi3mr_expander_node_remove(mrioc, sas_expander);
++ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
++ }
+ list_for_each_entry_safe(port, hba_port_next, &mrioc->hba_port_table_list, list) {
+ ioc_info(mrioc,
+ "removing hba_port entry: %p port: %d from hba_port list\n",
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c
+index 3b61815979dab..50263ba4f8428 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_transport.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c
+@@ -9,9 +9,6 @@
+
+ #include "mpi3mr.h"
+
+-static void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc,
+- struct mpi3mr_sas_node *sas_expander);
+-
+ /**
+ * mpi3mr_post_transport_req - Issue transport requests and wait
+ * @mrioc: Adapter instance reference
+@@ -2163,7 +2160,7 @@ int mpi3mr_expander_add(struct mpi3mr_ioc *mrioc, u16 handle)
+ *
+ * Return nothing.
+ */
+-static void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc,
++void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_node *sas_expander)
+ {
+ struct mpi3mr_sas_port *mr_sas_port, *next;
+--
+2.39.2
+
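+The list walk above drops the spinlock around each expander removal because
+the removal itself is heavyweight. A userspace sketch of that pattern, with a
+pthread mutex and an illustrative node type (detach one entry under the lock,
+do the expensive teardown with the lock released, repeat):
+
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+struct node {
+        int id;
+        struct node *next;
+};
+
+static struct node *expander_list;
+static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
+
+static void heavy_teardown(struct node *n)
+{
+        /* In the driver this tears down SAS ports and may block,
+         * so it must not run with the list lock held. */
+        printf("removing expander %d\n", n->id);
+        free(n);
+}
+
+static void remove_all_expanders(void)
+{
+        for (;;) {
+                struct node *n;
+
+                pthread_mutex_lock(&list_lock);
+                n = expander_list;            /* detach one node under the lock */
+                if (n)
+                        expander_list = n->next;
+                pthread_mutex_unlock(&list_lock);
+
+                if (!n)
+                        break;
+                heavy_teardown(n);            /* lock already released here */
+        }
+}
+
+int main(void)
+{
+        for (int i = 0; i < 3; i++) {
+                struct node *n = malloc(sizeof(*n));
+
+                if (!n)
+                        return 1;
+                n->id = i;
+                n->next = expander_list;
+                expander_list = n;
+        }
+        remove_all_expanders();
+        return 0;
+}
+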
--- /dev/null
+From 4c998cca2da4f8f42c5dc1ff982dcc11ab5849d3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 3 Mar 2023 00:43:35 +0100
+Subject: scsi: mpi3mr: Fix memory leaks in mpi3mr_init_ioc()
+
+From: Tomas Henzl <thenzl@redhat.com>
+
+[ Upstream commit c798304470cab88723d895726d17fcb96472e0e9 ]
+
+Don't allocate memory again when the IOC is being reinitialized.
+
+Fixes: fe6db6151565 ("scsi: mpi3mr: Handle offline FW activation in graceful manner")
+Signed-off-by: Tomas Henzl <thenzl@redhat.com>
+Link: https://lore.kernel.org/r/20230302234336.25456-6-thenzl@redhat.com
+Acked-by: Sathya Prakash Veerichetty <sathya.prakash@broadcom.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/mpi3mr/mpi3mr_fw.c | 41 ++++++++++++++++++---------------
+ 1 file changed, 23 insertions(+), 18 deletions(-)
+
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+index 37aaf8dc65d4d..1a404d71b88cf 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+@@ -3814,29 +3814,34 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc)
+
+ mpi3mr_print_ioc_info(mrioc);
+
+- dprint_init(mrioc, "allocating config page buffers\n");
+- mrioc->cfg_page = dma_alloc_coherent(&mrioc->pdev->dev,
+- MPI3MR_DEFAULT_CFG_PAGE_SZ, &mrioc->cfg_page_dma, GFP_KERNEL);
+ if (!mrioc->cfg_page) {
+- retval = -1;
+- goto out_failed_noretry;
++ dprint_init(mrioc, "allocating config page buffers\n");
++ mrioc->cfg_page_sz = MPI3MR_DEFAULT_CFG_PAGE_SZ;
++ mrioc->cfg_page = dma_alloc_coherent(&mrioc->pdev->dev,
++ mrioc->cfg_page_sz, &mrioc->cfg_page_dma, GFP_KERNEL);
++ if (!mrioc->cfg_page) {
++ retval = -1;
++ goto out_failed_noretry;
++ }
+ }
+
+- mrioc->cfg_page_sz = MPI3MR_DEFAULT_CFG_PAGE_SZ;
+-
+- retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
+- if (retval) {
+- ioc_err(mrioc,
+- "%s :Failed to allocated reply sense buffers %d\n",
+- __func__, retval);
+- goto out_failed_noretry;
++ if (!mrioc->init_cmds.reply) {
++ retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
++ if (retval) {
++ ioc_err(mrioc,
++ "%s :Failed to allocated reply sense buffers %d\n",
++ __func__, retval);
++ goto out_failed_noretry;
++ }
+ }
+
+- retval = mpi3mr_alloc_chain_bufs(mrioc);
+- if (retval) {
+- ioc_err(mrioc, "Failed to allocated chain buffers %d\n",
+- retval);
+- goto out_failed_noretry;
++ if (!mrioc->chain_sgl_list) {
++ retval = mpi3mr_alloc_chain_bufs(mrioc);
++ if (retval) {
++ ioc_err(mrioc, "Failed to allocated chain buffers %d\n",
++ retval);
++ goto out_failed_noretry;
++ }
+ }
+
+ retval = mpi3mr_issue_iocinit(mrioc);
+--
+2.39.2
+
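+The pattern the hunks above introduce, as a small standalone C sketch (names
+and sizes are illustrative): buffers that survive a soft reset are allocated
+only while their pointer is still NULL, so running the init routine a second
+time reuses them instead of leaking the first allocation.
+
+#include <stdio.h>
+#include <stdlib.h>
+
+struct ioc {
+        void *cfg_page;   /* kept across re-initialisation */
+};
+
+static int ioc_init(struct ioc *ioc)
+{
+        if (!ioc->cfg_page) {
+                ioc->cfg_page = calloc(1, 4096);
+                if (!ioc->cfg_page)
+                        return -1;
+                printf("allocated cfg page\n");
+        }
+        /* ... the rest of (re)initialisation runs every time ... */
+        return 0;
+}
+
+int main(void)
+{
+        struct ioc ioc = { 0 };
+
+        if (ioc_init(&ioc))     /* first init allocates           */
+                return 1;
+        if (ioc_init(&ioc))     /* re-init reuses the same buffer */
+                return 1;
+        free(ioc.cfg_page);
+        return 0;
+}
+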
--- /dev/null
+From 4cf99a839bac4de3c688a2b4db8ad3f58af9f599 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 3 Mar 2023 00:43:33 +0100
+Subject: scsi: mpi3mr: Fix mpi3mr_hba_port memory leak in mpi3mr_remove()
+
+From: Tomas Henzl <thenzl@redhat.com>
+
+[ Upstream commit d0f3c3728da8af76dfe435f7f0cfa2b9d9e43ef0 ]
+
+Free mpi3mr_hba_port at .remove.
+
+Fixes: 42fc9fee116f ("scsi: mpi3mr: Add helper functions to manage device's port")
+Signed-off-by: Tomas Henzl <thenzl@redhat.com>
+Link: https://lore.kernel.org/r/20230302234336.25456-4-thenzl@redhat.com
+Acked-by: Sathya Prakash Veerichetty <sathya.prakash@broadcom.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/mpi3mr/mpi3mr_os.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
+index 6eaeba41072cb..5032b0b5186d4 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
+@@ -5077,6 +5077,7 @@ static void mpi3mr_remove(struct pci_dev *pdev)
+ struct workqueue_struct *wq;
+ unsigned long flags;
+ struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
++ struct mpi3mr_hba_port *port, *hba_port_next;
+
+ if (!shost)
+ return;
+@@ -5116,6 +5117,16 @@ static void mpi3mr_remove(struct pci_dev *pdev)
+ mpi3mr_free_mem(mrioc);
+ mpi3mr_cleanup_resources(mrioc);
+
++ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
++ list_for_each_entry_safe(port, hba_port_next, &mrioc->hba_port_table_list, list) {
++ ioc_info(mrioc,
++ "removing hba_port entry: %p port: %d from hba_port list\n",
++ port, port->port_id);
++ list_del(&port->list);
++ kfree(port);
++ }
++ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
++
+ spin_lock(&mrioc_list_lock);
+ list_del(&mrioc->list);
+ spin_unlock(&mrioc_list_lock);
+--
+2.39.2
+
--- /dev/null
+From f482b5af2382f0f6973283a67557cba32fa0cd57 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 3 Mar 2023 00:43:34 +0100
+Subject: scsi: mpi3mr: Fix sas_hba.phy memory leak in mpi3mr_remove()
+
+From: Tomas Henzl <thenzl@redhat.com>
+
+[ Upstream commit d4caa1a4255cc44be56bcab3db2c97c632e6cc10 ]
+
+Free mrioc->sas_hba.phy at .remove.
+
+Fixes: 42fc9fee116f ("scsi: mpi3mr: Add helper functions to manage device's port")
+Signed-off-by: Tomas Henzl <thenzl@redhat.com>
+Link: https://lore.kernel.org/r/20230302234336.25456-5-thenzl@redhat.com
+Acked-by: Sathya Prakash Veerichetty <sathya.prakash@broadcom.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/mpi3mr/mpi3mr_os.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
+index 5032b0b5186d4..5698e7b90f852 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
+@@ -5127,6 +5127,12 @@ static void mpi3mr_remove(struct pci_dev *pdev)
+ }
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+
++ if (mrioc->sas_hba.num_phys) {
++ kfree(mrioc->sas_hba.phy);
++ mrioc->sas_hba.phy = NULL;
++ mrioc->sas_hba.num_phys = 0;
++ }
++
+ spin_lock(&mrioc_list_lock);
+ list_del(&mrioc->list);
+ spin_unlock(&mrioc_list_lock);
+--
+2.39.2
+
--- /dev/null
+From 86a3ee3e8034f58e5f240bff9f286e97d8aa7ae3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 3 Mar 2023 00:43:31 +0100
+Subject: scsi: mpi3mr: Fix throttle_groups memory leak
+
+From: Tomas Henzl <thenzl@redhat.com>
+
+[ Upstream commit f305a7b6ca21a665e8d0cf70b5936991a298c93c ]
+
+Add a missing kfree().
+
+Fixes: f10af057325c ("scsi: mpi3mr: Resource Based Metering")
+Signed-off-by: Tomas Henzl <thenzl@redhat.com>
+Link: https://lore.kernel.org/r/20230302234336.25456-2-thenzl@redhat.com
+Acked-by: Sathya Prakash Veerichetty <sathya.prakash@broadcom.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/mpi3mr/mpi3mr_fw.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+index 1e4467ea8472a..2d46a0b04f345 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+@@ -4358,6 +4358,9 @@ void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
+ mrioc->pel_seqnum_virt = NULL;
+ }
+
++ kfree(mrioc->throttle_groups);
++ mrioc->throttle_groups = NULL;
++
+ kfree(mrioc->logdata_buf);
+ mrioc->logdata_buf = NULL;
+
+--
+2.39.2
+
--- /dev/null
+From d45379a6ef80719f3dd9e3550d503c15745a66b6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 Feb 2023 06:08:30 -0800
+Subject: scsi: mpi3mr: ioctl timeout when disabling/enabling interrupt
+
+From: Ranjan Kumar <ranjan.kumar@broadcom.com>
+
+[ Upstream commit 02ca7da2919ada525fb424640205110e24646b50 ]
+
+As part of Task Management handling, the driver will disable and enable the
+MSIx index zero which belongs to the Admin reply queue. During this
+transition the driver loses some interrupts and this leads to Admin request
+and ioctl timeouts.
+
+After enabling the interrupts, poll the Admin reply queue to avoid
+timeouts.
+
+Signed-off-by: Ranjan Kumar <ranjan.kumar@broadcom.com>
+Signed-off-by: Sreekanth Reddy <sreekanth.reddy@broadcom.com>
+Link: https://lore.kernel.org/r/20230228140835.4075-2-ranjan.kumar@broadcom.com
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Stable-dep-of: ce756daa36e1 ("scsi: mpi3mr: Fix expander node leak in mpi3mr_remove()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/mpi3mr/mpi3mr.h | 3 +++
+ drivers/scsi/mpi3mr/mpi3mr_fw.c | 12 ++++++++++--
+ drivers/scsi/mpi3mr/mpi3mr_os.c | 1 +
+ 3 files changed, 14 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
+index 8a438f248a820..68f29ffb05b82 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr.h
++++ b/drivers/scsi/mpi3mr/mpi3mr.h
+@@ -903,6 +903,7 @@ struct scmd_priv {
+ * @admin_reply_ephase:Admin reply queue expected phase
+ * @admin_reply_base: Admin reply queue base virtual address
+ * @admin_reply_dma: Admin reply queue base dma address
++ * @admin_reply_q_in_use: Queue is handled by poll/ISR
+ * @ready_timeout: Controller ready timeout
+ * @intr_info: Interrupt cookie pointer
+ * @intr_info_count: Number of interrupt cookies
+@@ -1056,6 +1057,7 @@ struct mpi3mr_ioc {
+ u8 admin_reply_ephase;
+ void *admin_reply_base;
+ dma_addr_t admin_reply_dma;
++ atomic_t admin_reply_q_in_use;
+
+ u32 ready_timeout;
+
+@@ -1391,4 +1393,5 @@ void mpi3mr_add_event_wait_for_device_refresh(struct mpi3mr_ioc *mrioc);
+ void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc);
+ void mpi3mr_flush_cmds_for_unrecovered_controller(struct mpi3mr_ioc *mrioc);
+ void mpi3mr_free_enclosure_list(struct mpi3mr_ioc *mrioc);
++int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc);
+ #endif /*MPI3MR_H_INCLUDED*/
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+index 1a404d71b88cf..74fa7f90399e3 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+@@ -415,7 +415,7 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
+ le64_to_cpu(scsi_reply->sense_data_buffer_address));
+ }
+
+-static int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
++int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
+ {
+ u32 exp_phase = mrioc->admin_reply_ephase;
+ u32 admin_reply_ci = mrioc->admin_reply_ci;
+@@ -423,12 +423,17 @@ static int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
+ u64 reply_dma = 0;
+ struct mpi3_default_reply_descriptor *reply_desc;
+
++ if (!atomic_add_unless(&mrioc->admin_reply_q_in_use, 1, 1))
++ return 0;
++
+ reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
+ admin_reply_ci;
+
+ if ((le16_to_cpu(reply_desc->reply_flags) &
+- MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
++ MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
++ atomic_dec(&mrioc->admin_reply_q_in_use);
+ return 0;
++ }
+
+ do {
+ if (mrioc->unrecoverable)
+@@ -454,6 +459,7 @@ static int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
+ writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
+ mrioc->admin_reply_ci = admin_reply_ci;
+ mrioc->admin_reply_ephase = exp_phase;
++ atomic_dec(&mrioc->admin_reply_q_in_use);
+
+ return num_admin_replies;
+ }
+@@ -2605,6 +2611,7 @@ static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc)
+ mrioc->admin_reply_ci = 0;
+ mrioc->admin_reply_ephase = 1;
+ mrioc->admin_reply_base = NULL;
++ atomic_set(&mrioc->admin_reply_q_in_use, 0);
+
+ if (!mrioc->admin_req_base) {
+ mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev,
+@@ -4168,6 +4175,7 @@ void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
+ memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz);
+ if (mrioc->admin_reply_base)
+ memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz);
++ atomic_set(&mrioc->admin_reply_q_in_use, 0);
+
+ if (mrioc->init_cmds.reply) {
+ memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
+index 5698e7b90f852..2e546c80d98ce 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
+@@ -3720,6 +3720,7 @@ int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
+ mpi3mr_poll_pend_io_completions(mrioc);
+ mpi3mr_ioc_enable_intr(mrioc);
+ mpi3mr_poll_pend_io_completions(mrioc);
++ mpi3mr_process_admin_reply_q(mrioc);
+ }
+ switch (tm_type) {
+ case MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
+--
+2.39.2
+
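+atomic_add_unless(&v, 1, 1) only takes the slot when nobody else holds it,
+which is what keeps the ISR and the new poll call from draining the admin
+reply queue at the same time. A C11 userspace sketch of that guard (names are
+illustrative; compare-and-swap plays the role of atomic_add_unless here):
+
+#include <stdatomic.h>
+#include <stdbool.h>
+#include <stdio.h>
+
+/* 0 = queue idle, 1 = someone is draining it */
+static atomic_int admin_q_in_use;
+
+static bool admin_q_try_enter(void)
+{
+        int expected = 0;
+
+        return atomic_compare_exchange_strong(&admin_q_in_use, &expected, 1);
+}
+
+static void admin_q_leave(void)
+{
+        atomic_store(&admin_q_in_use, 0);
+}
+
+static void process_admin_replies(const char *who)
+{
+        if (!admin_q_try_enter()) {
+                printf("%s: queue busy, skipping\n", who);
+                return;
+        }
+        printf("%s: draining admin replies\n", who);
+        admin_q_leave();
+}
+
+int main(void)
+{
+        process_admin_replies("poller");   /* e.g. called after re-enabling IRQs */
+        process_admin_replies("isr");
+        return 0;
+}
+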
--- /dev/null
+From 07625b16990dc81f7679d46dc677457bdffe831b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 Feb 2023 06:08:33 -0800
+Subject: scsi: mpi3mr: Return proper values for failures in firmware init path
+
+From: Ranjan Kumar <ranjan.kumar@broadcom.com>
+
+[ Upstream commit ba8a9ba41fbde250fd8b0ed1e5dad0dc9318df46 ]
+
+Return proper non-zero return values for all the cases where controller
+initialization or re-initialization fails.
+
+Signed-off-by: Ranjan Kumar <ranjan.kumar@broadcom.com>
+Signed-off-by: Sreekanth Reddy <sreekanth.reddy@broadcom.com>
+Link: https://lore.kernel.org/r/20230228140835.4075-5-ranjan.kumar@broadcom.com
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Stable-dep-of: c798304470ca ("scsi: mpi3mr: Fix memory leaks in mpi3mr_init_ioc()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/mpi3mr/mpi3mr_fw.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+index f6b726359a1cc..37aaf8dc65d4d 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+@@ -3817,8 +3817,10 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc)
+ dprint_init(mrioc, "allocating config page buffers\n");
+ mrioc->cfg_page = dma_alloc_coherent(&mrioc->pdev->dev,
+ MPI3MR_DEFAULT_CFG_PAGE_SZ, &mrioc->cfg_page_dma, GFP_KERNEL);
+- if (!mrioc->cfg_page)
++ if (!mrioc->cfg_page) {
++ retval = -1;
+ goto out_failed_noretry;
++ }
+
+ mrioc->cfg_page_sz = MPI3MR_DEFAULT_CFG_PAGE_SZ;
+
+@@ -3880,8 +3882,10 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc)
+ dprint_init(mrioc, "allocating memory for throttle groups\n");
+ sz = sizeof(struct mpi3mr_throttle_group_info);
+ mrioc->throttle_groups = kcalloc(mrioc->num_io_throttle_group, sz, GFP_KERNEL);
+- if (!mrioc->throttle_groups)
++ if (!mrioc->throttle_groups) {
++ retval = -1;
+ goto out_failed_noretry;
++ }
+ }
+
+ retval = mpi3mr_enable_events(mrioc);
+@@ -3901,6 +3905,7 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc)
+ mpi3mr_memset_buffers(mrioc);
+ goto retry_init;
+ }
++ retval = -1;
+ out_failed_noretry:
+ ioc_err(mrioc, "controller initialization failed\n");
+ mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+@@ -4013,6 +4018,7 @@ int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume)
+ ioc_err(mrioc,
+ "cannot create minimum number of operational queues expected:%d created:%d\n",
+ mrioc->shost->nr_hw_queues, mrioc->num_op_reply_q);
++ retval = -1;
+ goto out_failed_noretry;
+ }
+
+@@ -4079,6 +4085,7 @@ int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume)
+ mpi3mr_memset_buffers(mrioc);
+ goto retry_init;
+ }
++ retval = -1;
+ out_failed_noretry:
+ ioc_err(mrioc, "controller %s is failed\n",
+ (is_resume)?"resume":"re-initialization");
+--
+2.39.2
+
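+The hunks above all follow the same convention: set retval to a non-zero
+error code before jumping to the no-retry exit label, so the caller never
+sees success for a failed (re)initialization. A hedged shorthand of the
+convention (label and variable names are illustrative):
+
+/* retval must never reach the failure label still holding 0 */
+static int init_path(int alloc_ok)
+{
+        int retval = 0;
+
+        if (!alloc_ok) {
+                retval = -1;           /* set the error before bailing out */
+                goto out_failed_noretry;
+        }
+        return retval;                 /* 0 only on the success path */
+
+out_failed_noretry:
+        return retval;
+}
+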
--- /dev/null
+From b3e4d050fff6366969950dbedcf26af901672efc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 25 Feb 2023 18:01:36 +0800
+Subject: scsi: mpt3sas: Fix NULL pointer access in
+ mpt3sas_transport_port_add()
+
+From: Wenchao Hao <haowenchao2@huawei.com>
+
+[ Upstream commit d3c57724f1569311e4b81e98fad0931028b9bdcd ]
+
+Port is allocated by sas_port_alloc_num() and rphy is allocated by either
+sas_end_device_alloc() or sas_expander_alloc(), all of which may return
+NULL. So we need to check the rphy to avoid possible NULL pointer access.
+
+If sas_rphy_add() returns with failure, rphy is set to NULL. We would then
+access rphy in the following lines, which would also result in a NULL
+pointer access.
+
+Fixes: 78316e9dfc24 ("scsi: mpt3sas: Fix possible resource leaks in mpt3sas_transport_port_add()")
+Signed-off-by: Wenchao Hao <haowenchao2@huawei.com>
+Link: https://lore.kernel.org/r/20230225100135.2109330-1-haowenchao2@huawei.com
+Acked-by: Sathya Prakash Veerichetty <sathya.prakash@broadcom.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/mpt3sas/mpt3sas_transport.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
+index e5ecd6ada6cdd..e8a4750f6ec47 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
+@@ -785,7 +785,7 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ goto out_fail;
+ }
+ port = sas_port_alloc_num(sas_node->parent_dev);
+- if ((sas_port_add(port))) {
++ if (!port || (sas_port_add(port))) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out_fail;
+@@ -824,6 +824,12 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ mpt3sas_port->remote_identify.sas_address;
+ }
+
++ if (!rphy) {
++ ioc_err(ioc, "failure at %s:%d/%s()!\n",
++ __FILE__, __LINE__, __func__);
++ goto out_delete_port;
++ }
++
+ rphy->identify = mpt3sas_port->remote_identify;
+
+ if ((sas_rphy_add(rphy))) {
+@@ -831,6 +837,7 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ __FILE__, __LINE__, __func__);
+ sas_rphy_free(rphy);
+ rphy = NULL;
++ goto out_delete_port;
+ }
+
+ if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) {
+@@ -857,7 +864,10 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ rphy_to_expander_device(rphy), hba_port->port_id);
+ return mpt3sas_port;
+
+- out_fail:
++out_delete_port:
++ sas_port_delete(port);
++
++out_fail:
+ list_for_each_entry_safe(mpt3sas_phy, next, &mpt3sas_port->phy_list,
+ port_siblings)
+ list_del(&mpt3sas_phy->port_siblings);
+--
+2.39.2
+
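+The new out_delete_port label follows the usual layered-unwind idiom: each
+failure jumps to the label that undoes exactly what has already succeeded.
+A compact standalone sketch of that idiom (the stubbed functions are
+placeholders, not the SAS transport API):
+
+#include <stdio.h>
+
+static int port_add(void)     { printf("port added\n");   return 0; }
+static void port_delete(void) { printf("port deleted\n"); }
+static int rphy_add(int fail) { return fail ? -1 : 0; }
+
+/* A failed rphy_add() still tears down the port added before it. */
+static int transport_port_add(int fail_rphy)
+{
+        if (port_add())
+                goto out_fail;
+
+        if (rphy_add(fail_rphy)) {
+                printf("rphy add failed\n");
+                goto out_delete_port;
+        }
+
+        return 0;
+
+out_delete_port:
+        port_delete();
+out_fail:
+        printf("port add failed\n");
+        return -1;
+}
+
+int main(void)
+{
+        transport_port_add(1);
+        return 0;
+}
+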
--- /dev/null
+From d72ff773bc3ba7198287376d5649a1267313a1cf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 9 Aug 2022 16:22:31 +0200
+Subject: selftests: fix LLVM build for i386 and x86_64
+
+From: Guillaume Tucker <guillaume.tucker@collabora.com>
+
+[ Upstream commit 624c60f326c6e5a80b008e8a5c7feffe8c27dc72 ]
+
+Add missing cases for the i386 and x86_64 architectures when
+determining the LLVM target for building kselftest.
+
+Fixes: 795285ef2425 ("selftests: Fix clang cross compilation")
+Signed-off-by: Guillaume Tucker <guillaume.tucker@collabora.com>
+Reviewed-by: Nathan Chancellor <nathan@kernel.org>
+Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/lib.mk | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
+index f7900e75d2306..05400462c7799 100644
+--- a/tools/testing/selftests/lib.mk
++++ b/tools/testing/selftests/lib.mk
+@@ -10,12 +10,14 @@ endif
+ CLANG_TARGET_FLAGS_arm := arm-linux-gnueabi
+ CLANG_TARGET_FLAGS_arm64 := aarch64-linux-gnu
+ CLANG_TARGET_FLAGS_hexagon := hexagon-linux-musl
++CLANG_TARGET_FLAGS_i386 := i386-linux-gnu
+ CLANG_TARGET_FLAGS_m68k := m68k-linux-gnu
+ CLANG_TARGET_FLAGS_mips := mipsel-linux-gnu
+ CLANG_TARGET_FLAGS_powerpc := powerpc64le-linux-gnu
+ CLANG_TARGET_FLAGS_riscv := riscv64-linux-gnu
+ CLANG_TARGET_FLAGS_s390 := s390x-linux-gnu
+ CLANG_TARGET_FLAGS_x86 := x86_64-linux-gnu
++CLANG_TARGET_FLAGS_x86_64 := x86_64-linux-gnu
+ CLANG_TARGET_FLAGS := $(CLANG_TARGET_FLAGS_$(ARCH))
+
+ ifeq ($(CROSS_COMPILE),)
+--
+2.39.2
+
--- /dev/null
+From 0ccca18311a870bde5f5c7d2cf9d4a7ccedece08 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Mar 2023 00:53:53 +0800
+Subject: selftests: net: devlink_port_split.py: skip test if no suitable
+ device available
+
+From: Po-Hsu Lin <po-hsu.lin@canonical.com>
+
+[ Upstream commit 24994513ad13ff2c47ba91d2b5df82c3d496c370 ]
+
+The `devlink -j port show` command output may not contain the "flavour"
+key, an example from Ubuntu 22.10 s390x LPAR(5.19.0-37-generic), with
+mlx4 driver and iproute2-5.15.0:
+ {"port":{"pci/0001:00:00.0/1":{"type":"eth","netdev":"ens301"},
+ "pci/0001:00:00.0/2":{"type":"eth","netdev":"ens301d1"},
+ "pci/0002:00:00.0/1":{"type":"eth","netdev":"ens317"},
+ "pci/0002:00:00.0/2":{"type":"eth","netdev":"ens317d1"}}}
+
+This will cause a KeyError exception.
+
+Create a validate_devlink_output() helper to check for this "flavour" key
+in the devlink command output and avoid the KeyError exception. Also let
+it handle the check of the `devlink -j dev show` output in main().
+
+Apart from this, if the test was not started because the max lanes of
+the designated device is 0, the script will still return 0, causing a
+false-negative test result.
+
+Use a found_max_lanes flag to determine if these tests were skipped
+due to this reason and return KSFT_SKIP to make it more clear.
+
+Link: https://bugs.launchpad.net/bugs/1937133
+Fixes: f3348a82e727 ("selftests: net: Add port split test")
+Signed-off-by: Po-Hsu Lin <po-hsu.lin@canonical.com>
+Link: https://lore.kernel.org/r/20230315165353.229590-1-po-hsu.lin@canonical.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../selftests/net/devlink_port_split.py | 36 ++++++++++++++++---
+ 1 file changed, 31 insertions(+), 5 deletions(-)
+
+diff --git a/tools/testing/selftests/net/devlink_port_split.py b/tools/testing/selftests/net/devlink_port_split.py
+index 2b5d6ff873738..2d84c7a0be6b2 100755
+--- a/tools/testing/selftests/net/devlink_port_split.py
++++ b/tools/testing/selftests/net/devlink_port_split.py
+@@ -59,6 +59,8 @@ class devlink_ports(object):
+ assert stderr == ""
+ ports = json.loads(stdout)['port']
+
++ validate_devlink_output(ports, 'flavour')
++
+ for port in ports:
+ if dev in port:
+ if ports[port]['flavour'] == 'physical':
+@@ -220,6 +222,27 @@ def split_splittable_port(port, k, lanes, dev):
+ unsplit(port.bus_info)
+
+
++def validate_devlink_output(devlink_data, target_property=None):
++ """
++ Determine if test should be skipped by checking:
++ 1. devlink_data contains values
++ 2. The target_property exist in devlink_data
++ """
++ skip_reason = None
++ if any(devlink_data.values()):
++ if target_property:
++ skip_reason = "{} not found in devlink output, test skipped".format(target_property)
++ for key in devlink_data:
++ if target_property in devlink_data[key]:
++ skip_reason = None
++ else:
++ skip_reason = 'devlink output is empty, test skipped'
++
++ if skip_reason:
++ print(skip_reason)
++ sys.exit(KSFT_SKIP)
++
++
+ def make_parser():
+ parser = argparse.ArgumentParser(description='A test for port splitting.')
+ parser.add_argument('--dev',
+@@ -240,12 +263,9 @@ def main(cmdline=None):
+ stdout, stderr = run_command(cmd)
+ assert stderr == ""
+
++ validate_devlink_output(json.loads(stdout))
+ devs = json.loads(stdout)['dev']
+- if devs:
+- dev = list(devs.keys())[0]
+- else:
+- print("no devlink device was found, test skipped")
+- sys.exit(KSFT_SKIP)
++ dev = list(devs.keys())[0]
+
+ cmd = "devlink dev show %s" % dev
+ stdout, stderr = run_command(cmd)
+@@ -255,6 +275,7 @@ def main(cmdline=None):
+
+ ports = devlink_ports(dev)
+
++ found_max_lanes = False
+ for port in ports.if_names:
+ max_lanes = get_max_lanes(port.name)
+
+@@ -277,6 +298,11 @@ def main(cmdline=None):
+ split_splittable_port(port, lane, max_lanes, dev)
+
+ lane //= 2
++ found_max_lanes = True
++
++ if not found_max_lanes:
++ print(f"Test not started, no port of device {dev} reports max_lanes")
++ sys.exit(KSFT_SKIP)
+
+
+ if __name__ == "__main__":
+--
+2.39.2
+
--- /dev/null
+xfrm-allow-transport-mode-states-with-af_unspec-sele.patch
+drm-virtio-pass-correct-device-to-dma_sync_sgtable_f.patch
+drm-msm-gem-prevent-blocking-within-shrinker-loop.patch
+drm-panfrost-don-t-sync-rpm-suspension-after-mmu-flu.patch
+fbdev-chipsfb-fix-error-codes-in-chipsfb_pci_init.patch
+cifs-move-the-in_send-statistic-to-__smb_send_rqst.patch
+drm-meson-fix-1px-pink-line-on-gxm-when-scaling-vide.patch
+clk-hi655x-select-regmap-instead-of-depending-on-it.patch
+asoc-sof-intel-mtl-fix-the-device-description.patch
+asoc-sof-intel-hda-fix-device-description.patch
+asoc-sof-intel-skl-fix-device-description.patch
+asoc-sof-intel-pci-tgl-fix-device-description.patch
+asoc-sof-ipc4-topology-set-dmic-dai-index-from-copie.patch
+docs-correct-missing-d_-prefix-for-dentry_operations.patch
+scsi-mpt3sas-fix-null-pointer-access-in-mpt3sas_tran.patch
+scsi-mpi3mr-fix-throttle_groups-memory-leak.patch
+scsi-mpi3mr-fix-config-page-dma-memory-leak.patch
+scsi-mpi3mr-fix-mpi3mr_hba_port-memory-leak-in-mpi3m.patch
+scsi-mpi3mr-fix-sas_hba.phy-memory-leak-in-mpi3mr_re.patch
+scsi-mpi3mr-return-proper-values-for-failures-in-fir.patch
+scsi-mpi3mr-fix-memory-leaks-in-mpi3mr_init_ioc.patch
+scsi-mpi3mr-ioctl-timeout-when-disabling-enabling-in.patch
+scsi-mpi3mr-fix-expander-node-leak-in-mpi3mr_remove.patch
+alsa-hda-match-only-intel-devices-with-controller_in.patch
+netfilter-nft_nat-correct-length-for-loading-protoco.patch
+netfilter-nft_masq-correct-length-for-loading-protoc.patch
+netfilter-nft_redir-correct-length-for-loading-proto.patch
+netfilter-nft_redir-correct-value-of-inet-type-.maxa.patch
+scsi-core-add-blist_no_vpd_size-for-some-vdasd.patch
+scsi-core-fix-a-procfs-host-directory-removal-regres.patch
+ftrace-kcfi-define-ftrace_stub_graph-conditionally.patch
+tcp-tcp_make_synack-can-be-called-from-process-conte.patch
+vdpa-mlx5-should-not-activate-virtq-object-when-susp.patch
+wifi-nl80211-fix-null-ptr-deref-in-offchan-check.patch
+wifi-cfg80211-fix-mlo-connection-ownership.patch
+selftests-fix-llvm-build-for-i386-and-x86_64.patch
+nfc-pn533-initialize-struct-pn533_out_arg-properly.patch
+ipvlan-make-skb-skb_iif-track-skb-dev-for-l3s-mode.patch
+i40e-fix-kernel-crash-during-reboot-when-adapter-is-.patch
+vhost-vdpa-free-iommu-domain-after-last-use-during-c.patch
+vdpa_sim-not-reset-state-in-vdpasim_queue_ready.patch
+vdpa_sim-set-last_used_idx-as-last_avail_idx-in-vdpa.patch
+pci-s390-fix-use-after-free-of-pci-resources-with-pe.patch
+drm-i915-psr-use-calculated-io-and-fast-wake-lines.patch
+drm-i915-sseu-fix-max_subslices-array-index-out-of-b.patch
+net-smc-fix-null-sndbuf_desc-in-smc_cdc_tx_handler.patch
+qed-qed_dev-guard-against-a-possible-division-by-zer.patch
+net-dsa-mt7530-remove-now-incorrect-comment-regardin.patch
+net-dsa-mt7530-set-pll-frequency-and-trgmii-only-whe.patch
+block-do-not-reverse-request-order-when-flushing-plu.patch
+loop-fix-use-after-free-issues.patch
+blk-mq-move-the-srcu_struct-used-for-quiescing-to-th.patch
+blk-mq-fix-bad-unlock-balance-detected-on-q-srcu-in-.patch
+net-tunnels-annotate-lockless-accesses-to-dev-needed.patch
+net-phy-smsc-bail-out-in-lan87xx_read_status-if-genp.patch
+tcp-fix-bind-conflict-check-for-dual-stack-wildcard-.patch
+nfc-st-nci-fix-use-after-free-bug-in-ndlc_remove-due.patch
+mlxsw-spectrum-fix-incorrect-parsing-depth-after-rel.patch
+net-smc-fix-deadlock-triggered-by-cancel_delayed_wor.patch
+net-usb-smsc75xx-limit-packet-length-to-skb-len.patch
+drm-bridge-fix-returned-array-size-name-for-atomic_g.patch
+powerpc-mm-fix-false-detection-of-read-faults.patch
+block-null_blk-fix-handling-of-fake-timeout-request.patch
+nvme-fix-handling-single-range-discard-request.patch
+nvmet-avoid-potential-uaf-in-nvmet_req_complete.patch
+block-sunvdc-add-check-for-mdesc_grab-returning-null.patch
+net-mlx5e-fix-macsec-aso-context-alignment.patch
+net-mlx5e-don-t-cache-tunnel-offloads-capability.patch
+net-mlx5-fix-setting-ec_function-bit-in-manage_pages.patch
+net-mlx5-disable-eswitch-before-waiting-for-vf-pages.patch
+net-mlx5e-support-geneve-and-gre-with-vf-tunnel-offl.patch
+net-mlx5-e-switch-fix-wrong-usage-of-source-port-rew.patch
+net-mlx5-e-switch-fix-missing-set-of-split_count-whe.patch
+net-mlx5e-fix-cleanup-null-ptr-deref-on-encap-lock.patch
+net-mlx5-set-break_fw_wait-flag-first-when-removing-.patch
+veth-fix-use-after-free-in-xdp_redirect.patch
+ice-xsk-disable-txq-irq-before-flushing-hw.patch
+net-dsa-don-t-error-out-when-drivers-return-eth_data.patch
+net-dsa-mv88e6xxx-fix-max_mtu-of-1492-on-6165-6191-6.patch
+ravb-avoid-phy-being-resumed-when-interface-is-not-u.patch
+sh_eth-avoid-phy-being-resumed-when-interface-is-not.patch
+ipv4-fix-incorrect-table-id-in-ioctl-path.patch
+net-usb-smsc75xx-move-packet-length-check-to-prevent.patch
+net-atlantic-fix-crash-when-xdp-is-enabled-but-no-pr.patch
+net-iucv-fix-size-of-interrupt-data.patch
+i825xx-sni_82596-use-eth_hw_addr_set.patch
+selftests-net-devlink_port_split.py-skip-test-if-no-.patch
+qed-qed_mng_tlv-correctly-zero-out-min-instead-of-ho.patch
+net-dsa-microchip-fix-rgmii-delay-configuration-on-k.patch
+ethernet-sun-add-check-for-the-mdesc_grab.patch
+bonding-restore-iff_master-slave-flags-on-bond-ensla.patch
+bonding-restore-bond-s-iff_slave-flag-if-a-non-eth-d.patch
--- /dev/null
+From ce3b1cfc71cf7a55b7328b39e36483b338585950 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 Mar 2023 08:41:15 +0100
+Subject: sh_eth: avoid PHY being resumed when interface is not up
+
+From: Wolfram Sang <wsa+renesas@sang-engineering.com>
+
+[ Upstream commit c6be7136afb224a01d4cde2983ddebac8da98693 ]
+
+SH_ETH doesn't need mdiobus suspend/resume; that's why it sets
+'mac_managed_pm'. However, setting it needs to be moved from init to
+probe, so mdiobus PM functions will really never be called (e.g. when
+the interface is not up yet during suspend/resume).
+
+Fixes: 6a1dbfefdae4 ("net: sh_eth: Fix PHY state warning splat during system resume")
+Suggested-by: Heiner Kallweit <hkallweit1@gmail.com>
+Signed-off-by: Wolfram Sang <wsa+renesas@sang-engineering.com>
+Reviewed-by: Michal Kubiak <michal.kubiak@intel.com>
+Reviewed-by: Sergey Shtylyov <s.shtylyov@omp.ru>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/renesas/sh_eth.c | 12 ++++++++++--
+ 1 file changed, 10 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
+index 71a4991133080..14dc5833c465c 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.c
++++ b/drivers/net/ethernet/renesas/sh_eth.c
+@@ -2029,8 +2029,6 @@ static int sh_eth_phy_init(struct net_device *ndev)
+ if (mdp->cd->register_type != SH_ETH_REG_GIGABIT)
+ phy_set_max_speed(phydev, SPEED_100);
+
+- /* Indicate that the MAC is responsible for managing PHY PM */
+- phydev->mac_managed_pm = true;
+ phy_attached_info(phydev);
+
+ return 0;
+@@ -3074,6 +3072,8 @@ static int sh_mdio_init(struct sh_eth_private *mdp,
+ struct bb_info *bitbang;
+ struct platform_device *pdev = mdp->pdev;
+ struct device *dev = &mdp->pdev->dev;
++ struct phy_device *phydev;
++ struct device_node *pn;
+
+ /* create bit control struct for PHY */
+ bitbang = devm_kzalloc(dev, sizeof(struct bb_info), GFP_KERNEL);
+@@ -3108,6 +3108,14 @@ static int sh_mdio_init(struct sh_eth_private *mdp,
+ if (ret)
+ goto out_free_bus;
+
++ pn = of_parse_phandle(dev->of_node, "phy-handle", 0);
++ phydev = of_phy_find_device(pn);
++ if (phydev) {
++ phydev->mac_managed_pm = true;
++ put_device(&phydev->mdio.dev);
++ }
++ of_node_put(pn);
++
+ return 0;
+
+ out_free_bus:
+--
+2.39.2
+
--- /dev/null
+From 0c0caadafc198c63a1192c3f6a97e9cad079a5a7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 11 Mar 2023 19:19:03 -0800
+Subject: tcp: Fix bind() conflict check for dual-stack wildcard address.
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit d9ba9934285514f1f95d96326a82398a22dc77f2 ]
+
+Paul Holzinger reported [0] that commit 5456262d2baa ("net: Fix
+incorrect address comparison when searching for a bind2 bucket")
+introduced a bind() regression. Paul also gave a nice repro that
+calls two types of bind() on the same port, both of which now
+succeed, but the second call should fail:
+
+ bind(fd1, ::, port) + bind(fd2, 127.0.0.1, port)
+
+The cited commit added address family tests in three functions to
+fix the uninit-value KMSAN report. [1] However, the test added to
+inet_bind2_bucket_match_addr_any() removed a necessary conflict
+check; the dual-stack wildcard address no longer conflicts with
+an IPv4 non-wildcard address.
+
+If tb->family is AF_INET6 and sk->sk_family is AF_INET in
+inet_bind2_bucket_match_addr_any(), we still need to check
+if tb has the dual-stack wildcard address.
+
+Note that the IPv4 wildcard address does not conflict with
+IPv6 non-wildcard addresses.
+
+[0]: https://lore.kernel.org/netdev/e21bf153-80b0-9ec0-15ba-e04a4ad42c34@redhat.com/
+[1]: https://lore.kernel.org/netdev/CAG_fn=Ud3zSW7AZWXc+asfMhZVL5ETnvuY44Pmyv4NPv-ijN-A@mail.gmail.com/
+
+Fixes: 5456262d2baa ("net: Fix incorrect address comparison when searching for a bind2 bucket")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Reported-by: Paul Holzinger <pholzing@redhat.com>
+Link: https://lore.kernel.org/netdev/CAG_fn=Ud3zSW7AZWXc+asfMhZVL5ETnvuY44Pmyv4NPv-ijN-A@mail.gmail.com/
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Tested-by: Paul Holzinger <pholzing@redhat.com>
+Reviewed-by: Martin KaFai Lau <martin.lau@kernel.org>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/inet_hashtables.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index cd8b2f7a8f341..f0750c06d5ffc 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -828,8 +828,14 @@ bool inet_bind2_bucket_match_addr_any(const struct inet_bind2_bucket *tb, const
+ #if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr addr_any = {};
+
+- if (sk->sk_family != tb->family)
++ if (sk->sk_family != tb->family) {
++ if (sk->sk_family == AF_INET)
++ return net_eq(ib2_net(tb), net) && tb->port == port &&
++ tb->l3mdev == l3mdev &&
++ ipv6_addr_equal(&tb->v6_rcv_saddr, &addr_any);
++
+ return false;
++ }
+
+ if (sk->sk_family == AF_INET6)
+ return net_eq(ib2_net(tb), net) && tb->port == port &&
+--
+2.39.2
+
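+A compact model of the rule the fix restores (families and flags only; real
+buckets also compare ports, netns and L3 domain, and the v6only case is an
+extra assumption added here for illustration): a dual-stack :: bind also
+claims the IPv4 side of the port, while an IPv4 0.0.0.0 bind says nothing
+about IPv6 addresses.
+
+#include <stdbool.h>
+#include <stdio.h>
+
+enum family { V4, V6 };
+
+struct bound {
+        enum family family;
+        bool wildcard;   /* 0.0.0.0 or ::                   */
+        bool v6only;     /* IPV6_V6ONLY set (no dual stack) */
+};
+
+/* Does an existing bound address block a new bind to an IPv4
+ * non-wildcard address (e.g. 127.0.0.1) on the same port?
+ * (Equal-address conflicts are left out of this sketch.) */
+static bool blocks_v4_bind(const struct bound *tb)
+{
+        if (tb->family == V4)
+                return tb->wildcard;          /* 0.0.0.0 covers every v4 address */
+        return tb->wildcard && !tb->v6only;   /* only a dual-stack :: covers v4  */
+}
+
+int main(void)
+{
+        struct bound dual_any   = { V6, true, false };  /* bind(::, port)         */
+        struct bound v6only_any = { V6, true, true  };  /* bind(::, port), v6only */
+
+        printf("bind(127.0.0.1) after bind(::):        %s\n",
+               blocks_v4_bind(&dual_any) ? "EADDRINUSE" : "ok");
+        printf("bind(127.0.0.1) after v6only bind(::): %s\n",
+               blocks_v4_bind(&v6only_any) ? "EADDRINUSE" : "ok");
+        return 0;
+}
+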
--- /dev/null
+From 5aa2f1901c5122c2e820f61bd473320824eb9358 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 8 Mar 2023 11:07:45 -0800
+Subject: tcp: tcp_make_synack() can be called from process context
+
+From: Breno Leitao <leitao@debian.org>
+
+[ Upstream commit bced3f7db95ff2e6ca29dc4d1c9751ab5e736a09 ]
+
+tcp_rtx_synack() now could be called in process context as explained in
+0a375c822497 ("tcp: tcp_rtx_synack() can be called from process
+context").
+
+tcp_rtx_synack() might call tcp_make_synack(), which will touch per-CPU
+variables with preemption enabled. This causes the following BUG:
+
+ BUG: using __this_cpu_add() in preemptible [00000000] code: ThriftIO1/5464
+ caller is tcp_make_synack+0x841/0xac0
+ Call Trace:
+ <TASK>
+ dump_stack_lvl+0x10d/0x1a0
+ check_preemption_disabled+0x104/0x110
+ tcp_make_synack+0x841/0xac0
+ tcp_v6_send_synack+0x5c/0x450
+ tcp_rtx_synack+0xeb/0x1f0
+ inet_rtx_syn_ack+0x34/0x60
+ tcp_check_req+0x3af/0x9e0
+ tcp_rcv_state_process+0x59b/0x2030
+ tcp_v6_do_rcv+0x5f5/0x700
+ release_sock+0x3a/0xf0
+ tcp_sendmsg+0x33/0x40
+ ____sys_sendmsg+0x2f2/0x490
+ __sys_sendmsg+0x184/0x230
+ do_syscall_64+0x3d/0x90
+
+Avoid calling __TCP_INC_STATS(), which will touch per-CPU variables. Use
+TCP_INC_STATS(), which is safe to call from process context.
+
+Fixes: 8336886f786f ("tcp: TCP Fast Open Server - support TFO listeners")
+Signed-off-by: Breno Leitao <leitao@debian.org>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Link: https://lore.kernel.org/r/20230308190745.780221-1-leitao@debian.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/tcp_output.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index c69f4d966024c..925594dbeb929 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -3608,7 +3608,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
+ th->window = htons(min(req->rsk_rcv_wnd, 65535U));
+ tcp_options_write(th, NULL, &opts);
+ th->doff = (tcp_header_size >> 2);
+- __TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
++ TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
+
+ #ifdef CONFIG_TCP_MD5SIG
+ /* Okay, we have all we need - do the md5 hash if needed */
+--
+2.39.2
+
--- /dev/null
+From 2cf5b57820f07c4f5f061850671d174e3e81a28b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Feb 2023 17:30:40 -0800
+Subject: vdpa/mlx5: should not activate virtq object when suspended
+
+From: Si-Wei Liu <si-wei.liu@oracle.com>
+
+[ Upstream commit 09e65ee9059d76b89cb713795748805efd3f50c6 ]
+
+Otherwise the virtqueue object to instate could point to an invalid address
+that was unmapped from the MTT:
+
+ mlx5_core 0000:41:04.2: mlx5_cmd_out_err:782:(pid 8321):
+ CREATE_GENERAL_OBJECT(0xa00) op_mod(0xd) failed, status
+ bad parameter(0x3), syndrome (0x5fa1c), err(-22)
+
+Fixes: cae15c2ed8e6 ("vdpa/mlx5: Implement susupend virtqueue callback")
+Cc: Eli Cohen <elic@nvidia.com>
+Signed-off-by: Si-Wei Liu <si-wei.liu@oracle.com>
+Reviewed-by: Eli Cohen <elic@nvidia.com>
+
+Message-Id: <1676424640-11673-1-git-send-email-si-wei.liu@oracle.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Acked-by: Jason Wang <jasowang@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/vdpa/mlx5/core/mlx5_vdpa.h | 1 +
+ drivers/vdpa/mlx5/net/mlx5_vnet.c | 6 +++++-
+ 2 files changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+index 058fbe28107e9..25fc4120b618d 100644
+--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
++++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+@@ -96,6 +96,7 @@ struct mlx5_vdpa_dev {
+ struct mlx5_control_vq cvq;
+ struct workqueue_struct *wq;
+ unsigned int group2asid[MLX5_VDPA_NUMVQ_GROUPS];
++ bool suspended;
+ };
+
+ int mlx5_vdpa_alloc_pd(struct mlx5_vdpa_dev *dev, u32 *pdn, u16 uid);
+diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+index 3a6dbbc6440d4..daac3ab314785 100644
+--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
++++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+@@ -2411,7 +2411,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
+ if (err)
+ goto err_mr;
+
+- if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
++ if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK) || mvdev->suspended)
+ goto err_mr;
+
+ restore_channels_info(ndev);
+@@ -2579,6 +2579,7 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
+ clear_vqs_ready(ndev);
+ mlx5_vdpa_destroy_mr(&ndev->mvdev);
+ ndev->mvdev.status = 0;
++ ndev->mvdev.suspended = false;
+ ndev->cur_num_vqs = 0;
+ ndev->mvdev.cvq.received_desc = 0;
+ ndev->mvdev.cvq.completed_desc = 0;
+@@ -2815,6 +2816,8 @@ static int mlx5_vdpa_suspend(struct vdpa_device *vdev)
+ struct mlx5_vdpa_virtqueue *mvq;
+ int i;
+
++ mlx5_vdpa_info(mvdev, "suspending device\n");
++
+ down_write(&ndev->reslock);
+ ndev->nb_registered = false;
+ mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
+@@ -2824,6 +2827,7 @@ static int mlx5_vdpa_suspend(struct vdpa_device *vdev)
+ suspend_vq(ndev, mvq);
+ }
+ mlx5_vdpa_cvq_suspend(mvdev);
++ mvdev->suspended = true;
+ up_write(&ndev->reslock);
+ return 0;
+ }
+--
+2.39.2
+
--- /dev/null
+From 60860eaf3a8c82533fe1d8be3a821136c44122ac Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Jan 2023 17:43:58 +0100
+Subject: vdpa_sim: not reset state in vdpasim_queue_ready
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Eugenio Pérez <eperezma@redhat.com>
+
+[ Upstream commit 0e84f918fac8ae61dcb790534fad5e3555ca2930 ]
+
+vdpasim_queue_ready calls vringh_init_iotlb, which resets split indexes.
+But it can be called after setting a ring base with
+vdpasim_set_vq_state.
+
+Fix it by stashing them. They are still reset in vdpasim_vq_reset.
+
+This was discovered and tested live migrating the vdpa_sim_net device.
+
+Fixes: 2c53d0f64c06 ("vdpasim: vDPA device simulator")
+Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
+Message-Id: <20230118164359.1523760-2-eperezma@redhat.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Acked-by: Jason Wang <jasowang@redhat.com>
+Tested-by: Lei Yang <leiyang@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/vdpa/vdpa_sim/vdpa_sim.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
+index cb88891b44a8c..8839232a3fcbc 100644
+--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
++++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
+@@ -66,6 +66,7 @@ static void vdpasim_vq_notify(struct vringh *vring)
+ static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
+ {
+ struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
++ uint16_t last_avail_idx = vq->vring.last_avail_idx;
+
+ vringh_init_iotlb(&vq->vring, vdpasim->features, vq->num, false,
+ (struct vring_desc *)(uintptr_t)vq->desc_addr,
+@@ -74,6 +75,7 @@ static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
+ (struct vring_used *)
+ (uintptr_t)vq->device_addr);
+
++ vq->vring.last_avail_idx = last_avail_idx;
+ vq->vring.notify = vdpasim_vq_notify;
+ }
+
+--
+2.39.2
+
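+Reduced to a standalone sketch (the struct is illustrative and memset()
+stands in for vringh_init_iotlb(); setting used == avail anticipates the
+follow-up vdpa_sim change queued right after this one):
+
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+
+struct vring_state {
+        uint16_t last_avail_idx;
+        uint16_t last_used_idx;
+        /* ... descriptor/avail/used ring pointers ... */
+};
+
+static void ring_init(struct vring_state *v)
+{
+        memset(v, 0, sizeof(*v));   /* ~ vringh_init_iotlb(): resets indexes */
+}
+
+/* queue_ready must not lose an index programmed earlier via
+ * set_vq_state (e.g. by a migration destination): stash it around
+ * the re-initialisation and put it back afterwards. */
+static void queue_ready(struct vring_state *v)
+{
+        uint16_t last_avail = v->last_avail_idx;
+
+        ring_init(v);
+        v->last_avail_idx = last_avail;
+        v->last_used_idx  = last_avail;   /* start with used == avail */
+}
+
+int main(void)
+{
+        struct vring_state v = { 0 };
+
+        v.last_avail_idx = 42;   /* what set_vq_state() programmed */
+        queue_ready(&v);
+        printf("avail=%u used=%u\n", v.last_avail_idx, v.last_used_idx);
+        return 0;
+}
+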
--- /dev/null
+From 20146d6f84a7274d1d21ff8e21f30c184aa34121 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 2 Mar 2023 19:18:57 +0100
+Subject: vdpa_sim: set last_used_idx as last_avail_idx in vdpasim_queue_ready
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Eugenio Pérez <eperezma@redhat.com>
+
+[ Upstream commit b4cca6d48eb3fa6f0d9caba4329b1a2b0ff67a77 ]
+
+Starting from a used_idx different from 0 is needed in use cases like
+virtual machine migration. Not doing so, and letting the caller set an
+avail idx different from 0, causes the destination device to try to use
+old buffers that the source driver has already recovered and that are no
+longer available.
+
+Since vdpa_sim does not support receive inflight descriptors as a
+destination of a migration, let's set both avail_idx and used_idx the
+same at vq start. This is how vhost-user works in a
+VHOST_SET_VRING_BASE call.
+
+Although the simple fix is to set last_used_idx at vdpasim_set_vq_state,
+it would be reset at vdpasim_queue_ready. The last_avail_idx case is
+fixed with commit 0e84f918fac8 ("vdpa_sim: not reset state in
+vdpasim_queue_ready"). Since the only option is to make it equal to
+last_avail_idx, adding the only change needed here.
+
+This was discovered and tested live migrating the vdpa_sim_net device.
+
+Fixes: 2c53d0f64c06 ("vdpasim: vDPA device simulator")
+Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
+Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
+Message-Id: <20230302181857.925374-1-eperezma@redhat.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/vdpa/vdpa_sim/vdpa_sim.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
+index 8839232a3fcbc..61bde476cf9c8 100644
+--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
++++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
+@@ -76,6 +76,17 @@ static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
+ (uintptr_t)vq->device_addr);
+
+ vq->vring.last_avail_idx = last_avail_idx;
++
++ /*
++ * Since vdpa_sim does not support receive inflight descriptors as a
++ * destination of a migration, let's set both avail_idx and used_idx
++ * the same at vq start. This is how vhost-user works in a
++ * VHOST_SET_VRING_BASE call.
++ *
++ * Although the simple fix is to set last_used_idx at
++ * vdpasim_set_vq_state, it would be reset at vdpasim_queue_ready.
++ */
++ vq->vring.last_used_idx = last_avail_idx;
+ vq->vring.notify = vdpasim_vq_notify;
+ }
+
+--
+2.39.2
+
--- /dev/null
+From 528f93c25843dd05b3a74d78fbdad090058e040b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Mar 2023 10:33:51 -0500
+Subject: veth: Fix use after free in XDP_REDIRECT
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Shawn Bohrer <sbohrer@cloudflare.com>
+
+[ Upstream commit 7c10131803e45269ddc6c817f19ed649110f3cae ]
+
+Commit 718a18a0c8a6 ("veth: Rework veth_xdp_rcv_skb in order
+to accept non-linear skb") introduced a bug where it tried to
+use pskb_expand_head() if the headroom was less than
+XDP_PACKET_HEADROOM. This however uses kmalloc to expand the head,
+which will later allow consume_skb() to free the skb while it is still
+in use by AF_XDP.
+
+Previously, if the headroom was less than XDP_PACKET_HEADROOM, we
+continued on to allocate a new skb from pages, so this restores that
+behavior.
+
+BUG: KASAN: use-after-free in __xsk_rcv+0x18d/0x2c0
+Read of size 78 at addr ffff888976250154 by task napi/iconduit-g/148640
+
+CPU: 5 PID: 148640 Comm: napi/iconduit-g Kdump: loaded Tainted: G O 6.1.4-cloudflare-kasan-2023.1.2 #1
+Hardware name: Quanta Computer Inc. QuantaPlex T41S-2U/S2S-MB, BIOS S2S_3B10.03 06/21/2018
+Call Trace:
+ <TASK>
+ dump_stack_lvl+0x34/0x48
+ print_report+0x170/0x473
+ ? __xsk_rcv+0x18d/0x2c0
+ kasan_report+0xad/0x130
+ ? __xsk_rcv+0x18d/0x2c0
+ kasan_check_range+0x149/0x1a0
+ memcpy+0x20/0x60
+ __xsk_rcv+0x18d/0x2c0
+ __xsk_map_redirect+0x1f3/0x490
+ ? veth_xdp_rcv_skb+0x89c/0x1ba0 [veth]
+ xdp_do_redirect+0x5ca/0xd60
+ veth_xdp_rcv_skb+0x935/0x1ba0 [veth]
+ ? __netif_receive_skb_list_core+0x671/0x920
+ ? veth_xdp+0x670/0x670 [veth]
+ veth_xdp_rcv+0x304/0xa20 [veth]
+ ? do_xdp_generic+0x150/0x150
+ ? veth_xdp_rcv_one+0xde0/0xde0 [veth]
+ ? _raw_spin_lock_bh+0xe0/0xe0
+ ? newidle_balance+0x887/0xe30
+ ? __perf_event_task_sched_in+0xdb/0x800
+ veth_poll+0x139/0x571 [veth]
+ ? veth_xdp_rcv+0xa20/0xa20 [veth]
+ ? _raw_spin_unlock+0x39/0x70
+ ? finish_task_switch.isra.0+0x17e/0x7d0
+ ? __switch_to+0x5cf/0x1070
+ ? __schedule+0x95b/0x2640
+ ? io_schedule_timeout+0x160/0x160
+ __napi_poll+0xa1/0x440
+ napi_threaded_poll+0x3d1/0x460
+ ? __napi_poll+0x440/0x440
+ ? __kthread_parkme+0xc6/0x1f0
+ ? __napi_poll+0x440/0x440
+ kthread+0x2a2/0x340
+ ? kthread_complete_and_exit+0x20/0x20
+ ret_from_fork+0x22/0x30
+ </TASK>
+
+Freed by task 148640:
+ kasan_save_stack+0x23/0x50
+ kasan_set_track+0x21/0x30
+ kasan_save_free_info+0x2a/0x40
+ ____kasan_slab_free+0x169/0x1d0
+ slab_free_freelist_hook+0xd2/0x190
+ __kmem_cache_free+0x1a1/0x2f0
+ skb_release_data+0x449/0x600
+ consume_skb+0x9f/0x1c0
+ veth_xdp_rcv_skb+0x89c/0x1ba0 [veth]
+ veth_xdp_rcv+0x304/0xa20 [veth]
+ veth_poll+0x139/0x571 [veth]
+ __napi_poll+0xa1/0x440
+ napi_threaded_poll+0x3d1/0x460
+ kthread+0x2a2/0x340
+ ret_from_fork+0x22/0x30
+
+The buggy address belongs to the object at ffff888976250000
+ which belongs to the cache kmalloc-2k of size 2048
+The buggy address is located 340 bytes inside of
+ 2048-byte region [ffff888976250000, ffff888976250800)
+
+The buggy address belongs to the physical page:
+page:00000000ae18262a refcount:2 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x976250
+head:00000000ae18262a order:3 compound_mapcount:0 compound_pincount:0
+flags: 0x2ffff800010200(slab|head|node=0|zone=2|lastcpupid=0x1ffff)
+raw: 002ffff800010200 0000000000000000 dead000000000122 ffff88810004cf00
+raw: 0000000000000000 0000000080080008 00000002ffffffff 0000000000000000
+page dumped because: kasan: bad access detected
+
+Memory state around the buggy address:
+ ffff888976250000: fa fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+ ffff888976250080: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+> ffff888976250100: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+ ^
+ ffff888976250180: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+ ffff888976250200: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+
+Fixes: 718a18a0c8a6 ("veth: Rework veth_xdp_rcv_skb in order to accept non-linear skb")
+Signed-off-by: Shawn Bohrer <sbohrer@cloudflare.com>
+Acked-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Acked-by: Toshiaki Makita <toshiaki.makita1@gmail.com>
+Acked-by: Toke Høiland-Jørgensen <toke@kernel.org>
+Link: https://lore.kernel.org/r/20230314153351.2201328-1-sbohrer@cloudflare.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/veth.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/veth.c b/drivers/net/veth.c
+index bd385ccd0d18d..a71786b3e7ba7 100644
+--- a/drivers/net/veth.c
++++ b/drivers/net/veth.c
+@@ -701,7 +701,8 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
+ u32 frame_sz;
+
+ if (skb_shared(skb) || skb_head_is_locked(skb) ||
+- skb_shinfo(skb)->nr_frags) {
++ skb_shinfo(skb)->nr_frags ||
++ skb_headroom(skb) < XDP_PACKET_HEADROOM) {
+ u32 size, len, max_head_size, off;
+ struct sk_buff *nskb;
+ struct page *page;
+@@ -766,9 +767,6 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
+
+ consume_skb(skb);
+ skb = nskb;
+- } else if (skb_headroom(skb) < XDP_PACKET_HEADROOM &&
+- pskb_expand_head(skb, VETH_XDP_HEADROOM, 0, GFP_ATOMIC)) {
+- goto drop;
+ }
+
+ /* SKB "head" area always have tailroom for skb_shared_info */
+--
+2.39.2
+
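The crux of the fix above is that an skb with insufficient headroom is
now rebuilt through the same page-backed copy path already used for
shared, head-locked or fragmented skbs, instead of letting
pskb_expand_head() swap in a kmalloc'd head that AF_XDP may still be
reading after consume_skb(). The helper below is a condensed sketch of
that test; needs_copy_for_xdp() is an invented name, while the condition
itself is taken from the patched veth_convert_skb_to_xdp_buff().

#include <linux/bpf.h>		/* XDP_PACKET_HEADROOM */
#include <linux/skbuff.h>

/* Invented helper mirroring the condition in the patched code. */
static bool needs_copy_for_xdp(const struct sk_buff *skb)
{
	return skb_shared(skb) ||
	       skb_head_is_locked(skb) ||
	       skb_shinfo(skb)->nr_frags ||
	       skb_headroom(skb) < XDP_PACKET_HEADROOM;
}

Any skb matching this test is copied into a freshly allocated,
page-backed skb before XDP runs, so no buffer that AF_XDP might still
reference is freed behind its back.
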
--- /dev/null
+From fa683ec76b89cbfaa285311509a627b6f7253b4f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 1 Mar 2023 22:02:01 +0530
+Subject: vhost-vdpa: free iommu domain after last use during cleanup
+
+From: Gautam Dawar <gautam.dawar@amd.com>
+
+[ Upstream commit 5a522150093a0eabae9470a70a37a6e436bfad08 ]
+
+Currently vhost_vdpa_cleanup() unmaps the DMA mappings by calling
+`iommu_unmap(v->domain, map->start, map->size);`
+from vhost_vdpa_general_unmap() when the parent vDPA driver doesn't
+provide DMA config operations.
+
+However, the IOMMU domain referred to by `v->domain` is freed in
+vhost_vdpa_free_domain() before vhost_vdpa_cleanup() runs in
+vhost_vdpa_release(), which results in a NULL pointer dereference.
+Accordingly, it makes sense to move the call to vhost_vdpa_free_domain()
+into vhost_vdpa_cleanup(). This also helps detach the DMA device in the
+error handling of vhost_vdpa_alloc_domain().
+
+This issue was observed on terminating QEMU with SIGQUIT.
+
+Fixes: 037d4305569a ("vhost-vdpa: call vhost_vdpa_cleanup during the release")
+Signed-off-by: Gautam Dawar <gautam.dawar@amd.com>
+Message-Id: <20230301163203.29883-1-gautam.dawar@amd.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Acked-by: Jason Wang <jasowang@redhat.com>
+Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/vhost/vdpa.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
+index ec32f785dfdec..b7657984dd8df 100644
+--- a/drivers/vhost/vdpa.c
++++ b/drivers/vhost/vdpa.c
+@@ -1134,6 +1134,7 @@ static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
+
+ err_attach:
+ iommu_domain_free(v->domain);
++ v->domain = NULL;
+ return ret;
+ }
+
+@@ -1178,6 +1179,7 @@ static void vhost_vdpa_cleanup(struct vhost_vdpa *v)
+ vhost_vdpa_remove_as(v, asid);
+ }
+
++ vhost_vdpa_free_domain(v);
+ vhost_dev_cleanup(&v->vdev);
+ kfree(v->vdev.vqs);
+ }
+@@ -1250,7 +1252,6 @@ static int vhost_vdpa_release(struct inode *inode, struct file *filep)
+ vhost_vdpa_clean_irq(v);
+ vhost_vdpa_reset(v);
+ vhost_dev_stop(&v->vdev);
+- vhost_vdpa_free_domain(v);
+ vhost_vdpa_config_put(v);
+ vhost_vdpa_cleanup(v);
+ mutex_unlock(&d->mutex);
+--
+2.39.2
+
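The ordering that the patch establishes can be summarized as: unmap
first (which may dereference v->domain), free the IOMMU domain last.
The sketch below is illustrative only; vhost_vdpa_remove_all_mappings()
is an invented stand-in for the per-ASID unmap loop, and the real
functions are static to drivers/vhost/vdpa.c.

/* Simplified cleanup ordering after the fix (not the literal code). */
static void vhost_vdpa_cleanup_sketch(struct vhost_vdpa *v)
{
	/*
	 * Unmapping may end up in iommu_unmap(v->domain, ...), so the
	 * domain must still be alive at this point.
	 */
	vhost_vdpa_remove_all_mappings(v);	/* invented helper */

	/* only now is it safe to drop the IOMMU domain */
	vhost_vdpa_free_domain(v);

	vhost_dev_cleanup(&v->vdev);
	kfree(v->vdev.vqs);
}

The patch also clears v->domain in the error path of
vhost_vdpa_alloc_domain(), so a later cleanup sees NULL instead of a
pointer to an already freed domain.
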
--- /dev/null
+From e0cdbfa9eda10c764951ce6f0e389409225421b9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 1 Mar 2023 12:09:33 +0200
+Subject: wifi: cfg80211: fix MLO connection ownership
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+[ Upstream commit 96c069508377547f913e7265a80fffe9355de592 ]
+
+When disconnecting from an MLO connection we need the AP
+MLD address, not an arbitrary BSSID. Fix the code to do
+that.
+
+Fixes: 9ecff10e82a5 ("wifi: nl80211: refactor BSS lookup in nl80211_associate()")
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Gregory Greenman <gregory.greenman@intel.com>
+Link: https://lore.kernel.org/r/20230301115906.4c1b3b18980e.I008f070c7f3b8e8bde9278101ef9e40706a82902@changeid
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/wireless/nl80211.c | 16 +++++++---------
+ 1 file changed, 7 insertions(+), 9 deletions(-)
+
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 4c6748aa6a1c1..7320d676ce3a5 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -10699,8 +10699,7 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
+
+ static struct cfg80211_bss *nl80211_assoc_bss(struct cfg80211_registered_device *rdev,
+ const u8 *ssid, int ssid_len,
+- struct nlattr **attrs,
+- const u8 **bssid_out)
++ struct nlattr **attrs)
+ {
+ struct ieee80211_channel *chan;
+ struct cfg80211_bss *bss;
+@@ -10727,7 +10726,6 @@ static struct cfg80211_bss *nl80211_assoc_bss(struct cfg80211_registered_device
+ if (!bss)
+ return ERR_PTR(-ENOENT);
+
+- *bssid_out = bssid;
+ return bss;
+ }
+
+@@ -10737,7 +10735,7 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
+ struct net_device *dev = info->user_ptr[1];
+ struct cfg80211_assoc_request req = {};
+ struct nlattr **attrs = NULL;
+- const u8 *bssid, *ssid;
++ const u8 *ap_addr, *ssid;
+ unsigned int link_id;
+ int err, ssid_len;
+
+@@ -10874,6 +10872,7 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
+ return -EINVAL;
+
+ req.ap_mld_addr = nla_data(info->attrs[NL80211_ATTR_MLD_ADDR]);
++ ap_addr = req.ap_mld_addr;
+
+ attrs = kzalloc(attrsize, GFP_KERNEL);
+ if (!attrs)
+@@ -10899,8 +10898,7 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
+ goto free;
+ }
+ req.links[link_id].bss =
+- nl80211_assoc_bss(rdev, ssid, ssid_len, attrs,
+- &bssid);
++ nl80211_assoc_bss(rdev, ssid, ssid_len, attrs);
+ if (IS_ERR(req.links[link_id].bss)) {
+ err = PTR_ERR(req.links[link_id].bss);
+ req.links[link_id].bss = NULL;
+@@ -10951,10 +10949,10 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
+ if (req.link_id >= 0)
+ return -EINVAL;
+
+- req.bss = nl80211_assoc_bss(rdev, ssid, ssid_len, info->attrs,
+- &bssid);
++ req.bss = nl80211_assoc_bss(rdev, ssid, ssid_len, info->attrs);
+ if (IS_ERR(req.bss))
+ return PTR_ERR(req.bss);
++ ap_addr = req.bss->bssid;
+ }
+
+ err = nl80211_crypto_settings(rdev, info, &req.crypto, 1);
+@@ -10967,7 +10965,7 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
+ dev->ieee80211_ptr->conn_owner_nlportid =
+ info->snd_portid;
+ memcpy(dev->ieee80211_ptr->disconnect_bssid,
+- bssid, ETH_ALEN);
++ ap_addr, ETH_ALEN);
+ }
+
+ wdev_unlock(dev->ieee80211_ptr);
+--
+2.39.2
+
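After the change, the address recorded for connection ownership depends
on whether the association is MLO. The fragment below is condensed from
the patched nl80211_associate() and omits the surrounding locking and
error handling.

	const u8 *ap_addr;

	if (info->attrs[NL80211_ATTR_MLD_ADDR]) {
		/* MLO: the connection is owned per AP MLD address */
		req.ap_mld_addr = nla_data(info->attrs[NL80211_ATTR_MLD_ADDR]);
		ap_addr = req.ap_mld_addr;
	} else {
		/* non-MLO: the BSSID of the single BSS identifies the AP */
		ap_addr = req.bss->bssid;
	}

	memcpy(dev->ieee80211_ptr->disconnect_bssid, ap_addr, ETH_ALEN);
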
--- /dev/null
+From 5cd5dfcc1d2078f0f07430cc3b314495cc4f7dd1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 1 Mar 2023 12:09:29 +0200
+Subject: wifi: nl80211: fix NULL-ptr deref in offchan check
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+[ Upstream commit f624bb6fad23df3270580b4fcef415c6e7bf7705 ]
+
+If, e.g. in AP mode, the link was already created by userspace
+but not yet activated, it has a chandef, but the chandef isn't
+valid and has no channel. Check for this and ignore such a link.
+
+Fixes: 7b0a0e3c3a88 ("wifi: cfg80211: do some rework towards MLO link APIs")
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Gregory Greenman <gregory.greenman@intel.com>
+Link: https://lore.kernel.org/r/20230301115906.71bd4803fbb9.Iee39c0f6c2d3a59a8227674dc55d52e38b1090cf@changeid
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/wireless/nl80211.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 4d4de49f7ab65..4c6748aa6a1c1 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -8815,7 +8815,7 @@ static bool cfg80211_off_channel_oper_allowed(struct wireless_dev *wdev,
+ struct cfg80211_chan_def *chandef;
+
+ chandef = wdev_chandef(wdev, link_id);
+- if (!chandef)
++ if (!chandef || !chandef->chan)
+ continue;
+
+ /*
+--
+2.39.2
+
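For context, the guard sits inside the per-link walk over the wdev; the
fragment below is a condensed sketch of the patched
cfg80211_off_channel_oper_allowed(), not a verbatim copy.

	struct cfg80211_chan_def *chandef;

	chandef = wdev_chandef(wdev, link_id);
	/*
	 * A link that userspace created but has not activated yet has a
	 * chandef with no channel; treat it like a missing chandef.
	 */
	if (!chandef || !chandef->chan)
		continue;
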
--- /dev/null
+From 51615d85071dccf940936689ba1f30f3510baa70 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 Feb 2023 13:54:00 +0800
+Subject: xfrm: Allow transport-mode states with AF_UNSPEC selector
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+[ Upstream commit c276a706ea1f51cf9723ed8484feceaf961b8f89 ]
+
+xfrm state selectors are matched against the inner-most flow,
+which can be of any address family. Therefore, middle states
+in nested configurations need to carry a wildcard selector in
+order to work at all.
+
+However, this is currently forbidden for transport-mode states.
+
+Fix this by removing the unnecessary check.
+
+Fixes: 13996378e658 ("[IPSEC]: Rename mode to outer_mode and add inner_mode")
+Reported-by: David George <David.George@sophos.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/xfrm/xfrm_state.c | 5 -----
+ 1 file changed, 5 deletions(-)
+
+diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
+index 0f88cb6fc3c22..2f4cf976b59a3 100644
+--- a/net/xfrm/xfrm_state.c
++++ b/net/xfrm/xfrm_state.c
+@@ -2649,11 +2649,6 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload,
+ goto error;
+ }
+
+- if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL)) {
+- NL_SET_ERR_MSG(extack, "Only tunnel modes can accommodate an AF_UNSPEC selector");
+- goto error;
+- }
+-
+ x->inner_mode = *inner_mode;
+
+ if (x->props.family == AF_INET)
+--
+2.39.2
+
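The nested case the commit message refers to is, for example, a
transport-mode state carried inside a tunnel-mode state: the middle
state's selector is matched against whatever inner flow eventually
shows up, so only the AF_UNSPEC wildcard can match all of them. The
toy model below is standalone and illustrative; the real matching is
done by xfrm_selector_match() in the kernel, and struct toy_selector
is an invented reduction of struct xfrm_selector.

#include <stdbool.h>
#include <sys/socket.h>		/* AF_UNSPEC, AF_INET, AF_INET6 */

/* Toy selector: only the address-family part of the real selector. */
struct toy_selector {
	int family;		/* AF_UNSPEC acts as a wildcard */
};

static bool toy_selector_match(const struct toy_selector *sel, int flow_family)
{
	if (sel->family == AF_UNSPEC)
		return true;	/* wildcard: matches any inner flow */
	return sel->family == flow_family;
}

int main(void)
{
	struct toy_selector middle = { .family = AF_UNSPEC };

	/* a middle transport-mode state must match both families */
	return !(toy_selector_match(&middle, AF_INET) &&
		 toy_selector_match(&middle, AF_INET6));
}
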