--- /dev/null
+From 9850ccd5dd88075b2b7fd28d96299d5535f58cc5 Mon Sep 17 00:00:00 2001
+From: Fedor Pchelkin <pchelkin@ispras.ru>
+Date: Wed, 20 Sep 2023 13:51:16 +0300
+Subject: dm zoned: free dmz->ddev array in dmz_put_zoned_devices
+
+From: Fedor Pchelkin <pchelkin@ispras.ru>
+
+commit 9850ccd5dd88075b2b7fd28d96299d5535f58cc5 upstream.
+
+Commit 4dba12881f88 ("dm zoned: support arbitrary number of devices")
+changed the pointers to additional zoned devices to be stored in a
+dynamically allocated dmz->ddev array. However, this array is never freed.
+
+Rename dmz_put_zoned_device to dmz_put_zoned_devices and fix it to
+free the dmz->ddev array when cleaning up zoned device information.
+Remove NULL assignment for all dmz->ddev elements and just free the
+dmz->ddev array instead.
+
+Found by Linux Verification Center (linuxtesting.org).
+
+Fixes: 4dba12881f88 ("dm zoned: support arbitrary number of devices")
+Cc: stable@vger.kernel.org
+Signed-off-by: Fedor Pchelkin <pchelkin@ispras.ru>
+Signed-off-by: Mike Snitzer <snitzer@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm-zoned-target.c | 15 +++++++--------
+ 1 file changed, 7 insertions(+), 8 deletions(-)
+
+--- a/drivers/md/dm-zoned-target.c
++++ b/drivers/md/dm-zoned-target.c
+@@ -748,17 +748,16 @@ err:
+ /*
+ * Cleanup zoned device information.
+ */
+-static void dmz_put_zoned_device(struct dm_target *ti)
++static void dmz_put_zoned_devices(struct dm_target *ti)
+ {
+ struct dmz_target *dmz = ti->private;
+ int i;
+
+- for (i = 0; i < dmz->nr_ddevs; i++) {
+- if (dmz->ddev[i]) {
++ for (i = 0; i < dmz->nr_ddevs; i++)
++ if (dmz->ddev[i])
+ dm_put_device(ti, dmz->ddev[i]);
+- dmz->ddev[i] = NULL;
+- }
+- }
++
++ kfree(dmz->ddev);
+ }
+
+ static int dmz_fixup_devices(struct dm_target *ti)
+@@ -948,7 +947,7 @@ err_bio:
+ err_meta:
+ dmz_dtr_metadata(dmz->metadata);
+ err_dev:
+- dmz_put_zoned_device(ti);
++ dmz_put_zoned_devices(ti);
+ err:
+ kfree(dmz->dev);
+ kfree(dmz);
+@@ -978,7 +977,7 @@ static void dmz_dtr(struct dm_target *ti
+
+ bioset_exit(&dmz->bio_set);
+
+- dmz_put_zoned_device(ti);
++ dmz_put_zoned_devices(ti);
+
+ mutex_destroy(&dmz->chunk_lock);
+
--- /dev/null
+From f9315f17bf778cb8079a29639419fcc8a41a3c84 Mon Sep 17 00:00:00 2001
+From: Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
+Date: Tue, 3 Oct 2023 09:39:26 +0200
+Subject: gpio: aspeed: fix the GPIO number passed to pinctrl_gpio_set_config()
+
+From: Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
+
+commit f9315f17bf778cb8079a29639419fcc8a41a3c84 upstream.
+
+pinctrl_gpio_set_config() expects the GPIO number from the global GPIO
+numberspace, not the controller-relative offset; the offset therefore
+needs to be added to the chip's GPIO base.
+
+Fixes: 5ae4cb94b313 ("gpio: aspeed: Add debounce support")
+Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
+Reviewed-by: Andy Shevchenko <andy@kernel.org>
+Reviewed-by: Andrew Jeffery <andrew@codeconstruct.com.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpio/gpio-aspeed.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpio/gpio-aspeed.c
++++ b/drivers/gpio/gpio-aspeed.c
+@@ -973,7 +973,7 @@ static int aspeed_gpio_set_config(struct
+ else if (param == PIN_CONFIG_BIAS_DISABLE ||
+ param == PIN_CONFIG_BIAS_PULL_DOWN ||
+ param == PIN_CONFIG_DRIVE_STRENGTH)
+- return pinctrl_gpio_set_config(offset, config);
++ return pinctrl_gpio_set_config(chip->base + offset, config);
+ else if (param == PIN_CONFIG_DRIVE_OPEN_DRAIN ||
+ param == PIN_CONFIG_DRIVE_OPEN_SOURCE)
+ /* Return -ENOTSUPP to trigger emulation, as per datasheet */
--- /dev/null
+From f0575116507b981e6a810e78ce3c9040395b958b Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Duje=20Mihanovi=C4=87?= <duje.mihanovic@skole.hr>
+Date: Fri, 29 Sep 2023 17:41:57 +0200
+Subject: gpio: pxa: disable pinctrl calls for MMP_GPIO
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Duje Mihanović <duje.mihanovic@skole.hr>
+
+commit f0575116507b981e6a810e78ce3c9040395b958b upstream.
+
+As on PXA3xx and MMP2, pinctrl-single isn't capable of setting the pin
+direction on MMP either.
+
+Fixes: a770d946371e ("gpio: pxa: add pin control gpio direction and request")
+Signed-off-by: Duje Mihanović <duje.mihanovic@skole.hr>
+Reviewed-by: Andy Shevchenko <andy@kernel.org>
+Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpio/gpio-pxa.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/gpio/gpio-pxa.c
++++ b/drivers/gpio/gpio-pxa.c
+@@ -238,6 +238,7 @@ static bool pxa_gpio_has_pinctrl(void)
+ switch (gpio_type) {
+ case PXA3XX_GPIO:
+ case MMP2_GPIO:
++ case MMP_GPIO:
+ return false;
+
+ default:
--- /dev/null
+From d7f393430a17c2bfcdf805462a5aa80be4285b27 Mon Sep 17 00:00:00 2001
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Date: Sat, 23 Sep 2023 07:55:56 +0200
+Subject: IB/mlx4: Fix the size of a buffer in add_port_entries()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+commit d7f393430a17c2bfcdf805462a5aa80be4285b27 upstream.
+
+In order to be sure that 'buff' is never truncated, its size should be
+12, not 11.
+
+When building with W=1, this fixes the following warnings:
+
+ drivers/infiniband/hw/mlx4/sysfs.c: In function ‘add_port_entries’:
+ drivers/infiniband/hw/mlx4/sysfs.c:268:34: error: ‘sprintf’ may write a terminating nul past the end of the destination [-Werror=format-overflow=]
+ 268 | sprintf(buff, "%d", i);
+ | ^
+ drivers/infiniband/hw/mlx4/sysfs.c:268:17: note: ‘sprintf’ output between 2 and 12 bytes into a destination of size 11
+ 268 | sprintf(buff, "%d", i);
+ | ^~~~~~~~~~~~~~~~~~~~~~
+ drivers/infiniband/hw/mlx4/sysfs.c:286:34: error: ‘sprintf’ may write a terminating nul past the end of the destination [-Werror=format-overflow=]
+ 286 | sprintf(buff, "%d", i);
+ | ^
+ drivers/infiniband/hw/mlx4/sysfs.c:286:17: note: ‘sprintf’ output between 2 and 12 bytes into a destination of size 11
+ 286 | sprintf(buff, "%d", i);
+ | ^~~~~~~~~~~~~~~~~~~~~~
+
+Fixes: c1e7e466120b ("IB/mlx4: Add iov directory in sysfs under the ib device")
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Link: https://lore.kernel.org/r/0bb1443eb47308bc9be30232cc23004c4d4cf43e.1695448530.git.christophe.jaillet@wanadoo.fr
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/hw/mlx4/sysfs.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/mlx4/sysfs.c
++++ b/drivers/infiniband/hw/mlx4/sysfs.c
+@@ -223,7 +223,7 @@ void del_sysfs_port_mcg_attr(struct mlx4
+ static int add_port_entries(struct mlx4_ib_dev *device, int port_num)
+ {
+ int i;
+- char buff[11];
++ char buff[12];
+ struct mlx4_ib_iov_port *port = NULL;
+ int ret = 0 ;
+ struct ib_port_attr attr;
--- /dev/null
+From 55e95bfccf6db8d26a66c46e1de50d53c59a6774 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@linaro.org>
+Date: Fri, 8 Sep 2023 10:03:50 +0300
+Subject: of: dynamic: Fix potential memory leak in of_changeset_action()
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+commit 55e95bfccf6db8d26a66c46e1de50d53c59a6774 upstream.
+
+Smatch complains that the error path where "action" is invalid leaks
+the "ce" allocation:
+ drivers/of/dynamic.c:935 of_changeset_action()
+ warn: possible memory leak of 'ce'
+
+Fix this by doing the validation before the allocation.
+
+Note that this is not an actual problem in upstream kernels: all
+callers of of_changeset_action() are static inlines with fixed action
+values.
+
+Fixes: 914d9d831e61 ("of: dynamic: Refactor action prints to not use "%pOF" inside devtree_lock")
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/r/202309011059.EOdr4im9-lkp@intel.com/
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Reviewed-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Link: https://lore.kernel.org/r/7dfaf999-30ad-491c-9615-fb1138db121c@moroto.mountain
+Signed-off-by: Rob Herring <robh@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/of/dynamic.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/of/dynamic.c
++++ b/drivers/of/dynamic.c
+@@ -927,13 +927,13 @@ int of_changeset_action(struct of_change
+ {
+ struct of_changeset_entry *ce;
+
++ if (WARN_ON(action >= ARRAY_SIZE(action_names)))
++ return -EINVAL;
++
+ ce = kzalloc(sizeof(*ce), GFP_KERNEL);
+ if (!ce)
+ return -ENOMEM;
+
+- if (WARN_ON(action >= ARRAY_SIZE(action_names)))
+- return -EINVAL;
+-
+ /* get a reference to the node */
+ ce->action = action;
+ ce->np = of_node_get(np);
--- /dev/null
+From d3b3c637e4eb8d3bbe53e5692aee66add72f9851 Mon Sep 17 00:00:00 2001
+From: Helge Deller <deller@gmx.de>
+Date: Tue, 19 Sep 2023 15:26:35 +0200
+Subject: parisc: Fix crash with nr_cpus=1 option
+
+From: Helge Deller <deller@gmx.de>
+
+commit d3b3c637e4eb8d3bbe53e5692aee66add72f9851 upstream.
+
+John David Anglin reported that giving "nr_cpus=1" on the command
+line causes a crash, while "maxcpus=1" works.
+
+Reported-by: John David Anglin <dave.anglin@bell.net>
+Signed-off-by: Helge Deller <deller@gmx.de>
+Cc: stable@vger.kernel.org # v5.18+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/parisc/kernel/smp.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/arch/parisc/kernel/smp.c
++++ b/arch/parisc/kernel/smp.c
+@@ -440,7 +440,9 @@ int __cpu_up(unsigned int cpu, struct ta
+ if (cpu_online(cpu))
+ return 0;
+
+- if (num_online_cpus() < setup_max_cpus && smp_boot_one_cpu(cpu, tidle))
++ if (num_online_cpus() < nr_cpu_ids &&
++ num_online_cpus() < setup_max_cpus &&
++ smp_boot_one_cpu(cpu, tidle))
+ return -EIO;
+
+ return cpu_online(cpu) ? 0 : -EIO;
--- /dev/null
+From 914988e099fc658436fbd7b8f240160c352b6552 Mon Sep 17 00:00:00 2001
+From: John David Anglin <dave@parisc-linux.org>
+Date: Tue, 19 Sep 2023 17:51:40 +0000
+Subject: parisc: Restore __ldcw_align for PA-RISC 2.0 processors
+
+From: John David Anglin <dave@parisc-linux.org>
+
+commit 914988e099fc658436fbd7b8f240160c352b6552 upstream.
+
+Back in 2005, Kyle McMartin removed the 16-byte alignment for
+ldcw semaphores on PA 2.0 machines (CONFIG_PA20). This broke
+spinlocks on pre-PA8800 processors. The main symptom was random
+faults in mmap'd memory (e.g., during gcc compilations).
+
+Unfortunately, the errata for this ldcw change are lost.
+
+The issue is that the 16-byte alignment required for ldcw semaphore
+instructions can only be reduced to natural alignment when the
+ldcw operation can be handled coherently in cache. Only PA8800
+and PA8900 processors actually support doing the operation in
+cache.
+
+Aligning the spinlock dynamically adds two integer instructions
+to each spinlock.
+
+Tested on rp3440, c8000 and a500.
+
+Signed-off-by: John David Anglin <dave.anglin@bell.net>
+Link: https://lore.kernel.org/linux-parisc/6b332788-2227-127f-ba6d-55e99ecf4ed8@bell.net/T/#t
+Link: https://lore.kernel.org/linux-parisc/20050609050702.GB4641@roadwarrior.mcmartin.ca/
+Cc: stable@vger.kernel.org
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/parisc/include/asm/ldcw.h | 37 ++++++++++++++++---------------
+ arch/parisc/include/asm/spinlock_types.h | 5 ----
+ 2 files changed, 20 insertions(+), 22 deletions(-)
+
+--- a/arch/parisc/include/asm/ldcw.h
++++ b/arch/parisc/include/asm/ldcw.h
+@@ -2,39 +2,42 @@
+ #ifndef __PARISC_LDCW_H
+ #define __PARISC_LDCW_H
+
+-#ifndef CONFIG_PA20
+ /* Because kmalloc only guarantees 8-byte alignment for kmalloc'd data,
+ and GCC only guarantees 8-byte alignment for stack locals, we can't
+ be assured of 16-byte alignment for atomic lock data even if we
+ specify "__attribute ((aligned(16)))" in the type declaration. So,
+ we use a struct containing an array of four ints for the atomic lock
+ type and dynamically select the 16-byte aligned int from the array
+- for the semaphore. */
++ for the semaphore. */
++
++/* From: "Jim Hull" <jim.hull of hp.com>
++ I've attached a summary of the change, but basically, for PA 2.0, as
++ long as the ",CO" (coherent operation) completer is implemented, then the
++ 16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
++ they only require "natural" alignment (4-byte for ldcw, 8-byte for
++ ldcd).
++
++ Although the cache control hint is accepted by all PA 2.0 processors,
++ it is only implemented on PA8800/PA8900 CPUs. Prior PA8X00 CPUs still
++ require 16-byte alignment. If the address is unaligned, the operation
++ of the instruction is undefined. The ldcw instruction does not generate
++ unaligned data reference traps so misaligned accesses are not detected.
++ This hid the problem for years. So, restore the 16-byte alignment dropped
++ by Kyle McMartin in "Remove __ldcw_align for PA-RISC 2.0 processors". */
+
+ #define __PA_LDCW_ALIGNMENT 16
+-#define __PA_LDCW_ALIGN_ORDER 4
+ #define __ldcw_align(a) ({ \
+ unsigned long __ret = (unsigned long) &(a)->lock[0]; \
+ __ret = (__ret + __PA_LDCW_ALIGNMENT - 1) \
+ & ~(__PA_LDCW_ALIGNMENT - 1); \
+ (volatile unsigned int *) __ret; \
+ })
+-#define __LDCW "ldcw"
+
+-#else /*CONFIG_PA20*/
+-/* From: "Jim Hull" <jim.hull of hp.com>
+- I've attached a summary of the change, but basically, for PA 2.0, as
+- long as the ",CO" (coherent operation) completer is specified, then the
+- 16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
+- they only require "natural" alignment (4-byte for ldcw, 8-byte for
+- ldcd). */
+-
+-#define __PA_LDCW_ALIGNMENT 4
+-#define __PA_LDCW_ALIGN_ORDER 2
+-#define __ldcw_align(a) (&(a)->slock)
++#ifdef CONFIG_PA20
+ #define __LDCW "ldcw,co"
+-
+-#endif /*!CONFIG_PA20*/
++#else
++#define __LDCW "ldcw"
++#endif
+
+ /* LDCW, the only atomic read-write operation PA-RISC has. *sigh*.
+ We don't explicitly expose that "*a" may be written as reload
+--- a/arch/parisc/include/asm/spinlock_types.h
++++ b/arch/parisc/include/asm/spinlock_types.h
+@@ -9,15 +9,10 @@
+ #ifndef __ASSEMBLY__
+
+ typedef struct {
+-#ifdef CONFIG_PA20
+- volatile unsigned int slock;
+-# define __ARCH_SPIN_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED_VAL }
+-#else
+ volatile unsigned int lock[4];
+ # define __ARCH_SPIN_LOCK_UNLOCKED \
+ { { __ARCH_SPIN_LOCK_UNLOCKED_VAL, __ARCH_SPIN_LOCK_UNLOCKED_VAL, \
+ __ARCH_SPIN_LOCK_UNLOCKED_VAL, __ARCH_SPIN_LOCK_UNLOCKED_VAL } }
+-#endif
+ } arch_spinlock_t;
+
+
--- /dev/null
+From 9fc5f9a92fe6897dbed7b9295b234cb7e3cc9d11 Mon Sep 17 00:00:00 2001
+From: Selvin Xavier <selvin.xavier@broadcom.com>
+Date: Wed, 20 Sep 2023 01:41:19 -0700
+Subject: RDMA/bnxt_re: Fix the handling of control path response data
+
+From: Selvin Xavier <selvin.xavier@broadcom.com>
+
+commit 9fc5f9a92fe6897dbed7b9295b234cb7e3cc9d11 upstream.
+
+The flag that indicates control path command completion should be
+cleared only after copying the command response data. As soon as the
+is_in_used flag is cleared, the waiting thread can proceed with wrong
+response data. This wrong data causes multiple issues, such as a wrong
+lkey being used in data traffic and a wrong AH id.
+
+Use a memory barrier to ensure that the response data
+is copied and visible to the process waiting on a different
+CPU core before clearing the is_in_used flag.
+
+Clear is_in_used only after copying the command response.
+
+Fixes: bcfee4ce3e01 ("RDMA/bnxt_re: remove redundant cmdq_bitmap")
+Signed-off-by: Saravanan Vajravel <saravanan.vajravel@broadcom.com>
+Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
+Link: https://lore.kernel.org/r/1695199280-13520-2-git-send-email-selvin.xavier@broadcom.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/hw/bnxt_re/qplib_rcfw.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+@@ -664,7 +664,6 @@ static int bnxt_qplib_process_qp_event(s
+ blocked = cookie & RCFW_CMD_IS_BLOCKING;
+ cookie &= RCFW_MAX_COOKIE_VALUE;
+ crsqe = &rcfw->crsqe_tbl[cookie];
+- crsqe->is_in_used = false;
+
+ if (WARN_ONCE(test_bit(FIRMWARE_STALL_DETECTED,
+ &rcfw->cmdq.flags),
+@@ -680,8 +679,14 @@ static int bnxt_qplib_process_qp_event(s
+ atomic_dec(&rcfw->timeout_send);
+
+ if (crsqe->is_waiter_alive) {
+- if (crsqe->resp)
++ if (crsqe->resp) {
+ memcpy(crsqe->resp, qp_event, sizeof(*qp_event));
++ /* Insert write memory barrier to ensure that
++ * response data is copied before clearing the
++ * flags
++ */
++ smp_wmb();
++ }
+ if (!blocked)
+ wait_cmds++;
+ }
+@@ -693,6 +698,8 @@ static int bnxt_qplib_process_qp_event(s
+ if (!is_waiter_alive)
+ crsqe->resp = NULL;
+
++ crsqe->is_in_used = false;
++
+ hwq->cons += req_size;
+
+ /* This is a case to handle below scenario -
--- /dev/null
+From 18126c767658ae8a831257c6cb7776c5ba5e7249 Mon Sep 17 00:00:00 2001
+From: Leon Romanovsky <leonro@nvidia.com>
+Date: Mon, 11 Sep 2023 15:18:06 +0300
+Subject: RDMA/cma: Fix truncation compilation warning in make_cma_ports
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Leon Romanovsky <leonro@nvidia.com>
+
+commit 18126c767658ae8a831257c6cb7776c5ba5e7249 upstream.
+
+The following compilation error is a false alarm, as RDMA devices don't
+have a large enough number of ports to actually cause format truncation.
+
+drivers/infiniband/core/cma_configfs.c: In function ‘make_cma_ports’:
+drivers/infiniband/core/cma_configfs.c:223:57: error: ‘snprintf’ output may be truncated before the last format character [-Werror=format-truncation=]
+ 223 | snprintf(port_str, sizeof(port_str), "%u", i + 1);
+ | ^
+drivers/infiniband/core/cma_configfs.c:223:17: note: ‘snprintf’ output between 2 and 11 bytes into a destination of size 10
+ 223 | snprintf(port_str, sizeof(port_str), "%u", i + 1);
+ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+cc1: all warnings being treated as errors
+make[5]: *** [scripts/Makefile.build:243: drivers/infiniband/core/cma_configfs.o] Error 1
+
+Fixes: 045959db65c6 ("IB/cma: Add configfs for rdma_cm")
+Link: https://lore.kernel.org/r/a7e3b347ee134167fa6a3787c56ef231a04bc8c2.1694434639.git.leonro@nvidia.com
+Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/core/cma_configfs.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/core/cma_configfs.c
++++ b/drivers/infiniband/core/cma_configfs.c
+@@ -217,7 +217,7 @@ static int make_cma_ports(struct cma_dev
+ return -ENOMEM;
+
+ for (i = 0; i < ports_num; i++) {
+- char port_str[10];
++ char port_str[11];
+
+ ports[i].port_num = i + 1;
+ snprintf(port_str, sizeof(port_str), "%u", i + 1);
--- /dev/null
+From e0fe97efdb00f0f32b038a4836406a82886aec9c Mon Sep 17 00:00:00 2001
+From: Mark Zhang <markzhang@nvidia.com>
+Date: Wed, 27 Sep 2023 12:05:11 +0300
+Subject: RDMA/cma: Initialize ib_sa_multicast structure to 0 when join
+
+From: Mark Zhang <markzhang@nvidia.com>
+
+commit e0fe97efdb00f0f32b038a4836406a82886aec9c upstream.
+
+Initialize the structure to 0 so that its fields won't have random
+values. For example, fields like rec.traffic_class (as well as
+rec.flow_label and rec.sl) are used to generate the user AH through:
+ cma_iboe_join_multicast
+ cma_make_mc_event
+ ib_init_ah_from_mcmember
+
+And a random traffic_class causes a random IP DSCP in RoCEv2.
+
+Fixes: b5de0c60cc30 ("RDMA/cma: Fix use after free race in roce multicast join")
+Signed-off-by: Mark Zhang <markzhang@nvidia.com>
+Link: https://lore.kernel.org/r/20230927090511.603595-1-markzhang@nvidia.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/core/cma.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -4946,7 +4946,7 @@ static int cma_iboe_join_multicast(struc
+ int err = 0;
+ struct sockaddr *addr = (struct sockaddr *)&mc->addr;
+ struct net_device *ndev = NULL;
+- struct ib_sa_multicast ib;
++ struct ib_sa_multicast ib = {};
+ enum ib_gid_type gid_type;
+ bool send_only;
+
--- /dev/null
+From c38d23a54445f9a8aa6831fafc9af0496ba02f9e Mon Sep 17 00:00:00 2001
+From: Leon Romanovsky <leonro@nvidia.com>
+Date: Wed, 4 Oct 2023 21:17:49 +0300
+Subject: RDMA/core: Require admin capabilities to set system parameters
+
+From: Leon Romanovsky <leonro@nvidia.com>
+
+commit c38d23a54445f9a8aa6831fafc9af0496ba02f9e upstream.
+
+Like any other set command, require admin permissions to do it.
+
+Cc: stable@vger.kernel.org
+Fixes: 2b34c5580226 ("RDMA/core: Add command to set ib_core device net namspace sharing mode")
+Link: https://lore.kernel.org/r/75d329fdd7381b52cbdf87910bef16c9965abb1f.1696443438.git.leon@kernel.org
+Reviewed-by: Parav Pandit <parav@nvidia.com>
+Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/core/nldev.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/infiniband/core/nldev.c
++++ b/drivers/infiniband/core/nldev.c
+@@ -2529,6 +2529,7 @@ static const struct rdma_nl_cbs nldev_cb
+ },
+ [RDMA_NLDEV_CMD_SYS_SET] = {
+ .doit = nldev_set_sys_set_doit,
++ .flags = RDMA_NL_ADMIN_PERM,
+ },
+ [RDMA_NLDEV_CMD_STAT_SET] = {
+ .doit = nldev_stat_set_doit,
--- /dev/null
+From 4f14c6c0213e1def48f0f887d35f44095416c67d Mon Sep 17 00:00:00 2001
+From: Michael Guralnik <michaelgur@nvidia.com>
+Date: Wed, 20 Sep 2023 13:01:54 +0300
+Subject: RDMA/mlx5: Fix assigning access flags to cache mkeys
+
+From: Michael Guralnik <michaelgur@nvidia.com>
+
+commit 4f14c6c0213e1def48f0f887d35f44095416c67d upstream.
+
+After the change to use a dynamic cache structure, new cache entries
+can be added and the mkey allocation can no longer assume that all
+mkeys created for the cache have access_flags equal to zero.
+
+Example of a flow that exposes the issue:
+A user registers an MR with RO on an HCA that cannot UMR RO and the mkey is
+created outside of the cache. When the user deregisters the MR, a new
+cache entry is created to store mkeys with RO.
+
+Later, the user registers 2 MRs with RO. The first MR is reused from the
+new cache entry. When we try to get the second mkey from the cache we see
+the entry is empty, so we go to the MR cache mkey allocation flow, which
+would have allocated an mkey with no access flags, resulting in the user
+getting an MR without RO.
+
+Fixes: dd1b913fb0d0 ("RDMA/mlx5: Cache all user cacheable mkeys on dereg MR flow")
+Reviewed-by: Edward Srouji <edwards@nvidia.com>
+Signed-off-by: Michael Guralnik <michaelgur@nvidia.com>
+Link: https://lore.kernel.org/r/8a802700b82def3ace3f77cd7a9ad9d734af87e7.1695203958.git.leonro@nvidia.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/hw/mlx5/mr.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -301,7 +301,8 @@ static int get_mkc_octo_size(unsigned in
+
+ static void set_cache_mkc(struct mlx5_cache_ent *ent, void *mkc)
+ {
+- set_mkc_access_pd_addr_fields(mkc, 0, 0, ent->dev->umrc.pd);
++ set_mkc_access_pd_addr_fields(mkc, ent->rb_key.access_flags, 0,
++ ent->dev->umrc.pd);
+ MLX5_SET(mkc, mkc, free, 1);
+ MLX5_SET(mkc, mkc, umr_en, 1);
+ MLX5_SET(mkc, mkc, access_mode_1_0, ent->rb_key.access_mode & 0x3);
--- /dev/null
+From 374012b0045780b7ad498be62e85153009bb7fe9 Mon Sep 17 00:00:00 2001
+From: Shay Drory <shayd@nvidia.com>
+Date: Tue, 12 Sep 2023 13:07:45 +0300
+Subject: RDMA/mlx5: Fix mkey cache possible deadlock on cleanup
+
+From: Shay Drory <shayd@nvidia.com>
+
+commit 374012b0045780b7ad498be62e85153009bb7fe9 upstream.
+
+Fix the deadlock by refactoring the MR cache cleanup flow to flush the
+workqueue without holding the rb_lock.
+This adds a race between cache cleanup and creation of new entries, which
+we solve by denying creation of new entries after cache cleanup has started.
+
+Lockdep:
+WARNING: possible circular locking dependency detected
+ [ 2785.326074 ] 6.2.0-rc6_for_upstream_debug_2023_01_31_14_02 #1 Not tainted
+ [ 2785.339778 ] ------------------------------------------------------
+ [ 2785.340848 ] devlink/53872 is trying to acquire lock:
+ [ 2785.341701 ] ffff888124f8c0c8 ((work_completion)(&(&ent->dwork)->work)){+.+.}-{0:0}, at: __flush_work+0xc8/0x900
+ [ 2785.343403 ]
+ [ 2785.343403 ] but task is already holding lock:
+ [ 2785.344464 ] ffff88817e8f1260 (&dev->cache.rb_lock){+.+.}-{3:3}, at: mlx5_mkey_cache_cleanup+0x77/0x250 [mlx5_ib]
+ [ 2785.346273 ]
+ [ 2785.346273 ] which lock already depends on the new lock.
+ [ 2785.346273 ]
+ [ 2785.347720 ]
+ [ 2785.347720 ] the existing dependency chain (in reverse order) is:
+ [ 2785.349003 ]
+ [ 2785.349003 ] -> #1 (&dev->cache.rb_lock){+.+.}-{3:3}:
+ [ 2785.350160 ] __mutex_lock+0x14c/0x15c0
+ [ 2785.350962 ] delayed_cache_work_func+0x2d1/0x610 [mlx5_ib]
+ [ 2785.352044 ] process_one_work+0x7c2/0x1310
+ [ 2785.352879 ] worker_thread+0x59d/0xec0
+ [ 2785.353636 ] kthread+0x28f/0x330
+ [ 2785.354370 ] ret_from_fork+0x1f/0x30
+ [ 2785.355135 ]
+ [ 2785.355135 ] -> #0 ((work_completion)(&(&ent->dwork)->work)){+.+.}-{0:0}:
+ [ 2785.356515 ] __lock_acquire+0x2d8a/0x5fe0
+ [ 2785.357349 ] lock_acquire+0x1c1/0x540
+ [ 2785.358121 ] __flush_work+0xe8/0x900
+ [ 2785.358852 ] __cancel_work_timer+0x2c7/0x3f0
+ [ 2785.359711 ] mlx5_mkey_cache_cleanup+0xfb/0x250 [mlx5_ib]
+ [ 2785.360781 ] mlx5_ib_stage_pre_ib_reg_umr_cleanup+0x16/0x30 [mlx5_ib]
+ [ 2785.361969 ] __mlx5_ib_remove+0x68/0x120 [mlx5_ib]
+ [ 2785.362960 ] mlx5r_remove+0x63/0x80 [mlx5_ib]
+ [ 2785.363870 ] auxiliary_bus_remove+0x52/0x70
+ [ 2785.364715 ] device_release_driver_internal+0x3c1/0x600
+ [ 2785.365695 ] bus_remove_device+0x2a5/0x560
+ [ 2785.366525 ] device_del+0x492/0xb80
+ [ 2785.367276 ] mlx5_detach_device+0x1a9/0x360 [mlx5_core]
+ [ 2785.368615 ] mlx5_unload_one_devl_locked+0x5a/0x110 [mlx5_core]
+ [ 2785.369934 ] mlx5_devlink_reload_down+0x292/0x580 [mlx5_core]
+ [ 2785.371292 ] devlink_reload+0x439/0x590
+ [ 2785.372075 ] devlink_nl_cmd_reload+0xaef/0xff0
+ [ 2785.372973 ] genl_family_rcv_msg_doit.isra.0+0x1bd/0x290
+ [ 2785.374011 ] genl_rcv_msg+0x3ca/0x6c0
+ [ 2785.374798 ] netlink_rcv_skb+0x12c/0x360
+ [ 2785.375612 ] genl_rcv+0x24/0x40
+ [ 2785.376295 ] netlink_unicast+0x438/0x710
+ [ 2785.377121 ] netlink_sendmsg+0x7a1/0xca0
+ [ 2785.377926 ] sock_sendmsg+0xc5/0x190
+ [ 2785.378668 ] __sys_sendto+0x1bc/0x290
+ [ 2785.379440 ] __x64_sys_sendto+0xdc/0x1b0
+ [ 2785.380255 ] do_syscall_64+0x3d/0x90
+ [ 2785.381031 ] entry_SYSCALL_64_after_hwframe+0x46/0xb0
+ [ 2785.381967 ]
+ [ 2785.381967 ] other info that might help us debug this:
+ [ 2785.381967 ]
+ [ 2785.383448 ] Possible unsafe locking scenario:
+ [ 2785.383448 ]
+ [ 2785.384544 ] CPU0 CPU1
+ [ 2785.385383 ] ---- ----
+ [ 2785.386193 ] lock(&dev->cache.rb_lock);
+ [ 2785.386940 ] lock((work_completion)(&(&ent->dwork)->work));
+ [ 2785.388327 ] lock(&dev->cache.rb_lock);
+ [ 2785.389425 ] lock((work_completion)(&(&ent->dwork)->work));
+ [ 2785.390414 ]
+ [ 2785.390414 ] *** DEADLOCK ***
+ [ 2785.390414 ]
+ [ 2785.391579 ] 6 locks held by devlink/53872:
+ [ 2785.392341 ] #0: ffffffff84c17a50 (cb_lock){++++}-{3:3}, at: genl_rcv+0x15/0x40
+ [ 2785.393630 ] #1: ffff888142280218 (&devlink->lock_key){+.+.}-{3:3}, at: devlink_get_from_attrs_lock+0x12d/0x2d0
+ [ 2785.395324 ] #2: ffff8881422d3c38 (&dev->lock_key){+.+.}-{3:3}, at: mlx5_unload_one_devl_locked+0x4a/0x110 [mlx5_core]
+ [ 2785.397322 ] #3: ffffffffa0e59068 (mlx5_intf_mutex){+.+.}-{3:3}, at: mlx5_detach_device+0x60/0x360 [mlx5_core]
+ [ 2785.399231 ] #4: ffff88810e3cb0e8 (&dev->mutex){....}-{3:3}, at: device_release_driver_internal+0x8d/0x600
+ [ 2785.400864 ] #5: ffff88817e8f1260 (&dev->cache.rb_lock){+.+.}-{3:3}, at: mlx5_mkey_cache_cleanup+0x77/0x250 [mlx5_ib]
+
+Fixes: b95845178328 ("RDMA/mlx5: Change the cache structure to an RB-tree")
+Signed-off-by: Shay Drory <shayd@nvidia.com>
+Signed-off-by: Michael Guralnik <michaelgur@nvidia.com>
+Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/hw/mlx5/mlx5_ib.h | 1 +
+ drivers/infiniband/hw/mlx5/mr.c | 16 ++++++++++++++--
+ 2 files changed, 15 insertions(+), 2 deletions(-)
+
+--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
++++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
+@@ -797,6 +797,7 @@ struct mlx5_mkey_cache {
+ struct dentry *fs_root;
+ unsigned long last_add;
+ struct delayed_work remove_ent_dwork;
++ u8 disable: 1;
+ };
+
+ struct mlx5_ib_port_resources {
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -1024,19 +1024,27 @@ void mlx5_mkey_cache_cleanup(struct mlx5
+ if (!dev->cache.wq)
+ return;
+
+- cancel_delayed_work_sync(&dev->cache.remove_ent_dwork);
+ mutex_lock(&dev->cache.rb_lock);
++ dev->cache.disable = true;
+ for (node = rb_first(root); node; node = rb_next(node)) {
+ ent = rb_entry(node, struct mlx5_cache_ent, node);
+ xa_lock_irq(&ent->mkeys);
+ ent->disabled = true;
+ xa_unlock_irq(&ent->mkeys);
+- cancel_delayed_work_sync(&ent->dwork);
+ }
++ mutex_unlock(&dev->cache.rb_lock);
++
++ /*
++ * After all entries are disabled and will not reschedule on WQ,
++ * flush it and all async commands.
++ */
++ flush_workqueue(dev->cache.wq);
+
+ mlx5_mkey_cache_debugfs_cleanup(dev);
+ mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);
+
++ /* At this point all entries are disabled and have no concurrent work. */
++ mutex_lock(&dev->cache.rb_lock);
+ node = rb_first(root);
+ while (node) {
+ ent = rb_entry(node, struct mlx5_cache_ent, node);
+@@ -1815,6 +1823,10 @@ static int cache_ent_find_and_store(stru
+ }
+
+ mutex_lock(&cache->rb_lock);
++ if (cache->disable) {
++ mutex_unlock(&cache->rb_lock);
++ return 0;
++ }
+ ent = mkey_cache_ent_from_rb_key(dev, mr->mmkey.rb_key);
+ if (ent) {
+ if (ent->rb_key.ndescs == mr->mmkey.rb_key.ndescs) {
--- /dev/null
+From 2fad8f06a582cd431d398a0b3f9be21d069603ab Mon Sep 17 00:00:00 2001
+From: Hamdan Igbaria <hamdani@nvidia.com>
+Date: Wed, 20 Sep 2023 13:01:55 +0300
+Subject: RDMA/mlx5: Fix mutex unlocking on error flow for steering anchor creation
+
+From: Hamdan Igbaria <hamdani@nvidia.com>
+
+commit 2fad8f06a582cd431d398a0b3f9be21d069603ab upstream.
+
+The mutex was not unlocked on some of the error flows.
+Move the unlock location so that it covers all the error flow scenarios.
+
+Fixes: e1f4a52ac171 ("RDMA/mlx5: Create an indirect flow table for steering anchor")
+Reviewed-by: Mark Bloch <mbloch@nvidia.com>
+Signed-off-by: Hamdan Igbaria <hamdani@nvidia.com>
+Link: https://lore.kernel.org/r/1244a69d783da997c0af0b827c622eb00495492e.1695203958.git.leonro@nvidia.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/hw/mlx5/fs.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/mlx5/fs.c
++++ b/drivers/infiniband/hw/mlx5/fs.c
+@@ -2470,8 +2470,8 @@ destroy_res:
+ mlx5_steering_anchor_destroy_res(ft_prio);
+ put_flow_table:
+ put_flow_table(dev, ft_prio, true);
+- mutex_unlock(&dev->flow_db->lock);
+ free_obj:
++ mutex_unlock(&dev->flow_db->lock);
+ kfree(obj);
+
+ return err;
--- /dev/null
+From dab994bcc609a172bfdab15a0d4cb7e50e8b5458 Mon Sep 17 00:00:00 2001
+From: Shay Drory <shayd@nvidia.com>
+Date: Wed, 20 Sep 2023 13:01:56 +0300
+Subject: RDMA/mlx5: Fix NULL string error
+
+From: Shay Drory <shayd@nvidia.com>
+
+commit dab994bcc609a172bfdab15a0d4cb7e50e8b5458 upstream.
+
+checkpatch complains about returning a NULL string; change it to 'Unknown'.
+
+Fixes: 37aa5c36aa70 ("IB/mlx5: Add UARs write-combining and non-cached mapping")
+Signed-off-by: Shay Drory <shayd@nvidia.com>
+Link: https://lore.kernel.org/r/8638e5c14fadbde5fa9961874feae917073af920.1695203958.git.leonro@nvidia.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/hw/mlx5/main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -2070,7 +2070,7 @@ static inline char *mmap_cmd2str(enum ml
+ case MLX5_IB_MMAP_DEVICE_MEM:
+ return "Device Memory";
+ default:
+- return NULL;
++ return "Unknown";
+ }
+ }
+
--- /dev/null
+From 53a3f777049771496f791504e7dc8ef017cba590 Mon Sep 17 00:00:00 2001
+From: Bernard Metzler <bmt@zurich.ibm.com>
+Date: Tue, 5 Sep 2023 16:58:22 +0200
+Subject: RDMA/siw: Fix connection failure handling
+
+From: Bernard Metzler <bmt@zurich.ibm.com>
+
+commit 53a3f777049771496f791504e7dc8ef017cba590 upstream.
+
+In case immediate MPA request processing fails, the newly
+created endpoint unlinks the listening endpoint and is
+ready to be dropped. This special case was not handled
+correctly by the code handling the later TCP socket close,
+causing a NULL dereference crash in siw_cm_work_handler()
+when dereferencing a NULL listener. We now also cancel
+the useless MPA timeout if immediate MPA request
+processing fails.
+
+This patch furthermore simplifies MPA processing in general:
+scheduling a useless TCP socket read in the sk_data_ready() upcall
+is now suppressed if the socket has already moved out of
+TCP_ESTABLISHED state.
+
+Fixes: 6c52fdc244b5 ("rdma/siw: connection management")
+Signed-off-by: Bernard Metzler <bmt@zurich.ibm.com>
+Link: https://lore.kernel.org/r/20230905145822.446263-1-bmt@zurich.ibm.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/sw/siw/siw_cm.c | 16 ++++++++++++----
+ 1 file changed, 12 insertions(+), 4 deletions(-)
+
+--- a/drivers/infiniband/sw/siw/siw_cm.c
++++ b/drivers/infiniband/sw/siw/siw_cm.c
+@@ -976,6 +976,7 @@ static void siw_accept_newconn(struct si
+ siw_cep_put(cep);
+ new_cep->listen_cep = NULL;
+ if (rv) {
++ siw_cancel_mpatimer(new_cep);
+ siw_cep_set_free(new_cep);
+ goto error;
+ }
+@@ -1100,9 +1101,12 @@ static void siw_cm_work_handler(struct w
+ /*
+ * Socket close before MPA request received.
+ */
+- siw_dbg_cep(cep, "no mpareq: drop listener\n");
+- siw_cep_put(cep->listen_cep);
+- cep->listen_cep = NULL;
++ if (cep->listen_cep) {
++ siw_dbg_cep(cep,
++ "no mpareq: drop listener\n");
++ siw_cep_put(cep->listen_cep);
++ cep->listen_cep = NULL;
++ }
+ }
+ }
+ release_cep = 1;
+@@ -1227,7 +1231,11 @@ static void siw_cm_llp_data_ready(struct
+ if (!cep)
+ goto out;
+
+- siw_dbg_cep(cep, "state: %d\n", cep->state);
++ siw_dbg_cep(cep, "cep state: %d, socket state %d\n",
++ cep->state, sk->sk_state);
++
++ if (sk->sk_state != TCP_ESTABLISHED)
++ goto out;
+
+ switch (cep->state) {
+ case SIW_EPSTATE_RDMA_MODE:
--- /dev/null
+From e193b7955dfad68035b983a0011f4ef3590c85eb Mon Sep 17 00:00:00 2001
+From: Bart Van Assche <bvanassche@acm.org>
+Date: Wed, 23 Aug 2023 13:57:27 -0700
+Subject: RDMA/srp: Do not call scsi_done() from srp_abort()
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+commit e193b7955dfad68035b983a0011f4ef3590c85eb upstream.
+
+After scmd_eh_abort_handler() has called the SCSI LLD eh_abort_handler
+callback, it performs one of the following actions:
+* Call scsi_queue_insert().
+* Call scsi_finish_command().
+* Call scsi_eh_scmd_add().
+Hence, SCSI abort handlers must not call scsi_done(). Otherwise all
+the above actions would trigger a use-after-free. Hence remove the
+scsi_done() call from srp_abort(). Keep the srp_free_req() call
+before returning SUCCESS because we may not see the command again if
+SUCCESS is returned.
+
+Cc: Bob Pearson <rpearsonhpe@gmail.com>
+Cc: Shinichiro Kawasaki <shinichiro.kawasaki@wdc.com>
+Fixes: d8536670916a ("IB/srp: Avoid having aborted requests hang")
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Link: https://lore.kernel.org/r/20230823205727.505681-1-bvanassche@acm.org
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/ulp/srp/ib_srp.c | 16 +++++-----------
+ 1 file changed, 5 insertions(+), 11 deletions(-)
+
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -2784,7 +2784,6 @@ static int srp_abort(struct scsi_cmnd *s
+ u32 tag;
+ u16 ch_idx;
+ struct srp_rdma_ch *ch;
+- int ret;
+
+ shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
+
+@@ -2798,19 +2797,14 @@ static int srp_abort(struct scsi_cmnd *s
+ shost_printk(KERN_ERR, target->scsi_host,
+ "Sending SRP abort for tag %#x\n", tag);
+ if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
+- SRP_TSK_ABORT_TASK, NULL) == 0)
+- ret = SUCCESS;
+- else if (target->rport->state == SRP_RPORT_LOST)
+- ret = FAST_IO_FAIL;
+- else
+- ret = FAILED;
+- if (ret == SUCCESS) {
++ SRP_TSK_ABORT_TASK, NULL) == 0) {
+ srp_free_req(ch, req, scmnd, 0);
+- scmnd->result = DID_ABORT << 16;
+- scsi_done(scmnd);
++ return SUCCESS;
+ }
++ if (target->rport->state == SRP_RPORT_LOST)
++ return FAST_IO_FAIL;
+
+- return ret;
++ return FAILED;
+ }
+
+ static int srp_reset_device(struct scsi_cmnd *scmnd)
--- /dev/null
+From c489800e0d48097fc6afebd862c6afa039110a36 Mon Sep 17 00:00:00 2001
+From: Konstantin Meskhidze <konstantin.meskhidze@huawei.com>
+Date: Tue, 5 Sep 2023 18:32:58 +0800
+Subject: RDMA/uverbs: Fix typo of sizeof argument
+
+From: Konstantin Meskhidze <konstantin.meskhidze@huawei.com>
+
+commit c489800e0d48097fc6afebd862c6afa039110a36 upstream.
+
+Since the sizes of the 'hdr' pointer and the '*hdr' structure are equal
+on 64-bit machines, the issue probably didn't cause any wrong behavior.
+The typo nevertheless needs to be fixed.
+
+Fixes: da0f60df7bd5 ("RDMA/uverbs: Prohibit write() calls with too small buffers")
+Co-developed-by: Ivanov Mikhail <ivanov.mikhail1@huawei-partners.com>
+Signed-off-by: Ivanov Mikhail <ivanov.mikhail1@huawei-partners.com>
+Signed-off-by: Konstantin Meskhidze <konstantin.meskhidze@huawei.com>
+Link: https://lore.kernel.org/r/20230905103258.1738246-1-konstantin.meskhidze@huawei.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/core/uverbs_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/core/uverbs_main.c
++++ b/drivers/infiniband/core/uverbs_main.c
+@@ -535,7 +535,7 @@ static ssize_t verify_hdr(struct ib_uver
+ if (hdr->in_words * 4 != count)
+ return -EINVAL;
+
+- if (count < method_elm->req_size + sizeof(hdr)) {
++ if (count < method_elm->req_size + sizeof(*hdr)) {
+ /*
+ * rdma-core v18 and v19 have a bug where they send DESTROY_CQ
+ * with a 16 byte write instead of 24. Old kernels didn't
hid-sony-remove-duplicate-null-check-before-calling-.patch
hid-intel-ish-hid-ipc-disable-and-reenable-acpi-gpe-.patch
net-lan743x-also-select-phylib.patch
+parisc-restore-__ldcw_align-for-pa-risc-2.0-processors.patch
+smb-use-kernel_connect-and-kernel_bind.patch
+parisc-fix-crash-with-nr_cpus-1-option.patch
+dm-zoned-free-dmz-ddev-array-in-dmz_put_zoned_devices.patch
+rdma-core-require-admin-capabilities-to-set-system-parameters.patch
+of-dynamic-fix-potential-memory-leak-in-of_changeset_action.patch
+ib-mlx4-fix-the-size-of-a-buffer-in-add_port_entries.patch
+gpio-aspeed-fix-the-gpio-number-passed-to-pinctrl_gpio_set_config.patch
+gpio-pxa-disable-pinctrl-calls-for-mmp_gpio.patch
+rdma-cma-initialize-ib_sa_multicast-structure-to-0-when-join.patch
+rdma-cma-fix-truncation-compilation-warning-in-make_cma_ports.patch
+rdma-bnxt_re-fix-the-handling-of-control-path-response-data.patch
+rdma-uverbs-fix-typo-of-sizeof-argument.patch
+rdma-srp-do-not-call-scsi_done-from-srp_abort.patch
+rdma-siw-fix-connection-failure-handling.patch
+rdma-mlx5-fix-mkey-cache-possible-deadlock-on-cleanup.patch
+rdma-mlx5-fix-assigning-access-flags-to-cache-mkeys.patch
+rdma-mlx5-fix-mutex-unlocking-on-error-flow-for-steering-anchor-creation.patch
+rdma-mlx5-fix-null-string-error.patch
--- /dev/null
+From cedc019b9f260facfadd20c6c490e403abf292e3 Mon Sep 17 00:00:00 2001
+From: Jordan Rife <jrife@google.com>
+Date: Tue, 3 Oct 2023 20:13:03 -0500
+Subject: smb: use kernel_connect() and kernel_bind()
+
+From: Jordan Rife <jrife@google.com>
+
+commit cedc019b9f260facfadd20c6c490e403abf292e3 upstream.
+
+Recent changes to kernel_connect() and kernel_bind() ensure that
+callers are insulated from changes to the address parameter made by BPF
+SOCK_ADDR hooks. This patch wraps direct calls to ops->connect() and
+ops->bind() with kernel_connect() and kernel_bind() to ensure that SMB
+mounts do not see their mount address overwritten in such cases.
+
+Link: https://lore.kernel.org/netdev/9944248dba1bce861375fcce9de663934d933ba9.camel@redhat.com/
+Cc: <stable@vger.kernel.org> # 6.0+
+Signed-off-by: Jordan Rife <jrife@google.com>
+Acked-by: Paulo Alcantara (SUSE) <pc@manguebit.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/connect.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -2890,9 +2890,9 @@ bind_socket(struct TCP_Server_Info *serv
+ if (server->srcaddr.ss_family != AF_UNSPEC) {
+ /* Bind to the specified local IP address */
+ struct socket *socket = server->ssocket;
+- rc = socket->ops->bind(socket,
+- (struct sockaddr *) &server->srcaddr,
+- sizeof(server->srcaddr));
++ rc = kernel_bind(socket,
++ (struct sockaddr *) &server->srcaddr,
++ sizeof(server->srcaddr));
+ if (rc < 0) {
+ struct sockaddr_in *saddr4;
+ struct sockaddr_in6 *saddr6;
+@@ -3041,8 +3041,8 @@ generic_ip_connect(struct TCP_Server_Inf
+ socket->sk->sk_sndbuf,
+ socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo);
+
+- rc = socket->ops->connect(socket, saddr, slen,
+- server->noblockcnt ? O_NONBLOCK : 0);
++ rc = kernel_connect(socket, saddr, slen,
++ server->noblockcnt ? O_NONBLOCK : 0);
+ /*
+ * When mounting SMB root file systems, we do not want to block in
+ * connect. Otherwise bail out and then let cifs_reconnect() perform