--- /dev/null
+From 81781081d7f608d330ee353130beed16933fbb79 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 9 Nov 2023 12:10:08 +0200
+Subject: acpi: property: Let args be NULL in
+ __acpi_node_get_property_reference
+
+From: Sakari Ailus <sakari.ailus@linux.intel.com>
+
+[ Upstream commit bef52aa0f3de1b7d8c258c13b16e577361dabf3a ]
+
+On ACPI, fwnode_get_property_reference_args() cannot currently be called
+with a NULL args argument, whereas OF already supports this. Add the
+missing NULL checks and document this.
+
+The purpose is to be able to count the references.
+
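+As a minimal usage sketch (not part of this change; "fwnode" and the
+"port" property name are just placeholders), counting the references then
+boils down to calling the helper with a NULL args pointer until it fails:
+
+    unsigned int count = 0;
+
+    /* each successful lookup means one more reference in the property */
+    while (!fwnode_property_get_reference_args(fwnode, "port", NULL,
+                                               0, count, NULL))
+        count++;
+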
+Fixes: 977d5ad39f3e ("ACPI: Convert ACPI reference args to generic fwnode reference args")
+Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Reviewed-by: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Link: https://lore.kernel.org/r/20231109101010.1329587-2-sakari.ailus@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/acpi/property.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
+index 479856ceda9f..5906e247b9fa 100644
+--- a/drivers/acpi/property.c
++++ b/drivers/acpi/property.c
+@@ -646,6 +646,7 @@ acpi_fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
+ * @index: Index of the reference to return
+ * @num_args: Maximum number of arguments after each reference
+ * @args: Location to store the returned reference with optional arguments
++ * (may be NULL)
+ *
+ * Find property with @name, verifify that it is a package containing at least
+ * one object reference and if so, store the ACPI device object pointer to the
+@@ -704,6 +705,9 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
+ if (ret)
+ return ret == -ENODEV ? -EINVAL : ret;
+
++ if (!args)
++ return 0;
++
+ args->fwnode = acpi_fwnode_handle(device);
+ args->nargs = 0;
+ return 0;
+--
+2.43.0
+
--- /dev/null
+From 7ca1354e70777e547bc2f12f4a2af81aafa422fe Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 28 Dec 2023 19:07:43 +0300
+Subject: apparmor: avoid crash when parsed profile name is empty
+
+From: Fedor Pchelkin <pchelkin@ispras.ru>
+
+[ Upstream commit 55a8210c9e7d21ff2644809699765796d4bfb200 ]
+
+When processing a packed profile in unpack_profile() described like
+
+ "profile :ns::samba-dcerpcd /usr/lib*/samba/{,samba/}samba-dcerpcd {...}"
+
+a string ":samba-dcerpcd" is unpacked as a fully-qualified name and then
+passed to aa_splitn_fqname().
+
+aa_splitn_fqname() treats ":samba-dcerpcd" as only containing a namespace.
+Thus it returns NULL for tmpname while tmpns is non-NULL. Later
+aa_alloc_profile() crashes because the new profile name is now NULL.
+
+general protection fault, probably for non-canonical address 0xdffffc0000000000: 0000 [#1] PREEMPT SMP KASAN NOPTI
+KASAN: null-ptr-deref in range [0x0000000000000000-0x0000000000000007]
+CPU: 6 PID: 1657 Comm: apparmor_parser Not tainted 6.7.0-rc2-dirty #16
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.16.2-3-gd478f380-rebuilt.opensuse.org 04/01/2014
+RIP: 0010:strlen+0x1e/0xa0
+Call Trace:
+ <TASK>
+ ? strlen+0x1e/0xa0
+ aa_policy_init+0x1bb/0x230
+ aa_alloc_profile+0xb1/0x480
+ unpack_profile+0x3bc/0x4960
+ aa_unpack+0x309/0x15e0
+ aa_replace_profiles+0x213/0x33c0
+ policy_update+0x261/0x370
+ profile_replace+0x20e/0x2a0
+ vfs_write+0x2af/0xe00
+ ksys_write+0x126/0x250
+ do_syscall_64+0x46/0xf0
+ entry_SYSCALL_64_after_hwframe+0x6e/0x76
+ </TASK>
+---[ end trace 0000000000000000 ]---
+RIP: 0010:strlen+0x1e/0xa0
+
+It seems such behaviour of aa_splitn_fqname() is expected and checked in
+other places where it is called (e.g. aa_remove_profiles). Well, there
+is an explicit comment "a ns name without a following profile is allowed"
+inside.
+
+AFAICS, nothing prevents the unpacked "name" from being in a form like
+":samba-dcerpcd" - it is passed from userspace.
+
+Deny the whole profile set replacement in this case and inform the user
+with EPROTO and an explanatory message.
+
+Found by Linux Verification Center (linuxtesting.org).
+
+Fixes: 04dc715e24d0 ("apparmor: audit policy ns specified in policy load")
+Signed-off-by: Fedor Pchelkin <pchelkin@ispras.ru>
+Signed-off-by: John Johansen <john.johansen@canonical.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ security/apparmor/policy_unpack.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
+index 7e32c09249b1..8bd79aad37fd 100644
+--- a/security/apparmor/policy_unpack.c
++++ b/security/apparmor/policy_unpack.c
+@@ -693,6 +693,10 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+
+ tmpname = aa_splitn_fqname(name, strlen(name), &tmpns, &ns_len);
+ if (tmpns) {
++ if (!tmpname) {
++ info = "empty profile name";
++ goto fail;
++ }
+ *ns_name = kstrndup(tmpns, ns_len, GFP_KERNEL);
+ if (!*ns_name) {
+ info = "out of memory";
+--
+2.43.0
+
--- /dev/null
+From c3e0fbeec7805ff98e4dd74c5a3d38031269da1e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 Dec 2023 17:21:34 +0000
+Subject: binder: fix unused alloc->free_async_space
+
+From: Carlos Llamas <cmllamas@google.com>
+
+[ Upstream commit c6d05e0762ab276102246d24affd1e116a46aa0c ]
+
+Each transaction is associated with a 'struct binder_buffer' that stores
+the metadata about its buffer area. Since commit 74310e06be4d ("android:
+binder: Move buffer out of area shared with user space") this struct is
+no longer embedded within the buffer itself but is instead allocated on
+the heap to prevent userspace access to this driver-exclusive info.
+
+Unfortunately, the space of this struct is still being accounted for in
+the total buffer size calculation, specifically for async transactions.
+This results in an additional 104 bytes added to every async buffer
+request, and this area is never used.
+
+This wasted space can be substantial. If we consider the maximum mmap
+buffer space of SZ_4M, the driver will reserve half of it for async
+transactions, or 0x200000. This area should, in theory, accommodate up
+to 262,144 buffers of the minimum 8-byte size. However, after adding
+the extra 'sizeof(struct binder_buffer)', the total number of buffers
+drops to only 18,724, which is a sad 7.14% of the actual capacity.
+
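+For reference, the numbers above follow directly from the sizes involved
+(104 bytes of 'struct binder_buffer' overhead per minimum 8-byte request):
+
+    0x200000 / 8         = 262,144 buffers (theoretical capacity)
+    0x200000 / (8 + 104) =  18,724 buffers (with the per-buffer overhead)
+    18,724 / 262,144     ~=  7.14%
+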
+This patch fixes the buffer size calculation to enable the utilization
+of the entire async buffer space. This is expected to reduce the number
+of -ENOSPC errors that are seen in the field.
+
+Fixes: 74310e06be4d ("android: binder: Move buffer out of area shared with user space")
+Signed-off-by: Carlos Llamas <cmllamas@google.com>
+Reviewed-by: Alice Ryhl <aliceryhl@google.com>
+Link: https://lore.kernel.org/r/20231201172212.1813387-6-cmllamas@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/android/binder_alloc.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
+index ceb70543ca90..cfe8c61d14fc 100644
+--- a/drivers/android/binder_alloc.c
++++ b/drivers/android/binder_alloc.c
+@@ -360,8 +360,7 @@ static void debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
+ continue;
+ if (!buffer->async_transaction)
+ continue;
+- total_alloc_size += binder_alloc_buffer_size(alloc, buffer)
+- + sizeof(struct binder_buffer);
++ total_alloc_size += binder_alloc_buffer_size(alloc, buffer);
+ num_buffers++;
+ }
+
+--
+2.43.0
+
--- /dev/null
+From d6d504e7bb58e0999f8199bb4e1e20c839b9bfed Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Aug 2020 14:25:44 +0200
+Subject: binder: print warnings when detecting oneway spamming.
+
+From: Martijn Coenen <maco@android.com>
+
+[ Upstream commit 261e7818f06ec51e488e007f787ccd7e77272918 ]
+
+The most common cause of the binder transaction buffer filling up is a
+client rapidly firing oneway transactions into a process, before it has
+a chance to handle them. Yet the root cause of this is often hard to
+debug, because either the system or the app will stop, and by that time
+the binder debug information we dump in bugreports is no longer relevant.
+
+This change warns as soon as a process dips below 80% of its oneway
+space (less than 100kB available in the configuration), when any one
+process is responsible for either more than 50 transactions, or more
+than 50% of the oneway space.
+
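+Since the async space is half of the total buffer, the thresholds used in
+the code below map to the percentages above as follows:
+
+    20% of async space left = 20% * (buffer_size / 2) = buffer_size / 10
+    50% of async space used = 50% * (buffer_size / 2) = buffer_size / 4
+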
+Signed-off-by: Martijn Coenen <maco@android.com>
+Acked-by: Todd Kjos <tkjos@google.com>
+Link: https://lore.kernel.org/r/20200821122544.1277051-1-maco@android.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: c6d05e0762ab ("binder: fix unused alloc->free_async_space")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/android/binder.c | 2 +-
+ drivers/android/binder_alloc.c | 55 +++++++++++++++++++++++--
+ drivers/android/binder_alloc.h | 5 ++-
+ drivers/android/binder_alloc_selftest.c | 2 +-
+ 4 files changed, 58 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index ca1c67a1126d..5bb2716a59cd 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -3425,7 +3425,7 @@ static void binder_transaction(struct binder_proc *proc,
+
+ t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
+ tr->offsets_size, extra_buffers_size,
+- !reply && (t->flags & TF_ONE_WAY));
++ !reply && (t->flags & TF_ONE_WAY), current->tgid);
+ if (IS_ERR(t->buffer)) {
+ /*
+ * -ESRCH indicates VMA cleared. The target is dying.
+diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
+index a331e9f82125..ceb70543ca90 100644
+--- a/drivers/android/binder_alloc.c
++++ b/drivers/android/binder_alloc.c
+@@ -339,12 +339,50 @@ static inline struct vm_area_struct *binder_alloc_get_vma(
+ return vma;
+ }
+
++static void debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
++{
++ /*
++ * Find the amount and size of buffers allocated by the current caller;
++ * The idea is that once we cross the threshold, whoever is responsible
++ * for the low async space is likely to try to send another async txn,
++ * and at some point we'll catch them in the act. This is more efficient
++ * than keeping a map per pid.
++ */
++ struct rb_node *n = alloc->free_buffers.rb_node;
++ struct binder_buffer *buffer;
++ size_t total_alloc_size = 0;
++ size_t num_buffers = 0;
++
++ for (n = rb_first(&alloc->allocated_buffers); n != NULL;
++ n = rb_next(n)) {
++ buffer = rb_entry(n, struct binder_buffer, rb_node);
++ if (buffer->pid != pid)
++ continue;
++ if (!buffer->async_transaction)
++ continue;
++ total_alloc_size += binder_alloc_buffer_size(alloc, buffer)
++ + sizeof(struct binder_buffer);
++ num_buffers++;
++ }
++
++ /*
++ * Warn if this pid has more than 50 transactions, or more than 50% of
++ * async space (which is 25% of total buffer size).
++ */
++ if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) {
++ binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
++ "%d: pid %d spamming oneway? %zd buffers allocated for a total size of %zd\n",
++ alloc->pid, pid, num_buffers, total_alloc_size);
++ }
++}
++
+ static struct binder_buffer *binder_alloc_new_buf_locked(
+ struct binder_alloc *alloc,
+ size_t data_size,
+ size_t offsets_size,
+ size_t extra_buffers_size,
+- int is_async)
++ int is_async,
++ int pid)
+ {
+ struct rb_node *n = alloc->free_buffers.rb_node;
+ struct binder_buffer *buffer;
+@@ -487,11 +525,20 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
+ buffer->offsets_size = offsets_size;
+ buffer->async_transaction = is_async;
+ buffer->extra_buffers_size = extra_buffers_size;
++ buffer->pid = pid;
+ if (is_async) {
+ alloc->free_async_space -= size;
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+ "%d: binder_alloc_buf size %zd async free %zd\n",
+ alloc->pid, size, alloc->free_async_space);
++ if (alloc->free_async_space < alloc->buffer_size / 10) {
++ /*
++ * Start detecting spammers once we have less than 20%
++ * of async space left (which is less than 10% of total
++ * buffer size).
++ */
++ debug_low_async_space_locked(alloc, pid);
++ }
+ }
+ return buffer;
+
+@@ -509,6 +556,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
+ * @offsets_size: user specified buffer offset
+ * @extra_buffers_size: size of extra space for meta-data (eg, security context)
+ * @is_async: buffer for async transaction
++ * @pid: pid to attribute allocation to (used for debugging)
+ *
+ * Allocate a new buffer given the requested sizes. Returns
+ * the kernel version of the buffer pointer. The size allocated
+@@ -521,13 +569,14 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
+ size_t data_size,
+ size_t offsets_size,
+ size_t extra_buffers_size,
+- int is_async)
++ int is_async,
++ int pid)
+ {
+ struct binder_buffer *buffer;
+
+ mutex_lock(&alloc->mutex);
+ buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
+- extra_buffers_size, is_async);
++ extra_buffers_size, is_async, pid);
+ mutex_unlock(&alloc->mutex);
+ return buffer;
+ }
+diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
+index 02a19afd9506..f6052c97bce5 100644
+--- a/drivers/android/binder_alloc.h
++++ b/drivers/android/binder_alloc.h
+@@ -33,6 +33,7 @@ struct binder_transaction;
+ * @offsets_size: size of array of offsets
+ * @extra_buffers_size: size of space for other objects (like sg lists)
+ * @user_data: user pointer to base of buffer space
++ * @pid: pid to attribute the buffer to (caller)
+ *
+ * Bookkeeping structure for binder transaction buffers
+ */
+@@ -53,6 +54,7 @@ struct binder_buffer {
+ size_t offsets_size;
+ size_t extra_buffers_size;
+ void __user *user_data;
++ int pid;
+ };
+
+ /**
+@@ -119,7 +121,8 @@ extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
+ size_t data_size,
+ size_t offsets_size,
+ size_t extra_buffers_size,
+- int is_async);
++ int is_async,
++ int pid);
+ extern void binder_alloc_init(struct binder_alloc *alloc);
+ extern int binder_alloc_shrinker_init(void);
+ extern void binder_alloc_shrinker_exit(void);
+diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
+index 4151d9938255..c2b323bc3b3a 100644
+--- a/drivers/android/binder_alloc_selftest.c
++++ b/drivers/android/binder_alloc_selftest.c
+@@ -119,7 +119,7 @@ static void binder_selftest_alloc_buf(struct binder_alloc *alloc,
+ int i;
+
+ for (i = 0; i < BUFFER_NUM; i++) {
+- buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
++ buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0, 0);
+ if (IS_ERR(buffers[i]) ||
+ !check_buffer_pages_allocated(alloc, buffers[i],
+ sizes[i])) {
+--
+2.43.0
+
--- /dev/null
+From c793240f9d0432af83faad54632e9bdf4d18155d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 8 Nov 2023 17:43:52 +0100
+Subject: i2c: s3c24xx: fix read transfers in polling mode
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+[ Upstream commit 0d9cf23ed55d7ba3ab26d617a3ae507863674c8f ]
+
+To properly handle read transfers in polling mode, no waiting for the ACK
+state is needed as it will never come. Just wait a bit to ensure the start
+state is on the bus and continue processing the next bytes.
+
+Fixes: 117053f77a5a ("i2c: s3c2410: Add polling mode support")
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Reviewed-by: Chanho Park <chanho61.park@samsung.com>
+Reviewed-by: Andi Shyti <andi.shyti@kernel.org>
+Signed-off-by: Wolfram Sang <wsa@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/i2c/busses/i2c-s3c2410.c | 13 +++++++++++--
+ 1 file changed, 11 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
+index e6f927c6f8af..a306f8e6702b 100644
+--- a/drivers/i2c/busses/i2c-s3c2410.c
++++ b/drivers/i2c/busses/i2c-s3c2410.c
+@@ -223,8 +223,17 @@ static bool is_ack(struct s3c24xx_i2c *i2c)
+ int tries;
+
+ for (tries = 50; tries; --tries) {
+- if (readl(i2c->regs + S3C2410_IICCON)
+- & S3C2410_IICCON_IRQPEND) {
++ unsigned long tmp = readl(i2c->regs + S3C2410_IICCON);
++
++ if (!(tmp & S3C2410_IICCON_ACKEN)) {
++ /*
++ * Wait a bit for the bus to stabilize,
++ * delay estimated experimentally.
++ */
++ usleep_range(100, 200);
++ return true;
++ }
++ if (tmp & S3C2410_IICCON_IRQPEND) {
+ if (!(readl(i2c->regs + S3C2410_IICSTAT)
+ & S3C2410_IICSTAT_LASTBIT))
+ return true;
+--
+2.43.0
+
--- /dev/null
+From b86b7502728b1aaa6704d38ac74e70e2dd1d3d89 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 8 Nov 2023 17:43:53 +0100
+Subject: i2c: s3c24xx: fix transferring more than one message in polling mode
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+[ Upstream commit 990489e1042c6c5d6bccf56deca68f8dbeed8180 ]
+
+To properly handle ACK on the bus when transferring more than one
+message in polling mode, move the polling handling loop from
+s3c24xx_i2c_message_start() to s3c24xx_i2c_doxfer(). This way
+i2c_s3c_irq_nextbyte() is always executed till the end, properly
+acknowledging the IRQ bits and no recursive calls to
+i2c_s3c_irq_nextbyte() are made.
+
+While touching this, also fix finishing transfers in polling mode by
+using common code path and always waiting for the bus to become idle
+and disabled.
+
+Fixes: 117053f77a5a ("i2c: s3c2410: Add polling mode support")
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Reviewed-by: Andi Shyti <andi.shyti@kernel.org>
+Signed-off-by: Wolfram Sang <wsa@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/i2c/busses/i2c-s3c2410.c | 27 ++++++++++-----------------
+ 1 file changed, 10 insertions(+), 17 deletions(-)
+
+diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
+index a306f8e6702b..7402f71dd24d 100644
+--- a/drivers/i2c/busses/i2c-s3c2410.c
++++ b/drivers/i2c/busses/i2c-s3c2410.c
+@@ -286,16 +286,6 @@ static void s3c24xx_i2c_message_start(struct s3c24xx_i2c *i2c,
+
+ stat |= S3C2410_IICSTAT_START;
+ writel(stat, i2c->regs + S3C2410_IICSTAT);
+-
+- if (i2c->quirks & QUIRK_POLL) {
+- while ((i2c->msg_num != 0) && is_ack(i2c)) {
+- i2c_s3c_irq_nextbyte(i2c, stat);
+- stat = readl(i2c->regs + S3C2410_IICSTAT);
+-
+- if (stat & S3C2410_IICSTAT_ARBITR)
+- dev_err(i2c->dev, "deal with arbitration loss\n");
+- }
+- }
+ }
+
+ static inline void s3c24xx_i2c_stop(struct s3c24xx_i2c *i2c, int ret)
+@@ -703,7 +693,7 @@ static void s3c24xx_i2c_wait_idle(struct s3c24xx_i2c *i2c)
+ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
+ struct i2c_msg *msgs, int num)
+ {
+- unsigned long timeout;
++ unsigned long timeout = 0;
+ int ret;
+
+ ret = s3c24xx_i2c_set_master(i2c);
+@@ -723,16 +713,19 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
+ s3c24xx_i2c_message_start(i2c, msgs);
+
+ if (i2c->quirks & QUIRK_POLL) {
+- ret = i2c->msg_idx;
++ while ((i2c->msg_num != 0) && is_ack(i2c)) {
++ unsigned long stat = readl(i2c->regs + S3C2410_IICSTAT);
+
+- if (ret != num)
+- dev_dbg(i2c->dev, "incomplete xfer (%d)\n", ret);
++ i2c_s3c_irq_nextbyte(i2c, stat);
+
+- goto out;
++ stat = readl(i2c->regs + S3C2410_IICSTAT);
++ if (stat & S3C2410_IICSTAT_ARBITR)
++ dev_err(i2c->dev, "deal with arbitration loss\n");
++ }
++ } else {
++ timeout = wait_event_timeout(i2c->wait, i2c->msg_num == 0, HZ * 5);
+ }
+
+- timeout = wait_event_timeout(i2c->wait, i2c->msg_num == 0, HZ * 5);
+-
+ ret = i2c->msg_idx;
+
+ /*
+--
+2.43.0
+
--- /dev/null
+From 702e7e1988ab1b43de21c52930bb0373f452f2fb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Jan 2024 17:39:22 +0300
+Subject: ipvs: avoid stat macros calls from preemptible context
+
+From: Fedor Pchelkin <pchelkin@ispras.ru>
+
+[ Upstream commit d6938c1c76c64f42363d0d1f051e1b4641c2ad40 ]
+
+Inside decrement_ttl(), upon discovering that the packet ttl has been
+exceeded, the __IP_INC_STATS and __IP6_INC_STATS macros can be called from
+preemptible context, with the following backtrace:
+
+check_preemption_disabled: 48 callbacks suppressed
+BUG: using __this_cpu_add() in preemptible [00000000] code: curl/1177
+caller is decrement_ttl+0x217/0x830
+CPU: 5 PID: 1177 Comm: curl Not tainted 6.7.0+ #34
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 04/01/2014
+Call Trace:
+ <TASK>
+ dump_stack_lvl+0xbd/0xe0
+ check_preemption_disabled+0xd1/0xe0
+ decrement_ttl+0x217/0x830
+ __ip_vs_get_out_rt+0x4e0/0x1ef0
+ ip_vs_nat_xmit+0x205/0xcd0
+ ip_vs_in_hook+0x9b1/0x26a0
+ nf_hook_slow+0xc2/0x210
+ nf_hook+0x1fb/0x770
+ __ip_local_out+0x33b/0x640
+ ip_local_out+0x2a/0x490
+ __ip_queue_xmit+0x990/0x1d10
+ __tcp_transmit_skb+0x288b/0x3d10
+ tcp_connect+0x3466/0x5180
+ tcp_v4_connect+0x1535/0x1bb0
+ __inet_stream_connect+0x40d/0x1040
+ inet_stream_connect+0x57/0xa0
+ __sys_connect_file+0x162/0x1a0
+ __sys_connect+0x137/0x160
+ __x64_sys_connect+0x72/0xb0
+ do_syscall_64+0x6f/0x140
+ entry_SYSCALL_64_after_hwframe+0x6e/0x76
+RIP: 0033:0x7fe6dbbc34e0
+
+Use the corresponding preemption-aware variants: IP_INC_STATS and
+IP6_INC_STATS.
+
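+Simplified sketch of the underlying SNMP helpers (64-bit case, mib/field
+names abbreviated), which is where the difference between the two variants
+lies:
+
+    /* __IP_INC_STATS / __IP6_INC_STATS: caller must have preemption
+     * (or BHs) disabled.
+     */
+    __this_cpu_inc(mib->mibs[field]);
+
+    /* IP_INC_STATS / IP6_INC_STATS: safe from preemptible context. */
+    this_cpu_inc(mib->mibs[field]);
+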
+Found by Linux Verification Center (linuxtesting.org).
+
+Fixes: 8d8e20e2d7bb ("ipvs: Decrement ttl")
+Signed-off-by: Fedor Pchelkin <pchelkin@ispras.ru>
+Acked-by: Julian Anastasov <ja@ssi.bg>
+Acked-by: Simon Horman <horms@kernel.org>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/ipvs/ip_vs_xmit.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
+index 43ef3e25ea7d..5c81772158d8 100644
+--- a/net/netfilter/ipvs/ip_vs_xmit.c
++++ b/net/netfilter/ipvs/ip_vs_xmit.c
+@@ -271,7 +271,7 @@ static inline bool decrement_ttl(struct netns_ipvs *ipvs,
+ skb->dev = dst->dev;
+ icmpv6_send(skb, ICMPV6_TIME_EXCEED,
+ ICMPV6_EXC_HOPLIMIT, 0);
+- __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
++ IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
+
+ return false;
+ }
+@@ -286,7 +286,7 @@ static inline bool decrement_ttl(struct netns_ipvs *ipvs,
+ {
+ if (ip_hdr(skb)->ttl <= 1) {
+ /* Tell the sender its packet died... */
+- __IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
++ IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
+ icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0);
+ return false;
+ }
+--
+2.43.0
+
--- /dev/null
+From 03b0465e580090c36c885556160beb6d9966534d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 13 Feb 2020 15:16:40 +0000
+Subject: kdb: Censor attempts to set PROMPT without ENABLE_MEM_READ
+
+From: Daniel Thompson <daniel.thompson@linaro.org>
+
+[ Upstream commit ad99b5105c0823ff02126497f4366e6a8009453e ]
+
+Currently the PROMPT variable could be abused to provoke the printf()
+machinery to read outside the current stack frame. Normally this
+doesn't matter because md is already a much better tool for reading
+from memory.
+
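+The root of the problem is that the PROMPT value is consumed as a format
+string with only a single argument supplied (see the hunk touched below):
+
+    snprintf(kdb_prompt_str, CMD_BUFLEN, kdbgetenv("PROMPT"),
+             raw_smp_processor_id());
+
+so a PROMPT containing extra conversion specifiers (e.g. "%lx %lx> ")
+makes snprintf() read values the caller never passed.
+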
+However the md command can be disabled by not setting KDB_ENABLE_MEM_READ.
+Let's also prevent PROMPT from being modified in these circumstances.
+
+Whilst adding a comment to help future code reviewers, we also remove
+the #ifdef where PROMPT is consumed. There is no problem passing an
+unused (0) argument to snprintf() when !CONFIG_SMP.
+
+Reported-by: Wang Xiayang <xywang.sjtu@sjtu.edu.cn>
+Signed-off-by: Daniel Thompson <daniel.thompson@linaro.org>
+Reviewed-by: Douglas Anderson <dianders@chromium.org>
+Stable-dep-of: 4f41d30cd6dc ("kdb: Fix a potential buffer overflow in kdb_local()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/debug/kdb/kdb_main.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
+index 7c96bf9a6c2c..f8f087193644 100644
+--- a/kernel/debug/kdb/kdb_main.c
++++ b/kernel/debug/kdb/kdb_main.c
+@@ -452,6 +452,13 @@ int kdb_set(int argc, const char **argv)
+ if (argc != 2)
+ return KDB_ARGCOUNT;
+
++ /*
++ * Censor sensitive variables
++ */
++ if (strcmp(argv[1], "PROMPT") == 0 &&
++ !kdb_check_flags(KDB_ENABLE_MEM_READ, kdb_cmd_enabled, false))
++ return KDB_NOPERM;
++
+ /*
+ * Check for internal variables
+ */
+@@ -1355,12 +1362,9 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
+ *(cmd_hist[cmd_head]) = '\0';
+
+ do_full_getstr:
+-#if defined(CONFIG_SMP)
++ /* PROMPT can only be set if we have MEM_READ permission. */
+ snprintf(kdb_prompt_str, CMD_BUFLEN, kdbgetenv("PROMPT"),
+ raw_smp_processor_id());
+-#else
+- snprintf(kdb_prompt_str, CMD_BUFLEN, kdbgetenv("PROMPT"));
+-#endif
+ if (defcmd_in_progress)
+ strncat(kdb_prompt_str, "[defcmd]", CMD_BUFLEN);
+
+--
+2.43.0
+
--- /dev/null
+From 59969cd2901c9915067c5908e85566c9c0f06790 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 25 Nov 2023 13:05:04 +0100
+Subject: kdb: Fix a potential buffer overflow in kdb_local()
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+[ Upstream commit 4f41d30cd6dc865c3cbc1a852372321eba6d4e4c ]
+
+When appending "[defcmd]" to 'kdb_prompt_str', the size of the string
+already in the buffer should be taken into account.
+
+An option could be to switch from strncat() to strlcat() which does the
+correct test to avoid such an overflow.
+
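+For reference (illustrative only), the two helpers interpret the size
+argument differently:
+
+    /* limits how many bytes are appended, regardless of what is already
+     * in the buffer, so the result can overflow kdb_prompt_str:
+     */
+    strncat(kdb_prompt_str, "[defcmd]", CMD_BUFLEN);
+
+    /* takes the total size of the destination buffer and truncates the
+     * result accordingly:
+     */
+    strlcat(kdb_prompt_str, "[defcmd]", CMD_BUFLEN);
+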
+However, this actually looks like dead code, because 'defcmd_in_progress'
+can't be true here.
+See a more detailed explanation at [1].
+
+[1]: https://lore.kernel.org/all/CAD=FV=WSh7wKN7Yp-3wWiDgX4E3isQ8uh0LCzTmd1v9Cg9j+nQ@mail.gmail.com/
+
+Fixes: 5d5314d6795f ("kdb: core for kgdb back end (1 of 2)")
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Reviewed-by: Douglas Anderson <dianders@chromium.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/debug/kdb/kdb_main.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
+index f8f087193644..51cfb8618205 100644
+--- a/kernel/debug/kdb/kdb_main.c
++++ b/kernel/debug/kdb/kdb_main.c
+@@ -1365,8 +1365,6 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
+ /* PROMPT can only be set if we have MEM_READ permission. */
+ snprintf(kdb_prompt_str, CMD_BUFLEN, kdbgetenv("PROMPT"),
+ raw_smp_processor_id());
+- if (defcmd_in_progress)
+- strncat(kdb_prompt_str, "[defcmd]", CMD_BUFLEN);
+
+ /*
+ * Fetch command from keyboard
+--
+2.43.0
+
--- /dev/null
+From b5984b33b62db185555161c2f038f6514d3a37ba Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Jan 2024 19:07:36 +0100
+Subject: MIPS: Alchemy: Fix an out-of-bound access in db1200_dev_setup()
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+[ Upstream commit 89c4b588d11e9acf01d604de4b0c715884f59213 ]
+
+When calling spi_register_board_info(), we should pass the number of
+elements in 'db1200_spi_devs', not 'db1200_i2c_devs'.
+
+Fixes: 63323ec54a7e ("MIPS: Alchemy: Extended DB1200 board support.")
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/alchemy/devboards/db1200.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/mips/alchemy/devboards/db1200.c b/arch/mips/alchemy/devboards/db1200.c
+index 414f92eacb5e..9ad26215b004 100644
+--- a/arch/mips/alchemy/devboards/db1200.c
++++ b/arch/mips/alchemy/devboards/db1200.c
+@@ -847,7 +847,7 @@ int __init db1200_dev_setup(void)
+ i2c_register_board_info(0, db1200_i2c_devs,
+ ARRAY_SIZE(db1200_i2c_devs));
+ spi_register_board_info(db1200_spi_devs,
+- ARRAY_SIZE(db1200_i2c_devs));
++ ARRAY_SIZE(db1200_spi_devs));
+
+ /* SWITCHES: S6.8 I2C/SPI selector (OFF=I2C ON=SPI)
+ * S6.7 AC97/I2S selector (OFF=AC97 ON=I2S)
+--
+2.43.0
+
--- /dev/null
+From 9b79cba5fa3a80fd7b0a41d877061dc0496f8468 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Jan 2024 19:09:46 +0100
+Subject: MIPS: Alchemy: Fix an out-of-bound access in db1550_dev_setup()
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+[ Upstream commit 3c1e5abcda64bed0c7bffa65af2316995f269a61 ]
+
+When calling spi_register_board_info(), we should pass the number of
+elements in 'db1550_spi_devs', not 'db1550_i2c_devs'.
+
+Fixes: f869d42e580f ("MIPS: Alchemy: Improved DB1550 support, with audio and serial busses.")
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/alchemy/devboards/db1550.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/mips/alchemy/devboards/db1550.c b/arch/mips/alchemy/devboards/db1550.c
+index 3e0c75c0ece0..583b265d7407 100644
+--- a/arch/mips/alchemy/devboards/db1550.c
++++ b/arch/mips/alchemy/devboards/db1550.c
+@@ -588,7 +588,7 @@ int __init db1550_dev_setup(void)
+ i2c_register_board_info(0, db1550_i2c_devs,
+ ARRAY_SIZE(db1550_i2c_devs));
+ spi_register_board_info(db1550_spi_devs,
+- ARRAY_SIZE(db1550_i2c_devs));
++ ARRAY_SIZE(db1550_spi_devs));
+
+ c = clk_get(NULL, "psc0_intclk");
+ if (!IS_ERR(c)) {
+--
+2.43.0
+
--- /dev/null
+From ade38cadfd138a38d7184d399af0d19293e202d6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 2 Dec 2023 14:14:19 +0300
+Subject: mips: Fix incorrect max_low_pfn adjustment
+
+From: Serge Semin <fancer.lancer@gmail.com>
+
+[ Upstream commit 0f5cc249ff73552d3bd864e62f85841dafaa107d ]
+
+The max_low_pfn variable is incorrectly adjusted if the kernel is built with
+high memory support and the latter is detected in a running system, so memory
+which can actually be directly mapped ends up in the highmem zone. See the
+ZONE_NORMAL range on my MIPS32r5 system:
+
+> Zone ranges:
+> DMA [mem 0x0000000000000000-0x0000000000ffffff]
+> Normal [mem 0x0000000001000000-0x0000000007ffffff]
+> HighMem [mem 0x0000000008000000-0x000000020fffffff]
+
+while the zones are supposed to look as follows:
+
+> Zone ranges:
+> DMA [mem 0x0000000000000000-0x0000000000ffffff]
+> Normal [mem 0x0000000001000000-0x000000001fffffff]
+> HighMem [mem 0x0000000020000000-0x000000020fffffff]
+
+Even though the physical memory within the range [0x08000000;0x20000000]
+belongs to MMIO on our system, we don't really want it to be considered as
+high memory since on MIPS32 that range still can be directly mapped.
+
+Note there might be other problems caused by the max_low_pfn variable
+misconfiguration. For instance, the high_memory variable is initialized with
+the virtual address corresponding to the max_low_pfn PFN, and by design it
+must define the upper bound of the directly mapped memory, i.e. the end of
+the normal zone. That in turn may potentially cause problems when accessing
+the memory by means of the /dev/mem and /dev/kmem devices.
+
+Let's fix the discovered misconfiguration then. It turns out that commit
+a94e4f24ec83 ("MIPS: init: Drop boot_mem_map") didn't get the max_low_pfn
+adjustment quite right. If the kernel is built with high memory support and
+the system is equipped with high memory, the max_low_pfn variable needs to
+be initialized with the PFN of the uppermost directly reachable memory
+address so that the normal zone is set up correctly. On MIPS that PFN
+corresponds to PFN_DOWN(HIGHMEM_START). If the kernel is built without high
+memory support and high memory is detected in the running system, we just
+need to adjust the max_pfn variable to discard the found high memory and
+leave max_low_pfn as is, since the latter will be less than
+PFN_DOWN(HIGHMEM_START) anyway by design of the for_each_memblock() loop
+performed a bit earlier in the bootmem_init() method.
+
+Fixes: a94e4f24ec83 ("MIPS: init: Drop boot_mem_map")
+Signed-off-by: Serge Semin <fancer.lancer@gmail.com>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/kernel/setup.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
+index 1c4114f8f9aa..0419629aee60 100644
+--- a/arch/mips/kernel/setup.c
++++ b/arch/mips/kernel/setup.c
+@@ -362,11 +362,11 @@ static void __init bootmem_init(void)
+ panic("Incorrect memory mapping !!!");
+
+ if (max_pfn > PFN_DOWN(HIGHMEM_START)) {
++ max_low_pfn = PFN_DOWN(HIGHMEM_START);
+ #ifdef CONFIG_HIGHMEM
+- highstart_pfn = PFN_DOWN(HIGHMEM_START);
++ highstart_pfn = max_low_pfn;
+ highend_pfn = max_pfn;
+ #else
+- max_low_pfn = PFN_DOWN(HIGHMEM_START);
+ max_pfn = max_low_pfn;
+ #endif
+ }
+--
+2.43.0
+
--- /dev/null
+From 0e64fd6a61caf47fc6679ec0ab1bd1e68b34f98b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Jan 2024 16:04:16 +0100
+Subject: mlxsw: spectrum_acl_erp: Fix error flow of pool allocation failure
+
+From: Amit Cohen <amcohen@nvidia.com>
+
+[ Upstream commit 6d6eeabcfaba2fcadf5443b575789ea606f9de83 ]
+
+Lately, a bug was found when many TC filters are added - at some point,
+several bug splats are printed to dmesg [1] and the switch crashes with a
+segmentation fault.
+
+The issue starts when gen_pool_free() fails because of unexpected
+behavior - an attempt to free memory which is already freed. This leads to
+a BUG() call which crashes the switch and causes many other problems.
+
+Tracking down the unexpected behavior led to a bug in the eRP code. The
+function mlxsw_sp_acl_erp_table_alloc() gets a pointer to the allocated
+index, sets the value and returns an error code. When gen_pool_alloc()
+fails it returns address 0; we catch that and return -ENOBUFS to the
+caller, BUT the call to gen_pool_alloc() has already overridden the index
+in the erp_table structure. This is a problem when such an allocation is
+done as part of a table expansion. Unlike a new table, which would simply
+not be used after an allocation failure, here we try to expand an existing
+eRP table and override its current (non-zero) index with zero. This later
+leads to the unexpected behavior where address 0 is freed twice. Note that
+address 0 is valid in erp_table->base_index and other tables indeed use it.
+
+gen_pool_alloc() fails when there is no space left in the pre-allocated
+pool. In our case, the pool is limited to ACL_MAX_ERPT_BANK_SIZE, which is
+read from the hardware. When more than the maximum number of eRP entries is
+required, we exceed the limit and return an error; this error leads to the
+"Failed to migrate vregion" print.
+
+Fix this by changing erp_table->base_index only in case of a successful
+allocation.
+
+Add a test case for such a scenario. Without this fix it causes a
+segmentation fault:
+
+$ TESTS="max_erp_entries_test" ./tc_flower.sh
+./tc_flower.sh: line 988: 1560 Segmentation fault tc filter del dev $h2 ingress chain $i protocol ip pref $i handle $j flower &>/dev/null
+
+[1]:
+kernel BUG at lib/genalloc.c:508!
+invalid opcode: 0000 [#1] PREEMPT SMP
+CPU: 6 PID: 3531 Comm: tc Not tainted 6.7.0-rc5-custom-ga6893f479f5e #1
+Hardware name: Mellanox Technologies Ltd. MSN4700/VMOD0010, BIOS 5.11 07/12/2021
+RIP: 0010:gen_pool_free_owner+0xc9/0xe0
+...
+Call Trace:
+ <TASK>
+ __mlxsw_sp_acl_erp_table_other_dec+0x70/0xa0 [mlxsw_spectrum]
+ mlxsw_sp_acl_erp_mask_destroy+0xf5/0x110 [mlxsw_spectrum]
+ objagg_obj_root_destroy+0x18/0x80 [objagg]
+ objagg_obj_destroy+0x12c/0x130 [objagg]
+ mlxsw_sp_acl_erp_mask_put+0x37/0x50 [mlxsw_spectrum]
+ mlxsw_sp_acl_ctcam_region_entry_remove+0x74/0xa0 [mlxsw_spectrum]
+ mlxsw_sp_acl_ctcam_entry_del+0x1e/0x40 [mlxsw_spectrum]
+ mlxsw_sp_acl_tcam_ventry_del+0x78/0xd0 [mlxsw_spectrum]
+ mlxsw_sp_flower_destroy+0x4d/0x70 [mlxsw_spectrum]
+ mlxsw_sp_flow_block_cb+0x73/0xb0 [mlxsw_spectrum]
+ tc_setup_cb_destroy+0xc1/0x180
+ fl_hw_destroy_filter+0x94/0xc0 [cls_flower]
+ __fl_delete+0x1ac/0x1c0 [cls_flower]
+ fl_destroy+0xc2/0x150 [cls_flower]
+ tcf_proto_destroy+0x1a/0xa0
+...
+mlxsw_spectrum3 0000:07:00.0: Failed to migrate vregion
+mlxsw_spectrum3 0000:07:00.0: Failed to migrate vregion
+
+Fixes: f465261aa105 ("mlxsw: spectrum_acl: Implement common eRP core")
+Signed-off-by: Amit Cohen <amcohen@nvidia.com>
+Signed-off-by: Ido Schimmel <idosch@nvidia.com>
+Signed-off-by: Petr Machata <petrm@nvidia.com>
+Acked-by: Paolo Abeni <pabeni@redhat.com>
+Link: https://lore.kernel.org/r/4cfca254dfc0e5d283974801a24371c7b6db5989.1705502064.git.petrm@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../mellanox/mlxsw/spectrum_acl_erp.c | 8 +--
+ .../drivers/net/mlxsw/spectrum-2/tc_flower.sh | 52 ++++++++++++++++++-
+ 2 files changed, 56 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
+index 4c98950380d5..d231f4d2888b 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
+@@ -301,6 +301,7 @@ mlxsw_sp_acl_erp_table_alloc(struct mlxsw_sp_acl_erp_core *erp_core,
+ unsigned long *p_index)
+ {
+ unsigned int num_rows, entry_size;
++ unsigned long index;
+
+ /* We only allow allocations of entire rows */
+ if (num_erps % erp_core->num_erp_banks != 0)
+@@ -309,10 +310,11 @@ mlxsw_sp_acl_erp_table_alloc(struct mlxsw_sp_acl_erp_core *erp_core,
+ entry_size = erp_core->erpt_entries_size[region_type];
+ num_rows = num_erps / erp_core->num_erp_banks;
+
+- *p_index = gen_pool_alloc(erp_core->erp_tables, num_rows * entry_size);
+- if (*p_index == 0)
++ index = gen_pool_alloc(erp_core->erp_tables, num_rows * entry_size);
++ if (!index)
+ return -ENOBUFS;
+- *p_index -= MLXSW_SP_ACL_ERP_GENALLOC_OFFSET;
++
++ *p_index = index - MLXSW_SP_ACL_ERP_GENALLOC_OFFSET;
+
+ return 0;
+ }
+diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
+index fb850e0ec837..7bf56ea161e3 100755
+--- a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
++++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
+@@ -10,7 +10,8 @@ lib_dir=$(dirname $0)/../../../../net/forwarding
+ ALL_TESTS="single_mask_test identical_filters_test two_masks_test \
+ multiple_masks_test ctcam_edge_cases_test delta_simple_test \
+ delta_two_masks_one_key_test delta_simple_rehash_test \
+- bloom_simple_test bloom_complex_test bloom_delta_test"
++ bloom_simple_test bloom_complex_test bloom_delta_test \
++ max_erp_entries_test"
+ NUM_NETIFS=2
+ source $lib_dir/lib.sh
+ source $lib_dir/tc_common.sh
+@@ -983,6 +984,55 @@ bloom_delta_test()
+ log_test "bloom delta test ($tcflags)"
+ }
+
++max_erp_entries_test()
++{
++ # The number of eRP entries is limited. Once the maximum number of eRPs
++ # has been reached, filters cannot be added. This test verifies that
++ # when this limit is reached, inserstion fails without crashing.
++
++ RET=0
++
++ local num_masks=32
++ local num_regions=15
++ local chain_failed
++ local mask_failed
++ local ret
++
++ if [[ "$tcflags" != "skip_sw" ]]; then
++ return 0;
++ fi
++
++ for ((i=1; i < $num_regions; i++)); do
++ for ((j=$num_masks; j >= 0; j--)); do
++ tc filter add dev $h2 ingress chain $i protocol ip \
++ pref $i handle $j flower $tcflags \
++ dst_ip 192.1.0.0/$j &> /dev/null
++ ret=$?
++
++ if [ $ret -ne 0 ]; then
++ chain_failed=$i
++ mask_failed=$j
++ break 2
++ fi
++ done
++ done
++
++ # We expect to exceed the maximum number of eRP entries, so that
++ # insertion eventually fails. Otherwise, the test should be adjusted to
++ # add more filters.
++ check_fail $ret "expected to exceed number of eRP entries"
++
++ for ((; i >= 1; i--)); do
++ for ((j=0; j <= $num_masks; j++)); do
++ tc filter del dev $h2 ingress chain $i protocol ip \
++ pref $i handle $j flower &> /dev/null
++ done
++ done
++
++ log_test "max eRP entries test ($tcflags). " \
++ "max chain $chain_failed, mask $mask_failed"
++}
++
+ setup_prepare()
+ {
+ h1=${NETIFS[p1]}
+--
+2.43.0
+
--- /dev/null
+From d3d51a1706f0d1386b367484d4ce7695117a3cb8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 11 Jan 2024 15:20:18 +0800
+Subject: net: dsa: vsc73xx: Add null pointer check to vsc73xx_gpio_probe
+
+From: Kunwu Chan <chentao@kylinos.cn>
+
+[ Upstream commit 776dac5a662774f07a876b650ba578d0a62d20db ]
+
+devm_kasprintf() returns a pointer to dynamically allocated memory
+which can be NULL upon failure.
+
+Fixes: 05bd97fc559d ("net: dsa: Add Vitesse VSC73xx DSA router driver")
+Signed-off-by: Kunwu Chan <chentao@kylinos.cn>
+Suggested-by: Jakub Kicinski <kuba@kernel.org>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://lore.kernel.org/r/20240111072018.75971-1-chentao@kylinos.cn
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/vitesse-vsc73xx-core.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
+index 614377ef7956..c7ff98c26ee3 100644
+--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
++++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
+@@ -1108,6 +1108,8 @@ static int vsc73xx_gpio_probe(struct vsc73xx *vsc)
+
+ vsc->gc.label = devm_kasprintf(vsc->dev, GFP_KERNEL, "VSC%04x",
+ vsc->chipid);
++ if (!vsc->gc.label)
++ return -ENOMEM;
+ vsc->gc.ngpio = 4;
+ vsc->gc.owner = THIS_MODULE;
+ vsc->gc.parent = vsc->dev;
+--
+2.43.0
+
--- /dev/null
+From ff1392d9ac636a592b4e468bb0e85c030ae250ac Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Jan 2024 10:52:42 +0200
+Subject: net: phy: micrel: populate .soft_reset for KSZ9131
+
+From: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
+
+[ Upstream commit e398822c4751017fe401f57409488f5948d12fb5 ]
+
+The RZ/G3S SMARC Module has 2 KSZ9131 PHYs. In this setup, the KSZ9131 PHY
+is used with the ravb Ethernet driver. It has been discovered that when
+bringing the Ethernet interface down/up continuously, e.g., with the
+following sh script:
+
+$ while :; do ifconfig eth0 down; ifconfig eth0 up; done
+
+the link speed and duplex are wrong after interrupting the bring down/up
+operation even though the Ethernet interface is up. To recover from this
+state the following configuration sequence is necessary (executed
+manually):
+
+$ ifconfig eth0 down
+$ ifconfig eth0 up
+
+The behavior has been identified also on the Microchip SAMA7G5-EK board
+which runs the macb driver and uses the same PHY.
+
+The order of PHY-related operations in ravb_open() is as follows:
+ravb_open() ->
+ ravb_phy_start() ->
+ ravb_phy_init() ->
+ of_phy_connect() ->
+ phy_connect_direct() ->
+ phy_attach_direct() ->
+ phy_init_hw() ->
+ phydev->drv->soft_reset()
+ phydev->drv->config_init()
+ phydev->drv->config_intr()
+ phy_resume()
+ kszphy_resume()
+
+The order of PHY-related operations in ravb_close is as follows:
+ravb_close() ->
+ phy_stop() ->
+ phy_suspend() ->
+ kszphy_suspend() ->
+ genphy_suspend()
+ // set BMCR_PDOWN bit in MII_BMCR
+
+In genphy_suspend() setting the BMCR_PDOWN bit in MII_BMCR switches the PHY
+to Software Power-Down (SPD) mode (according to the KSZ9131 datasheet).
+Thus, when opening the interface after it has been previously closed (via
+ravb_close()), the phydev->drv->config_init() and
+phydev->drv->config_intr() reach the KSZ9131 PHY driver via the
+ksz9131_config_init() and kszphy_config_intr() functions.
+
+KSZ9131 specifies that the MII management interface remains operational
+during SPD (Software Power-Down), but (according to the manual):
+- Only access to the standard registers (0 through 31) is supported.
+- Access to MMD address spaces other than MMD address space 1 is possible
+ if the spd_clock_gate_override bit is set.
+- Access to MMD address space 1 is not possible.
+
+The spd_clock_gate_override bit is not used in the KSZ9131 driver.
+
+ksz9131_config_init() configures RGMII delay, pad skews and LEDs by
+accessing MMD registers other than those in address space 1.
+
+The datasheet for the KSZ9131 does not specify what happens if registers
+from an unsupported address space are accessed while the PHY is in SPD.
+
+To fix the issue, the .soft_reset method has been populated for KSZ9131,
+too. This resets the PHY to its default state before any configuration is
+done, thus switching it out of SPD.
+
+Fixes: bff5b4b37372 ("net: phy: micrel: add Microchip KSZ9131 initial driver")
+Signed-off-by: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
+Reviewed-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/phy/micrel.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
+index 721153dcfd15..caaa51a70cbd 100644
+--- a/drivers/net/phy/micrel.c
++++ b/drivers/net/phy/micrel.c
+@@ -1156,6 +1156,7 @@ static struct phy_driver ksphy_driver[] = {
+ /* PHY_GBIT_FEATURES */
+ .driver_data = &ksz9021_type,
+ .probe = kszphy_probe,
++ .soft_reset = genphy_soft_reset,
+ .config_init = ksz9131_config_init,
+ .read_status = genphy_read_status,
+ .ack_interrupt = kszphy_ack_interrupt,
+--
+2.43.0
+
--- /dev/null
+From 529c36c45cbddec1ebd7f34d15742744b9c9a838 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Jan 2024 14:14:00 +0800
+Subject: net: qualcomm: rmnet: fix global oob in rmnet_policy
+
+From: Lin Ma <linma@zju.edu.cn>
+
+[ Upstream commit b33fb5b801c6db408b774a68e7c8722796b59ecc ]
+
+The variable rmnet_link_ops assigns a *bigger* maxtype, which leads to a
+global out-of-bounds read when parsing the netlink attributes. See the bug
+trace below:
+
+==================================================================
+BUG: KASAN: global-out-of-bounds in validate_nla lib/nlattr.c:386 [inline]
+BUG: KASAN: global-out-of-bounds in __nla_validate_parse+0x24af/0x2750 lib/nlattr.c:600
+Read of size 1 at addr ffffffff92c438d0 by task syz-executor.6/84207
+
+CPU: 0 PID: 84207 Comm: syz-executor.6 Tainted: G N 6.1.0 #3
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.13.0-1ubuntu1.1 04/01/2014
+Call Trace:
+ <TASK>
+ __dump_stack lib/dump_stack.c:88 [inline]
+ dump_stack_lvl+0x8b/0xb3 lib/dump_stack.c:106
+ print_address_description mm/kasan/report.c:284 [inline]
+ print_report+0x172/0x475 mm/kasan/report.c:395
+ kasan_report+0xbb/0x1c0 mm/kasan/report.c:495
+ validate_nla lib/nlattr.c:386 [inline]
+ __nla_validate_parse+0x24af/0x2750 lib/nlattr.c:600
+ __nla_parse+0x3e/0x50 lib/nlattr.c:697
+ nla_parse_nested_deprecated include/net/netlink.h:1248 [inline]
+ __rtnl_newlink+0x50a/0x1880 net/core/rtnetlink.c:3485
+ rtnl_newlink+0x64/0xa0 net/core/rtnetlink.c:3594
+ rtnetlink_rcv_msg+0x43c/0xd70 net/core/rtnetlink.c:6091
+ netlink_rcv_skb+0x14f/0x410 net/netlink/af_netlink.c:2540
+ netlink_unicast_kernel net/netlink/af_netlink.c:1319 [inline]
+ netlink_unicast+0x54e/0x800 net/netlink/af_netlink.c:1345
+ netlink_sendmsg+0x930/0xe50 net/netlink/af_netlink.c:1921
+ sock_sendmsg_nosec net/socket.c:714 [inline]
+ sock_sendmsg+0x154/0x190 net/socket.c:734
+ ____sys_sendmsg+0x6df/0x840 net/socket.c:2482
+ ___sys_sendmsg+0x110/0x1b0 net/socket.c:2536
+ __sys_sendmsg+0xf3/0x1c0 net/socket.c:2565
+ do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+ do_syscall_64+0x3b/0x90 arch/x86/entry/common.c:80
+ entry_SYSCALL_64_after_hwframe+0x63/0xcd
+RIP: 0033:0x7fdcf2072359
+Code: 28 00 00 00 75 05 48 83 c4 28 c3 e8 f1 19 00 00 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b8 ff ff ff f7 d8 64 89 01 48
+RSP: 002b:00007fdcf13e3168 EFLAGS: 00000246 ORIG_RAX: 000000000000002e
+RAX: ffffffffffffffda RBX: 00007fdcf219ff80 RCX: 00007fdcf2072359
+RDX: 0000000000000000 RSI: 0000000020000200 RDI: 0000000000000003
+RBP: 00007fdcf20bd493 R08: 0000000000000000 R09: 0000000000000000
+R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000
+R13: 00007fffbb8d7bdf R14: 00007fdcf13e3300 R15: 0000000000022000
+ </TASK>
+
+The buggy address belongs to the variable:
+ rmnet_policy+0x30/0xe0
+
+The buggy address belongs to the physical page:
+page:0000000065bdeb3c refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x155243
+flags: 0x200000000001000(reserved|node=0|zone=2)
+raw: 0200000000001000 ffffea00055490c8 ffffea00055490c8 0000000000000000
+raw: 0000000000000000 0000000000000000 00000001ffffffff 0000000000000000
+page dumped because: kasan: bad access detected
+
+Memory state around the buggy address:
+ ffffffff92c43780: f9 f9 f9 f9 00 00 00 02 f9 f9 f9 f9 00 00 00 07
+ ffffffff92c43800: f9 f9 f9 f9 00 00 00 05 f9 f9 f9 f9 06 f9 f9 f9
+>ffffffff92c43880: f9 f9 f9 f9 00 00 00 00 00 00 f9 f9 f9 f9 f9 f9
+ ^
+ ffffffff92c43900: 00 00 00 00 00 00 00 00 07 f9 f9 f9 f9 f9 f9 f9
+ ffffffff92c43980: 00 00 00 07 f9 f9 f9 f9 00 00 00 05 f9 f9 f9 f9
+
+According to the comment of `nla_parse_nested_deprecated`, the maxtype
+should be len(destination array) - 1. Hence use `IFLA_RMNET_MAX` here.
+
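+For reference, a sketch of the relevant UAPI definitions (from
+include/uapi/linux/if_link.h); the rmnet_policy array holds
+IFLA_RMNET_MAX + 1 entries, so IFLA_RMNET_MAX is the last valid index
+while __IFLA_RMNET_MAX is already one past the end:
+
+    enum {
+        IFLA_RMNET_UNSPEC,
+        IFLA_RMNET_MUX_ID,
+        IFLA_RMNET_FLAGS,
+        __IFLA_RMNET_MAX,
+    };
+    #define IFLA_RMNET_MAX (__IFLA_RMNET_MAX - 1)
+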
+Fixes: 14452ca3b5ce ("net: qualcomm: rmnet: Export mux_id and flags to netlink")
+Signed-off-by: Lin Ma <linma@zju.edu.cn>
+Reviewed-by: Subash Abhinov Kasiviswanathan <quic_subashab@quicinc.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Reviewed-by: Jiri Pirko <jiri@nvidia.com>
+Link: https://lore.kernel.org/r/20240110061400.3356108-1-linma@zju.edu.cn
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+index 18d88b424828..deb8d00d27ad 100644
+--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
++++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+@@ -375,7 +375,7 @@ static int rmnet_fill_info(struct sk_buff *skb, const struct net_device *dev)
+
+ struct rtnl_link_ops rmnet_link_ops __read_mostly = {
+ .kind = "rmnet",
+- .maxtype = __IFLA_RMNET_MAX,
++ .maxtype = IFLA_RMNET_MAX,
+ .priv_size = sizeof(struct rmnet_priv),
+ .setup = rmnet_vnd_setup,
+ .validate = rmnet_rtnl_validate,
+--
+2.43.0
+
--- /dev/null
+From 399aba176975e21e042239c3f6d4658dd76ead96 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 13 Jan 2024 10:22:21 +0600
+Subject: net: ravb: Fix dma_addr_t truncation in error case
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Nikita Yushchenko <nikita.yoush@cogentembedded.com>
+
+[ Upstream commit e327b2372bc0f18c30433ac40be07741b59231c5 ]
+
+In ravb_start_xmit(), the ravb driver uses a u32 variable to store the
+result of the dma_map_single() call. Since the ravb hardware has 32-bit
+address fields in its descriptors, this works properly when the mapping is
+successful - it is the platform's job to provide mapping addresses that fit
+into the hardware limitations.
+
+However, in the failure case dma_map_single() returns the DMA_MAPPING_ERROR
+constant, which is 64-bit when dma_addr_t is 64-bit. Storing this constant
+in a u32 leads to truncation, and the subsequent call to dma_mapping_error()
+fails to notice the error.
+
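+Sketch of the failure mode (DMA_MAPPING_ERROR is ~(dma_addr_t)0):
+
+    u32 dma_addr = dma_map_single(...);  /* 0xffffffffffffffff on error, */
+                                         /* truncated to 0xffffffff      */
+
+    /* compares 0xffffffff with 0xffffffffffffffff -> no error detected */
+    dma_mapping_error(ndev->dev.parent, dma_addr);
+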
+Fix that by storing the result of dma_map_single() in a dma_addr_t
+variable.
+
+Fixes: c156633f1353 ("Renesas Ethernet AVB driver proper")
+Signed-off-by: Nikita Yushchenko <nikita.yoush@cogentembedded.com>
+Reviewed-by: Niklas Söderlund <niklas.soderlund+renesas@ragnatech.se>
+Reviewed-by: Sergey Shtylyov <s.shtylyov@omp.ru>
+Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/renesas/ravb_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index 42e62f51ba6d..53b9c77c7f6a 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -1497,7 +1497,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ struct ravb_tstamp_skb *ts_skb;
+ struct ravb_tx_desc *desc;
+ unsigned long flags;
+- u32 dma_addr;
++ dma_addr_t dma_addr;
+ void *buffer;
+ u32 entry;
+ u32 len;
+--
+2.43.0
+
--- /dev/null
+From 6297361a9d680e72bdbffc67edb383d39aad47ee Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Jan 2024 00:14:38 +0100
+Subject: netfilter: nf_tables: skip dead set elements in netlink dump
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit 6b1ca88e4bb63673dc9f9c7f23c899f22c3cb17a ]
+
+Delete from packet path relies on the garbage collector to purge
+elements with NFT_SET_ELEM_DEAD_BIT on.
+
+Skip these dead elements in the nf_tables_dump_setelem() path. I very
+rarely see tests/shell/testcases/maps/typeof_maps_add_delete report
+[DUMP FAILED], showing a mismatch in the expected output with an element
+that should not be there.
+
+If the netlink dump happens before GC worker run, it might show dead
+elements in the ruleset listing.
+
+nft_rhash_get() already skips dead elements in nft_rhash_cmp(),
+therefore, it already does not show the element when getting a single
+element via netlink control plane.
+
+Fixes: 5f68718b34a5 ("netfilter: nf_tables: GC transaction API to avoid race with control plane")
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_tables_api.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 9bd8ed0b62f1..534126f3687b 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -4326,7 +4326,7 @@ static int nf_tables_dump_setelem(const struct nft_ctx *ctx,
+ const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
+ struct nft_set_dump_args *args;
+
+- if (nft_set_elem_expired(ext))
++ if (nft_set_elem_expired(ext) || nft_set_elem_is_dead(ext))
+ return 0;
+
+ args = container_of(iter, struct nft_set_dump_args, iter);
+--
+2.43.0
+
--- /dev/null
+From b7fc77e120620f17e9cc1ce037c1a767dbceec9e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 22 Dec 2023 16:17:49 +0100
+Subject: nvmet-tcp: fix a crash in nvmet_req_complete()
+
+From: Maurizio Lombardi <mlombard@redhat.com>
+
+[ Upstream commit 0849a5441358cef02586fb2d60f707c0db195628 ]
+
+In nvmet_tcp_handle_h2c_data_pdu(), if the host sends a data_offset
+different from rbytes_done, the driver ends up calling nvmet_req_complete()
+with an error status.
+The problem is that at this point cmd->req is not yet initialized, so
+the kernel will crash after dereferencing a NULL pointer.
+
+Fix the bug by replacing the call to nvmet_req_complete() with
+nvmet_tcp_fatal_error().
+
+Fixes: 872d26a391da ("nvmet-tcp: add NVMe over TCP target driver")
+Reviewed-by: Keith Busch <kbusch@kernel.org>
+Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: Maurizio Lombardi <mlombard@redhat.com>
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/target/tcp.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
+index 1747d3c4fa32..1f584a74b17f 100644
+--- a/drivers/nvme/target/tcp.c
++++ b/drivers/nvme/target/tcp.c
+@@ -886,8 +886,7 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
+ data->ttag, le32_to_cpu(data->data_offset),
+ cmd->rbytes_done);
+ /* FIXME: use path and transport errors */
+- nvmet_req_complete(&cmd->req,
+- NVME_SC_INVALID_FIELD | NVME_SC_DNR);
++ nvmet_tcp_fatal_error(queue);
+ return -EPROTO;
+ }
+
+--
+2.43.0
+
--- /dev/null
+From 530a3cce5d92218e7091a422815970d3def05c99 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 22 Dec 2023 16:17:48 +0100
+Subject: nvmet-tcp: Fix a kernel panic when host sends an invalid H2C PDU
+ length
+
+From: Maurizio Lombardi <mlombard@redhat.com>
+
+[ Upstream commit efa56305908ba20de2104f1b8508c6a7401833be ]
+
+If the host sends an H2CData command with an invalid DATAL,
+the kernel may crash in nvmet_tcp_build_pdu_iovec().
+
+Unable to handle kernel NULL pointer dereference at
+virtual address 0000000000000000
+lr : nvmet_tcp_io_work+0x6ac/0x718 [nvmet_tcp]
+Call trace:
+ process_one_work+0x174/0x3c8
+ worker_thread+0x2d0/0x3e8
+ kthread+0x104/0x110
+
+Fix the bug by raising a fatal error if DATAL isn't consistent
+with the packet size.
+Also, the PDU length should never exceed the MAXH2CDATA parameter which
+has been communicated to the host in nvmet_tcp_handle_icreq().
+
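+As a rough worked example (hypothetical numbers, digests disabled): an
+H2CData PDU with plen = 4120 and the 24-byte PDU header leaves room for
+exactly 4120 - 24 = 4096 bytes of data. Any other DATAL, a DATAL of 0,
+or a DATAL above NVMET_TCP_MAXH2CDATA (16M) now triggers a fatal
+transport error instead of being used to build the iovec.
+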
+Fixes: 872d26a391da ("nvmet-tcp: add NVMe over TCP target driver")
+Signed-off-by: Maurizio Lombardi <mlombard@redhat.com>
+Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/target/tcp.c | 13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
+index e8c7135c4c11..1747d3c4fa32 100644
+--- a/drivers/nvme/target/tcp.c
++++ b/drivers/nvme/target/tcp.c
+@@ -18,6 +18,7 @@
+ #include "nvmet.h"
+
+ #define NVMET_TCP_DEF_INLINE_DATA_SIZE (4 * PAGE_SIZE)
++#define NVMET_TCP_MAXH2CDATA 0x400000 /* 16M arbitrary limit */
+
+ #define NVMET_TCP_RECV_BUDGET 8
+ #define NVMET_TCP_SEND_BUDGET 8
+@@ -818,7 +819,7 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
+ icresp->hdr.pdo = 0;
+ icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
+ icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
+- icresp->maxdata = cpu_to_le32(0x400000); /* 16M arbitrary limit */
++ icresp->maxdata = cpu_to_le32(NVMET_TCP_MAXH2CDATA);
+ icresp->cpda = 0;
+ if (queue->hdr_digest)
+ icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
+@@ -866,6 +867,7 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
+ {
+ struct nvme_tcp_data_pdu *data = &queue->pdu.data;
+ struct nvmet_tcp_cmd *cmd;
++ unsigned int plen;
+
+ if (likely(queue->nr_cmds)) {
+ if (unlikely(data->ttag >= queue->nr_cmds)) {
+@@ -889,7 +891,16 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
+ return -EPROTO;
+ }
+
++ plen = le32_to_cpu(data->hdr.plen);
+ cmd->pdu_len = le32_to_cpu(data->data_length);
++ if (unlikely(cmd->pdu_len != (plen - sizeof(*data)) ||
++ cmd->pdu_len == 0 ||
++ cmd->pdu_len > NVMET_TCP_MAXH2CDATA)) {
++ pr_err("H2CData PDU len %u is invalid\n", cmd->pdu_len);
++ /* FIXME: use proper transport errors */
++ nvmet_tcp_fatal_error(queue);
++ return -EPROTO;
++ }
+ cmd->pdu_recv = 0;
+ nvmet_tcp_map_pdu_iovec(cmd);
+ queue->cmd = cmd;
+--
+2.43.0
+
--- /dev/null
+From 6505a310f2744611fbb60ac6a475eaa63c98f654 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Jan 2024 09:14:44 +0100
+Subject: nvmet-tcp: Fix the H2C expected PDU len calculation
+
+From: Maurizio Lombardi <mlombard@redhat.com>
+
+[ Upstream commit 9a1abc24850eb759e36a2f8869161c3b7254c904 ]
+
+The nvmet_tcp_handle_h2c_data_pdu() function should take into
+consideration the possibility that the header digest and/or the data
+digest are enabled when calculating the expected PDU length, before
+comparing it to the value stored in cmd->pdu_len.
+
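+As a rough worked example (hypothetical sizes): with both the header
+and data digests enabled, each digest adds 4 bytes, so an H2CData PDU
+with plen = 4128 and a 24-byte header carries 4128 - 24 - 4 - 4 = 4096
+bytes of data, and that is the value DATAL (cmd->pdu_len) must match.
+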
+Fixes: efa56305908b ("nvmet-tcp: Fix a kernel panic when host sends an invalid H2C PDU length")
+Signed-off-by: Maurizio Lombardi <mlombard@redhat.com>
+Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/target/tcp.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
+index 1f584a74b17f..be9e97657557 100644
+--- a/drivers/nvme/target/tcp.c
++++ b/drivers/nvme/target/tcp.c
+@@ -867,7 +867,7 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
+ {
+ struct nvme_tcp_data_pdu *data = &queue->pdu.data;
+ struct nvmet_tcp_cmd *cmd;
+- unsigned int plen;
++ unsigned int exp_data_len;
+
+ if (likely(queue->nr_cmds)) {
+ if (unlikely(data->ttag >= queue->nr_cmds)) {
+@@ -890,9 +890,13 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
+ return -EPROTO;
+ }
+
+- plen = le32_to_cpu(data->hdr.plen);
++ exp_data_len = le32_to_cpu(data->hdr.plen) -
++ nvmet_tcp_hdgst_len(queue) -
++ nvmet_tcp_ddgst_len(queue) -
++ sizeof(*data);
++
+ cmd->pdu_len = le32_to_cpu(data->data_length);
+- if (unlikely(cmd->pdu_len != (plen - sizeof(*data)) ||
++ if (unlikely(cmd->pdu_len != exp_data_len ||
+ cmd->pdu_len == 0 ||
+ cmd->pdu_len > NVMET_TCP_MAXH2CDATA)) {
+ pr_err("H2CData PDU len %u is invalid\n", cmd->pdu_len);
+--
+2.43.0
+
--- /dev/null
+From 7e76cfd1e54efc18eca65df943457e8d0332b491 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 27 Sep 2023 09:48:45 +0530
+Subject: PCI: keystone: Fix race condition when initializing PHYs
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Siddharth Vadapalli <s-vadapalli@ti.com>
+
+[ Upstream commit c12ca110c613a81cb0f0099019c839d078cd0f38 ]
+
+The PCI driver invokes the PHY APIs using the ks_pcie_enable_phy()
+function. The PHY in this case is the Serdes. It is possible that the
+PCI instance is configured for two lane operation across two different
+Serdes instances, using one lane of each Serdes.
+
+In such a configuration, if the reference clock for one Serdes is
+provided by the other Serdes, it results in a race condition. After the
+Serdes providing the reference clock is initialized by the PCI driver by
+invoking its PHY APIs, it is not guaranteed that this Serdes remains
+powered on long enough for the PHY APIs based initialization of the
+dependent Serdes. In such cases, the PLL of the dependent Serdes fails
+to lock due to the absence of the reference clock from the former Serdes
+which has been powered off by the PM Core.
+
+Fix this by obtaining references to the PHYs before invoking the PHY
+initialization APIs and releasing those references once the
+initialization is complete.
+
+Link: https://lore.kernel.org/linux-pci/20230927041845.1222080-1-s-vadapalli@ti.com
+Fixes: 49229238ab47 ("PCI: keystone: Cleanup PHY handling")
+Signed-off-by: Siddharth Vadapalli <s-vadapalli@ti.com>
+Signed-off-by: Krzysztof Wilczyński <kwilczynski@kernel.org>
+Acked-by: Ravi Gunasekaran <r-gunasekaran@ti.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/controller/dwc/pci-keystone.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
+index 920444b1cfc7..b18ddb2b9ef8 100644
+--- a/drivers/pci/controller/dwc/pci-keystone.c
++++ b/drivers/pci/controller/dwc/pci-keystone.c
+@@ -1305,7 +1305,16 @@ static int ks_pcie_probe(struct platform_device *pdev)
+ goto err_link;
+ }
+
++ /* Obtain references to the PHYs */
++ for (i = 0; i < num_lanes; i++)
++ phy_pm_runtime_get_sync(ks_pcie->phy[i]);
++
+ ret = ks_pcie_enable_phy(ks_pcie);
++
++ /* Release references to the PHYs */
++ for (i = 0; i < num_lanes; i++)
++ phy_pm_runtime_put_sync(ks_pcie->phy[i]);
++
+ if (ret) {
+ dev_err(dev, "failed to enable phy\n");
+ goto err_link;
+--
+2.43.0
+
--- /dev/null
+From 82eadef513a7f524647f1613e3a5cab386e12b8e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 24 Apr 2020 12:24:51 -0300
+Subject: perf bpf: Decouple creating the evlist from adding the SB event
+
+From: Arnaldo Carvalho de Melo <acme@redhat.com>
+
+[ Upstream commit b38d85ef49cf6af9d1deaaf01daf0986d47e6c7a ]
+
+Rename bpf_event__add_sb_event() to evlist__add_bpf_sb_event() and
+require that the evlist be allocated beforehand.
+
+This will allow the same side band thread and evlist to be used for
+multiple purposes, in addition to reacting to PERF_RECORD_BPF_EVENT
+records soon after they are generated.
+
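+A minimal sketch of the new calling convention, with names taken from
+the record path below and error handling trimmed for brevity:
+
+	struct evlist *sb_evlist = evlist__new();
+
+	if (sb_evlist == NULL)
+		return -ENOMEM;
+	if (evlist__add_bpf_sb_event(sb_evlist, &session->header.env))
+		return -1;
+	if (perf_evlist__start_sb_thread(sb_evlist, &rec->opts.target))
+		opts->no_bpf_event = true;
+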
+Acked-by: Jiri Olsa <jolsa@redhat.com>
+Cc: Adrian Hunter <adrian.hunter@intel.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Song Liu <songliubraving@fb.com>
+Link: http://lore.kernel.org/lkml/20200429131106.27974-4-acme@kernel.org
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Stable-dep-of: 9c51f8788b5d ("perf env: Avoid recursively taking env->bpf_progs.lock")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/builtin-record.c | 17 ++++++++++++++---
+ tools/perf/builtin-top.c | 15 +++++++++++++--
+ tools/perf/util/bpf-event.c | 3 +--
+ tools/perf/util/bpf-event.h | 7 +++----
+ tools/perf/util/evlist.c | 21 ++++-----------------
+ tools/perf/util/evlist.h | 2 +-
+ 6 files changed, 36 insertions(+), 29 deletions(-)
+
+diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
+index 7fc3dadfa156..a9891c9fe94d 100644
+--- a/tools/perf/builtin-record.c
++++ b/tools/perf/builtin-record.c
+@@ -1446,16 +1446,27 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
+ goto out_child;
+ }
+
++ err = -1;
+ if (!rec->no_buildid
+ && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
+ pr_err("Couldn't generate buildids. "
+ "Use --no-buildid to profile anyway.\n");
+- err = -1;
+ goto out_child;
+ }
+
+- if (!opts->no_bpf_event)
+- bpf_event__add_sb_event(&rec->sb_evlist, &session->header.env);
++ if (!opts->no_bpf_event) {
++ rec->sb_evlist = evlist__new();
++
++ if (rec->sb_evlist == NULL) {
++ pr_err("Couldn't create side band evlist.\n.");
++ goto out_child;
++ }
++
++ if (evlist__add_bpf_sb_event(rec->sb_evlist, &session->header.env)) {
++ pr_err("Couldn't ask for PERF_RECORD_BPF_EVENT side band events.\n.");
++ goto out_child;
++ }
++ }
+
+ if (perf_evlist__start_sb_thread(rec->sb_evlist, &rec->opts.target)) {
+ pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
+diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
+index d83954f75557..82642fe5cd21 100644
+--- a/tools/perf/builtin-top.c
++++ b/tools/perf/builtin-top.c
+@@ -1682,8 +1682,19 @@ int cmd_top(int argc, const char **argv)
+ goto out_delete_evlist;
+ }
+
+- if (!top.record_opts.no_bpf_event)
+- bpf_event__add_sb_event(&top.sb_evlist, &perf_env);
++ if (!top.record_opts.no_bpf_event) {
++ top.sb_evlist = evlist__new();
++
++ if (top.sb_evlist == NULL) {
++ pr_err("Couldn't create side band evlist.\n.");
++ goto out_delete_evlist;
++ }
++
++ if (evlist__add_bpf_sb_event(top.sb_evlist, &perf_env)) {
++ pr_err("Couldn't ask for PERF_RECORD_BPF_EVENT side band events.\n.");
++ goto out_delete_evlist;
++ }
++ }
+
+ if (perf_evlist__start_sb_thread(top.sb_evlist, target)) {
+ pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
+diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
+index 782c0c8a9a83..96e0a31a38f6 100644
+--- a/tools/perf/util/bpf-event.c
++++ b/tools/perf/util/bpf-event.c
+@@ -422,8 +422,7 @@ static int bpf_event__sb_cb(union perf_event *event, void *data)
+ return 0;
+ }
+
+-int bpf_event__add_sb_event(struct evlist **evlist,
+- struct perf_env *env)
++int evlist__add_bpf_sb_event(struct evlist *evlist, struct perf_env *env)
+ {
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_SOFTWARE,
+diff --git a/tools/perf/util/bpf-event.h b/tools/perf/util/bpf-event.h
+index 81fdc88e6c1a..68f315c3df5b 100644
+--- a/tools/perf/util/bpf-event.h
++++ b/tools/perf/util/bpf-event.h
+@@ -33,8 +33,7 @@ struct btf_node {
+ #ifdef HAVE_LIBBPF_SUPPORT
+ int machine__process_bpf(struct machine *machine, union perf_event *event,
+ struct perf_sample *sample);
+-int bpf_event__add_sb_event(struct evlist **evlist,
+- struct perf_env *env);
++int evlist__add_bpf_sb_event(struct evlist *evlist, struct perf_env *env);
+ void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
+ struct perf_env *env,
+ FILE *fp);
+@@ -46,8 +45,8 @@ static inline int machine__process_bpf(struct machine *machine __maybe_unused,
+ return 0;
+ }
+
+-static inline int bpf_event__add_sb_event(struct evlist **evlist __maybe_unused,
+- struct perf_env *env __maybe_unused)
++static inline int evlist__add_bpf_sb_event(struct evlist *evlist __maybe_unused,
++ struct perf_env *env __maybe_unused)
+ {
+ return 0;
+ }
+diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
+index 505b890ac85c..b110deb88425 100644
+--- a/tools/perf/util/evlist.c
++++ b/tools/perf/util/evlist.c
+@@ -1672,39 +1672,26 @@ struct evsel *perf_evlist__reset_weak_group(struct evlist *evsel_list,
+ return leader;
+ }
+
+-int perf_evlist__add_sb_event(struct evlist **evlist,
++int perf_evlist__add_sb_event(struct evlist *evlist,
+ struct perf_event_attr *attr,
+ perf_evsel__sb_cb_t cb,
+ void *data)
+ {
+ struct evsel *evsel;
+- bool new_evlist = (*evlist) == NULL;
+-
+- if (*evlist == NULL)
+- *evlist = evlist__new();
+- if (*evlist == NULL)
+- return -1;
+
+ if (!attr->sample_id_all) {
+ pr_warning("enabling sample_id_all for all side band events\n");
+ attr->sample_id_all = 1;
+ }
+
+- evsel = perf_evsel__new_idx(attr, (*evlist)->core.nr_entries);
++ evsel = perf_evsel__new_idx(attr, evlist->core.nr_entries);
+ if (!evsel)
+- goto out_err;
++ return -1;
+
+ evsel->side_band.cb = cb;
+ evsel->side_band.data = data;
+- evlist__add(*evlist, evsel);
++ evlist__add(evlist, evsel);
+ return 0;
+-
+-out_err:
+- if (new_evlist) {
+- evlist__delete(*evlist);
+- *evlist = NULL;
+- }
+- return -1;
+ }
+
+ static void *perf_evlist__poll_thread(void *arg)
+diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
+index 7cfe75522ba5..6f920ca91a69 100644
+--- a/tools/perf/util/evlist.h
++++ b/tools/perf/util/evlist.h
+@@ -107,7 +107,7 @@ int __perf_evlist__add_default_attrs(struct evlist *evlist,
+
+ int perf_evlist__add_dummy(struct evlist *evlist);
+
+-int perf_evlist__add_sb_event(struct evlist **evlist,
++int perf_evlist__add_sb_event(struct evlist *evlist,
+ struct perf_event_attr *attr,
+ perf_evsel__sb_cb_t cb,
+ void *data);
+--
+2.43.0
+
--- /dev/null
+From 55251e81f3bf963c59f5e1792b8a9b8f8e911070 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 29 Aug 2019 13:31:48 +0200
+Subject: perf env: Add perf_env__numa_node()
+
+From: Jiri Olsa <jolsa@kernel.org>
+
+[ Upstream commit 389799a7a1e86c55f38897e679762efadcc9dedd ]
+
+To speed up cpu to node lookups, add perf_env__numa_node(), which on
+the first lookup creates a cpu-indexed array that holds the numa node
+for each stored cpu.
+
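+A minimal usage sketch (hypothetical caller):
+
+	int node = perf_env__numa_node(env, cpu);
+
+which returns the numa node of the given cpu, or -1 when the cpu is not
+covered by the recorded topology.
+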
+Signed-off-by: Jiri Olsa <jolsa@kernel.org>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Alexey Budankov <alexey.budankov@linux.intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Joe Mario <jmario@redhat.com>
+Cc: Kan Liang <kan.liang@linux.intel.com>
+Cc: Michael Petlan <mpetlan@redhat.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Link: http://lkml.kernel.org/r/20190904073415.723-3-jolsa@kernel.org
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Stable-dep-of: 9c51f8788b5d ("perf env: Avoid recursively taking env->bpf_progs.lock")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/util/env.c | 40 ++++++++++++++++++++++++++++++++++++++++
+ tools/perf/util/env.h | 6 ++++++
+ 2 files changed, 46 insertions(+)
+
+diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
+index ef64e197bc8d..2d517f377053 100644
+--- a/tools/perf/util/env.c
++++ b/tools/perf/util/env.c
+@@ -183,6 +183,7 @@ void perf_env__exit(struct perf_env *env)
+ zfree(&env->sibling_threads);
+ zfree(&env->pmu_mappings);
+ zfree(&env->cpu);
++ zfree(&env->numa_map);
+
+ for (i = 0; i < env->nr_numa_nodes; i++)
+ perf_cpu_map__put(env->numa_nodes[i].map);
+@@ -342,3 +343,42 @@ const char *perf_env__arch(struct perf_env *env)
+
+ return normalize_arch(arch_name);
+ }
++
++
++int perf_env__numa_node(struct perf_env *env, int cpu)
++{
++ if (!env->nr_numa_map) {
++ struct numa_node *nn;
++ int i, nr = 0;
++
++ for (i = 0; i < env->nr_numa_nodes; i++) {
++ nn = &env->numa_nodes[i];
++ nr = max(nr, perf_cpu_map__max(nn->map));
++ }
++
++ nr++;
++
++ /*
++ * We initialize the numa_map array to prepare
++ * it for missing cpus, which return node -1
++ */
++ env->numa_map = malloc(nr * sizeof(int));
++ if (!env->numa_map)
++ return -1;
++
++ for (i = 0; i < nr; i++)
++ env->numa_map[i] = -1;
++
++ env->nr_numa_map = nr;
++
++ for (i = 0; i < env->nr_numa_nodes; i++) {
++ int tmp, j;
++
++ nn = &env->numa_nodes[i];
++ perf_cpu_map__for_each_cpu(j, tmp, nn->map)
++ env->numa_map[j] = i;
++ }
++ }
++
++ return cpu >= 0 && cpu < env->nr_numa_map ? env->numa_map[cpu] : -1;
++}
+diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
+index 37028215d4a5..ceddddace5cc 100644
+--- a/tools/perf/util/env.h
++++ b/tools/perf/util/env.h
+@@ -87,6 +87,10 @@ struct perf_env {
+ struct rb_root btfs;
+ u32 btfs_cnt;
+ } bpf_progs;
++
++ /* For fast cpu to numa node lookup via perf_env__numa_node */
++ int *numa_map;
++ int nr_numa_map;
+ };
+
+ enum perf_compress_type {
+@@ -119,4 +123,6 @@ struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
+ __u32 prog_id);
+ bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
+ struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
++
++int perf_env__numa_node(struct perf_env *env, int cpu);
+ #endif /* __PERF_ENV_H */
+--
+2.43.0
+
--- /dev/null
+From 67181df5b09afc9dd808a48d76cc1518128ae810 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Dec 2023 17:46:55 -0800
+Subject: perf env: Avoid recursively taking env->bpf_progs.lock
+
+From: Ian Rogers <irogers@google.com>
+
+[ Upstream commit 9c51f8788b5d4e9f46afbcf563255cfd355690b3 ]
+
+Add variants of perf_env__insert_bpf_prog_info(), perf_env__insert_btf()
+and perf_env__find_btf() prefixed with __ to indicate that
+env->bpf_progs.lock is assumed to be held.
+
+Call these variants when the lock is held to avoid recursively taking it
+and potentially having a thread deadlock with itself.
+
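+To illustrate with the header.c printing path touched below (a sketch
+of the call chain, not a complete call graph): print_bpf_prog_info()
+already holds the lock while it walks the programs, so the lookups it
+reaches must not take it again:
+
+	print_bpf_prog_info()              /* down_read(&env->bpf_progs.lock) */
+	  __bpf_event__print_bpf_prog_info()
+	    __perf_env__find_btf()         /* assumes the lock is held */
+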
+Fixes: f8dfeae009effc0b ("perf bpf: Show more BPF program info in print_bpf_prog_info()")
+Signed-off-by: Ian Rogers <irogers@google.com>
+Acked-by: Jiri Olsa <jolsa@kernel.org>
+Acked-by: Song Liu <song@kernel.org>
+Cc: Adrian Hunter <adrian.hunter@intel.com>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Huacai Chen <chenhuacai@kernel.org>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: K Prateek Nayak <kprateek.nayak@amd.com>
+Cc: Kan Liang <kan.liang@linux.intel.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Ming Wang <wangming01@loongson.cn>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Ravi Bangoria <ravi.bangoria@amd.com>
+Link: https://lore.kernel.org/r/20231207014655.1252484-1-irogers@google.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/util/bpf-event.c | 8 +++---
+ tools/perf/util/bpf-event.h | 12 ++++-----
+ tools/perf/util/env.c | 50 ++++++++++++++++++++++++-------------
+ tools/perf/util/env.h | 4 +++
+ tools/perf/util/header.c | 8 +++---
+ 5 files changed, 50 insertions(+), 32 deletions(-)
+
+diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
+index 96e0a31a38f6..ae30e20af246 100644
+--- a/tools/perf/util/bpf-event.c
++++ b/tools/perf/util/bpf-event.c
+@@ -442,9 +442,9 @@ int evlist__add_bpf_sb_event(struct evlist *evlist, struct perf_env *env)
+ return perf_evlist__add_sb_event(evlist, &attr, bpf_event__sb_cb, env);
+ }
+
+-void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
+- struct perf_env *env,
+- FILE *fp)
++void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
++ struct perf_env *env,
++ FILE *fp)
+ {
+ __u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
+ __u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
+@@ -460,7 +460,7 @@ void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
+ if (info->btf_id) {
+ struct btf_node *node;
+
+- node = perf_env__find_btf(env, info->btf_id);
++ node = __perf_env__find_btf(env, info->btf_id);
+ if (node)
+ btf = btf__new((__u8 *)(node->data),
+ node->data_size);
+diff --git a/tools/perf/util/bpf-event.h b/tools/perf/util/bpf-event.h
+index 68f315c3df5b..50f7412464df 100644
+--- a/tools/perf/util/bpf-event.h
++++ b/tools/perf/util/bpf-event.h
+@@ -34,9 +34,9 @@ struct btf_node {
+ int machine__process_bpf(struct machine *machine, union perf_event *event,
+ struct perf_sample *sample);
+ int evlist__add_bpf_sb_event(struct evlist *evlist, struct perf_env *env);
+-void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
+- struct perf_env *env,
+- FILE *fp);
++void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
++ struct perf_env *env,
++ FILE *fp);
+ #else
+ static inline int machine__process_bpf(struct machine *machine __maybe_unused,
+ union perf_event *event __maybe_unused,
+@@ -51,9 +51,9 @@ static inline int evlist__add_bpf_sb_event(struct evlist *evlist __maybe_unused,
+ return 0;
+ }
+
+-static inline void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info __maybe_unused,
+- struct perf_env *env __maybe_unused,
+- FILE *fp __maybe_unused)
++static inline void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info __maybe_unused,
++ struct perf_env *env __maybe_unused,
++ FILE *fp __maybe_unused)
+ {
+
+ }
+diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
+index 2d517f377053..953db9dd25eb 100644
+--- a/tools/perf/util/env.c
++++ b/tools/perf/util/env.c
+@@ -15,13 +15,19 @@ struct perf_env perf_env;
+
+ void perf_env__insert_bpf_prog_info(struct perf_env *env,
+ struct bpf_prog_info_node *info_node)
++{
++ down_write(&env->bpf_progs.lock);
++ __perf_env__insert_bpf_prog_info(env, info_node);
++ up_write(&env->bpf_progs.lock);
++}
++
++void __perf_env__insert_bpf_prog_info(struct perf_env *env, struct bpf_prog_info_node *info_node)
+ {
+ __u32 prog_id = info_node->info_linear->info.id;
+ struct bpf_prog_info_node *node;
+ struct rb_node *parent = NULL;
+ struct rb_node **p;
+
+- down_write(&env->bpf_progs.lock);
+ p = &env->bpf_progs.infos.rb_node;
+
+ while (*p != NULL) {
+@@ -33,15 +39,13 @@ void perf_env__insert_bpf_prog_info(struct perf_env *env,
+ p = &(*p)->rb_right;
+ } else {
+ pr_debug("duplicated bpf prog info %u\n", prog_id);
+- goto out;
++ return;
+ }
+ }
+
+ rb_link_node(&info_node->rb_node, parent, p);
+ rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
+ env->bpf_progs.infos_cnt++;
+-out:
+- up_write(&env->bpf_progs.lock);
+ }
+
+ struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
+@@ -70,14 +74,22 @@ struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
+ }
+
+ bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
++{
++ bool ret;
++
++ down_write(&env->bpf_progs.lock);
++ ret = __perf_env__insert_btf(env, btf_node);
++ up_write(&env->bpf_progs.lock);
++ return ret;
++}
++
++bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
+ {
+ struct rb_node *parent = NULL;
+ __u32 btf_id = btf_node->id;
+ struct btf_node *node;
+ struct rb_node **p;
+- bool ret = true;
+
+- down_write(&env->bpf_progs.lock);
+ p = &env->bpf_progs.btfs.rb_node;
+
+ while (*p != NULL) {
+@@ -89,25 +101,31 @@ bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
+ p = &(*p)->rb_right;
+ } else {
+ pr_debug("duplicated btf %u\n", btf_id);
+- ret = false;
+- goto out;
++ return false;
+ }
+ }
+
+ rb_link_node(&btf_node->rb_node, parent, p);
+ rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
+ env->bpf_progs.btfs_cnt++;
+-out:
+- up_write(&env->bpf_progs.lock);
+- return ret;
++ return true;
+ }
+
+ struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
++{
++ struct btf_node *res;
++
++ down_read(&env->bpf_progs.lock);
++ res = __perf_env__find_btf(env, btf_id);
++ up_read(&env->bpf_progs.lock);
++ return res;
++}
++
++struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id)
+ {
+ struct btf_node *node = NULL;
+ struct rb_node *n;
+
+- down_read(&env->bpf_progs.lock);
+ n = env->bpf_progs.btfs.rb_node;
+
+ while (n) {
+@@ -117,13 +135,9 @@ struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
+ else if (btf_id > node->id)
+ n = n->rb_right;
+ else
+- goto out;
++ return node;
+ }
+- node = NULL;
+-
+-out:
+- up_read(&env->bpf_progs.lock);
+- return node;
++ return NULL;
+ }
+
+ /* purge data in bpf_progs.infos tree */
+diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
+index ceddddace5cc..b0778483fa04 100644
+--- a/tools/perf/util/env.h
++++ b/tools/perf/util/env.h
+@@ -117,12 +117,16 @@ const char *perf_env__raw_arch(struct perf_env *env);
+ int perf_env__nr_cpus_avail(struct perf_env *env);
+
+ void perf_env__init(struct perf_env *env);
++void __perf_env__insert_bpf_prog_info(struct perf_env *env,
++ struct bpf_prog_info_node *info_node);
+ void perf_env__insert_bpf_prog_info(struct perf_env *env,
+ struct bpf_prog_info_node *info_node);
+ struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
+ __u32 prog_id);
+ bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
++bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
+ struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
++struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id);
+
+ int perf_env__numa_node(struct perf_env *env, int cpu);
+ #endif /* __PERF_ENV_H */
+diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
+index 642528613927..a68feeb3eb00 100644
+--- a/tools/perf/util/header.c
++++ b/tools/perf/util/header.c
+@@ -1546,8 +1546,8 @@ static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
+ node = rb_entry(next, struct bpf_prog_info_node, rb_node);
+ next = rb_next(&node->rb_node);
+
+- bpf_event__print_bpf_prog_info(&node->info_linear->info,
+- env, fp);
++ __bpf_event__print_bpf_prog_info(&node->info_linear->info,
++ env, fp);
+ }
+
+ up_read(&env->bpf_progs.lock);
+@@ -2724,7 +2724,7 @@ static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
+ /* after reading from file, translate offset to address */
+ bpf_program__bpil_offs_to_addr(info_linear);
+ info_node->info_linear = info_linear;
+- perf_env__insert_bpf_prog_info(env, info_node);
++ __perf_env__insert_bpf_prog_info(env, info_node);
+ }
+
+ up_write(&env->bpf_progs.lock);
+@@ -2777,7 +2777,7 @@ static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
+ if (__do_read(ff, node->data, data_size))
+ goto out;
+
+- perf_env__insert_btf(env, node);
++ __perf_env__insert_btf(env, node);
+ node = NULL;
+ }
+
+--
+2.43.0
+
--- /dev/null
+From f55fec24b39a64286839db9d399f3d2f080f1004 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 11 Dec 2023 23:05:44 -0800
+Subject: perf genelf: Set ELF program header addresses properly
+
+From: Namhyung Kim <namhyung@kernel.org>
+
+[ Upstream commit 1af478903fc48c1409a8dd6b698383b62387adf1 ]
+
+The text section starts after the ELF headers, so PHDR.p_vaddr and the
+other program header fields should have the correct (non-zero)
+addresses.
+
+Fixes: babd04386b1df8c3 ("perf jit: Include program header in ELF files")
+Reviewed-by: Ian Rogers <irogers@google.com>
+Signed-off-by: Namhyung Kim <namhyung@kernel.org>
+Cc: Adrian Hunter <adrian.hunter@intel.com>
+Cc: Fangrui Song <maskray@google.com>
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: Lieven Hey <lieven.hey@kdab.com>
+Cc: Milian Wolff <milian.wolff@kdab.com>
+Cc: Pablo Galindo <pablogsal@gmail.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Link: https://lore.kernel.org/r/20231212070547.612536-2-namhyung@kernel.org
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/util/genelf.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/tools/perf/util/genelf.c b/tools/perf/util/genelf.c
+index 69744fd5db39..04509144ff84 100644
+--- a/tools/perf/util/genelf.c
++++ b/tools/perf/util/genelf.c
+@@ -296,9 +296,9 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym,
+ */
+ phdr = elf_newphdr(e, 1);
+ phdr[0].p_type = PT_LOAD;
+- phdr[0].p_offset = 0;
+- phdr[0].p_vaddr = 0;
+- phdr[0].p_paddr = 0;
++ phdr[0].p_offset = GEN_ELF_TEXT_OFFSET;
++ phdr[0].p_vaddr = GEN_ELF_TEXT_OFFSET;
++ phdr[0].p_paddr = GEN_ELF_TEXT_OFFSET;
+ phdr[0].p_filesz = csize;
+ phdr[0].p_memsz = csize;
+ phdr[0].p_flags = PF_X | PF_R;
+--
+2.43.0
+
--- /dev/null
+From 41f5820f9d043b69d1eef79eda5d9d29550fd260 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 24 Apr 2020 10:24:04 -0300
+Subject: perf record: Move sb_evlist to 'struct record'
+
+From: Arnaldo Carvalho de Melo <acme@redhat.com>
+
+[ Upstream commit bc477d7983e345262757568ec27be0395dc2fe73 ]
+
+Where state related to a 'perf record' session is grouped.
+
+Acked-by: Jiri Olsa <jolsa@redhat.com>
+Cc: Adrian Hunter <adrian.hunter@intel.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Song Liu <songliubraving@fb.com>
+Link: http://lore.kernel.org/lkml/20200429131106.27974-2-acme@kernel.org
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Stable-dep-of: 9c51f8788b5d ("perf env: Avoid recursively taking env->bpf_progs.lock")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/builtin-record.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
+index 454e275cd5df..7fc3dadfa156 100644
+--- a/tools/perf/builtin-record.c
++++ b/tools/perf/builtin-record.c
+@@ -80,6 +80,7 @@ struct record {
+ struct auxtrace_record *itr;
+ struct evlist *evlist;
+ struct perf_session *session;
++ struct evlist *sb_evlist;
+ int realtime_prio;
+ bool no_buildid;
+ bool no_buildid_set;
+@@ -1343,7 +1344,6 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
+ struct perf_data *data = &rec->data;
+ struct perf_session *session;
+ bool disabled = false, draining = false;
+- struct evlist *sb_evlist = NULL;
+ int fd;
+ float ratio = 0;
+
+@@ -1455,9 +1455,9 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
+ }
+
+ if (!opts->no_bpf_event)
+- bpf_event__add_sb_event(&sb_evlist, &session->header.env);
++ bpf_event__add_sb_event(&rec->sb_evlist, &session->header.env);
+
+- if (perf_evlist__start_sb_thread(sb_evlist, &rec->opts.target)) {
++ if (perf_evlist__start_sb_thread(rec->sb_evlist, &rec->opts.target)) {
+ pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
+ opts->no_bpf_event = true;
+ }
+@@ -1731,7 +1731,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
+ perf_session__delete(session);
+
+ if (!opts->no_bpf_event)
+- perf_evlist__stop_sb_thread(sb_evlist);
++ perf_evlist__stop_sb_thread(rec->sb_evlist);
+ return status;
+ }
+
+--
+2.43.0
+
--- /dev/null
+From 8d4aff39dee6b6281d8920114547422643fce749 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 24 Apr 2020 10:40:54 -0300
+Subject: perf top: Move sb_evlist to 'struct perf_top'
+
+From: Arnaldo Carvalho de Melo <acme@redhat.com>
+
+[ Upstream commit ca6c9c8b107f9788662117587cd24bbb19cea94d ]
+
+Where state related to a 'perf top' session is grouped.
+
+Acked-by: Jiri Olsa <jolsa@redhat.com>
+Cc: Adrian Hunter <adrian.hunter@intel.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Song Liu <songliubraving@fb.com>
+Link: http://lore.kernel.org/lkml/20200429131106.27974-3-acme@kernel.org
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Stable-dep-of: 9c51f8788b5d ("perf env: Avoid recursively taking env->bpf_progs.lock")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/builtin-top.c | 7 +++----
+ tools/perf/util/top.h | 2 +-
+ 2 files changed, 4 insertions(+), 5 deletions(-)
+
+diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
+index b83a861fab2e..d83954f75557 100644
+--- a/tools/perf/builtin-top.c
++++ b/tools/perf/builtin-top.c
+@@ -1542,7 +1542,6 @@ int cmd_top(int argc, const char **argv)
+ OPTS_EVSWITCH(&top.evswitch),
+ OPT_END()
+ };
+- struct evlist *sb_evlist = NULL;
+ const char * const top_usage[] = {
+ "perf top [<options>]",
+ NULL
+@@ -1684,9 +1683,9 @@ int cmd_top(int argc, const char **argv)
+ }
+
+ if (!top.record_opts.no_bpf_event)
+- bpf_event__add_sb_event(&sb_evlist, &perf_env);
++ bpf_event__add_sb_event(&top.sb_evlist, &perf_env);
+
+- if (perf_evlist__start_sb_thread(sb_evlist, target)) {
++ if (perf_evlist__start_sb_thread(top.sb_evlist, target)) {
+ pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
+ opts->no_bpf_event = true;
+ }
+@@ -1694,7 +1693,7 @@ int cmd_top(int argc, const char **argv)
+ status = __cmd_top(&top);
+
+ if (!opts->no_bpf_event)
+- perf_evlist__stop_sb_thread(sb_evlist);
++ perf_evlist__stop_sb_thread(top.sb_evlist);
+
+ out_delete_evlist:
+ evlist__delete(top.evlist);
+diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h
+index f117d4f4821e..7bea36a61645 100644
+--- a/tools/perf/util/top.h
++++ b/tools/perf/util/top.h
+@@ -18,7 +18,7 @@ struct perf_session;
+
+ struct perf_top {
+ struct perf_tool tool;
+- struct evlist *evlist;
++ struct evlist *evlist, *sb_evlist;
+ struct record_opts record_opts;
+ struct annotation_options annotation_opts;
+ struct evswitch evswitch;
+--
+2.43.0
+
--- /dev/null
+From 750289f0827c8cd33fe34b7deaff74b59cdfb477 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 Nov 2023 16:22:49 +0100
+Subject: s390/pci: fix max size calculation in zpci_memcpy_toio()
+
+From: Niklas Schnelle <schnelle@linux.ibm.com>
+
+[ Upstream commit 80df7d6af7f6d229b34cf237b2cc9024c07111cd ]
+
+The zpci_get_max_write_size() helper is used to determine the maximum
+size a PCI store or load can use at a given __iomem address.
+
+For the PCI block store the following restrictions apply:
+
+1. The dst + len must not cross a 4K boundary in the (pseudo-)MMIO space
+2. len must not exceed ZPCI_MAX_WRITE_SIZE
+3. len must be a multiple of 8 bytes
+4. The src address must be double word (8 byte) aligned
+5. The dst address must be double word (8 byte) aligned
+
+Otherwise only a normal PCI store, which takes its src value from
+a register, can be used. For these PCI stores restriction 1 still
+applies. Similarly, restriction 1 also applies to PCI loads.
+
+It turns out zpci_get_max_write_size() instead implements stricter
+conditions that prevent PCI block stores from being used where they can
+and should be used. In particular, instead of conditions 4 and 5 it
+wrongly requires both dst and src to be aligned to the store size. This
+indirectly covers condition 1 but also rules out many legal PCI block
+stores.
+
+On top of these functional shortcomings, zpci_get_max_write_size() is
+misnamed, as it is used for both read and write size calculations.
+Rename it to zpci_get_max_io_size() and implement the listed conditions
+explicitly.
+
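+As a rough worked example (hypothetical addresses): take a 128-byte
+write with dst aligned to a 4K boundary and src aligned to 8 but not to
+16 bytes. The old helper computes the common power-of-two alignment of
+src and dst, which is 8, so the whole copy degrades to sixteen 8-byte
+single stores. The new helper computes
+
+	size = min3(128, 4096 - (dst & ZPCI_BOUNDARY_MASK), 128) = 128
+
+and, since src, dst and size are all multiples of 8, the data goes out
+as a single 128-byte PCI block store.
+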
+Reviewed-by: Matthew Rosato <mjrosato@linux.ibm.com>
+Fixes: cd24834130ac ("s390/pci: base support")
+Signed-off-by: Niklas Schnelle <schnelle@linux.ibm.com>
+[agordeev@linux.ibm.com replaced spaces with tabs]
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/include/asm/pci_io.h | 32 ++++++++++++++++++--------------
+ arch/s390/pci/pci_mmio.c | 12 ++++++------
+ 2 files changed, 24 insertions(+), 20 deletions(-)
+
+diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h
+index 287bb88f7698..2686bee800e3 100644
+--- a/arch/s390/include/asm/pci_io.h
++++ b/arch/s390/include/asm/pci_io.h
+@@ -11,6 +11,8 @@
+ /* I/O size constraints */
+ #define ZPCI_MAX_READ_SIZE 8
+ #define ZPCI_MAX_WRITE_SIZE 128
++#define ZPCI_BOUNDARY_SIZE (1 << 12)
++#define ZPCI_BOUNDARY_MASK (ZPCI_BOUNDARY_SIZE - 1)
+
+ /* I/O Map */
+ #define ZPCI_IOMAP_SHIFT 48
+@@ -125,16 +127,18 @@ static inline int zpci_read_single(void *dst, const volatile void __iomem *src,
+ int zpci_write_block(volatile void __iomem *dst, const void *src,
+ unsigned long len);
+
+-static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max)
++static inline int zpci_get_max_io_size(u64 src, u64 dst, int len, int max)
+ {
+- int count = len > max ? max : len, size = 1;
++ int offset = dst & ZPCI_BOUNDARY_MASK;
++ int size;
+
+- while (!(src & 0x1) && !(dst & 0x1) && ((size << 1) <= count)) {
+- dst = dst >> 1;
+- src = src >> 1;
+- size = size << 1;
+- }
+- return size;
++ size = min3(len, ZPCI_BOUNDARY_SIZE - offset, max);
++ if (IS_ALIGNED(src, 8) && IS_ALIGNED(dst, 8) && IS_ALIGNED(size, 8))
++ return size;
++
++ if (size >= 8)
++ return 8;
++ return rounddown_pow_of_two(size);
+ }
+
+ static inline int zpci_memcpy_fromio(void *dst,
+@@ -144,9 +148,9 @@ static inline int zpci_memcpy_fromio(void *dst,
+ int size, rc = 0;
+
+ while (n > 0) {
+- size = zpci_get_max_write_size((u64 __force) src,
+- (u64) dst, n,
+- ZPCI_MAX_READ_SIZE);
++ size = zpci_get_max_io_size((u64 __force) src,
++ (u64) dst, n,
++ ZPCI_MAX_READ_SIZE);
+ rc = zpci_read_single(dst, src, size);
+ if (rc)
+ break;
+@@ -166,9 +170,9 @@ static inline int zpci_memcpy_toio(volatile void __iomem *dst,
+ return -EINVAL;
+
+ while (n > 0) {
+- size = zpci_get_max_write_size((u64 __force) dst,
+- (u64) src, n,
+- ZPCI_MAX_WRITE_SIZE);
++ size = zpci_get_max_io_size((u64 __force) dst,
++ (u64) src, n,
++ ZPCI_MAX_WRITE_SIZE);
+ if (size > 8) /* main path */
+ rc = zpci_write_block(dst, src, size);
+ else
+diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
+index a4d5048b7eee..675e6cb50584 100644
+--- a/arch/s390/pci/pci_mmio.c
++++ b/arch/s390/pci/pci_mmio.c
+@@ -100,9 +100,9 @@ static inline int __memcpy_toio_inuser(void __iomem *dst,
+
+ old_fs = enable_sacf_uaccess();
+ while (n > 0) {
+- size = zpci_get_max_write_size((u64 __force) dst,
+- (u64 __force) src, n,
+- ZPCI_MAX_WRITE_SIZE);
++ size = zpci_get_max_io_size((u64 __force) dst,
++ (u64 __force) src, n,
++ ZPCI_MAX_WRITE_SIZE);
+ if (size > 8) /* main path */
+ rc = __pcistb_mio_inuser(dst, src, size, &status);
+ else
+@@ -250,9 +250,9 @@ static inline int __memcpy_fromio_inuser(void __user *dst,
+
+ old_fs = enable_sacf_uaccess();
+ while (n > 0) {
+- size = zpci_get_max_write_size((u64 __force) src,
+- (u64 __force) dst, n,
+- ZPCI_MAX_READ_SIZE);
++ size = zpci_get_max_io_size((u64 __force) src,
++ (u64 __force) dst, n,
++ ZPCI_MAX_READ_SIZE);
+ rc = __pcilg_mio_inuser(dst, src, size, &status);
+ if (rc)
+ break;
+--
+2.43.0
+
--- /dev/null
+From a7b8a6088bdcffd5d278405bc86b92c18aa950ff Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Nov 2023 16:29:29 +0100
+Subject: serial: 8250: omap: Don't skip resource freeing if
+ pm_runtime_resume_and_get() failed
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+
+[ Upstream commit ad90d0358bd3b4554f243a425168fc7cebe7d04e ]
+
+Returning an error code from .remove() makes the driver core emit the
+not very helpful error message:
+
+ remove callback returned a non-zero value. This will be ignored.
+
+and then remove the device anyhow. So all resources that were not freed
+are leaked in this case. Skipping serial8250_unregister_port() has the
+potential to keep enough of the UART around to trigger a use-after-free.
+
+So replace the error return (and with it the not very helpful error
+message) with a more useful error message and continue the cleanup.
+
+Fixes: e3f0c638f428 ("serial: 8250: omap: Fix unpaired pm_runtime_put_sync() in omap8250_remove()")
+Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Reviewed-by: Tony Lindgren <tony@atomide.com>
+Link: https://lore.kernel.org/r/20231110152927.70601-2-u.kleine-koenig@pengutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/tty/serial/8250/8250_omap.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
+index a2db055278a1..6bb8bbaa4fdb 100644
+--- a/drivers/tty/serial/8250/8250_omap.c
++++ b/drivers/tty/serial/8250/8250_omap.c
+@@ -1278,7 +1278,7 @@ static int omap8250_remove(struct platform_device *pdev)
+
+ err = pm_runtime_resume_and_get(&pdev->dev);
+ if (err)
+- return err;
++ dev_err(&pdev->dev, "Failed to resume hardware\n");
+
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
+--
+2.43.0
+
--- /dev/null
+From b8fc577b58a8f69063889003e0735eb289de015d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 24 Dec 2023 10:32:09 +0100
+Subject: serial: imx: Correct clock error message in function probe()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Christoph Niedermaier <cniedermaier@dh-electronics.com>
+
+[ Upstream commit 3e189470cad27d41a3a9dc02649f965b7ed1c90f ]
+
+Correct the clock error message by changing the clock name.
+
+Fixes: 1e512d45332b ("serial: imx: add error messages when .probe fails")
+Signed-off-by: Christoph Niedermaier <cniedermaier@dh-electronics.com>
+Reviewed-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Link: https://lore.kernel.org/r/20231224093209.2612-1-cniedermaier@dh-electronics.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/tty/serial/imx.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
+index 3f5878e367c7..80cb72350cea 100644
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -2277,7 +2277,7 @@ static int imx_uart_probe(struct platform_device *pdev)
+ /* For register access, we only need to enable the ipg clock. */
+ ret = clk_prepare_enable(sport->clk_ipg);
+ if (ret) {
+- dev_err(&pdev->dev, "failed to enable per clk: %d\n", ret);
++ dev_err(&pdev->dev, "failed to enable ipg clk: %d\n", ret);
+ return ret;
+ }
+
+--
+2.43.0
+
wifi-mwifiex-configure-bssid-consistently-when-starting-ap.patch
x86-kvm-do-not-try-to-disable-kvmclock-if-it-was-not-enabled.patch
hid-wacom-correct-behavior-when-processing-some-confidence-false-touches.patch
+mips-fix-incorrect-max_low_pfn-adjustment.patch
+mips-alchemy-fix-an-out-of-bound-access-in-db1200_de.patch
+mips-alchemy-fix-an-out-of-bound-access-in-db1550_de.patch
+serial-8250-omap-don-t-skip-resource-freeing-if-pm_r.patch
+binder-print-warnings-when-detecting-oneway-spamming.patch
+binder-fix-unused-alloc-free_async_space.patch
+acpi-property-let-args-be-null-in-__acpi_node_get_pr.patch
+software-node-let-args-be-null-in-software_node_get_.patch
+perf-genelf-set-elf-program-header-addresses-properl.patch
+nvmet-tcp-fix-a-kernel-panic-when-host-sends-an-inva.patch
+nvmet-tcp-fix-a-crash-in-nvmet_req_complete.patch
+perf-env-add-perf_env__numa_node.patch
+perf-record-move-sb_evlist-to-struct-record.patch
+perf-top-move-sb_evlist-to-struct-perf_top.patch
+perf-bpf-decouple-creating-the-evlist-from-adding-th.patch
+perf-env-avoid-recursively-taking-env-bpf_progs.lock.patch
+apparmor-avoid-crash-when-parsed-profile-name-is-emp.patch
+serial-imx-correct-clock-error-message-in-function-p.patch
+nvmet-tcp-fix-the-h2c-expected-pdu-len-calculation.patch
+pci-keystone-fix-race-condition-when-initializing-ph.patch
+s390-pci-fix-max-size-calculation-in-zpci_memcpy_toi.patch
+net-qualcomm-rmnet-fix-global-oob-in-rmnet_policy.patch
+net-phy-micrel-populate-.soft_reset-for-ksz9131.patch
+net-ravb-fix-dma_addr_t-truncation-in-error-case.patch
+net-dsa-vsc73xx-add-null-pointer-check-to-vsc73xx_gp.patch
+netfilter-nf_tables-skip-dead-set-elements-in-netlin.patch
+ipvs-avoid-stat-macros-calls-from-preemptible-contex.patch
+kdb-censor-attempts-to-set-prompt-without-enable_mem.patch
+kdb-fix-a-potential-buffer-overflow-in-kdb_local.patch
+mlxsw-spectrum_acl_erp-fix-error-flow-of-pool-alloca.patch
+i2c-s3c24xx-fix-read-transfers-in-polling-mode.patch
+i2c-s3c24xx-fix-transferring-more-than-one-message-i.patch
--- /dev/null
+From 6d2c75d5db1a5d08ccddb94db56b4cdeff70dfe2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 9 Nov 2023 12:10:09 +0200
+Subject: software node: Let args be NULL in software_node_get_reference_args
+
+From: Sakari Ailus <sakari.ailus@linux.intel.com>
+
+[ Upstream commit 1eaea4b3604eb9ca7d9a1e73d88fc121bb4061f5 ]
+
+fwnode_get_property_reference_args() may not be called with a NULL args
+argument on software nodes, while OF already supports this. Add the
+missing NULL check.
+The purpose is to be able to count the references.
+
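+A rough sketch of that counting use (assuming the usual
+fwnode_property_get_reference_args() wrapper and a hypothetical
+"endpoints" property; error handling omitted):
+
+	unsigned int nr = 0;
+
+	while (!fwnode_property_get_reference_args(fwnode, "endpoints",
+						   NULL, 0, nr, NULL))
+		nr++;
+
+After the loop, nr holds the number of references listed in the
+property, without any need for a throwaway fwnode_reference_args.
+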
+Fixes: b06184acf751 ("software node: Add software_node_get_reference_args()")
+Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Reviewed-by: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Link: https://lore.kernel.org/r/20231109101010.1329587-3-sakari.ailus@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/base/swnode.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
+index 4c3b9813b284..636d52d1a1b8 100644
+--- a/drivers/base/swnode.c
++++ b/drivers/base/swnode.c
+@@ -604,6 +604,9 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode,
+ if (nargs > NR_FWNODE_REFERENCE_ARGS)
+ return -EINVAL;
+
++ if (!args)
++ return 0;
++
+ args->fwnode = software_node_get(refnode);
+ args->nargs = nargs;
+
+--
+2.43.0
+