--- /dev/null
+From ff222ad7bcfb4ca4abbd97fd2b966847011472ab Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 3 Jul 2023 13:24:30 +0200
+Subject: ALSA: pcm: Fix potential data race at PCM memory allocation helpers
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit bd55842ed998a622ba6611fe59b3358c9f76773d ]
+
+The PCM memory allocation helpers have a sanity check against too many
+buffer allocations. However, the check is performed without a proper
+lock and the allocation isn't serialized; this allows the user to
+allocate more memory than the predefined max size.
+
+Practically seen, this isn't really a big problem, as it's more or
+less some "soft limit" as a sanity check, and it's not possible to
+allocate unlimitedly. But it's still better to address this for more
+consistent behavior.
+
+The patch covers the size check in do_alloc_pages() with the
+card->memory_mutex, and increases the allocated size there for
+preventing the further overflow. When the actual allocation fails,
+the size is decreased accordingly.
+
+Reported-by: BassCheck <bass@buaa.edu.cn>
+Reported-by: Tuo Li <islituo@gmail.com>
+Link: https://lore.kernel.org/r/CADm8Tek6t0WedK+3Y6rbE5YEt19tML8BUL45N2ji4ZAz1KcN_A@mail.gmail.com
+Reviewed-by: Jaroslav Kysela <perex@perex.cz>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20230703112430.30634-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/core/pcm_memory.c | 44 +++++++++++++++++++++++++++++++++--------
+ 1 file changed, 36 insertions(+), 8 deletions(-)
+
+diff --git a/sound/core/pcm_memory.c b/sound/core/pcm_memory.c
+index 191883842a35d..3e60a337bbef1 100644
+--- a/sound/core/pcm_memory.c
++++ b/sound/core/pcm_memory.c
+@@ -31,20 +31,51 @@ static unsigned long max_alloc_per_card = 32UL * 1024UL * 1024UL;
+ module_param(max_alloc_per_card, ulong, 0644);
+ MODULE_PARM_DESC(max_alloc_per_card, "Max total allocation bytes per card.");
+
++static void __update_allocated_size(struct snd_card *card, ssize_t bytes)
++{
++ card->total_pcm_alloc_bytes += bytes;
++}
++
++static void update_allocated_size(struct snd_card *card, ssize_t bytes)
++{
++ mutex_lock(&card->memory_mutex);
++ __update_allocated_size(card, bytes);
++ mutex_unlock(&card->memory_mutex);
++}
++
++static void decrease_allocated_size(struct snd_card *card, size_t bytes)
++{
++ mutex_lock(&card->memory_mutex);
++ WARN_ON(card->total_pcm_alloc_bytes < bytes);
++ __update_allocated_size(card, -(ssize_t)bytes);
++ mutex_unlock(&card->memory_mutex);
++}
++
+ static int do_alloc_pages(struct snd_card *card, int type, struct device *dev,
+ size_t size, struct snd_dma_buffer *dmab)
+ {
+ int err;
+
++ /* check and reserve the requested size */
++ mutex_lock(&card->memory_mutex);
+ if (max_alloc_per_card &&
+- card->total_pcm_alloc_bytes + size > max_alloc_per_card)
++ card->total_pcm_alloc_bytes + size > max_alloc_per_card) {
++ mutex_unlock(&card->memory_mutex);
+ return -ENOMEM;
++ }
++ __update_allocated_size(card, size);
++ mutex_unlock(&card->memory_mutex);
+
+ err = snd_dma_alloc_pages(type, dev, size, dmab);
+ if (!err) {
+- mutex_lock(&card->memory_mutex);
+- card->total_pcm_alloc_bytes += dmab->bytes;
+- mutex_unlock(&card->memory_mutex);
++ /* the actual allocation size might be bigger than requested,
++ * and we need to correct the account
++ */
++ if (dmab->bytes != size)
++ update_allocated_size(card, dmab->bytes - size);
++ } else {
++ /* take back on allocation failure */
++ decrease_allocated_size(card, size);
+ }
+ return err;
+ }
+@@ -53,10 +84,7 @@ static void do_free_pages(struct snd_card *card, struct snd_dma_buffer *dmab)
+ {
+ if (!dmab->area)
+ return;
+- mutex_lock(&card->memory_mutex);
+- WARN_ON(card->total_pcm_alloc_bytes < dmab->bytes);
+- card->total_pcm_alloc_bytes -= dmab->bytes;
+- mutex_unlock(&card->memory_mutex);
++ decrease_allocated_size(card, dmab->bytes);
+ snd_dma_free_pages(dmab);
+ dmab->area = NULL;
+ }
+--
+2.40.1
+
--- /dev/null
+From efe080ecb2c1838b16755d52864cb872c4719797 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Aug 2023 15:19:04 +0800
+Subject: bonding: fix macvlan over alb bond support
+
+From: Hangbin Liu <liuhangbin@gmail.com>
+
+[ Upstream commit e74216b8def3803e98ae536de78733e9d7f3b109 ]
+
+The commit 14af9963ba1e ("bonding: Support macvlans on top of tlb/rlb mode
+bonds") aims to enable the use of macvlans on top of rlb bond mode. However,
+the current rlb bond mode only handles ARP packets to update remote neighbor
+entries. This causes an issue when a macvlan is on top of the bond, and
+remote devices send packets to the macvlan using the bond's MAC address
+as the destination. After delivering the packets to the macvlan, the macvlan
+will reject them as the MAC address is incorrect. Consequently, this commit
+makes macvlan over bond non-functional.
+
+To address this problem, one potential solution is to check for the presence
+of a macvlan port on the bond device using netif_is_macvlan_port(bond->dev)
+and return NULL in the rlb_arp_xmit() function. However, this approach
+doesn't fully resolve the situation when a VLAN exists between the bond and
+macvlan.
+
+So let's just do a partial revert for commit 14af9963ba1e in rlb_arp_xmit().
+As the comment said, Don't modify or load balance ARPs that do not originate
+locally.
+
+Fixes: 14af9963ba1e ("bonding: Support macvlans on top of tlb/rlb mode bonds")
+Reported-by: susan.zheng@veritas.com
+Closes: https://bugzilla.redhat.com/show_bug.cgi?id=2117816
+Signed-off-by: Hangbin Liu <liuhangbin@gmail.com>
+Acked-by: Jay Vosburgh <jay.vosburgh@canonical.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/bonding/bond_alb.c | 6 +++---
+ include/net/bonding.h | 11 +----------
+ 2 files changed, 4 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
+index 152f76f869278..64ba465741a78 100644
+--- a/drivers/net/bonding/bond_alb.c
++++ b/drivers/net/bonding/bond_alb.c
+@@ -656,10 +656,10 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
+ return NULL;
+ arp = (struct arp_pkt *)skb_network_header(skb);
+
+- /* Don't modify or load balance ARPs that do not originate locally
+- * (e.g.,arrive via a bridge).
++ /* Don't modify or load balance ARPs that do not originate
++ * from the bond itself or a VLAN directly above the bond.
+ */
+- if (!bond_slave_has_mac_rx(bond, arp->mac_src))
++ if (!bond_slave_has_mac_rcu(bond, arp->mac_src))
+ return NULL;
+
+ if (arp->op_code == htons(ARPOP_REPLY)) {
+diff --git a/include/net/bonding.h b/include/net/bonding.h
+index 34b6f7241a41e..82d128c0fe6df 100644
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -698,23 +698,14 @@ static inline struct slave *bond_slave_has_mac(struct bonding *bond,
+ }
+
+ /* Caller must hold rcu_read_lock() for read */
+-static inline bool bond_slave_has_mac_rx(struct bonding *bond, const u8 *mac)
++static inline bool bond_slave_has_mac_rcu(struct bonding *bond, const u8 *mac)
+ {
+ struct list_head *iter;
+ struct slave *tmp;
+- struct netdev_hw_addr *ha;
+
+ bond_for_each_slave_rcu(bond, tmp, iter)
+ if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
+ return true;
+-
+- if (netdev_uc_empty(bond->dev))
+- return false;
+-
+- netdev_for_each_uc_addr(ha, bond->dev)
+- if (ether_addr_equal_64bits(mac, ha->addr))
+- return true;
+-
+ return false;
+ }
+
+--
+2.40.1
+
--- /dev/null
+From 29181dda1ffad2882b0962069e03e4a025ea24bd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Aug 2023 01:58:20 +0000
+Subject: dccp: annotate data-races in dccp_poll()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit cba3f1786916063261e3e5ccbb803abc325b24ef ]
+
+We changed tcp_poll() over time, but never updated dccp.
+
+Note that we also could remove dccp instead of maintaining it.
+
+Fixes: 7c657876b63c ("[DCCP]: Initial implementation")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Link: https://lore.kernel.org/r/20230818015820.2701595-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/dccp/proto.c | 20 ++++++++++++--------
+ 1 file changed, 12 insertions(+), 8 deletions(-)
+
+diff --git a/net/dccp/proto.c b/net/dccp/proto.c
+index 3293c1e3aa5d5..c647035a36d18 100644
+--- a/net/dccp/proto.c
++++ b/net/dccp/proto.c
+@@ -324,11 +324,15 @@ EXPORT_SYMBOL_GPL(dccp_disconnect);
+ __poll_t dccp_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
+ {
+- __poll_t mask;
+ struct sock *sk = sock->sk;
++ __poll_t mask;
++ u8 shutdown;
++ int state;
+
+ sock_poll_wait(file, sock, wait);
+- if (sk->sk_state == DCCP_LISTEN)
++
++ state = inet_sk_state_load(sk);
++ if (state == DCCP_LISTEN)
+ return inet_csk_listen_poll(sk);
+
+ /* Socket is not locked. We are protected from async events
+@@ -337,20 +341,21 @@ __poll_t dccp_poll(struct file *file, struct socket *sock,
+ */
+
+ mask = 0;
+- if (sk->sk_err)
++ if (READ_ONCE(sk->sk_err))
+ mask = EPOLLERR;
++ shutdown = READ_ONCE(sk->sk_shutdown);
+
+- if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
++ if (shutdown == SHUTDOWN_MASK || state == DCCP_CLOSED)
+ mask |= EPOLLHUP;
+- if (sk->sk_shutdown & RCV_SHUTDOWN)
++ if (shutdown & RCV_SHUTDOWN)
+ mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
+
+ /* Connected? */
+- if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
++ if ((1 << state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
+ if (atomic_read(&sk->sk_rmem_alloc) > 0)
+ mask |= EPOLLIN | EPOLLRDNORM;
+
+- if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
++ if (!(shutdown & SEND_SHUTDOWN)) {
+ if (sk_stream_is_writeable(sk)) {
+ mask |= EPOLLOUT | EPOLLWRNORM;
+ } else { /* send SIGIO later */
+@@ -368,7 +373,6 @@ __poll_t dccp_poll(struct file *file, struct socket *sock,
+ }
+ return mask;
+ }
+-
+ EXPORT_SYMBOL_GPL(dccp_poll);
+
+ int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
+--
+2.40.1
+
--- /dev/null
+From a9ba02380120fa5401345f0e73a8f206b981d762 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Apr 2022 16:06:34 -0400
+Subject: dlm: improve plock logging if interrupted
+
+From: Alexander Aring <aahringo@redhat.com>
+
+[ Upstream commit bcfad4265cedf3adcac355e994ef9771b78407bd ]
+
+This patch changes the log level if a plock is removed when interrupted
+from debug to info. Additional it signals now that the plock entity was
+removed to let the user know what's happening.
+
+If on a dev_write() a pending plock cannot be found, it will signal that
+it might have been removed because of wait interruption.
+
+Before this patch there might be a "dev_write no op ..." info message
+and the users can only guess that the plock was removed before because
+of the wait interruption. To be sure that is the case we log both messages
+on the same log level.
+
+Let both messages be logged on the info level because it should not happen
+a lot, and if it happens it should be clear why the op was not found.
+
+Signed-off-by: Alexander Aring <aahringo@redhat.com>
+Signed-off-by: David Teigland <teigland@redhat.com>
+Stable-dep-of: 57e2c2f2d94c ("fs: dlm: fix mismatch of plock results from userspace")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/dlm/plock.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
+index f3482e936cc25..f74d5a28ad27c 100644
+--- a/fs/dlm/plock.c
++++ b/fs/dlm/plock.c
+@@ -161,11 +161,12 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+
+ rv = wait_event_killable(recv_wq, (op->done != 0));
+ if (rv == -ERESTARTSYS) {
+- log_debug(ls, "%s: wait killed %llx", __func__,
+- (unsigned long long)number);
+ spin_lock(&ops_lock);
+ list_del(&op->list);
+ spin_unlock(&ops_lock);
++ log_print("%s: wait interrupted %x %llx, op removed",
++ __func__, ls->ls_global_id,
++ (unsigned long long)number);
+ dlm_release_plock_op(op);
+ do_unlock_close(ls, number, file, fl);
+ goto out;
+@@ -469,8 +470,8 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
+ else
+ wake_up(&recv_wq);
+ } else
+- log_print("dev_write no op %x %llx", info.fsid,
+- (unsigned long long)info.number);
++ log_print("%s: no op %x %llx - may got interrupted?", __func__,
++ info.fsid, (unsigned long long)info.number);
+ return count;
+ }
+
+--
+2.40.1
+
--- /dev/null
+From 509c3a17a55477259b12d196911dc35f32f4649f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Apr 2022 14:05:31 -0400
+Subject: dlm: replace usage of found with dedicated list iterator variable
+
+From: Jakob Koschel <jakobkoschel@gmail.com>
+
+[ Upstream commit dc1acd5c94699389a9ed023e94dd860c846ea1f6 ]
+
+To move the list iterator variable into the list_for_each_entry_*()
+macro in the future it should be avoided to use the list iterator
+variable after the loop body.
+
+To *never* use the list iterator variable after the loop it was
+concluded to use a separate iterator variable instead of a
+found boolean [1].
+
+This removes the need to use a found variable and simply checking if
+the variable was set, can determine if the break/goto was hit.
+
+Link: https://lore.kernel.org/all/CAHk-=wgRr_D8CB-D9Kg-c=EHreAsk5SqXPwr9Y7k9sA6cWXJ6w@mail.gmail.com/ [1]
+Signed-off-by: Jakob Koschel <jakobkoschel@gmail.com>
+Signed-off-by: Alexander Aring <aahringo@redhat.com>
+Signed-off-by: David Teigland <teigland@redhat.com>
+Stable-dep-of: 57e2c2f2d94c ("fs: dlm: fix mismatch of plock results from userspace")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/dlm/lock.c | 53 +++++++++++++++++++++++-------------------------
+ fs/dlm/plock.c | 24 +++++++++++-----------
+ fs/dlm/recover.c | 39 +++++++++++++++++------------------
+ 3 files changed, 56 insertions(+), 60 deletions(-)
+
+diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
+index dde9afb6747ba..51ab06308bc73 100644
+--- a/fs/dlm/lock.c
++++ b/fs/dlm/lock.c
+@@ -1856,7 +1856,7 @@ static void del_timeout(struct dlm_lkb *lkb)
+ void dlm_scan_timeout(struct dlm_ls *ls)
+ {
+ struct dlm_rsb *r;
+- struct dlm_lkb *lkb;
++ struct dlm_lkb *lkb = NULL, *iter;
+ int do_cancel, do_warn;
+ s64 wait_us;
+
+@@ -1867,27 +1867,28 @@ void dlm_scan_timeout(struct dlm_ls *ls)
+ do_cancel = 0;
+ do_warn = 0;
+ mutex_lock(&ls->ls_timeout_mutex);
+- list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list) {
++ list_for_each_entry(iter, &ls->ls_timeout, lkb_time_list) {
+
+ wait_us = ktime_to_us(ktime_sub(ktime_get(),
+- lkb->lkb_timestamp));
++ iter->lkb_timestamp));
+
+- if ((lkb->lkb_exflags & DLM_LKF_TIMEOUT) &&
+- wait_us >= (lkb->lkb_timeout_cs * 10000))
++ if ((iter->lkb_exflags & DLM_LKF_TIMEOUT) &&
++ wait_us >= (iter->lkb_timeout_cs * 10000))
+ do_cancel = 1;
+
+- if ((lkb->lkb_flags & DLM_IFL_WATCH_TIMEWARN) &&
++ if ((iter->lkb_flags & DLM_IFL_WATCH_TIMEWARN) &&
+ wait_us >= dlm_config.ci_timewarn_cs * 10000)
+ do_warn = 1;
+
+ if (!do_cancel && !do_warn)
+ continue;
+- hold_lkb(lkb);
++ hold_lkb(iter);
++ lkb = iter;
+ break;
+ }
+ mutex_unlock(&ls->ls_timeout_mutex);
+
+- if (!do_cancel && !do_warn)
++ if (!lkb)
+ break;
+
+ r = lkb->lkb_resource;
+@@ -5241,21 +5242,18 @@ void dlm_recover_waiters_pre(struct dlm_ls *ls)
+
+ static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls)
+ {
+- struct dlm_lkb *lkb;
+- int found = 0;
++ struct dlm_lkb *lkb = NULL, *iter;
+
+ mutex_lock(&ls->ls_waiters_mutex);
+- list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
+- if (lkb->lkb_flags & DLM_IFL_RESEND) {
+- hold_lkb(lkb);
+- found = 1;
++ list_for_each_entry(iter, &ls->ls_waiters, lkb_wait_reply) {
++ if (iter->lkb_flags & DLM_IFL_RESEND) {
++ hold_lkb(iter);
++ lkb = iter;
+ break;
+ }
+ }
+ mutex_unlock(&ls->ls_waiters_mutex);
+
+- if (!found)
+- lkb = NULL;
+ return lkb;
+ }
+
+@@ -5914,37 +5912,36 @@ int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
+ int mode, uint32_t flags, void *name, unsigned int namelen,
+ unsigned long timeout_cs, uint32_t *lkid)
+ {
+- struct dlm_lkb *lkb;
++ struct dlm_lkb *lkb = NULL, *iter;
+ struct dlm_user_args *ua;
+ int found_other_mode = 0;
+- int found = 0;
+ int rv = 0;
+
+ mutex_lock(&ls->ls_orphans_mutex);
+- list_for_each_entry(lkb, &ls->ls_orphans, lkb_ownqueue) {
+- if (lkb->lkb_resource->res_length != namelen)
++ list_for_each_entry(iter, &ls->ls_orphans, lkb_ownqueue) {
++ if (iter->lkb_resource->res_length != namelen)
+ continue;
+- if (memcmp(lkb->lkb_resource->res_name, name, namelen))
++ if (memcmp(iter->lkb_resource->res_name, name, namelen))
+ continue;
+- if (lkb->lkb_grmode != mode) {
++ if (iter->lkb_grmode != mode) {
+ found_other_mode = 1;
+ continue;
+ }
+
+- found = 1;
+- list_del_init(&lkb->lkb_ownqueue);
+- lkb->lkb_flags &= ~DLM_IFL_ORPHAN;
+- *lkid = lkb->lkb_id;
++ lkb = iter;
++ list_del_init(&iter->lkb_ownqueue);
++ iter->lkb_flags &= ~DLM_IFL_ORPHAN;
++ *lkid = iter->lkb_id;
+ break;
+ }
+ mutex_unlock(&ls->ls_orphans_mutex);
+
+- if (!found && found_other_mode) {
++ if (!lkb && found_other_mode) {
+ rv = -EAGAIN;
+ goto out;
+ }
+
+- if (!found) {
++ if (!lkb) {
+ rv = -ENOENT;
+ goto out;
+ }
+diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
+index f74d5a28ad27c..95f4662c1209a 100644
+--- a/fs/dlm/plock.c
++++ b/fs/dlm/plock.c
+@@ -434,9 +434,9 @@ static ssize_t dev_read(struct file *file, char __user *u, size_t count,
+ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
+ loff_t *ppos)
+ {
++ struct plock_op *op = NULL, *iter;
+ struct dlm_plock_info info;
+- struct plock_op *op;
+- int found = 0, do_callback = 0;
++ int do_callback = 0;
+
+ if (count != sizeof(info))
+ return -EINVAL;
+@@ -448,23 +448,23 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
+ return -EINVAL;
+
+ spin_lock(&ops_lock);
+- list_for_each_entry(op, &recv_list, list) {
+- if (op->info.fsid == info.fsid &&
+- op->info.number == info.number &&
+- op->info.owner == info.owner) {
+- list_del_init(&op->list);
+- memcpy(&op->info, &info, sizeof(info));
+- if (op->data)
++ list_for_each_entry(iter, &recv_list, list) {
++ if (iter->info.fsid == info.fsid &&
++ iter->info.number == info.number &&
++ iter->info.owner == info.owner) {
++ list_del_init(&iter->list);
++ memcpy(&iter->info, &info, sizeof(info));
++ if (iter->data)
+ do_callback = 1;
+ else
+- op->done = 1;
+- found = 1;
++ iter->done = 1;
++ op = iter;
+ break;
+ }
+ }
+ spin_unlock(&ops_lock);
+
+- if (found) {
++ if (op) {
+ if (do_callback)
+ dlm_plock_callback(op);
+ else
+diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c
+index 8928e99dfd47d..df18f38a02734 100644
+--- a/fs/dlm/recover.c
++++ b/fs/dlm/recover.c
+@@ -732,10 +732,9 @@ void dlm_recovered_lock(struct dlm_rsb *r)
+
+ static void recover_lvb(struct dlm_rsb *r)
+ {
+- struct dlm_lkb *lkb, *high_lkb = NULL;
++ struct dlm_lkb *big_lkb = NULL, *iter, *high_lkb = NULL;
+ uint32_t high_seq = 0;
+ int lock_lvb_exists = 0;
+- int big_lock_exists = 0;
+ int lvblen = r->res_ls->ls_lvblen;
+
+ if (!rsb_flag(r, RSB_NEW_MASTER2) &&
+@@ -751,37 +750,37 @@ static void recover_lvb(struct dlm_rsb *r)
+ /* we are the new master, so figure out if VALNOTVALID should
+ be set, and set the rsb lvb from the best lkb available. */
+
+- list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
+- if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
++ list_for_each_entry(iter, &r->res_grantqueue, lkb_statequeue) {
++ if (!(iter->lkb_exflags & DLM_LKF_VALBLK))
+ continue;
+
+ lock_lvb_exists = 1;
+
+- if (lkb->lkb_grmode > DLM_LOCK_CR) {
+- big_lock_exists = 1;
++ if (iter->lkb_grmode > DLM_LOCK_CR) {
++ big_lkb = iter;
+ goto setflag;
+ }
+
+- if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
+- high_lkb = lkb;
+- high_seq = lkb->lkb_lvbseq;
++ if (((int)iter->lkb_lvbseq - (int)high_seq) >= 0) {
++ high_lkb = iter;
++ high_seq = iter->lkb_lvbseq;
+ }
+ }
+
+- list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
+- if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
++ list_for_each_entry(iter, &r->res_convertqueue, lkb_statequeue) {
++ if (!(iter->lkb_exflags & DLM_LKF_VALBLK))
+ continue;
+
+ lock_lvb_exists = 1;
+
+- if (lkb->lkb_grmode > DLM_LOCK_CR) {
+- big_lock_exists = 1;
++ if (iter->lkb_grmode > DLM_LOCK_CR) {
++ big_lkb = iter;
+ goto setflag;
+ }
+
+- if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
+- high_lkb = lkb;
+- high_seq = lkb->lkb_lvbseq;
++ if (((int)iter->lkb_lvbseq - (int)high_seq) >= 0) {
++ high_lkb = iter;
++ high_seq = iter->lkb_lvbseq;
+ }
+ }
+
+@@ -790,7 +789,7 @@ static void recover_lvb(struct dlm_rsb *r)
+ goto out;
+
+ /* lvb is invalidated if only NL/CR locks remain */
+- if (!big_lock_exists)
++ if (!big_lkb)
+ rsb_set_flag(r, RSB_VALNOTVALID);
+
+ if (!r->res_lvbptr) {
+@@ -799,9 +798,9 @@ static void recover_lvb(struct dlm_rsb *r)
+ goto out;
+ }
+
+- if (big_lock_exists) {
+- r->res_lvbseq = lkb->lkb_lvbseq;
+- memcpy(r->res_lvbptr, lkb->lkb_lvbptr, lvblen);
++ if (big_lkb) {
++ r->res_lvbseq = big_lkb->lkb_lvbseq;
++ memcpy(r->res_lvbptr, big_lkb->lkb_lvbptr, lvblen);
+ } else if (high_lkb) {
+ r->res_lvbseq = high_lkb->lkb_lvbseq;
+ memcpy(r->res_lvbptr, high_lkb->lkb_lvbptr, lvblen);
+--
+2.40.1
+
--- /dev/null
+From 936074a371711bae37d929bb0376717b0e3b0293 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 27 Apr 2021 11:57:43 -0400
+Subject: dm integrity: increase RECALC_SECTORS to improve recalculate speed
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+[ Upstream commit b1a2b9332050c7ae32a22c2c74bc443e39f37b23 ]
+
+Increase RECALC_SECTORS because it improves recalculate speed slightly
+(from 390kiB/s to 410kiB/s).
+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Stable-dep-of: 6d50eb472593 ("dm integrity: reduce vmalloc space footprint on 32-bit architectures")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/dm-integrity.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index 7599a122c9563..ea08eb4ed0d95 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -35,7 +35,7 @@
+ #define MIN_LOG2_INTERLEAVE_SECTORS 3
+ #define MAX_LOG2_INTERLEAVE_SECTORS 31
+ #define METADATA_WORKQUEUE_MAX_ACTIVE 16
+-#define RECALC_SECTORS 8192
++#define RECALC_SECTORS 32768
+ #define RECALC_WRITE_SUPER 16
+ #define BITMAP_BLOCK_SIZE 4096 /* don't change it */
+ #define BITMAP_FLUSH_INTERVAL (10 * HZ)
+--
+2.40.1
+
--- /dev/null
+From 266a6fe275c55fed02aa8393a8964f85c76645af Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 26 Jun 2023 16:44:34 +0200
+Subject: dm integrity: reduce vmalloc space footprint on 32-bit architectures
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+[ Upstream commit 6d50eb4725934fd22f5eeccb401000687c790fd0 ]
+
+It was reported that dm-integrity runs out of vmalloc space on 32-bit
+architectures. On x86, there is only 128MiB vmalloc space and dm-integrity
+consumes it quickly because it has a 64MiB journal and 8MiB recalculate
+buffer.
+
+Fix this by reducing the size of the journal to 4MiB and the size of
+the recalculate buffer to 1MiB, so that multiple dm-integrity devices
+can be created and activated on 32-bit architectures.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/dm-integrity.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index ea08eb4ed0d95..1667ac1406098 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -31,11 +31,11 @@
+ #define DEFAULT_BUFFER_SECTORS 128
+ #define DEFAULT_JOURNAL_WATERMARK 50
+ #define DEFAULT_SYNC_MSEC 10000
+-#define DEFAULT_MAX_JOURNAL_SECTORS 131072
++#define DEFAULT_MAX_JOURNAL_SECTORS (IS_ENABLED(CONFIG_64BIT) ? 131072 : 8192)
+ #define MIN_LOG2_INTERLEAVE_SECTORS 3
+ #define MAX_LOG2_INTERLEAVE_SECTORS 31
+ #define METADATA_WORKQUEUE_MAX_ACTIVE 16
+-#define RECALC_SECTORS 32768
++#define RECALC_SECTORS (IS_ENABLED(CONFIG_64BIT) ? 32768 : 2048)
+ #define RECALC_WRITE_SUPER 16
+ #define BITMAP_BLOCK_SIZE 4096 /* don't change it */
+ #define BITMAP_FLUSH_INTERVAL (10 * HZ)
+--
+2.40.1
+
--- /dev/null
+From bae944f87f89a9248a0c4027255b672ba2d12556 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 20 Jun 2023 17:00:28 -0400
+Subject: drm/amd/display: check TG is non-null before checking if enabled
+
+From: Taimur Hassan <syed.hassan@amd.com>
+
+[ Upstream commit 5a25cefc0920088bb9afafeb80ad3dcd84fe278b ]
+
+[Why & How]
+If there is no TG allocation we can dereference a NULL pointer when
+checking if the TG is enabled.
+
+Cc: Mario Limonciello <mario.limonciello@amd.com>
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Acked-by: Alan Liu <haoping.liu@amd.com>
+Signed-off-by: Taimur Hassan <syed.hassan@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 8bdbf3c31194b..1c669f115dd80 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -3282,7 +3282,8 @@ void dcn10_wait_for_mpcc_disconnect(
+ if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
+ struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
+
+- if (pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
++ if (pipe_ctx->stream_res.tg &&
++ pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
+ res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
+ pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
+ hubp->funcs->set_blank(hubp, true);
+--
+2.40.1
+
--- /dev/null
+From 8849ffd1093aaad1d4c0d387f531032f91812e20 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 24 Mar 2022 12:08:43 -0400
+Subject: drm/amd/display: do not wait for mpc idle if tg is disabled
+
+From: Josip Pavic <Josip.Pavic@amd.com>
+
+[ Upstream commit 2513ed4f937999c0446fd824f7564f76b697d722 ]
+
+[Why]
+When booting, the driver waits for the MPC idle bit to be set as part of
+pipe initialization. However, on some systems this occurs before OTG is
+enabled, and since the MPC idle bit won't be set until the vupdate
+signal occurs (which requires OTG to be enabled), this never happens and
+the wait times out. This can add hundreds of milliseconds to the boot
+time.
+
+[How]
+Do not wait for mpc idle if tg is disabled
+
+Reviewed-by: Jun Lei <Jun.Lei@amd.com>
+Acked-by: Pavle Kotarac <Pavle.Kotarac@amd.com>
+Signed-off-by: Josip Pavic <Josip.Pavic@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Stable-dep-of: 5a25cefc0920 ("drm/amd/display: check TG is non-null before checking if enabled")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 71a85c5306ed0..8bdbf3c31194b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -3282,7 +3282,8 @@ void dcn10_wait_for_mpcc_disconnect(
+ if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
+ struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
+
+- res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
++ if (pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
++ res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
+ pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
+ hubp->funcs->set_blank(hubp, true);
+ }
+--
+2.40.1
+
--- /dev/null
+From 01b056a1f6f861526b8450fb36eb035c8bd0a380 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 25 Jun 2023 00:16:49 +0800
+Subject: fbdev: fix potential OOB read in fast_imageblit()
+
+From: Zhang Shurong <zhang_shurong@foxmail.com>
+
+[ Upstream commit c2d22806aecb24e2de55c30a06e5d6eb297d161d ]
+
+There is a potential OOB read at fast_imageblit, for
+"colortab[(*src >> 4)]" can become a negative value due to
+"const char *s = image->data, *src".
+This change makes sure the index for colortab always positive
+or zero.
+
+Similar commit:
+https://patchwork.kernel.org/patch/11746067
+
+Potential bug report:
+https://groups.google.com/g/syzkaller-bugs/c/9ubBXKeKXf4/m/k-QXy4UgAAAJ
+
+Signed-off-by: Zhang Shurong <zhang_shurong@foxmail.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/video/fbdev/core/sysimgblt.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/video/fbdev/core/sysimgblt.c b/drivers/video/fbdev/core/sysimgblt.c
+index 335e92b813fc4..665ef7a0a2495 100644
+--- a/drivers/video/fbdev/core/sysimgblt.c
++++ b/drivers/video/fbdev/core/sysimgblt.c
+@@ -189,7 +189,7 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
+ u32 fgx = fgcolor, bgx = bgcolor, bpp = p->var.bits_per_pixel;
+ u32 ppw = 32/bpp, spitch = (image->width + 7)/8;
+ u32 bit_mask, eorx, shift;
+- const char *s = image->data, *src;
++ const u8 *s = image->data, *src;
+ u32 *dst;
+ const u32 *tab;
+ size_t tablen;
+--
+2.40.1
+
--- /dev/null
+From b88536d39dd2f69693861237654e2a77d23867e4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 13 Mar 2022 20:29:51 +0100
+Subject: fbdev: Fix sys_imageblit() for arbitrary image widths
+
+From: Thomas Zimmermann <tzimmermann@suse.de>
+
+[ Upstream commit 61bfcb6a3b981e8f19e044ac8c3de6edbe6caf70 ]
+
+Commit 6f29e04938bf ("fbdev: Improve performance of sys_imageblit()")
+broke sys_imageblit() for image width that are not aligned to 8-bit
+boundaries. Fix this by handling the trailing pixels on each line
+separately. The performance improvements in the original commit do not
+regress by this change.
+
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Fixes: 6f29e04938bf ("fbdev: Improve performance of sys_imageblit()")
+Reviewed-by: Javier Martinez Canillas <javierm@redhat.com>
+Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Tested-by: Geert Uytterhoeven <geert@linux-m68k.org>
+Cc: Thomas Zimmermann <tzimmermann@suse.de>
+Cc: Javier Martinez Canillas <javierm@redhat.com>
+Cc: Sam Ravnborg <sam@ravnborg.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20220313192952.12058-2-tzimmermann@suse.de
+Stable-dep-of: c2d22806aecb ("fbdev: fix potential OOB read in fast_imageblit()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/video/fbdev/core/sysimgblt.c | 29 ++++++++++++++++++++++++----
+ 1 file changed, 25 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/video/fbdev/core/sysimgblt.c b/drivers/video/fbdev/core/sysimgblt.c
+index 722c327a381bd..335e92b813fc4 100644
+--- a/drivers/video/fbdev/core/sysimgblt.c
++++ b/drivers/video/fbdev/core/sysimgblt.c
+@@ -188,7 +188,7 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
+ {
+ u32 fgx = fgcolor, bgx = bgcolor, bpp = p->var.bits_per_pixel;
+ u32 ppw = 32/bpp, spitch = (image->width + 7)/8;
+- u32 bit_mask, eorx;
++ u32 bit_mask, eorx, shift;
+ const char *s = image->data, *src;
+ u32 *dst;
+ const u32 *tab;
+@@ -229,17 +229,23 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
+
+ for (i = image->height; i--; ) {
+ dst = dst1;
++ shift = 8;
+ src = s;
+
++ /*
++ * Manually unroll the per-line copying loop for better
++ * performance. This works until we processed the last
++ * completely filled source byte (inclusive).
++ */
+ switch (ppw) {
+ case 4: /* 8 bpp */
+- for (j = k; j; j -= 2, ++src) {
++ for (j = k; j >= 2; j -= 2, ++src) {
+ *dst++ = colortab[(*src >> 4) & bit_mask];
+ *dst++ = colortab[(*src >> 0) & bit_mask];
+ }
+ break;
+ case 2: /* 16 bpp */
+- for (j = k; j; j -= 4, ++src) {
++ for (j = k; j >= 4; j -= 4, ++src) {
+ *dst++ = colortab[(*src >> 6) & bit_mask];
+ *dst++ = colortab[(*src >> 4) & bit_mask];
+ *dst++ = colortab[(*src >> 2) & bit_mask];
+@@ -247,7 +253,7 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
+ }
+ break;
+ case 1: /* 32 bpp */
+- for (j = k; j; j -= 8, ++src) {
++ for (j = k; j >= 8; j -= 8, ++src) {
+ *dst++ = colortab[(*src >> 7) & bit_mask];
+ *dst++ = colortab[(*src >> 6) & bit_mask];
+ *dst++ = colortab[(*src >> 5) & bit_mask];
+@@ -259,6 +265,21 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
+ }
+ break;
+ }
++
++ /*
++ * For image widths that are not a multiple of 8, there
++ * are trailing pixels left on the current line. Print
++ * them as well.
++ */
++ for (; j--; ) {
++ shift -= ppw;
++ *dst++ = colortab[(*src >> shift) & bit_mask];
++ if (!shift) {
++ shift = 8;
++ ++src;
++ }
++ }
++
+ dst1 += p->fix.line_length;
+ s += spitch;
+ }
+--
+2.40.1
+
--- /dev/null
+From 0193082d46874ea98ce221dcb67981757c30217a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Feb 2022 20:38:01 +0100
+Subject: fbdev: Improve performance of sys_imageblit()
+
+From: Thomas Zimmermann <tzimmermann@suse.de>
+
+[ Upstream commit 6f29e04938bf509fccfad490a74284cf158891ce ]
+
+Improve the performance of sys_imageblit() by manually unrolling
+the inner blitting loop and moving some invariants out. The compiler
+failed to do this automatically. The resulting binary code was even
+slower than the cfb_imageblit() helper, which uses the same algorithm,
+but operates on I/O memory.
+
+A microbenchmark measures the average number of CPU cycles
+for sys_imageblit() after a stabilizing period of a few minutes
+(i7-4790, FullHD, simpledrm, kernel with debugging). The value
+for CFB is given as a reference.
+
+ sys_imageblit(), new: 25934 cycles
+ sys_imageblit(), old: 35944 cycles
+ cfb_imageblit(): 30566 cycles
+
+In the optimized case, sys_imageblit() is now ~30% faster than before
+and ~20% faster than cfb_imageblit().
+
+v2:
+ * move switch out of inner loop (Gerd)
+ * remove test for alignment of dst1 (Sam)
+
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Reviewed-by: Javier Martinez Canillas <javierm@redhat.com>
+Acked-by: Sam Ravnborg <sam@ravnborg.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20220223193804.18636-3-tzimmermann@suse.de
+Stable-dep-of: c2d22806aecb ("fbdev: fix potential OOB read in fast_imageblit()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/video/fbdev/core/sysimgblt.c | 49 +++++++++++++++++++++-------
+ 1 file changed, 38 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/video/fbdev/core/sysimgblt.c b/drivers/video/fbdev/core/sysimgblt.c
+index a4d05b1b17d7d..722c327a381bd 100644
+--- a/drivers/video/fbdev/core/sysimgblt.c
++++ b/drivers/video/fbdev/core/sysimgblt.c
+@@ -188,23 +188,29 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
+ {
+ u32 fgx = fgcolor, bgx = bgcolor, bpp = p->var.bits_per_pixel;
+ u32 ppw = 32/bpp, spitch = (image->width + 7)/8;
+- u32 bit_mask, end_mask, eorx, shift;
++ u32 bit_mask, eorx;
+ const char *s = image->data, *src;
+ u32 *dst;
+- const u32 *tab = NULL;
++ const u32 *tab;
++ size_t tablen;
++ u32 colortab[16];
+ int i, j, k;
+
+ switch (bpp) {
+ case 8:
+ tab = fb_be_math(p) ? cfb_tab8_be : cfb_tab8_le;
++ tablen = 16;
+ break;
+ case 16:
+ tab = fb_be_math(p) ? cfb_tab16_be : cfb_tab16_le;
++ tablen = 4;
+ break;
+ case 32:
+- default:
+ tab = cfb_tab32;
++ tablen = 2;
+ break;
++ default:
++ return;
+ }
+
+ for (i = ppw-1; i--; ) {
+@@ -218,19 +224,40 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
+ eorx = fgx ^ bgx;
+ k = image->width/ppw;
+
++ for (i = 0; i < tablen; ++i)
++ colortab[i] = (tab[i] & eorx) ^ bgx;
++
+ for (i = image->height; i--; ) {
+ dst = dst1;
+- shift = 8;
+ src = s;
+
+- for (j = k; j--; ) {
+- shift -= ppw;
+- end_mask = tab[(*src >> shift) & bit_mask];
+- *dst++ = (end_mask & eorx) ^ bgx;
+- if (!shift) {
+- shift = 8;
+- src++;
++ switch (ppw) {
++ case 4: /* 8 bpp */
++ for (j = k; j; j -= 2, ++src) {
++ *dst++ = colortab[(*src >> 4) & bit_mask];
++ *dst++ = colortab[(*src >> 0) & bit_mask];
++ }
++ break;
++ case 2: /* 16 bpp */
++ for (j = k; j; j -= 4, ++src) {
++ *dst++ = colortab[(*src >> 6) & bit_mask];
++ *dst++ = colortab[(*src >> 4) & bit_mask];
++ *dst++ = colortab[(*src >> 2) & bit_mask];
++ *dst++ = colortab[(*src >> 0) & bit_mask];
++ }
++ break;
++ case 1: /* 32 bpp */
++ for (j = k; j; j -= 8, ++src) {
++ *dst++ = colortab[(*src >> 7) & bit_mask];
++ *dst++ = colortab[(*src >> 6) & bit_mask];
++ *dst++ = colortab[(*src >> 5) & bit_mask];
++ *dst++ = colortab[(*src >> 4) & bit_mask];
++ *dst++ = colortab[(*src >> 3) & bit_mask];
++ *dst++ = colortab[(*src >> 2) & bit_mask];
++ *dst++ = colortab[(*src >> 1) & bit_mask];
++ *dst++ = colortab[(*src >> 0) & bit_mask];
+ }
++ break;
+ }
+ dst1 += p->fix.line_length;
+ s += spitch;
+--
+2.40.1
+
--- /dev/null
+From fe9a1a910a5790ff306878fb142ddd74bd814cc7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Jun 2022 14:45:06 -0400
+Subject: fs: dlm: add pid to debug log
+
+From: Alexander Aring <aahringo@redhat.com>
+
+[ Upstream commit 19d7ca051d303622c423b4cb39e6bde5d177328b ]
+
+This patch adds the pid information which requested the lock operation
+to the debug log output.
+
+Signed-off-by: Alexander Aring <aahringo@redhat.com>
+Signed-off-by: David Teigland <teigland@redhat.com>
+Stable-dep-of: 57e2c2f2d94c ("fs: dlm: fix mismatch of plock results from userspace")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/dlm/plock.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
+index 95f4662c1209a..f685d56a4f909 100644
+--- a/fs/dlm/plock.c
++++ b/fs/dlm/plock.c
+@@ -164,9 +164,9 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+ spin_lock(&ops_lock);
+ list_del(&op->list);
+ spin_unlock(&ops_lock);
+- log_print("%s: wait interrupted %x %llx, op removed",
++ log_print("%s: wait interrupted %x %llx pid %d, op removed",
+ __func__, ls->ls_global_id,
+- (unsigned long long)number);
++ (unsigned long long)number, op->info.pid);
+ dlm_release_plock_op(op);
+ do_unlock_close(ls, number, file, fl);
+ goto out;
+--
+2.40.1
+
--- /dev/null
+From 22ac3685dedef1e89e8c849a44b88b4fe2fe15ec Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Jun 2022 14:45:05 -0400
+Subject: fs: dlm: change plock interrupted message to debug again
+
+From: Alexander Aring <aahringo@redhat.com>
+
+[ Upstream commit ea06d4cabf529eefbe7e89e3a8325f1f89355ccd ]
+
+This patch reverses the commit bcfad4265ced ("dlm: improve plock logging
+if interrupted") by moving it to debug level and notifying the user an op
+was removed.
+
+Signed-off-by: Alexander Aring <aahringo@redhat.com>
+Signed-off-by: David Teigland <teigland@redhat.com>
+Stable-dep-of: 57e2c2f2d94c ("fs: dlm: fix mismatch of plock results from userspace")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/dlm/plock.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
+index f685d56a4f909..0d00ca2c44c71 100644
+--- a/fs/dlm/plock.c
++++ b/fs/dlm/plock.c
+@@ -164,7 +164,7 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+ spin_lock(&ops_lock);
+ list_del(&op->list);
+ spin_unlock(&ops_lock);
+- log_print("%s: wait interrupted %x %llx pid %d, op removed",
++ log_debug(ls, "%s: wait interrupted %x %llx pid %d",
+ __func__, ls->ls_global_id,
+ (unsigned long long)number, op->info.pid);
+ dlm_release_plock_op(op);
+@@ -470,7 +470,7 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
+ else
+ wake_up(&recv_wq);
+ } else
+- log_print("%s: no op %x %llx - may got interrupted?", __func__,
++ log_print("%s: no op %x %llx", __func__,
+ info.fsid, (unsigned long long)info.number);
+ return count;
+ }
+--
+2.40.1
+
--- /dev/null
+From d7ccde4e53e28676a1032efe58e435361fcef599 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 May 2023 12:02:04 -0400
+Subject: fs: dlm: fix mismatch of plock results from userspace
+
+From: Alexander Aring <aahringo@redhat.com>
+
+[ Upstream commit 57e2c2f2d94cfd551af91cedfa1af6d972487197 ]
+
+When a waiting plock request (F_SETLKW) is sent to userspace
+for processing (dlm_controld), the result is returned at a
+later time. That result could be incorrectly matched to a
+different waiting request in cases where the owner field is
+the same (e.g. different threads in a process.) This is fixed
+by comparing all the properties in the request and reply.
+
+The results for non-waiting plock requests are now matched
+based on list order because the results are returned in the
+same order they were sent.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Alexander Aring <aahringo@redhat.com>
+Signed-off-by: David Teigland <teigland@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/dlm/plock.c | 58 +++++++++++++++++++++++++++++++++++++++-----------
+ 1 file changed, 45 insertions(+), 13 deletions(-)
+
+diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
+index fa8969c0a5f55..28735e8c5e206 100644
+--- a/fs/dlm/plock.c
++++ b/fs/dlm/plock.c
+@@ -405,7 +405,7 @@ static ssize_t dev_read(struct file *file, char __user *u, size_t count,
+ if (op->info.flags & DLM_PLOCK_FL_CLOSE)
+ list_del(&op->list);
+ else
+- list_move(&op->list, &recv_list);
++ list_move_tail(&op->list, &recv_list);
+ memcpy(&info, &op->info, sizeof(info));
+ }
+ spin_unlock(&ops_lock);
+@@ -443,20 +443,52 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
+ if (check_version(&info))
+ return -EINVAL;
+
++ /*
++ * The results for waiting ops (SETLKW) can be returned in any
++ * order, so match all fields to find the op. The results for
++ * non-waiting ops are returned in the order that they were sent
++ * to userspace, so match the result with the first non-waiting op.
++ */
+ spin_lock(&ops_lock);
+- list_for_each_entry(iter, &recv_list, list) {
+- if (iter->info.fsid == info.fsid &&
+- iter->info.number == info.number &&
+- iter->info.owner == info.owner) {
+- list_del_init(&iter->list);
+- memcpy(&iter->info, &info, sizeof(info));
+- if (iter->data)
+- do_callback = 1;
+- else
+- iter->done = 1;
+- op = iter;
+- break;
++ if (info.wait) {
++ list_for_each_entry(iter, &recv_list, list) {
++ if (iter->info.fsid == info.fsid &&
++ iter->info.number == info.number &&
++ iter->info.owner == info.owner &&
++ iter->info.pid == info.pid &&
++ iter->info.start == info.start &&
++ iter->info.end == info.end &&
++ iter->info.ex == info.ex &&
++ iter->info.wait) {
++ op = iter;
++ break;
++ }
+ }
++ } else {
++ list_for_each_entry(iter, &recv_list, list) {
++ if (!iter->info.wait) {
++ op = iter;
++ break;
++ }
++ }
++ }
++
++ if (op) {
++ /* Sanity check that op and info match. */
++ if (info.wait)
++ WARN_ON(op->info.optype != DLM_PLOCK_OP_LOCK);
++ else
++ WARN_ON(op->info.fsid != info.fsid ||
++ op->info.number != info.number ||
++ op->info.owner != info.owner ||
++ op->info.optype != info.optype);
++
++ list_del_init(&op->list);
++ memcpy(&op->info, &info, sizeof(info));
++ if (op->data)
++ do_callback = 1;
++ else
++ op->done = 1;
+ }
+ spin_unlock(&ops_lock);
+
+--
+2.40.1
+
--- /dev/null
+From 585988b3919454ba9fcebb4a1004b6429cad99c1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Jun 2022 14:45:08 -0400
+Subject: fs: dlm: use dlm_plock_info for do_unlock_close
+
+From: Alexander Aring <aahringo@redhat.com>
+
+[ Upstream commit 4d413ae9ced4180c0e2114553c3a7560b509b0f8 ]
+
+This patch refactors do_unlock_close() by using only struct dlm_plock_info
+as a parameter.
+
+Signed-off-by: Alexander Aring <aahringo@redhat.com>
+Signed-off-by: David Teigland <teigland@redhat.com>
+Stable-dep-of: 57e2c2f2d94c ("fs: dlm: fix mismatch of plock results from userspace")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/dlm/plock.c | 16 ++++++----------
+ 1 file changed, 6 insertions(+), 10 deletions(-)
+
+diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
+index 0d00ca2c44c71..fa8969c0a5f55 100644
+--- a/fs/dlm/plock.c
++++ b/fs/dlm/plock.c
+@@ -80,8 +80,7 @@ static void send_op(struct plock_op *op)
+ abandoned waiter. So, we have to insert the unlock-close when the
+ lock call is interrupted. */
+
+-static void do_unlock_close(struct dlm_ls *ls, u64 number,
+- struct file *file, struct file_lock *fl)
++static void do_unlock_close(const struct dlm_plock_info *info)
+ {
+ struct plock_op *op;
+
+@@ -90,15 +89,12 @@ static void do_unlock_close(struct dlm_ls *ls, u64 number,
+ return;
+
+ op->info.optype = DLM_PLOCK_OP_UNLOCK;
+- op->info.pid = fl->fl_pid;
+- op->info.fsid = ls->ls_global_id;
+- op->info.number = number;
++ op->info.pid = info->pid;
++ op->info.fsid = info->fsid;
++ op->info.number = info->number;
+ op->info.start = 0;
+ op->info.end = OFFSET_MAX;
+- if (fl->fl_lmops && fl->fl_lmops->lm_grant)
+- op->info.owner = (__u64) fl->fl_pid;
+- else
+- op->info.owner = (__u64)(long) fl->fl_owner;
++ op->info.owner = info->owner;
+
+ op->info.flags |= DLM_PLOCK_FL_CLOSE;
+ send_op(op);
+@@ -168,7 +164,7 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+ __func__, ls->ls_global_id,
+ (unsigned long long)number, op->info.pid);
+ dlm_release_plock_op(op);
+- do_unlock_close(ls, number, file, fl);
++ do_unlock_close(&op->info);
+ goto out;
+ }
+
+--
+2.40.1
+
--- /dev/null
+From f32b2acf30e7ecf499a042705c454e56840b847d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 10 Aug 2023 16:51:10 -0700
+Subject: ice: fix receive buffer size miscalculation
+
+From: Jesse Brandeburg <jesse.brandeburg@intel.com>
+
+[ Upstream commit 10083aef784031fa9f06c19a1b182e6fad5338d9 ]
+
+The driver is misconfiguring the hardware for some values of MTU such that
+it could use multiple descriptors to receive a packet when it could have
+simply used one.
+
+Change the driver to use a round-up instead of the result of a shift, as
+the shift can truncate the lower bits of the size, and result in the
+problem noted above. It also aligns this driver with similar code in i40e.
+
+The insidiousness of this problem is that everything works with the wrong
+size, it's just not working as well as it could, as some MTU sizes end up
+using two or more descriptors, and there is no way to tell that is
+happening without looking at ice_trace or a bus analyzer.
+
+Fixes: efc2214b6047 ("ice: Add support for XDP")
+Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
+Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
+Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
+Tested-by: Pucha Himasekhar Reddy <himasekharx.reddy.pucha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_base.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
+index 1929847b8c404..59df4c9bd8f90 100644
+--- a/drivers/net/ethernet/intel/ice/ice_base.c
++++ b/drivers/net/ethernet/intel/ice/ice_base.c
+@@ -353,7 +353,8 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
+ /* Receive Packet Data Buffer Size.
+ * The Packet Data Buffer Size is defined in 128 byte units.
+ */
+- rlan_ctx.dbuf = ring->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
++ rlan_ctx.dbuf = DIV_ROUND_UP(ring->rx_buf_len,
++ BIT_ULL(ICE_RLAN_CTX_DBUF_S));
+
+ /* use 32 byte descriptors */
+ rlan_ctx.dsize = 1;
+--
+2.40.1
+
--- /dev/null
+From b95f3fa563744e67a4018374bca0171a2fc5ebef Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 21 Aug 2023 10:19:27 -0700
+Subject: igb: Avoid starting unnecessary workqueues
+
+From: Alessio Igor Bogani <alessio.bogani@elettra.eu>
+
+[ Upstream commit b888c510f7b3d64ca75fc0f43b4a4bd1a611312f ]
+
+If ptp_clock_register() fails or CONFIG_PTP isn't enabled, avoid starting
+PTP related workqueues.
+
+In this way we can fix this:
+ BUG: unable to handle page fault for address: ffffc9000440b6f8
+ #PF: supervisor read access in kernel mode
+ #PF: error_code(0x0000) - not-present page
+ PGD 100000067 P4D 100000067 PUD 1001e0067 PMD 107dc5067 PTE 0
+ Oops: 0000 [#1] PREEMPT SMP
+ [...]
+ Workqueue: events igb_ptp_overflow_check
+ RIP: 0010:igb_rd32+0x1f/0x60
+ [...]
+ Call Trace:
+ igb_ptp_read_82580+0x20/0x50
+ timecounter_read+0x15/0x60
+ igb_ptp_overflow_check+0x1a/0x50
+ process_one_work+0x1cb/0x3c0
+ worker_thread+0x53/0x3f0
+ ? rescuer_thread+0x370/0x370
+ kthread+0x142/0x160
+ ? kthread_associate_blkcg+0xc0/0xc0
+ ret_from_fork+0x1f/0x30
+
+Fixes: 1f6e8178d685 ("igb: Prevent dropped Tx timestamps via work items and interrupts.")
+Fixes: d339b1331616 ("igb: add PTP Hardware Clock code")
+Signed-off-by: Alessio Igor Bogani <alessio.bogani@elettra.eu>
+Tested-by: Arpana Arland <arpanax.arland@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://lore.kernel.org/r/20230821171927.2203644-1-anthony.l.nguyen@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igb/igb_ptp.c | 24 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
+index 86a576201f5ff..0dbbb32905fa4 100644
+--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
++++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
+@@ -1262,18 +1262,6 @@ void igb_ptp_init(struct igb_adapter *adapter)
+ return;
+ }
+
+- spin_lock_init(&adapter->tmreg_lock);
+- INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);
+-
+- if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK)
+- INIT_DELAYED_WORK(&adapter->ptp_overflow_work,
+- igb_ptp_overflow_check);
+-
+- adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+- adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+-
+- igb_ptp_reset(adapter);
+-
+ adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
+ &adapter->pdev->dev);
+ if (IS_ERR(adapter->ptp_clock)) {
+@@ -1283,6 +1271,18 @@ void igb_ptp_init(struct igb_adapter *adapter)
+ dev_info(&adapter->pdev->dev, "added PHC on %s\n",
+ adapter->netdev->name);
+ adapter->ptp_flags |= IGB_PTP_ENABLED;
++
++ spin_lock_init(&adapter->tmreg_lock);
++ INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);
++
++ if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK)
++ INIT_DELAYED_WORK(&adapter->ptp_overflow_work,
++ igb_ptp_overflow_check);
++
++ adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
++ adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
++
++ igb_ptp_reset(adapter);
+ }
+ }
+
+--
+2.40.1
+
--- /dev/null
+From 1d756303b5d5e1e13348316d7015d1eb8f0f24cc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 17 Aug 2023 22:54:49 +0800
+Subject: ipvlan: Fix a reference count leak warning in ipvlan_ns_exit()
+
+From: Lu Wei <luwei32@huawei.com>
+
+[ Upstream commit 043d5f68d0ccdda91029b4b6dce7eeffdcfad281 ]
+
+There are two network devices(veth1 and veth3) in ns1, and ipvlan1 with
+L3S mode and ipvlan2 with L2 mode are created based on them as
+figure (1). In this case, ipvlan_register_nf_hook() will be called to
+register nf hook which is needed by ipvlans in L3S mode in ns1 and value
+of ipvl_nf_hook_refcnt is set to 1.
+
+(1)
+ ns1 ns2
+ ------------ ------------
+
+ veth1--ipvlan1 (L3S)
+
+ veth3--ipvlan2 (L2)
+
+(2)
+ ns1 ns2
+ ------------ ------------
+
+ veth1--ipvlan1 (L3S)
+
+ ipvlan2 (L2) veth3
+ | |
+ |------->-------->--------->--------
+ migrate
+
+When veth3 migrates from ns1 to ns2 as figure (2), veth3 will register in
+ns2 and calls call_netdevice_notifiers with NETDEV_REGISTER event:
+
+dev_change_net_namespace
+ call_netdevice_notifiers
+ ipvlan_device_event
+ ipvlan_migrate_l3s_hook
+ ipvlan_register_nf_hook(newnet) (I)
+ ipvlan_unregister_nf_hook(oldnet) (II)
+
+In function ipvlan_migrate_l3s_hook(), ipvl_nf_hook_refcnt in ns1 is not 0
+since veth1 with ipvlan1 still in ns1, (I) and (II) will be called to
+register nf_hook in ns2 and unregister nf_hook in ns1. As a result,
+ipvl_nf_hook_refcnt in ns1 is decreased incorrectly and this in ns2
+is increased incorrectly. When the second net namespace is removed, a
+reference count leak warning in ipvlan_ns_exit() will be triggered.
+
+This patch add a check before ipvlan_migrate_l3s_hook() is called. The
+warning can be triggered as follows:
+
+$ ip netns add ns1
+$ ip netns add ns2
+$ ip netns exec ns1 ip link add veth1 type veth peer name veth2
+$ ip netns exec ns1 ip link add veth3 type veth peer name veth4
+$ ip netns exec ns1 ip link add ipv1 link veth1 type ipvlan mode l3s
+$ ip netns exec ns1 ip link add ipv2 link veth3 type ipvlan mode l2
+$ ip netns exec ns1 ip link set veth3 netns ns2
+$ ip net del ns2
+
+Fixes: 3133822f5ac1 ("ipvlan: use pernet operations and restrict l3s hooks to master netns")
+Signed-off-by: Lu Wei <luwei32@huawei.com>
+Reviewed-by: Florian Westphal <fw@strlen.de>
+Link: https://lore.kernel.org/r/20230817145449.141827-1-luwei32@huawei.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ipvlan/ipvlan_main.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
+index 60b7d93bb834e..93be7dd571fc5 100644
+--- a/drivers/net/ipvlan/ipvlan_main.c
++++ b/drivers/net/ipvlan/ipvlan_main.c
+@@ -745,7 +745,8 @@ static int ipvlan_device_event(struct notifier_block *unused,
+
+ write_pnet(&port->pnet, newnet);
+
+- ipvlan_migrate_l3s_hook(oldnet, newnet);
++ if (port->mode == IPVLAN_MODE_L3S)
++ ipvlan_migrate_l3s_hook(oldnet, newnet);
+ break;
+ }
+ case NETDEV_UNREGISTER:
+--
+2.40.1
+
--- /dev/null
+From 71352093e04380198bbd16d2759639b3acdb13ab Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Nov 2020 14:41:59 +0100
+Subject: libceph, rbd: ignore addr->type while comparing in some cases
+
+From: Ilya Dryomov <idryomov@gmail.com>
+
+[ Upstream commit 313771e80fd253d4b5472e61a2d12b03c5293aa9 ]
+
+For libceph, this ensures that libceph instance sharing (share option)
+continues to work. For rbd, this avoids blocklisting alive lock owners
+(locker addr is always LEGACY, while watcher addr is ANY in nautilus).
+
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Stable-dep-of: 588159009d5b ("rbd: retrieve and check lock owner twice before blocklisting")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/block/rbd.c | 8 ++++++--
+ include/linux/ceph/msgr.h | 9 ++++++++-
+ net/ceph/mon_client.c | 6 ++++--
+ 3 files changed, 18 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index 63491748dc8d7..7b8731cddd9ea 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -3979,8 +3979,12 @@ static int find_watcher(struct rbd_device *rbd_dev,
+
+ sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
+ for (i = 0; i < num_watchers; i++) {
+- if (!memcmp(&watchers[i].addr, &locker->info.addr,
+- sizeof(locker->info.addr)) &&
++ /*
++ * Ignore addr->type while comparing. This mimics
++ * entity_addr_t::get_legacy_str() + strcmp().
++ */
++ if (ceph_addr_equal_no_type(&watchers[i].addr,
++ &locker->info.addr) &&
+ watchers[i].cookie == cookie) {
+ struct rbd_client_id cid = {
+ .gid = le64_to_cpu(watchers[i].name.num),
+diff --git a/include/linux/ceph/msgr.h b/include/linux/ceph/msgr.h
+index 9e50aede46c83..7bde0af29a814 100644
+--- a/include/linux/ceph/msgr.h
++++ b/include/linux/ceph/msgr.h
+@@ -61,11 +61,18 @@ extern const char *ceph_entity_type_name(int type);
+ * entity_addr -- network address
+ */
+ struct ceph_entity_addr {
+- __le32 type;
++ __le32 type; /* CEPH_ENTITY_ADDR_TYPE_* */
+ __le32 nonce; /* unique id for process (e.g. pid) */
+ struct sockaddr_storage in_addr;
+ } __attribute__ ((packed));
+
++static inline bool ceph_addr_equal_no_type(const struct ceph_entity_addr *lhs,
++ const struct ceph_entity_addr *rhs)
++{
++ return !memcmp(&lhs->in_addr, &rhs->in_addr, sizeof(lhs->in_addr)) &&
++ lhs->nonce == rhs->nonce;
++}
++
+ struct ceph_entity_inst {
+ struct ceph_entity_name name;
+ struct ceph_entity_addr addr;
+diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
+index c4cf2529d08ba..ef5c174102d5e 100644
+--- a/net/ceph/mon_client.c
++++ b/net/ceph/mon_client.c
+@@ -96,9 +96,11 @@ int ceph_monmap_contains(struct ceph_monmap *m, struct ceph_entity_addr *addr)
+ {
+ int i;
+
+- for (i = 0; i < m->num_mon; i++)
+- if (memcmp(addr, &m->mon_inst[i].addr, sizeof(*addr)) == 0)
++ for (i = 0; i < m->num_mon; i++) {
++ if (ceph_addr_equal_no_type(addr, &m->mon_inst[i].addr))
+ return 1;
++ }
++
+ return 0;
+ }
+
+--
+2.40.1
+
--- /dev/null
+From 29afbe0e30b7ea01605f84523b1c3354d5c2e911 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Apr 2023 10:33:44 +0100
+Subject: MIPS: cpu-features: Enable octeon_cache by cpu_type
+
+From: Jiaxun Yang <jiaxun.yang@flygoat.com>
+
+[ Upstream commit f641519409a73403ee6612b8648b95a688ab85c2 ]
+
+cpu_has_octeon_cache was tied to 0 for generic cpu-features,
+whith this generic kernel built for octeon CPU won't boot.
+
+Just enable this flag by cpu_type. It won't hurt orther platforms
+because compiler will eliminate the code path on other processors.
+
+Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Stable-dep-of: 5487a7b60695 ("MIPS: cpu-features: Use boot_cpu_type for CPU type based features")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/include/asm/cpu-features.h | 19 ++++++++++++++++++-
+ 1 file changed, 18 insertions(+), 1 deletion(-)
+
+diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
+index 8294eaa6f902d..8baa998fea0d8 100644
+--- a/arch/mips/include/asm/cpu-features.h
++++ b/arch/mips/include/asm/cpu-features.h
+@@ -126,7 +126,24 @@
+ #define cpu_has_tx39_cache __opt(MIPS_CPU_TX39_CACHE)
+ #endif
+ #ifndef cpu_has_octeon_cache
+-#define cpu_has_octeon_cache 0
++#define cpu_has_octeon_cache \
++({ \
++ int __res; \
++ \
++ switch (current_cpu_type()) { \
++ case CPU_CAVIUM_OCTEON: \
++ case CPU_CAVIUM_OCTEON_PLUS: \
++ case CPU_CAVIUM_OCTEON2: \
++ case CPU_CAVIUM_OCTEON3: \
++ __res = 1; \
++ break; \
++ \
++ default: \
++ __res = 0; \
++ } \
++ \
++ __res; \
++})
+ #endif
+ /* Don't override `cpu_has_fpu' to 1 or the "nofpu" option won't work. */
+ #ifndef cpu_has_fpu
+--
+2.40.1
+
--- /dev/null
+From a63f95d85297538bce89e296971b18334585c33c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Jun 2023 13:51:22 +0800
+Subject: MIPS: cpu-features: Use boot_cpu_type for CPU type based features
+
+From: Jiaxun Yang <jiaxun.yang@flygoat.com>
+
+[ Upstream commit 5487a7b60695a92cf998350e4beac17144c91fcd ]
+
+Some CPU feature macros were using current_cpu_type to mark feature
+availability.
+
+However current_cpu_type will use smp_processor_id, which is prohibited
+under preemptable context.
+
+Since those features are all uniform on all CPUs in a SMP system, use
+boot_cpu_type instead of current_cpu_type to fix preemptable kernel.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/include/asm/cpu-features.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
+index 8baa998fea0d8..dd03bc905841f 100644
+--- a/arch/mips/include/asm/cpu-features.h
++++ b/arch/mips/include/asm/cpu-features.h
+@@ -130,7 +130,7 @@
+ ({ \
+ int __res; \
+ \
+- switch (current_cpu_type()) { \
++ switch (boot_cpu_type()) { \
+ case CPU_CAVIUM_OCTEON: \
+ case CPU_CAVIUM_OCTEON_PLUS: \
+ case CPU_CAVIUM_OCTEON2: \
+@@ -370,7 +370,7 @@
+ ({ \
+ int __res; \
+ \
+- switch (current_cpu_type()) { \
++ switch (boot_cpu_type()) { \
+ case CPU_M14KC: \
+ case CPU_74K: \
+ case CPU_1074K: \
+--
+2.40.1
+
--- /dev/null
+From 0dfa9af71d13083e3ca41ea9b45fde77362a9b2d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Aug 2023 13:12:21 +0800
+Subject: net: bcmgenet: Fix return value check for fixed_phy_register()
+
+From: Ruan Jinjie <ruanjinjie@huawei.com>
+
+[ Upstream commit 32bbe64a1386065ab2aef8ce8cae7c689d0add6e ]
+
+The fixed_phy_register() function returns error pointers and never
+returns NULL. Update the checks accordingly.
+
+Fixes: b0ba512e25d7 ("net: bcmgenet: enable driver to work without a device tree")
+Signed-off-by: Ruan Jinjie <ruanjinjie@huawei.com>
+Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
+Acked-by: Doug Berger <opendmb@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/genet/bcmmii.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
+index 99aba64f03c2f..2b0538f2af639 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
+@@ -568,7 +568,7 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
+ };
+
+ phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
+- if (!phydev || IS_ERR(phydev)) {
++ if (IS_ERR(phydev)) {
+ dev_err(kdev, "failed to register fixed PHY device\n");
+ return -ENODEV;
+ }
+--
+2.40.1
+
--- /dev/null
+From c26f566301d2d47240d856ab1eaccb731f66016c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Aug 2023 13:12:20 +0800
+Subject: net: bgmac: Fix return value check for fixed_phy_register()
+
+From: Ruan Jinjie <ruanjinjie@huawei.com>
+
+[ Upstream commit 23a14488ea5882dea5851b65c9fce2127ee8fcad ]
+
+The fixed_phy_register() function returns error pointers and never
+returns NULL. Update the checks accordingly.
+
+Fixes: c25b23b8a387 ("bgmac: register fixed PHY for ARM BCM470X / BCM5301X chipsets")
+Signed-off-by: Ruan Jinjie <ruanjinjie@huawei.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/bgmac.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
+index ab8ee93316354..a4f6143e66fe9 100644
+--- a/drivers/net/ethernet/broadcom/bgmac.c
++++ b/drivers/net/ethernet/broadcom/bgmac.c
+@@ -1448,7 +1448,7 @@ int bgmac_phy_connect_direct(struct bgmac *bgmac)
+ int err;
+
+ phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
+- if (!phy_dev || IS_ERR(phy_dev)) {
++ if (IS_ERR(phy_dev)) {
+ dev_err(bgmac->dev, "Failed to register fixed PHY device\n");
+ return -ENODEV;
+ }
+--
+2.40.1
+
--- /dev/null
+From 46a5f6153a497fe8d5023aa330431caa52722d53 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 26 Jan 2022 11:10:59 -0800
+Subject: net: remove bond_slave_has_mac_rcu()
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 8b0fdcdc3a7d44aff907f0103f5ffb86b12bfe71 ]
+
+No caller since v3.16.
+
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: e74216b8def3 ("bonding: fix macvlan over alb bond support")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/bonding.h | 14 --------------
+ 1 file changed, 14 deletions(-)
+
+diff --git a/include/net/bonding.h b/include/net/bonding.h
+index a248caff969f5..34b6f7241a41e 100644
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -697,20 +697,6 @@ static inline struct slave *bond_slave_has_mac(struct bonding *bond,
+ return NULL;
+ }
+
+-/* Caller must hold rcu_read_lock() for read */
+-static inline struct slave *bond_slave_has_mac_rcu(struct bonding *bond,
+- const u8 *mac)
+-{
+- struct list_head *iter;
+- struct slave *tmp;
+-
+- bond_for_each_slave_rcu(bond, tmp, iter)
+- if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
+- return tmp;
+-
+- return NULL;
+-}
+-
+ /* Caller must hold rcu_read_lock() for read */
+ static inline bool bond_slave_has_mac_rx(struct bonding *bond, const u8 *mac)
+ {
+--
+2.40.1
+
--- /dev/null
+From 8fa2b0c690df279051e2bfd8e60826ac19a5fb6e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 22 Aug 2023 06:12:31 -0400
+Subject: net/sched: fix a qdisc modification with ambiguous command request
+
+From: Jamal Hadi Salim <jhs@mojatatu.com>
+
+[ Upstream commit da71714e359b64bd7aab3bd56ec53f307f058133 ]
+
+When replacing an existing root qdisc, with one that is of the same kind, the
+request boils down to essentially a parameterization change i.e not one that
+requires allocation and grafting of a new qdisc. syzbot was able to create a
+scenario which resulted in a taprio qdisc replacing an existing taprio qdisc
+with a combination of NLM_F_CREATE, NLM_F_REPLACE and NLM_F_EXCL leading to
+create and graft scenario.
+The fix ensures that only when the qdisc kinds are different that we should
+allow a create and graft, otherwise it goes into the "change" codepath.
+
+While at it, fix the code and comments to improve readability.
+
+While syzbot was able to create the issue, it did not zone on the root cause.
+Analysis from Vladimir Oltean <vladimir.oltean@nxp.com> helped narrow it down.
+
+v1->V2 changes:
+- remove "inline" function definition (Vladmir)
+- remove extrenous braces in branches (Vladmir)
+- change inline function names (Pedro)
+- Run tdc tests (Victor)
+v2->v3 changes:
+- dont break else/if (Simon)
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Reported-by: syzbot+a3618a167af2021433cd@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/netdev/20230816225759.g25x76kmgzya2gei@skbuf/T/
+Tested-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Tested-by: Victor Nogueira <victor@mojatatu.com>
+Reviewed-by: Pedro Tammela <pctammela@mojatatu.com>
+Reviewed-by: Victor Nogueira <victor@mojatatu.com>
+Signed-off-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_api.c | 53 ++++++++++++++++++++++++++++++++++-----------
+ 1 file changed, 40 insertions(+), 13 deletions(-)
+
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index fb50e3f3283f9..5c2d230790db9 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -1513,10 +1513,28 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
+ return 0;
+ }
+
++static bool req_create_or_replace(struct nlmsghdr *n)
++{
++ return (n->nlmsg_flags & NLM_F_CREATE &&
++ n->nlmsg_flags & NLM_F_REPLACE);
++}
++
++static bool req_create_exclusive(struct nlmsghdr *n)
++{
++ return (n->nlmsg_flags & NLM_F_CREATE &&
++ n->nlmsg_flags & NLM_F_EXCL);
++}
++
++static bool req_change(struct nlmsghdr *n)
++{
++ return (!(n->nlmsg_flags & NLM_F_CREATE) &&
++ !(n->nlmsg_flags & NLM_F_REPLACE) &&
++ !(n->nlmsg_flags & NLM_F_EXCL));
++}
++
+ /*
+ * Create/change qdisc.
+ */
+-
+ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
+ struct netlink_ext_ack *extack)
+ {
+@@ -1613,27 +1631,35 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
+ *
+ * We know, that some child q is already
+ * attached to this parent and have choice:
+- * either to change it or to create/graft new one.
++ * 1) change it or 2) create/graft new one.
++ * If the requested qdisc kind is different
++ * than the existing one, then we choose graft.
++ * If they are the same then this is "change"
++ * operation - just let it fallthrough..
+ *
+ * 1. We are allowed to create/graft only
+- * if CREATE and REPLACE flags are set.
++ * if the request is explicitly stating
++ * "please create if it doesn't exist".
+ *
+- * 2. If EXCL is set, requestor wanted to say,
+- * that qdisc tcm_handle is not expected
++ * 2. If the request is to exclusive create
++ * then the qdisc tcm_handle is not expected
+ * to exist, so that we choose create/graft too.
+ *
+ * 3. The last case is when no flags are set.
++ * This will happen when for example tc
++ * utility issues a "change" command.
+ * Alas, it is sort of hole in API, we
+ * cannot decide what to do unambiguously.
+- * For now we select create/graft, if
+- * user gave KIND, which does not match existing.
++ * For now we select create/graft.
+ */
+- if ((n->nlmsg_flags & NLM_F_CREATE) &&
+- (n->nlmsg_flags & NLM_F_REPLACE) &&
+- ((n->nlmsg_flags & NLM_F_EXCL) ||
+- (tca[TCA_KIND] &&
+- nla_strcmp(tca[TCA_KIND], q->ops->id))))
+- goto create_n_graft;
++ if (tca[TCA_KIND] &&
++ nla_strcmp(tca[TCA_KIND], q->ops->id)) {
++ if (req_create_or_replace(n) ||
++ req_create_exclusive(n))
++ goto create_n_graft;
++ else if (req_change(n))
++ goto create_n_graft2;
++ }
+ }
+ }
+ } else {
+@@ -1667,6 +1693,7 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
+ NL_SET_ERR_MSG(extack, "Qdisc not found. To create specify NLM_F_CREATE flag");
+ return -ENOENT;
+ }
++create_n_graft2:
+ if (clid == TC_H_INGRESS) {
+ if (dev_ingress_queue(dev)) {
+ q = qdisc_create(dev, dev_ingress_queue(dev), p,
+--
+2.40.1
+
--- /dev/null
+From 799b1a82d33a7bbb86565f9258a7e11205a81d7d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Aug 2023 18:26:02 -0700
+Subject: net: validate veth and vxcan peer ifindexes
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit f534f6581ec084fe94d6759f7672bd009794b07e ]
+
+veth and vxcan need to make sure the ifindexes of the peer
+are not negative, core does not validate this.
+
+Using iproute2 with user-space-level checking removed:
+
+Before:
+
+ # ./ip link add index 10 type veth peer index -1
+ # ip link show
+ 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000
+ link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
+ 2: enp1s0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP mode DEFAULT group default qlen 1000
+ link/ether 52:54:00:74:b2:03 brd ff:ff:ff:ff:ff:ff
+ 10: veth1@veth0: <BROADCAST,MULTICAST,M-DOWN> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
+ link/ether 8a:90:ff:57:6d:5d brd ff:ff:ff:ff:ff:ff
+ -1: veth0@veth1: <BROADCAST,MULTICAST,M-DOWN> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
+ link/ether ae:ed:18:e6:fa:7f brd ff:ff:ff:ff:ff:ff
+
+Now:
+
+ $ ./ip link add index 10 type veth peer index -1
+ Error: ifindex can't be negative.
+
+This problem surfaced in net-next because an explicit WARN()
+was added, the root cause is older.
+
+Fixes: e6f8f1a739b6 ("veth: Allow to create peer link with given ifindex")
+Fixes: a8f820a380a2 ("can: add Virtual CAN Tunnel driver (vxcan)")
+Reported-by: syzbot+5ba06978f34abb058571@syzkaller.appspotmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/can/vxcan.c | 7 +------
+ drivers/net/veth.c | 5 +----
+ include/net/rtnetlink.h | 4 ++--
+ net/core/rtnetlink.c | 22 ++++++++++++++++++----
+ 4 files changed, 22 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
+index 282c53ef76d23..1bfede407270d 100644
+--- a/drivers/net/can/vxcan.c
++++ b/drivers/net/can/vxcan.c
+@@ -179,12 +179,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
+
+ nla_peer = data[VXCAN_INFO_PEER];
+ ifmp = nla_data(nla_peer);
+- err = rtnl_nla_parse_ifla(peer_tb,
+- nla_data(nla_peer) +
+- sizeof(struct ifinfomsg),
+- nla_len(nla_peer) -
+- sizeof(struct ifinfomsg),
+- NULL);
++ err = rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack);
+ if (err < 0)
+ return err;
+
+diff --git a/drivers/net/veth.c b/drivers/net/veth.c
+index 5aa23a036ed36..4ba86fa4d6497 100644
+--- a/drivers/net/veth.c
++++ b/drivers/net/veth.c
+@@ -1313,10 +1313,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
+
+ nla_peer = data[VETH_INFO_PEER];
+ ifmp = nla_data(nla_peer);
+- err = rtnl_nla_parse_ifla(peer_tb,
+- nla_data(nla_peer) + sizeof(struct ifinfomsg),
+- nla_len(nla_peer) - sizeof(struct ifinfomsg),
+- NULL);
++ err = rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack);
+ if (err < 0)
+ return err;
+
+diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
+index 4da61c950e931..5c2a73bbfabee 100644
+--- a/include/net/rtnetlink.h
++++ b/include/net/rtnetlink.h
+@@ -166,8 +166,8 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname,
+ int rtnl_delete_link(struct net_device *dev);
+ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm);
+
+-int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
+- struct netlink_ext_ack *exterr);
++int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer,
++ struct netlink_ext_ack *exterr);
+ struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid);
+
+ #define MODULE_ALIAS_RTNL_LINK(kind) MODULE_ALIAS("rtnl-link-" kind)
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index ce37a052b9c32..cee86a2b3a036 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -2161,13 +2161,27 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
+ return err;
+ }
+
+-int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
+- struct netlink_ext_ack *exterr)
++int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer,
++ struct netlink_ext_ack *exterr)
+ {
+- return nla_parse_deprecated(tb, IFLA_MAX, head, len, ifla_policy,
++ const struct ifinfomsg *ifmp;
++ const struct nlattr *attrs;
++ size_t len;
++
++ ifmp = nla_data(nla_peer);
++ attrs = nla_data(nla_peer) + sizeof(struct ifinfomsg);
++ len = nla_len(nla_peer) - sizeof(struct ifinfomsg);
++
++ if (ifmp->ifi_index < 0) {
++ NL_SET_ERR_MSG_ATTR(exterr, nla_peer,
++ "ifindex can't be negative");
++ return -EINVAL;
++ }
++
++ return nla_parse_deprecated(tb, IFLA_MAX, attrs, len, ifla_policy,
+ exterr);
+ }
+-EXPORT_SYMBOL(rtnl_nla_parse_ifla);
++EXPORT_SYMBOL(rtnl_nla_parse_ifinfomsg);
+
+ struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
+ {
+--
+2.40.1
+
--- /dev/null
+From 1e4fe0416d81ea8cfd8ad90f7e83f4da063b57d5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 22 Aug 2023 19:49:52 +0200
+Subject: netfilter: nf_tables: fix out of memory error handling
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit 5e1be4cdc98c989d5387ce94ff15b5ad06a5b681 ]
+
+Several instances of pipapo_resize() don't propagate allocation failures,
+this causes a crash when fault injection is enabled for gfp_kernel slabs.
+
+Fixes: 3c4287f62044 ("nf_tables: Add set type for arbitrary concatenation of ranges")
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Reviewed-by: Stefano Brivio <sbrivio@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nft_set_pipapo.c | 13 ++++++++++---
+ 1 file changed, 10 insertions(+), 3 deletions(-)
+
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index 3be93175b3ffd..50f840e312b03 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -901,12 +901,14 @@ static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f)
+ static int pipapo_insert(struct nft_pipapo_field *f, const uint8_t *k,
+ int mask_bits)
+ {
+- int rule = f->rules++, group, ret, bit_offset = 0;
++ int rule = f->rules, group, ret, bit_offset = 0;
+
+- ret = pipapo_resize(f, f->rules - 1, f->rules);
++ ret = pipapo_resize(f, f->rules, f->rules + 1);
+ if (ret)
+ return ret;
+
++ f->rules++;
++
+ for (group = 0; group < f->groups; group++) {
+ int i, v;
+ u8 mask;
+@@ -1051,7 +1053,9 @@ static int pipapo_expand(struct nft_pipapo_field *f,
+ step++;
+ if (step >= len) {
+ if (!masks) {
+- pipapo_insert(f, base, 0);
++ err = pipapo_insert(f, base, 0);
++ if (err < 0)
++ return err;
+ masks = 1;
+ }
+ goto out;
+@@ -1234,6 +1238,9 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
+ else
+ ret = pipapo_expand(f, start, end, f->groups * f->bb);
+
++ if (ret < 0)
++ return ret;
++
+ if (f->bsize > bsize_max)
+ bsize_max = f->bsize;
+
+--
+2.40.1
+
--- /dev/null
+From b44aaf8fe4a9ebef0b929a8a3144d7a6038e2596 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Jul 2023 14:59:30 +0300
+Subject: NFSv4: fix out path in __nfs4_get_acl_uncached
+
+From: Fedor Pchelkin <pchelkin@ispras.ru>
+
+[ Upstream commit f4e89f1a6dab4c063fc1e823cc9dddc408ff40cf ]
+
+Another highly rare error case when a page allocating loop (inside
+__nfs4_get_acl_uncached, this time) is not properly unwound on error.
+Since pages array is allocated being uninitialized, need to free only
+lower array indices. NULL checks were useful before commit 62a1573fcf84
+("NFSv4 fix acl retrieval over krb5i/krb5p mounts") when the array had
+been initialized to zero on stack.
+
+Found by Linux Verification Center (linuxtesting.org).
+
+Fixes: 62a1573fcf84 ("NFSv4 fix acl retrieval over krb5i/krb5p mounts")
+Signed-off-by: Fedor Pchelkin <pchelkin@ispras.ru>
+Reviewed-by: Benjamin Coddington <bcodding@redhat.com>
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/nfs4proc.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index b9567cc8698ed..2d583bd378869 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -5864,9 +5864,8 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
+ out_ok:
+ ret = res.acl_len;
+ out_free:
+- for (i = 0; i < npages; i++)
+- if (pages[i])
+- __free_page(pages[i]);
++ while (--i >= 0)
++ __free_page(pages[i]);
+ if (res.acl_scratch)
+ __free_page(res.acl_scratch);
+ kfree(pages);
+--
+2.40.1
+
--- /dev/null
+From 2aa2e38bf246be7bfaf4ade3bf4fddd670fd2af6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 17 Aug 2023 12:00:06 +0530
+Subject: octeontx2-af: SDP: fix receive link config
+
+From: Hariprasad Kelam <hkelam@marvell.com>
+
+[ Upstream commit 05f3d5bc23524bed6f043dfe6b44da687584f9fb ]
+
+On SDP interfaces, frame oversize and undersize errors are
+observed as driver is not considering packet sizes of all
+subscribers of the link before updating the link config.
+
+This patch fixes the same.
+
+Fixes: 9b7dd87ac071 ("octeontx2-af: Support to modify min/max allowed packet lengths")
+Signed-off-by: Hariprasad Kelam <hkelam@marvell.com>
+Signed-off-by: Sunil Goutham <sgoutham@marvell.com>
+Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
+Link: https://lore.kernel.org/r/20230817063006.10366-1-hkelam@marvell.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+index 449f5224d1aeb..e549b09c347a7 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+@@ -2876,9 +2876,10 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
+ if (link < 0)
+ return NIX_AF_ERR_RX_LINK_INVALID;
+
+- nix_find_link_frs(rvu, req, pcifunc);
+
+ linkcfg:
++ nix_find_link_frs(rvu, req, pcifunc);
++
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
+ cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
+ if (req->update_minlen)
+--
+2.40.1
+
--- /dev/null
+From 8fa602db4bdb469683c739cc1c88d11907e43937 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Apr 2023 21:15:57 +0200
+Subject: PCI: acpiphp: Reassign resources on bridge if necessary
+
+From: Igor Mammedov <imammedo@redhat.com>
+
+[ Upstream commit 40613da52b13fb21c5566f10b287e0ca8c12c4e9 ]
+
+When using ACPI PCI hotplug, hotplugging a device with large BARs may fail
+if bridge windows programmed by firmware are not large enough.
+
+Reproducer:
+ $ qemu-kvm -monitor stdio -M q35 -m 4G \
+ -global ICH9-LPC.acpi-pci-hotplug-with-bridge-support=on \
+ -device id=rp1,pcie-root-port,bus=pcie.0,chassis=4 \
+ disk_image
+
+ wait till linux guest boots, then hotplug device:
+ (qemu) device_add qxl,bus=rp1
+
+ hotplug on guest side fails with:
+ pci 0000:01:00.0: [1b36:0100] type 00 class 0x038000
+ pci 0000:01:00.0: reg 0x10: [mem 0x00000000-0x03ffffff]
+ pci 0000:01:00.0: reg 0x14: [mem 0x00000000-0x03ffffff]
+ pci 0000:01:00.0: reg 0x18: [mem 0x00000000-0x00001fff]
+ pci 0000:01:00.0: reg 0x1c: [io 0x0000-0x001f]
+ pci 0000:01:00.0: BAR 0: no space for [mem size 0x04000000]
+ pci 0000:01:00.0: BAR 0: failed to assign [mem size 0x04000000]
+ pci 0000:01:00.0: BAR 1: no space for [mem size 0x04000000]
+ pci 0000:01:00.0: BAR 1: failed to assign [mem size 0x04000000]
+ pci 0000:01:00.0: BAR 2: assigned [mem 0xfe800000-0xfe801fff]
+ pci 0000:01:00.0: BAR 3: assigned [io 0x1000-0x101f]
+ qxl 0000:01:00.0: enabling device (0000 -> 0003)
+ Unable to create vram_mapping
+ qxl: probe of 0000:01:00.0 failed with error -12
+
+However when using native PCIe hotplug
+ '-global ICH9-LPC.acpi-pci-hotplug-with-bridge-support=off'
+it works fine, since kernel attempts to reassign unused resources.
+
+Use the same machinery as native PCIe hotplug to (re)assign resources.
+
+Link: https://lore.kernel.org/r/20230424191557.2464760-1-imammedo@redhat.com
+Signed-off-by: Igor Mammedov <imammedo@redhat.com>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Acked-by: Michael S. Tsirkin <mst@redhat.com>
+Acked-by: Rafael J. Wysocki <rafael@kernel.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/hotplug/acpiphp_glue.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
+index f031302ad4019..44c0b025f09e1 100644
+--- a/drivers/pci/hotplug/acpiphp_glue.c
++++ b/drivers/pci/hotplug/acpiphp_glue.c
+@@ -489,7 +489,6 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge)
+ acpiphp_native_scan_bridge(dev);
+ }
+ } else {
+- LIST_HEAD(add_list);
+ int max, pass;
+
+ acpiphp_rescan_slot(slot);
+@@ -503,12 +502,10 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge)
+ if (pass && dev->subordinate) {
+ check_hotplug_bridge(slot, dev);
+ pcibios_resource_survey_bus(dev->subordinate);
+- __pci_bus_size_bridges(dev->subordinate,
+- &add_list);
+ }
+ }
+ }
+- __pci_bus_assign_resources(bus, &add_list, NULL);
++ pci_assign_unassigned_bridge_resources(bus->self);
+ }
+
+ acpiphp_sanitize_bus(bus);
+--
+2.40.1
+
--- /dev/null
+From 64f9027f1c5074ad193b2e2efb6f5163ad474bdd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 30 Jun 2023 13:52:13 +0200
+Subject: rbd: make get_lock_owner_info() return a single locker or NULL
+
+From: Ilya Dryomov <idryomov@gmail.com>
+
+[ Upstream commit f38cb9d9c2045dad16eead4a2e1aedfddd94603b ]
+
+Make the "num_lockers can be only 0 or 1" assumption explicit and
+simplify the API by getting rid of output parameters in preparation
+for calling get_lock_owner_info() twice before blocklisting.
+
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Reviewed-by: Dongsheng Yang <dongsheng.yang@easystack.cn>
+Stable-dep-of: 588159009d5b ("rbd: retrieve and check lock owner twice before blocklisting")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/block/rbd.c | 84 +++++++++++++++++++++++++++------------------
+ 1 file changed, 51 insertions(+), 33 deletions(-)
+
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index 7b8731cddd9ea..dcb43c633c5e7 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -3914,10 +3914,17 @@ static void wake_lock_waiters(struct rbd_device *rbd_dev, int result)
+ list_splice_tail_init(&rbd_dev->acquiring_list, &rbd_dev->running_list);
+ }
+
+-static int get_lock_owner_info(struct rbd_device *rbd_dev,
+- struct ceph_locker **lockers, u32 *num_lockers)
++static void free_locker(struct ceph_locker *locker)
++{
++ if (locker)
++ ceph_free_lockers(locker, 1);
++}
++
++static struct ceph_locker *get_lock_owner_info(struct rbd_device *rbd_dev)
+ {
+ struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
++ struct ceph_locker *lockers;
++ u32 num_lockers;
+ u8 lock_type;
+ char *lock_tag;
+ int ret;
+@@ -3926,39 +3933,45 @@ static int get_lock_owner_info(struct rbd_device *rbd_dev,
+
+ ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
+ &rbd_dev->header_oloc, RBD_LOCK_NAME,
+- &lock_type, &lock_tag, lockers, num_lockers);
+- if (ret)
+- return ret;
++ &lock_type, &lock_tag, &lockers, &num_lockers);
++ if (ret) {
++ rbd_warn(rbd_dev, "failed to retrieve lockers: %d", ret);
++ return ERR_PTR(ret);
++ }
+
+- if (*num_lockers == 0) {
++ if (num_lockers == 0) {
+ dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
++ lockers = NULL;
+ goto out;
+ }
+
+ if (strcmp(lock_tag, RBD_LOCK_TAG)) {
+ rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
+ lock_tag);
+- ret = -EBUSY;
+- goto out;
++ goto err_busy;
+ }
+
+ if (lock_type == CEPH_CLS_LOCK_SHARED) {
+ rbd_warn(rbd_dev, "shared lock type detected");
+- ret = -EBUSY;
+- goto out;
++ goto err_busy;
+ }
+
+- if (strncmp((*lockers)[0].id.cookie, RBD_LOCK_COOKIE_PREFIX,
++ WARN_ON(num_lockers != 1);
++ if (strncmp(lockers[0].id.cookie, RBD_LOCK_COOKIE_PREFIX,
+ strlen(RBD_LOCK_COOKIE_PREFIX))) {
+ rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
+- (*lockers)[0].id.cookie);
+- ret = -EBUSY;
+- goto out;
++ lockers[0].id.cookie);
++ goto err_busy;
+ }
+
+ out:
+ kfree(lock_tag);
+- return ret;
++ return lockers;
++
++err_busy:
++ kfree(lock_tag);
++ ceph_free_lockers(lockers, num_lockers);
++ return ERR_PTR(-EBUSY);
+ }
+
+ static int find_watcher(struct rbd_device *rbd_dev,
+@@ -4012,51 +4025,56 @@ static int find_watcher(struct rbd_device *rbd_dev,
+ static int rbd_try_lock(struct rbd_device *rbd_dev)
+ {
+ struct ceph_client *client = rbd_dev->rbd_client->client;
+- struct ceph_locker *lockers;
+- u32 num_lockers;
++ struct ceph_locker *locker;
+ int ret;
+
+ for (;;) {
++ locker = NULL;
++
+ ret = rbd_lock(rbd_dev);
+ if (ret != -EBUSY)
+- return ret;
++ goto out;
+
+ /* determine if the current lock holder is still alive */
+- ret = get_lock_owner_info(rbd_dev, &lockers, &num_lockers);
+- if (ret)
+- return ret;
+-
+- if (num_lockers == 0)
++ locker = get_lock_owner_info(rbd_dev);
++ if (IS_ERR(locker)) {
++ ret = PTR_ERR(locker);
++ locker = NULL;
++ goto out;
++ }
++ if (!locker)
+ goto again;
+
+- ret = find_watcher(rbd_dev, lockers);
++ ret = find_watcher(rbd_dev, locker);
+ if (ret)
+ goto out; /* request lock or error */
+
+ rbd_warn(rbd_dev, "breaking header lock owned by %s%llu",
+- ENTITY_NAME(lockers[0].id.name));
++ ENTITY_NAME(locker->id.name));
+
+ ret = ceph_monc_blocklist_add(&client->monc,
+- &lockers[0].info.addr);
++ &locker->info.addr);
+ if (ret) {
+- rbd_warn(rbd_dev, "blocklist of %s%llu failed: %d",
+- ENTITY_NAME(lockers[0].id.name), ret);
++ rbd_warn(rbd_dev, "failed to blocklist %s%llu: %d",
++ ENTITY_NAME(locker->id.name), ret);
+ goto out;
+ }
+
+ ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
+ &rbd_dev->header_oloc, RBD_LOCK_NAME,
+- lockers[0].id.cookie,
+- &lockers[0].id.name);
+- if (ret && ret != -ENOENT)
++ locker->id.cookie, &locker->id.name);
++ if (ret && ret != -ENOENT) {
++ rbd_warn(rbd_dev, "failed to break header lock: %d",
++ ret);
+ goto out;
++ }
+
+ again:
+- ceph_free_lockers(lockers, num_lockers);
++ free_locker(locker);
+ }
+
+ out:
+- ceph_free_lockers(lockers, num_lockers);
++ free_locker(locker);
+ return ret;
+ }
+
+--
+2.40.1
+
--- /dev/null
+From 18cfdc88fd7f7fa684a37c06d8c2a23aba6baa98 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 1 Aug 2023 19:14:24 +0200
+Subject: rbd: prevent busy loop when requesting exclusive lock
+
+From: Ilya Dryomov <idryomov@gmail.com>
+
+[ Upstream commit 9d01e07fd1bfb4daae156ab528aa196f5ac2b2bc ]
+
+Due to rbd_try_acquire_lock() effectively swallowing all but
+EBLOCKLISTED error from rbd_try_lock() ("request lock anyway") and
+rbd_request_lock() returning ETIMEDOUT error not only for an actual
+notify timeout but also when the lock owner doesn't respond, a busy
+loop inside of rbd_acquire_lock() between rbd_try_acquire_lock() and
+rbd_request_lock() is possible.
+
+Requesting the lock on EBUSY error (returned by get_lock_owner_info()
+if an incompatible lock or invalid lock owner is detected) makes very
+little sense. The same goes for ETIMEDOUT error (might pop up pretty
+much anywhere if osd_request_timeout option is set) and many others.
+
+Just fail I/O requests on rbd_dev->acquiring_list immediately on any
+error from rbd_try_lock().
+
+Cc: stable@vger.kernel.org # 588159009d5b: rbd: retrieve and check lock owner twice before blocklisting
+Cc: stable@vger.kernel.org
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Reviewed-by: Dongsheng Yang <dongsheng.yang@easystack.cn>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/block/rbd.c | 28 +++++++++++++++-------------
+ 1 file changed, 15 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index 60d3a143ff450..95cbd5790ed60 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -3740,7 +3740,7 @@ static int rbd_lock(struct rbd_device *rbd_dev)
+ ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
+ RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
+ RBD_LOCK_TAG, "", 0);
+- if (ret)
++ if (ret && ret != -EEXIST)
+ return ret;
+
+ __rbd_lock(rbd_dev, cookie);
+@@ -3944,7 +3944,7 @@ static struct ceph_locker *get_lock_owner_info(struct rbd_device *rbd_dev)
+ &rbd_dev->header_oloc, RBD_LOCK_NAME,
+ &lock_type, &lock_tag, &lockers, &num_lockers);
+ if (ret) {
+- rbd_warn(rbd_dev, "failed to retrieve lockers: %d", ret);
++ rbd_warn(rbd_dev, "failed to get header lockers: %d", ret);
+ return ERR_PTR(ret);
+ }
+
+@@ -3996,8 +3996,10 @@ static int find_watcher(struct rbd_device *rbd_dev,
+ ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
+ &rbd_dev->header_oloc, &watchers,
+ &num_watchers);
+- if (ret)
++ if (ret) {
++ rbd_warn(rbd_dev, "failed to get watchers: %d", ret);
+ return ret;
++ }
+
+ sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
+ for (i = 0; i < num_watchers; i++) {
+@@ -4041,8 +4043,12 @@ static int rbd_try_lock(struct rbd_device *rbd_dev)
+ locker = refreshed_locker = NULL;
+
+ ret = rbd_lock(rbd_dev);
+- if (ret != -EBUSY)
++ if (!ret)
++ goto out;
++ if (ret != -EBUSY) {
++ rbd_warn(rbd_dev, "failed to lock header: %d", ret);
+ goto out;
++ }
+
+ /* determine if the current lock holder is still alive */
+ locker = get_lock_owner_info(rbd_dev);
+@@ -4145,11 +4151,8 @@ static int rbd_try_acquire_lock(struct rbd_device *rbd_dev)
+
+ ret = rbd_try_lock(rbd_dev);
+ if (ret < 0) {
+- rbd_warn(rbd_dev, "failed to lock header: %d", ret);
+- if (ret == -EBLOCKLISTED)
+- goto out;
+-
+- ret = 1; /* request lock anyway */
++ rbd_warn(rbd_dev, "failed to acquire lock: %d", ret);
++ goto out;
+ }
+ if (ret > 0) {
+ up_write(&rbd_dev->lock_rwsem);
+@@ -6699,12 +6702,11 @@ static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
+ cancel_delayed_work_sync(&rbd_dev->lock_dwork);
+ if (!ret)
+ ret = -ETIMEDOUT;
+- }
+
+- if (ret) {
+- rbd_warn(rbd_dev, "failed to acquire exclusive lock: %ld", ret);
+- return ret;
++ rbd_warn(rbd_dev, "failed to acquire lock: %ld", ret);
+ }
++ if (ret)
++ return ret;
+
+ /*
+ * The lock may have been released by now, unless automatic lock
+--
+2.40.1
+
--- /dev/null
+From 7f8f02aef733b2cf81fb804de9309eaafe217ce3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 22 Jul 2023 20:28:08 +0200
+Subject: rbd: retrieve and check lock owner twice before blocklisting
+
+From: Ilya Dryomov <idryomov@gmail.com>
+
+[ Upstream commit 588159009d5b7a09c3e5904cffddbe4a4e170301 ]
+
+An attempt to acquire exclusive lock can race with the current lock
+owner closing the image:
+
+1. lock is held by client123, rbd_lock() returns -EBUSY
+2. get_lock_owner_info() returns client123 instance details
+3. client123 closes the image, lock is released
+4. find_watcher() returns 0 as there is no matching watcher anymore
+5. client123 instance gets erroneously blocklisted
+
+Particularly impacted is mirror snapshot scheduler in snapshot-based
+mirroring since it happens to open and close images a lot (images are
+opened only for as long as it takes to take the next mirror snapshot,
+the same client instance is used for all images).
+
+To reduce the potential for erroneous blocklisting, retrieve the lock
+owner again after find_watcher() returns 0. If it's still there, make
+sure it matches the previously detected lock owner.
+
+Cc: stable@vger.kernel.org # f38cb9d9c204: rbd: make get_lock_owner_info() return a single locker or NULL
+Cc: stable@vger.kernel.org # 8ff2c64c9765: rbd: harden get_lock_owner_info() a bit
+Cc: stable@vger.kernel.org
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Reviewed-by: Dongsheng Yang <dongsheng.yang@easystack.cn>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/block/rbd.c | 25 +++++++++++++++++++++++--
+ 1 file changed, 23 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index dcb43c633c5e7..60d3a143ff450 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -3914,6 +3914,15 @@ static void wake_lock_waiters(struct rbd_device *rbd_dev, int result)
+ list_splice_tail_init(&rbd_dev->acquiring_list, &rbd_dev->running_list);
+ }
+
++static bool locker_equal(const struct ceph_locker *lhs,
++ const struct ceph_locker *rhs)
++{
++ return lhs->id.name.type == rhs->id.name.type &&
++ lhs->id.name.num == rhs->id.name.num &&
++ !strcmp(lhs->id.cookie, rhs->id.cookie) &&
++ ceph_addr_equal_no_type(&lhs->info.addr, &rhs->info.addr);
++}
++
+ static void free_locker(struct ceph_locker *locker)
+ {
+ if (locker)
+@@ -4025,11 +4034,11 @@ static int find_watcher(struct rbd_device *rbd_dev,
+ static int rbd_try_lock(struct rbd_device *rbd_dev)
+ {
+ struct ceph_client *client = rbd_dev->rbd_client->client;
+- struct ceph_locker *locker;
++ struct ceph_locker *locker, *refreshed_locker;
+ int ret;
+
+ for (;;) {
+- locker = NULL;
++ locker = refreshed_locker = NULL;
+
+ ret = rbd_lock(rbd_dev);
+ if (ret != -EBUSY)
+@@ -4049,6 +4058,16 @@ static int rbd_try_lock(struct rbd_device *rbd_dev)
+ if (ret)
+ goto out; /* request lock or error */
+
++ refreshed_locker = get_lock_owner_info(rbd_dev);
++ if (IS_ERR(refreshed_locker)) {
++ ret = PTR_ERR(refreshed_locker);
++ refreshed_locker = NULL;
++ goto out;
++ }
++ if (!refreshed_locker ||
++ !locker_equal(locker, refreshed_locker))
++ goto again;
++
+ rbd_warn(rbd_dev, "breaking header lock owned by %s%llu",
+ ENTITY_NAME(locker->id.name));
+
+@@ -4070,10 +4089,12 @@ static int rbd_try_lock(struct rbd_device *rbd_dev)
+ }
+
+ again:
++ free_locker(refreshed_locker);
+ free_locker(locker);
+ }
+
+ out:
++ free_locker(refreshed_locker);
+ free_locker(locker);
+ return ret;
+ }
+--
+2.40.1
+
--- /dev/null
+From 0560ac660b042b6079d1e935a57dda2f99ce5494 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Aug 2023 09:43:48 +0300
+Subject: rtnetlink: Reject negative ifindexes in RTM_NEWLINK
+
+From: Ido Schimmel <idosch@nvidia.com>
+
+[ Upstream commit 30188bd7838c16a98a520db1fe9df01ffc6ed368 ]
+
+Negative ifindexes are illegal, but the kernel does not validate the
+ifindex in the ancillary header of RTM_NEWLINK messages, resulting in
+the kernel generating a warning [1] when such an ifindex is specified.
+
+Fix by rejecting negative ifindexes.
+
+[1]
+WARNING: CPU: 0 PID: 5031 at net/core/dev.c:9593 dev_index_reserve+0x1a2/0x1c0 net/core/dev.c:9593
+[...]
+Call Trace:
+ <TASK>
+ register_netdevice+0x69a/0x1490 net/core/dev.c:10081
+ br_dev_newlink+0x27/0x110 net/bridge/br_netlink.c:1552
+ rtnl_newlink_create net/core/rtnetlink.c:3471 [inline]
+ __rtnl_newlink+0x115e/0x18c0 net/core/rtnetlink.c:3688
+ rtnl_newlink+0x67/0xa0 net/core/rtnetlink.c:3701
+ rtnetlink_rcv_msg+0x439/0xd30 net/core/rtnetlink.c:6427
+ netlink_rcv_skb+0x16b/0x440 net/netlink/af_netlink.c:2545
+ netlink_unicast_kernel net/netlink/af_netlink.c:1342 [inline]
+ netlink_unicast+0x536/0x810 net/netlink/af_netlink.c:1368
+ netlink_sendmsg+0x93c/0xe40 net/netlink/af_netlink.c:1910
+ sock_sendmsg_nosec net/socket.c:728 [inline]
+ sock_sendmsg+0xd9/0x180 net/socket.c:751
+ ____sys_sendmsg+0x6ac/0x940 net/socket.c:2538
+ ___sys_sendmsg+0x135/0x1d0 net/socket.c:2592
+ __sys_sendmsg+0x117/0x1e0 net/socket.c:2621
+ do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+ do_syscall_64+0x38/0xb0 arch/x86/entry/common.c:80
+ entry_SYSCALL_64_after_hwframe+0x63/0xcd
+
+Fixes: 38f7b870d4a6 ("[RTNETLINK]: Link creation API")
+Reported-by: syzbot+5ba06978f34abb058571@syzkaller.appspotmail.com
+Signed-off-by: Ido Schimmel <idosch@nvidia.com>
+Reviewed-by: Jiri Pirko <jiri@nvidia.com>
+Reviewed-by: Jakub Kicinski <kuba@kernel.org>
+Link: https://lore.kernel.org/r/20230823064348.2252280-1-idosch@nvidia.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/rtnetlink.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index ffa97613314ff..021dcfdae2835 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -3296,6 +3296,9 @@ static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
+ if (ifm->ifi_index > 0) {
+ link_specified = true;
+ dev = __dev_get_by_index(net, ifm->ifi_index);
++ } else if (ifm->ifi_index < 0) {
++ NL_SET_ERR_MSG(extack, "ifindex can't be negative");
++ return -EINVAL;
+ } else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) {
+ link_specified = true;
+ dev = rtnl_dev_get(net, NULL, tb[IFLA_ALT_IFNAME], ifname);
+--
+2.40.1
+
--- /dev/null
+From d63c94c896b118081b0e92be0fd41f78224b035f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Apr 2022 18:53:27 +0200
+Subject: rtnetlink: return ENODEV when ifname does not exist and group is
+ given
+
+From: Florent Fourcot <florent.fourcot@wifirst.fr>
+
+[ Upstream commit ef2a7c9065cea4e3fbc0390e82d05141abbccd7f ]
+
+When the interface does not exist, and a group is given, the given
+parameters are being set to all interfaces of the given group. The given
+IFNAME/ALT_IF_NAME are being ignored in that case.
+
+That can be dangerous since a typo (or a deleted interface) can produce
+weird side effects for caller:
+
+Case 1:
+
+ IFLA_IFNAME=valid_interface
+ IFLA_GROUP=1
+ MTU=1234
+
+Case 1 will update MTU and group of the given interface "valid_interface".
+
+Case 2:
+
+ IFLA_IFNAME=doesnotexist
+ IFLA_GROUP=1
+ MTU=1234
+
+Case 2 will update MTU of all interfaces in group 1. IFLA_IFNAME is
+ignored in this case
+
+This behaviour is not consistent and dangerous. In order to fix this issue,
+we now return ENODEV when the given IFNAME does not exist.
+
+Signed-off-by: Florent Fourcot <florent.fourcot@wifirst.fr>
+Signed-off-by: Brian Baboch <brian.baboch@wifirst.fr>
+Reviewed-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Stable-dep-of: 30188bd7838c ("rtnetlink: Reject negative ifindexes in RTM_NEWLINK")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/rtnetlink.c | 18 ++++++++++++++----
+ 1 file changed, 14 insertions(+), 4 deletions(-)
+
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index cee86a2b3a036..ffa97613314ff 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -3272,6 +3272,7 @@ static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct ifinfomsg *ifm;
+ char ifname[IFNAMSIZ];
+ struct nlattr **data;
++ bool link_specified;
+ int err;
+
+ #ifdef CONFIG_MODULES
+@@ -3292,12 +3293,16 @@ static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
+ ifname[0] = '\0';
+
+ ifm = nlmsg_data(nlh);
+- if (ifm->ifi_index > 0)
++ if (ifm->ifi_index > 0) {
++ link_specified = true;
+ dev = __dev_get_by_index(net, ifm->ifi_index);
+- else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
++ } else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) {
++ link_specified = true;
+ dev = rtnl_dev_get(net, NULL, tb[IFLA_ALT_IFNAME], ifname);
+- else
++ } else {
++ link_specified = false;
+ dev = NULL;
++ }
+
+ master_dev = NULL;
+ m_ops = NULL;
+@@ -3400,7 +3405,12 @@ static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
+ }
+
+ if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
+- if (ifm->ifi_index == 0 && tb[IFLA_GROUP])
++ /* No dev found and NLM_F_CREATE not set. Requested dev does not exist,
++ * or it's for a group
++ */
++ if (link_specified)
++ return -ENODEV;
++ if (tb[IFLA_GROUP])
+ return rtnl_group_changelink(skb, net,
+ nla_get_u32(tb[IFLA_GROUP]),
+ ifm, extack, tb);
+--
+2.40.1
+
objtool-x86-fix-srso-mess.patch
+nfsv4-fix-out-path-in-__nfs4_get_acl_uncached.patch
+xprtrdma-remap-receive-buffers-after-a-reconnect.patch
+pci-acpiphp-reassign-resources-on-bridge-if-necessar.patch
+dlm-improve-plock-logging-if-interrupted.patch
+dlm-replace-usage-of-found-with-dedicated-list-itera.patch
+fs-dlm-add-pid-to-debug-log.patch
+fs-dlm-change-plock-interrupted-message-to-debug-aga.patch
+fs-dlm-use-dlm_plock_info-for-do_unlock_close.patch
+fs-dlm-fix-mismatch-of-plock-results-from-userspace.patch
+mips-cpu-features-enable-octeon_cache-by-cpu_type.patch
+mips-cpu-features-use-boot_cpu_type-for-cpu-type-bas.patch
+fbdev-improve-performance-of-sys_imageblit.patch
+fbdev-fix-sys_imageblit-for-arbitrary-image-widths.patch
+fbdev-fix-potential-oob-read-in-fast_imageblit.patch
+dm-integrity-increase-recalc_sectors-to-improve-reca.patch
+dm-integrity-reduce-vmalloc-space-footprint-on-32-bi.patch
+alsa-pcm-fix-potential-data-race-at-pcm-memory-alloc.patch
+drm-amd-display-do-not-wait-for-mpc-idle-if-tg-is-di.patch
+drm-amd-display-check-tg-is-non-null-before-checking.patch
+libceph-rbd-ignore-addr-type-while-comparing-in-some.patch
+rbd-make-get_lock_owner_info-return-a-single-locker-.patch
+rbd-retrieve-and-check-lock-owner-twice-before-block.patch
+rbd-prevent-busy-loop-when-requesting-exclusive-lock.patch
+tracing-fix-cpu-buffers-unavailable-due-to-record_di.patch
+tracing-fix-memleak-due-to-race-between-current_trac.patch
+octeontx2-af-sdp-fix-receive-link-config.patch
+sock-annotate-data-races-around-prot-memory_pressure.patch
+dccp-annotate-data-races-in-dccp_poll.patch
+ipvlan-fix-a-reference-count-leak-warning-in-ipvlan_.patch
+net-bgmac-fix-return-value-check-for-fixed_phy_regis.patch
+net-bcmgenet-fix-return-value-check-for-fixed_phy_re.patch
+net-validate-veth-and-vxcan-peer-ifindexes.patch
+ice-fix-receive-buffer-size-miscalculation.patch
+igb-avoid-starting-unnecessary-workqueues.patch
+net-sched-fix-a-qdisc-modification-with-ambiguous-co.patch
+netfilter-nf_tables-fix-out-of-memory-error-handling.patch
+rtnetlink-return-enodev-when-ifname-does-not-exist-a.patch
+rtnetlink-reject-negative-ifindexes-in-rtm_newlink.patch
+net-remove-bond_slave_has_mac_rcu.patch
+bonding-fix-macvlan-over-alb-bond-support.patch
--- /dev/null
+From f5ad6ff2e8df49c181261c5f83f5ae32afc56e39 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Aug 2023 01:51:32 +0000
+Subject: sock: annotate data-races around prot->memory_pressure
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 76f33296d2e09f63118db78125c95ef56df438e9 ]
+
+*prot->memory_pressure is read/written locklessly, we need
+to add proper annotations.
+
+A recent commit added a new race, it is time to audit all accesses.
+
+Fixes: 2d0c88e84e48 ("sock: Fix misuse of sk_under_memory_pressure()")
+Fixes: 4d93df0abd50 ("[SCTP]: Rewrite of sctp buffer management code")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Abel Wu <wuyun.abel@bytedance.com>
+Reviewed-by: Shakeel Butt <shakeelb@google.com>
+Link: https://lore.kernel.org/r/20230818015132.2699348-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/sock.h | 7 ++++---
+ net/sctp/socket.c | 2 +-
+ 2 files changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 665e388593752..234196d904238 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -1233,6 +1233,7 @@ struct proto {
+ /*
+ * Pressure flag: try to collapse.
+ * Technical note: it is used by multiple contexts non atomically.
++ * Make sure to use READ_ONCE()/WRITE_ONCE() for all reads/writes.
+ * All the __sk_mem_schedule() is of this nature: accounting
+ * is strict, actions are advisory and have some latency.
+ */
+@@ -1349,7 +1350,7 @@ static inline bool sk_has_memory_pressure(const struct sock *sk)
+ static inline bool sk_under_global_memory_pressure(const struct sock *sk)
+ {
+ return sk->sk_prot->memory_pressure &&
+- !!*sk->sk_prot->memory_pressure;
++ !!READ_ONCE(*sk->sk_prot->memory_pressure);
+ }
+
+ static inline bool sk_under_memory_pressure(const struct sock *sk)
+@@ -1361,7 +1362,7 @@ static inline bool sk_under_memory_pressure(const struct sock *sk)
+ mem_cgroup_under_socket_pressure(sk->sk_memcg))
+ return true;
+
+- return !!*sk->sk_prot->memory_pressure;
++ return !!READ_ONCE(*sk->sk_prot->memory_pressure);
+ }
+
+ static inline long
+@@ -1415,7 +1416,7 @@ proto_memory_pressure(struct proto *prot)
+ {
+ if (!prot->memory_pressure)
+ return false;
+- return !!*prot->memory_pressure;
++ return !!READ_ONCE(*prot->memory_pressure);
+ }
+
+
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 534364bb871a3..fa4d31b507f29 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -97,7 +97,7 @@ struct percpu_counter sctp_sockets_allocated;
+
+ static void sctp_enter_memory_pressure(struct sock *sk)
+ {
+- sctp_memory_pressure = 1;
++ WRITE_ONCE(sctp_memory_pressure, 1);
+ }
+
+
+--
+2.40.1
+
--- /dev/null
+From b26bdfe955cc125463b84a642071e3b3793bfa39 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 5 Aug 2023 11:38:15 +0800
+Subject: tracing: Fix cpu buffers unavailable due to 'record_disabled' missed
+
+From: Zheng Yejian <zhengyejian1@huawei.com>
+
+[ Upstream commit b71645d6af10196c46cbe3732de2ea7d36b3ff6d ]
+
+Trace ring buffer can no longer record anything after executing
+following commands at the shell prompt:
+
+ # cd /sys/kernel/tracing
+ # cat tracing_cpumask
+ fff
+ # echo 0 > tracing_cpumask
+ # echo 1 > snapshot
+ # echo fff > tracing_cpumask
+ # echo 1 > tracing_on
+ # echo "hello world" > trace_marker
+ -bash: echo: write error: Bad file descriptor
+
+The root cause is that:
+ 1. After `echo 0 > tracing_cpumask`, 'record_disabled' of cpu buffers
+ in 'tr->array_buffer.buffer' became 1 (see tracing_set_cpumask());
+ 2. After `echo 1 > snapshot`, 'tr->array_buffer.buffer' is swapped
+ with 'tr->max_buffer.buffer', then the 'record_disabled' became 0
+ (see update_max_tr());
+ 3. After `echo fff > tracing_cpumask`, the 'record_disabled' become -1;
+Then array_buffer and max_buffer are both unavailable due to value of
+'record_disabled' is not 0.
+
+To fix it, enable or disable both array_buffer and max_buffer at the same
+time in tracing_set_cpumask().
+
+Link: https://lkml.kernel.org/r/20230805033816.3284594-2-zhengyejian1@huawei.com
+
+Cc: <mhiramat@kernel.org>
+Cc: <vnagarnaik@google.com>
+Cc: <shuah@kernel.org>
+Fixes: 71babb2705e2 ("tracing: change CPU ring buffer state from tracing_cpumask")
+Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/trace.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 167f2a19fd8a2..d0c9769938ad2 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -4843,11 +4843,17 @@ int tracing_set_cpumask(struct trace_array *tr,
+ !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
+ atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
+ ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
++#ifdef CONFIG_TRACER_MAX_TRACE
++ ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu);
++#endif
+ }
+ if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
+ cpumask_test_cpu(cpu, tracing_cpumask_new)) {
+ atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
+ ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
++#ifdef CONFIG_TRACER_MAX_TRACE
++ ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu);
++#endif
+ }
+ }
+ arch_spin_unlock(&tr->max_lock);
+--
+2.40.1
+
--- /dev/null
+From f8b25240ec10d3c33df374e10cb47ed7aa932940 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 17 Aug 2023 20:55:39 +0800
+Subject: tracing: Fix memleak due to race between current_tracer and trace
+
+From: Zheng Yejian <zhengyejian1@huawei.com>
+
+[ Upstream commit eecb91b9f98d6427d4af5fdb8f108f52572a39e7 ]
+
+Kmemleak report a leak in graph_trace_open():
+
+ unreferenced object 0xffff0040b95f4a00 (size 128):
+ comm "cat", pid 204981, jiffies 4301155872 (age 99771.964s)
+ hex dump (first 32 bytes):
+ e0 05 e7 b4 ab 7d 00 00 0b 00 01 00 00 00 00 00 .....}..........
+ f4 00 01 10 00 a0 ff ff 00 00 00 00 65 00 10 00 ............e...
+ backtrace:
+ [<000000005db27c8b>] kmem_cache_alloc_trace+0x348/0x5f0
+ [<000000007df90faa>] graph_trace_open+0xb0/0x344
+ [<00000000737524cd>] __tracing_open+0x450/0xb10
+ [<0000000098043327>] tracing_open+0x1a0/0x2a0
+ [<00000000291c3876>] do_dentry_open+0x3c0/0xdc0
+ [<000000004015bcd6>] vfs_open+0x98/0xd0
+ [<000000002b5f60c9>] do_open+0x520/0x8d0
+ [<00000000376c7820>] path_openat+0x1c0/0x3e0
+ [<00000000336a54b5>] do_filp_open+0x14c/0x324
+ [<000000002802df13>] do_sys_openat2+0x2c4/0x530
+ [<0000000094eea458>] __arm64_sys_openat+0x130/0x1c4
+ [<00000000a71d7881>] el0_svc_common.constprop.0+0xfc/0x394
+ [<00000000313647bf>] do_el0_svc+0xac/0xec
+ [<000000002ef1c651>] el0_svc+0x20/0x30
+ [<000000002fd4692a>] el0_sync_handler+0xb0/0xb4
+ [<000000000c309c35>] el0_sync+0x160/0x180
+
+The root cause is described as follows:
+
+ __tracing_open() { // 1. File 'trace' is being opened;
+ ...
+ *iter->trace = *tr->current_trace; // 2. Tracer 'function_graph' is
+ // currently set;
+ ...
+ iter->trace->open(iter); // 3. Call graph_trace_open() here,
+ // and memory are allocated in it;
+ ...
+ }
+
+ s_start() { // 4. The opened file is being read;
+ ...
+ *iter->trace = *tr->current_trace; // 5. If tracer is switched to
+ // 'nop' or others, then memory
+ // in step 3 are leaked!!!
+ ...
+ }
+
+To fix it, in s_start(), close tracer before switching then reopen the
+new tracer after switching. And some tracers like 'wakeup' may not update
+'iter->private' in some cases when reopen, then it should be cleared
+to avoid being mistakenly closed again.
+
+Link: https://lore.kernel.org/linux-trace-kernel/20230817125539.1646321-1-zhengyejian1@huawei.com
+
+Fixes: d7350c3f4569 ("tracing/core: make the read callbacks reentrants")
+Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/trace.c | 9 ++++++++-
+ kernel/trace/trace_irqsoff.c | 3 ++-
+ kernel/trace/trace_sched_wakeup.c | 2 ++
+ 3 files changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index d0c9769938ad2..597487a7f1bfb 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -3782,8 +3782,15 @@ static void *s_start(struct seq_file *m, loff_t *pos)
+ * will point to the same string as current_trace->name.
+ */
+ mutex_lock(&trace_types_lock);
+- if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
++ if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) {
++ /* Close iter->trace before switching to the new current tracer */
++ if (iter->trace->close)
++ iter->trace->close(iter);
+ *iter->trace = *tr->current_trace;
++ /* Reopen the new current tracer */
++ if (iter->trace->open)
++ iter->trace->open(iter);
++ }
+ mutex_unlock(&trace_types_lock);
+
+ #ifdef CONFIG_TRACER_MAX_TRACE
+diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
+index ee4571b624bcb..619a60944bb6d 100644
+--- a/kernel/trace/trace_irqsoff.c
++++ b/kernel/trace/trace_irqsoff.c
+@@ -228,7 +228,8 @@ static void irqsoff_trace_open(struct trace_iterator *iter)
+ {
+ if (is_graph(iter->tr))
+ graph_trace_open(iter);
+-
++ else
++ iter->private = NULL;
+ }
+
+ static void irqsoff_trace_close(struct trace_iterator *iter)
+diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
+index 97b10bb31a1f0..037e1e863b17f 100644
+--- a/kernel/trace/trace_sched_wakeup.c
++++ b/kernel/trace/trace_sched_wakeup.c
+@@ -171,6 +171,8 @@ static void wakeup_trace_open(struct trace_iterator *iter)
+ {
+ if (is_graph(iter->tr))
+ graph_trace_open(iter);
++ else
++ iter->private = NULL;
+ }
+
+ static void wakeup_trace_close(struct trace_iterator *iter)
+--
+2.40.1
+
--- /dev/null
+From 556598ba9c18382448b3e77eb8ce8e2b5f1ce9d7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 3 Jul 2023 14:18:29 -0400
+Subject: xprtrdma: Remap Receive buffers after a reconnect
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+[ Upstream commit 895cedc1791916e8a98864f12b656702fad0bb67 ]
+
+On server-initiated disconnect, rpcrdma_xprt_disconnect() was DMA-
+unmapping the Receive buffers, but rpcrdma_post_recvs() neglected
+to remap them after a new connection had been established. The
+result was immediate failure of the new connection with the Receives
+flushing with LOCAL_PROT_ERR.
+
+Fixes: 671c450b6fe0 ("xprtrdma: Fix oops in Receive handler after device removal")
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sunrpc/xprtrdma/verbs.c | 9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
+index 338b06de86d16..d015576f3081a 100644
+--- a/net/sunrpc/xprtrdma/verbs.c
++++ b/net/sunrpc/xprtrdma/verbs.c
+@@ -972,9 +972,6 @@ struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt,
+ if (!rep->rr_rdmabuf)
+ goto out_free;
+
+- if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf))
+- goto out_free_regbuf;
+-
+ xdr_buf_init(&rep->rr_hdrbuf, rdmab_data(rep->rr_rdmabuf),
+ rdmab_length(rep->rr_rdmabuf));
+ rep->rr_cqe.done = rpcrdma_wc_receive;
+@@ -987,8 +984,6 @@ struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt,
+ list_add(&rep->rr_all, &r_xprt->rx_buf.rb_all_reps);
+ return rep;
+
+-out_free_regbuf:
+- rpcrdma_regbuf_free(rep->rr_rdmabuf);
+ out_free:
+ kfree(rep);
+ out:
+@@ -1425,6 +1420,10 @@ void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed, bool temp)
+ rep = rpcrdma_rep_create(r_xprt, temp);
+ if (!rep)
+ break;
++ if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf)) {
++ rpcrdma_rep_put(buf, rep);
++ break;
++ }
+
+ trace_xprtrdma_post_recv(rep);
+ rep->rr_recv_wr.next = wr;
+--
+2.40.1
+