--- /dev/null
+From e23f7fda6bfb8b8979121c9b15a66e3c836387ac Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Aug 2023 15:19:04 +0800
+Subject: bonding: fix macvlan over alb bond support
+
+From: Hangbin Liu <liuhangbin@gmail.com>
+
+[ Upstream commit e74216b8def3803e98ae536de78733e9d7f3b109 ]
+
+The commit 14af9963ba1e ("bonding: Support macvlans on top of tlb/rlb mode
+bonds") aims to enable the use of macvlans on top of rlb bond mode. However,
+the current rlb bond mode only handles ARP packets to update remote neighbor
+entries. This causes an issue when a macvlan is on top of the bond, and
+remote devices send packets to the macvlan using the bond's MAC address
+as the destination. After delivering the packets to the macvlan, the macvlan
+will reject them as the MAC address is incorrect. Consequently, this commit
+makes macvlan over bond non-functional.
+
+To address this problem, one potential solution is to check for the presence
+of a macvlan port on the bond device using netif_is_macvlan_port(bond->dev)
+and return NULL in the rlb_arp_xmit() function. However, this approach
+doesn't fully resolve the situation when a VLAN exists between the bond and
+macvlan.
+
+So let's just do a partial revert for commit 14af9963ba1e in rlb_arp_xmit().
+As the comment said, Don't modify or load balance ARPs that do not originate
+locally.
+
+Fixes: 14af9963ba1e ("bonding: Support macvlans on top of tlb/rlb mode bonds")
+Reported-by: susan.zheng@veritas.com
+Closes: https://bugzilla.redhat.com/show_bug.cgi?id=2117816
+Signed-off-by: Hangbin Liu <liuhangbin@gmail.com>
+Acked-by: Jay Vosburgh <jay.vosburgh@canonical.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/bonding/bond_alb.c | 6 +++---
+ include/net/bonding.h | 11 +----------
+ 2 files changed, 4 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
+index 3fc439d924451..e03f4883858ac 100644
+--- a/drivers/net/bonding/bond_alb.c
++++ b/drivers/net/bonding/bond_alb.c
+@@ -671,10 +671,10 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
+ return NULL;
+ arp = (struct arp_pkt *)skb_network_header(skb);
+
+- /* Don't modify or load balance ARPs that do not originate locally
+- * (e.g.,arrive via a bridge).
++ /* Don't modify or load balance ARPs that do not originate
++ * from the bond itself or a VLAN directly above the bond.
+ */
+- if (!bond_slave_has_mac_rx(bond, arp->mac_src))
++ if (!bond_slave_has_mac_rcu(bond, arp->mac_src))
+ return NULL;
+
+ if (arp->op_code == htons(ARPOP_REPLY)) {
+diff --git a/include/net/bonding.h b/include/net/bonding.h
+index ab862e2e34520..7d317434e3d13 100644
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -675,23 +675,14 @@ static inline struct slave *bond_slave_has_mac(struct bonding *bond,
+ }
+
+ /* Caller must hold rcu_read_lock() for read */
+-static inline bool bond_slave_has_mac_rx(struct bonding *bond, const u8 *mac)
++static inline bool bond_slave_has_mac_rcu(struct bonding *bond, const u8 *mac)
+ {
+ struct list_head *iter;
+ struct slave *tmp;
+- struct netdev_hw_addr *ha;
+
+ bond_for_each_slave_rcu(bond, tmp, iter)
+ if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
+ return true;
+-
+- if (netdev_uc_empty(bond->dev))
+- return false;
+-
+- netdev_for_each_uc_addr(ha, bond->dev)
+- if (ether_addr_equal_64bits(mac, ha->addr))
+- return true;
+-
+ return false;
+ }
+
+--
+2.40.1
+
--- /dev/null
+From 6df6f48ddf2e553c4dadaa574cca7ff1adedfb56 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Aug 2023 01:58:20 +0000
+Subject: dccp: annotate data-races in dccp_poll()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit cba3f1786916063261e3e5ccbb803abc325b24ef ]
+
+We changed tcp_poll() over time, but never updated dccp.
+
+Note that we also could remove dccp instead of maintaining it.
+
+Fixes: 7c657876b63c ("[DCCP]: Initial implementation")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Link: https://lore.kernel.org/r/20230818015820.2701595-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/dccp/proto.c | 20 ++++++++++++--------
+ 1 file changed, 12 insertions(+), 8 deletions(-)
+
+diff --git a/net/dccp/proto.c b/net/dccp/proto.c
+index 27de4dc1ff512..c4ea0159ce2e8 100644
+--- a/net/dccp/proto.c
++++ b/net/dccp/proto.c
+@@ -328,11 +328,15 @@ EXPORT_SYMBOL_GPL(dccp_disconnect);
+ __poll_t dccp_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
+ {
+- __poll_t mask;
+ struct sock *sk = sock->sk;
++ __poll_t mask;
++ u8 shutdown;
++ int state;
+
+ sock_poll_wait(file, sock, wait);
+- if (sk->sk_state == DCCP_LISTEN)
++
++ state = inet_sk_state_load(sk);
++ if (state == DCCP_LISTEN)
+ return inet_csk_listen_poll(sk);
+
+ /* Socket is not locked. We are protected from async events
+@@ -341,20 +345,21 @@ __poll_t dccp_poll(struct file *file, struct socket *sock,
+ */
+
+ mask = 0;
+- if (sk->sk_err)
++ if (READ_ONCE(sk->sk_err))
+ mask = EPOLLERR;
++ shutdown = READ_ONCE(sk->sk_shutdown);
+
+- if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
++ if (shutdown == SHUTDOWN_MASK || state == DCCP_CLOSED)
+ mask |= EPOLLHUP;
+- if (sk->sk_shutdown & RCV_SHUTDOWN)
++ if (shutdown & RCV_SHUTDOWN)
+ mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
+
+ /* Connected? */
+- if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
++ if ((1 << state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
+ if (atomic_read(&sk->sk_rmem_alloc) > 0)
+ mask |= EPOLLIN | EPOLLRDNORM;
+
+- if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
++ if (!(shutdown & SEND_SHUTDOWN)) {
+ if (sk_stream_is_writeable(sk)) {
+ mask |= EPOLLOUT | EPOLLWRNORM;
+ } else { /* send SIGIO later */
+@@ -372,7 +377,6 @@ __poll_t dccp_poll(struct file *file, struct socket *sock,
+ }
+ return mask;
+ }
+-
+ EXPORT_SYMBOL_GPL(dccp_poll);
+
+ int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
+--
+2.40.1
+
--- /dev/null
+From 69301b4cdfcbbe6770288a3ae0f2dc8d3403fdd0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Apr 2022 16:06:34 -0400
+Subject: dlm: improve plock logging if interrupted
+
+From: Alexander Aring <aahringo@redhat.com>
+
+[ Upstream commit bcfad4265cedf3adcac355e994ef9771b78407bd ]
+
+This patch changes the log level if a plock is removed when interrupted
+from debug to info. Additional it signals now that the plock entity was
+removed to let the user know what's happening.
+
+If on a dev_write() a pending plock cannot be found, it will signal that
+it might have been removed because of a wait interruption.
+
+Before this patch there might be a "dev_write no op ..." info message
+and the users can only guess that the plock was removed before because
+the wait interruption. To be sure that is the case we log both messages
+on the same log level.
+
+Let both messages be logged on the info level because it should not happen
+a lot and if it happens it should be clear why the op was not found.
+
+Signed-off-by: Alexander Aring <aahringo@redhat.com>
+Signed-off-by: David Teigland <teigland@redhat.com>
+Stable-dep-of: 57e2c2f2d94c ("fs: dlm: fix mismatch of plock results from userspace")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/dlm/plock.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
+index 9fef426ce6f41..5f3643890f1e0 100644
+--- a/fs/dlm/plock.c
++++ b/fs/dlm/plock.c
+@@ -164,11 +164,12 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+
+ rv = wait_event_killable(recv_wq, (op->done != 0));
+ if (rv == -ERESTARTSYS) {
+- log_debug(ls, "%s: wait killed %llx", __func__,
+- (unsigned long long)number);
+ spin_lock(&ops_lock);
+ list_del(&op->list);
+ spin_unlock(&ops_lock);
++ log_print("%s: wait interrupted %x %llx, op removed",
++ __func__, ls->ls_global_id,
++ (unsigned long long)number);
+ dlm_release_plock_op(op);
+ do_unlock_close(ls, number, file, fl);
+ goto out;
+@@ -472,8 +473,8 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
+ else
+ wake_up(&recv_wq);
+ } else
+- log_print("dev_write no op %x %llx", info.fsid,
+- (unsigned long long)info.number);
++ log_print("%s: no op %x %llx - may got interrupted?", __func__,
++ info.fsid, (unsigned long long)info.number);
+ return count;
+ }
+
+--
+2.40.1
+
--- /dev/null
+From 962e39aca736676a640016c0e6117e4e11149d86 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Apr 2022 14:05:31 -0400
+Subject: dlm: replace usage of found with dedicated list iterator variable
+
+From: Jakob Koschel <jakobkoschel@gmail.com>
+
+[ Upstream commit dc1acd5c94699389a9ed023e94dd860c846ea1f6 ]
+
+To move the list iterator variable into the list_for_each_entry_*()
+macro in the future it should be avoided to use the list iterator
+variable after the loop body.
+
+To *never* use the list iterator variable after the loop it was
+concluded to use a separate iterator variable instead of a
+found boolean [1].
+
+This removes the need to use a found variable and simply checking if
+the variable was set, can determine if the break/goto was hit.
+
+Link: https://lore.kernel.org/all/CAHk-=wgRr_D8CB-D9Kg-c=EHreAsk5SqXPwr9Y7k9sA6cWXJ6w@mail.gmail.com/ [1]
+Signed-off-by: Jakob Koschel <jakobkoschel@gmail.com>
+Signed-off-by: Alexander Aring <aahringo@redhat.com>
+Signed-off-by: David Teigland <teigland@redhat.com>
+Stable-dep-of: 57e2c2f2d94c ("fs: dlm: fix mismatch of plock results from userspace")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/dlm/lock.c | 53 +++++++++++++++++++++++-------------------------
+ fs/dlm/plock.c | 24 +++++++++++-----------
+ fs/dlm/recover.c | 39 +++++++++++++++++------------------
+ 3 files changed, 56 insertions(+), 60 deletions(-)
+
+diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
+index d4e204473e76b..0864481d8551c 100644
+--- a/fs/dlm/lock.c
++++ b/fs/dlm/lock.c
+@@ -1858,7 +1858,7 @@ static void del_timeout(struct dlm_lkb *lkb)
+ void dlm_scan_timeout(struct dlm_ls *ls)
+ {
+ struct dlm_rsb *r;
+- struct dlm_lkb *lkb;
++ struct dlm_lkb *lkb = NULL, *iter;
+ int do_cancel, do_warn;
+ s64 wait_us;
+
+@@ -1869,27 +1869,28 @@ void dlm_scan_timeout(struct dlm_ls *ls)
+ do_cancel = 0;
+ do_warn = 0;
+ mutex_lock(&ls->ls_timeout_mutex);
+- list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list) {
++ list_for_each_entry(iter, &ls->ls_timeout, lkb_time_list) {
+
+ wait_us = ktime_to_us(ktime_sub(ktime_get(),
+- lkb->lkb_timestamp));
++ iter->lkb_timestamp));
+
+- if ((lkb->lkb_exflags & DLM_LKF_TIMEOUT) &&
+- wait_us >= (lkb->lkb_timeout_cs * 10000))
++ if ((iter->lkb_exflags & DLM_LKF_TIMEOUT) &&
++ wait_us >= (iter->lkb_timeout_cs * 10000))
+ do_cancel = 1;
+
+- if ((lkb->lkb_flags & DLM_IFL_WATCH_TIMEWARN) &&
++ if ((iter->lkb_flags & DLM_IFL_WATCH_TIMEWARN) &&
+ wait_us >= dlm_config.ci_timewarn_cs * 10000)
+ do_warn = 1;
+
+ if (!do_cancel && !do_warn)
+ continue;
+- hold_lkb(lkb);
++ hold_lkb(iter);
++ lkb = iter;
+ break;
+ }
+ mutex_unlock(&ls->ls_timeout_mutex);
+
+- if (!do_cancel && !do_warn)
++ if (!lkb)
+ break;
+
+ r = lkb->lkb_resource;
+@@ -5243,21 +5244,18 @@ void dlm_recover_waiters_pre(struct dlm_ls *ls)
+
+ static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls)
+ {
+- struct dlm_lkb *lkb;
+- int found = 0;
++ struct dlm_lkb *lkb = NULL, *iter;
+
+ mutex_lock(&ls->ls_waiters_mutex);
+- list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
+- if (lkb->lkb_flags & DLM_IFL_RESEND) {
+- hold_lkb(lkb);
+- found = 1;
++ list_for_each_entry(iter, &ls->ls_waiters, lkb_wait_reply) {
++ if (iter->lkb_flags & DLM_IFL_RESEND) {
++ hold_lkb(iter);
++ lkb = iter;
+ break;
+ }
+ }
+ mutex_unlock(&ls->ls_waiters_mutex);
+
+- if (!found)
+- lkb = NULL;
+ return lkb;
+ }
+
+@@ -5916,37 +5914,36 @@ int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
+ int mode, uint32_t flags, void *name, unsigned int namelen,
+ unsigned long timeout_cs, uint32_t *lkid)
+ {
+- struct dlm_lkb *lkb;
++ struct dlm_lkb *lkb = NULL, *iter;
+ struct dlm_user_args *ua;
+ int found_other_mode = 0;
+- int found = 0;
+ int rv = 0;
+
+ mutex_lock(&ls->ls_orphans_mutex);
+- list_for_each_entry(lkb, &ls->ls_orphans, lkb_ownqueue) {
+- if (lkb->lkb_resource->res_length != namelen)
++ list_for_each_entry(iter, &ls->ls_orphans, lkb_ownqueue) {
++ if (iter->lkb_resource->res_length != namelen)
+ continue;
+- if (memcmp(lkb->lkb_resource->res_name, name, namelen))
++ if (memcmp(iter->lkb_resource->res_name, name, namelen))
+ continue;
+- if (lkb->lkb_grmode != mode) {
++ if (iter->lkb_grmode != mode) {
+ found_other_mode = 1;
+ continue;
+ }
+
+- found = 1;
+- list_del_init(&lkb->lkb_ownqueue);
+- lkb->lkb_flags &= ~DLM_IFL_ORPHAN;
+- *lkid = lkb->lkb_id;
++ lkb = iter;
++ list_del_init(&iter->lkb_ownqueue);
++ iter->lkb_flags &= ~DLM_IFL_ORPHAN;
++ *lkid = iter->lkb_id;
+ break;
+ }
+ mutex_unlock(&ls->ls_orphans_mutex);
+
+- if (!found && found_other_mode) {
++ if (!lkb && found_other_mode) {
+ rv = -EAGAIN;
+ goto out;
+ }
+
+- if (!found) {
++ if (!lkb) {
+ rv = -ENOENT;
+ goto out;
+ }
+diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
+index 5f3643890f1e0..7e26e677c6b24 100644
+--- a/fs/dlm/plock.c
++++ b/fs/dlm/plock.c
+@@ -437,9 +437,9 @@ static ssize_t dev_read(struct file *file, char __user *u, size_t count,
+ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
+ loff_t *ppos)
+ {
++ struct plock_op *op = NULL, *iter;
+ struct dlm_plock_info info;
+- struct plock_op *op;
+- int found = 0, do_callback = 0;
++ int do_callback = 0;
+
+ if (count != sizeof(info))
+ return -EINVAL;
+@@ -451,23 +451,23 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
+ return -EINVAL;
+
+ spin_lock(&ops_lock);
+- list_for_each_entry(op, &recv_list, list) {
+- if (op->info.fsid == info.fsid &&
+- op->info.number == info.number &&
+- op->info.owner == info.owner) {
+- list_del_init(&op->list);
+- memcpy(&op->info, &info, sizeof(info));
+- if (op->data)
++ list_for_each_entry(iter, &recv_list, list) {
++ if (iter->info.fsid == info.fsid &&
++ iter->info.number == info.number &&
++ iter->info.owner == info.owner) {
++ list_del_init(&iter->list);
++ memcpy(&iter->info, &info, sizeof(info));
++ if (iter->data)
+ do_callback = 1;
+ else
+- op->done = 1;
+- found = 1;
++ iter->done = 1;
++ op = iter;
+ break;
+ }
+ }
+ spin_unlock(&ops_lock);
+
+- if (found) {
++ if (op) {
+ if (do_callback)
+ dlm_plock_callback(op);
+ else
+diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c
+index ce2aa54ca2e24..98b710cc9cf30 100644
+--- a/fs/dlm/recover.c
++++ b/fs/dlm/recover.c
+@@ -734,10 +734,9 @@ void dlm_recovered_lock(struct dlm_rsb *r)
+
+ static void recover_lvb(struct dlm_rsb *r)
+ {
+- struct dlm_lkb *lkb, *high_lkb = NULL;
++ struct dlm_lkb *big_lkb = NULL, *iter, *high_lkb = NULL;
+ uint32_t high_seq = 0;
+ int lock_lvb_exists = 0;
+- int big_lock_exists = 0;
+ int lvblen = r->res_ls->ls_lvblen;
+
+ if (!rsb_flag(r, RSB_NEW_MASTER2) &&
+@@ -753,37 +752,37 @@ static void recover_lvb(struct dlm_rsb *r)
+ /* we are the new master, so figure out if VALNOTVALID should
+ be set, and set the rsb lvb from the best lkb available. */
+
+- list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
+- if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
++ list_for_each_entry(iter, &r->res_grantqueue, lkb_statequeue) {
++ if (!(iter->lkb_exflags & DLM_LKF_VALBLK))
+ continue;
+
+ lock_lvb_exists = 1;
+
+- if (lkb->lkb_grmode > DLM_LOCK_CR) {
+- big_lock_exists = 1;
++ if (iter->lkb_grmode > DLM_LOCK_CR) {
++ big_lkb = iter;
+ goto setflag;
+ }
+
+- if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
+- high_lkb = lkb;
+- high_seq = lkb->lkb_lvbseq;
++ if (((int)iter->lkb_lvbseq - (int)high_seq) >= 0) {
++ high_lkb = iter;
++ high_seq = iter->lkb_lvbseq;
+ }
+ }
+
+- list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
+- if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
++ list_for_each_entry(iter, &r->res_convertqueue, lkb_statequeue) {
++ if (!(iter->lkb_exflags & DLM_LKF_VALBLK))
+ continue;
+
+ lock_lvb_exists = 1;
+
+- if (lkb->lkb_grmode > DLM_LOCK_CR) {
+- big_lock_exists = 1;
++ if (iter->lkb_grmode > DLM_LOCK_CR) {
++ big_lkb = iter;
+ goto setflag;
+ }
+
+- if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
+- high_lkb = lkb;
+- high_seq = lkb->lkb_lvbseq;
++ if (((int)iter->lkb_lvbseq - (int)high_seq) >= 0) {
++ high_lkb = iter;
++ high_seq = iter->lkb_lvbseq;
+ }
+ }
+
+@@ -792,7 +791,7 @@ static void recover_lvb(struct dlm_rsb *r)
+ goto out;
+
+ /* lvb is invalidated if only NL/CR locks remain */
+- if (!big_lock_exists)
++ if (!big_lkb)
+ rsb_set_flag(r, RSB_VALNOTVALID);
+
+ if (!r->res_lvbptr) {
+@@ -801,9 +800,9 @@ static void recover_lvb(struct dlm_rsb *r)
+ goto out;
+ }
+
+- if (big_lock_exists) {
+- r->res_lvbseq = lkb->lkb_lvbseq;
+- memcpy(r->res_lvbptr, lkb->lkb_lvbptr, lvblen);
++ if (big_lkb) {
++ r->res_lvbseq = big_lkb->lkb_lvbseq;
++ memcpy(r->res_lvbptr, big_lkb->lkb_lvbptr, lvblen);
+ } else if (high_lkb) {
+ r->res_lvbseq = high_lkb->lkb_lvbseq;
+ memcpy(r->res_lvbptr, high_lkb->lkb_lvbptr, lvblen);
+--
+2.40.1
+
--- /dev/null
+From 1303f55dbef5d11a331f313d26a1595ef37955a4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 27 Apr 2021 11:57:43 -0400
+Subject: dm integrity: increase RECALC_SECTORS to improve recalculate speed
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+[ Upstream commit b1a2b9332050c7ae32a22c2c74bc443e39f37b23 ]
+
+Increase RECALC_SECTORS because it improves recalculate speed slightly
+(from 390kiB/s to 410kiB/s).
+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Stable-dep-of: 6d50eb472593 ("dm integrity: reduce vmalloc space footprint on 32-bit architectures")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/dm-integrity.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index 0a4e440948f0d..eead731a1aeda 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -33,7 +33,7 @@
+ #define MIN_LOG2_INTERLEAVE_SECTORS 3
+ #define MAX_LOG2_INTERLEAVE_SECTORS 31
+ #define METADATA_WORKQUEUE_MAX_ACTIVE 16
+-#define RECALC_SECTORS 8192
++#define RECALC_SECTORS 32768
+ #define RECALC_WRITE_SUPER 16
+
+ /*
+--
+2.40.1
+
--- /dev/null
+From 8189e680756d054154df6e9edca772bc00213044 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 26 Jun 2023 16:44:34 +0200
+Subject: dm integrity: reduce vmalloc space footprint on 32-bit architectures
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+[ Upstream commit 6d50eb4725934fd22f5eeccb401000687c790fd0 ]
+
+It was reported that dm-integrity runs out of vmalloc space on 32-bit
+architectures. On x86, there is only 128MiB vmalloc space and dm-integrity
+consumes it quickly because it has a 64MiB journal and 8MiB recalculate
+buffer.
+
+Fix this by reducing the size of the journal to 4MiB and the size of
+the recalculate buffer to 1MiB, so that multiple dm-integrity devices
+can be created and activated on 32-bit architectures.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/dm-integrity.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index eead731a1aeda..234464c1c050e 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -29,11 +29,11 @@
+ #define DEFAULT_BUFFER_SECTORS 128
+ #define DEFAULT_JOURNAL_WATERMARK 50
+ #define DEFAULT_SYNC_MSEC 10000
+-#define DEFAULT_MAX_JOURNAL_SECTORS 131072
++#define DEFAULT_MAX_JOURNAL_SECTORS (IS_ENABLED(CONFIG_64BIT) ? 131072 : 8192)
+ #define MIN_LOG2_INTERLEAVE_SECTORS 3
+ #define MAX_LOG2_INTERLEAVE_SECTORS 31
+ #define METADATA_WORKQUEUE_MAX_ACTIVE 16
+-#define RECALC_SECTORS 32768
++#define RECALC_SECTORS (IS_ENABLED(CONFIG_64BIT) ? 32768 : 2048)
+ #define RECALC_WRITE_SUPER 16
+
+ /*
+--
+2.40.1
+
--- /dev/null
+From 765c875034bda06d832e76c72d48332e15f8ced3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 20 Jun 2023 17:00:28 -0400
+Subject: drm/amd/display: check TG is non-null before checking if enabled
+
+From: Taimur Hassan <syed.hassan@amd.com>
+
+[ Upstream commit 5a25cefc0920088bb9afafeb80ad3dcd84fe278b ]
+
+[Why & How]
+If there is no TG allocation we can dereference a NULL pointer when
+checking if the TG is enabled.
+
+Cc: Mario Limonciello <mario.limonciello@amd.com>
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Acked-by: Alan Liu <haoping.liu@amd.com>
+Signed-off-by: Taimur Hassan <syed.hassan@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index fc75337aa0a7f..ddec675ba690a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -2529,7 +2529,8 @@ static void dcn10_wait_for_mpcc_disconnect(
+ if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
+ struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
+
+- if (pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
++ if (pipe_ctx->stream_res.tg &&
++ pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
+ res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
+ pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
+ hubp->funcs->set_blank(hubp, true);
+--
+2.40.1
+
--- /dev/null
+From b36db3c1a964a9d7f52a4eae0faf4ee29fc546ba Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 24 Mar 2022 12:08:43 -0400
+Subject: drm/amd/display: do not wait for mpc idle if tg is disabled
+
+From: Josip Pavic <Josip.Pavic@amd.com>
+
+[ Upstream commit 2513ed4f937999c0446fd824f7564f76b697d722 ]
+
+[Why]
+When booting, the driver waits for the MPC idle bit to be set as part of
+pipe initialization. However, on some systems this occurs before OTG is
+enabled, and since the MPC idle bit won't be set until the vupdate
+signal occurs (which requires OTG to be enabled), this never happens and
+the wait times out. This can add hundreds of milliseconds to the boot
+time.
+
+[How]
+Do not wait for mpc idle if tg is disabled
+
+Reviewed-by: Jun Lei <Jun.Lei@amd.com>
+Acked-by: Pavle Kotarac <Pavle.Kotarac@amd.com>
+Signed-off-by: Josip Pavic <Josip.Pavic@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Stable-dep-of: 5a25cefc0920 ("drm/amd/display: check TG is non-null before checking if enabled")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index ead221ccb93e0..fc75337aa0a7f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -2529,7 +2529,8 @@ static void dcn10_wait_for_mpcc_disconnect(
+ if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
+ struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
+
+- res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
++ if (pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
++ res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
+ pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
+ hubp->funcs->set_blank(hubp, true);
+ /*DC_LOG_ERROR(dc->ctx->logger,
+--
+2.40.1
+
--- /dev/null
+From 0020c35a0ab004ee65d5cac87a0e77c318bfedd5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 Mar 2020 15:21:38 +0900
+Subject: exfat: add bitmap operations
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Namjae Jeon <namjae.jeon@samsung.com>
+
+[ Upstream commit 1e49a94cf707204b66a3fb242f2814712c941f52 ]
+
+This adds the implementation of bitmap operations for exfat.
+
+Signed-off-by: Namjae Jeon <namjae.jeon@samsung.com>
+Signed-off-by: Sungjong Seo <sj1557.seo@samsung.com>
+Reviewed-by: Pali Rohár <pali.rohar@gmail.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Stable-dep-of: daf60d6cca26 ("exfat: use kvmalloc_array/kvfree instead of kmalloc_array/kfree")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/exfat/balloc.c | 280 ++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 280 insertions(+)
+ create mode 100644 fs/exfat/balloc.c
+
+diff --git a/fs/exfat/balloc.c b/fs/exfat/balloc.c
+new file mode 100644
+index 0000000000000..6a04cc02565a1
+--- /dev/null
++++ b/fs/exfat/balloc.c
+@@ -0,0 +1,280 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
++ */
++
++#include <linux/blkdev.h>
++#include <linux/slab.h>
++#include <linux/buffer_head.h>
++
++#include "exfat_raw.h"
++#include "exfat_fs.h"
++
++static const unsigned char free_bit[] = {
++ 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2,/* 0 ~ 19*/
++ 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3,/* 20 ~ 39*/
++ 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2,/* 40 ~ 59*/
++ 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4,/* 60 ~ 79*/
++ 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2,/* 80 ~ 99*/
++ 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3,/*100 ~ 119*/
++ 0, 1, 0, 2, 0, 1, 0, 7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2,/*120 ~ 139*/
++ 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5,/*140 ~ 159*/
++ 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2,/*160 ~ 179*/
++ 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3,/*180 ~ 199*/
++ 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2,/*200 ~ 219*/
++ 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4,/*220 ~ 239*/
++ 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0 /*240 ~ 254*/
++};
++
++static const unsigned char used_bit[] = {
++ 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3,/* 0 ~ 19*/
++ 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 1, 2, 2, 3, 2, 3, 3, 4,/* 20 ~ 39*/
++ 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5,/* 40 ~ 59*/
++ 4, 5, 5, 6, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,/* 60 ~ 79*/
++ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4,/* 80 ~ 99*/
++ 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6,/*100 ~ 119*/
++ 4, 5, 5, 6, 5, 6, 6, 7, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4,/*120 ~ 139*/
++ 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,/*140 ~ 159*/
++ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5,/*160 ~ 179*/
++ 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 2, 3, 3, 4, 3, 4, 4, 5,/*180 ~ 199*/
++ 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6,/*200 ~ 219*/
++ 5, 6, 6, 7, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,/*220 ~ 239*/
++ 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8 /*240 ~ 255*/
++};
++
++/*
++ * Allocation Bitmap Management Functions
++ */
++static int exfat_allocate_bitmap(struct super_block *sb,
++ struct exfat_dentry *ep)
++{
++ struct exfat_sb_info *sbi = EXFAT_SB(sb);
++ long long map_size;
++ unsigned int i, need_map_size;
++ sector_t sector;
++
++ sbi->map_clu = le32_to_cpu(ep->dentry.bitmap.start_clu);
++ map_size = le64_to_cpu(ep->dentry.bitmap.size);
++ need_map_size = ((EXFAT_DATA_CLUSTER_COUNT(sbi) - 1) / BITS_PER_BYTE)
++ + 1;
++ if (need_map_size != map_size) {
++ exfat_msg(sb, KERN_ERR,
++ "bogus allocation bitmap size(need : %u, cur : %lld)",
++ need_map_size, map_size);
++ /*
++ * Only allowed when bogus allocation
++ * bitmap size is large
++ */
++ if (need_map_size > map_size)
++ return -EIO;
++ }
++ sbi->map_sectors = ((need_map_size - 1) >>
++ (sb->s_blocksize_bits)) + 1;
++ sbi->vol_amap = kmalloc_array(sbi->map_sectors,
++ sizeof(struct buffer_head *), GFP_KERNEL);
++ if (!sbi->vol_amap)
++ return -ENOMEM;
++
++ sector = exfat_cluster_to_sector(sbi, sbi->map_clu);
++ for (i = 0; i < sbi->map_sectors; i++) {
++ sbi->vol_amap[i] = sb_bread(sb, sector + i);
++ if (!sbi->vol_amap[i]) {
++ /* release all buffers and free vol_amap */
++ int j = 0;
++
++ while (j < i)
++ brelse(sbi->vol_amap[j++]);
++
++ kfree(sbi->vol_amap);
++ sbi->vol_amap = NULL;
++ return -EIO;
++ }
++ }
++
++ sbi->pbr_bh = NULL;
++ return 0;
++}
++
++int exfat_load_bitmap(struct super_block *sb)
++{
++ unsigned int i, type;
++ struct exfat_chain clu;
++ struct exfat_sb_info *sbi = EXFAT_SB(sb);
++
++ exfat_chain_set(&clu, sbi->root_dir, 0, ALLOC_FAT_CHAIN);
++ while (clu.dir != EXFAT_EOF_CLUSTER) {
++ for (i = 0; i < sbi->dentries_per_clu; i++) {
++ struct exfat_dentry *ep;
++ struct buffer_head *bh;
++
++ ep = exfat_get_dentry(sb, &clu, i, &bh, NULL);
++ if (!ep)
++ return -EIO;
++
++ type = exfat_get_entry_type(ep);
++ if (type == TYPE_UNUSED)
++ break;
++ if (type != TYPE_BITMAP)
++ continue;
++ if (ep->dentry.bitmap.flags == 0x0) {
++ int err;
++
++ err = exfat_allocate_bitmap(sb, ep);
++ brelse(bh);
++ return err;
++ }
++ brelse(bh);
++ }
++
++ if (exfat_get_next_cluster(sb, &clu.dir))
++ return -EIO;
++ }
++
++ return -EINVAL;
++}
++
++void exfat_free_bitmap(struct exfat_sb_info *sbi)
++{
++ int i;
++
++ brelse(sbi->pbr_bh);
++
++ for (i = 0; i < sbi->map_sectors; i++)
++ __brelse(sbi->vol_amap[i]);
++
++ kfree(sbi->vol_amap);
++}
++
++/*
++ * If the value of "clu" is 0, it means cluster 2 which is the first cluster of
++ * the cluster heap.
++ */
++int exfat_set_bitmap(struct inode *inode, unsigned int clu)
++{
++ int i, b;
++ unsigned int ent_idx;
++ struct super_block *sb = inode->i_sb;
++ struct exfat_sb_info *sbi = EXFAT_SB(sb);
++
++ WARN_ON(clu < EXFAT_FIRST_CLUSTER);
++ ent_idx = CLUSTER_TO_BITMAP_ENT(clu);
++ i = BITMAP_OFFSET_SECTOR_INDEX(sb, ent_idx);
++ b = BITMAP_OFFSET_BIT_IN_SECTOR(sb, ent_idx);
++
++ set_bit_le(b, sbi->vol_amap[i]->b_data);
++ exfat_update_bh(sb, sbi->vol_amap[i], IS_DIRSYNC(inode));
++ return 0;
++}
++
++/*
++ * If the value of "clu" is 0, it means cluster 2 which is the first cluster of
++ * the cluster heap.
++ */
++void exfat_clear_bitmap(struct inode *inode, unsigned int clu)
++{
++ int i, b;
++ unsigned int ent_idx;
++ struct super_block *sb = inode->i_sb;
++ struct exfat_sb_info *sbi = EXFAT_SB(sb);
++ struct exfat_mount_options *opts = &sbi->options;
++
++ WARN_ON(clu < EXFAT_FIRST_CLUSTER);
++ ent_idx = CLUSTER_TO_BITMAP_ENT(clu);
++ i = BITMAP_OFFSET_SECTOR_INDEX(sb, ent_idx);
++ b = BITMAP_OFFSET_BIT_IN_SECTOR(sb, ent_idx);
++
++ clear_bit_le(b, sbi->vol_amap[i]->b_data);
++ exfat_update_bh(sb, sbi->vol_amap[i], IS_DIRSYNC(inode));
++
++ if (opts->discard) {
++ int ret_discard;
++
++ ret_discard = sb_issue_discard(sb,
++ exfat_cluster_to_sector(sbi, clu +
++ EXFAT_RESERVED_CLUSTERS),
++ (1 << sbi->sect_per_clus_bits), GFP_NOFS, 0);
++
++ if (ret_discard == -EOPNOTSUPP) {
++ exfat_msg(sb, KERN_ERR,
++ "discard not supported by device, disabling");
++ opts->discard = 0;
++ }
++ }
++}
++
++/*
++ * If the value of "clu" is 0, it means cluster 2 which is the first cluster of
++ * the cluster heap.
++ */
++unsigned int exfat_find_free_bitmap(struct super_block *sb, unsigned int clu)
++{
++ unsigned int i, map_i, map_b, ent_idx;
++ unsigned int clu_base, clu_free;
++ unsigned char k, clu_mask;
++ struct exfat_sb_info *sbi = EXFAT_SB(sb);
++
++ WARN_ON(clu < EXFAT_FIRST_CLUSTER);
++ ent_idx = CLUSTER_TO_BITMAP_ENT(clu);
++ clu_base = BITMAP_ENT_TO_CLUSTER(ent_idx & ~(BITS_PER_BYTE_MASK));
++ clu_mask = IGNORED_BITS_REMAINED(clu, clu_base);
++
++ map_i = BITMAP_OFFSET_SECTOR_INDEX(sb, ent_idx);
++ map_b = BITMAP_OFFSET_BYTE_IN_SECTOR(sb, ent_idx);
++
++ for (i = EXFAT_FIRST_CLUSTER; i < sbi->num_clusters;
++ i += BITS_PER_BYTE) {
++ k = *(sbi->vol_amap[map_i]->b_data + map_b);
++ if (clu_mask > 0) {
++ k |= clu_mask;
++ clu_mask = 0;
++ }
++ if (k < 0xFF) {
++ clu_free = clu_base + free_bit[k];
++ if (clu_free < sbi->num_clusters)
++ return clu_free;
++ }
++ clu_base += BITS_PER_BYTE;
++
++ if (++map_b >= sb->s_blocksize ||
++ clu_base >= sbi->num_clusters) {
++ if (++map_i >= sbi->map_sectors) {
++ clu_base = EXFAT_FIRST_CLUSTER;
++ map_i = 0;
++ }
++ map_b = 0;
++ }
++ }
++
++ return EXFAT_EOF_CLUSTER;
++}
++
++int exfat_count_used_clusters(struct super_block *sb, unsigned int *ret_count)
++{
++ struct exfat_sb_info *sbi = EXFAT_SB(sb);
++ unsigned int count = 0;
++ unsigned int i, map_i = 0, map_b = 0;
++ unsigned int total_clus = EXFAT_DATA_CLUSTER_COUNT(sbi);
++ unsigned int last_mask = total_clus & BITS_PER_BYTE_MASK;
++ unsigned char clu_bits;
++ const unsigned char last_bit_mask[] = {0, 0b00000001, 0b00000011,
++ 0b00000111, 0b00001111, 0b00011111, 0b00111111, 0b01111111};
++
++ total_clus &= ~last_mask;
++ for (i = 0; i < total_clus; i += BITS_PER_BYTE) {
++ clu_bits = *(sbi->vol_amap[map_i]->b_data + map_b);
++ count += used_bit[clu_bits];
++ if (++map_b >= (unsigned int)sb->s_blocksize) {
++ map_i++;
++ map_b = 0;
++ }
++ }
++
++ if (last_mask) {
++ clu_bits = *(sbi->vol_amap[map_i]->b_data + map_b);
++ clu_bits &= last_bit_mask[last_mask];
++ count += used_bit[clu_bits];
++ }
++
++ *ret_count = count;
++ return 0;
++}
+--
+2.40.1
+
--- /dev/null
+From 08e0003605d52d1c70788d8ca2a531cd4aff5a3f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 5 Jul 2023 15:15:15 +0800
+Subject: exfat: use kvmalloc_array/kvfree instead of kmalloc_array/kfree
+
+From: gaoming <gaoming20@hihonor.com>
+
+[ Upstream commit daf60d6cca26e50d65dac374db92e58de745ad26 ]
+
+The call stack shown below is a scenario in the Linux 4.19 kernel.
+Allocating memory failed where exfat fs use kmalloc_array due to
+system memory fragmentation, while the u-disk was inserted without
+recognition.
+Devices such as u-disk using the exfat file system are pluggable and
+may be insert into the system at any time.
+However, long-term running systems cannot guarantee the continuity of
+physical memory. Therefore, it's necessary to address this issue.
+
+Binder:2632_6: page allocation failure: order:4,
+ mode:0x6040c0(GFP_KERNEL|__GFP_COMP), nodemask=(null)
+Call trace:
+[242178.097582] dump_backtrace+0x0/0x4
+[242178.097589] dump_stack+0xf4/0x134
+[242178.097598] warn_alloc+0xd8/0x144
+[242178.097603] __alloc_pages_nodemask+0x1364/0x1384
+[242178.097608] kmalloc_order+0x2c/0x510
+[242178.097612] kmalloc_order_trace+0x40/0x16c
+[242178.097618] __kmalloc+0x360/0x408
+[242178.097624] load_alloc_bitmap+0x160/0x284
+[242178.097628] exfat_fill_super+0xa3c/0xe7c
+[242178.097635] mount_bdev+0x2e8/0x3a0
+[242178.097638] exfat_fs_mount+0x40/0x50
+[242178.097643] mount_fs+0x138/0x2e8
+[242178.097649] vfs_kern_mount+0x90/0x270
+[242178.097655] do_mount+0x798/0x173c
+[242178.097659] ksys_mount+0x114/0x1ac
+[242178.097665] __arm64_sys_mount+0x24/0x34
+[242178.097671] el0_svc_common+0xb8/0x1b8
+[242178.097676] el0_svc_handler+0x74/0x90
+[242178.097681] el0_svc+0x8/0x340
+
+By analyzing the exfat code,we found that continuous physical memory
+is not required here,so kvmalloc_array is used can solve this problem.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: gaoming <gaoming20@hihonor.com>
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/exfat/balloc.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/fs/exfat/balloc.c b/fs/exfat/balloc.c
+index 6a04cc02565a1..322aa78c50fb4 100644
+--- a/fs/exfat/balloc.c
++++ b/fs/exfat/balloc.c
+@@ -70,7 +70,7 @@ static int exfat_allocate_bitmap(struct super_block *sb,
+ }
+ sbi->map_sectors = ((need_map_size - 1) >>
+ (sb->s_blocksize_bits)) + 1;
+- sbi->vol_amap = kmalloc_array(sbi->map_sectors,
++ sbi->vol_amap = kvmalloc_array(sbi->map_sectors,
+ sizeof(struct buffer_head *), GFP_KERNEL);
+ if (!sbi->vol_amap)
+ return -ENOMEM;
+@@ -85,7 +85,7 @@ static int exfat_allocate_bitmap(struct super_block *sb,
+ while (j < i)
+ brelse(sbi->vol_amap[j++]);
+
+- kfree(sbi->vol_amap);
++ kvfree(sbi->vol_amap);
+ sbi->vol_amap = NULL;
+ return -EIO;
+ }
+@@ -142,7 +142,7 @@ void exfat_free_bitmap(struct exfat_sb_info *sbi)
+ for (i = 0; i < sbi->map_sectors; i++)
+ __brelse(sbi->vol_amap[i]);
+
+- kfree(sbi->vol_amap);
++ kvfree(sbi->vol_amap);
+ }
+
+ /*
+--
+2.40.1
+
--- /dev/null
+From 20784bd62f794b875d93f596c7cce2f36950944f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 25 Jun 2023 00:16:49 +0800
+Subject: fbdev: fix potential OOB read in fast_imageblit()
+
+From: Zhang Shurong <zhang_shurong@foxmail.com>
+
+[ Upstream commit c2d22806aecb24e2de55c30a06e5d6eb297d161d ]
+
+There is a potential OOB read at fast_imageblit, for
+"colortab[(*src >> 4)]" can become a negative value due to
+"const char *s = image->data, *src".
+This change makes sure the index for colortab always positive
+or zero.
+
+Similar commit:
+https://patchwork.kernel.org/patch/11746067
+
+Potential bug report:
+https://groups.google.com/g/syzkaller-bugs/c/9ubBXKeKXf4/m/k-QXy4UgAAAJ
+
+Signed-off-by: Zhang Shurong <zhang_shurong@foxmail.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/video/fbdev/core/sysimgblt.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/video/fbdev/core/sysimgblt.c b/drivers/video/fbdev/core/sysimgblt.c
+index 335e92b813fc4..665ef7a0a2495 100644
+--- a/drivers/video/fbdev/core/sysimgblt.c
++++ b/drivers/video/fbdev/core/sysimgblt.c
+@@ -189,7 +189,7 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
+ u32 fgx = fgcolor, bgx = bgcolor, bpp = p->var.bits_per_pixel;
+ u32 ppw = 32/bpp, spitch = (image->width + 7)/8;
+ u32 bit_mask, eorx, shift;
+- const char *s = image->data, *src;
++ const u8 *s = image->data, *src;
+ u32 *dst;
+ const u32 *tab;
+ size_t tablen;
+--
+2.40.1
+
--- /dev/null
+From e1cca0a03c04260fd928c958d3b55a9d8dbd61b4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 13 Mar 2022 20:29:51 +0100
+Subject: fbdev: Fix sys_imageblit() for arbitrary image widths
+
+From: Thomas Zimmermann <tzimmermann@suse.de>
+
+[ Upstream commit 61bfcb6a3b981e8f19e044ac8c3de6edbe6caf70 ]
+
+Commit 6f29e04938bf ("fbdev: Improve performance of sys_imageblit()")
+broke sys_imageblit() for image width that are not aligned to 8-bit
+boundaries. Fix this by handling the trailing pixels on each line
+separately. The performance improvements in the original commit do not
+regress by this change.
+
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Fixes: 6f29e04938bf ("fbdev: Improve performance of sys_imageblit()")
+Reviewed-by: Javier Martinez Canillas <javierm@redhat.com>
+Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Tested-by: Geert Uytterhoeven <geert@linux-m68k.org>
+Cc: Thomas Zimmermann <tzimmermann@suse.de>
+Cc: Javier Martinez Canillas <javierm@redhat.com>
+Cc: Sam Ravnborg <sam@ravnborg.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20220313192952.12058-2-tzimmermann@suse.de
+Stable-dep-of: c2d22806aecb ("fbdev: fix potential OOB read in fast_imageblit()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/video/fbdev/core/sysimgblt.c | 29 ++++++++++++++++++++++++----
+ 1 file changed, 25 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/video/fbdev/core/sysimgblt.c b/drivers/video/fbdev/core/sysimgblt.c
+index 722c327a381bd..335e92b813fc4 100644
+--- a/drivers/video/fbdev/core/sysimgblt.c
++++ b/drivers/video/fbdev/core/sysimgblt.c
+@@ -188,7 +188,7 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
+ {
+ u32 fgx = fgcolor, bgx = bgcolor, bpp = p->var.bits_per_pixel;
+ u32 ppw = 32/bpp, spitch = (image->width + 7)/8;
+- u32 bit_mask, eorx;
++ u32 bit_mask, eorx, shift;
+ const char *s = image->data, *src;
+ u32 *dst;
+ const u32 *tab;
+@@ -229,17 +229,23 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
+
+ for (i = image->height; i--; ) {
+ dst = dst1;
++ shift = 8;
+ src = s;
+
++ /*
++ * Manually unroll the per-line copying loop for better
++ * performance. This works until we processed the last
++ * completely filled source byte (inclusive).
++ */
+ switch (ppw) {
+ case 4: /* 8 bpp */
+- for (j = k; j; j -= 2, ++src) {
++ for (j = k; j >= 2; j -= 2, ++src) {
+ *dst++ = colortab[(*src >> 4) & bit_mask];
+ *dst++ = colortab[(*src >> 0) & bit_mask];
+ }
+ break;
+ case 2: /* 16 bpp */
+- for (j = k; j; j -= 4, ++src) {
++ for (j = k; j >= 4; j -= 4, ++src) {
+ *dst++ = colortab[(*src >> 6) & bit_mask];
+ *dst++ = colortab[(*src >> 4) & bit_mask];
+ *dst++ = colortab[(*src >> 2) & bit_mask];
+@@ -247,7 +253,7 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
+ }
+ break;
+ case 1: /* 32 bpp */
+- for (j = k; j; j -= 8, ++src) {
++ for (j = k; j >= 8; j -= 8, ++src) {
+ *dst++ = colortab[(*src >> 7) & bit_mask];
+ *dst++ = colortab[(*src >> 6) & bit_mask];
+ *dst++ = colortab[(*src >> 5) & bit_mask];
+@@ -259,6 +265,21 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
+ }
+ break;
+ }
++
++ /*
++ * For image widths that are not a multiple of 8, there
++ * are trailing pixels left on the current line. Print
++ * them as well.
++ */
++ for (; j--; ) {
++ shift -= ppw;
++ *dst++ = colortab[(*src >> shift) & bit_mask];
++ if (!shift) {
++ shift = 8;
++ ++src;
++ }
++ }
++
+ dst1 += p->fix.line_length;
+ s += spitch;
+ }
+--
+2.40.1
+
--- /dev/null
+From 6b9d53b783ef66a3f3f12c1f018ad9822a2c9e39 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Feb 2022 20:38:01 +0100
+Subject: fbdev: Improve performance of sys_imageblit()
+
+From: Thomas Zimmermann <tzimmermann@suse.de>
+
+[ Upstream commit 6f29e04938bf509fccfad490a74284cf158891ce ]
+
+Improve the performance of sys_imageblit() by manually unrolling
+the inner blitting loop and moving some invariants out. The compiler
+failed to do this automatically. The resulting binary code was even
+slower than the cfb_imageblit() helper, which uses the same algorithm,
+but operates on I/O memory.
+
+A microbenchmark measures the average number of CPU cycles
+for sys_imageblit() after a stabilizing period of a few minutes
+(i7-4790, FullHD, simpledrm, kernel with debugging). The value
+for CFB is given as a reference.
+
+ sys_imageblit(), new: 25934 cycles
+ sys_imageblit(), old: 35944 cycles
+ cfb_imageblit(): 30566 cycles
+
+In the optimized case, sys_imageblit() is now ~30% faster than before
+and ~20% faster than cfb_imageblit().
+
+v2:
+ * move switch out of inner loop (Gerd)
+ * remove test for alignment of dst1 (Sam)
+
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Reviewed-by: Javier Martinez Canillas <javierm@redhat.com>
+Acked-by: Sam Ravnborg <sam@ravnborg.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20220223193804.18636-3-tzimmermann@suse.de
+Stable-dep-of: c2d22806aecb ("fbdev: fix potential OOB read in fast_imageblit()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/video/fbdev/core/sysimgblt.c | 49 +++++++++++++++++++++-------
+ 1 file changed, 38 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/video/fbdev/core/sysimgblt.c b/drivers/video/fbdev/core/sysimgblt.c
+index a4d05b1b17d7d..722c327a381bd 100644
+--- a/drivers/video/fbdev/core/sysimgblt.c
++++ b/drivers/video/fbdev/core/sysimgblt.c
+@@ -188,23 +188,29 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
+ {
+ u32 fgx = fgcolor, bgx = bgcolor, bpp = p->var.bits_per_pixel;
+ u32 ppw = 32/bpp, spitch = (image->width + 7)/8;
+- u32 bit_mask, end_mask, eorx, shift;
++ u32 bit_mask, eorx;
+ const char *s = image->data, *src;
+ u32 *dst;
+- const u32 *tab = NULL;
++ const u32 *tab;
++ size_t tablen;
++ u32 colortab[16];
+ int i, j, k;
+
+ switch (bpp) {
+ case 8:
+ tab = fb_be_math(p) ? cfb_tab8_be : cfb_tab8_le;
++ tablen = 16;
+ break;
+ case 16:
+ tab = fb_be_math(p) ? cfb_tab16_be : cfb_tab16_le;
++ tablen = 4;
+ break;
+ case 32:
+- default:
+ tab = cfb_tab32;
++ tablen = 2;
+ break;
++ default:
++ return;
+ }
+
+ for (i = ppw-1; i--; ) {
+@@ -218,19 +224,40 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
+ eorx = fgx ^ bgx;
+ k = image->width/ppw;
+
++ for (i = 0; i < tablen; ++i)
++ colortab[i] = (tab[i] & eorx) ^ bgx;
++
+ for (i = image->height; i--; ) {
+ dst = dst1;
+- shift = 8;
+ src = s;
+
+- for (j = k; j--; ) {
+- shift -= ppw;
+- end_mask = tab[(*src >> shift) & bit_mask];
+- *dst++ = (end_mask & eorx) ^ bgx;
+- if (!shift) {
+- shift = 8;
+- src++;
++ switch (ppw) {
++ case 4: /* 8 bpp */
++ for (j = k; j; j -= 2, ++src) {
++ *dst++ = colortab[(*src >> 4) & bit_mask];
++ *dst++ = colortab[(*src >> 0) & bit_mask];
++ }
++ break;
++ case 2: /* 16 bpp */
++ for (j = k; j; j -= 4, ++src) {
++ *dst++ = colortab[(*src >> 6) & bit_mask];
++ *dst++ = colortab[(*src >> 4) & bit_mask];
++ *dst++ = colortab[(*src >> 2) & bit_mask];
++ *dst++ = colortab[(*src >> 0) & bit_mask];
++ }
++ break;
++ case 1: /* 32 bpp */
++ for (j = k; j; j -= 8, ++src) {
++ *dst++ = colortab[(*src >> 7) & bit_mask];
++ *dst++ = colortab[(*src >> 6) & bit_mask];
++ *dst++ = colortab[(*src >> 5) & bit_mask];
++ *dst++ = colortab[(*src >> 4) & bit_mask];
++ *dst++ = colortab[(*src >> 3) & bit_mask];
++ *dst++ = colortab[(*src >> 2) & bit_mask];
++ *dst++ = colortab[(*src >> 1) & bit_mask];
++ *dst++ = colortab[(*src >> 0) & bit_mask];
+ }
++ break;
+ }
+ dst1 += p->fix.line_length;
+ s += spitch;
+--
+2.40.1
+
--- /dev/null
+From c63570b6a50a93808042b515bdb8866785f1350f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Jun 2022 14:45:06 -0400
+Subject: fs: dlm: add pid to debug log
+
+From: Alexander Aring <aahringo@redhat.com>
+
+[ Upstream commit 19d7ca051d303622c423b4cb39e6bde5d177328b ]
+
+This patch adds the pid information which requested the lock operation
+to the debug log output.
+
+Signed-off-by: Alexander Aring <aahringo@redhat.com>
+Signed-off-by: David Teigland <teigland@redhat.com>
+Stable-dep-of: 57e2c2f2d94c ("fs: dlm: fix mismatch of plock results from userspace")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/dlm/plock.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
+index 7e26e677c6b24..254d20eb6f4fd 100644
+--- a/fs/dlm/plock.c
++++ b/fs/dlm/plock.c
+@@ -167,9 +167,9 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+ spin_lock(&ops_lock);
+ list_del(&op->list);
+ spin_unlock(&ops_lock);
+- log_print("%s: wait interrupted %x %llx, op removed",
++ log_print("%s: wait interrupted %x %llx pid %d, op removed",
+ __func__, ls->ls_global_id,
+- (unsigned long long)number);
++ (unsigned long long)number, op->info.pid);
+ dlm_release_plock_op(op);
+ do_unlock_close(ls, number, file, fl);
+ goto out;
+--
+2.40.1
+
--- /dev/null
+From 3f9c48507b8942f7fb682f14fe623519bd3d2c04 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Jun 2022 14:45:05 -0400
+Subject: fs: dlm: change plock interrupted message to debug again
+
+From: Alexander Aring <aahringo@redhat.com>
+
+[ Upstream commit ea06d4cabf529eefbe7e89e3a8325f1f89355ccd ]
+
+This patch reverses the commit bcfad4265ced ("dlm: improve plock logging
+if interrupted") by moving it to debug level and notifying the user an op
+was removed.
+
+Signed-off-by: Alexander Aring <aahringo@redhat.com>
+Signed-off-by: David Teigland <teigland@redhat.com>
+Stable-dep-of: 57e2c2f2d94c ("fs: dlm: fix mismatch of plock results from userspace")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/dlm/plock.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
+index 254d20eb6f4fd..7c9e873a01b78 100644
+--- a/fs/dlm/plock.c
++++ b/fs/dlm/plock.c
+@@ -167,7 +167,7 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+ spin_lock(&ops_lock);
+ list_del(&op->list);
+ spin_unlock(&ops_lock);
+- log_print("%s: wait interrupted %x %llx pid %d, op removed",
++ log_debug(ls, "%s: wait interrupted %x %llx pid %d",
+ __func__, ls->ls_global_id,
+ (unsigned long long)number, op->info.pid);
+ dlm_release_plock_op(op);
+@@ -473,7 +473,7 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
+ else
+ wake_up(&recv_wq);
+ } else
+- log_print("%s: no op %x %llx - may got interrupted?", __func__,
++ log_print("%s: no op %x %llx", __func__,
+ info.fsid, (unsigned long long)info.number);
+ return count;
+ }
+--
+2.40.1
+
--- /dev/null
+From a9bd5b2d7fffe5d63a9066fcb1a16fa81ead8987 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 May 2023 12:02:04 -0400
+Subject: fs: dlm: fix mismatch of plock results from userspace
+
+From: Alexander Aring <aahringo@redhat.com>
+
+[ Upstream commit 57e2c2f2d94cfd551af91cedfa1af6d972487197 ]
+
+When a waiting plock request (F_SETLKW) is sent to userspace
+for processing (dlm_controld), the result is returned at a
+later time. That result could be incorrectly matched to a
+different waiting request in cases where the owner field is
+the same (e.g. different threads in a process.) This is fixed
+by comparing all the properties in the request and reply.
+
+The results for non-waiting plock requests are now matched
+based on list order because the results are returned in the
+same order they were sent.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Alexander Aring <aahringo@redhat.com>
+Signed-off-by: David Teigland <teigland@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/dlm/plock.c | 58 +++++++++++++++++++++++++++++++++++++++-----------
+ 1 file changed, 45 insertions(+), 13 deletions(-)
+
+diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
+index 4a5452fd87cb0..0501821182b1e 100644
+--- a/fs/dlm/plock.c
++++ b/fs/dlm/plock.c
+@@ -408,7 +408,7 @@ static ssize_t dev_read(struct file *file, char __user *u, size_t count,
+ if (op->info.flags & DLM_PLOCK_FL_CLOSE)
+ list_del(&op->list);
+ else
+- list_move(&op->list, &recv_list);
++ list_move_tail(&op->list, &recv_list);
+ memcpy(&info, &op->info, sizeof(info));
+ }
+ spin_unlock(&ops_lock);
+@@ -446,20 +446,52 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
+ if (check_version(&info))
+ return -EINVAL;
+
++ /*
++ * The results for waiting ops (SETLKW) can be returned in any
++ * order, so match all fields to find the op. The results for
++ * non-waiting ops are returned in the order that they were sent
++ * to userspace, so match the result with the first non-waiting op.
++ */
+ spin_lock(&ops_lock);
+- list_for_each_entry(iter, &recv_list, list) {
+- if (iter->info.fsid == info.fsid &&
+- iter->info.number == info.number &&
+- iter->info.owner == info.owner) {
+- list_del_init(&iter->list);
+- memcpy(&iter->info, &info, sizeof(info));
+- if (iter->data)
+- do_callback = 1;
+- else
+- iter->done = 1;
+- op = iter;
+- break;
++ if (info.wait) {
++ list_for_each_entry(iter, &recv_list, list) {
++ if (iter->info.fsid == info.fsid &&
++ iter->info.number == info.number &&
++ iter->info.owner == info.owner &&
++ iter->info.pid == info.pid &&
++ iter->info.start == info.start &&
++ iter->info.end == info.end &&
++ iter->info.ex == info.ex &&
++ iter->info.wait) {
++ op = iter;
++ break;
++ }
+ }
++ } else {
++ list_for_each_entry(iter, &recv_list, list) {
++ if (!iter->info.wait) {
++ op = iter;
++ break;
++ }
++ }
++ }
++
++ if (op) {
++ /* Sanity check that op and info match. */
++ if (info.wait)
++ WARN_ON(op->info.optype != DLM_PLOCK_OP_LOCK);
++ else
++ WARN_ON(op->info.fsid != info.fsid ||
++ op->info.number != info.number ||
++ op->info.owner != info.owner ||
++ op->info.optype != info.optype);
++
++ list_del_init(&op->list);
++ memcpy(&op->info, &info, sizeof(info));
++ if (op->data)
++ do_callback = 1;
++ else
++ op->done = 1;
+ }
+ spin_unlock(&ops_lock);
+
+--
+2.40.1
+
--- /dev/null
+From d1478487ec567e20780a4e93d16865e8cfd9f269 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Jun 2022 14:45:08 -0400
+Subject: fs: dlm: use dlm_plock_info for do_unlock_close
+
+From: Alexander Aring <aahringo@redhat.com>
+
+[ Upstream commit 4d413ae9ced4180c0e2114553c3a7560b509b0f8 ]
+
+This patch refactors do_unlock_close() by using only struct dlm_plock_info
+as a parameter.
+
+Signed-off-by: Alexander Aring <aahringo@redhat.com>
+Signed-off-by: David Teigland <teigland@redhat.com>
+Stable-dep-of: 57e2c2f2d94c ("fs: dlm: fix mismatch of plock results from userspace")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/dlm/plock.c | 16 ++++++----------
+ 1 file changed, 6 insertions(+), 10 deletions(-)
+
+diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
+index 7c9e873a01b78..4a5452fd87cb0 100644
+--- a/fs/dlm/plock.c
++++ b/fs/dlm/plock.c
+@@ -83,8 +83,7 @@ static void send_op(struct plock_op *op)
+ abandoned waiter. So, we have to insert the unlock-close when the
+ lock call is interrupted. */
+
+-static void do_unlock_close(struct dlm_ls *ls, u64 number,
+- struct file *file, struct file_lock *fl)
++static void do_unlock_close(const struct dlm_plock_info *info)
+ {
+ struct plock_op *op;
+
+@@ -93,15 +92,12 @@ static void do_unlock_close(struct dlm_ls *ls, u64 number,
+ return;
+
+ op->info.optype = DLM_PLOCK_OP_UNLOCK;
+- op->info.pid = fl->fl_pid;
+- op->info.fsid = ls->ls_global_id;
+- op->info.number = number;
++ op->info.pid = info->pid;
++ op->info.fsid = info->fsid;
++ op->info.number = info->number;
+ op->info.start = 0;
+ op->info.end = OFFSET_MAX;
+- if (fl->fl_lmops && fl->fl_lmops->lm_grant)
+- op->info.owner = (__u64) fl->fl_pid;
+- else
+- op->info.owner = (__u64)(long) fl->fl_owner;
++ op->info.owner = info->owner;
+
+ op->info.flags |= DLM_PLOCK_FL_CLOSE;
+ send_op(op);
+@@ -171,7 +167,7 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+ __func__, ls->ls_global_id,
+ (unsigned long long)number, op->info.pid);
+ dlm_release_plock_op(op);
+- do_unlock_close(ls, number, file, fl);
++ do_unlock_close(&op->info);
+ goto out;
+ }
+
+--
+2.40.1
+
--- /dev/null
+From 88eb468bbb034a33af34737cdc8c3441c48a9e13 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 21 Aug 2023 10:19:27 -0700
+Subject: igb: Avoid starting unnecessary workqueues
+
+From: Alessio Igor Bogani <alessio.bogani@elettra.eu>
+
+[ Upstream commit b888c510f7b3d64ca75fc0f43b4a4bd1a611312f ]
+
+If ptp_clock_register() fails or CONFIG_PTP isn't enabled, avoid starting
+PTP related workqueues.
+
+In this way we can fix this:
+ BUG: unable to handle page fault for address: ffffc9000440b6f8
+ #PF: supervisor read access in kernel mode
+ #PF: error_code(0x0000) - not-present page
+ PGD 100000067 P4D 100000067 PUD 1001e0067 PMD 107dc5067 PTE 0
+ Oops: 0000 [#1] PREEMPT SMP
+ [...]
+ Workqueue: events igb_ptp_overflow_check
+ RIP: 0010:igb_rd32+0x1f/0x60
+ [...]
+ Call Trace:
+ igb_ptp_read_82580+0x20/0x50
+ timecounter_read+0x15/0x60
+ igb_ptp_overflow_check+0x1a/0x50
+ process_one_work+0x1cb/0x3c0
+ worker_thread+0x53/0x3f0
+ ? rescuer_thread+0x370/0x370
+ kthread+0x142/0x160
+ ? kthread_associate_blkcg+0xc0/0xc0
+ ret_from_fork+0x1f/0x30
+
+Fixes: 1f6e8178d685 ("igb: Prevent dropped Tx timestamps via work items and interrupts.")
+Fixes: d339b1331616 ("igb: add PTP Hardware Clock code")
+Signed-off-by: Alessio Igor Bogani <alessio.bogani@elettra.eu>
+Tested-by: Arpana Arland <arpanax.arland@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://lore.kernel.org/r/20230821171927.2203644-1-anthony.l.nguyen@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igb/igb_ptp.c | 24 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
+index 29ced6b74d364..be2e743e65de9 100644
+--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
++++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
+@@ -1181,18 +1181,6 @@ void igb_ptp_init(struct igb_adapter *adapter)
+ return;
+ }
+
+- spin_lock_init(&adapter->tmreg_lock);
+- INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);
+-
+- if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK)
+- INIT_DELAYED_WORK(&adapter->ptp_overflow_work,
+- igb_ptp_overflow_check);
+-
+- adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+- adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+-
+- igb_ptp_reset(adapter);
+-
+ adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
+ &adapter->pdev->dev);
+ if (IS_ERR(adapter->ptp_clock)) {
+@@ -1202,6 +1190,18 @@ void igb_ptp_init(struct igb_adapter *adapter)
+ dev_info(&adapter->pdev->dev, "added PHC on %s\n",
+ adapter->netdev->name);
+ adapter->ptp_flags |= IGB_PTP_ENABLED;
++
++ spin_lock_init(&adapter->tmreg_lock);
++ INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);
++
++ if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK)
++ INIT_DELAYED_WORK(&adapter->ptp_overflow_work,
++ igb_ptp_overflow_check);
++
++ adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
++ adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
++
++ igb_ptp_reset(adapter);
+ }
+ }
+
+--
+2.40.1
+
--- /dev/null
+From dd1202502be5d7735b6875c611b4f739a02ed654 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Apr 2023 10:33:44 +0100
+Subject: MIPS: cpu-features: Enable octeon_cache by cpu_type
+
+From: Jiaxun Yang <jiaxun.yang@flygoat.com>
+
+[ Upstream commit f641519409a73403ee6612b8648b95a688ab85c2 ]
+
+cpu_has_octeon_cache was tied to 0 for generic cpu-features,
+whith this generic kernel built for octeon CPU won't boot.
+
+Just enable this flag by cpu_type. It won't hurt orther platforms
+because compiler will eliminate the code path on other processors.
+
+Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Stable-dep-of: 5487a7b60695 ("MIPS: cpu-features: Use boot_cpu_type for CPU type based features")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/include/asm/cpu-features.h | 19 ++++++++++++++++++-
+ 1 file changed, 18 insertions(+), 1 deletion(-)
+
+diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
+index 4e2ee743088fd..73fa4c3337f89 100644
+--- a/arch/mips/include/asm/cpu-features.h
++++ b/arch/mips/include/asm/cpu-features.h
+@@ -111,7 +111,24 @@
+ #define cpu_has_tx39_cache __opt(MIPS_CPU_TX39_CACHE)
+ #endif
+ #ifndef cpu_has_octeon_cache
+-#define cpu_has_octeon_cache 0
++#define cpu_has_octeon_cache \
++({ \
++ int __res; \
++ \
++ switch (current_cpu_type()) { \
++ case CPU_CAVIUM_OCTEON: \
++ case CPU_CAVIUM_OCTEON_PLUS: \
++ case CPU_CAVIUM_OCTEON2: \
++ case CPU_CAVIUM_OCTEON3: \
++ __res = 1; \
++ break; \
++ \
++ default: \
++ __res = 0; \
++ } \
++ \
++ __res; \
++})
+ #endif
+ /* Don't override `cpu_has_fpu' to 1 or the "nofpu" option won't work. */
+ #ifndef cpu_has_fpu
+--
+2.40.1
+
--- /dev/null
+From 94ea3e3a51589301d601d6cfc8582acdb725f8aa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Jun 2023 13:51:22 +0800
+Subject: MIPS: cpu-features: Use boot_cpu_type for CPU type based features
+
+From: Jiaxun Yang <jiaxun.yang@flygoat.com>
+
+[ Upstream commit 5487a7b60695a92cf998350e4beac17144c91fcd ]
+
+Some CPU feature macros were using current_cpu_type to mark feature
+availability.
+
+However current_cpu_type will use smp_processor_id, which is prohibited
+under preemptable context.
+
+Since those features are all uniform on all CPUs in a SMP system, use
+boot_cpu_type instead of current_cpu_type to fix preemptable kernel.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/include/asm/cpu-features.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
+index 73fa4c3337f89..51faee4207454 100644
+--- a/arch/mips/include/asm/cpu-features.h
++++ b/arch/mips/include/asm/cpu-features.h
+@@ -115,7 +115,7 @@
+ ({ \
+ int __res; \
+ \
+- switch (current_cpu_type()) { \
++ switch (boot_cpu_type()) { \
+ case CPU_CAVIUM_OCTEON: \
+ case CPU_CAVIUM_OCTEON_PLUS: \
+ case CPU_CAVIUM_OCTEON2: \
+@@ -349,7 +349,7 @@
+ ({ \
+ int __res; \
+ \
+- switch (current_cpu_type()) { \
++ switch (boot_cpu_type()) { \
+ case CPU_M14KC: \
+ case CPU_74K: \
+ case CPU_1074K: \
+--
+2.40.1
+
--- /dev/null
+From 3595f5afde04c97b5e06ebca6fe6a4b32ea7d1bf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 26 Jan 2022 11:10:59 -0800
+Subject: net: remove bond_slave_has_mac_rcu()
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 8b0fdcdc3a7d44aff907f0103f5ffb86b12bfe71 ]
+
+No caller since v3.16.
+
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: e74216b8def3 ("bonding: fix macvlan over alb bond support")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/bonding.h | 14 --------------
+ 1 file changed, 14 deletions(-)
+
+diff --git a/include/net/bonding.h b/include/net/bonding.h
+index c458f084f7bb9..ab862e2e34520 100644
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -674,20 +674,6 @@ static inline struct slave *bond_slave_has_mac(struct bonding *bond,
+ return NULL;
+ }
+
+-/* Caller must hold rcu_read_lock() for read */
+-static inline struct slave *bond_slave_has_mac_rcu(struct bonding *bond,
+- const u8 *mac)
+-{
+- struct list_head *iter;
+- struct slave *tmp;
+-
+- bond_for_each_slave_rcu(bond, tmp, iter)
+- if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
+- return tmp;
+-
+- return NULL;
+-}
+-
+ /* Caller must hold rcu_read_lock() for read */
+ static inline bool bond_slave_has_mac_rx(struct bonding *bond, const u8 *mac)
+ {
+--
+2.40.1
+
--- /dev/null
+From bffd4581b5c5695e1a01418a3b7d8d24305c2ee3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 22 Aug 2023 06:12:31 -0400
+Subject: net/sched: fix a qdisc modification with ambiguous command request
+
+From: Jamal Hadi Salim <jhs@mojatatu.com>
+
+[ Upstream commit da71714e359b64bd7aab3bd56ec53f307f058133 ]
+
+When replacing an existing root qdisc, with one that is of the same kind, the
+request boils down to essentially a parameterization change i.e not one that
+requires allocation and grafting of a new qdisc. syzbot was able to create a
+scenario which resulted in a taprio qdisc replacing an existing taprio qdisc
+with a combination of NLM_F_CREATE, NLM_F_REPLACE and NLM_F_EXCL leading to
+create and graft scenario.
+The fix ensures that only when the qdisc kinds are different that we should
+allow a create and graft, otherwise it goes into the "change" codepath.
+
+While at it, fix the code and comments to improve readability.
+
+While syzbot was able to create the issue, it did not zone on the root cause.
+Analysis from Vladimir Oltean <vladimir.oltean@nxp.com> helped narrow it down.
+
+v1->V2 changes:
+- remove "inline" function definition (Vladmir)
+- remove extrenous braces in branches (Vladmir)
+- change inline function names (Pedro)
+- Run tdc tests (Victor)
+v2->v3 changes:
+- dont break else/if (Simon)
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Reported-by: syzbot+a3618a167af2021433cd@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/netdev/20230816225759.g25x76kmgzya2gei@skbuf/T/
+Tested-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Tested-by: Victor Nogueira <victor@mojatatu.com>
+Reviewed-by: Pedro Tammela <pctammela@mojatatu.com>
+Reviewed-by: Victor Nogueira <victor@mojatatu.com>
+Signed-off-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_api.c | 53 ++++++++++++++++++++++++++++++++++-----------
+ 1 file changed, 40 insertions(+), 13 deletions(-)
+
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index 5a0e71873e24b..8105563593b6f 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -1438,10 +1438,28 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
+ return 0;
+ }
+
++static bool req_create_or_replace(struct nlmsghdr *n)
++{
++ return (n->nlmsg_flags & NLM_F_CREATE &&
++ n->nlmsg_flags & NLM_F_REPLACE);
++}
++
++static bool req_create_exclusive(struct nlmsghdr *n)
++{
++ return (n->nlmsg_flags & NLM_F_CREATE &&
++ n->nlmsg_flags & NLM_F_EXCL);
++}
++
++static bool req_change(struct nlmsghdr *n)
++{
++ return (!(n->nlmsg_flags & NLM_F_CREATE) &&
++ !(n->nlmsg_flags & NLM_F_REPLACE) &&
++ !(n->nlmsg_flags & NLM_F_EXCL));
++}
++
+ /*
+ * Create/change qdisc.
+ */
+-
+ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
+ struct netlink_ext_ack *extack)
+ {
+@@ -1538,27 +1556,35 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
+ *
+ * We know, that some child q is already
+ * attached to this parent and have choice:
+- * either to change it or to create/graft new one.
++ * 1) change it or 2) create/graft new one.
++ * If the requested qdisc kind is different
++ * than the existing one, then we choose graft.
++ * If they are the same then this is "change"
++ * operation - just let it fallthrough..
+ *
+ * 1. We are allowed to create/graft only
+- * if CREATE and REPLACE flags are set.
++ * if the request is explicitly stating
++ * "please create if it doesn't exist".
+ *
+- * 2. If EXCL is set, requestor wanted to say,
+- * that qdisc tcm_handle is not expected
++ * 2. If the request is to exclusive create
++ * then the qdisc tcm_handle is not expected
+ * to exist, so that we choose create/graft too.
+ *
+ * 3. The last case is when no flags are set.
++ * This will happen when for example tc
++ * utility issues a "change" command.
+ * Alas, it is sort of hole in API, we
+ * cannot decide what to do unambiguously.
+- * For now we select create/graft, if
+- * user gave KIND, which does not match existing.
++ * For now we select create/graft.
+ */
+- if ((n->nlmsg_flags & NLM_F_CREATE) &&
+- (n->nlmsg_flags & NLM_F_REPLACE) &&
+- ((n->nlmsg_flags & NLM_F_EXCL) ||
+- (tca[TCA_KIND] &&
+- nla_strcmp(tca[TCA_KIND], q->ops->id))))
+- goto create_n_graft;
++ if (tca[TCA_KIND] &&
++ nla_strcmp(tca[TCA_KIND], q->ops->id)) {
++ if (req_create_or_replace(n) ||
++ req_create_exclusive(n))
++ goto create_n_graft;
++ else if (req_change(n))
++ goto create_n_graft2;
++ }
+ }
+ }
+ } else {
+@@ -1592,6 +1618,7 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
+ NL_SET_ERR_MSG(extack, "Qdisc not found. To create specify NLM_F_CREATE flag");
+ return -ENOENT;
+ }
++create_n_graft2:
+ if (clid == TC_H_INGRESS) {
+ if (dev_ingress_queue(dev)) {
+ q = qdisc_create(dev, dev_ingress_queue(dev), p,
+--
+2.40.1
+
--- /dev/null
+From 6f394a1f02a095df2a8dc23a1df75ce9612e5e4f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Apr 2023 21:15:57 +0200
+Subject: PCI: acpiphp: Reassign resources on bridge if necessary
+
+From: Igor Mammedov <imammedo@redhat.com>
+
+[ Upstream commit 40613da52b13fb21c5566f10b287e0ca8c12c4e9 ]
+
+When using ACPI PCI hotplug, hotplugging a device with large BARs may fail
+if bridge windows programmed by firmware are not large enough.
+
+Reproducer:
+ $ qemu-kvm -monitor stdio -M q35 -m 4G \
+ -global ICH9-LPC.acpi-pci-hotplug-with-bridge-support=on \
+ -device id=rp1,pcie-root-port,bus=pcie.0,chassis=4 \
+ disk_image
+
+ wait till linux guest boots, then hotplug device:
+ (qemu) device_add qxl,bus=rp1
+
+ hotplug on guest side fails with:
+ pci 0000:01:00.0: [1b36:0100] type 00 class 0x038000
+ pci 0000:01:00.0: reg 0x10: [mem 0x00000000-0x03ffffff]
+ pci 0000:01:00.0: reg 0x14: [mem 0x00000000-0x03ffffff]
+ pci 0000:01:00.0: reg 0x18: [mem 0x00000000-0x00001fff]
+ pci 0000:01:00.0: reg 0x1c: [io 0x0000-0x001f]
+ pci 0000:01:00.0: BAR 0: no space for [mem size 0x04000000]
+ pci 0000:01:00.0: BAR 0: failed to assign [mem size 0x04000000]
+ pci 0000:01:00.0: BAR 1: no space for [mem size 0x04000000]
+ pci 0000:01:00.0: BAR 1: failed to assign [mem size 0x04000000]
+ pci 0000:01:00.0: BAR 2: assigned [mem 0xfe800000-0xfe801fff]
+ pci 0000:01:00.0: BAR 3: assigned [io 0x1000-0x101f]
+ qxl 0000:01:00.0: enabling device (0000 -> 0003)
+ Unable to create vram_mapping
+ qxl: probe of 0000:01:00.0 failed with error -12
+
+However when using native PCIe hotplug
+ '-global ICH9-LPC.acpi-pci-hotplug-with-bridge-support=off'
+it works fine, since kernel attempts to reassign unused resources.
+
+Use the same machinery as native PCIe hotplug to (re)assign resources.
+
+Link: https://lore.kernel.org/r/20230424191557.2464760-1-imammedo@redhat.com
+Signed-off-by: Igor Mammedov <imammedo@redhat.com>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Acked-by: Michael S. Tsirkin <mst@redhat.com>
+Acked-by: Rafael J. Wysocki <rafael@kernel.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/hotplug/acpiphp_glue.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
+index 3d8844e7090a8..0c1ae63c6dbc4 100644
+--- a/drivers/pci/hotplug/acpiphp_glue.c
++++ b/drivers/pci/hotplug/acpiphp_glue.c
+@@ -496,7 +496,6 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge)
+ acpiphp_native_scan_bridge(dev);
+ }
+ } else {
+- LIST_HEAD(add_list);
+ int max, pass;
+
+ acpiphp_rescan_slot(slot);
+@@ -510,12 +509,10 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge)
+ if (pass && dev->subordinate) {
+ check_hotplug_bridge(slot, dev);
+ pcibios_resource_survey_bus(dev->subordinate);
+- __pci_bus_size_bridges(dev->subordinate,
+- &add_list);
+ }
+ }
+ }
+- __pci_bus_assign_resources(bus, &add_list, NULL);
++ pci_assign_unassigned_bridge_resources(bus->self);
+ }
+
+ acpiphp_sanitize_bus(bus);
+--
+2.40.1
+
--- /dev/null
+From 3e519320c4c3f305e84e3bcb2596366dd4e3469b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 27 Sep 2018 07:05:53 +0000
+Subject: powerpc/32: add stack protector support
+
+From: Christophe Leroy <christophe.leroy@c-s.fr>
+
+[ Upstream commit c3ff2a5193fa61b1b284cfb1d79628814ed0e95a ]
+
+This functionality was tentatively added in the past
+(commit 6533b7c16ee5 ("powerpc: Initial stack protector
+(-fstack-protector) support")) but had to be reverted
+(commit f2574030b0e3 ("powerpc: Revert the initial stack
+protector support") because of GCC implementing it differently
+whether it had been built with libc support or not.
+
+Now, GCC offers the possibility to manually set the
+stack-protector mode (global or tls) regardless of libc support.
+
+This time, the patch selects HAVE_STACKPROTECTOR only if
+-mstack-protector-guard=tls is supported by GCC.
+
+On PPC32, as register r2 points to current task_struct at
+all time, the stack_canary located inside task_struct can be
+used directly by using the following GCC options:
+-mstack-protector-guard=tls
+-mstack-protector-guard-reg=r2
+-mstack-protector-guard-offset=offsetof(struct task_struct, stack_canary))
+
+The protector is disabled for prom_init and bootx_init as
+it is too early to handle it properly.
+
+ $ echo CORRUPT_STACK > /sys/kernel/debug/provoke-crash/DIRECT
+[ 134.943666] Kernel panic - not syncing: stack-protector: Kernel stack is corrupted in: lkdtm_CORRUPT_STACK+0x64/0x64
+[ 134.943666]
+[ 134.955414] CPU: 0 PID: 283 Comm: sh Not tainted 4.18.0-s3k-dev-12143-ga3272be41209 #835
+[ 134.963380] Call Trace:
+[ 134.965860] [c6615d60] [c001f76c] panic+0x118/0x260 (unreliable)
+[ 134.971775] [c6615dc0] [c001f654] panic+0x0/0x260
+[ 134.976435] [c6615dd0] [c032c368] lkdtm_CORRUPT_STACK_STRONG+0x0/0x64
+[ 134.982769] [c6615e00] [ffffffff] 0xffffffff
+
+Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Stable-dep-of: 25ea739ea1d4 ("powerpc: Fail build if using recordmcount with binutils v2.37")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/Kconfig | 1 +
+ arch/powerpc/Makefile | 10 +++++++
+ arch/powerpc/include/asm/stackprotector.h | 34 +++++++++++++++++++++++
+ arch/powerpc/kernel/Makefile | 2 ++
+ arch/powerpc/kernel/asm-offsets.c | 3 ++
+ arch/powerpc/platforms/powermac/Makefile | 1 +
+ 6 files changed, 51 insertions(+)
+ create mode 100644 arch/powerpc/include/asm/stackprotector.h
+
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index f0e09d5f0bedd..3be56d857d57f 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -181,6 +181,7 @@ config PPC
+ select HAVE_ARCH_SECCOMP_FILTER
+ select HAVE_ARCH_TRACEHOOK
+ select HAVE_CBPF_JIT if !PPC64
++ select HAVE_STACKPROTECTOR if $(cc-option,-mstack-protector-guard=tls) && PPC32
+ select HAVE_CONTEXT_TRACKING if PPC64
+ select HAVE_DEBUG_KMEMLEAK
+ select HAVE_DEBUG_STACKOVERFLOW
+diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
+index b2e0fd8735627..4cea663d5d49b 100644
+--- a/arch/powerpc/Makefile
++++ b/arch/powerpc/Makefile
+@@ -113,6 +113,9 @@ KBUILD_LDFLAGS += -m elf$(BITS)$(LDEMULATION)
+ KBUILD_ARFLAGS += --target=elf$(BITS)-$(GNUTARGET)
+ endif
+
++cflags-$(CONFIG_STACKPROTECTOR) += -mstack-protector-guard=tls
++cflags-$(CONFIG_STACKPROTECTOR) += -mstack-protector-guard-reg=r2
++
+ LDFLAGS_vmlinux-y := -Bstatic
+ LDFLAGS_vmlinux-$(CONFIG_RELOCATABLE) := -pie
+ LDFLAGS_vmlinux := $(LDFLAGS_vmlinux-y)
+@@ -419,6 +422,13 @@ archclean:
+
+ archprepare: checkbin
+
++ifdef CONFIG_STACKPROTECTOR
++prepare: stack_protector_prepare
++
++stack_protector_prepare: prepare0
++ $(eval KBUILD_CFLAGS += -mstack-protector-guard-offset=$(shell awk '{if ($$2 == "TASK_CANARY") print $$3;}' include/generated/asm-offsets.h))
++endif
++
+ # Use the file '.tmp_gas_check' for binutils tests, as gas won't output
+ # to stdout and these checks are run even on install targets.
+ TOUT := .tmp_gas_check
+diff --git a/arch/powerpc/include/asm/stackprotector.h b/arch/powerpc/include/asm/stackprotector.h
+new file mode 100644
+index 0000000000000..d05d969c98c21
+--- /dev/null
++++ b/arch/powerpc/include/asm/stackprotector.h
+@@ -0,0 +1,34 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * GCC stack protector support.
++ *
++ */
++
++#ifndef _ASM_STACKPROTECTOR_H
++#define _ASM_STACKPROTECTOR_H
++
++#include <linux/random.h>
++#include <linux/version.h>
++#include <asm/reg.h>
++#include <asm/current.h>
++
++/*
++ * Initialize the stackprotector canary value.
++ *
++ * NOTE: this must only be called from functions that never return,
++ * and it must always be inlined.
++ */
++static __always_inline void boot_init_stack_canary(void)
++{
++ unsigned long canary;
++
++ /* Try to get a semi random initial value. */
++ canary = get_random_canary();
++ canary ^= mftb();
++ canary ^= LINUX_VERSION_CODE;
++ canary &= CANARY_MASK;
++
++ current->stack_canary = canary;
++}
++
++#endif /* _ASM_STACKPROTECTOR_H */
+diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
+index bf19c5514d6c2..cccea292af683 100644
+--- a/arch/powerpc/kernel/Makefile
++++ b/arch/powerpc/kernel/Makefile
+@@ -21,6 +21,8 @@ CFLAGS_prom_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+ CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+ CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+
++CFLAGS_prom_init.o += $(call cc-option, -fno-stack-protector)
++
+ ifdef CONFIG_FUNCTION_TRACER
+ # Do not trace early boot code
+ CFLAGS_REMOVE_cputable.o = $(CC_FLAGS_FTRACE)
+diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
+index 50400f213bbf2..c2288c73d56d1 100644
+--- a/arch/powerpc/kernel/asm-offsets.c
++++ b/arch/powerpc/kernel/asm-offsets.c
+@@ -79,6 +79,9 @@ int main(void)
+ {
+ OFFSET(THREAD, task_struct, thread);
+ OFFSET(MM, task_struct, mm);
++#ifdef CONFIG_STACKPROTECTOR
++ OFFSET(TASK_CANARY, task_struct, stack_canary);
++#endif
+ OFFSET(MMCONTEXTID, mm_struct, context.id);
+ #ifdef CONFIG_PPC64
+ DEFINE(SIGSEGV, SIGSEGV);
+diff --git a/arch/powerpc/platforms/powermac/Makefile b/arch/powerpc/platforms/powermac/Makefile
+index 561a67d65e4d4..923bfb3404333 100644
+--- a/arch/powerpc/platforms/powermac/Makefile
++++ b/arch/powerpc/platforms/powermac/Makefile
+@@ -1,5 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ CFLAGS_bootx_init.o += -fPIC
++CFLAGS_bootx_init.o += $(call cc-option, -fno-stack-protector)
+
+ ifdef CONFIG_FUNCTION_TRACER
+ # Do not trace early boot code
+--
+2.40.1
+
--- /dev/null
+From 04a4de89e3ea5dbe11440226e243c96b0dfd2b15 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 May 2023 11:44:36 +0530
+Subject: powerpc: Fail build if using recordmcount with binutils v2.37
+
+From: Naveen N Rao <naveen@kernel.org>
+
+[ Upstream commit 25ea739ea1d4d3de41acc4f4eb2d1a97eee0eb75 ]
+
+binutils v2.37 drops unused section symbols, which prevents recordmcount
+from capturing mcount locations in sections that have no non-weak
+symbols. This results in a build failure with a message such as:
+ Cannot find symbol for section 12: .text.perf_callchain_kernel.
+ kernel/events/callchain.o: failed
+
+The change to binutils was reverted for v2.38, so this behavior is
+specific to binutils v2.37:
+https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=c09c8b42021180eee9495bd50d8b35e683d3901b
+
+Objtool is able to cope with such sections, so this issue is specific to
+recordmcount.
+
+Fail the build and print a warning if binutils v2.37 is detected and if
+we are using recordmcount.
+
+Cc: stable@vger.kernel.org
+Suggested-by: Joel Stanley <joel@jms.id.au>
+Signed-off-by: Naveen N Rao <naveen@kernel.org>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://msgid.link/20230530061436.56925-1-naveen@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/Makefile | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
+index 2fad158173485..daddada1a3902 100644
+--- a/arch/powerpc/Makefile
++++ b/arch/powerpc/Makefile
+@@ -439,3 +439,11 @@ checkbin:
+ echo -n '*** Please use a different binutils version.' ; \
+ false ; \
+ fi
++ @if test "x${CONFIG_FTRACE_MCOUNT_USE_RECORDMCOUNT}" = "xy" -a \
++ "x${CONFIG_LD_IS_BFD}" = "xy" -a \
++ "${CONFIG_LD_VERSION}" = "23700" ; then \
++ echo -n '*** binutils 2.37 drops unused section symbols, which recordmcount ' ; \
++ echo 'is unable to handle.' ; \
++ echo '*** Please use a different binutils version.' ; \
++ false ; \
++ fi
+--
+2.40.1
+
--- /dev/null
+From a5dca96c3c224fe9a59621206ab54098b249a4d9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 1 Oct 2018 15:10:24 +0900
+Subject: powerpc: remove leftover code of old GCC version checks
+
+From: Masahiro Yamada <yamada.masahiro@socionext.com>
+
+[ Upstream commit bad96de8d31ba65dc26645af5550135315ea0b19 ]
+
+Clean up the leftover of commit f2910f0e6835 ("powerpc: remove old
+GCC version checks").
+
+Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
+Acked-by: Nicholas Piggin <npiggin@gmail.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Stable-dep-of: 25ea739ea1d4 ("powerpc: Fail build if using recordmcount with binutils v2.37")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/Makefile | 8 --------
+ 1 file changed, 8 deletions(-)
+
+diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
+index 4cea663d5d49b..2fad158173485 100644
+--- a/arch/powerpc/Makefile
++++ b/arch/powerpc/Makefile
+@@ -429,10 +429,6 @@ stack_protector_prepare: prepare0
+ $(eval KBUILD_CFLAGS += -mstack-protector-guard-offset=$(shell awk '{if ($$2 == "TASK_CANARY") print $$3;}' include/generated/asm-offsets.h))
+ endif
+
+-# Use the file '.tmp_gas_check' for binutils tests, as gas won't output
+-# to stdout and these checks are run even on install targets.
+-TOUT := .tmp_gas_check
+-
+ # Check toolchain versions:
+ # - gcc-4.6 is the minimum kernel-wide version so nothing required.
+ checkbin:
+@@ -443,7 +439,3 @@ checkbin:
+ echo -n '*** Please use a different binutils version.' ; \
+ false ; \
+ fi
+-
+-
+-CLEAN_FILES += $(TOUT)
+-
+--
+2.40.1
+
--- /dev/null
+From d846c197ed0eeb1a2ce67b20e7b34fb0aa8e5142 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 12 Jul 2023 12:16:40 +0100
+Subject: regmap: Account for register length in SMBus I/O limits
+
+From: Mark Brown <broonie@kernel.org>
+
+[ Upstream commit 0c9d2eb5e94792fe64019008a04d4df5e57625af ]
+
+The SMBus I2C buses have limits on the size of transfers they can do but
+do not factor in the register length meaning we may try to do a transfer
+longer than our length limit, the core will not take care of this.
+Future changes will factor this out into the core but there are a number
+of users that assume current behaviour so let's just do something
+conservative here.
+
+This does not take account padding bits but practically speaking these
+are very rarely if ever used on I2C buses given that they generally run
+slowly enough to mean there's no issue.
+
+Cc: stable@kernel.org
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Reviewed-by: Xu Yilun <yilun.xu@intel.com>
+Link: https://lore.kernel.org/r/20230712-regmap-max-transfer-v1-2-80e2aed22e83@kernel.org
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/base/regmap/regmap-i2c.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
+index 052170ca0255e..b707913233c9b 100644
+--- a/drivers/base/regmap/regmap-i2c.c
++++ b/drivers/base/regmap/regmap-i2c.c
+@@ -246,8 +246,8 @@ static int regmap_i2c_smbus_i2c_read(void *context, const void *reg,
+ static struct regmap_bus regmap_i2c_smbus_i2c_block = {
+ .write = regmap_i2c_smbus_i2c_write,
+ .read = regmap_i2c_smbus_i2c_read,
+- .max_raw_read = I2C_SMBUS_BLOCK_MAX,
+- .max_raw_write = I2C_SMBUS_BLOCK_MAX,
++ .max_raw_read = I2C_SMBUS_BLOCK_MAX - 1,
++ .max_raw_write = I2C_SMBUS_BLOCK_MAX - 1,
+ };
+
+ static int regmap_i2c_smbus_i2c_write_reg16(void *context, const void *data,
+@@ -303,8 +303,8 @@ static int regmap_i2c_smbus_i2c_read_reg16(void *context, const void *reg,
+ static const struct regmap_bus regmap_i2c_smbus_i2c_block_reg16 = {
+ .write = regmap_i2c_smbus_i2c_write_reg16,
+ .read = regmap_i2c_smbus_i2c_read_reg16,
+- .max_raw_read = I2C_SMBUS_BLOCK_MAX,
+- .max_raw_write = I2C_SMBUS_BLOCK_MAX,
++ .max_raw_read = I2C_SMBUS_BLOCK_MAX - 2,
++ .max_raw_write = I2C_SMBUS_BLOCK_MAX - 2,
+ };
+
+ static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
+--
+2.40.1
+
--- /dev/null
+From ee5d7277ae0fd7cda3208f95b7cfa51484e5a930 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 24 Apr 2020 20:33:58 +0800
+Subject: regmap-i2c: add 16-bit width registers support
+
+From: AceLan Kao <acelan.kao@canonical.com>
+
+[ Upstream commit 82f25bd73c0bee4d29df47007a4f7290695b7db7 ]
+
+This allows to access data with 16-bit width of registers
+via i2c SMBus block functions.
+
+The multi-command sequence of the reading function is not safe
+and may read the wrong data from other address if other commands
+are sent in-between the SMBus commands in the read function.
+
+Read performance:
+ 32768 bytes (33 kB, 32 KiB) copied, 11.4869 s, 2.9 kB/s
+Write performance(with 1-byte page):
+ 32768 bytes (33 kB, 32 KiB) copied, 129.591 s, 0.3 kB/s
+
+The implementation is inspired by below commit
+https://patchwork.ozlabs.org/patch/545292/
+
+v2: add more descriptions about the issue that maybe introduced
+ by this commit
+
+Signed-off-by: AceLan Kao <acelan.kao@canonical.com>
+Link: https://lore.kernel.org/r/20200424123358.144850-1-acelan.kao@canonical.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Stable-dep-of: 0c9d2eb5e947 ("regmap: Account for register length in SMBus I/O limits")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/base/regmap/regmap-i2c.c | 61 ++++++++++++++++++++++++++++++++
+ 1 file changed, 61 insertions(+)
+
+diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
+index 056acde5e7d34..052170ca0255e 100644
+--- a/drivers/base/regmap/regmap-i2c.c
++++ b/drivers/base/regmap/regmap-i2c.c
+@@ -250,6 +250,63 @@ static struct regmap_bus regmap_i2c_smbus_i2c_block = {
+ .max_raw_write = I2C_SMBUS_BLOCK_MAX,
+ };
+
++static int regmap_i2c_smbus_i2c_write_reg16(void *context, const void *data,
++ size_t count)
++{
++ struct device *dev = context;
++ struct i2c_client *i2c = to_i2c_client(dev);
++
++ if (count < 2)
++ return -EINVAL;
++
++ count--;
++ return i2c_smbus_write_i2c_block_data(i2c, ((u8 *)data)[0], count,
++ (u8 *)data + 1);
++}
++
++static int regmap_i2c_smbus_i2c_read_reg16(void *context, const void *reg,
++ size_t reg_size, void *val,
++ size_t val_size)
++{
++ struct device *dev = context;
++ struct i2c_client *i2c = to_i2c_client(dev);
++ int ret, count, len = val_size;
++
++ if (reg_size != 2)
++ return -EINVAL;
++
++ ret = i2c_smbus_write_byte_data(i2c, ((u16 *)reg)[0] & 0xff,
++ ((u16 *)reg)[0] >> 8);
++ if (ret < 0)
++ return ret;
++
++ count = 0;
++ do {
++ /* Current Address Read */
++ ret = i2c_smbus_read_byte(i2c);
++ if (ret < 0)
++ break;
++
++ *((u8 *)val++) = ret;
++ count++;
++ len--;
++ } while (len > 0);
++
++ if (count == val_size)
++ return 0;
++ else if (ret < 0)
++ return ret;
++ else
++ return -EIO;
++}
++
++static const struct regmap_bus regmap_i2c_smbus_i2c_block_reg16 = {
++ .write = regmap_i2c_smbus_i2c_write_reg16,
++ .read = regmap_i2c_smbus_i2c_read_reg16,
++ .max_raw_read = I2C_SMBUS_BLOCK_MAX,
++ .max_raw_write = I2C_SMBUS_BLOCK_MAX,
++};
++
+ static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
+ const struct regmap_config *config)
+ {
+@@ -259,6 +316,10 @@ static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
+ i2c_check_functionality(i2c->adapter,
+ I2C_FUNC_SMBUS_I2C_BLOCK))
+ return ®map_i2c_smbus_i2c_block;
++ else if (config->val_bits == 8 && config->reg_bits == 16 &&
++ i2c_check_functionality(i2c->adapter,
++ I2C_FUNC_SMBUS_I2C_BLOCK))
++ return ®map_i2c_smbus_i2c_block_reg16;
+ else if (config->val_bits == 16 && config->reg_bits == 8 &&
+ i2c_check_functionality(i2c->adapter,
+ I2C_FUNC_SMBUS_WORD_DATA))
+--
+2.40.1
+
--- /dev/null
+From 0e476e5456507fda60380b9d99aad7ba6a34c510 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 Nov 2021 08:31:09 +0100
+Subject: Revert "tty: serial: fsl_lpuart: drop earlycon entry for i.MX8QXP"
+
+From: Alexander Stein <alexander.stein@ew.tq-group.com>
+
+[ Upstream commit 4e9679738a918d8a482ac6a2cb2bb871f094bb84 ]
+
+Revert commit b4b844930f27 ("tty: serial: fsl_lpuart: drop earlycon entry
+for i.MX8QXP"), because this breaks earlycon support on imx8qm/imx8qxp.
+While it is true that for earlycon there is no difference between
+i.MX8QXP and i.MX7ULP (for now at least), there are differences
+regarding clocks and fixups for wakeup support. For that reason it was
+deemed unacceptable to add the imx7ulp compatible to device tree in
+order to get earlycon working again.
+
+Reviewed-by: Peng Fan <peng.fan@nxp.com>
+Signed-off-by: Alexander Stein <alexander.stein@ew.tq-group.com>
+Link: https://lore.kernel.org/r/20211124073109.805088-1-alexander.stein@ew.tq-group.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: e0edfdc15863 ("tty: serial: fsl_lpuart: add earlycon for imx8ulp platform")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/tty/serial/fsl_lpuart.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index 36321d810d36f..573086aac2c82 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -2136,6 +2136,7 @@ static int __init lpuart32_imx_early_console_setup(struct earlycon_device *devic
+ OF_EARLYCON_DECLARE(lpuart, "fsl,vf610-lpuart", lpuart_early_console_setup);
+ OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1021a-lpuart", lpuart32_early_console_setup);
+ OF_EARLYCON_DECLARE(lpuart32, "fsl,imx7ulp-lpuart", lpuart32_imx_early_console_setup);
++OF_EARLYCON_DECLARE(lpuart32, "fsl,imx8qxp-lpuart", lpuart32_imx_early_console_setup);
+ EARLYCON_DECLARE(lpuart, lpuart_early_console_setup);
+ EARLYCON_DECLARE(lpuart32, lpuart32_early_console_setup);
+
+--
+2.40.1
+
net-fix-the-rto-timer-retransmitting-skb-every-1ms-if-linear-option-is-enabled.patch
net-xfrm-amend-xfrma_sec_ctx-nla_policy-structure.patch
net-phy-broadcom-stub-c45-read-write-for-54810.patch
+pci-acpiphp-reassign-resources-on-bridge-if-necessar.patch
+dlm-improve-plock-logging-if-interrupted.patch
+dlm-replace-usage-of-found-with-dedicated-list-itera.patch
+fs-dlm-add-pid-to-debug-log.patch
+fs-dlm-change-plock-interrupted-message-to-debug-aga.patch
+fs-dlm-use-dlm_plock_info-for-do_unlock_close.patch
+fs-dlm-fix-mismatch-of-plock-results-from-userspace.patch
+mips-cpu-features-enable-octeon_cache-by-cpu_type.patch
+mips-cpu-features-use-boot_cpu_type-for-cpu-type-bas.patch
+revert-tty-serial-fsl_lpuart-drop-earlycon-entry-for.patch
+tty-serial-fsl_lpuart-add-earlycon-for-imx8ulp-platf.patch
+fbdev-improve-performance-of-sys_imageblit.patch
+fbdev-fix-sys_imageblit-for-arbitrary-image-widths.patch
+fbdev-fix-potential-oob-read-in-fast_imageblit.patch
+powerpc-32-add-stack-protector-support.patch
+powerpc-remove-leftover-code-of-old-gcc-version-chec.patch
+powerpc-fail-build-if-using-recordmcount-with-binuti.patch
+dm-integrity-increase-recalc_sectors-to-improve-reca.patch
+dm-integrity-reduce-vmalloc-space-footprint-on-32-bi.patch
+exfat-add-bitmap-operations.patch
+exfat-use-kvmalloc_array-kvfree-instead-of-kmalloc_a.patch
+regmap-i2c-add-16-bit-width-registers-support.patch
+regmap-account-for-register-length-in-smbus-i-o-limi.patch
+drm-amd-display-do-not-wait-for-mpc-idle-if-tg-is-di.patch
+drm-amd-display-check-tg-is-non-null-before-checking.patch
+tracing-fix-memleak-due-to-race-between-current_trac.patch
+sock-annotate-data-races-around-prot-memory_pressure.patch
+dccp-annotate-data-races-in-dccp_poll.patch
+igb-avoid-starting-unnecessary-workqueues.patch
+net-sched-fix-a-qdisc-modification-with-ambiguous-co.patch
+net-remove-bond_slave_has_mac_rcu.patch
+bonding-fix-macvlan-over-alb-bond-support.patch
--- /dev/null
+From 8d04eef7a3610c938737d587aa2d251bc3b6f6bc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Aug 2023 01:51:32 +0000
+Subject: sock: annotate data-races around prot->memory_pressure
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 76f33296d2e09f63118db78125c95ef56df438e9 ]
+
+*prot->memory_pressure is read/writen locklessly, we need
+to add proper annotations.
+
+A recent commit added a new race, it is time to audit all accesses.
+
+Fixes: 2d0c88e84e48 ("sock: Fix misuse of sk_under_memory_pressure()")
+Fixes: 4d93df0abd50 ("[SCTP]: Rewrite of sctp buffer management code")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Abel Wu <wuyun.abel@bytedance.com>
+Reviewed-by: Shakeel Butt <shakeelb@google.com>
+Link: https://lore.kernel.org/r/20230818015132.2699348-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/sock.h | 7 ++++---
+ net/sctp/socket.c | 2 +-
+ 2 files changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/include/net/sock.h b/include/net/sock.h
+index bcb1901ac13a5..373e34b46a3c9 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -1152,6 +1152,7 @@ struct proto {
+ /*
+ * Pressure flag: try to collapse.
+ * Technical note: it is used by multiple contexts non atomically.
++ * Make sure to use READ_ONCE()/WRITE_ONCE() for all reads/writes.
+ * All the __sk_mem_schedule() is of this nature: accounting
+ * is strict, actions are advisory and have some latency.
+ */
+@@ -1268,7 +1269,7 @@ static inline bool sk_has_memory_pressure(const struct sock *sk)
+ static inline bool sk_under_global_memory_pressure(const struct sock *sk)
+ {
+ return sk->sk_prot->memory_pressure &&
+- !!*sk->sk_prot->memory_pressure;
++ !!READ_ONCE(*sk->sk_prot->memory_pressure);
+ }
+
+ static inline bool sk_under_memory_pressure(const struct sock *sk)
+@@ -1280,7 +1281,7 @@ static inline bool sk_under_memory_pressure(const struct sock *sk)
+ mem_cgroup_under_socket_pressure(sk->sk_memcg))
+ return true;
+
+- return !!*sk->sk_prot->memory_pressure;
++ return !!READ_ONCE(*sk->sk_prot->memory_pressure);
+ }
+
+ static inline long
+@@ -1334,7 +1335,7 @@ proto_memory_pressure(struct proto *prot)
+ {
+ if (!prot->memory_pressure)
+ return false;
+- return !!*prot->memory_pressure;
++ return !!READ_ONCE(*prot->memory_pressure);
+ }
+
+
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index baa825751c393..432dccd375064 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -112,7 +112,7 @@ struct percpu_counter sctp_sockets_allocated;
+
+ static void sctp_enter_memory_pressure(struct sock *sk)
+ {
+- sctp_memory_pressure = 1;
++ WRITE_ONCE(sctp_memory_pressure, 1);
+ }
+
+
+--
+2.40.1
+
--- /dev/null
+From b5c4938ea64878525cfca3d55bc6c198964d6418 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 17 Aug 2023 20:55:39 +0800
+Subject: tracing: Fix memleak due to race between current_tracer and trace
+
+From: Zheng Yejian <zhengyejian1@huawei.com>
+
+[ Upstream commit eecb91b9f98d6427d4af5fdb8f108f52572a39e7 ]
+
+Kmemleak report a leak in graph_trace_open():
+
+ unreferenced object 0xffff0040b95f4a00 (size 128):
+ comm "cat", pid 204981, jiffies 4301155872 (age 99771.964s)
+ hex dump (first 32 bytes):
+ e0 05 e7 b4 ab 7d 00 00 0b 00 01 00 00 00 00 00 .....}..........
+ f4 00 01 10 00 a0 ff ff 00 00 00 00 65 00 10 00 ............e...
+ backtrace:
+ [<000000005db27c8b>] kmem_cache_alloc_trace+0x348/0x5f0
+ [<000000007df90faa>] graph_trace_open+0xb0/0x344
+ [<00000000737524cd>] __tracing_open+0x450/0xb10
+ [<0000000098043327>] tracing_open+0x1a0/0x2a0
+ [<00000000291c3876>] do_dentry_open+0x3c0/0xdc0
+ [<000000004015bcd6>] vfs_open+0x98/0xd0
+ [<000000002b5f60c9>] do_open+0x520/0x8d0
+ [<00000000376c7820>] path_openat+0x1c0/0x3e0
+ [<00000000336a54b5>] do_filp_open+0x14c/0x324
+ [<000000002802df13>] do_sys_openat2+0x2c4/0x530
+ [<0000000094eea458>] __arm64_sys_openat+0x130/0x1c4
+ [<00000000a71d7881>] el0_svc_common.constprop.0+0xfc/0x394
+ [<00000000313647bf>] do_el0_svc+0xac/0xec
+ [<000000002ef1c651>] el0_svc+0x20/0x30
+ [<000000002fd4692a>] el0_sync_handler+0xb0/0xb4
+ [<000000000c309c35>] el0_sync+0x160/0x180
+
+The root cause is descripted as follows:
+
+ __tracing_open() { // 1. File 'trace' is being opened;
+ ...
+ *iter->trace = *tr->current_trace; // 2. Tracer 'function_graph' is
+ // currently set;
+ ...
+ iter->trace->open(iter); // 3. Call graph_trace_open() here,
+ // and memory are allocated in it;
+ ...
+ }
+
+ s_start() { // 4. The opened file is being read;
+ ...
+ *iter->trace = *tr->current_trace; // 5. If tracer is switched to
+ // 'nop' or others, then memory
+ // in step 3 are leaked!!!
+ ...
+ }
+
+To fix it, in s_start(), close tracer before switching then reopen the
+new tracer after switching. And some tracers like 'wakeup' may not update
+'iter->private' in some cases when reopened, then it should be cleared
+to avoid being mistakenly closed again.
+
+Link: https://lore.kernel.org/linux-trace-kernel/20230817125539.1646321-1-zhengyejian1@huawei.com
+
+Fixes: d7350c3f4569 ("tracing/core: make the read callbacks reentrants")
+Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/trace.c | 9 ++++++++-
+ kernel/trace/trace_irqsoff.c | 3 ++-
+ kernel/trace/trace_sched_wakeup.c | 2 ++
+ 3 files changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 9da7b10e56d23..f44c8f1fd3ec5 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -3277,8 +3277,15 @@ static void *s_start(struct seq_file *m, loff_t *pos)
+ * will point to the same string as current_trace->name.
+ */
+ mutex_lock(&trace_types_lock);
+- if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
++ if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) {
++ /* Close iter->trace before switching to the new current tracer */
++ if (iter->trace->close)
++ iter->trace->close(iter);
+ *iter->trace = *tr->current_trace;
++ /* Reopen the new current tracer */
++ if (iter->trace->open)
++ iter->trace->open(iter);
++ }
+ mutex_unlock(&trace_types_lock);
+
+ #ifdef CONFIG_TRACER_MAX_TRACE
+diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
+index 98ea6d28df15d..0f36bb59970df 100644
+--- a/kernel/trace/trace_irqsoff.c
++++ b/kernel/trace/trace_irqsoff.c
+@@ -222,7 +222,8 @@ static void irqsoff_trace_open(struct trace_iterator *iter)
+ {
+ if (is_graph(iter->tr))
+ graph_trace_open(iter);
+-
++ else
++ iter->private = NULL;
+ }
+
+ static void irqsoff_trace_close(struct trace_iterator *iter)
+diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
+index 11f4dbd9526b6..8041bd5e42624 100644
+--- a/kernel/trace/trace_sched_wakeup.c
++++ b/kernel/trace/trace_sched_wakeup.c
+@@ -287,6 +287,8 @@ static void wakeup_trace_open(struct trace_iterator *iter)
+ {
+ if (is_graph(iter->tr))
+ graph_trace_open(iter);
++ else
++ iter->private = NULL;
+ }
+
+ static void wakeup_trace_close(struct trace_iterator *iter)
+--
+2.40.1
+
--- /dev/null
+From 3aab0f2a0cb93903b52c0fddaaae27055d63bf74 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 19 Jun 2023 16:06:13 +0800
+Subject: tty: serial: fsl_lpuart: add earlycon for imx8ulp platform
+
+From: Sherry Sun <sherry.sun@nxp.com>
+
+[ Upstream commit e0edfdc15863ec80a1d9ac6e174dbccc00206dd0 ]
+
+Add earlycon support for imx8ulp platform.
+
+Signed-off-by: Sherry Sun <sherry.sun@nxp.com>
+Cc: stable <stable@kernel.org>
+Link: https://lore.kernel.org/r/20230619080613.16522-1-sherry.sun@nxp.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/tty/serial/fsl_lpuart.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index 573086aac2c82..af23d41b98438 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -2136,6 +2136,7 @@ static int __init lpuart32_imx_early_console_setup(struct earlycon_device *devic
+ OF_EARLYCON_DECLARE(lpuart, "fsl,vf610-lpuart", lpuart_early_console_setup);
+ OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1021a-lpuart", lpuart32_early_console_setup);
+ OF_EARLYCON_DECLARE(lpuart32, "fsl,imx7ulp-lpuart", lpuart32_imx_early_console_setup);
++OF_EARLYCON_DECLARE(lpuart32, "fsl,imx8ulp-lpuart", lpuart32_imx_early_console_setup);
+ OF_EARLYCON_DECLARE(lpuart32, "fsl,imx8qxp-lpuart", lpuart32_imx_early_console_setup);
+ EARLYCON_DECLARE(lpuart, lpuart_early_console_setup);
+ EARLYCON_DECLARE(lpuart32, lpuart32_early_console_setup);
+--
+2.40.1
+