--- /dev/null
+From stable-owner@vger.kernel.org Tue May 16 20:01:54 2023
+From: Dragos-Marian Panait <dragos.panait@windriver.com>
+Date: Tue, 16 May 2023 22:00:40 +0300
+Subject: act_mirred: use the backlog for nested calls to mirred ingress
+To: stable@vger.kernel.org
+Cc: wenxu <wenxu@ucloud.cn>, Jakub Kicinski <kuba@kernel.org>, Jamal Hadi Salim <jhs@mojatatu.com>, Davide Caratti <dcaratti@redhat.com>, Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>, Paolo Abeni <pabeni@redhat.com>, William Zhao <wizhao@redhat.com>, Xin Long <lucien.xin@gmail.com>, "David S . Miller" <davem@davemloft.net>, Eric Dumazet <edumazet@google.com>, Cong Wang <xiyou.wangcong@gmail.com>, Jiri Pirko <jiri@resnulli.us>, Shuah Khan <shuah@kernel.org>, linux-kselftest@vger.kernel.org, netdev@vger.kernel.org
+Message-ID: <20230516190040.636627-4-dragos.panait@windriver.com>
+
+From: Davide Caratti <dcaratti@redhat.com>
+
+[ Upstream commit ca22da2fbd693b54dc8e3b7b54ccc9f7e9ba3640 ]
+
+William reports kernel soft-lockups on some OVS topologies when the TC
+mirred egress->ingress action is hit by local TCP traffic [1].
+The same can also be reproduced with SCTP (thanks Xin for verifying), when
+client and server reach each other through mirred egress to ingress, and
+one of the two peers sends a "heartbeat" packet (from within a timer).
+
+Enqueueing to the backlog proved to fix this soft lockup; however, as Cong
+noticed [2], we should preserve - when possible - the current mirred
+behavior that counts as "overlimits" any packet drop that happens after
+the mirred forwarding action [3]. As a compromise, use the backlog only
+when tcf_mirred_act() has a nest level greater than one: change
+tcf_mirred_forward() accordingly.
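+
+In code terms, the compromise boils down to the following dispatch (a
+condensed sketch of the hunk below; is_mirred_nested() reads the per-CPU
+nest counter introduced by the previous patch):
+
+  static int tcf_mirred_forward(bool want_ingress, struct sk_buff *skb)
+  {
+          if (!want_ingress)
+                  return dev_queue_xmit(skb);   /* egress: unchanged */
+          if (is_mirred_nested())
+                  return netif_rx(skb);         /* nested: defer to backlog */
+          return netif_receive_skb(skb);        /* nest level 1: inline */
+  }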
+
+Also, add a kselftest that can reproduce the lockup and verifies TC
+mirred's ability to account for further packet drops after TC mirred
+egress->ingress (when the nest level is 1).
+
+ [1] https://lore.kernel.org/netdev/33dc43f587ec1388ba456b4915c75f02a8aae226.1663945716.git.dcaratti@redhat.com/
+ [2] https://lore.kernel.org/netdev/Y0w%2FWWY60gqrtGLp@pop-os.localdomain/
+ [3] such behavior is not guaranteed: for example, if RPS or skb RX
+ timestamping is enabled on the mirred target device, the kernel
+ can defer receiving the skb and return NET_RX_SUCCESS inside
+ tcf_mirred_forward().
+
+Reported-by: William Zhao <wizhao@redhat.com>
+CC: Xin Long <lucien.xin@gmail.com>
+Signed-off-by: Davide Caratti <dcaratti@redhat.com>
+Reviewed-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+[DP: adjusted context for linux-5.10.y]
+Signed-off-by: Dragos-Marian Panait <dragos.panait@windriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/act_mirred.c | 7 ++
+ tools/testing/selftests/net/forwarding/tc_actions.sh | 48 ++++++++++++++++++-
+ 2 files changed, 54 insertions(+), 1 deletion(-)
+
+--- a/net/sched/act_mirred.c
++++ b/net/sched/act_mirred.c
+@@ -206,12 +206,19 @@ release_idr:
+ return err;
+ }
+
++static bool is_mirred_nested(void)
++{
++ return unlikely(__this_cpu_read(mirred_nest_level) > 1);
++}
++
+ static int tcf_mirred_forward(bool want_ingress, struct sk_buff *skb)
+ {
+ int err;
+
+ if (!want_ingress)
+ err = dev_queue_xmit(skb);
++ else if (is_mirred_nested())
++ err = netif_rx(skb);
+ else
+ err = netif_receive_skb(skb);
+
+--- a/tools/testing/selftests/net/forwarding/tc_actions.sh
++++ b/tools/testing/selftests/net/forwarding/tc_actions.sh
+@@ -3,7 +3,7 @@
+
+ ALL_TESTS="gact_drop_and_ok_test mirred_egress_redirect_test \
+ mirred_egress_mirror_test matchall_mirred_egress_mirror_test \
+- gact_trap_test"
++ gact_trap_test mirred_egress_to_ingress_tcp_test"
+ NUM_NETIFS=4
+ source tc_common.sh
+ source lib.sh
+@@ -153,6 +153,52 @@ gact_trap_test()
+ log_test "trap ($tcflags)"
+ }
+
++mirred_egress_to_ingress_tcp_test()
++{
++ local tmpfile=$(mktemp) tmpfile1=$(mktemp)
++
++ RET=0
++ dd conv=sparse status=none if=/dev/zero bs=1M count=2 of=$tmpfile
++ tc filter add dev $h1 protocol ip pref 100 handle 100 egress flower \
++ $tcflags ip_proto tcp src_ip 192.0.2.1 dst_ip 192.0.2.2 \
++ action ct commit nat src addr 192.0.2.2 pipe \
++ action ct clear pipe \
++ action ct commit nat dst addr 192.0.2.1 pipe \
++ action ct clear pipe \
++ action skbedit ptype host pipe \
++ action mirred ingress redirect dev $h1
++ tc filter add dev $h1 protocol ip pref 101 handle 101 egress flower \
++ $tcflags ip_proto icmp \
++ action mirred ingress redirect dev $h1
++ tc filter add dev $h1 protocol ip pref 102 handle 102 ingress flower \
++ ip_proto icmp \
++ action drop
++
++ ip vrf exec v$h1 nc --recv-only -w10 -l -p 12345 -o $tmpfile1 &
++ local rpid=$!
++ ip vrf exec v$h1 nc -w1 --send-only 192.0.2.2 12345 <$tmpfile
++ wait -n $rpid
++ cmp -s $tmpfile $tmpfile1
++ check_err $? "server output check failed"
++
++ $MZ $h1 -c 10 -p 64 -a $h1mac -b $h1mac -A 192.0.2.1 -B 192.0.2.1 \
++ -t icmp "ping,id=42,seq=5" -q
++ tc_check_packets "dev $h1 egress" 101 10
++ check_err $? "didn't mirred redirect ICMP"
++ tc_check_packets "dev $h1 ingress" 102 10
++ check_err $? "didn't drop mirred ICMP"
++ local overlimits=$(tc_rule_stats_get ${h1} 101 egress .overlimits)
++ test ${overlimits} = 10
++ check_err $? "wrong overlimits, expected 10 got ${overlimits}"
++
++ tc filter del dev $h1 egress protocol ip pref 100 handle 100 flower
++ tc filter del dev $h1 egress protocol ip pref 101 handle 101 flower
++ tc filter del dev $h1 ingress protocol ip pref 102 handle 102 flower
++
++ rm -f $tmpfile $tmpfile1
++ log_test "mirred_egress_to_ingress_tcp ($tcflags)"
++}
++
+ setup_prepare()
+ {
+ h1=${NETIFS[p1]}
--- /dev/null
+From stable-owner@vger.kernel.org Tue May 16 20:01:56 2023
+From: Dragos-Marian Panait <dragos.panait@windriver.com>
+Date: Tue, 16 May 2023 22:00:39 +0300
+Subject: net/sched: act_mirred: better wording on protection against excessive stack growth
+To: stable@vger.kernel.org
+Cc: wenxu <wenxu@ucloud.cn>, Jakub Kicinski <kuba@kernel.org>, Jamal Hadi Salim <jhs@mojatatu.com>, Davide Caratti <dcaratti@redhat.com>, Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>, Paolo Abeni <pabeni@redhat.com>, William Zhao <wizhao@redhat.com>, Xin Long <lucien.xin@gmail.com>, "David S . Miller" <davem@davemloft.net>, Eric Dumazet <edumazet@google.com>, Cong Wang <xiyou.wangcong@gmail.com>, Jiri Pirko <jiri@resnulli.us>, Shuah Khan <shuah@kernel.org>, linux-kselftest@vger.kernel.org, netdev@vger.kernel.org
+Message-ID: <20230516190040.636627-3-dragos.panait@windriver.com>
+
+From: Davide Caratti <dcaratti@redhat.com>
+
+[ Upstream commit 78dcdffe0418ac8f3f057f26fe71ccf4d8ed851f ]
+
+With commit e2ca070f89ec ("net: sched: protect against stack overflow in
+TC act_mirred"), act_mirred protected itself against excessive stack growth
+using a per-CPU counter of nested calls to tcf_mirred_act(), capped at
+MIRRED_RECURSION_LIMIT. However, such protection does not detect
+recursion/loops in case the packet is enqueued to the backlog (for example,
+when the mirred target device has RPS or skb timestamping enabled). Change
+the wording from "recursion" to "nesting" to make this clearer to readers.
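+
+The guard itself is unchanged; after the rename it reads (condensed from
+the hunks below):
+
+  nest_level = __this_cpu_inc_return(mirred_nest_level);
+  if (unlikely(nest_level > MIRRED_NEST_LIMIT)) {
+          net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n",
+                               netdev_name(skb->dev));
+          __this_cpu_dec(mirred_nest_level);
+          return TC_ACT_SHOT;
+  }
+  /* ... mirror/redirect the skb ... */
+  __this_cpu_dec(mirred_nest_level);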
+
+CC: Jamal Hadi Salim <jhs@mojatatu.com>
+Signed-off-by: Davide Caratti <dcaratti@redhat.com>
+Reviewed-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Stable-dep-of: ca22da2fbd69 ("act_mirred: use the backlog for nested calls to mirred ingress")
+Signed-off-by: Dragos-Marian Panait <dragos.panait@windriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/act_mirred.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/net/sched/act_mirred.c
++++ b/net/sched/act_mirred.c
+@@ -28,8 +28,8 @@
+ static LIST_HEAD(mirred_list);
+ static DEFINE_SPINLOCK(mirred_list_lock);
+
+-#define MIRRED_RECURSION_LIMIT 4
+-static DEFINE_PER_CPU(unsigned int, mirred_rec_level);
++#define MIRRED_NEST_LIMIT 4
++static DEFINE_PER_CPU(unsigned int, mirred_nest_level);
+
+ static bool tcf_mirred_is_act_redirect(int action)
+ {
+@@ -225,7 +225,7 @@ static int tcf_mirred_act(struct sk_buff
+ struct sk_buff *skb2 = skb;
+ bool m_mac_header_xmit;
+ struct net_device *dev;
+- unsigned int rec_level;
++ unsigned int nest_level;
+ int retval, err = 0;
+ bool use_reinsert;
+ bool want_ingress;
+@@ -236,11 +236,11 @@ static int tcf_mirred_act(struct sk_buff
+ int mac_len;
+ bool at_nh;
+
+- rec_level = __this_cpu_inc_return(mirred_rec_level);
+- if (unlikely(rec_level > MIRRED_RECURSION_LIMIT)) {
++ nest_level = __this_cpu_inc_return(mirred_nest_level);
++ if (unlikely(nest_level > MIRRED_NEST_LIMIT)) {
+ net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n",
+ netdev_name(skb->dev));
+- __this_cpu_dec(mirred_rec_level);
++ __this_cpu_dec(mirred_nest_level);
+ return TC_ACT_SHOT;
+ }
+
+@@ -310,7 +310,7 @@ static int tcf_mirred_act(struct sk_buff
+ err = tcf_mirred_forward(res->ingress, skb);
+ if (err)
+ tcf_action_inc_overlimit_qstats(&m->common);
+- __this_cpu_dec(mirred_rec_level);
++ __this_cpu_dec(mirred_nest_level);
+ return TC_ACT_CONSUMED;
+ }
+ }
+@@ -322,7 +322,7 @@ out:
+ if (tcf_mirred_is_act_redirect(m_eaction))
+ retval = TC_ACT_SHOT;
+ }
+- __this_cpu_dec(mirred_rec_level);
++ __this_cpu_dec(mirred_nest_level);
+
+ return retval;
+ }
--- /dev/null
+From stable-owner@vger.kernel.org Tue May 16 20:01:48 2023
+From: Dragos-Marian Panait <dragos.panait@windriver.com>
+Date: Tue, 16 May 2023 22:00:38 +0300
+Subject: net/sched: act_mirred: refactor the handle of xmit
+To: stable@vger.kernel.org
+Cc: wenxu <wenxu@ucloud.cn>, Jakub Kicinski <kuba@kernel.org>, Jamal Hadi Salim <jhs@mojatatu.com>, Davide Caratti <dcaratti@redhat.com>, Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>, Paolo Abeni <pabeni@redhat.com>, William Zhao <wizhao@redhat.com>, Xin Long <lucien.xin@gmail.com>, "David S . Miller" <davem@davemloft.net>, Eric Dumazet <edumazet@google.com>, Cong Wang <xiyou.wangcong@gmail.com>, Jiri Pirko <jiri@resnulli.us>, Shuah Khan <shuah@kernel.org>, linux-kselftest@vger.kernel.org, netdev@vger.kernel.org
+Message-ID: <20230516190040.636627-2-dragos.panait@windriver.com>
+
+From: wenxu <wenxu@ucloud.cn>
+
+[ Upstream commit fa6d639930ee5cd3f932cc314f3407f07a06582d ]
+
+This is a preparation for the next patch.
+
+Signed-off-by: wenxu <wenxu@ucloud.cn>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[DP: adjusted context for linux-5.10.y]
+Signed-off-by: Dragos-Marian Panait <dragos.panait@windriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/sch_generic.h | 5 -----
+ net/sched/act_mirred.c | 21 +++++++++++++++------
+ 2 files changed, 15 insertions(+), 11 deletions(-)
+
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -1320,11 +1320,6 @@ void mini_qdisc_pair_init(struct mini_Qd
+ void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
+ struct tcf_block *block);
+
+-static inline int skb_tc_reinsert(struct sk_buff *skb, struct tcf_result *res)
+-{
+- return res->ingress ? netif_receive_skb(skb) : dev_queue_xmit(skb);
+-}
+-
+ /* Make sure qdisc is no longer in SCHED state. */
+ static inline void qdisc_synchronize(const struct Qdisc *q)
+ {
+--- a/net/sched/act_mirred.c
++++ b/net/sched/act_mirred.c
+@@ -206,6 +206,18 @@ release_idr:
+ return err;
+ }
+
++static int tcf_mirred_forward(bool want_ingress, struct sk_buff *skb)
++{
++ int err;
++
++ if (!want_ingress)
++ err = dev_queue_xmit(skb);
++ else
++ err = netif_receive_skb(skb);
++
++ return err;
++}
++
+ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
+ struct tcf_result *res)
+ {
+@@ -295,18 +307,15 @@ static int tcf_mirred_act(struct sk_buff
+ /* let's the caller reinsert the packet, if possible */
+ if (use_reinsert) {
+ res->ingress = want_ingress;
+- if (skb_tc_reinsert(skb, res))
++ err = tcf_mirred_forward(res->ingress, skb);
++ if (err)
+ tcf_action_inc_overlimit_qstats(&m->common);
+ __this_cpu_dec(mirred_rec_level);
+ return TC_ACT_CONSUMED;
+ }
+ }
+
+- if (!want_ingress)
+- err = dev_queue_xmit(skb2);
+- else
+- err = netif_receive_skb(skb2);
+-
++ err = tcf_mirred_forward(want_ingress, skb2);
+ if (err) {
+ out:
+ tcf_action_inc_overlimit_qstats(&m->common);
--- /dev/null
+From de3004c874e740304cc4f4a83d6200acb511bbda Mon Sep 17 00:00:00 2001
+From: Roberto Sassu <roberto.sassu@huawei.com>
+Date: Tue, 14 Mar 2023 09:17:16 +0100
+Subject: ocfs2: Switch to security_inode_init_security()
+
+From: Roberto Sassu <roberto.sassu@huawei.com>
+
+commit de3004c874e740304cc4f4a83d6200acb511bbda upstream.
+
+In preparation for removing security_old_inode_init_security(), switch to
+security_inode_init_security().
+
+Extend the existing ocfs2_initxattrs() to take the
+ocfs2_security_xattr_info structure from fs_info, and populate the
+name/value/len triple with the first xattr provided by LSMs.
+
+As fs_info was not used before, ocfs2_initxattrs() can now handle the case
+of replicating the behavior of security_old_inode_init_security(), i.e.
+just obtaining the xattr, in addition to setting all xattrs provided by
+LSMs.
+
+Multiple xattrs are currently not supported where
+security_old_inode_init_security() was called (mknod, symlink), as that
+requires non-trivial changes that can be done at a later time. Like for
+reiserfs, even if EVM is invoked, it will not provide an xattr (if it is
+not the first to set it, its xattr will be discarded; if it is the first,
+it does not have xattrs to calculate the HMAC on).
+
+Finally, since security_inode_init_security(), unlike
+security_old_inode_init_security(), returns zero instead of -EOPNOTSUPP if
+no xattrs were provided by LSMs or if inodes are private, additionally
+check in ocfs2_init_security_get() if the xattr name is set.
+
+If not, act as if security_old_inode_init_security() returned -EOPNOTSUPP,
+and set si->enable to zero to notify the functions following
+ocfs2_init_security_get() that no xattrs are available.
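+
+A condensed sketch of the new call site (see the xattr.c hunk below):
+
+  ret = security_inode_init_security(inode, dir, qstr,
+                                     &ocfs2_initxattrs, si);
+  /* ret == 0 with si->name still NULL means no LSM provided an xattr */
+  if (!ret && !si->name)
+          si->enable = 0;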
+
+Signed-off-by: Roberto Sassu <roberto.sassu@huawei.com>
+Reviewed-by: Casey Schaufler <casey@schaufler-ca.com>
+Acked-by: Joseph Qi <joseph.qi@linux.alibaba.com>
+Reviewed-by: Mimi Zohar <zohar@linux.ibm.com>
+Signed-off-by: Paul Moore <paul@paul-moore.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ocfs2/namei.c | 2 ++
+ fs/ocfs2/xattr.c | 30 ++++++++++++++++++++++++++----
+ 2 files changed, 28 insertions(+), 4 deletions(-)
+
+--- a/fs/ocfs2/namei.c
++++ b/fs/ocfs2/namei.c
+@@ -242,6 +242,7 @@ static int ocfs2_mknod(struct inode *dir
+ int want_meta = 0;
+ int xattr_credits = 0;
+ struct ocfs2_security_xattr_info si = {
++ .name = NULL,
+ .enable = 1,
+ };
+ int did_quota_inode = 0;
+@@ -1801,6 +1802,7 @@ static int ocfs2_symlink(struct inode *d
+ int want_clusters = 0;
+ int xattr_credits = 0;
+ struct ocfs2_security_xattr_info si = {
++ .name = NULL,
+ .enable = 1,
+ };
+ int did_quota = 0, did_quota_inode = 0;
+--- a/fs/ocfs2/xattr.c
++++ b/fs/ocfs2/xattr.c
+@@ -7260,9 +7260,21 @@ static int ocfs2_xattr_security_set(cons
+ static int ocfs2_initxattrs(struct inode *inode, const struct xattr *xattr_array,
+ void *fs_info)
+ {
++ struct ocfs2_security_xattr_info *si = fs_info;
+ const struct xattr *xattr;
+ int err = 0;
+
++ if (si) {
++ si->value = kmemdup(xattr_array->value, xattr_array->value_len,
++ GFP_KERNEL);
++ if (!si->value)
++ return -ENOMEM;
++
++ si->name = xattr_array->name;
++ si->value_len = xattr_array->value_len;
++ return 0;
++ }
++
+ for (xattr = xattr_array; xattr->name != NULL; xattr++) {
+ err = ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_SECURITY,
+ xattr->name, xattr->value,
+@@ -7278,13 +7290,23 @@ int ocfs2_init_security_get(struct inode
+ const struct qstr *qstr,
+ struct ocfs2_security_xattr_info *si)
+ {
++ int ret;
++
+ /* check whether ocfs2 support feature xattr */
+ if (!ocfs2_supports_xattr(OCFS2_SB(dir->i_sb)))
+ return -EOPNOTSUPP;
+- if (si)
+- return security_old_inode_init_security(inode, dir, qstr,
+- &si->name, &si->value,
+- &si->value_len);
++ if (si) {
++ ret = security_inode_init_security(inode, dir, qstr,
++ &ocfs2_initxattrs, si);
++ /*
++ * security_inode_init_security() does not return -EOPNOTSUPP,
++ * we have to check the xattr ourselves.
++ */
++ if (!ret && !si->name)
++ si->enable = 0;
++
++ return ret;
++ }
+
+ return security_inode_init_security(inode, dir, qstr,
+ &ocfs2_initxattrs, NULL);
 s390-qdio-fix-do_sqbs-inline-assembly-constraint.patch
 watchdog-sp5100_tco-immediately-trigger-upon-starting.patch
 arm-dts-stm32-fix-av96-board-sai2-pin-muxing-on-stm32mp15.patch
+writeback-cgroup-remove-extra-percpu_ref_exit.patch
+net-sched-act_mirred-refactor-the-handle-of-xmit.patch
+net-sched-act_mirred-better-wording-on-protection-against-excessive-stack-growth.patch
+act_mirred-use-the-backlog-for-nested-calls-to-mirred-ingress.patch
+spi-fsl-spi-re-organise-transfer-bits_per_word-adaptation.patch
+spi-fsl-cpm-use-16-bit-mode-for-large-transfers-with-even-size.patch
+ocfs2-switch-to-security_inode_init_security.patch
--- /dev/null
+From christophe.leroy@csgroup.eu Mon May 15 15:07:59 2023
+From: Christophe Leroy <christophe.leroy@csgroup.eu>
+Date: Mon, 15 May 2023 16:07:17 +0200
+Subject: [For 5.15/5.10/5.4] spi: fsl-cpm: Use 16 bit mode for large transfers with even size
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org
+Cc: Christophe Leroy <christophe.leroy@csgroup.eu>, linux-kernel@vger.kernel.org, linuxppc-dev@lists.ozlabs.org, Mark Brown <broonie@kernel.org>
+Message-ID: <3a1b8774ad7004acb594fbf220f98488dbaa2896.1684156552.git.christophe.leroy@csgroup.eu>
+
+From: Christophe Leroy <christophe.leroy@csgroup.eu>
+
+(cherry picked from upstream fc96ec826bced75cc6b9c07a4ac44bbf651337ab)
+
+On CPM, the RISC core is a lot more efficient when doing transfers
+in 16-bit chunks than in 8-bit chunks, but unfortunately the
+words need to be byte swapped, as seen in a previous commit.
+
+So, for large transfers with an even size, allocate a temporary tx
+buffer and byte-swap the data before and after the transfer.
+
+This change allows setting a higher speed for the transfer. For instance,
+on an MPC 8xx (CPM1 comms RISC processor), the documentation says that
+a transfer in byte mode at 1 kbit/s uses 0.200% of CPM load
+at 25 MHz while a word transfer at the same speed uses 0.032%
+of CPM load. This means the speed can be about 6 times higher
+(0.200 / 0.032 = 6.25) in word mode for the same CPM load.
+
+For the time being, only do it on CPM1, as there must be a
+trade-off between the CPM load reduction and the CPU load required
+to byte swap the data.
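+
+A condensed sketch of the TX side (see the spi-fsl-cpm.c hunk below):
+the data is swapped into a temporary buffer so the caller's buffer stays
+untouched, and the copy is always DMA-mapped:
+
+  dst = kmalloc(t->len, GFP_KERNEL);      /* temporary tx buffer */
+  if (!dst)
+          return -ENOMEM;
+  for (i = 0; i < t->len >> 1; i++)       /* swap to little endian */
+          dst[i] = cpu_to_le16p(src + i);
+  mspi->tx = dst;
+  mspi->map_tx_dma = 1;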
+
+Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
+Link: https://lore.kernel.org/r/f2e981f20f92dd28983c3949702a09248c23845c.1680371809.git.christophe.leroy@csgroup.eu
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/spi/spi-fsl-cpm.c | 23 +++++++++++++++++++++++
+ drivers/spi/spi-fsl-spi.c | 3 +++
+ 2 files changed, 26 insertions(+)
+
+--- a/drivers/spi/spi-fsl-cpm.c
++++ b/drivers/spi/spi-fsl-cpm.c
+@@ -21,6 +21,7 @@
+ #include <linux/spi/spi.h>
+ #include <linux/types.h>
+ #include <linux/platform_device.h>
++#include <linux/byteorder/generic.h>
+
+ #include "spi-fsl-cpm.h"
+ #include "spi-fsl-lib.h"
+@@ -120,6 +121,21 @@ int fsl_spi_cpm_bufs(struct mpc8xxx_spi
+ mspi->rx_dma = mspi->dma_dummy_rx;
+ mspi->map_rx_dma = 0;
+ }
++ if (t->bits_per_word == 16 && t->tx_buf) {
++ const u16 *src = t->tx_buf;
++ u16 *dst;
++ int i;
++
++ dst = kmalloc(t->len, GFP_KERNEL);
++ if (!dst)
++ return -ENOMEM;
++
++ for (i = 0; i < t->len >> 1; i++)
++ dst[i] = cpu_to_le16p(src + i);
++
++ mspi->tx = dst;
++ mspi->map_tx_dma = 1;
++ }
+
+ if (mspi->map_tx_dma) {
+ void *nonconst_tx = (void *)mspi->tx; /* shut up gcc */
+@@ -173,6 +189,13 @@ void fsl_spi_cpm_bufs_complete(struct mp
+ if (mspi->map_rx_dma)
+ dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE);
+ mspi->xfer_in_progress = NULL;
++
++ if (t->bits_per_word == 16 && t->rx_buf) {
++ int i;
++
++ for (i = 0; i < t->len; i += 2)
++ le16_to_cpus(t->rx_buf + i);
++ }
+ }
+ EXPORT_SYMBOL_GPL(fsl_spi_cpm_bufs_complete);
+
+--- a/drivers/spi/spi-fsl-spi.c
++++ b/drivers/spi/spi-fsl-spi.c
+@@ -368,6 +368,9 @@ static int fsl_spi_do_one_msg(struct spi
+ return -EINVAL;
+ if (t->bits_per_word == 16 || t->bits_per_word == 32)
+ t->bits_per_word = 8; /* pretend its 8 bits */
++ if (t->bits_per_word == 8 && t->len >= 256 &&
++ (mpc8xxx_spi->flags & SPI_CPM1))
++ t->bits_per_word = 16;
+ }
+ }
+
--- /dev/null
+From christophe.leroy@csgroup.eu Mon May 15 15:08:03 2023
+From: Christophe Leroy <christophe.leroy@csgroup.eu>
+Date: Mon, 15 May 2023 16:07:16 +0200
+Subject: [For 5.15/5.10/5.4] spi: fsl-spi: Re-organise transfer bits_per_word adaptation
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org
+Cc: Christophe Leroy <christophe.leroy@csgroup.eu>, linux-kernel@vger.kernel.org, linuxppc-dev@lists.ozlabs.org, Mark Brown <broonie@kernel.org>
+Message-ID: <1e4bfb4850ba849c316f48a0ab0d7123da0e2f54.1684156552.git.christophe.leroy@csgroup.eu>
+
+From: Christophe Leroy <christophe.leroy@csgroup.eu>
+
+(backported from upstream 8a5299a1278eadf1e08a598a5345c376206f171e)
+
+The fsl-spi driver performs bits_per_word modifications for different
+reasons:
+- in CPU mode, to minimise the number of interrupts
+- in CPM/QE mode, to work around the controller byte order
+
+For CPU mode that's done in fsl_spi_prepare_message(), while
+for CPM mode that's done in fsl_spi_setup_transfer().
+
+Reunify all of it in fsl_spi_prepare_message(), and catch
+impossible cases early through the master's bits_per_word_mask
+instead of returning -EINVAL later.
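+
+The early catch happens at probe time via the controller capabilities
+(condensed from the probe hunk below):
+
+  if (mpc8xxx_spi->flags & SPI_CPM_MODE)
+          master->bits_per_word_mask =
+                  SPI_BPW_RANGE_MASK(4, 8) | SPI_BPW_MASK(16) | SPI_BPW_MASK(32);
+  else
+          master->bits_per_word_mask =
+                  SPI_BPW_RANGE_MASK(4, 16) | SPI_BPW_MASK(32);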
+
+Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
+Link: https://lore.kernel.org/r/0ce96fe96e8b07cba0613e4097cfd94d09b8919a.1680371809.git.christophe.leroy@csgroup.eu
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/spi/spi-fsl-spi.c | 50 +++++++++++++++++++++-------------------------
+ 1 file changed, 23 insertions(+), 27 deletions(-)
+
+--- a/drivers/spi/spi-fsl-spi.c
++++ b/drivers/spi/spi-fsl-spi.c
+@@ -203,26 +203,6 @@ static int mspi_apply_cpu_mode_quirks(st
+ return bits_per_word;
+ }
+
+-static int mspi_apply_qe_mode_quirks(struct spi_mpc8xxx_cs *cs,
+- struct spi_device *spi,
+- int bits_per_word)
+-{
+- /* CPM/QE uses Little Endian for words > 8
+- * so transform 16 and 32 bits words into 8 bits
+- * Unfortnatly that doesn't work for LSB so
+- * reject these for now */
+- /* Note: 32 bits word, LSB works iff
+- * tfcr/rfcr is set to CPMFCR_GBL */
+- if (spi->mode & SPI_LSB_FIRST &&
+- bits_per_word > 8)
+- return -EINVAL;
+- if (bits_per_word <= 8)
+- return bits_per_word;
+- if (bits_per_word == 16 || bits_per_word == 32)
+- return 8; /* pretend its 8 bits */
+- return -EINVAL;
+-}
+-
+ static int fsl_spi_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+ {
+@@ -250,9 +230,6 @@ static int fsl_spi_setup_transfer(struct
+ bits_per_word = mspi_apply_cpu_mode_quirks(cs, spi,
+ mpc8xxx_spi,
+ bits_per_word);
+- else
+- bits_per_word = mspi_apply_qe_mode_quirks(cs, spi,
+- bits_per_word);
+
+ if (bits_per_word < 0)
+ return bits_per_word;
+@@ -370,14 +347,27 @@ static int fsl_spi_do_one_msg(struct spi
+ * In CPU mode, optimize large byte transfers to use larger
+ * bits_per_word values to reduce number of interrupts taken.
+ */
+- if (!(mpc8xxx_spi->flags & SPI_CPM_MODE)) {
+- list_for_each_entry(t, &m->transfers, transfer_list) {
++ list_for_each_entry(t, &m->transfers, transfer_list) {
++ if (!(mpc8xxx_spi->flags & SPI_CPM_MODE)) {
+ if (t->len < 256 || t->bits_per_word != 8)
+ continue;
+ if ((t->len & 3) == 0)
+ t->bits_per_word = 32;
+ else if ((t->len & 1) == 0)
+ t->bits_per_word = 16;
++ } else {
++ /*
++ * CPM/QE uses Little Endian for words > 8
++ * so transform 16 and 32 bits words into 8 bits
++ * Unfortnatly that doesn't work for LSB so
++ * reject these for now
++ * Note: 32 bits word, LSB works iff
++ * tfcr/rfcr is set to CPMFCR_GBL
++ */
++ if (m->spi->mode & SPI_LSB_FIRST && t->bits_per_word > 8)
++ return -EINVAL;
++ if (t->bits_per_word == 16 || t->bits_per_word == 32)
++ t->bits_per_word = 8; /* pretend its 8 bits */
+ }
+ }
+
+@@ -635,8 +625,14 @@ static struct spi_master *fsl_spi_probe(
+ if (mpc8xxx_spi->type == TYPE_GRLIB)
+ fsl_spi_grlib_probe(dev);
+
+- master->bits_per_word_mask =
+- (SPI_BPW_RANGE_MASK(4, 16) | SPI_BPW_MASK(32)) &
++ if (mpc8xxx_spi->flags & SPI_CPM_MODE)
++ master->bits_per_word_mask =
++ (SPI_BPW_RANGE_MASK(4, 8) | SPI_BPW_MASK(16) | SPI_BPW_MASK(32));
++ else
++ master->bits_per_word_mask =
++ (SPI_BPW_RANGE_MASK(4, 16) | SPI_BPW_MASK(32));
++
++ master->bits_per_word_mask &=
+ SPI_BPW_RANGE_MASK(1, mpc8xxx_spi->max_bits_per_word);
+
+ if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE)
--- /dev/null
+From 416e0e8ab5ff41676d04dc819bd667c6ad3f7555 Mon Sep 17 00:00:00 2001
+From: Greg Thelen <gthelen@google.com>
+Date: Sat, 20 May 2023 12:46:24 -0700
+Subject: writeback, cgroup: remove extra percpu_ref_exit()
+
+From: Greg Thelen <gthelen@google.com>
+
+5.10 stable commit 2b00b2a0e642 ("writeback, cgroup: fix null-ptr-deref
+write in bdi_split_work_to_wbs") is a backport of upstream 6.3 commit
+1ba1199ec574.
+
+In the 5.10 stable backport, percpu_ref_exit() is called twice: first in
+cgwb_release_workfn() and then in cgwb_free_rcu(). The second call is
+benign, as percpu_ref_exit() internally detects there's nothing to do.
+
+This fixes a non-upstream issue that only applies to 5.10.y.
+
+Fixes: 2b00b2a0e642 ("writeback, cgroup: fix null-ptr-deref write in bdi_split_work_to_wbs")
+Signed-off-by: Greg Thelen <gthelen@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/backing-dev.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/mm/backing-dev.c
++++ b/mm/backing-dev.c
+@@ -404,7 +404,6 @@ static void cgwb_release_workfn(struct w
+ blkcg_unpin_online(blkcg);
+
+ fprop_local_destroy_percpu(&wb->memcg_completions);
+- percpu_ref_exit(&wb->refcnt);
+ wb_exit(wb);
+ call_rcu(&wb->rcu, cgwb_free_rcu);
+ }