--- /dev/null
+From dfce9cb3140592b886838e06f3e0c25fea2a9cae Mon Sep 17 00:00:00 2001
+From: Yonghong Song <yonghong.song@linux.dev>
+Date: Thu, 30 Nov 2023 18:46:40 -0800
+Subject: bpf: Fix a verifier bug due to incorrect branch offset comparison with cpu=v4
+
+From: Yonghong Song <yonghong.song@linux.dev>
+
+commit dfce9cb3140592b886838e06f3e0c25fea2a9cae upstream.
+
+Bpf cpu=v4 support is introduced in [1] and Commit 4cd58e9af8b9
+("bpf: Support new 32bit offset jmp instruction") added support for new
+32bit offset jmp instruction. Unfortunately, in function
+bpf_adj_delta_to_off(), for new branch insn with 32bit offset, the offset
+(plus/minus a small delta) compares to 16-bit offset bound
+[S16_MIN, S16_MAX], which caused the following verification failure:
+ $ ./test_progs-cpuv4 -t verif_scale_pyperf180
+ ...
+ insn 10 cannot be patched due to 16-bit range
+ ...
+ libbpf: failed to load object 'pyperf180.bpf.o'
+ scale_test:FAIL:expect_success unexpected error: -12 (errno 12)
+ #405 verif_scale_pyperf180:FAIL
+
+Note that due to recent llvm18 development, the patch [2] (already applied
+in bpf-next) needs to be applied to bpf tree for testing purpose.
+
+The fix is rather simple. For 32bit offset branch insn, the adjusted
+offset compares to [S32_MIN, S32_MAX] and then verification succeeded.
+
+ [1] https://lore.kernel.org/all/20230728011143.3710005-1-yonghong.song@linux.dev
+ [2] https://lore.kernel.org/bpf/20231110193644.3130906-1-yonghong.song@linux.dev
+
+Fixes: 4cd58e9af8b9 ("bpf: Support new 32bit offset jmp instruction")
+Signed-off-by: Yonghong Song <yonghong.song@linux.dev>
+Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
+Link: https://lore.kernel.org/bpf/20231201024640.3417057-1-yonghong.song@linux.dev
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/core.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -365,14 +365,18 @@ static int bpf_adj_delta_to_imm(struct b
+ static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
+ s32 end_new, s32 curr, const bool probe_pass)
+ {
+- const s32 off_min = S16_MIN, off_max = S16_MAX;
++ s64 off_min, off_max, off;
+ s32 delta = end_new - end_old;
+- s32 off;
+
+- if (insn->code == (BPF_JMP32 | BPF_JA))
++ if (insn->code == (BPF_JMP32 | BPF_JA)) {
+ off = insn->imm;
+- else
++ off_min = S32_MIN;
++ off_max = S32_MAX;
++ } else {
+ off = insn->off;
++ off_min = S16_MIN;
++ off_max = S16_MAX;
++ }
+
+ if (curr < pos && curr + off + 1 >= end_old)
+ off += delta;
--- /dev/null
+From 8d6650646ce49e9a5b8c5c23eb94f74b1749f70f Mon Sep 17 00:00:00 2001
+From: John Fastabend <john.fastabend@gmail.com>
+Date: Fri, 1 Dec 2023 10:01:38 -0800
+Subject: bpf: syzkaller found null ptr deref in unix_bpf proto add
+
+From: John Fastabend <john.fastabend@gmail.com>
+
+commit 8d6650646ce49e9a5b8c5c23eb94f74b1749f70f upstream.
+
+I added logic to track the sock pair for stream_unix sockets so that we
+ensure lifetime of the sock matches the time a sockmap could reference
+the sock (see fixes tag). I forgot though that we allow af_unix unconnected
+sockets into a sock{map|hash} map.
+
+This is problematic because the previous fix expected sk_pair() to exist
+and did not NULL check it. Because unconnected sockets have a NULL
+sk_pair this resulted in the NULL ptr dereference found by syzkaller.
+
+BUG: KASAN: null-ptr-deref in unix_stream_bpf_update_proto+0x72/0x430 net/unix/unix_bpf.c:171
+Write of size 4 at addr 0000000000000080 by task syz-executor360/5073
+Call Trace:
+ <TASK>
+ ...
+ sock_hold include/net/sock.h:777 [inline]
+ unix_stream_bpf_update_proto+0x72/0x430 net/unix/unix_bpf.c:171
+ sock_map_init_proto net/core/sock_map.c:190 [inline]
+ sock_map_link+0xb87/0x1100 net/core/sock_map.c:294
+ sock_map_update_common+0xf6/0x870 net/core/sock_map.c:483
+ sock_map_update_elem_sys+0x5b6/0x640 net/core/sock_map.c:577
+ bpf_map_update_value+0x3af/0x820 kernel/bpf/syscall.c:167
+
+We considered just checking for the null ptr and skipping taking a ref
+on the NULL peer sock. But, if the socket is then connected() after
+being added to the sockmap we can cause the original issue again. So
+instead this patch blocks adding af_unix sockets that are not in the
+ESTABLISHED state.
+
+Reported-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot+e8030702aefd3444fb9e@syzkaller.appspotmail.com
+Fixes: 8866730aed51 ("bpf, sockmap: af_unix stream sockets need to hold ref for pair sock")
+Acked-by: Jakub Sitnicki <jakub@cloudflare.com>
+Signed-off-by: John Fastabend <john.fastabend@gmail.com>
+Link: https://lore.kernel.org/r/20231201180139.328529-2-john.fastabend@gmail.com
+Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/sock.h | 5 +++++
+ net/core/sock_map.c | 2 ++
+ 2 files changed, 7 insertions(+)
+
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -2829,6 +2829,11 @@ static inline bool sk_is_tcp(const struc
+ return sk->sk_type == SOCK_STREAM && sk->sk_protocol == IPPROTO_TCP;
+ }
+
++static inline bool sk_is_stream_unix(const struct sock *sk)
++{
++ return sk->sk_family == AF_UNIX && sk->sk_type == SOCK_STREAM;
++}
++
+ /**
+ * sk_eat_skb - Release a skb if it is no longer needed
+ * @sk: socket to eat this skb from
+--- a/net/core/sock_map.c
++++ b/net/core/sock_map.c
+@@ -538,6 +538,8 @@ static bool sock_map_sk_state_allowed(co
+ {
+ if (sk_is_tcp(sk))
+ return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_LISTEN);
++ if (sk_is_stream_unix(sk))
++ return (1 << sk->sk_state) & TCPF_ESTABLISHED;
+ return true;
+ }
+
--- /dev/null
+From 091a4dfbb1d32b06c031edbfe2a44af100c4604f Mon Sep 17 00:00:00 2001
+From: Chao Yu <chao@kernel.org>
+Date: Mon, 21 Aug 2023 23:22:25 +0800
+Subject: f2fs: compress: fix to assign compress_level for lz4 correctly
+
+From: Chao Yu <chao@kernel.org>
+
+commit 091a4dfbb1d32b06c031edbfe2a44af100c4604f upstream.
+
+After remount, F2FS_OPTION().compress_level was assigned to
+LZ4HC_DEFAULT_CLEVEL incorrectly, resulting in lz4hc:9 being enabled; fix it.
+
+1. mount /dev/vdb
+/dev/vdb on /mnt/f2fs type f2fs (...,compress_algorithm=lz4,compress_log_size=2,...)
+2. mount -t f2fs -o remount,compress_log_size=3 /mnt/f2fs/
+3. mount|grep f2fs
+/dev/vdb on /mnt/f2fs type f2fs (...,compress_algorithm=lz4:9,compress_log_size=3,...)
+
+Fixes: 00e120b5e4b5 ("f2fs: assign default compression level")
+Signed-off-by: Chao Yu <chao@kernel.org>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/f2fs/super.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -615,7 +615,7 @@ static int f2fs_set_lz4hc_level(struct f
+ unsigned int level;
+
+ if (strlen(str) == 3) {
+- F2FS_OPTION(sbi).compress_level = LZ4HC_DEFAULT_CLEVEL;
++ F2FS_OPTION(sbi).compress_level = 0;
+ return 0;
+ }
+
--- /dev/null
+From 188a569658584e93930ab60334c5a1079c0330d8 Mon Sep 17 00:00:00 2001
+From: Ingo Molnar <mingo@kernel.org>
+Date: Wed, 18 Jan 2023 12:14:01 +0100
+Subject: genirq/affinity: Only build SMP-only helper functions on SMP kernels
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ingo Molnar <mingo@kernel.org>
+
+commit 188a569658584e93930ab60334c5a1079c0330d8 upstream.
+
+allnoconfig grew these new build warnings in lib/group_cpus.c:
+
+ lib/group_cpus.c:247:12: warning: ‘__group_cpus_evenly’ defined but not used [-Wunused-function]
+ lib/group_cpus.c:75:13: warning: ‘build_node_to_cpumask’ defined but not used [-Wunused-function]
+ lib/group_cpus.c:66:13: warning: ‘free_node_to_cpumask’ defined but not used [-Wunused-function]
+ lib/group_cpus.c:43:23: warning: ‘alloc_node_to_cpumask’ defined but not used [-Wunused-function]
+
+Widen the #ifdef CONFIG_SMP block to not expose unused helpers on
+non-SMP builds.
+
+Also annotate the preprocessor branches for better readability.
+
+Fixes: f7b3ea8cf72f ("genirq/affinity: Move group_cpus_evenly() into lib/")
+Cc: Ming Lei <ming.lei@redhat.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lore.kernel.org/r/20221227022905.352674-6-ming.lei@redhat.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ lib/group_cpus.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/lib/group_cpus.c
++++ b/lib/group_cpus.c
+@@ -9,6 +9,8 @@
+ #include <linux/sort.h>
+ #include <linux/group_cpus.h>
+
++#ifdef CONFIG_SMP
++
+ static void grp_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
+ unsigned int cpus_per_grp)
+ {
+@@ -327,7 +329,6 @@ static int __group_cpus_evenly(unsigned
+ return done;
+ }
+
+-#ifdef CONFIG_SMP
+ /**
+ * group_cpus_evenly - Group all CPUs evenly per NUMA/CPU locality
+ * @numgrps: number of groups
+@@ -422,7 +423,7 @@ struct cpumask *group_cpus_evenly(unsign
+ }
+ return masks;
+ }
+-#else
++#else /* CONFIG_SMP */
+ struct cpumask *group_cpus_evenly(unsigned int numgrps)
+ {
+ struct cpumask *masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL);
+@@ -434,4 +435,4 @@ struct cpumask *group_cpus_evenly(unsign
+ cpumask_copy(&masks[0], cpu_possible_mask);
+ return masks;
+ }
+-#endif
++#endif /* CONFIG_SMP */
--- /dev/null
+From f910d3ba78a2677c23508f225eb047d89eb4b2b6 Mon Sep 17 00:00:00 2001
+From: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
+Date: Thu, 28 Sep 2023 01:58:25 +0100
+Subject: media: qcom: camss: Comment CSID dt_id field
+
+From: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
+
+commit f910d3ba78a2677c23508f225eb047d89eb4b2b6 upstream.
+
+Digging into the documentation we find that the DT_ID bitfield is used to
+map the six bit DT to a two bit ID code. This value is concatenated to the
+VC bitfield to create a CID value. DT_ID is the two least significant bits
+of CID and VC the most significant bits.
+
+Originally we set dt_id = vc * 4 and then subsequently set dt_id = vc.
+
+commit 3c4ed72a16bc ("media: camss: sm8250: Virtual channels for CSID")
+silently fixed the multiplication by four which would give a better
+value for the generated CID without mentioning what was being done or why.
+
+Next up I haplessly changed the value back to "dt_id = vc * 4" since there
+didn't appear to be any logic behind it.
+
+Hans asked what the change was for and I honestly couldn't remember the
+provenance of it, so I dug in.
+
+Link: https://lore.kernel.org/linux-arm-msm/edd4bf9b-0e1b-883c-1a4d-50f4102c3924@xs4all.nl/
+
+Add a comment so the next hapless programmer doesn't make this same
+mistake.
+
+Signed-off-by: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/platform/qcom/camss/camss-csid-gen2.c | 14 +++++++++++++-
+ 1 file changed, 13 insertions(+), 1 deletion(-)
+
+--- a/drivers/media/platform/qcom/camss/camss-csid-gen2.c
++++ b/drivers/media/platform/qcom/camss/camss-csid-gen2.c
+@@ -352,7 +352,19 @@ static void __csid_configure_stream(stru
+ phy_sel = csid->phy.csiphy_id;
+
+ if (enable) {
+- u8 dt_id = vc;
++ /*
++ * DT_ID is a two bit bitfield that is concatenated with
++ * the four least significant bits of the five bit VC
++ * bitfield to generate an internal CID value.
++ *
++ * CSID_RDI_CFG0(vc)
++ * DT_ID : 28:27
++ * VC : 26:22
++ * DT : 21:16
++ *
++ * CID : VC 3:0 << 2 | DT_ID 1:0
++ */
++ u8 dt_id = vc & 0x03;
+
+ if (tg->enabled) {
+ /* configure one DT, infinite frames */
--- /dev/null
+From a63b6622120cd03a304796dbccb80655b3a21798 Mon Sep 17 00:00:00 2001
+From: Vlad Buslov <vladbu@nvidia.com>
+Date: Tue, 24 Oct 2023 21:58:57 +0200
+Subject: net/sched: act_ct: additional checks for outdated flows
+
+From: Vlad Buslov <vladbu@nvidia.com>
+
+commit a63b6622120cd03a304796dbccb80655b3a21798 upstream.
+
+Current nf_flow_is_outdated() implementation considers any flow table flow
+which state diverged from its underlying CT connection status for teardown
+which can be problematic in the following cases:
+
+- Flow has never been offloaded to hardware in the first place either
+because flow table has hardware offload disabled (flag
+NF_FLOWTABLE_HW_OFFLOAD is not set) or because it is still pending on 'add'
+workqueue to be offloaded for the first time. The former is incorrect, the
+later generates excessive deletions and additions of flows.
+
+- Flow is already pending to be updated on the workqueue. Tearing down such
+flows will also generate excessive removals from the flow table, especially
+on highly loaded system where the latency to re-offload a flow via 'add'
+workqueue can be quite high.
+
+When considering a flow for teardown as outdated verify that it is both
+offloaded to hardware and doesn't have any pending updates.
+
+Fixes: 41f2c7c342d3 ("net/sched: act_ct: Fix promotion of offloaded unreplied tuple")
+Reviewed-by: Paul Blakey <paulb@nvidia.com>
+Signed-off-by: Vlad Buslov <vladbu@nvidia.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/act_ct.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/net/sched/act_ct.c
++++ b/net/sched/act_ct.c
+@@ -277,6 +277,8 @@ err_nat:
+ static bool tcf_ct_flow_is_outdated(const struct flow_offload *flow)
+ {
+ return test_bit(IPS_SEEN_REPLY_BIT, &flow->ct->status) &&
++ test_bit(IPS_HW_OFFLOAD_BIT, &flow->ct->status) &&
++ !test_bit(NF_FLOW_HW_PENDING, &flow->flags) &&
+ !test_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags);
+ }
+
--- /dev/null
+From 9bc64bd0cd765f696fcd40fc98909b1f7c73b2ba Mon Sep 17 00:00:00 2001
+From: Vlad Buslov <vladbu@nvidia.com>
+Date: Fri, 3 Nov 2023 16:14:10 +0100
+Subject: net/sched: act_ct: Always fill offloading tuple iifidx
+
+From: Vlad Buslov <vladbu@nvidia.com>
+
+commit 9bc64bd0cd765f696fcd40fc98909b1f7c73b2ba upstream.
+
+Referenced commit doesn't always set iifidx when offloading the flow to
+hardware. Fix the following cases:
+
+- nf_conn_act_ct_ext_fill() is called before extension is created with
+nf_conn_act_ct_ext_add() in tcf_ct_act(). This can cause rule offload with
+unspecified iifidx when connection is offloaded after only single
+original-direction packet has been processed by tc data path. Always fill
+the new nf_conn_act_ct_ext instance after creating it in
+nf_conn_act_ct_ext_add().
+
+- Offloading of unidirectional UDP NEW connections is now supported, but ct
+flow iifidx field is not updated when connection is promoted to
+bidirectional, which can result in reply-direction iifidx being zero when
+refreshing the connection. Fill in the extension and update flow iifidx
+before calling flow_offload_refresh().
+
+Fixes: 9795ded7f924 ("net/sched: act_ct: Fill offloading tuple iifidx")
+Reviewed-by: Paul Blakey <paulb@nvidia.com>
+Signed-off-by: Vlad Buslov <vladbu@nvidia.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Fixes: 6a9bad0069cf ("net/sched: act_ct: offload UDP NEW connections")
+Link: https://lore.kernel.org/r/20231103151410.764271-1-vladbu@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/netfilter/nf_conntrack_act_ct.h | 30 +++++++++++++++-------------
+ net/openvswitch/conntrack.c | 2 -
+ net/sched/act_ct.c | 15 +++++++++++++-
+ 3 files changed, 32 insertions(+), 15 deletions(-)
+
+--- a/include/net/netfilter/nf_conntrack_act_ct.h
++++ b/include/net/netfilter/nf_conntrack_act_ct.h
+@@ -20,7 +20,22 @@ static inline struct nf_conn_act_ct_ext
+ #endif
+ }
+
+-static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct nf_conn *ct)
++static inline void nf_conn_act_ct_ext_fill(struct sk_buff *skb, struct nf_conn *ct,
++ enum ip_conntrack_info ctinfo)
++{
++#if IS_ENABLED(CONFIG_NET_ACT_CT)
++ struct nf_conn_act_ct_ext *act_ct_ext;
++
++ act_ct_ext = nf_conn_act_ct_ext_find(ct);
++ if (dev_net(skb->dev) == &init_net && act_ct_ext)
++ act_ct_ext->ifindex[CTINFO2DIR(ctinfo)] = skb->dev->ifindex;
++#endif
++}
++
++static inline struct
++nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct sk_buff *skb,
++ struct nf_conn *ct,
++ enum ip_conntrack_info ctinfo)
+ {
+ #if IS_ENABLED(CONFIG_NET_ACT_CT)
+ struct nf_conn_act_ct_ext *act_ct = nf_ct_ext_find(ct, NF_CT_EXT_ACT_CT);
+@@ -29,22 +44,11 @@ static inline struct nf_conn_act_ct_ext
+ return act_ct;
+
+ act_ct = nf_ct_ext_add(ct, NF_CT_EXT_ACT_CT, GFP_ATOMIC);
++ nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
+ return act_ct;
+ #else
+ return NULL;
+ #endif
+ }
+
+-static inline void nf_conn_act_ct_ext_fill(struct sk_buff *skb, struct nf_conn *ct,
+- enum ip_conntrack_info ctinfo)
+-{
+-#if IS_ENABLED(CONFIG_NET_ACT_CT)
+- struct nf_conn_act_ct_ext *act_ct_ext;
+-
+- act_ct_ext = nf_conn_act_ct_ext_find(ct);
+- if (dev_net(skb->dev) == &init_net && act_ct_ext)
+- act_ct_ext->ifindex[CTINFO2DIR(ctinfo)] = skb->dev->ifindex;
+-#endif
+-}
+-
+ #endif /* _NF_CONNTRACK_ACT_CT_H */
+--- a/net/openvswitch/conntrack.c
++++ b/net/openvswitch/conntrack.c
+@@ -1252,7 +1252,7 @@ static int ovs_ct_commit(struct net *net
+ if (err)
+ return err;
+
+- nf_conn_act_ct_ext_add(ct);
++ nf_conn_act_ct_ext_add(skb, ct, ctinfo);
+ } else if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
+ labels_nonzero(&info->labels.mask)) {
+ err = ovs_ct_set_labels(ct, key, &info->labels.value,
+--- a/net/sched/act_ct.c
++++ b/net/sched/act_ct.c
+@@ -394,6 +394,17 @@ static void tcf_ct_flow_tc_ifidx(struct
+ entry->tuplehash[dir].tuple.tc.iifidx = act_ct_ext->ifindex[dir];
+ }
+
++static void tcf_ct_flow_ct_ext_ifidx_update(struct flow_offload *entry)
++{
++ struct nf_conn_act_ct_ext *act_ct_ext;
++
++ act_ct_ext = nf_conn_act_ct_ext_find(entry->ct);
++ if (act_ct_ext) {
++ tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_ORIGINAL);
++ tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_REPLY);
++ }
++}
++
+ static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
+ struct nf_conn *ct,
+ bool tcp, bool bidirectional)
+@@ -689,6 +700,8 @@ static bool tcf_ct_flow_table_lookup(str
+ else
+ ctinfo = IP_CT_ESTABLISHED_REPLY;
+
++ nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
++ tcf_ct_flow_ct_ext_ifidx_update(flow);
+ flow_offload_refresh(nf_ft, flow, force_refresh);
+ if (!test_bit(IPS_ASSURED_BIT, &ct->status)) {
+ /* Process this flow in SW to allow promoting to ASSURED */
+@@ -1191,7 +1204,7 @@ do_nat:
+ tcf_ct_act_set_labels(ct, p->labels, p->labels_mask);
+
+ if (!nf_ct_is_confirmed(ct))
+- nf_conn_act_ct_ext_add(ct);
++ nf_conn_act_ct_ext_add(skb, ct, ctinfo);
+
+ /* This will take care of sending queued events
+ * even if the connection is already confirmed.
mmc-rpmb-fixes-pause-retune-on-all-rpmb-partitions.patch
mmc-core-cancel-delayed-work-before-releasing-host.patch
mmc-sdhci-sprd-fix-emmc-init-failure-after-hw-reset.patch
+genirq-affinity-only-build-smp-only-helper-functions-on-smp-kernels.patch
+f2fs-compress-fix-to-assign-compress_level-for-lz4-correctly.patch
+net-sched-act_ct-additional-checks-for-outdated-flows.patch
+net-sched-act_ct-always-fill-offloading-tuple-iifidx.patch
+bpf-fix-a-verifier-bug-due-to-incorrect-branch-offset-comparison-with-cpu-v4.patch
+bpf-syzkaller-found-null-ptr-deref-in-unix_bpf-proto-add.patch
+media-qcom-camss-comment-csid-dt_id-field.patch