--- /dev/null
+From a181473588c13acb77f5832fec31b4d41909b92e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Dec 2024 15:19:41 +0000
+Subject: arm64: Ensure bits ASID[15:8] are masked out when the kernel uses
+ 8-bit ASIDs
+
+From: Catalin Marinas <catalin.marinas@arm.com>
+
+[ Upstream commit c0900d15d31c2597dd9f634c8be2b71762199890 ]
+
+Linux currently sets the TCR_EL1.AS bit unconditionally during CPU
+bring-up. On an 8-bit ASID CPU, this is RES0 and ignored, otherwise
+16-bit ASIDs are enabled. However, if running in a VM and the hypervisor
+reports 8-bit ASIDs (ID_AA64MMFR0_EL1.ASIDBits == 0) on a 16-bit ASID
+CPU, Linux uses bits 8 to 63 as a generation number for tracking old
+process ASIDs. The bottom 8 bits of this generation end up being written
+to TTBR1_EL1 and also used for the ASID-based TLBI operations as the
+upper 8 bits of the ASID. Following an ASID roll-over event we can have
+threads of the same application with the same 8-bit ASID but different
+generation numbers running on separate CPUs. Both TLB caching and the
+TLBI operations will end up using different actual 16-bit ASIDs for the
+same process.
+
+A similar scenario can happen in a big.LITTLE configuration if the boot
+CPU only uses 8-bit ASIDs while secondary CPUs have 16-bit ASIDs.
+
+Ensure that the ASID generation is only tracked by bits 16 and up,
+leaving bits 15:8 as 0 if the kernel uses 8-bit ASIDs. Note that
+clearing TCR_EL1.AS is not sufficient since the architecture requires
+that the top 8 bits of the ASID passed to TLBI instructions are 0 rather
+than ignored in such a configuration.
+
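+To see the overlap concretely, the generation arithmetic can be
+modelled in a few lines of userspace C. This is a minimal sketch, not
+kernel code: asid_bits, the 0x42 ASID value and the macro names merely
+mirror the scheme described above.
+
+'''
+#include <stdio.h>
+
+/* Illustrative userspace model of the generation arithmetic. */
+static const unsigned long asid_bits = 8; /* hypervisor reports 8-bit ASIDs */
+
+#define OLD_ASID_FIRST_VERSION (1UL << asid_bits)  /* before the fix */
+#define NEW_ASID_FIRST_VERSION (1UL << 16)         /* after the fix  */
+
+int main(void)
+{
+    unsigned long asid = 0x42;  /* same 8-bit ASID, two generations */
+
+    /* Before: the generation starts at bit 8, so a 16-bit ASID CPU sees
+     * two different hardware ASIDs (0x0142 vs 0x0242) for one process. */
+    printf("old: %#06lx vs %#06lx\n",
+           ((1 * OLD_ASID_FIRST_VERSION) | asid) & 0xffff,
+           ((2 * OLD_ASID_FIRST_VERSION) | asid) & 0xffff);
+
+    /* After: the generation starts at bit 16, bits 15:8 stay zero and
+     * both generations present the same hardware ASID 0x0042. */
+    printf("new: %#06lx vs %#06lx\n",
+           ((1 * NEW_ASID_FIRST_VERSION) | asid) & 0xffff,
+           ((2 * NEW_ASID_FIRST_VERSION) | asid) & 0xffff);
+    return 0;
+}
+'''
+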
+Cc: stable@vger.kernel.org
+Cc: Will Deacon <will@kernel.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Marc Zyngier <maz@kernel.org>
+Cc: James Morse <james.morse@arm.com>
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Marc Zyngier <maz@kernel.org>
+Link: https://lore.kernel.org/r/20241203151941.353796-1-catalin.marinas@arm.com
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/mm/context.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
+index 171f2fcd3cf2..4115c40a3ccc 100644
+--- a/arch/arm64/mm/context.c
++++ b/arch/arm64/mm/context.c
+@@ -32,9 +32,9 @@ static unsigned long nr_pinned_asids;
+ static unsigned long *pinned_asid_map;
+
+ #define ASID_MASK (~GENMASK(asid_bits - 1, 0))
+-#define ASID_FIRST_VERSION (1UL << asid_bits)
++#define ASID_FIRST_VERSION (1UL << 16)
+
+-#define NUM_USER_ASIDS ASID_FIRST_VERSION
++#define NUM_USER_ASIDS (1UL << asid_bits)
+ #define ctxid2asid(asid) ((asid) & ~ASID_MASK)
+ #define asid2ctxid(asid, genid) ((asid) | (genid))
+
+--
+2.39.5
+
--- /dev/null
+From 2a7ff7de773ea7841398c01821158eb09302892e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 9 Dec 2021 09:42:25 +0800
+Subject: arm64: mm: Rename asid2idx() to ctxid2asid()
+
+From: Yunfeng Ye <yeyunfeng@huawei.com>
+
+[ Upstream commit a3a5b763410c7bceacf41a52071134d9dc26202a ]
+
+Commit 0c8ea531b774 ("arm64: mm: Allocate ASIDs in pairs") introduced
+the asid2idx() and idx2asid() macros, but these macros have not been
+really useful since commit f88f42f853a8 ("arm64: context: Free up
+kernel ASIDs if KPTI is not in use").
+
+The open-coded "(asid & ~ASID_MASK)" can be replaced by a macro that is
+identical to asid2idx(), so rename asid2idx() to ctxid2asid() to make
+its meaning clearer.
+
+Also add an asid2ctxid() macro, which generates the context id from the
+asid and the generation.
+
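+As a quick illustration of the renamed macros, the following userspace
+sketch (assuming asid_bits == 16; not kernel code) shows that
+ctxid2asid() and asid2ctxid() split and recombine a context id without
+losing either half:
+
+'''
+#include <assert.h>
+
+/* Illustrative copies of the macros, assuming asid_bits == 16. */
+#define ASID_BITS   16
+#define ASID_MASK   (~((1UL << ASID_BITS) - 1))
+#define ctxid2asid(asid)        ((asid) & ~ASID_MASK)
+#define asid2ctxid(asid, genid) ((asid) | (genid))
+
+int main(void)
+{
+    unsigned long gen  = 3UL << ASID_BITS; /* generation sits above the ASID */
+    unsigned long asid = 0x1234;
+    unsigned long ctxid = asid2ctxid(asid, gen);
+
+    assert(ctxid2asid(ctxid) == asid);   /* the ASID is recovered intact */
+    assert((ctxid & ASID_MASK) == gen);  /* the generation is preserved */
+    return 0;
+}
+'''
+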
+Signed-off-by: Yunfeng Ye <yeyunfeng@huawei.com>
+Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
+Link: https://lore.kernel.org/r/c31516eb-6d15-94e0-421c-305fc010ea79@huawei.com
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Stable-dep-of: c0900d15d31c ("arm64: Ensure bits ASID[15:8] are masked out when the kernel uses 8-bit ASIDs")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/mm/context.c | 18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
+index 001737a8f309..171f2fcd3cf2 100644
+--- a/arch/arm64/mm/context.c
++++ b/arch/arm64/mm/context.c
+@@ -35,8 +35,8 @@ static unsigned long *pinned_asid_map;
+ #define ASID_FIRST_VERSION (1UL << asid_bits)
+
+ #define NUM_USER_ASIDS ASID_FIRST_VERSION
+-#define asid2idx(asid) ((asid) & ~ASID_MASK)
+-#define idx2asid(idx) asid2idx(idx)
++#define ctxid2asid(asid) ((asid) & ~ASID_MASK)
++#define asid2ctxid(asid, genid) ((asid) | (genid))
+
+ /* Get the ASIDBits supported by the current CPU */
+ static u32 get_cpu_asid_bits(void)
+@@ -120,7 +120,7 @@ static void flush_context(void)
+ */
+ if (asid == 0)
+ asid = per_cpu(reserved_asids, i);
+- __set_bit(asid2idx(asid), asid_map);
++ __set_bit(ctxid2asid(asid), asid_map);
+ per_cpu(reserved_asids, i) = asid;
+ }
+
+@@ -162,7 +162,7 @@ static u64 new_context(struct mm_struct *mm)
+ u64 generation = atomic64_read(&asid_generation);
+
+ if (asid != 0) {
+- u64 newasid = generation | (asid & ~ASID_MASK);
++ u64 newasid = asid2ctxid(ctxid2asid(asid), generation);
+
+ /*
+ * If our current ASID was active during a rollover, we
+@@ -183,7 +183,7 @@ static u64 new_context(struct mm_struct *mm)
+ * We had a valid ASID in a previous life, so try to re-use
+ * it if possible.
+ */
+- if (!__test_and_set_bit(asid2idx(asid), asid_map))
++ if (!__test_and_set_bit(ctxid2asid(asid), asid_map))
+ return newasid;
+ }
+
+@@ -209,7 +209,7 @@ static u64 new_context(struct mm_struct *mm)
+ set_asid:
+ __set_bit(asid, asid_map);
+ cur_idx = asid;
+- return idx2asid(asid) | generation;
++ return asid2ctxid(asid, generation);
+ }
+
+ void check_and_switch_context(struct mm_struct *mm)
+@@ -300,13 +300,13 @@ unsigned long arm64_mm_context_get(struct mm_struct *mm)
+ }
+
+ nr_pinned_asids++;
+- __set_bit(asid2idx(asid), pinned_asid_map);
++ __set_bit(ctxid2asid(asid), pinned_asid_map);
+ refcount_set(&mm->context.pinned, 1);
+
+ out_unlock:
+ raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
+
+- asid &= ~ASID_MASK;
++ asid = ctxid2asid(asid);
+
+ /* Set the equivalent of USER_ASID_BIT */
+ if (asid && arm64_kernel_unmapped_at_el0())
+@@ -327,7 +327,7 @@ void arm64_mm_context_put(struct mm_struct *mm)
+ raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+
+ if (refcount_dec_and_test(&mm->context.pinned)) {
+- __clear_bit(asid2idx(asid), pinned_asid_map);
++ __clear_bit(ctxid2asid(asid), pinned_asid_map);
+ nr_pinned_asids--;
+ }
+
+--
+2.39.5
+
--- /dev/null
+From f57edc7627cc2a70cbaa41f223c2aa7fed62e345 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 29 Dec 2024 00:44:15 +0530
+Subject: bpf: fix recursive lock when verdict program return SK_PASS
+
+From: Jiayuan Chen <mrpre@163.com>
+
+commit 8ca2a1eeadf09862190b2810697702d803ceef2d upstream.
+
+When the stream_verdict program returns SK_PASS, the received skb is
+placed into the socket's own receive queue, but sk->sk_callback_lock
+ends up being acquired recursively, deadlocking the kernel. This issue
+has been present since v6.9.
+
+'''
+sk_psock_strp_data_ready
+ write_lock_bh(&sk->sk_callback_lock)
+ strp_data_ready
+ strp_read_sock
+ read_sock -> tcp_read_sock
+ strp_recv
+ cb.rcv_msg -> sk_psock_strp_read
+              # now stream_verdict returns SK_PASS without a peer sock assigned
+ __SK_PASS = sk_psock_map_verd(SK_PASS, NULL)
+ sk_psock_verdict_apply
+ sk_psock_skb_ingress_self
+ sk_psock_skb_ingress_enqueue
+ sk_psock_data_ready
+ read_lock_bh(&sk->sk_callback_lock) <= dead lock
+
+'''
+
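+The pattern is the classic rwlock self-deadlock: a nested read lock can
+never be granted while the same path already holds the write lock. The
+userspace sketch below (a POSIX rwlock standing in for the kernel's
+sk_callback_lock; function names are illustrative, not the kernel's)
+shows why switching the outer acquisition to a read lock resolves it:
+
+'''
+#include <pthread.h>
+#include <stdio.h>
+
+/* Stand-in for sk->sk_callback_lock (illustrative only). */
+static pthread_rwlock_t callback_lock = PTHREAD_RWLOCK_INITIALIZER;
+
+/* Models sk_psock_data_ready(): deeper in the call chain, it takes the
+ * same lock for reading. */
+static void data_ready(void)
+{
+    pthread_rwlock_rdlock(&callback_lock);
+    puts("nested read lock acquired, no deadlock");
+    pthread_rwlock_unlock(&callback_lock);
+}
+
+int main(void)
+{
+    /* Buggy path (do not run): taking the write lock here, as the old
+     * sk_psock_strp_data_ready() did, makes the nested rdlock in
+     * data_ready() block forever:
+     *
+     *     pthread_rwlock_wrlock(&callback_lock);
+     *     data_ready();   <= self-deadlock
+     */
+
+    /* Fixed path: the outer acquisition is a read lock too, and read
+     * locks may nest. */
+    pthread_rwlock_rdlock(&callback_lock);
+    data_ready();
+    pthread_rwlock_unlock(&callback_lock);
+    return 0;
+}
+'''
+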
+This topic has been discussed before, but it has not been fixed.
+Previous discussion:
+https://lore.kernel.org/all/6684a5864ec86_403d20898@john.notmuch
+
+Fixes: 6648e613226e ("bpf, skmsg: Fix NULL pointer dereference in sk_psock_skb_ingress_enqueue")
+Reported-by: Vincent Whitchurch <vincent.whitchurch@datadoghq.com>
+Signed-off-by: Jiayuan Chen <mrpre@163.com>
+Signed-off-by: John Fastabend <john.fastabend@gmail.com>
+Acked-by: Martin KaFai Lau <martin.lau@kernel.org>
+Link: https://patch.msgid.link/20241118030910.36230-2-mrpre@163.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+[srish: Apply to stable branch linux-5.10.y]
+Signed-off-by: Srish Srinivasan <srishwap4@gmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/skmsg.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index 51792dda1b73..890e16bbc072 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -940,9 +940,9 @@ static void sk_psock_strp_data_ready(struct sock *sk)
+ if (tls_sw_has_ctx_rx(sk)) {
+ psock->parser.saved_data_ready(sk);
+ } else {
+- write_lock_bh(&sk->sk_callback_lock);
++ read_lock_bh(&sk->sk_callback_lock);
+ strp_data_ready(&psock->parser.strp);
+- write_unlock_bh(&sk->sk_callback_lock);
++ read_unlock_bh(&sk->sk_callback_lock);
+ }
+ }
+ rcu_read_unlock();
+--
+2.39.5
+
--- /dev/null
+From 5676c59edeae22dc28eff23e26667fe64949d324 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 25 Nov 2024 22:53:14 +0200
+Subject: drm/dp_mst: Fix MST sideband message body length check
+
+From: Imre Deak <imre.deak@intel.com>
+
+[ Upstream commit bd2fccac61b40eaf08d9546acc9fef958bfe4763 ]
+
+Fix the MST sideband message body length check, which must be at least 1
+byte accounting for the message body CRC (aka message data CRC) at the
+end of the message.
+
+This fixes a case where an MST branch device returns a header with a
+correct header CRC (indicating a correctly received body length), with
+the body length being incorrectly set to 0. This will later lead to a
+memory corruption in drm_dp_sideband_append_payload() and the following
+errors in dmesg:
+
+ UBSAN: array-index-out-of-bounds in drivers/gpu/drm/display/drm_dp_mst_topology.c:786:25
+ index -1 is out of range for type 'u8 [48]'
+ Call Trace:
+ drm_dp_sideband_append_payload+0x33d/0x350 [drm_display_helper]
+ drm_dp_get_one_sb_msg+0x3ce/0x5f0 [drm_display_helper]
+ drm_dp_mst_hpd_irq_handle_event+0xc8/0x1580 [drm_display_helper]
+
+ memcpy: detected field-spanning write (size 18446744073709551615) of single field "&msg->msg[msg->curlen]" at drivers/gpu/drm/display/drm_dp_mst_topology.c:791 (size 256)
+ Call Trace:
+ drm_dp_sideband_append_payload+0x324/0x350 [drm_display_helper]
+ drm_dp_get_one_sb_msg+0x3ce/0x5f0 [drm_display_helper]
+ drm_dp_mst_hpd_irq_handle_event+0xc8/0x1580 [drm_display_helper]
+
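+The "size 18446744073709551615" is (size_t)-1: with a body length of 0,
+subtracting the one CRC byte underflows the unsigned payload length.
+Below is a minimal userspace sketch of the failure mode, with variable
+names modelled on, but not taken from, the kernel code:
+
+'''
+#include <stdio.h>
+#include <string.h>
+
+int main(void)
+{
+    unsigned char msg[48] = { 0 };
+    size_t msg_len = 0;  /* corrupt header: body length 0, header CRC ok */
+
+    /* Stripping the trailing body CRC from a zero-length body wraps the
+     * unsigned length around to SIZE_MAX, the huge memcpy() size in the
+     * log above. */
+    printf("unchecked payload length: %zu\n", msg_len - 1);
+
+    /* The fix rejects such a header before any copy is attempted: */
+    if (msg_len < 1) {  /* min space for the body CRC */
+        puts("header rejected");
+        return 1;
+    }
+    memcpy(msg, msg, msg_len - 1);  /* out-of-bounds write in the kernel */
+    return 0;
+}
+'''
+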
+Cc: <stable@vger.kernel.org>
+Cc: Lyude Paul <lyude@redhat.com>
+Reviewed-by: Lyude Paul <lyude@redhat.com>
+Signed-off-by: Imre Deak <imre.deak@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20241125205314.1725887-1-imre.deak@intel.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/drm_dp_mst_topology.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index 27305f339881..0eb2f30c1e3e 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -318,6 +318,9 @@ static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
+ hdr->broadcast = (buf[idx] >> 7) & 0x1;
+ hdr->path_msg = (buf[idx] >> 6) & 0x1;
+ hdr->msg_len = buf[idx] & 0x3f;
++ if (hdr->msg_len < 1) /* min space for body CRC */
++ return false;
++
+ idx++;
+ hdr->somt = (buf[idx] >> 7) & 0x1;
+ hdr->eomt = (buf[idx] >> 6) & 0x1;
+--
+2.39.5
+
ipv6-use-skb_expand_head-in-ip6_xmit.patch
ipv6-fix-possible-uaf-in-ip6_finish_output2.patch
bpf-check-validity-of-link-type-in-bpf_link_show_fdi.patch
+bpf-fix-recursive-lock-when-verdict-program-return-s.patch
+drm-dp_mst-fix-mst-sideband-message-body-length-chec.patch
+arm64-mm-rename-asid2idx-to-ctxid2asid.patch
+arm64-ensure-bits-asid-15-8-are-masked-out-when-the-.patch