--- /dev/null
+From 783178a804b80288daf109a52da28424019d4996 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 10 Feb 2025 22:37:29 +0100
+Subject: arm64: dts: rockchip: adjust SMMU interrupt type on rk3588
+
+From: Patrick Wildt <patrick@blueri.se>
+
+[ Upstream commit 8546cfd08aa4b982acd2357403a1f15495d622ec ]
+
+The SMMU architecture requires wired interrupts to be edge triggered,
+which does not align with the DT description for the RK3588. This leads
+to interrupt storms, as the SMMU continues to hold the pin high and only
+pulls it down for a short time when issuing an IRQ. Update the DT
+description to be in line with the spec and perceived reality.
+
+Signed-off-by: Patrick Wildt <patrick@blueri.se>
+Fixes: cd81d3a0695c ("arm64: dts: rockchip: add rk3588 pcie and php IOMMUs")
+Reviewed-by: Niklas Cassel <cassel@kernel.org>
+Link: https://lore.kernel.org/r/Z6pxme2Chmf3d3uK@windev.fritz.box
+Signed-off-by: Heiko Stuebner <heiko@sntech.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/boot/dts/rockchip/rk3588-base.dtsi | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi
+index fc67585b64b7b..1fd8093f2124c 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi
+@@ -549,10 +549,10 @@ usb_host2_xhci: usb@fcd00000 {
+ mmu600_pcie: iommu@fc900000 {
+ compatible = "arm,smmu-v3";
+ reg = <0x0 0xfc900000 0x0 0x200000>;
+- interrupts = <GIC_SPI 369 IRQ_TYPE_LEVEL_HIGH 0>,
+- <GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH 0>,
+- <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH 0>,
+- <GIC_SPI 367 IRQ_TYPE_LEVEL_HIGH 0>;
++ interrupts = <GIC_SPI 369 IRQ_TYPE_EDGE_RISING 0>,
++ <GIC_SPI 371 IRQ_TYPE_EDGE_RISING 0>,
++ <GIC_SPI 374 IRQ_TYPE_EDGE_RISING 0>,
++ <GIC_SPI 367 IRQ_TYPE_EDGE_RISING 0>;
+ interrupt-names = "eventq", "gerror", "priq", "cmdq-sync";
+ #iommu-cells = <1>;
+ status = "disabled";
+@@ -561,10 +561,10 @@ mmu600_pcie: iommu@fc900000 {
+ mmu600_php: iommu@fcb00000 {
+ compatible = "arm,smmu-v3";
+ reg = <0x0 0xfcb00000 0x0 0x200000>;
+- interrupts = <GIC_SPI 381 IRQ_TYPE_LEVEL_HIGH 0>,
+- <GIC_SPI 383 IRQ_TYPE_LEVEL_HIGH 0>,
+- <GIC_SPI 386 IRQ_TYPE_LEVEL_HIGH 0>,
+- <GIC_SPI 379 IRQ_TYPE_LEVEL_HIGH 0>;
++ interrupts = <GIC_SPI 381 IRQ_TYPE_EDGE_RISING 0>,
++ <GIC_SPI 383 IRQ_TYPE_EDGE_RISING 0>,
++ <GIC_SPI 386 IRQ_TYPE_EDGE_RISING 0>,
++ <GIC_SPI 379 IRQ_TYPE_EDGE_RISING 0>;
+ interrupt-names = "eventq", "gerror", "priq", "cmdq-sync";
+ #iommu-cells = <1>;
+ status = "disabled";
+--
+2.39.5
+
--- /dev/null
+From 15f8feddd74295e1d120e97647bcdfafb462fa14 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Jan 2025 18:47:34 +0800
+Subject: arm64: dts: rockchip: Fix lcdpwr_en pin for Cool Pi GenBook
+
+From: Andy Yan <andyshrk@163.com>
+
+[ Upstream commit a1d939055a22be06d8c12bf53afb258b9d38575f ]
+
+According to the schematic, the lcdpwr_en pin is GPIO0_C4,
+not GPIO1_C4.
+
+Fixes: 4a8c1161b843 ("arm64: dts: rockchip: Add support for rk3588 based Cool Pi CM5 GenBook")
+Signed-off-by: Andy Yan <andyshrk@163.com>
+Link: https://lore.kernel.org/r/20250113104825.2390427-1-andyshrk@163.com
+Signed-off-by: Heiko Stuebner <heiko@sntech.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-genbook.dts | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-genbook.dts b/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-genbook.dts
+index 6418286efe40d..762d36ad733ab 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-genbook.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-genbook.dts
+@@ -101,7 +101,7 @@ vcc3v3_lcd: vcc3v3-lcd-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3_lcd";
+ enable-active-high;
+- gpio = <&gpio1 RK_PC4 GPIO_ACTIVE_HIGH>;
++ gpio = <&gpio0 RK_PC4 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&lcdpwr_en>;
+ vin-supply = <&vcc3v3_sys>;
+@@ -207,7 +207,7 @@ &pcie3x4 {
+ &pinctrl {
+ lcd {
+ lcdpwr_en: lcdpwr-en {
+- rockchip,pins = <1 RK_PC4 RK_FUNC_GPIO &pcfg_pull_down>;
++ rockchip,pins = <0 RK_PC4 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+
+ bl_en: bl-en {
+--
+2.39.5
+
--- /dev/null
+From f5513e4b6ca013b97b6cfb642faf877454c3dd0a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 Jan 2025 17:22:46 -0800
+Subject: bpf: avoid holding freeze_mutex during mmap operation
+
+From: Andrii Nakryiko <andrii@kernel.org>
+
+[ Upstream commit bc27c52eea189e8f7492d40739b7746d67b65beb ]
+
+We use map->freeze_mutex to prevent races between map_freeze() and
+memory mapping BPF map contents with writable permissions. The way we
+naively do this means we'll hold freeze_mutex for the entire duration of all
+the mm and VMA manipulations, which is completely unnecessary. This can
+potentially also lead to deadlocks, as reported by syzbot in [0].
+
+So, instead, hold freeze_mutex only during the writeability checks,
+proactively bump the "write active" count for the map, unlock the mutex
+and proceed with the mmap logic. Only if something goes wrong during the
+mmap logic do we then undo that "write active" counter increment.
+
+ [0] https://lore.kernel.org/bpf/678dcbc9.050a0220.303755.0066.GAE@google.com/
+
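+A minimal userspace sketch of the reworked pattern (illustrative only, with
+made-up names, not the kernel code): take the lock just for the check and
+the proactive counter bump, drop it, then do the slow work and roll the
+counter back on failure.
+
+  #include <errno.h>
+  #include <pthread.h>
+  #include <stdatomic.h>
+
+  static pthread_mutex_t freeze_lock = PTHREAD_MUTEX_INITIALIZER;
+  static int frozen;                  /* set by a "freeze" operation */
+  static atomic_int write_active;     /* outstanding writable mappings */
+
+  static int do_mmap_work(void) { return 0; }   /* placeholder slow path */
+
+  int map_writable(void)
+  {
+          int err;
+
+          /* hold the lock only for the check plus the proactive bump */
+          pthread_mutex_lock(&freeze_lock);
+          if (frozen) {
+                  pthread_mutex_unlock(&freeze_lock);
+                  return -EACCES;
+          }
+          atomic_fetch_add(&write_active, 1);
+          pthread_mutex_unlock(&freeze_lock);
+
+          /* the slow mapping work runs without the lock held */
+          err = do_mmap_work();
+          if (err)
+                  atomic_fetch_sub(&write_active, 1);   /* undo on failure */
+          return err;
+  }
+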
+Fixes: fc9702273e2e ("bpf: Add mmap() support for BPF_MAP_TYPE_ARRAY")
+Reported-by: syzbot+4dc041c686b7c816a71e@syzkaller.appspotmail.com
+Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
+Link: https://lore.kernel.org/r/20250129012246.1515826-2-andrii@kernel.org
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/syscall.c | 17 ++++++++++-------
+ 1 file changed, 10 insertions(+), 7 deletions(-)
+
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index fa43f26ce0dac..3200372ea28ce 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -936,7 +936,7 @@ static const struct vm_operations_struct bpf_map_default_vmops = {
+ static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
+ {
+ struct bpf_map *map = filp->private_data;
+- int err;
++ int err = 0;
+
+ if (!map->ops->map_mmap || !IS_ERR_OR_NULL(map->record))
+ return -ENOTSUPP;
+@@ -960,7 +960,12 @@ static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
+ err = -EACCES;
+ goto out;
+ }
++ bpf_map_write_active_inc(map);
+ }
++out:
++ mutex_unlock(&map->freeze_mutex);
++ if (err)
++ return err;
+
+ /* set default open/close callbacks */
+ vma->vm_ops = &bpf_map_default_vmops;
+@@ -977,13 +982,11 @@ static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
+ vm_flags_clear(vma, VM_MAYWRITE);
+
+ err = map->ops->map_mmap(map, vma);
+- if (err)
+- goto out;
++ if (err) {
++ if (vma->vm_flags & VM_WRITE)
++ bpf_map_write_active_dec(map);
++ }
+
+- if (vma->vm_flags & VM_WRITE)
+- bpf_map_write_active_inc(map);
+-out:
+- mutex_unlock(&map->freeze_mutex);
+ return err;
+ }
+
+--
+2.39.5
+
--- /dev/null
+From f3d582176a26ddee65dba1f755ded505130f55fd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Jan 2025 18:09:15 +0800
+Subject: bpf: Disable non stream socket for strparser
+
+From: Jiayuan Chen <mrpre@163.com>
+
+[ Upstream commit 5459cce6bf49e72ee29be21865869c2ac42419f5 ]
+
+Currently, only TCP supports strparser, but sockmap doesn't prevent
+attaching strparser to non-TCP sockets. For example, with UDP, although
+the read/write handlers are replaced, strparser is not executed due to
+the lack of a read_sock operation.
+
+Furthermore, udp_bpf_recvmsg() checks whether the psock has data, and if
+not, falls back to the native UDP read interface, making UDP + strparser
+appear to read correctly. According to its commit history, this behavior
+is unexpected.
+
+Moreover, since UDP lacks the concept of streams, we reject it directly.
+
+Fixes: 1fa1fe8ff161 ("bpf, sockmap: Test shutdown() correctly exits epoll and recv()=0")
+Signed-off-by: Jiayuan Chen <mrpre@163.com>
+Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
+Acked-by: Jakub Sitnicki <jakub@cloudflare.com>
+Acked-by: John Fastabend <john.fastabend@gmail.com>
+Link: https://patch.msgid.link/20250122100917.49845-4-mrpre@163.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/sock_map.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/net/core/sock_map.c b/net/core/sock_map.c
+index 2f1be9baad057..82a14f131d00c 100644
+--- a/net/core/sock_map.c
++++ b/net/core/sock_map.c
+@@ -303,7 +303,10 @@ static int sock_map_link(struct bpf_map *map, struct sock *sk)
+
+ write_lock_bh(&sk->sk_callback_lock);
+ if (stream_parser && stream_verdict && !psock->saved_data_ready) {
+- ret = sk_psock_init_strp(sk, psock);
++ if (sk_is_tcp(sk))
++ ret = sk_psock_init_strp(sk, psock);
++ else
++ ret = -EOPNOTSUPP;
+ if (ret) {
+ write_unlock_bh(&sk->sk_callback_lock);
+ sk_psock_put(sk, psock);
+--
+2.39.5
+
--- /dev/null
+From 0cea78923eda11e2c49660aab8f79a2130ae92f7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 21 Dec 2024 14:10:16 +0800
+Subject: bpf: Fix deadlock when freeing cgroup storage
+
+From: Abel Wu <wuyun.abel@bytedance.com>
+
+[ Upstream commit c78f4afbd962f43a3989f45f3ca04300252b19b5 ]
+
+The following commit
+bc235cdb423a ("bpf: Prevent deadlock from recursive bpf_task_storage_[get|delete]")
+first introduced deadlock prevention for fentry/fexit programs attached
+to bpf_task_storage helpers. That commit also applied the logic to the
+map free path in its v6 version.
+
+Later bpf_cgrp_storage was first introduced in
+c4bcfb38a95e ("bpf: Implement cgroup storage available to non-cgroup-attached bpf progs")
+which faces the same issue as bpf_task_storage. However, instead of its
+busy counter, NULL was passed to bpf_local_storage_map_free(), which
+opened a window to cause deadlock:
+
+ <TASK>
+ (acquiring local_storage->lock)
+ _raw_spin_lock_irqsave+0x3d/0x50
+ bpf_local_storage_update+0xd1/0x460
+ bpf_cgrp_storage_get+0x109/0x130
+ bpf_prog_a4d4a370ba857314_cgrp_ptr+0x139/0x170
+ ? __bpf_prog_enter_recur+0x16/0x80
+ bpf_trampoline_6442485186+0x43/0xa4
+ cgroup_storage_ptr+0x9/0x20
+ (holding local_storage->lock)
+ bpf_selem_unlink_storage_nolock.constprop.0+0x135/0x160
+ bpf_selem_unlink_storage+0x6f/0x110
+ bpf_local_storage_map_free+0xa2/0x110
+ bpf_map_free_deferred+0x5b/0x90
+ process_one_work+0x17c/0x390
+ worker_thread+0x251/0x360
+ kthread+0xd2/0x100
+ ret_from_fork+0x34/0x50
+ ret_from_fork_asm+0x1a/0x30
+ </TASK>
+
+Progs:
+ - A: SEC("fentry/cgroup_storage_ptr")
+ - cgid (BPF_MAP_TYPE_HASH)
+      Record the id of the cgroup the current task belongs
+      to in this hash map, using the address of the cgroup
+      as the map key.
+ - cgrpa (BPF_MAP_TYPE_CGRP_STORAGE)
+      If the current task is a kworker, look up the above hash
+ map using function parameter @owner as the key to get
+ its corresponding cgroup id which is then used to get
+ a trusted pointer to the cgroup through
+ bpf_cgroup_from_id(). This trusted pointer can then
+ be passed to bpf_cgrp_storage_get() to finally trigger
+ the deadlock issue.
+ - B: SEC("tp_btf/sys_enter")
+ - cgrpb (BPF_MAP_TYPE_CGRP_STORAGE)
+ The only purpose of this prog is to fill Prog A's
+ hash map by calling bpf_cgrp_storage_get() for as
+ many userspace tasks as possible.
+
+Steps to reproduce:
+ - Run A;
+ - while (true) { Run B; Destroy B; }
+
+Fix this issue by passing its busy counter to the free procedure so
+it can be properly incremented before storage/smap locking.
+
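+A loose userspace analogy of the busy-counter guard (assumed names, not the
+kernel implementation): the free path raises a per-thread busy marker before
+taking the lock, and a re-entered helper bails out instead of trying to take
+the same lock again.
+
+  #include <errno.h>
+  #include <pthread.h>
+
+  static pthread_mutex_t storage_lock = PTHREAD_MUTEX_INITIALIZER;
+  static __thread int storage_busy;   /* analogue of the per-cpu busy counter */
+
+  /* helper that may be re-entered from a tracing callback */
+  int storage_get(void)
+  {
+          if (storage_busy)
+                  return -EBUSY;      /* refuse to recurse into the lock */
+          storage_busy++;
+          pthread_mutex_lock(&storage_lock);
+          /* ... update storage ... */
+          pthread_mutex_unlock(&storage_lock);
+          storage_busy--;
+          return 0;
+  }
+
+  /* free path: raise the busy marker *before* locking, as the fix does */
+  void storage_free(void)
+  {
+          storage_busy++;
+          pthread_mutex_lock(&storage_lock);
+          /* callbacks fired from here see storage_busy != 0 and back off */
+          pthread_mutex_unlock(&storage_lock);
+          storage_busy--;
+  }
+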
+Fixes: c4bcfb38a95e ("bpf: Implement cgroup storage available to non-cgroup-attached bpf progs")
+Signed-off-by: Abel Wu <wuyun.abel@bytedance.com>
+Acked-by: Martin KaFai Lau <martin.lau@kernel.org>
+Link: https://lore.kernel.org/r/20241221061018.37717-1-wuyun.abel@bytedance.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/bpf_cgrp_storage.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/bpf/bpf_cgrp_storage.c b/kernel/bpf/bpf_cgrp_storage.c
+index 28efd0a3f2200..6547fb7ac0dcb 100644
+--- a/kernel/bpf/bpf_cgrp_storage.c
++++ b/kernel/bpf/bpf_cgrp_storage.c
+@@ -154,7 +154,7 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
+
+ static void cgroup_storage_map_free(struct bpf_map *map)
+ {
+- bpf_local_storage_map_free(map, &cgroup_cache, NULL);
++ bpf_local_storage_map_free(map, &cgroup_cache, &bpf_cgrp_storage_busy);
+ }
+
+ /* *gfp_flags* is a hidden argument provided by the verifier */
+--
+2.39.5
+
--- /dev/null
+From 34a2af2c82bba0b71cdc697e00fbaa4af556a1e8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 5 Feb 2025 17:00:59 +0000
+Subject: bpf: Fix softlockup in arena_map_free on 64k page kernel
+
+From: Alan Maguire <alan.maguire@oracle.com>
+
+[ Upstream commit 517e8a7835e8cfb398a0aeb0133de50e31cae32b ]
+
+On an aarch64 kernel with CONFIG_PAGE_SIZE_64KB=y,
+arena_htab tests cause a segmentation fault and soft lockup.
+The same failure is not observed with 4k pages on aarch64.
+
+It turns out arena_map_free() is calling
+apply_to_existing_page_range() with the address returned by
+bpf_arena_get_kern_vm_start(). If this address is not page-aligned,
+the code ends up calling apply_to_pte_range() with that unaligned
+address, causing a soft lockup.
+
+Fix it by rounding up GUARD_SZ to PAGE_SIZE << 1 so that the
+division by 2 in bpf_arena_get_kern_vm_start() returns
+a page-aligned value.
+
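+Worked example of the alignment math (a standalone sketch, not the kernel
+source): with 64K pages the old guard of 1 << 16 bytes halves to 32K, which
+is not page-aligned, while a guard rounded up to twice the page size keeps
+the halved offset page-aligned.
+
+  #include <stdint.h>
+  #include <stdio.h>
+
+  #define ROUND_UP(x, a)  ((((x) + (a) - 1) / (a)) * (a))
+
+  int main(void)
+  {
+          uint64_t page_size = 64 * 1024;      /* 64K pages */
+          uint64_t old_guard = 1ull << 16;     /* span of the 16-bit 'off' field */
+          uint64_t new_guard = ROUND_UP(old_guard, page_size << 1);
+
+          printf("old guard/2 = %llu, page aligned: %d\n",
+                 (unsigned long long)(old_guard / 2),
+                 (old_guard / 2) % page_size == 0);   /* 32768, not aligned */
+          printf("new guard/2 = %llu, page aligned: %d\n",
+                 (unsigned long long)(new_guard / 2),
+                 (new_guard / 2) % page_size == 0);   /* 65536, aligned */
+          return 0;
+  }
+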
+Fixes: 317460317a02 ("bpf: Introduce bpf_arena.")
+Reported-by: Colm Harrington <colm.harrington@oracle.com>
+Suggested-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Alan Maguire <alan.maguire@oracle.com>
+Link: https://lore.kernel.org/r/20250205170059.427458-1-alan.maguire@oracle.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/arena.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/bpf/arena.c b/kernel/bpf/arena.c
+index 93e48c7cad4ef..8c775a1401d3e 100644
+--- a/kernel/bpf/arena.c
++++ b/kernel/bpf/arena.c
+@@ -37,7 +37,7 @@
+ */
+
+ /* number of bytes addressable by LDX/STX insn with 16-bit 'off' field */
+-#define GUARD_SZ (1ull << sizeof_field(struct bpf_insn, off) * 8)
++#define GUARD_SZ round_up(1ull << sizeof_field(struct bpf_insn, off) * 8, PAGE_SIZE << 1)
+ #define KERN_VM_SZ (SZ_4G + GUARD_SZ)
+
+ struct bpf_arena {
+--
+2.39.5
+
--- /dev/null
+From 42d98b376512933a303e4df7082e4c5d5da24500 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Jan 2025 18:09:14 +0800
+Subject: bpf: Fix wrong copied_seq calculation
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jiayuan Chen <mrpre@163.com>
+
+[ Upstream commit 36b62df5683c315ba58c950f1a9c771c796c30ec ]
+
+'sk->copied_seq' was updated in the tcp_eat_skb() function when the action
+of a BPF program was SK_REDIRECT. For other actions, like SK_PASS, the
+update logic for 'sk->copied_seq' was moved to tcp_bpf_recvmsg_parser()
+to ensure the accuracy of the 'fionread' feature.
+
+This works for a stream_verdict-only scenario, as that change also modified
+sk_data_ready->sk_psock_verdict_data_ready->tcp_read_skb
+to stop updating 'sk->copied_seq'.
+
+However, for programs where both stream_parser and stream_verdict are
+active (the strparser case), tcp_read_sock() is used instead of
+tcp_read_skb() (sk_data_ready->strp_data_ready->tcp_read_sock).
+tcp_read_sock() still updates 'sk->copied_seq', leading to duplicate
+updates.
+
+In summary, for strparser + SK_PASS, copied_seq is redundantly calculated
+in both tcp_read_sock() and tcp_bpf_recvmsg_parser().
+
+The issue causes incorrect copied_seq calculations, which prevent
+correct data reads from the recv() interface in user-land.
+
+We do not want to add new proto_ops to implement a new version of
+tcp_read_sock, as this would introduce code complexity [1].
+
+We could have added noack and copied_seq to desc, and then called
+ops->read_sock. However, unfortunately, other modules didn’t fully
+initialize desc to zero. So, for now, we are directly calling
+tcp_read_sock_noack() in tcp_bpf.c.
+
+[1]: https://lore.kernel.org/bpf/20241218053408.437295-1-mrpre@163.com
+
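+A loose standalone analogy of the accounting change (made-up names, not the
+kernel code): the parser layer advances only its private counter, and the
+shared counter is then reconciled to exclude bytes still parked locally, so
+those are only accounted for when they are actually consumed later.
+
+  /* shared per-connection counter, like tp->copied_seq */
+  struct conn   { unsigned int copied_seq; };
+  /* private per-reader state, like psock->copied_seq / ingress_bytes */
+  struct reader { unsigned int copied_seq; unsigned int parked_bytes; };
+
+  /* parser path: never touch the shared counter directly */
+  static void parser_consume(struct reader *r, unsigned int len, int redirect)
+  {
+          r->copied_seq += len;
+          if (!redirect)
+                  r->parked_bytes += len;   /* SK_PASS-style: ack later */
+  }
+
+  /* reconcile: shared counter reflects only bytes that really left */
+  static void reconcile(struct conn *c, struct reader *r)
+  {
+          c->copied_seq = r->copied_seq - r->parked_bytes;
+  }
+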
+Fixes: e5c6de5fa025 ("bpf, sockmap: Incorrectly handling copied_seq")
+Suggested-by: Jakub Sitnicki <jakub@cloudflare.com>
+Signed-off-by: Jiayuan Chen <mrpre@163.com>
+Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
+Reviewed-by: Jakub Sitnicki <jakub@cloudflare.com>
+Acked-by: John Fastabend <john.fastabend@gmail.com>
+Link: https://patch.msgid.link/20250122100917.49845-3-mrpre@163.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/skmsg.h | 2 ++
+ include/net/tcp.h | 8 ++++++++
+ net/core/skmsg.c | 7 +++++++
+ net/ipv4/tcp.c | 29 ++++++++++++++++++++++++-----
+ net/ipv4/tcp_bpf.c | 36 ++++++++++++++++++++++++++++++++++++
+ 5 files changed, 77 insertions(+), 5 deletions(-)
+
+diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
+index 2cbe0c22a32f3..0b9095a281b89 100644
+--- a/include/linux/skmsg.h
++++ b/include/linux/skmsg.h
+@@ -91,6 +91,8 @@ struct sk_psock {
+ struct sk_psock_progs progs;
+ #if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
+ struct strparser strp;
++ u32 copied_seq;
++ u32 ingress_bytes;
+ #endif
+ struct sk_buff_head ingress_skb;
+ struct list_head ingress_msg;
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 6cd0fde806519..3255a199ef60d 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -743,6 +743,9 @@ void tcp_get_info(struct sock *, struct tcp_info *);
+ /* Read 'sendfile()'-style from a TCP socket */
+ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
+ sk_read_actor_t recv_actor);
++int tcp_read_sock_noack(struct sock *sk, read_descriptor_t *desc,
++ sk_read_actor_t recv_actor, bool noack,
++ u32 *copied_seq);
+ int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
+ struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off);
+ void tcp_read_done(struct sock *sk, size_t len);
+@@ -2609,6 +2612,11 @@ struct sk_psock;
+ #ifdef CONFIG_BPF_SYSCALL
+ int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
+ void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);
++#ifdef CONFIG_BPF_STREAM_PARSER
++struct strparser;
++int tcp_bpf_strp_read_sock(struct strparser *strp, read_descriptor_t *desc,
++ sk_read_actor_t recv_actor);
++#endif /* CONFIG_BPF_STREAM_PARSER */
+ #endif /* CONFIG_BPF_SYSCALL */
+
+ #ifdef CONFIG_INET
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index 8ad7e6755fd64..f76cbf49c68c8 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -548,6 +548,9 @@ static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
+ return num_sge;
+ }
+
++#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
++ psock->ingress_bytes += len;
++#endif
+ copied = len;
+ msg->sg.start = 0;
+ msg->sg.size = copied;
+@@ -1143,6 +1146,10 @@ int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
+ if (!ret)
+ sk_psock_set_state(psock, SK_PSOCK_RX_STRP_ENABLED);
+
++ if (sk_is_tcp(sk)) {
++ psock->strp.cb.read_sock = tcp_bpf_strp_read_sock;
++ psock->copied_seq = tcp_sk(sk)->copied_seq;
++ }
+ return ret;
+ }
+
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 4f77bd862e957..68cb6a966b18b 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -1564,12 +1564,13 @@ EXPORT_SYMBOL(tcp_recv_skb);
+ * or for 'peeking' the socket using this routine
+ * (although both would be easy to implement).
+ */
+-int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
+- sk_read_actor_t recv_actor)
++static int __tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
++ sk_read_actor_t recv_actor, bool noack,
++ u32 *copied_seq)
+ {
+ struct sk_buff *skb;
+ struct tcp_sock *tp = tcp_sk(sk);
+- u32 seq = tp->copied_seq;
++ u32 seq = *copied_seq;
+ u32 offset;
+ int copied = 0;
+
+@@ -1623,9 +1624,12 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
+ tcp_eat_recv_skb(sk, skb);
+ if (!desc->count)
+ break;
+- WRITE_ONCE(tp->copied_seq, seq);
++ WRITE_ONCE(*copied_seq, seq);
+ }
+- WRITE_ONCE(tp->copied_seq, seq);
++ WRITE_ONCE(*copied_seq, seq);
++
++ if (noack)
++ goto out;
+
+ tcp_rcv_space_adjust(sk);
+
+@@ -1634,10 +1638,25 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
+ tcp_recv_skb(sk, seq, &offset);
+ tcp_cleanup_rbuf(sk, copied);
+ }
++out:
+ return copied;
+ }
++
++int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
++ sk_read_actor_t recv_actor)
++{
++ return __tcp_read_sock(sk, desc, recv_actor, false,
++ &tcp_sk(sk)->copied_seq);
++}
+ EXPORT_SYMBOL(tcp_read_sock);
+
++int tcp_read_sock_noack(struct sock *sk, read_descriptor_t *desc,
++ sk_read_actor_t recv_actor, bool noack,
++ u32 *copied_seq)
++{
++ return __tcp_read_sock(sk, desc, recv_actor, noack, copied_seq);
++}
++
+ int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
+ {
+ struct sk_buff *skb;
+diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
+index 392678ae80f4e..22e8a2af5dd8b 100644
+--- a/net/ipv4/tcp_bpf.c
++++ b/net/ipv4/tcp_bpf.c
+@@ -646,6 +646,42 @@ static int tcp_bpf_assert_proto_ops(struct proto *ops)
+ ops->sendmsg == tcp_sendmsg ? 0 : -ENOTSUPP;
+ }
+
++#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
++int tcp_bpf_strp_read_sock(struct strparser *strp, read_descriptor_t *desc,
++ sk_read_actor_t recv_actor)
++{
++ struct sock *sk = strp->sk;
++ struct sk_psock *psock;
++ struct tcp_sock *tp;
++ int copied = 0;
++
++ tp = tcp_sk(sk);
++ rcu_read_lock();
++ psock = sk_psock(sk);
++ if (WARN_ON_ONCE(!psock)) {
++ desc->error = -EINVAL;
++ goto out;
++ }
++
++ psock->ingress_bytes = 0;
++ copied = tcp_read_sock_noack(sk, desc, recv_actor, true,
++ &psock->copied_seq);
++ if (copied < 0)
++ goto out;
++ /* recv_actor may redirect skb to another socket (SK_REDIRECT) or
++ * just put skb into ingress queue of current socket (SK_PASS).
++ * For SK_REDIRECT, we need to ack the frame immediately but for
++ * SK_PASS, we want to delay the ack until tcp_bpf_recvmsg_parser().
++ */
++ tp->copied_seq = psock->copied_seq - psock->ingress_bytes;
++ tcp_rcv_space_adjust(sk);
++ __tcp_cleanup_rbuf(sk, copied - psock->ingress_bytes);
++out:
++ rcu_read_unlock();
++ return copied;
++}
++#endif /* CONFIG_BPF_STREAM_PARSER */
++
+ int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore)
+ {
+ int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4;
+--
+2.39.5
+
--- /dev/null
+From 1c8e2c00703e6d7b8e5199b3959615eda12a8ff4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 9 Feb 2025 23:22:35 -0800
+Subject: bpf: skip non exist keys in generic_map_lookup_batch
+
+From: Yan Zhai <yan@cloudflare.com>
+
+[ Upstream commit 5644c6b50ffee0a56c1e01430a8c88e34decb120 ]
+
+generic_map_lookup_batch() currently retries bpf_map_copy_value() several
+times when it fails with ENOENT, and returns EINTR once the retries are
+exhausted. The next batch would start from the same location, presuming
+it's a transient issue.
+This is incorrect if a map can actually have "holes", i.e.
+"get_next_key" can return a key that does not point to a valid value. At
+least the array-of-maps type may legitimately contain such holes. Right
+now, when these holes show up, generic batch lookup cannot proceed any
+more; it will always fail with EINTR errors.
+
+Rather, do not retry in generic_map_lookup_batch. If it finds a
+non-existent element, skip to the next key. This simple solution comes
+with a price: transient errors may not be recovered, and the iteration
+might cycle back to the first key under parallel deletion. For example,
+Hou Tao <houtao@huaweicloud.com> pointed out the following scenario:
+
+For LPM trie map:
+(1) ->map_get_next_key(map, prev_key, key) returns a valid key
+
+(2) bpf_map_copy_value() returns -ENOENT
+It means the key must have been deleted concurrently.
+
+(3) goto next_key
+It swaps the prev_key and key
+
+(4) ->map_get_next_key(map, prev_key, key) again
+prev_key points to a non-existent key; the LPM trie will treat it just
+like the prev_key=NULL case, so the returned key will be duplicated.
+
+With the retry logic, the iteration can continue to the key next to the
+deleted one. But if we directly skip to the next key, the iteration loop
+would restart from the first key for the lpm_trie type.
+
+However, not all races may be recovered. For example, if the current key
+is deleted after instead of before bpf_map_copy_value, or if the prev_key
+also gets deleted, then the loop will still restart from the first key
+for lpm_trie anyway. For generic lookup it might be better to stay
+simple, i.e. just skip to the next key. To guarantee that the output
+keys are not duplicated, it is better to implement map type specific
+batch operations, which can properly lock the trie and synchronize with
+concurrent mutators.
+
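+A simplified sketch of the batched-lookup loop shape after the change
+(hypothetical helper signatures, not the real syscall code): a missing value
+skips to the next key instead of retrying and eventually failing with EINTR.
+
+  #include <errno.h>
+
+  /* illustrative stand-ins for the real map ops */
+  static int map_get_next_key(int prev, int *key)
+  { *key = prev + 1; return *key < 8 ? 0 : -ENOENT; }
+  static int map_copy_value(int key, int *value)
+  { if (key % 3 == 0) return -ENOENT; *value = key * 10; return 0; }
+
+  int lookup_batch(int max_count, int *copied)
+  {
+          int prev = -1, key, value, cp = 0, err = 0;
+
+          while (cp < max_count) {
+                  err = map_get_next_key(prev, &key);
+                  if (err)
+                          break;            /* no more keys */
+                  err = map_copy_value(key, &value);
+                  if (err && err != -ENOENT)
+                          break;            /* real error */
+                  if (!err)
+                          cp++;             /* count only existing elements */
+                  /* on -ENOENT: a hole, just advance past it */
+                  prev = key;
+          }
+          *copied = cp;
+          return err == -ENOENT ? 0 : err;
+  }
+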
+Fixes: cb4d03ab499d ("bpf: Add generic support for lookup batch op")
+Closes: https://lore.kernel.org/bpf/Z6JXtA1M5jAZx8xD@debian.debian/
+Signed-off-by: Yan Zhai <yan@cloudflare.com>
+Acked-by: Hou Tao <houtao1@huawei.com>
+Link: https://lore.kernel.org/r/85618439eea75930630685c467ccefeac0942e2b.1739171594.git.yan@cloudflare.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/syscall.c | 18 +++++-------------
+ 1 file changed, 5 insertions(+), 13 deletions(-)
+
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index 3200372ea28ce..696e5a2cbea2e 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -1872,8 +1872,6 @@ int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
+ return err;
+ }
+
+-#define MAP_LOOKUP_RETRIES 3
+-
+ int generic_map_lookup_batch(struct bpf_map *map,
+ const union bpf_attr *attr,
+ union bpf_attr __user *uattr)
+@@ -1883,8 +1881,8 @@ int generic_map_lookup_batch(struct bpf_map *map,
+ void __user *values = u64_to_user_ptr(attr->batch.values);
+ void __user *keys = u64_to_user_ptr(attr->batch.keys);
+ void *buf, *buf_prevkey, *prev_key, *key, *value;
+- int err, retry = MAP_LOOKUP_RETRIES;
+ u32 value_size, cp, max_count;
++ int err;
+
+ if (attr->batch.elem_flags & ~BPF_F_LOCK)
+ return -EINVAL;
+@@ -1930,14 +1928,8 @@ int generic_map_lookup_batch(struct bpf_map *map,
+ err = bpf_map_copy_value(map, key, value,
+ attr->batch.elem_flags);
+
+- if (err == -ENOENT) {
+- if (retry) {
+- retry--;
+- continue;
+- }
+- err = -EINTR;
+- break;
+- }
++ if (err == -ENOENT)
++ goto next_key;
+
+ if (err)
+ goto free_buf;
+@@ -1952,12 +1944,12 @@ int generic_map_lookup_batch(struct bpf_map *map,
+ goto free_buf;
+ }
+
++ cp++;
++next_key:
+ if (!prev_key)
+ prev_key = buf_prevkey;
+
+ swap(prev_key, key);
+- retry = MAP_LOOKUP_RETRIES;
+- cp++;
+ cond_resched();
+ }
+
+--
+2.39.5
+
--- /dev/null
+From 742bf1cd42def90f15753acc376592d1da336077 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Jan 2025 00:06:42 +0900
+Subject: bpf, test_run: Fix use-after-free issue in eth_skb_pkt_type()
+
+From: Shigeru Yoshida <syoshida@redhat.com>
+
+[ Upstream commit 6b3d638ca897e099fa99bd6d02189d3176f80a47 ]
+
+KMSAN reported a use-after-free issue in eth_skb_pkt_type()[1]. The
+cause of the issue was that eth_skb_pkt_type() accessed skb's data
+that didn't contain an Ethernet header. This occurs when
+bpf_prog_test_run_xdp() passes an invalid value as the user_data
+argument to bpf_test_init().
+
+Fix this by returning an error when user_size is less than ETH_HLEN in
+bpf_test_init(). Additionally, remove the check for "if (user_size >
+size)" as it is unnecessary.
+
+[1]
+BUG: KMSAN: use-after-free in eth_skb_pkt_type include/linux/etherdevice.h:627 [inline]
+BUG: KMSAN: use-after-free in eth_type_trans+0x4ee/0x980 net/ethernet/eth.c:165
+ eth_skb_pkt_type include/linux/etherdevice.h:627 [inline]
+ eth_type_trans+0x4ee/0x980 net/ethernet/eth.c:165
+ __xdp_build_skb_from_frame+0x5a8/0xa50 net/core/xdp.c:635
+ xdp_recv_frames net/bpf/test_run.c:272 [inline]
+ xdp_test_run_batch net/bpf/test_run.c:361 [inline]
+ bpf_test_run_xdp_live+0x2954/0x3330 net/bpf/test_run.c:390
+ bpf_prog_test_run_xdp+0x148e/0x1b10 net/bpf/test_run.c:1318
+ bpf_prog_test_run+0x5b7/0xa30 kernel/bpf/syscall.c:4371
+ __sys_bpf+0x6a6/0xe20 kernel/bpf/syscall.c:5777
+ __do_sys_bpf kernel/bpf/syscall.c:5866 [inline]
+ __se_sys_bpf kernel/bpf/syscall.c:5864 [inline]
+ __x64_sys_bpf+0xa4/0xf0 kernel/bpf/syscall.c:5864
+ x64_sys_call+0x2ea0/0x3d90 arch/x86/include/generated/asm/syscalls_64.h:322
+ do_syscall_x64 arch/x86/entry/common.c:52 [inline]
+ do_syscall_64+0xd9/0x1d0 arch/x86/entry/common.c:83
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+
+Uninit was created at:
+ free_pages_prepare mm/page_alloc.c:1056 [inline]
+ free_unref_page+0x156/0x1320 mm/page_alloc.c:2657
+ __free_pages+0xa3/0x1b0 mm/page_alloc.c:4838
+ bpf_ringbuf_free kernel/bpf/ringbuf.c:226 [inline]
+ ringbuf_map_free+0xff/0x1e0 kernel/bpf/ringbuf.c:235
+ bpf_map_free kernel/bpf/syscall.c:838 [inline]
+ bpf_map_free_deferred+0x17c/0x310 kernel/bpf/syscall.c:862
+ process_one_work kernel/workqueue.c:3229 [inline]
+ process_scheduled_works+0xa2b/0x1b60 kernel/workqueue.c:3310
+ worker_thread+0xedf/0x1550 kernel/workqueue.c:3391
+ kthread+0x535/0x6b0 kernel/kthread.c:389
+ ret_from_fork+0x6e/0x90 arch/x86/kernel/process.c:147
+ ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:244
+
+CPU: 1 UID: 0 PID: 17276 Comm: syz.1.16450 Not tainted 6.12.0-05490-g9bb88c659673 #8
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.3-3.fc41 04/01/2014
+
+Fixes: be3d72a2896c ("bpf: move user_size out of bpf_test_init")
+Reported-by: syzkaller <syzkaller@googlegroups.com>
+Suggested-by: Martin KaFai Lau <martin.lau@linux.dev>
+Signed-off-by: Shigeru Yoshida <syoshida@redhat.com>
+Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
+Acked-by: Stanislav Fomichev <sdf@fomichev.me>
+Acked-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://patch.msgid.link/20250121150643.671650-1-syoshida@redhat.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bpf/test_run.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
+index 501ec4249fedc..8612023bec60d 100644
+--- a/net/bpf/test_run.c
++++ b/net/bpf/test_run.c
+@@ -660,12 +660,9 @@ static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
+ void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
+ void *data;
+
+- if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
++ if (user_size < ETH_HLEN || user_size > PAGE_SIZE - headroom - tailroom)
+ return ERR_PTR(-EINVAL);
+
+- if (user_size > size)
+- return ERR_PTR(-EMSGSIZE);
+-
+ size = SKB_DATA_ALIGN(size);
+ data = kzalloc(size + headroom + tailroom, GFP_USER);
+ if (!data)
+--
+2.39.5
+
--- /dev/null
+From 15fc70876631510b47c4f85178c851e4fb2feed1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 Jan 2025 17:22:45 -0800
+Subject: bpf: unify VM_WRITE vs VM_MAYWRITE use in BPF map mmaping logic
+
+From: Andrii Nakryiko <andrii@kernel.org>
+
+[ Upstream commit 98671a0fd1f14e4a518ee06b19037c20014900eb ]
+
+For all BPF maps we ensure that VM_MAYWRITE is cleared when
+memory-mapping BPF map contents as an initially read-only VMA. This is
+because in some cases BPF verifier relies on the underlying data to not
+be modified afterwards by user space, so once something is mapped
+read-only, it shouldn't be re-mmap'ed as read-write.
+
+As such, it's not necessary to check VM_MAYWRITE in bpf_map_mmap() and
+map->ops->map_mmap() callbacks: VM_WRITE should be consistently set for
+read-write mappings, and if VM_WRITE is not set, there is no way for
+user space to upgrade a read-only mapping to a read-write one.
+
+This patch cleans up this VM_WRITE vs VM_MAYWRITE handling within
+bpf_map_mmap(), which is an entry point for any BPF map mmap()-ing
+logic. We also drop unnecessary sanitization of VM_MAYWRITE in BPF
+ringbuf's map_mmap() callback implementation, as it is already performed
+by common code in bpf_map_mmap().
+
+Note, though, that in bpf_map_mmap_{open,close}() callbacks we can't
+drop VM_MAYWRITE use, because it's possible (and is outside of the
+subsystem's control) to have an initially read-write memory mapping, which
+is subsequently dropped to read-only by user space through mprotect().
+In such a case, from the BPF verifier's POV it's read-write data throughout
+the lifetime of the BPF map, and is counted as an "active writer".
+
+But its VMAs will start out as VM_WRITE|VM_MAYWRITE, then mprotect() can
+change it to just VM_MAYWRITE (and no VM_WRITE), so when it's finally
+munmap()'ed and bpf_map_mmap_close() is called, vm_flags will be just
+VM_MAYWRITE, but we still need to decrement the active writer count with
+bpf_map_write_active_dec() as it's still considered to be a read-write
+mapping by the rest of the BPF subsystem.
+
+Similar reasoning applies to bpf_map_mmap_open(), which is called
+whenever mmap(), munmap(), and/or mprotect() forces mm subsystem to
+split original VMA into multiple discontiguous VMAs.
+
+Memory-mapping handling is a bit tricky, yes.
+
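+A small userspace demonstration of the VM_MAYWRITE idea (not kernel code,
+and any readable file will do): a MAP_SHARED mapping of a file opened
+O_RDONLY has VM_MAYWRITE clear, so mprotect() cannot later upgrade it to
+writable, which is the same one-way guarantee BPF relies on for its maps.
+
+  #include <errno.h>
+  #include <fcntl.h>
+  #include <stdio.h>
+  #include <string.h>
+  #include <sys/mman.h>
+  #include <unistd.h>
+
+  int main(void)
+  {
+          long page = sysconf(_SC_PAGESIZE);
+          int fd = open("/etc/hostname", O_RDONLY);   /* any readable file */
+          if (fd < 0)
+                  return 1;
+
+          /* read-only shared file mapping: the kernel leaves VM_MAYWRITE clear */
+          void *p = mmap(NULL, page, PROT_READ, MAP_SHARED, fd, 0);
+          if (p == MAP_FAILED)
+                  return 1;
+
+          /* upgrading to writable is refused with EACCES */
+          if (mprotect(p, page, PROT_READ | PROT_WRITE) < 0)
+                  printf("mprotect: %s\n", strerror(errno));
+
+          munmap(p, page);
+          close(fd);
+          return 0;
+  }
+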
+Cc: Jann Horn <jannh@google.com>
+Cc: Suren Baghdasaryan <surenb@google.com>
+Cc: Shakeel Butt <shakeel.butt@linux.dev>
+Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
+Link: https://lore.kernel.org/r/20250129012246.1515826-1-andrii@kernel.org
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Stable-dep-of: bc27c52eea18 ("bpf: avoid holding freeze_mutex during mmap operation")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/ringbuf.c | 4 ----
+ kernel/bpf/syscall.c | 10 ++++++++--
+ 2 files changed, 8 insertions(+), 6 deletions(-)
+
+diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c
+index e1cfe890e0be6..1499d8caa9a35 100644
+--- a/kernel/bpf/ringbuf.c
++++ b/kernel/bpf/ringbuf.c
+@@ -268,8 +268,6 @@ static int ringbuf_map_mmap_kern(struct bpf_map *map, struct vm_area_struct *vma
+ /* allow writable mapping for the consumer_pos only */
+ if (vma->vm_pgoff != 0 || vma->vm_end - vma->vm_start != PAGE_SIZE)
+ return -EPERM;
+- } else {
+- vm_flags_clear(vma, VM_MAYWRITE);
+ }
+ /* remap_vmalloc_range() checks size and offset constraints */
+ return remap_vmalloc_range(vma, rb_map->rb,
+@@ -289,8 +287,6 @@ static int ringbuf_map_mmap_user(struct bpf_map *map, struct vm_area_struct *vma
+ * position, and the ring buffer data itself.
+ */
+ return -EPERM;
+- } else {
+- vm_flags_clear(vma, VM_MAYWRITE);
+ }
+ /* remap_vmalloc_range() checks size and offset constraints */
+ return remap_vmalloc_range(vma, rb_map->rb, vma->vm_pgoff + RINGBUF_PGOFF);
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index 368ae8d231d41..fa43f26ce0dac 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -966,15 +966,21 @@ static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
+ vma->vm_ops = &bpf_map_default_vmops;
+ vma->vm_private_data = map;
+ vm_flags_clear(vma, VM_MAYEXEC);
++ /* If mapping is read-only, then disallow potentially re-mapping with
++ * PROT_WRITE by dropping VM_MAYWRITE flag. This VM_MAYWRITE clearing
++ * means that as far as BPF map's memory-mapped VMAs are concerned,
++ * VM_WRITE and VM_MAYWRITE and equivalent, if one of them is set,
++ * both should be set, so we can forget about VM_MAYWRITE and always
++ * check just VM_WRITE
++ */
+ if (!(vma->vm_flags & VM_WRITE))
+- /* disallow re-mapping with PROT_WRITE */
+ vm_flags_clear(vma, VM_MAYWRITE);
+
+ err = map->ops->map_mmap(map, vma);
+ if (err)
+ goto out;
+
+- if (vma->vm_flags & VM_MAYWRITE)
++ if (vma->vm_flags & VM_WRITE)
+ bpf_map_write_active_inc(map);
+ out:
+ mutex_unlock(&map->freeze_mutex);
+--
+2.39.5
+
--- /dev/null
+From 5a09cd00aea6aae92b32271b1c8e1f268a1d3d2c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Jan 2025 07:48:41 -0800
+Subject: drm/msm: Avoid rounding up to one jiffy
+
+From: Rob Clark <robdclark@chromium.org>
+
+[ Upstream commit 669c285620231786fffe9d87ab432e08a6ed922b ]
+
+If userspace is trying to achieve a timeout of zero, let 'em have it.
+Only round up if the timeout is greater than zero.
+
+Fixes: 4969bccd5f4e ("drm/msm: Avoid rounding down to zero jiffies")
+Signed-off-by: Rob Clark <robdclark@chromium.org>
+Reviewed-by: Akhil P Oommen <quic_akhilpo@quicinc.com>
+Patchwork: https://patchwork.freedesktop.org/patch/632264/
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/msm/msm_drv.h | 11 ++++-------
+ 1 file changed, 4 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
+index 2e28a13446366..9526b22038ab8 100644
+--- a/drivers/gpu/drm/msm/msm_drv.h
++++ b/drivers/gpu/drm/msm/msm_drv.h
+@@ -543,15 +543,12 @@ static inline int align_pitch(int width, int bpp)
+ static inline unsigned long timeout_to_jiffies(const ktime_t *timeout)
+ {
+ ktime_t now = ktime_get();
+- s64 remaining_jiffies;
+
+- if (ktime_compare(*timeout, now) < 0) {
+- remaining_jiffies = 0;
+- } else {
+- ktime_t rem = ktime_sub(*timeout, now);
+- remaining_jiffies = ktime_divns(rem, NSEC_PER_SEC / HZ);
+- }
++ if (ktime_compare(*timeout, now) <= 0)
++ return 0;
+
++ ktime_t rem = ktime_sub(*timeout, now);
++ s64 remaining_jiffies = ktime_divns(rem, NSEC_PER_SEC / HZ);
+ return clamp(remaining_jiffies, 1LL, (s64)INT_MAX);
+ }
+
+--
+2.39.5
+
--- /dev/null
+From cdafc7d93c3fccc65aac8bf952169d95a60ddccb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 Feb 2025 00:19:32 +0100
+Subject: drm/msm/dpu: Don't leak bits_per_component into random DSC_ENC fields
+
+From: Marijn Suijten <marijn.suijten@somainline.org>
+
+[ Upstream commit 144429831f447223253a0e4376489f84ff37d1a7 ]
+
+What used to be the input_10_bits boolean - feeding into the lowest
+bit of DSC_ENC - on MSM downstream turned into an accidental OR with
+the full bits_per_component number when it was ported to the upstream
+kernel.
+
+On typical bpc=8 setups we don't notice this because line_buf_depth is
+always an odd value (it contains bpc+1) and will also set the 4th bit
+after left-shifting by 3 (hence this |= bits_per_component is a no-op).
+
+Now that guards are being removed to allow more bits_per_component
+values besides 8 (possible since commit 49fd30a7153b ("drm/msm/dsi: use
+DRM DSC helpers for DSC setup")), a bpc of 10 will instead clash with
+the 5th bit which is convert_rgb. This is "fortunately" also always set
+to true by MSM's dsi_populate_dsc_params() already, but once a bpc of 12
+starts being used it'll write into simple_422 which is normally false.
+
+To solve all these overlaps, simply replicate downstream code and only
+set this lowest bit if bits_per_component is equal to 10. It is unclear
+why DSC requires this only for bpc=10 but not bpc=12, and also notice
+that this lowest bit wasn't set previously despite having a panel and
+patch on the list using it without any mentioned issues.
+
+Fixes: c110cfd1753e ("drm/msm/disp/dpu1: Add support for DSC")
+Signed-off-by: Marijn Suijten <marijn.suijten@somainline.org>
+Reviewed-by: Abhinav Kumar <quic_abhinavk@quicinc.com>
+Reviewed-by: Konrad Dybcio <konrad.dybcio@oss.qualcomm.com>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Patchwork: https://patchwork.freedesktop.org/patch/636311/
+Link: https://lore.kernel.org/r/20250211-dsc-10-bit-v1-1-1c85a9430d9a@somainline.org
+Signed-off-by: Abhinav Kumar <quic_abhinavk@quicinc.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
+index 5e9aad1b2aa28..d1e0fb2139765 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
+@@ -52,6 +52,7 @@ static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc,
+ u32 slice_last_group_size;
+ u32 det_thresh_flatness;
+ bool is_cmd_mode = !(mode & DSC_MODE_VIDEO);
++ bool input_10_bits = dsc->bits_per_component == 10;
+
+ DPU_REG_WRITE(c, DSC_COMMON_MODE, mode);
+
+@@ -68,7 +69,7 @@ static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc,
+ data |= (dsc->line_buf_depth << 3);
+ data |= (dsc->simple_422 << 2);
+ data |= (dsc->convert_rgb << 1);
+- data |= dsc->bits_per_component;
++ data |= input_10_bits;
+
+ DPU_REG_WRITE(c, DSC_ENC, data);
+
+--
+2.39.5
+
--- /dev/null
+From 2a447b3fb50af4360ebbb30ae263cb15f857b705 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 14 Dec 2024 00:14:18 +0200
+Subject: drm/msm/dpu: enable DPU_WB_INPUT_CTRL for DPU 5.x
+
+From: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+
+[ Upstream commit af0a4a2090cce732c70ad6c5f4145b43f39e3fe9 ]
+
+Several DPU 5.x platforms are supposed to be using DPU_WB_INPUT_CTRL to
+bind WB and PINGPONG blocks, but they do not. Change those platforms
+to use WB_SM8250_MASK, which includes that bit.
+
+Fixes: 1f5bcc4316b3 ("drm/msm/dpu: enable writeback on SC8108X")
+Fixes: ab2b03d73a66 ("drm/msm/dpu: enable writeback on SM6125")
+Fixes: 47cebb740a83 ("drm/msm/dpu: enable writeback on SM8150")
+Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Reviewed-by: Abhinav Kumar <quic_abhinavk@quicinc.com>
+Patchwork: https://patchwork.freedesktop.org/patch/628876/
+Link: https://lore.kernel.org/r/20241214-dpu-drop-features-v1-2-988f0662cb7e@linaro.org
+Signed-off-by: Abhinav Kumar <quic_abhinavk@quicinc.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h | 2 +-
+ drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h | 2 +-
+ drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h | 2 +-
+ 3 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
+index 421afacb72480..36cc9dbc00b5c 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
+@@ -297,7 +297,7 @@ static const struct dpu_wb_cfg sm8150_wb[] = {
+ {
+ .name = "wb_2", .id = WB_2,
+ .base = 0x65000, .len = 0x2c8,
+- .features = WB_SDM845_MASK,
++ .features = WB_SM8250_MASK,
+ .format_list = wb2_formats_rgb,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .clk_ctrl = DPU_CLK_CTRL_WB2,
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+index 641023b102bf5..e8eacdb47967a 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+@@ -304,7 +304,7 @@ static const struct dpu_wb_cfg sc8180x_wb[] = {
+ {
+ .name = "wb_2", .id = WB_2,
+ .base = 0x65000, .len = 0x2c8,
+- .features = WB_SDM845_MASK,
++ .features = WB_SM8250_MASK,
+ .format_list = wb2_formats_rgb,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .clk_ctrl = DPU_CLK_CTRL_WB2,
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
+index d039b96beb97c..76f60a2df7a89 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
+@@ -144,7 +144,7 @@ static const struct dpu_wb_cfg sm6125_wb[] = {
+ {
+ .name = "wb_2", .id = WB_2,
+ .base = 0x65000, .len = 0x2c8,
+- .features = WB_SDM845_MASK,
++ .features = WB_SM8250_MASK,
+ .format_list = wb2_formats_rgb,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .clk_ctrl = DPU_CLK_CTRL_WB2,
+--
+2.39.5
+
--- /dev/null
+From 12fc3377f6cef9a651a1a1fb5c8646e5574bb31d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 14 Dec 2024 00:14:17 +0200
+Subject: drm/msm/dpu: skip watchdog timer programming through TOP on >= SM8450
+
+From: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+
+[ Upstream commit 2f69e54584475ac85ea0e3407c9198ac7c6ea8ad ]
+
+The SM8450 and later chips have the DPU_MDP_PERIPH_0_REMOVED feature bit
+set, which means that those platforms have dropped some of the
+registers, including the WD TIMER-related ones. Stop providing the
+callback to program the WD timer on those platforms.
+
+Fixes: 100d7ef6995d ("drm/msm/dpu: add support for SM8450")
+Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Reviewed-by: Abhinav Kumar <quic_abhinavk@quicinc.com>
+Patchwork: https://patchwork.freedesktop.org/patch/628874/
+Link: https://lore.kernel.org/r/20241214-dpu-drop-features-v1-1-988f0662cb7e@linaro.org
+Signed-off-by: Abhinav Kumar <quic_abhinavk@quicinc.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
+index 0f40eea7f5e24..2040bee8d512f 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
+@@ -272,7 +272,7 @@ static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
+
+ if (cap & BIT(DPU_MDP_VSYNC_SEL))
+ ops->setup_vsync_source = dpu_hw_setup_vsync_sel;
+- else
++ else if (!(cap & BIT(DPU_MDP_PERIPH_0_REMOVED)))
+ ops->setup_vsync_source = dpu_hw_setup_wd_timer;
+
+ ops->get_safe_status = dpu_hw_get_safe_status;
+--
+2.39.5
+
--- /dev/null
+From f7f143b9941e857df5cade53bd63d2420634b8fb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 Feb 2025 16:08:43 +0100
+Subject: drm/msm/dsi/phy: Do not overwite PHY_CMN_CLK_CFG1 when choosing
+ bitclk source
+
+From: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+
+[ Upstream commit 73f69c6be2a9f22c31c775ec03c6c286bfe12cfa ]
+
+The PHY_CMN_CLK_CFG1 register has four fields used by the driver: the DSI
+clock divider, the bitclk source, and two bits for enabling the DSI PHY
+PLL clocks.
+
+dsi_7nm_set_usecase() sets only the bitclk source, so it should leave
+all other bits untouched. Use the newly introduced
+dsi_pll_cmn_clk_cfg1_update() to update the respective bits without
+overwriting the rest.
+
+While shuffling the code, define and use PHY_CMN_CLK_CFG1 bitfields to
+make the code more readable and obvious.
+
+Fixes: 1ef7c99d145c ("drm/msm/dsi: add support for 7nm DSI PHY/PLL")
+Signed-off-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Reviewed-by: Abhinav Kumar <quic_abhinavk@quicinc.com>
+Patchwork: https://patchwork.freedesktop.org/patch/637380/
+Link: https://lore.kernel.org/r/20250214-drm-msm-phy-pll-cfg-reg-v3-3-0943b850722c@linaro.org
+Signed-off-by: Abhinav Kumar <quic_abhinavk@quicinc.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c | 4 ++--
+ drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml | 1 +
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+index 388017db45d80..798168180c1ab 100644
+--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
++++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+@@ -617,7 +617,6 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy)
+ static int dsi_7nm_set_usecase(struct msm_dsi_phy *phy)
+ {
+ struct dsi_pll_7nm *pll_7nm = to_pll_7nm(phy->vco_hw);
+- void __iomem *base = phy->base;
+ u32 data = 0x0; /* internal PLL */
+
+ DBG("DSI PLL%d", pll_7nm->phy->id);
+@@ -636,7 +635,8 @@ static int dsi_7nm_set_usecase(struct msm_dsi_phy *phy)
+ }
+
+ /* set PLL src */
+- writel(data << 2, base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
++ dsi_pll_cmn_clk_cfg1_update(pll_7nm, DSI_7nm_PHY_CMN_CLK_CFG1_BITCLK_SEL__MASK,
++ DSI_7nm_PHY_CMN_CLK_CFG1_BITCLK_SEL(data));
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
+index cfaf78c028b13..35f7f40e405b7 100644
+--- a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
++++ b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
+@@ -16,6 +16,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+ <reg32 offset="0x00014" name="CLK_CFG1">
+ <bitfield name="CLK_EN" pos="5" type="boolean"/>
+ <bitfield name="CLK_EN_SEL" pos="4" type="boolean"/>
++ <bitfield name="BITCLK_SEL" low="2" high="3" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00018" name="GLBL_CTRL"/>
+ <reg32 offset="0x0001c" name="RBUF_CTRL"/>
+--
+2.39.5
+
--- /dev/null
+From cff19cbaf7550f78b9a0769728f8848befdd9534 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 Feb 2025 16:08:41 +0100
+Subject: drm/msm/dsi/phy: Protect PHY_CMN_CLK_CFG0 updated from driver side
+
+From: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+
+[ Upstream commit 588257897058a0b1aa47912db4fe93c6ff5e3887 ]
+
+The PHY_CMN_CLK_CFG0 register is updated by the PHY driver and by two
+divider clocks from the Common Clock Framework:
+devm_clk_hw_register_divider_parent_hw(). Concurrent access from the
+clocks side is protected with a spinlock; however, the driver's side,
+when restoring state, is not. State restoring is called from
+msm_dsi_phy_enable(), so there could be a path leading to concurrent and
+conflicting updates with the clock framework.
+
+Add the missing lock usage on the PHY driver side, encapsulated in its own
+function so the code will still be readable.
+
+While shuffling the code, define and use PHY_CMN_CLK_CFG0 bitfields to
+make the code more readable and obvious.
+
+Fixes: 1ef7c99d145c ("drm/msm/dsi: add support for 7nm DSI PHY/PLL")
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Reviewed-by: Abhinav Kumar <quic_abhinavk@quicinc.com>
+Signed-off-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Patchwork: https://patchwork.freedesktop.org/patch/637376/
+Link: https://lore.kernel.org/r/20250214-drm-msm-phy-pll-cfg-reg-v3-1-0943b850722c@linaro.org
+Signed-off-by: Abhinav Kumar <quic_abhinavk@quicinc.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c | 14 ++++++++++++--
+ .../gpu/drm/msm/registers/display/dsi_phy_7nm.xml | 5 ++++-
+ 2 files changed, 16 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+index 031446c87daec..25ca649de717e 100644
+--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
++++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+@@ -372,6 +372,15 @@ static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll)
+ ndelay(250);
+ }
+
++static void dsi_pll_cmn_clk_cfg0_write(struct dsi_pll_7nm *pll, u32 val)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&pll->postdiv_lock, flags);
++ writel(val, pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG0);
++ spin_unlock_irqrestore(&pll->postdiv_lock, flags);
++}
++
+ static void dsi_pll_disable_global_clk(struct dsi_pll_7nm *pll)
+ {
+ u32 data;
+@@ -574,8 +583,9 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy)
+ val |= cached->pll_out_div;
+ writel(val, pll_7nm->phy->pll_base + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
+
+- writel(cached->bit_clk_div | (cached->pix_clk_div << 4),
+- phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0);
++ dsi_pll_cmn_clk_cfg0_write(pll_7nm,
++ DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0(cached->bit_clk_div) |
++ DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4(cached->pix_clk_div));
+
+ val = readl(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+ val &= ~0x3;
+diff --git a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
+index d54b72f924493..e0bf6e016b4ce 100644
+--- a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
++++ b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
+@@ -9,7 +9,10 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+ <reg32 offset="0x00004" name="REVISION_ID1"/>
+ <reg32 offset="0x00008" name="REVISION_ID2"/>
+ <reg32 offset="0x0000c" name="REVISION_ID3"/>
+- <reg32 offset="0x00010" name="CLK_CFG0"/>
++ <reg32 offset="0x00010" name="CLK_CFG0">
++ <bitfield name="DIV_CTRL_3_0" low="0" high="3" type="uint"/>
++ <bitfield name="DIV_CTRL_7_4" low="4" high="7" type="uint"/>
++ </reg32>
+ <reg32 offset="0x00014" name="CLK_CFG1"/>
+ <reg32 offset="0x00018" name="GLBL_CTRL"/>
+ <reg32 offset="0x0001c" name="RBUF_CTRL"/>
+--
+2.39.5
+
--- /dev/null
+From 7805c759287a907c5f506a38445744b4fde2dc55 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 Feb 2025 16:08:42 +0100
+Subject: drm/msm/dsi/phy: Protect PHY_CMN_CLK_CFG1 against clock driver
+
+From: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+
+[ Upstream commit 5a97bc924ae0804b8dbf627e357acaa5ef761483 ]
+
+The PHY_CMN_CLK_CFG1 register is updated by the PHY driver and by a mux
+clock from the Common Clock Framework:
+devm_clk_hw_register_mux_parent_hws(). There could be a path leading to
+concurrent and conflicting updates between the PHY driver and the clock
+framework, e.g. changing the mux and enabling the PLL clocks.
+
+Add a dedicated spinlock to be sure all PHY_CMN_CLK_CFG1 updates are
+synchronized.
+
+While shuffling the code, define and use PHY_CMN_CLK_CFG1 bitfields to
+make the code more readable and obvious.
+
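+A generic sketch of the locked read-modify-write helper pattern introduced
+here (simplified userspace form with a mutex standing in for the kernel
+spinlock and a plain variable standing in for the MMIO register):
+
+  #include <pthread.h>
+  #include <stdint.h>
+
+  static pthread_mutex_t cfg1_lock = PTHREAD_MUTEX_INITIALIZER;
+  static volatile uint32_t clk_cfg1;          /* stand-in for the register */
+
+  /* update only the bits covered by @mask, leaving the rest untouched, with
+   * the whole read-modify-write done under one lock so a concurrent update
+   * from the clock framework cannot interleave */
+  static void clk_cfg1_update(uint32_t mask, uint32_t val)
+  {
+          uint32_t data;
+
+          pthread_mutex_lock(&cfg1_lock);
+          data = clk_cfg1;
+          data &= ~mask;
+          data |= val & mask;
+          clk_cfg1 = data;
+          pthread_mutex_unlock(&cfg1_lock);
+  }
+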
+Fixes: 1ef7c99d145c ("drm/msm/dsi: add support for 7nm DSI PHY/PLL")
+Signed-off-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Reviewed-by: Abhinav Kumar <quic_abhinavk@quicinc.com>
+Patchwork: https://patchwork.freedesktop.org/patch/637378/
+Link: https://lore.kernel.org/r/20250214-drm-msm-phy-pll-cfg-reg-v3-2-0943b850722c@linaro.org
+Signed-off-by: Abhinav Kumar <quic_abhinavk@quicinc.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c | 35 ++++++++++++-------
+ .../drm/msm/registers/display/dsi_phy_7nm.xml | 5 ++-
+ 2 files changed, 26 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+index 25ca649de717e..388017db45d80 100644
+--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
++++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+@@ -83,6 +83,9 @@ struct dsi_pll_7nm {
+ /* protects REG_DSI_7nm_PHY_CMN_CLK_CFG0 register */
+ spinlock_t postdiv_lock;
+
++ /* protects REG_DSI_7nm_PHY_CMN_CLK_CFG1 register */
++ spinlock_t pclk_mux_lock;
++
+ struct pll_7nm_cached_state cached_state;
+
+ struct dsi_pll_7nm *slave;
+@@ -381,22 +384,32 @@ static void dsi_pll_cmn_clk_cfg0_write(struct dsi_pll_7nm *pll, u32 val)
+ spin_unlock_irqrestore(&pll->postdiv_lock, flags);
+ }
+
+-static void dsi_pll_disable_global_clk(struct dsi_pll_7nm *pll)
++static void dsi_pll_cmn_clk_cfg1_update(struct dsi_pll_7nm *pll, u32 mask,
++ u32 val)
+ {
++ unsigned long flags;
+ u32 data;
+
++ spin_lock_irqsave(&pll->pclk_mux_lock, flags);
+ data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+- writel(data & ~BIT(5), pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
++ data &= ~mask;
++ data |= val & mask;
++
++ writel(data, pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
++ spin_unlock_irqrestore(&pll->pclk_mux_lock, flags);
++}
++
++static void dsi_pll_disable_global_clk(struct dsi_pll_7nm *pll)
++{
++ dsi_pll_cmn_clk_cfg1_update(pll, DSI_7nm_PHY_CMN_CLK_CFG1_CLK_EN, 0);
+ }
+
+ static void dsi_pll_enable_global_clk(struct dsi_pll_7nm *pll)
+ {
+- u32 data;
++ u32 cfg_1 = DSI_7nm_PHY_CMN_CLK_CFG1_CLK_EN | DSI_7nm_PHY_CMN_CLK_CFG1_CLK_EN_SEL;
+
+ writel(0x04, pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_3);
+-
+- data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+- writel(data | BIT(5) | BIT(4), pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
++ dsi_pll_cmn_clk_cfg1_update(pll, cfg_1, cfg_1);
+ }
+
+ static void dsi_pll_phy_dig_reset(struct dsi_pll_7nm *pll)
+@@ -574,7 +587,6 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy)
+ {
+ struct dsi_pll_7nm *pll_7nm = to_pll_7nm(phy->vco_hw);
+ struct pll_7nm_cached_state *cached = &pll_7nm->cached_state;
+- void __iomem *phy_base = pll_7nm->phy->base;
+ u32 val;
+ int ret;
+
+@@ -586,11 +598,7 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy)
+ dsi_pll_cmn_clk_cfg0_write(pll_7nm,
+ DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0(cached->bit_clk_div) |
+ DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4(cached->pix_clk_div));
+-
+- val = readl(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+- val &= ~0x3;
+- val |= cached->pll_mux;
+- writel(val, phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
++ dsi_pll_cmn_clk_cfg1_update(pll_7nm, 0x3, cached->pll_mux);
+
+ ret = dsi_pll_7nm_vco_set_rate(phy->vco_hw,
+ pll_7nm->vco_current_rate,
+@@ -743,7 +751,7 @@ static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provide
+ pll_by_2_bit,
+ }), 2, 0, pll_7nm->phy->base +
+ REG_DSI_7nm_PHY_CMN_CLK_CFG1,
+- 0, 1, 0, NULL);
++ 0, 1, 0, &pll_7nm->pclk_mux_lock);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ goto fail;
+@@ -788,6 +796,7 @@ static int dsi_pll_7nm_init(struct msm_dsi_phy *phy)
+ pll_7nm_list[phy->id] = pll_7nm;
+
+ spin_lock_init(&pll_7nm->postdiv_lock);
++ spin_lock_init(&pll_7nm->pclk_mux_lock);
+
+ pll_7nm->phy = phy;
+
+diff --git a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
+index e0bf6e016b4ce..cfaf78c028b13 100644
+--- a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
++++ b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
+@@ -13,7 +13,10 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+ <bitfield name="DIV_CTRL_3_0" low="0" high="3" type="uint"/>
+ <bitfield name="DIV_CTRL_7_4" low="4" high="7" type="uint"/>
+ </reg32>
+- <reg32 offset="0x00014" name="CLK_CFG1"/>
++ <reg32 offset="0x00014" name="CLK_CFG1">
++ <bitfield name="CLK_EN" pos="5" type="boolean"/>
++ <bitfield name="CLK_EN_SEL" pos="4" type="boolean"/>
++ </reg32>
+ <reg32 offset="0x00018" name="GLBL_CTRL"/>
+ <reg32 offset="0x0001c" name="RBUF_CTRL"/>
+ <reg32 offset="0x00020" name="VREG_CTRL_0"/>
+--
+2.39.5
+
--- /dev/null
+From b9acd5ca781a91129134b2a22956f30bad5282ad Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 18 Feb 2025 03:28:03 -0600
+Subject: drm/nouveau/pmu: Fix gp10b firmware guard
+
+From: Aaron Kling <webgeek1234@gmail.com>
+
+[ Upstream commit 3dbc0215e3c502a9f3221576da0fdc9847fb9721 ]
+
+Most kernel configs enable multiple Tegra SoC generations, causing this
+typo to go unnoticed. But in the case where a kernel config is strictly
+for Tegra186, this is a problem.
+
+Fixes: 989863d7cbe5 ("drm/nouveau/pmu: select implementation based on available firmware")
+Signed-off-by: Aaron Kling <webgeek1234@gmail.com>
+Signed-off-by: Danilo Krummrich <dakr@kernel.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20250218-nouveau-gm10b-guard-v2-1-a4de71500d48@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
+index a6f410ba60bc9..d393bc540f862 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
+@@ -75,7 +75,7 @@ gp10b_pmu_acr = {
+ .bootstrap_multiple_falcons = gp10b_pmu_acr_bootstrap_multiple_falcons,
+ };
+
+-#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
++#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
+ MODULE_FIRMWARE("nvidia/gp10b/pmu/desc.bin");
+ MODULE_FIRMWARE("nvidia/gp10b/pmu/image.bin");
+ MODULE_FIRMWARE("nvidia/gp10b/pmu/sig.bin");
+--
+2.39.5
+
--- /dev/null
+From a35efe4eec9f7e9232a892bf0d662c52cc0679b9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 Jan 2025 14:34:41 +0800
+Subject: firmware: arm_scmi: imx: Correct tx size of scmi_imx_misc_ctrl_set
+
+From: Peng Fan <peng.fan@nxp.com>
+
+[ Upstream commit ab027c488fc4a1fff0a5b712d4bdb2d2d324e8f8 ]
+
+'struct scmi_imx_misc_ctrl_set_in' has a zero-length array at the end.
+sizeof() does not count 'value[]', so the Tx size ends up smaller than
+the actual amount of data transmitted, and the SCMI firmware flags this
+as a protocol error.
+
+Fix this by enlarging the Tx size by 'num * sizeof(__le32)' to account
+for the size of the data.
+
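+For illustration, a standalone sketch of the sizeof() behaviour with a
+flexible array member (the struct below is a stand-in, not the actual
+driver definition):
+
+  #include <stdio.h>
+  #include <stdint.h>
+
+  struct ctrl_set_in {          /* stand-in for the real message layout */
+          uint32_t id;
+          uint32_t num;
+          uint32_t value[];     /* flexible array member */
+  };
+
+  int main(void)
+  {
+          size_t num = 4;
+
+          /* sizeof() covers only the fixed header, never value[] */
+          printf("header only: %zu\n", sizeof(struct ctrl_set_in));
+          /* what must actually be sent: header plus num payload words */
+          printf("full tx:     %zu\n",
+                 sizeof(struct ctrl_set_in) + num * sizeof(uint32_t));
+          return 0;
+  }
+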
+Fixes: 61c9f03e22fc ("firmware: arm_scmi: Add initial support for i.MX MISC protocol")
+Reviewed-by: Jacky Bai <ping.bai@nxp.com>
+Tested-by: Shengjiu Wang <shengjiu.wang@nxp.com>
+Acked-by: Jason Liu <jason.hui.liu@nxp.com>
+Signed-off-by: Peng Fan <peng.fan@nxp.com>
+Message-Id: <20250123063441.392555-1-peng.fan@oss.nxp.com>
+(sudeep.holla: Commit rewording and replace hardcoded sizeof(__le32) value)
+Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
+index a86ab9b35953f..2641faa329cdd 100644
+--- a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
++++ b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
+@@ -254,8 +254,8 @@ static int scmi_imx_misc_ctrl_set(const struct scmi_protocol_handle *ph,
+ if (num > max_num)
+ return -EINVAL;
+
+- ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_CTRL_SET, sizeof(*in),
+- 0, &t);
++ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_CTRL_SET,
++ sizeof(*in) + num * sizeof(__le32), 0, &t);
+ if (ret)
+ return ret;
+
+--
+2.39.5
+
--- /dev/null
+From 7f9a68479771d8a318d117536a30862ec50e3100 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 5 Feb 2025 15:41:43 +0100
+Subject: firmware: imx: IMX_SCMI_MISC_DRV should depend on ARCH_MXC
+
+From: Geert Uytterhoeven <geert+renesas@glider.be>
+
+[ Upstream commit be6686b823b30a69b1f71bde228ce042c78a1941 ]
+
+The i.MX System Controller Management Interface firmware is only present
+on Freescale i.MX SoCs. Hence add a dependency on ARCH_MXC, to prevent
+asking the user about this driver when configuring a kernel without
+Freescale i.MX platform support.
+
+Fixes: 514b2262ade48a05 ("firmware: arm_scmi: Fix i.MX build dependency")
+Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Reviewed-by: Fabio Estevam <festevam@gmail.com>
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/firmware/imx/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/firmware/imx/Kconfig b/drivers/firmware/imx/Kconfig
+index 907cd149c40a8..c964f4924359f 100644
+--- a/drivers/firmware/imx/Kconfig
++++ b/drivers/firmware/imx/Kconfig
+@@ -25,6 +25,7 @@ config IMX_SCU
+
+ config IMX_SCMI_MISC_DRV
+ tristate "IMX SCMI MISC Protocol driver"
++ depends on ARCH_MXC || COMPILE_TEST
+ default y if ARCH_MXC
+ help
+ The System Controller Management Interface firmware (SCMI FW) is
+--
+2.39.5
+
--- /dev/null
+From e0243827b657e43d3ff4d7c2661ca2d3a7e43b63 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 12 Feb 2025 09:11:07 -0800
+Subject: md/raid*: Fix the set_queue_limits implementations
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+[ Upstream commit fbe8f2fa971c537571994a0df532c511c4fb5537 ]
+
+queue_limits_cancel_update() must only be called if
+queue_limits_start_update() is called first. Remove the
+queue_limits_cancel_update() calls from the raid*_set_limits() functions
+because there is no corresponding queue_limits_start_update() call.
+
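+The contract, roughly (sketch only; 'q', the io_min value and the
+validation step are placeholders):
+
+  struct queue_limits lim = queue_limits_start_update(q);
+
+  lim.io_min = new_io_min;
+  if (!limits_are_sane(&lim)) {
+          /* only legal because queue_limits_start_update() was called */
+          queue_limits_cancel_update(q);
+          return -EINVAL;
+  }
+  return queue_limits_commit_update(q, &lim);
+
+  /* queue_limits_set(), used by raid*_set_limits(), takes and releases
+   * the limits lock internally, so the mddev_stack_rdev_limits() error
+   * path has nothing to cancel. */
+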
+Cc: Christoph Hellwig <hch@lst.de>
+Fixes: c6e56cf6b2e7 ("block: move integrity information into queue_limits")
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Link: https://lore.kernel.org/linux-raid/20250212171108.3483150-1-bvanassche@acm.org/
+Signed-off-by: Yu Kuai <yukuai@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/raid0.c | 4 +---
+ drivers/md/raid1.c | 4 +---
+ drivers/md/raid10.c | 4 +---
+ 3 files changed, 3 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
+index 32d5875247784..31bea72bcb01a 100644
+--- a/drivers/md/raid0.c
++++ b/drivers/md/raid0.c
+@@ -385,10 +385,8 @@ static int raid0_set_limits(struct mddev *mddev)
+ lim.io_min = mddev->chunk_sectors << 9;
+ lim.io_opt = lim.io_min * mddev->raid_disks;
+ err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
+- if (err) {
+- queue_limits_cancel_update(mddev->gendisk->queue);
++ if (err)
+ return err;
+- }
+ return queue_limits_set(mddev->gendisk->queue, &lim);
+ }
+
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index d83fe3b3abc00..8a994a1975ca7 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -3171,10 +3171,8 @@ static int raid1_set_limits(struct mddev *mddev)
+ md_init_stacking_limits(&lim);
+ lim.max_write_zeroes_sectors = 0;
+ err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
+- if (err) {
+- queue_limits_cancel_update(mddev->gendisk->queue);
++ if (err)
+ return err;
+- }
+ return queue_limits_set(mddev->gendisk->queue, &lim);
+ }
+
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index daf42acc4fb6f..a214fed4f1622 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -3963,10 +3963,8 @@ static int raid10_set_queue_limits(struct mddev *mddev)
+ lim.io_min = mddev->chunk_sectors << 9;
+ lim.io_opt = lim.io_min * raid10_nr_stripes(conf);
+ err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
+- if (err) {
+- queue_limits_cancel_update(mddev->gendisk->queue);
++ if (err)
+ return err;
+- }
+ return queue_limits_set(mddev->gendisk->queue, &lim);
+ }
+
+--
+2.39.5
+
--- /dev/null
+From d2b2a84a2ee31f4ff6fae217e3553ac400f3dca6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 31 Jan 2025 19:01:42 -0800
+Subject: net: Add rx_skb of kfree_skb to raw_tp_null_args[].
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit 5da7e15fb5a12e78de974d8908f348e279922ce9 ]
+
+Yan Zhai reported a BPF prog could trigger a null-ptr-deref [0]
+in trace_kfree_skb if the prog does not check if rx_sk is NULL.
+
+Commit c53795d48ee8 ("net: add rx_sk to trace_kfree_skb") added
+rx_sk to trace_kfree_skb, but rx_sk is optional and could be NULL.
+
+Let's add kfree_skb to raw_tp_null_args[] to let the BPF verifier
+validate such a prog and prevent the issue.
+
+Now we fail to load such a prog:
+
+ libbpf: prog 'drop': -- BEGIN PROG LOAD LOG --
+ 0: R1=ctx() R10=fp0
+ ; int BPF_PROG(drop, struct sk_buff *skb, void *location, @ kfree_skb_sk_null.bpf.c:21
+ 0: (79) r3 = *(u64 *)(r1 +24)
+ func 'kfree_skb' arg3 has btf_id 5253 type STRUCT 'sock'
+ 1: R1=ctx() R3_w=trusted_ptr_or_null_sock(id=1)
+ ; bpf_printk("sk: %d, %d\n", sk, sk->__sk_common.skc_family); @ kfree_skb_sk_null.bpf.c:24
+ 1: (69) r4 = *(u16 *)(r3 +16)
+ R3 invalid mem access 'trusted_ptr_or_null_'
+ processed 2 insns (limit 1000000) max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0
+ -- END PROG LOAD LOG --
+
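+A prog that checks rx_sk before dereferencing it now loads fine; a
+minimal sketch (not part of this patch, field access chosen to mirror
+the log above):
+
+  #include <vmlinux.h>
+  #include <bpf/bpf_tracing.h>
+
+  char _license[] SEC("license") = "GPL";
+
+  int family;
+
+  SEC("tp_btf/kfree_skb")
+  int BPF_PROG(drop, struct sk_buff *skb, void *location,
+               enum skb_drop_reason reason, struct sock *rx_sk)
+  {
+          if (!rx_sk)     /* rx_sk is PTR_MAYBE_NULL; the check is required */
+                  return 0;
+
+          family = rx_sk->__sk_common.skc_family;
+          return 0;
+  }
+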
+Note this fix requires commit 838a10bd2ebf ("bpf: Augment raw_tp
+arguments with PTR_MAYBE_NULL").
+
+[0]:
+BUG: kernel NULL pointer dereference, address: 0000000000000010
+ #PF: supervisor read access in kernel mode
+ #PF: error_code(0x0000) - not-present page
+PGD 0 P4D 0
+PREEMPT SMP
+RIP: 0010:bpf_prog_5e21a6db8fcff1aa_drop+0x10/0x2d
+Call Trace:
+ <TASK>
+ ? __die+0x1f/0x60
+ ? page_fault_oops+0x148/0x420
+ ? search_bpf_extables+0x5b/0x70
+ ? fixup_exception+0x27/0x2c0
+ ? exc_page_fault+0x75/0x170
+ ? asm_exc_page_fault+0x22/0x30
+ ? bpf_prog_5e21a6db8fcff1aa_drop+0x10/0x2d
+ bpf_trace_run4+0x68/0xd0
+ ? unix_stream_connect+0x1f4/0x6f0
+ sk_skb_reason_drop+0x90/0x120
+ unix_stream_connect+0x1f4/0x6f0
+ __sys_connect+0x7f/0xb0
+ __x64_sys_connect+0x14/0x20
+ do_syscall_64+0x47/0xc30
+ entry_SYSCALL_64_after_hwframe+0x4b/0x53
+
+Fixes: c53795d48ee8 ("net: add rx_sk to trace_kfree_skb")
+Reported-by: Yan Zhai <yan@cloudflare.com>
+Closes: https://lore.kernel.org/netdev/Z50zebTRzI962e6X@debian.debian/
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Tested-by: Yan Zhai <yan@cloudflare.com>
+Acked-by: Jiri Olsa <jolsa@kernel.org>
+Link: https://lore.kernel.org/r/20250201030142.62703-1-kuniyu@amazon.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/btf.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
+index a44f4be592be7..2c54c148a94f3 100644
+--- a/kernel/bpf/btf.c
++++ b/kernel/bpf/btf.c
+@@ -6483,6 +6483,8 @@ static const struct bpf_raw_tp_null_args raw_tp_null_args[] = {
+ /* rxrpc */
+ { "rxrpc_recvdata", 0x1 },
+ { "rxrpc_resend", 0x10 },
++ /* skb */
++	{ "kfree_skb", 0x1000 },
+ /* sunrpc */
+ { "xs_stream_read_data", 0x1 },
+ /* ... from xprt_cong_event event class */
+--
+2.39.5
+
--- /dev/null
+From ce134c0174bcf607e65a7b03deb06fa13fdade14 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 24 Jan 2025 19:15:23 +0100
+Subject: nouveau/svm: fix missing folio unlock + put after
+ make_device_exclusive_range()
+
+From: David Hildenbrand <david@redhat.com>
+
+[ Upstream commit b3fefbb30a1691533cb905006b69b2a474660744 ]
+
+In case we have to retry the loop, we fail to unlock and put the folio.
+In that case, we will keep failing make_device_exclusive_range() because
+we cannot grab the folio lock, and we even return from the function with
+the folio locked and referenced, so make_device_exclusive_range()
+effectively never succeeds.
+
+While at it, convert the other unlock+put to use a folio as well.
+
+This was found by code inspection.
+
+Fixes: 8f187163eb89 ("nouveau/svm: implement atomic SVM access")
+Signed-off-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Alistair Popple <apopple@nvidia.com>
+Tested-by: Alistair Popple <apopple@nvidia.com>
+Signed-off-by: Danilo Krummrich <dakr@kernel.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20250124181524.3584236-2-david@redhat.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/nouveau/nouveau_svm.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
+index b4da82ddbb6b2..8ea98f06d39af 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
++++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
+@@ -590,6 +590,7 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm,
+ unsigned long timeout =
+ jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+ struct mm_struct *mm = svmm->notifier.mm;
++ struct folio *folio;
+ struct page *page;
+ unsigned long start = args->p.addr;
+ unsigned long notifier_seq;
+@@ -616,12 +617,16 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm,
+ ret = -EINVAL;
+ goto out;
+ }
++ folio = page_folio(page);
+
+ mutex_lock(&svmm->mutex);
+ if (!mmu_interval_read_retry(¬ifier->notifier,
+ notifier_seq))
+ break;
+ mutex_unlock(&svmm->mutex);
++
++ folio_unlock(folio);
++ folio_put(folio);
+ }
+
+ /* Map the page on the GPU. */
+@@ -637,8 +642,8 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm,
+ ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL);
+ mutex_unlock(&svmm->mutex);
+
+- unlock_page(page);
+- put_page(page);
++ folio_unlock(folio);
++ folio_put(folio);
+
+ out:
+ mmu_interval_notifier_remove(¬ifier->notifier);
+--
+2.39.5
+
--- /dev/null
+From 5705c01ed464101432660bd71bf2b8ae8c05a539 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 13 Feb 2025 10:05:14 -0700
+Subject: nvme/ioctl: add missing space in err message
+
+From: Caleb Sander Mateos <csander@purestorage.com>
+
+[ Upstream commit 487a3ea7b1b8ba2ca7d2c2bb3c3594dc360d6261 ]
+
+nvme_validate_passthru_nsid() logs an err message whose format string is
+split over 2 lines. There is a missing space between the two pieces,
+resulting in log lines like "... does not match nsid (1)of namespace".
+Add the missing space between ")" and "of". Also combine the format
+string pieces onto a single line to make the err message easier to grep.
+
+Fixes: e7d4b5493a2d ("nvme: factor out a nvme_validate_passthru_nsid helper")
+Signed-off-by: Caleb Sander Mateos <csander@purestorage.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/ioctl.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
+index a96976b22fa79..61af1583356c2 100644
+--- a/drivers/nvme/host/ioctl.c
++++ b/drivers/nvme/host/ioctl.c
+@@ -276,8 +276,7 @@ static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
+ {
+ if (ns && nsid != ns->head->ns_id) {
+ dev_err(ctrl->device,
+- "%s: nsid (%u) in cmd does not match nsid (%u)"
+- "of namespace\n",
++ "%s: nsid (%u) in cmd does not match nsid (%u) of namespace\n",
+ current->comm, nsid, ns->head->ns_id);
+ return false;
+ }
+--
+2.39.5
+
--- /dev/null
+From 3ef994bfa02347a6333873b1f26fa8ef284a3f24 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 13 Feb 2025 15:52:31 +0900
+Subject: nvme: tcp: Fix compilation warning with W=1
+
+From: Damien Le Moal <dlemoal@kernel.org>
+
+[ Upstream commit cd513e0434c3e736c549bc99bf7982658b25114d ]
+
+When compiling with W=1, a warning result for the function
+nvme_tcp_set_queue_io_cpu():
+
+host/tcp.c:1578: warning: Function parameter or struct member 'queue'
+not described in 'nvme_tcp_set_queue_io_cpu'
+host/tcp.c:1578: warning: expecting prototype for Track the number of
+queues assigned to each cpu using a global per(). Prototype was for
+nvme_tcp_set_queue_io_cpu() instead
+
+Avoid this warning by using the regular comment format for the function
+nvme_tcp_set_queue_io_cpu() instead of the kdoc comment format.
+
+Fixes: 32193789878c ("nvme-tcp: Fix I/O queue cpu spreading for multiple controllers")
+Signed-off-by: Damien Le Moal <dlemoal@kernel.org>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/tcp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index 8305d3c128074..34eb3dabdc8a6 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -1565,7 +1565,7 @@ static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
+ ctrl->io_queues[HCTX_TYPE_POLL];
+ }
+
+-/**
++/*
+ * Track the number of queues assigned to each cpu using a global per-cpu
+ * counter and select the least used cpu from the mq_map. Our goal is to spread
+ * different controllers I/O threads across different cpu cores.
+--
+2.39.5
+
--- /dev/null
+From fb8b5ac8a785482ba000ad447b518ab340538390 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 24 Jan 2025 11:43:10 -0700
+Subject: nvme-tcp: fix connect failure on receiving partial ICResp PDU
+
+From: Caleb Sander Mateos <csander@purestorage.com>
+
+[ Upstream commit 578539e0969028f711c34d9a4565931edfe1d730 ]
+
+nvme_tcp_init_connection() attempts to receive an ICResp PDU but only
+checks that the return value from recvmsg() is non-negative. If the
+sender closes the TCP connection or sends fewer than 128 bytes, this
+check will pass even though the full PDU wasn't received.
+
+Ensure the full ICResp PDU is received by checking that recvmsg()
+returns the expected 128 bytes.
+
+Additionally set the MSG_WAITALL flag for recvmsg(), as a sender could
+split the ICResp over multiple TCP frames. Without MSG_WAITALL,
+recvmsg() could return prematurely with only part of the PDU.
+
+Fixes: 3f2304f8c6d6 ("nvme-tcp: add NVMe over TCP host driver")
+Signed-off-by: Caleb Sander Mateos <csander@purestorage.com>
+Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/tcp.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index 34eb3dabdc8a6..840ae475074d0 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -1449,11 +1449,14 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
+ msg.msg_control = cbuf;
+ msg.msg_controllen = sizeof(cbuf);
+ }
++ msg.msg_flags = MSG_WAITALL;
+ ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
+ iov.iov_len, msg.msg_flags);
+- if (ret < 0) {
++ if (ret < sizeof(*icresp)) {
+ pr_warn("queue %d: failed to receive icresp, error %d\n",
+ nvme_tcp_queue_id(queue), ret);
++ if (ret >= 0)
++ ret = -ECONNRESET;
+ goto free_icresp;
+ }
+ ret = -ENOTCONN;
+--
+2.39.5
+
--- /dev/null
+From 25ad4188f3b10c8313e49ede5e416c1c798a01ed Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 Feb 2025 09:31:29 +0100
+Subject: platform: cznic: CZNIC_PLATFORMS should depend on ARCH_MVEBU
+
+From: Geert Uytterhoeven <geert+renesas@glider.be>
+
+[ Upstream commit dd0f05b98925111f4530d7dab774398cdb32e9e3 ]
+
+CZ.NIC's Turris devices are based on Marvell EBU SoCs. Hence add a
+dependency on ARCH_MVEBU, to prevent asking the user about these drivers
+when configuring a kernel that cannot run on an affected CZ.NIC Turris
+system.
+
+Fixes: 992f1a3d4e88498d ("platform: cznic: Add preliminary support for Turris Omnia MCU")
+Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/platform/cznic/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/platform/cznic/Kconfig b/drivers/platform/cznic/Kconfig
+index 49c383eb67854..13e37b49d9d01 100644
+--- a/drivers/platform/cznic/Kconfig
++++ b/drivers/platform/cznic/Kconfig
+@@ -6,6 +6,7 @@
+
+ menuconfig CZNIC_PLATFORMS
+ bool "Platform support for CZ.NIC's Turris hardware"
++ depends on ARCH_MVEBU || COMPILE_TEST
+ help
+ Say Y here to be able to choose driver support for CZ.NIC's Turris
+ devices. This option alone does not add any kernel code.
+--
+2.39.5
+
--- /dev/null
+From 52088d7ebd878f221cacf8d1f3f06546e569cc41 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 31 Jan 2025 17:14:51 -0600
+Subject: power: supply: axp20x_battery: Fix fault handling for AXP717
+
+From: Chris Morgan <macromorgan@hotmail.com>
+
+[ Upstream commit 98380110bd48fbfd6a798ee11fffff893d36062c ]
+
+Correct the fault handling for the AXP717 by changing the i2c write
+from regmap_update_bits() to regmap_write_bits(). The update-bits
+variant does a read-modify-write and skips the register write when the
+value is unchanged, so it cannot clear a RW1C register, where a 1 that
+already reads back as set must be written again to clear it. The
+write-bits variant forces the write.
+
+Additionally, as part of this testing I confirmed that errors reappear
+if the condition persists, so remove the comment about assumptions.
+
+Fixes: 6625767049c2 ("power: supply: axp20x_battery: add support for AXP717")
+Signed-off-by: Chris Morgan <macromorgan@hotmail.com>
+Reviewed-by: Chen-Yu Tsai <wens@csie.org>
+Link: https://lore.kernel.org/r/20250131231455.153447-2-macroalpha82@gmail.com
+Signed-off-by: Sebastian Reichel <sebastian.reichel@collabora.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/power/supply/axp20x_battery.c | 31 +++++++++++++--------------
+ 1 file changed, 15 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/power/supply/axp20x_battery.c b/drivers/power/supply/axp20x_battery.c
+index f71cc90fea127..57eba1ddb17ba 100644
+--- a/drivers/power/supply/axp20x_battery.c
++++ b/drivers/power/supply/axp20x_battery.c
+@@ -466,10 +466,9 @@ static int axp717_battery_get_prop(struct power_supply *psy,
+
+ /*
+ * If a fault is detected it must also be cleared; if the
+- * condition persists it should reappear (This is an
+- * assumption, it's actually not documented). A restart was
+- * not sufficient to clear the bit in testing despite the
+- * register listed as POR.
++ * condition persists it should reappear. A restart was not
++ * sufficient to clear the bit in testing despite the register
++ * listed as POR.
+ */
+ case POWER_SUPPLY_PROP_HEALTH:
+ ret = regmap_read(axp20x_batt->regmap, AXP717_PMU_FAULT,
+@@ -480,26 +479,26 @@ static int axp717_battery_get_prop(struct power_supply *psy,
+ switch (reg & AXP717_BATT_PMU_FAULT_MASK) {
+ case AXP717_BATT_UVLO_2_5V:
+ val->intval = POWER_SUPPLY_HEALTH_DEAD;
+- regmap_update_bits(axp20x_batt->regmap,
+- AXP717_PMU_FAULT,
+- AXP717_BATT_UVLO_2_5V,
+- AXP717_BATT_UVLO_2_5V);
++ regmap_write_bits(axp20x_batt->regmap,
++ AXP717_PMU_FAULT,
++ AXP717_BATT_UVLO_2_5V,
++ AXP717_BATT_UVLO_2_5V);
+ return 0;
+
+ case AXP717_BATT_OVER_TEMP:
+ val->intval = POWER_SUPPLY_HEALTH_HOT;
+- regmap_update_bits(axp20x_batt->regmap,
+- AXP717_PMU_FAULT,
+- AXP717_BATT_OVER_TEMP,
+- AXP717_BATT_OVER_TEMP);
++ regmap_write_bits(axp20x_batt->regmap,
++ AXP717_PMU_FAULT,
++ AXP717_BATT_OVER_TEMP,
++ AXP717_BATT_OVER_TEMP);
+ return 0;
+
+ case AXP717_BATT_UNDER_TEMP:
+ val->intval = POWER_SUPPLY_HEALTH_COLD;
+- regmap_update_bits(axp20x_batt->regmap,
+- AXP717_PMU_FAULT,
+- AXP717_BATT_UNDER_TEMP,
+- AXP717_BATT_UNDER_TEMP);
++ regmap_write_bits(axp20x_batt->regmap,
++ AXP717_PMU_FAULT,
++ AXP717_BATT_UNDER_TEMP,
++ AXP717_BATT_UNDER_TEMP);
+ return 0;
+
+ default:
+--
+2.39.5
+
--- /dev/null
+From 9ef89579392ce8c45022237deea9f32d71cfe49d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 30 Jan 2025 09:00:34 +0000
+Subject: power: supply: da9150-fg: fix potential overflow
+
+From: Andrey Vatoropin <a.vatoropin@crpt.ru>
+
+[ Upstream commit 3fb3cb4350befc4f901c54e0cb4a2a47b1302e08 ]
+
+The variable sd_gain is four bytes wide (DA9150_QIF_SD_GAIN_SIZE) and
+shunt_val is two bytes wide (DA9150_QIF_SHUNT_VAL_SIZE).
+
+The expression sd_gain * shunt_val is currently evaluated using 32-bit
+arithmetic, so the multiplication may overflow.
+
+As a value of type 'u64' is used as storage for the eventual result,
+put the ULL constant first in each expression so that the compiler
+performs the whole computation in 64-bit arithmetic. According to C99,
+'unsigned long long' is guaranteed to be at least 64 bits wide.
+
+Remove the explicit cast to u64 as it is meaningless.
+
+For consistency, apply the same change to the expression computing
+'iavg'.
+
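+The difference in operand order can be reproduced in isolation
+(standalone example; the values are made up to force the overflow):
+
+  #include <stdio.h>
+  #include <stdint.h>
+
+  int main(void)
+  {
+          uint32_t sd_gain = 1 << 24;
+          uint16_t shunt_val = 1 << 8;
+
+          /* evaluated left to right: sd_gain * shunt_val wraps in 32-bit
+           * arithmetic before 65536ULL widens the expression */
+          uint64_t before = (uint64_t)(sd_gain * shunt_val * 65536ULL);
+          /* leading with the ULL constant keeps every step in 64 bits */
+          uint64_t after = 65536ULL * sd_gain * shunt_val;
+
+          printf("%llu\n%llu\n", (unsigned long long)before,
+                 (unsigned long long)after);
+          return 0;
+  }
+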
+Found by Linux Verification Center (linuxtesting.org) with SVACE.
+
+Fixes: a419b4fd9138 ("power: Add support for DA9150 Fuel-Gauge")
+Signed-off-by: Andrey Vatoropin <a.vatoropin@crpt.ru>
+Link: https://lore.kernel.org/r/20250130090030.53422-1-a.vatoropin@crpt.ru
+Signed-off-by: Sebastian Reichel <sebastian.reichel@collabora.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/power/supply/da9150-fg.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/power/supply/da9150-fg.c b/drivers/power/supply/da9150-fg.c
+index 652c1f213af1c..4f28ef1bba1a3 100644
+--- a/drivers/power/supply/da9150-fg.c
++++ b/drivers/power/supply/da9150-fg.c
+@@ -247,9 +247,9 @@ static int da9150_fg_current_avg(struct da9150_fg *fg,
+ DA9150_QIF_SD_GAIN_SIZE);
+ da9150_fg_read_sync_end(fg);
+
+- div = (u64) (sd_gain * shunt_val * 65536ULL);
++ div = 65536ULL * sd_gain * shunt_val;
+ do_div(div, 1000000);
+- res = (u64) (iavg * 1000000ULL);
++ res = 1000000ULL * iavg;
+ do_div(res, div);
+
+ val->intval = (int) res;
+--
+2.39.5
+
--- /dev/null
+From d4f18a9db97d338a52c6a606d7597fd8fd86a425 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Nov 2024 09:19:59 -0800
+Subject: selftests/bpf: Add tests for raw_tp null handling
+
+From: Kumar Kartikeya Dwivedi <memxor@gmail.com>
+
+[ Upstream commit d798ce3f4cab1b0d886b19ec5cc8e6b3d7e35081 ]
+
+Ensure that trusted PTR_TO_BTF_ID accesses perform PROBE_MEM handling in
+raw_tp program. Without the previous fix, this selftest crashes the
+kernel due to a NULL-pointer dereference. Also ensure that dead code
+elimination does not kick in for checks on the pointer.
+
+Reviewed-by: Jiri Olsa <jolsa@kernel.org>
+Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
+Link: https://lore.kernel.org/r/20241104171959.2938862-4-memxor@gmail.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Stable-dep-of: 5da7e15fb5a1 ("net: Add rx_skb of kfree_skb to raw_tp_null_args[].")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../bpf/bpf_testmod/bpf_testmod-events.h | 8 +++++
+ .../selftests/bpf/bpf_testmod/bpf_testmod.c | 2 ++
+ .../selftests/bpf/prog_tests/raw_tp_null.c | 25 +++++++++++++++
+ .../testing/selftests/bpf/progs/raw_tp_null.c | 32 +++++++++++++++++++
+ 4 files changed, 67 insertions(+)
+ create mode 100644 tools/testing/selftests/bpf/prog_tests/raw_tp_null.c
+ create mode 100644 tools/testing/selftests/bpf/progs/raw_tp_null.c
+
+diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h
+index 6c3b4d4f173ac..aeef86b3da747 100644
+--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h
++++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h
+@@ -40,6 +40,14 @@ DECLARE_TRACE(bpf_testmod_test_nullable_bare,
+ TP_ARGS(ctx__nullable)
+ );
+
++struct sk_buff;
++
++DECLARE_TRACE(bpf_testmod_test_raw_tp_null,
++ TP_PROTO(struct sk_buff *skb),
++ TP_ARGS(skb)
++);
++
++
+ #undef BPF_TESTMOD_DECLARE_TRACE
+ #ifdef DECLARE_TRACE_WRITABLE
+ #define BPF_TESTMOD_DECLARE_TRACE(call, proto, args, size) \
+diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
+index 8835761d9a126..4e6a9e9c03687 100644
+--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
++++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
+@@ -380,6 +380,8 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj,
+
+ (void)bpf_testmod_test_arg_ptr_to_struct(&struct_arg1_2);
+
++ (void)trace_bpf_testmod_test_raw_tp_null(NULL);
++
+ struct_arg3 = kmalloc((sizeof(struct bpf_testmod_struct_arg_3) +
+ sizeof(int)), GFP_KERNEL);
+ if (struct_arg3 != NULL) {
+diff --git a/tools/testing/selftests/bpf/prog_tests/raw_tp_null.c b/tools/testing/selftests/bpf/prog_tests/raw_tp_null.c
+new file mode 100644
+index 0000000000000..6fa19449297e9
+--- /dev/null
++++ b/tools/testing/selftests/bpf/prog_tests/raw_tp_null.c
+@@ -0,0 +1,25 @@
++// SPDX-License-Identifier: GPL-2.0
++/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
++
++#include <test_progs.h>
++#include "raw_tp_null.skel.h"
++
++void test_raw_tp_null(void)
++{
++ struct raw_tp_null *skel;
++
++ skel = raw_tp_null__open_and_load();
++ if (!ASSERT_OK_PTR(skel, "raw_tp_null__open_and_load"))
++ return;
++
++ skel->bss->tid = sys_gettid();
++
++ if (!ASSERT_OK(raw_tp_null__attach(skel), "raw_tp_null__attach"))
++ goto end;
++
++ ASSERT_OK(trigger_module_test_read(2), "trigger testmod read");
++ ASSERT_EQ(skel->bss->i, 3, "invocations");
++
++end:
++ raw_tp_null__destroy(skel);
++}
+diff --git a/tools/testing/selftests/bpf/progs/raw_tp_null.c b/tools/testing/selftests/bpf/progs/raw_tp_null.c
+new file mode 100644
+index 0000000000000..457f34c151e32
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/raw_tp_null.c
+@@ -0,0 +1,32 @@
++// SPDX-License-Identifier: GPL-2.0
++/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
++
++#include <vmlinux.h>
++#include <bpf/bpf_tracing.h>
++
++char _license[] SEC("license") = "GPL";
++
++int tid;
++int i;
++
++SEC("tp_btf/bpf_testmod_test_raw_tp_null")
++int BPF_PROG(test_raw_tp_null, struct sk_buff *skb)
++{
++ struct task_struct *task = bpf_get_current_task_btf();
++
++ if (task->pid != tid)
++ return 0;
++
++ i = i + skb->mark + 1;
++ /* The compiler may move the NULL check before this deref, which causes
++ * the load to fail as deref of scalar. Prevent that by using a barrier.
++ */
++ barrier();
++ /* If dead code elimination kicks in, the increment below will
++ * be removed. For raw_tp programs, we mark input arguments as
++ * PTR_MAYBE_NULL, so branch prediction should never kick in.
++ */
++ if (!skb)
++ i += 2;
++ return 0;
++}
+--
+2.39.5
+
net-axienet-set-mac_managed_pm.patch
tcp-drop-secpath-at-the-same-time-as-we-currently-dr.patch
net-allow-small-head-cache-usage-with-large-max_skb_.patch
+bpf-test_run-fix-use-after-free-issue-in-eth_skb_pkt.patch
+bpf-unify-vm_write-vs-vm_maywrite-use-in-bpf-map-mma.patch
+bpf-avoid-holding-freeze_mutex-during-mmap-operation.patch
+strparser-add-read_sock-callback.patch
+bpf-fix-wrong-copied_seq-calculation.patch
+bpf-disable-non-stream-socket-for-strparser.patch
+bpf-fix-deadlock-when-freeing-cgroup-storage.patch
+arm64-dts-rockchip-fix-lcdpwr_en-pin-for-cool-pi-gen.patch
+power-supply-da9150-fg-fix-potential-overflow.patch
+power-supply-axp20x_battery-fix-fault-handling-for-a.patch
+selftests-bpf-add-tests-for-raw_tp-null-handling.patch
+net-add-rx_skb-of-kfree_skb-to-raw_tp_null_args.patch
+bpf-fix-softlockup-in-arena_map_free-on-64k-page-ker.patch
+arm64-dts-rockchip-adjust-smmu-interrupt-type-on-rk3.patch
+firmware-arm_scmi-imx-correct-tx-size-of-scmi_imx_mi.patch
+md-raid-fix-the-set_queue_limits-implementations.patch
+firmware-imx-imx_scmi_misc_drv-should-depend-on-arch.patch
+platform-cznic-cznic_platforms-should-depend-on-arch.patch
+nouveau-svm-fix-missing-folio-unlock-put-after-make_.patch
+drm-msm-avoid-rounding-up-to-one-jiffy.patch
+drm-msm-dpu-skip-watchdog-timer-programming-through-.patch
+drm-msm-dpu-enable-dpu_wb_input_ctrl-for-dpu-5.x.patch
+drm-msm-dpu-don-t-leak-bits_per_component-into-rando.patch
+drm-msm-dsi-phy-protect-phy_cmn_clk_cfg0-updated-fro.patch
+drm-msm-dsi-phy-protect-phy_cmn_clk_cfg1-against-clo.patch
+drm-msm-dsi-phy-do-not-overwite-phy_cmn_clk_cfg1-whe.patch
+nvme-tcp-fix-compilation-warning-with-w-1.patch
+nvme-tcp-fix-connect-failure-on-receiving-partial-ic.patch
+nvme-ioctl-add-missing-space-in-err-message.patch
+bpf-skip-non-exist-keys-in-generic_map_lookup_batch.patch
+drm-nouveau-pmu-fix-gp10b-firmware-guard.patch
--- /dev/null
+From 19d792992236770cc065335db1991075f9f47bb4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Jan 2025 18:09:13 +0800
+Subject: strparser: Add read_sock callback
+
+From: Jiayuan Chen <mrpre@163.com>
+
+[ Upstream commit 0532a79efd68a4d9686b0385e4993af4b130ff82 ]
+
+Added a new read_sock handler, allowing users to customize read operations
+instead of relying on the native socket's read_sock.
+
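+A consumer opts in by filling the new field when registering its
+callbacks; a minimal sketch (the my_* helpers are placeholders):
+
+  static int my_read_sock(struct strparser *strp, read_descriptor_t *desc,
+                          sk_read_actor_t recv_actor)
+  {
+          /* feed buffered skbs to recv_actor instead of using
+           * sock->ops->read_sock on the underlying socket */
+          return my_drain_queue(strp->sk, desc, recv_actor);
+  }
+
+  static const struct strp_callbacks my_cb = {
+          .parse_msg = my_parse_msg,
+          .rcv_msg   = my_rcv_msg,
+          .read_sock = my_read_sock,  /* optional; NULL keeps the old path */
+  };
+
+  /* ... */
+  err = strp_init(&my_strp, sk, &my_cb);
+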
+Signed-off-by: Jiayuan Chen <mrpre@163.com>
+Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
+Reviewed-by: Jakub Sitnicki <jakub@cloudflare.com>
+Acked-by: John Fastabend <john.fastabend@gmail.com>
+Link: https://patch.msgid.link/20250122100917.49845-2-mrpre@163.com
+Stable-dep-of: 36b62df5683c ("bpf: Fix wrong copied_seq calculation")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ Documentation/networking/strparser.rst | 9 ++++++++-
+ include/net/strparser.h | 2 ++
+ net/strparser/strparser.c | 11 +++++++++--
+ 3 files changed, 19 insertions(+), 3 deletions(-)
+
+diff --git a/Documentation/networking/strparser.rst b/Documentation/networking/strparser.rst
+index 6cab1f74ae05a..7f623d1db72aa 100644
+--- a/Documentation/networking/strparser.rst
++++ b/Documentation/networking/strparser.rst
+@@ -112,7 +112,7 @@ Functions
+ Callbacks
+ =========
+
+-There are six callbacks:
++There are seven callbacks:
+
+ ::
+
+@@ -182,6 +182,13 @@ There are six callbacks:
+ the length of the message. skb->len - offset may be greater
+ then full_len since strparser does not trim the skb.
+
++ ::
++
++ int (*read_sock)(struct strparser *strp, read_descriptor_t *desc,
++ sk_read_actor_t recv_actor);
++
++ The read_sock callback is used by strparser instead of
++ sock->ops->read_sock, if provided.
+ ::
+
+ int (*read_sock_done)(struct strparser *strp, int err);
+diff --git a/include/net/strparser.h b/include/net/strparser.h
+index 41e2ce9e9e10f..0a83010b3a64a 100644
+--- a/include/net/strparser.h
++++ b/include/net/strparser.h
+@@ -43,6 +43,8 @@ struct strparser;
+ struct strp_callbacks {
+ int (*parse_msg)(struct strparser *strp, struct sk_buff *skb);
+ void (*rcv_msg)(struct strparser *strp, struct sk_buff *skb);
++ int (*read_sock)(struct strparser *strp, read_descriptor_t *desc,
++ sk_read_actor_t recv_actor);
+ int (*read_sock_done)(struct strparser *strp, int err);
+ void (*abort_parser)(struct strparser *strp, int err);
+ void (*lock)(struct strparser *strp);
+diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
+index 8299ceb3e3739..95696f42647ec 100644
+--- a/net/strparser/strparser.c
++++ b/net/strparser/strparser.c
+@@ -347,7 +347,10 @@ static int strp_read_sock(struct strparser *strp)
+ struct socket *sock = strp->sk->sk_socket;
+ read_descriptor_t desc;
+
+- if (unlikely(!sock || !sock->ops || !sock->ops->read_sock))
++ if (unlikely(!sock || !sock->ops))
++ return -EBUSY;
++
++ if (unlikely(!strp->cb.read_sock && !sock->ops->read_sock))
+ return -EBUSY;
+
+ desc.arg.data = strp;
+@@ -355,7 +358,10 @@ static int strp_read_sock(struct strparser *strp)
+ desc.count = 1; /* give more than one skb per call */
+
+ /* sk should be locked here, so okay to do read_sock */
+- sock->ops->read_sock(strp->sk, &desc, strp_recv);
++ if (strp->cb.read_sock)
++ strp->cb.read_sock(strp, &desc, strp_recv);
++ else
++ sock->ops->read_sock(strp->sk, &desc, strp_recv);
+
+ desc.error = strp->cb.read_sock_done(strp, desc.error);
+
+@@ -468,6 +474,7 @@ int strp_init(struct strparser *strp, struct sock *sk,
+ strp->cb.unlock = cb->unlock ? : strp_sock_unlock;
+ strp->cb.rcv_msg = cb->rcv_msg;
+ strp->cb.parse_msg = cb->parse_msg;
++ strp->cb.read_sock = cb->read_sock;
+ strp->cb.read_sock_done = cb->read_sock_done ? : default_read_sock_done;
+ strp->cb.abort_parser = cb->abort_parser ? : strp_abort_strp;
+
+--
+2.39.5
+