--- /dev/null
+From 04a899573fb87273a656f178b5f920c505f68875 Mon Sep 17 00:00:00 2001
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Mon, 20 Oct 2025 09:54:41 +0200
+Subject: bpf: Do not let BPF test infra emit invalid GSO types to stack
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+commit 04a899573fb87273a656f178b5f920c505f68875 upstream.
+
+Yinhao et al. reported that their fuzzer tool was able to trigger a
+skb_warn_bad_offload() from netif_skb_features() -> gso_features_check().
+When a BPF program - triggered via BPF test infra - pushes the packet
+to the loopback device via bpf_clone_redirect(), the mentioned offload
+warning can be seen. GSO-related features are then rightfully disabled.
+
+We get into this situation due to convert___skb_to_skb() setting
+gso_segs and gso_size but not gso_type. Technically, it makes sense
+that this warning triggers since the GSO properties are malformed due
+to the unset gso_type. Potentially, the gso_type could be marked
+non-trustworthy by setting it at least to SKB_GSO_DODGY without any
+other specific assumptions, but that also feels wrong given we should
+not go further into the GSO engine in the first place.
+
+The checks were added in 121d57af308d ("gso: validate gso_type in GSO
+handlers") because there were malicious (syzbot) senders that combine
+a protocol with a non-matching gso_type. If we would want to drop such
+packets, gso_features_check() currently only returns feature flags via
+netif_skb_features(), so one location for potentially dropping such skbs
+could be validate_xmit_unreadable_skb(), but then otoh it would be
+an additional check in the fast-path for a very corner case. Given
+bpf_clone_redirect() is the only place where BPF test infra could emit
+such packets, lets reject them right there.
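+
+For illustration, a minimal sketch of a tc BPF program that - when run
+via BPF_PROG_TEST_RUN with __sk_buff.gso_segs/gso_size set and the
+gso_type necessarily left at zero - would emit such a packet
+(hypothetical reproducer; the program name and the assumption that
+ifindex 1 is the loopback device are illustrative):
+
+  #include <linux/bpf.h>
+  #include <bpf/bpf_helpers.h>
+
+  SEC("tc")
+  int clone_to_lo(struct __sk_buff *skb)
+  {
+          /* The skb inherits gso_segs/gso_size from the test infra but
+           * its gso_type stays 0; with the check added below this now
+           * fails with -EBADMSG instead of reaching the GSO engine.
+           */
+          return bpf_clone_redirect(skb, 1 /* assumed: lo */, 0);
+  }
+
+  char _license[] SEC("license") = "GPL";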
+
+Fixes: 850a88cc4096 ("bpf: Expose __sk_buff wire_len/gso_segs to BPF_PROG_TEST_RUN")
+Fixes: cf62089b0edd ("bpf: Add gso_size to __sk_buff")
+Reported-by: Yinhao Hu <dddddd@hust.edu.cn>
+Reported-by: Kaiyan Mei <M202472210@hust.edu.cn>
+Reported-by: Dongliang Mu <dzm91@hust.edu.cn>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
+Link: https://patch.msgid.link/20251020075441.127980-1-daniel@iogearbox.net
+Signed-off-by: Shung-Hsi Yu <shung-hsi.yu@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/bpf/test_run.c | 5 +++++
+ net/core/filter.c | 7 +++++++
+ 2 files changed, 12 insertions(+)
+
+--- a/net/bpf/test_run.c
++++ b/net/bpf/test_run.c
+@@ -537,6 +537,11 @@ static int convert___skb_to_skb(struct s
+
+ if (__skb->gso_segs > GSO_MAX_SEGS)
+ return -EINVAL;
++
++ /* Currently GSO type is zero/unset. If this gets extended with
++ * a small list of accepted GSO types in future, the filter for
++ * an unset GSO type in bpf_clone_redirect() can be lifted.
++ */
+ skb_shinfo(skb)->gso_segs = __skb->gso_segs;
+ skb_shinfo(skb)->gso_size = __skb->gso_size;
+
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -2433,6 +2433,13 @@ BPF_CALL_3(bpf_clone_redirect, struct sk
+ if (unlikely(flags & (~(BPF_F_INGRESS) | BPF_F_REDIRECT_INTERNAL)))
+ return -EINVAL;
+
++ /* BPF test infra's convert___skb_to_skb() can create type-less
++ * GSO packets. gso_features_check() will detect this as a bad
++ * offload. However, let's not leak them out in the first place.
++ */
++ if (unlikely(skb_is_gso(skb) && !skb_shinfo(skb)->gso_type))
++ return -EBADMSG;
++
+ dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex);
+ if (unlikely(!dev))
+ return -EINVAL;
--- /dev/null
+From e09299225d5ba3916c91ef70565f7d2187e4cca0 Mon Sep 17 00:00:00 2001
+From: Paul Chaignon <paul.chaignon@gmail.com>
+Date: Tue, 22 Jul 2025 16:32:32 +0200
+Subject: bpf: Reject narrower access to pointer ctx fields
+
+From: Paul Chaignon <paul.chaignon@gmail.com>
+
+commit e09299225d5ba3916c91ef70565f7d2187e4cca0 upstream.
+
+The following BPF program, simplified from a syzkaller repro, causes a
+kernel warning:
+
+ r0 = *(u8 *)(r1 + 169);
+ exit;
+
+The pointer field sk sits at offset 168 in __sk_buff, so the one-byte
+load at offset 169 lands inside it. The access is detected as a
+narrower read in bpf_skb_is_valid_access() because its offset doesn't
+match offsetof(struct __sk_buff, sk) - a plain offsetof() case matches
+only the first byte of the field. The access is therefore allowed and
+later proceeds to bpf_convert_ctx_access(). Note that for the
+"is_narrower_load" case in convert_ctx_accesses(), the insn->off is
+aligned down, so the aligned offset matches offsetof(struct __sk_buff,
+sk) in bpf_convert_ctx_access() and cnt may not be 0. However,
+target_size stays 0 and the verifier errors with a kernel warning:
+
+ verifier bug: error during ctx access conversion(1)
+
+This patch fixes that to return a proper "invalid bpf_context access
+off=X size=Y" error on the load instruction.
+
+The same issue affects multiple other fields in context structures that
+allow narrow access. Some other non-affected fields (for sk_msg,
+sk_lookup, and sockopt) were also changed to use bpf_ctx_range_ptr for
+consistency.
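+
+For reference, bpf_ctx_range_ptr() relies on the GCC/Clang case-range
+extension so that a single switch case covers every byte offset inside
+the pointer field rather than just its first byte. A paraphrase of the
+64-bit variant from include/linux/filter.h (the exact definition may
+differ across kernel versions):
+
+  /* Matches any off in [offsetof(MEMBER), offsetofend(MEMBER) - 1],
+   * so the 1-byte read at __sk_buff offset 169 now hits the sk case
+   * and fails its size != sizeof(__u64) check instead of slipping
+   * through to the narrow-access path.
+   */
+  #define bpf_ctx_range_ptr(TYPE, MEMBER) \
+          offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1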
+
+Note this syzkaller crash was reported in the "Closes" link below, which
+used to be about a different bug, fixed in
+commit fce7bd8e385a ("bpf/verifier: Handle BPF_LOAD_ACQ instructions
+in insn_def_regno()"). Because syzbot somehow confused the two bugs,
+the new crash and repro didn't get reported to the mailing list.
+
+Fixes: f96da09473b52 ("bpf: simplify narrower ctx access")
+Fixes: 0df1a55afa832 ("bpf: Warn on internal verifier errors")
+Reported-by: syzbot+0ef84a7bdf5301d4cbec@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=0ef84a7bdf5301d4cbec
+Signed-off-by: Paul Chaignon <paul.chaignon@gmail.com>
+Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
+Acked-by: Eduard Zingerman <eddyz87@gmail.com>
+Link: https://patch.msgid.link/3b8dcee67ff4296903351a974ddd9c4dca768b64.1753194596.git.paul.chaignon@gmail.com
+[shung-hsi.yu: offsetof(struct bpf_sock_ops, skb_hwtstamp) case was
+dropped because it was only added in v6.2 with commit 9bb053490f1a
+("bpf: Add hwtstamp field for the sockops prog")]
+Signed-off-by: Shung-Hsi Yu <shung-hsi.yu@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/cgroup.c | 8 ++++----
+ net/core/filter.c | 18 +++++++++---------
+ 2 files changed, 13 insertions(+), 13 deletions(-)
+
+--- a/kernel/bpf/cgroup.c
++++ b/kernel/bpf/cgroup.c
+@@ -2028,22 +2028,22 @@ static bool cg_sockopt_is_valid_access(i
+ }
+
+ switch (off) {
+- case offsetof(struct bpf_sockopt, sk):
++ case bpf_ctx_range_ptr(struct bpf_sockopt, sk):
+ if (size != sizeof(__u64))
+ return false;
+ info->reg_type = PTR_TO_SOCKET;
+ break;
+- case offsetof(struct bpf_sockopt, optval):
++ case bpf_ctx_range_ptr(struct bpf_sockopt, optval):
+ if (size != sizeof(__u64))
+ return false;
+ info->reg_type = PTR_TO_PACKET;
+ break;
+- case offsetof(struct bpf_sockopt, optval_end):
++ case bpf_ctx_range_ptr(struct bpf_sockopt, optval_end):
+ if (size != sizeof(__u64))
+ return false;
+ info->reg_type = PTR_TO_PACKET_END;
+ break;
+- case offsetof(struct bpf_sockopt, retval):
++ case bpf_ctx_range(struct bpf_sockopt, retval):
+ if (size != size_default)
+ return false;
+ return prog->expected_attach_type == BPF_CGROUP_GETSOCKOPT;
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -8080,7 +8080,7 @@ static bool bpf_skb_is_valid_access(int
+ if (size != sizeof(__u64))
+ return false;
+ break;
+- case offsetof(struct __sk_buff, sk):
++ case bpf_ctx_range_ptr(struct __sk_buff, sk):
+ if (type == BPF_WRITE || size != sizeof(__u64))
+ return false;
+ info->reg_type = PTR_TO_SOCK_COMMON_OR_NULL;
+@@ -8597,7 +8597,7 @@ static bool sock_addr_is_valid_access(in
+ return false;
+ }
+ break;
+- case offsetof(struct bpf_sock_addr, sk):
++ case bpf_ctx_range_ptr(struct bpf_sock_addr, sk):
+ if (type != BPF_READ)
+ return false;
+ if (size != sizeof(__u64))
+@@ -8651,17 +8651,17 @@ static bool sock_ops_is_valid_access(int
+ if (size != sizeof(__u64))
+ return false;
+ break;
+- case offsetof(struct bpf_sock_ops, sk):
++ case bpf_ctx_range_ptr(struct bpf_sock_ops, sk):
+ if (size != sizeof(__u64))
+ return false;
+ info->reg_type = PTR_TO_SOCKET_OR_NULL;
+ break;
+- case offsetof(struct bpf_sock_ops, skb_data):
++ case bpf_ctx_range_ptr(struct bpf_sock_ops, skb_data):
+ if (size != sizeof(__u64))
+ return false;
+ info->reg_type = PTR_TO_PACKET;
+ break;
+- case offsetof(struct bpf_sock_ops, skb_data_end):
++ case bpf_ctx_range_ptr(struct bpf_sock_ops, skb_data_end):
+ if (size != sizeof(__u64))
+ return false;
+ info->reg_type = PTR_TO_PACKET_END;
+@@ -8735,17 +8735,17 @@ static bool sk_msg_is_valid_access(int o
+ return false;
+
+ switch (off) {
+- case offsetof(struct sk_msg_md, data):
++ case bpf_ctx_range_ptr(struct sk_msg_md, data):
+ info->reg_type = PTR_TO_PACKET;
+ if (size != sizeof(__u64))
+ return false;
+ break;
+- case offsetof(struct sk_msg_md, data_end):
++ case bpf_ctx_range_ptr(struct sk_msg_md, data_end):
+ info->reg_type = PTR_TO_PACKET_END;
+ if (size != sizeof(__u64))
+ return false;
+ break;
+- case offsetof(struct sk_msg_md, sk):
++ case bpf_ctx_range_ptr(struct sk_msg_md, sk):
+ if (size != sizeof(__u64))
+ return false;
+ info->reg_type = PTR_TO_SOCKET;
+@@ -10837,7 +10837,7 @@ static bool sk_lookup_is_valid_access(in
+ return false;
+
+ switch (off) {
+- case offsetof(struct bpf_sk_lookup, sk):
++ case bpf_ctx_range_ptr(struct bpf_sk_lookup, sk):
+ info->reg_type = PTR_TO_SOCKET_OR_NULL;
+ return size == sizeof(__u64);
+
--- /dev/null
+From b7880cb166ab62c2409046b2347261abf701530e Mon Sep 17 00:00:00 2001
+From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Date: Fri, 9 Jan 2026 04:13:42 +0000
+Subject: migrate: correct lock ordering for hugetlb file folios
+
+From: Matthew Wilcox (Oracle) <willy@infradead.org>
+
+commit b7880cb166ab62c2409046b2347261abf701530e upstream.
+
+Syzbot has found a deadlock (analyzed by Lance Yang):
+
+1) Task (5749): Holds folio_lock, then tries to acquire i_mmap_rwsem
+   (read lock).
+2) Task (5754): Holds i_mmap_rwsem (write lock), then tries to acquire
+   folio_lock.
+
+migrate_pages()
+  -> migrate_hugetlbs()
+    -> unmap_and_move_huge_page() <- Takes folio_lock!
+      -> remove_migration_ptes()
+        -> __rmap_walk_file()
+          -> i_mmap_lock_read() <- Waits for i_mmap_rwsem (read lock)!
+
+hugetlbfs_fallocate()
+  -> hugetlbfs_punch_hole() <- Takes i_mmap_rwsem (write lock)!
+    -> hugetlbfs_zero_partial_page()
+      -> filemap_lock_hugetlb_folio()
+        -> filemap_lock_folio()
+          -> __filemap_get_folio() <- Waits for folio_lock!
+
+The migration path is the one taking locks in the wrong order according to
+the documentation at the top of mm/rmap.c. So expand the scope of the
+existing i_mmap_lock to cover the calls to remove_migration_ptes() too.
+
+This is (mostly) how it used to be after commit c0d0381ade79. That
+locking was removed by commit 336bf30eb765 for both file & anon hugetlb
+pages when it should only have been removed for anon hugetlb pages.
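+
+Reduced to the bare lock calls, the two tasks form a classic ABBA
+deadlock. A minimal sketch of the interleaving (illustration only; the
+step numbers match the report above, not verbatim kernel code):
+
+  /* Task A, migrate_pages(): */
+  folio_lock(folio);             /* step 1 */
+  i_mmap_lock_read(mapping);     /* step 3: queued behind B's writer */
+
+  /* Task B, hugetlbfs_punch_hole(): */
+  i_mmap_lock_write(mapping);    /* step 2 */
+  folio_lock(folio);             /* step 4: blocks on A -> deadlock */
+
+With the widened lock scope, the migration path keeps the i_mmap_rwsem
+write lock (taken before try_to_migrate()) held across
+remove_migration_ptes(), so it never blocks on i_mmap_lock_read() while
+holding the folio lock.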
+
+Link: https://lkml.kernel.org/r/20260109041345.3863089-2-willy@infradead.org
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Fixes: 336bf30eb765 ("hugetlbfs: fix anon huge page migration race")
+Reported-by: syzbot+2d9c96466c978346b55f@syzkaller.appspotmail.com
+Link: https://lore.kernel.org/all/68e9715a.050a0220.1186a4.000d.GAE@google.com
+Debugged-by: Lance Yang <lance.yang@linux.dev>
+Acked-by: David Hildenbrand (Red Hat) <david@kernel.org>
+Acked-by: Zi Yan <ziy@nvidia.com>
+Cc: Alistair Popple <apopple@nvidia.com>
+Cc: Byungchul Park <byungchul@sk.com>
+Cc: Gregory Price <gourry@gourry.net>
+Cc: Jann Horn <jannh@google.com>
+Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
+Cc: Liam Howlett <liam.howlett@oracle.com>
+Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Cc: Matthew Brost <matthew.brost@intel.com>
+Cc: Rakie Kim <rakie.kim@sk.com>
+Cc: Rik van Riel <riel@surriel.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Ying Huang <ying.huang@linux.alibaba.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/migrate.c | 14 ++++++--------
+ 1 file changed, 6 insertions(+), 8 deletions(-)
+
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -1291,6 +1291,7 @@ static int unmap_and_move_huge_page(new_
+ struct page *new_hpage;
+ struct anon_vma *anon_vma = NULL;
+ struct address_space *mapping = NULL;
++ enum ttu_flags ttu = 0;
+
+ /*
+ * Migratability of hugepages depends on architectures and their size.
+@@ -1344,9 +1345,6 @@ static int unmap_and_move_huge_page(new_
+ goto put_anon;
+
+ if (page_mapped(hpage)) {
+- bool mapping_locked = false;
+- enum ttu_flags ttu = 0;
+-
+ if (!PageAnon(hpage)) {
+ /*
+ * In shared mappings, try_to_unmap could potentially
+@@ -1358,15 +1356,11 @@ static int unmap_and_move_huge_page(new_
+ if (unlikely(!mapping))
+ goto unlock_put_anon;
+
+- mapping_locked = true;
+ ttu |= TTU_RMAP_LOCKED;
+ }
+
+ try_to_migrate(hpage, ttu);
+ page_was_mapped = 1;
+-
+- if (mapping_locked)
+- i_mmap_unlock_write(mapping);
+ }
+
+ if (!page_mapped(hpage))
+@@ -1374,7 +1368,11 @@ static int unmap_and_move_huge_page(new_
+
+ if (page_was_mapped)
+ remove_migration_ptes(hpage,
+- rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);
++ rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage,
++ ttu ? true : false);
++
++ if (ttu & TTU_RMAP_LOCKED)
++ i_mmap_unlock_write(mapping);
+
+ unlock_put_anon:
+ unlock_page(new_hpage);
can-kvaser_usb-kvaser_usb_read_bulk_callback-fix-urb-memory-leak.patch
can-mcba_usb-mcba_usb_read_bulk_callback-fix-urb-memory-leak.patch
can-usb_8dev-usb_8dev_read_bulk_callback-fix-urb-memory-leak.patch
+migrate-correct-lock-ordering-for-hugetlb-file-folios.patch
+bpf-do-not-let-bpf-test-infra-emit-invalid-gso-types-to-stack.patch
+bpf-reject-narrower-access-to-pointer-ctx-fields.patch