git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
drop a bunch of patches based on review from developers
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 27 May 2025 15:06:07 +0000 (17:06 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 27 May 2025 15:06:07 +0000 (17:06 +0200)
23 files changed:
queue-5.10/bpf-prevent-unsafe-access-to-the-sock-fields-in-the-.patch [deleted file]
queue-5.10/series
queue-5.15/bpf-prevent-unsafe-access-to-the-sock-fields-in-the-.patch [deleted file]
queue-5.15/libbpf-fix-ldx-stx-st-co-re-relocation-size-adjustme.patch [deleted file]
queue-5.15/series
queue-6.1/bpf-prevent-unsafe-access-to-the-sock-fields-in-the-.patch [deleted file]
queue-6.1/btrfs-zoned-exit-btrfs_can_activate_zone-if-btrfs_fs.patch [deleted file]
queue-6.1/libbpf-fix-ldx-stx-st-co-re-relocation-size-adjustme.patch [deleted file]
queue-6.1/series
queue-6.12/bpf-prevent-unsafe-access-to-the-sock-fields-in-the-.patch [deleted file]
queue-6.12/btrfs-properly-limit-inline-data-extent-according-to.patch [deleted file]
queue-6.12/btrfs-zoned-exit-btrfs_can_activate_zone-if-btrfs_fs.patch [deleted file]
queue-6.12/libbpf-fix-ldx-stx-st-co-re-relocation-size-adjustme.patch [deleted file]
queue-6.12/series
queue-6.14/bpf-prevent-unsafe-access-to-the-sock-fields-in-the-.patch [deleted file]
queue-6.14/btrfs-properly-limit-inline-data-extent-according-to.patch [deleted file]
queue-6.14/btrfs-zoned-exit-btrfs_can_activate_zone-if-btrfs_fs.patch [deleted file]
queue-6.14/libbpf-fix-ldx-stx-st-co-re-relocation-size-adjustme.patch [deleted file]
queue-6.14/series
queue-6.6/bpf-prevent-unsafe-access-to-the-sock-fields-in-the-.patch [deleted file]
queue-6.6/btrfs-zoned-exit-btrfs_can_activate_zone-if-btrfs_fs.patch [deleted file]
queue-6.6/libbpf-fix-ldx-stx-st-co-re-relocation-size-adjustme.patch [deleted file]
queue-6.6/series

diff --git a/queue-5.10/bpf-prevent-unsafe-access-to-the-sock-fields-in-the-.patch b/queue-5.10/bpf-prevent-unsafe-access-to-the-sock-fields-in-the-.patch
deleted file mode 100644
index e343825..0000000
+++ /dev/null
@@ -1,150 +0,0 @@
-From 5bd50d78f0df2e26a439d9913668e0b1c8d6bf4c Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Thu, 20 Feb 2025 15:29:31 +0800
-Subject: bpf: Prevent unsafe access to the sock fields in the BPF timestamping
- callback
-
-From: Jason Xing <kerneljasonxing@gmail.com>
-
-[ Upstream commit fd93eaffb3f977b23bc0a48d4c8616e654fcf133 ]
-
-The subsequent patch will implement BPF TX timestamping. It will
-call the sockops BPF program without holding the sock lock.
-
-This breaks the current assumption that all sock ops programs will
-hold the sock lock. The sock's fields of the uapi's bpf_sock_ops
-requires this assumption.
-
-To address this, a new "u8 is_locked_tcp_sock;" field is added. This
-patch sets it in the current sock_ops callbacks. The "is_fullsock"
-test is then replaced by the "is_locked_tcp_sock" test during
-sock_ops_convert_ctx_access().
-
-The new TX timestamping callbacks added in the subsequent patch will
-not have this set. This will prevent unsafe access from the new
-timestamping callbacks.
-
-Potentially, we could allow read-only access. However, this would
-require identifying which callback is read-safe-only and also requires
-additional BPF instruction rewrites in the covert_ctx. Since the BPF
-program can always read everything from a socket (e.g., by using
-bpf_core_cast), this patch keeps it simple and disables all read
-and write access to any socket fields through the bpf_sock_ops
-UAPI from the new TX timestamping callback.
-
-Moreover, note that some of the fields in bpf_sock_ops are specific
-to tcp_sock, and sock_ops currently only supports tcp_sock. In
-the future, UDP timestamping will be added, which will also break
-this assumption. The same idea used in this patch will be reused.
-Considering that the current sock_ops only supports tcp_sock, the
-variable is named is_locked_"tcp"_sock.
-
-Signed-off-by: Jason Xing <kerneljasonxing@gmail.com>
-Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
-Link: https://patch.msgid.link/20250220072940.99994-4-kerneljasonxing@gmail.com
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- include/linux/filter.h | 1 +
- include/net/tcp.h      | 1 +
- net/core/filter.c      | 8 ++++----
- net/ipv4/tcp_input.c   | 2 ++
- net/ipv4/tcp_output.c  | 2 ++
- 5 files changed, 10 insertions(+), 4 deletions(-)
-
-diff --git a/include/linux/filter.h b/include/linux/filter.h
-index e3aca0dc7d9c6..a963a4495b0d0 100644
---- a/include/linux/filter.h
-+++ b/include/linux/filter.h
-@@ -1277,6 +1277,7 @@ struct bpf_sock_ops_kern {
-       void    *skb_data_end;
-       u8      op;
-       u8      is_fullsock;
-+      u8      is_locked_tcp_sock;
-       u8      remaining_opt_len;
-       u64     temp;                   /* temp and everything after is not
-                                        * initialized to 0 before calling
-diff --git a/include/net/tcp.h b/include/net/tcp.h
-index 2aad2e79ac6ad..02e8ef3a49192 100644
---- a/include/net/tcp.h
-+++ b/include/net/tcp.h
-@@ -2311,6 +2311,7 @@ static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
-       memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
-       if (sk_fullsock(sk)) {
-               sock_ops.is_fullsock = 1;
-+              sock_ops.is_locked_tcp_sock = 1;
-               sock_owned_by_me(sk);
-       }
-diff --git a/net/core/filter.c b/net/core/filter.c
-index b262cad02bad9..73df612426a2a 100644
---- a/net/core/filter.c
-+++ b/net/core/filter.c
-@@ -9194,10 +9194,10 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
-               }                                                             \
-               *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
-                                               struct bpf_sock_ops_kern,     \
--                                              is_fullsock),                 \
-+                                              is_locked_tcp_sock),          \
-                                     fullsock_reg, si->src_reg,              \
-                                     offsetof(struct bpf_sock_ops_kern,      \
--                                             is_fullsock));                 \
-+                                             is_locked_tcp_sock));          \
-               *insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp);         \
-               if (si->dst_reg == si->src_reg)                               \
-                       *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg,       \
-@@ -9282,10 +9282,10 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
-                                              temp));                        \
-               *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
-                                               struct bpf_sock_ops_kern,     \
--                                              is_fullsock),                 \
-+                                              is_locked_tcp_sock),          \
-                                     reg, si->dst_reg,                       \
-                                     offsetof(struct bpf_sock_ops_kern,      \
--                                             is_fullsock));                 \
-+                                             is_locked_tcp_sock));          \
-               *insn++ = BPF_JMP_IMM(BPF_JEQ, reg, 0, 2);                    \
-               *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
-                                               struct bpf_sock_ops_kern, sk),\
-diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
-index 7c2e714527f68..5b751f9c6fd16 100644
---- a/net/ipv4/tcp_input.c
-+++ b/net/ipv4/tcp_input.c
-@@ -167,6 +167,7 @@ static void bpf_skops_parse_hdr(struct sock *sk, struct sk_buff *skb)
-       memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
-       sock_ops.op = BPF_SOCK_OPS_PARSE_HDR_OPT_CB;
-       sock_ops.is_fullsock = 1;
-+      sock_ops.is_locked_tcp_sock = 1;
-       sock_ops.sk = sk;
-       bpf_skops_init_skb(&sock_ops, skb, tcp_hdrlen(skb));
-@@ -183,6 +184,7 @@ static void bpf_skops_established(struct sock *sk, int bpf_op,
-       memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
-       sock_ops.op = bpf_op;
-       sock_ops.is_fullsock = 1;
-+      sock_ops.is_locked_tcp_sock = 1;
-       sock_ops.sk = sk;
-       /* sk with TCP_REPAIR_ON does not have skb in tcp_finish_connect */
-       if (skb)
-diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
-index 32e38ac5ee2bd..ae4f23455f985 100644
---- a/net/ipv4/tcp_output.c
-+++ b/net/ipv4/tcp_output.c
-@@ -506,6 +506,7 @@ static void bpf_skops_hdr_opt_len(struct sock *sk, struct sk_buff *skb,
-               sock_owned_by_me(sk);
-               sock_ops.is_fullsock = 1;
-+              sock_ops.is_locked_tcp_sock = 1;
-               sock_ops.sk = sk;
-       }
-@@ -551,6 +552,7 @@ static void bpf_skops_write_hdr_opt(struct sock *sk, struct sk_buff *skb,
-               sock_owned_by_me(sk);
-               sock_ops.is_fullsock = 1;
-+              sock_ops.is_locked_tcp_sock = 1;
-               sock_ops.sk = sk;
-       }
--- 
-2.39.5
-
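
The dropped bpf patch above gates the rewritten bpf_sock_ops field accesses on the new is_locked_tcp_sock flag instead of is_fullsock, so callbacks that run without the sock lock cannot touch tcp_sock-backed fields. A minimal user-space sketch of that gating idea, assuming simplified stand-in structs and a plain C helper in place of the real bpf_sock_ops_kern and sock_ops_convert_ctx_access() machinery:

#include <stdio.h>

/* Simplified model, not the kernel structs: the context carries both
 * flags, and the rewritten field access is keyed on the lock flag. */
struct sock_ops_ctx {
        unsigned char is_fullsock;        /* socket is a full (tcp) socket */
        unsigned char is_locked_tcp_sock; /* callback ran with the sock lock held */
        int srtt_us;                      /* example tcp_sock-backed field in this model */
};

/* Stand-in for the converted ctx access: only dereference the
 * tcp_sock-backed field when the lock flag is set; otherwise skip the
 * load, so the read comes back as 0. */
static int read_srtt(const struct sock_ops_ctx *ctx)
{
        return ctx->is_locked_tcp_sock ? ctx->srtt_us : 0;
}

int main(void)
{
        struct sock_ops_ctx locked   = { 1, 1, 4000 }; /* e.g. an established callback */
        struct sock_ops_ctx lockless = { 1, 0, 4000 }; /* e.g. the planned TX timestamping callback */

        printf("locked callback reads srtt_us = %d\n", read_srtt(&locked));
        printf("lockless callback reads srtt_us = %d\n", read_srtt(&lockless));
        return 0;
}

Existing callbacks set both flags (as the tcp_input.c and tcp_output.c hunks show), while the planned lockless TX timestamping callbacks leave is_locked_tcp_sock at 0, so both reads and writes through the bpf_sock_ops UAPI are disabled there.
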
diff --git a/queue-5.10/series b/queue-5.10/series
index 9854843e9b2f667fe78407e5d1e7c55f08fdb7c2..2646bbcda55f78363d1e3a43d07b8f5abab10c99 100644
@@ -190,7 +190,6 @@ wifi-rtw88-fix-rtw_init_ht_cap-for-rtl8814au.patch
 wifi-rtw88-fix-rtw_desc_to_mcsrate-to-handle-mcs16-3.patch
 net-pktgen-fix-access-outside-of-user-given-buffer-i.patch
 edac-ie31200-work-around-false-positive-build-warnin.patch
-bpf-prevent-unsafe-access-to-the-sock-fields-in-the-.patch
 can-c_can-use-of_property_present-to-test-existence-.patch
 eth-mlx4-don-t-try-to-complete-xdp-frames-in-netpoll.patch
 pci-fix-old_size-lower-bound-in-calculate_iosize-too.patch
diff --git a/queue-5.15/bpf-prevent-unsafe-access-to-the-sock-fields-in-the-.patch b/queue-5.15/bpf-prevent-unsafe-access-to-the-sock-fields-in-the-.patch
deleted file mode 100644
index 6049187..0000000
+++ /dev/null
@@ -1,150 +0,0 @@
-From dccc085c23c9e1f9bd8346496fa77c07c380bfa8 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Thu, 20 Feb 2025 15:29:31 +0800
-Subject: bpf: Prevent unsafe access to the sock fields in the BPF timestamping
- callback
-
-From: Jason Xing <kerneljasonxing@gmail.com>
-
-[ Upstream commit fd93eaffb3f977b23bc0a48d4c8616e654fcf133 ]
-
-The subsequent patch will implement BPF TX timestamping. It will
-call the sockops BPF program without holding the sock lock.
-
-This breaks the current assumption that all sock ops programs will
-hold the sock lock. The sock's fields of the uapi's bpf_sock_ops
-requires this assumption.
-
-To address this, a new "u8 is_locked_tcp_sock;" field is added. This
-patch sets it in the current sock_ops callbacks. The "is_fullsock"
-test is then replaced by the "is_locked_tcp_sock" test during
-sock_ops_convert_ctx_access().
-
-The new TX timestamping callbacks added in the subsequent patch will
-not have this set. This will prevent unsafe access from the new
-timestamping callbacks.
-
-Potentially, we could allow read-only access. However, this would
-require identifying which callback is read-safe-only and also requires
-additional BPF instruction rewrites in the covert_ctx. Since the BPF
-program can always read everything from a socket (e.g., by using
-bpf_core_cast), this patch keeps it simple and disables all read
-and write access to any socket fields through the bpf_sock_ops
-UAPI from the new TX timestamping callback.
-
-Moreover, note that some of the fields in bpf_sock_ops are specific
-to tcp_sock, and sock_ops currently only supports tcp_sock. In
-the future, UDP timestamping will be added, which will also break
-this assumption. The same idea used in this patch will be reused.
-Considering that the current sock_ops only supports tcp_sock, the
-variable is named is_locked_"tcp"_sock.
-
-Signed-off-by: Jason Xing <kerneljasonxing@gmail.com>
-Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
-Link: https://patch.msgid.link/20250220072940.99994-4-kerneljasonxing@gmail.com
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- include/linux/filter.h | 1 +
- include/net/tcp.h      | 1 +
- net/core/filter.c      | 8 ++++----
- net/ipv4/tcp_input.c   | 2 ++
- net/ipv4/tcp_output.c  | 2 ++
- 5 files changed, 10 insertions(+), 4 deletions(-)
-
-diff --git a/include/linux/filter.h b/include/linux/filter.h
-index 7d8294d0d7173..e723b930bac14 100644
---- a/include/linux/filter.h
-+++ b/include/linux/filter.h
-@@ -1327,6 +1327,7 @@ struct bpf_sock_ops_kern {
-       void    *skb_data_end;
-       u8      op;
-       u8      is_fullsock;
-+      u8      is_locked_tcp_sock;
-       u8      remaining_opt_len;
-       u64     temp;                   /* temp and everything after is not
-                                        * initialized to 0 before calling
-diff --git a/include/net/tcp.h b/include/net/tcp.h
-index be91d81d66ab3..577e60ec41b8c 100644
---- a/include/net/tcp.h
-+++ b/include/net/tcp.h
-@@ -2322,6 +2322,7 @@ static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
-       memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
-       if (sk_fullsock(sk)) {
-               sock_ops.is_fullsock = 1;
-+              sock_ops.is_locked_tcp_sock = 1;
-               sock_owned_by_me(sk);
-       }
-diff --git a/net/core/filter.c b/net/core/filter.c
-index 9d358fb865e28..983aca1bf833f 100644
---- a/net/core/filter.c
-+++ b/net/core/filter.c
-@@ -9640,10 +9640,10 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
-               }                                                             \
-               *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
-                                               struct bpf_sock_ops_kern,     \
--                                              is_fullsock),                 \
-+                                              is_locked_tcp_sock),          \
-                                     fullsock_reg, si->src_reg,              \
-                                     offsetof(struct bpf_sock_ops_kern,      \
--                                             is_fullsock));                 \
-+                                             is_locked_tcp_sock));          \
-               *insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp);         \
-               if (si->dst_reg == si->src_reg)                               \
-                       *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg,       \
-@@ -9728,10 +9728,10 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
-                                              temp));                        \
-               *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
-                                               struct bpf_sock_ops_kern,     \
--                                              is_fullsock),                 \
-+                                              is_locked_tcp_sock),          \
-                                     reg, si->dst_reg,                       \
-                                     offsetof(struct bpf_sock_ops_kern,      \
--                                             is_fullsock));                 \
-+                                             is_locked_tcp_sock));          \
-               *insn++ = BPF_JMP_IMM(BPF_JEQ, reg, 0, 2);                    \
-               *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
-                                               struct bpf_sock_ops_kern, sk),\
-diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
-index 8859a38b45d5e..0caf1474b9807 100644
---- a/net/ipv4/tcp_input.c
-+++ b/net/ipv4/tcp_input.c
-@@ -168,6 +168,7 @@ static void bpf_skops_parse_hdr(struct sock *sk, struct sk_buff *skb)
-       memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
-       sock_ops.op = BPF_SOCK_OPS_PARSE_HDR_OPT_CB;
-       sock_ops.is_fullsock = 1;
-+      sock_ops.is_locked_tcp_sock = 1;
-       sock_ops.sk = sk;
-       bpf_skops_init_skb(&sock_ops, skb, tcp_hdrlen(skb));
-@@ -184,6 +185,7 @@ static void bpf_skops_established(struct sock *sk, int bpf_op,
-       memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
-       sock_ops.op = bpf_op;
-       sock_ops.is_fullsock = 1;
-+      sock_ops.is_locked_tcp_sock = 1;
-       sock_ops.sk = sk;
-       /* sk with TCP_REPAIR_ON does not have skb in tcp_finish_connect */
-       if (skb)
-diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
-index 3a66d0c7d015c..3a819413d3968 100644
---- a/net/ipv4/tcp_output.c
-+++ b/net/ipv4/tcp_output.c
-@@ -507,6 +507,7 @@ static void bpf_skops_hdr_opt_len(struct sock *sk, struct sk_buff *skb,
-               sock_owned_by_me(sk);
-               sock_ops.is_fullsock = 1;
-+              sock_ops.is_locked_tcp_sock = 1;
-               sock_ops.sk = sk;
-       }
-@@ -552,6 +553,7 @@ static void bpf_skops_write_hdr_opt(struct sock *sk, struct sk_buff *skb,
-               sock_owned_by_me(sk);
-               sock_ops.is_fullsock = 1;
-+              sock_ops.is_locked_tcp_sock = 1;
-               sock_ops.sk = sk;
-       }
--- 
-2.39.5
-
diff --git a/queue-5.15/libbpf-fix-ldx-stx-st-co-re-relocation-size-adjustme.patch b/queue-5.15/libbpf-fix-ldx-stx-st-co-re-relocation-size-adjustme.patch
deleted file mode 100644
index ba9e99b..0000000
+++ /dev/null
@@ -1,92 +0,0 @@
-From d9f5a2f2b181f4c4c10ebdb485919eb918f6adf9 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Thu, 6 Feb 2025 17:48:08 -0800
-Subject: libbpf: fix LDX/STX/ST CO-RE relocation size adjustment logic
-
-From: Andrii Nakryiko <andrii@kernel.org>
-
-[ Upstream commit 06096d19ee3897a7e70922580159607fe315da7a ]
-
-Libbpf has a somewhat obscure feature of automatically adjusting the
-"size" of LDX/STX/ST instruction (memory store and load instructions),
-based on originally recorded access size (u8, u16, u32, or u64) and the
-actual size of the field on target kernel. This is meant to facilitate
-using BPF CO-RE on 32-bit architectures (pointers are always 64-bit in
-BPF, but host kernel's BTF will have it as 32-bit type), as well as
-generally supporting safe type changes (unsigned integer type changes
-can be transparently "relocated").
-
-One issue that surfaced only now, 5 years after this logic was
-implemented, is how this all works when dealing with fields that are
-arrays. This isn't all that easy and straightforward to hit (see
-selftests that reproduce this condition), but one of sched_ext BPF
-programs did hit it with innocent looking loop.
-
-Long story short, libbpf used to calculate entire array size, instead of
-making sure to only calculate array's element size. But it's the element
-that is loaded by LDX/STX/ST instructions (1, 2, 4, or 8 bytes), so
-that's what libbpf should check. This patch adjusts the logic for
-arrays and fixed the issue.
-
-Reported-by: Emil Tsalapatis <emil@etsalapatis.com>
-Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
-Acked-by: Eduard Zingerman <eddyz87@gmail.com>
-Link: https://lore.kernel.org/r/20250207014809.1573841-1-andrii@kernel.org
-Signed-off-by: Alexei Starovoitov <ast@kernel.org>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- tools/lib/bpf/relo_core.c | 24 ++++++++++++++++++++----
- 1 file changed, 20 insertions(+), 4 deletions(-)
-
-diff --git a/tools/lib/bpf/relo_core.c b/tools/lib/bpf/relo_core.c
-index 4016ed492d0c2..72eb47bf7f1ca 100644
---- a/tools/lib/bpf/relo_core.c
-+++ b/tools/lib/bpf/relo_core.c
-@@ -563,7 +563,7 @@ static int bpf_core_calc_field_relo(const char *prog_name,
- {
-       const struct bpf_core_accessor *acc;
-       const struct btf_type *t;
--      __u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id;
-+      __u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id, elem_id;
-       const struct btf_member *m;
-       const struct btf_type *mt;
-       bool bitfield;
-@@ -586,8 +586,14 @@ static int bpf_core_calc_field_relo(const char *prog_name,
-       if (!acc->name) {
-               if (relo->kind == BPF_FIELD_BYTE_OFFSET) {
-                       *val = spec->bit_offset / 8;
--                      /* remember field size for load/store mem size */
--                      sz = btf__resolve_size(spec->btf, acc->type_id);
-+                      /* remember field size for load/store mem size;
-+                       * note, for arrays we care about individual element
-+                       * sizes, not the overall array size
-+                       */
-+                      t = skip_mods_and_typedefs(spec->btf, acc->type_id, &elem_id);
-+                      while (btf_is_array(t))
-+                              t = skip_mods_and_typedefs(spec->btf, btf_array(t)->type, &elem_id);
-+                      sz = btf__resolve_size(spec->btf, elem_id);
-                       if (sz < 0)
-                               return -EINVAL;
-                       *field_sz = sz;
-@@ -647,7 +653,17 @@ static int bpf_core_calc_field_relo(const char *prog_name,
-       case BPF_FIELD_BYTE_OFFSET:
-               *val = byte_off;
-               if (!bitfield) {
--                      *field_sz = byte_sz;
-+                      /* remember field size for load/store mem size;
-+                       * note, for arrays we care about individual element
-+                       * sizes, not the overall array size
-+                       */
-+                      t = skip_mods_and_typedefs(spec->btf, field_type_id, &elem_id);
-+                      while (btf_is_array(t))
-+                              t = skip_mods_and_typedefs(spec->btf, btf_array(t)->type, &elem_id);
-+                      sz = btf__resolve_size(spec->btf, elem_id);
-+                      if (sz < 0)
-+                              return -EINVAL;
-+                      *field_sz = sz;
-                       *type_id = field_type_id;
-               }
-               break;
--- 
-2.39.5
-
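
The dropped libbpf patch matters because a single BPF LDX/STX/ST instruction moves 1, 2, 4, or 8 bytes, so the CO-RE size adjustment has to be computed from the array element, not the whole array. A small self-contained sketch of old-versus-fixed sizing, using made-up field metadata in place of libbpf's BTF helpers (skip_mods_and_typedefs(), btf__resolve_size()):

#include <stdio.h>

/* Made-up metadata for a field like '__u32 vals[4]'; libbpf derives the
 * same numbers from BTF, this sketch just hard-codes them. */
struct field_info {
        size_t total_size; /* sizeof(__u32[4]) == 16 */
        size_t elem_count; /* 4 */
};

/* Old logic: record the whole array size for the load/store, which a
 * single instruction cannot move. */
static size_t access_size_old(const struct field_info *f)
{
        return f->total_size;
}

/* Fixed logic: peel the array down to its element and record the
 * element size instead. */
static size_t access_size_new(const struct field_info *f)
{
        return f->total_size / f->elem_count;
}

int main(void)
{
        struct field_info vals = { .total_size = 16, .elem_count = 4 };

        printf("old sizing: %zu bytes (not encodable in one LDX)\n",
               access_size_old(&vals));
        printf("fixed sizing: %zu bytes per element\n",
               access_size_new(&vals));
        return 0;
}

In the real patch the peeling is done with a while (btf_is_array(t)) loop over skip_mods_and_typedefs(), as the relo_core.c hunks above show.
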
diff --git a/queue-5.15/series b/queue-5.15/series
index d2be44ac305185ce73b322851de8812dd4ac876d..23ff7f20a5d30f45ebd5486d16c3e90f788a0812 100644
@@ -105,7 +105,6 @@ wifi-rtw88-fix-rtw_init_ht_cap-for-rtl8814au.patch
 wifi-rtw88-fix-rtw_desc_to_mcsrate-to-handle-mcs16-3.patch
 net-pktgen-fix-access-outside-of-user-given-buffer-i.patch
 edac-ie31200-work-around-false-positive-build-warnin.patch
-bpf-prevent-unsafe-access-to-the-sock-fields-in-the-.patch
 rdma-core-fix-best-page-size-finding-when-it-can-cro.patch
 can-c_can-use-of_property_present-to-test-existence-.patch
 eth-mlx4-don-t-try-to-complete-xdp-frames-in-netpoll.patch
@@ -119,7 +118,6 @@ asoc-soc-dai-check-return-value-at-snd_soc_dai_set_t.patch
 pinctrl-devicetree-do-not-goto-err-when-probing-hogs.patch
 smack-recognize-ipv4-cipso-w-o-categories.patch
 media-v4l-memset-argument-to-0-before-calling-get_mb.patch
-libbpf-fix-ldx-stx-st-co-re-relocation-size-adjustme.patch
 net-mlx4_core-avoid-impossible-mlx4_db_alloc-order-v.patch
 phy-core-don-t-require-set_mode-callback-for-phy_get.patch
 drm-amdgpu-reset-psp-cmd-to-null-after-releasing-the.patch
diff --git a/queue-6.1/bpf-prevent-unsafe-access-to-the-sock-fields-in-the-.patch b/queue-6.1/bpf-prevent-unsafe-access-to-the-sock-fields-in-the-.patch
deleted file mode 100644
index a8ec2f3..0000000
+++ /dev/null
@@ -1,150 +0,0 @@
-From 00b709040e0fdf5949dfbf02f38521e0b10943ac Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Thu, 20 Feb 2025 15:29:31 +0800
-Subject: bpf: Prevent unsafe access to the sock fields in the BPF timestamping
- callback
-
-From: Jason Xing <kerneljasonxing@gmail.com>
-
-[ Upstream commit fd93eaffb3f977b23bc0a48d4c8616e654fcf133 ]
-
-The subsequent patch will implement BPF TX timestamping. It will
-call the sockops BPF program without holding the sock lock.
-
-This breaks the current assumption that all sock ops programs will
-hold the sock lock. The sock's fields of the uapi's bpf_sock_ops
-requires this assumption.
-
-To address this, a new "u8 is_locked_tcp_sock;" field is added. This
-patch sets it in the current sock_ops callbacks. The "is_fullsock"
-test is then replaced by the "is_locked_tcp_sock" test during
-sock_ops_convert_ctx_access().
-
-The new TX timestamping callbacks added in the subsequent patch will
-not have this set. This will prevent unsafe access from the new
-timestamping callbacks.
-
-Potentially, we could allow read-only access. However, this would
-require identifying which callback is read-safe-only and also requires
-additional BPF instruction rewrites in the covert_ctx. Since the BPF
-program can always read everything from a socket (e.g., by using
-bpf_core_cast), this patch keeps it simple and disables all read
-and write access to any socket fields through the bpf_sock_ops
-UAPI from the new TX timestamping callback.
-
-Moreover, note that some of the fields in bpf_sock_ops are specific
-to tcp_sock, and sock_ops currently only supports tcp_sock. In
-the future, UDP timestamping will be added, which will also break
-this assumption. The same idea used in this patch will be reused.
-Considering that the current sock_ops only supports tcp_sock, the
-variable is named is_locked_"tcp"_sock.
-
-Signed-off-by: Jason Xing <kerneljasonxing@gmail.com>
-Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
-Link: https://patch.msgid.link/20250220072940.99994-4-kerneljasonxing@gmail.com
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- include/linux/filter.h | 1 +
- include/net/tcp.h      | 1 +
- net/core/filter.c      | 8 ++++----
- net/ipv4/tcp_input.c   | 2 ++
- net/ipv4/tcp_output.c  | 2 ++
- 5 files changed, 10 insertions(+), 4 deletions(-)
-
-diff --git a/include/linux/filter.h b/include/linux/filter.h
-index f3ef1a8965bb2..09cc8fb735f02 100644
---- a/include/linux/filter.h
-+++ b/include/linux/filter.h
-@@ -1319,6 +1319,7 @@ struct bpf_sock_ops_kern {
-       void    *skb_data_end;
-       u8      op;
-       u8      is_fullsock;
-+      u8      is_locked_tcp_sock;
-       u8      remaining_opt_len;
-       u64     temp;                   /* temp and everything after is not
-                                        * initialized to 0 before calling
-diff --git a/include/net/tcp.h b/include/net/tcp.h
-index 83e0362e3b721..63caa3181dfe6 100644
---- a/include/net/tcp.h
-+++ b/include/net/tcp.h
-@@ -2409,6 +2409,7 @@ static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
-       memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
-       if (sk_fullsock(sk)) {
-               sock_ops.is_fullsock = 1;
-+              sock_ops.is_locked_tcp_sock = 1;
-               sock_owned_by_me(sk);
-       }
-diff --git a/net/core/filter.c b/net/core/filter.c
-index 497b41ac399da..5c9f3fcb957bb 100644
---- a/net/core/filter.c
-+++ b/net/core/filter.c
-@@ -10240,10 +10240,10 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
-               }                                                             \
-               *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
-                                               struct bpf_sock_ops_kern,     \
--                                              is_fullsock),                 \
-+                                              is_locked_tcp_sock),          \
-                                     fullsock_reg, si->src_reg,              \
-                                     offsetof(struct bpf_sock_ops_kern,      \
--                                             is_fullsock));                 \
-+                                             is_locked_tcp_sock));          \
-               *insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp);         \
-               if (si->dst_reg == si->src_reg)                               \
-                       *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg,       \
-@@ -10328,10 +10328,10 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
-                                              temp));                        \
-               *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
-                                               struct bpf_sock_ops_kern,     \
--                                              is_fullsock),                 \
-+                                              is_locked_tcp_sock),          \
-                                     reg, si->dst_reg,                       \
-                                     offsetof(struct bpf_sock_ops_kern,      \
--                                             is_fullsock));                 \
-+                                             is_locked_tcp_sock));          \
-               *insn++ = BPF_JMP_IMM(BPF_JEQ, reg, 0, 2);                    \
-               *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
-                                               struct bpf_sock_ops_kern, sk),\
-diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
-index db1a99df29d55..16f4a41a068e4 100644
---- a/net/ipv4/tcp_input.c
-+++ b/net/ipv4/tcp_input.c
-@@ -168,6 +168,7 @@ static void bpf_skops_parse_hdr(struct sock *sk, struct sk_buff *skb)
-       memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
-       sock_ops.op = BPF_SOCK_OPS_PARSE_HDR_OPT_CB;
-       sock_ops.is_fullsock = 1;
-+      sock_ops.is_locked_tcp_sock = 1;
-       sock_ops.sk = sk;
-       bpf_skops_init_skb(&sock_ops, skb, tcp_hdrlen(skb));
-@@ -184,6 +185,7 @@ static void bpf_skops_established(struct sock *sk, int bpf_op,
-       memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
-       sock_ops.op = bpf_op;
-       sock_ops.is_fullsock = 1;
-+      sock_ops.is_locked_tcp_sock = 1;
-       sock_ops.sk = sk;
-       /* sk with TCP_REPAIR_ON does not have skb in tcp_finish_connect */
-       if (skb)
-diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
-index 40568365cdb3b..2f109f1968253 100644
---- a/net/ipv4/tcp_output.c
-+++ b/net/ipv4/tcp_output.c
-@@ -509,6 +509,7 @@ static void bpf_skops_hdr_opt_len(struct sock *sk, struct sk_buff *skb,
-               sock_owned_by_me(sk);
-               sock_ops.is_fullsock = 1;
-+              sock_ops.is_locked_tcp_sock = 1;
-               sock_ops.sk = sk;
-       }
-@@ -554,6 +555,7 @@ static void bpf_skops_write_hdr_opt(struct sock *sk, struct sk_buff *skb,
-               sock_owned_by_me(sk);
-               sock_ops.is_fullsock = 1;
-+              sock_ops.is_locked_tcp_sock = 1;
-               sock_ops.sk = sk;
-       }
--- 
-2.39.5
-
diff --git a/queue-6.1/btrfs-zoned-exit-btrfs_can_activate_zone-if-btrfs_fs.patch b/queue-6.1/btrfs-zoned-exit-btrfs_can_activate_zone-if-btrfs_fs.patch
deleted file mode 100644
index 8862a9e..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-From 3edb58e7c0060652da086a00323ccb267489f30a Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 12 Feb 2025 15:05:00 +0100
-Subject: btrfs: zoned: exit btrfs_can_activate_zone if
- BTRFS_FS_NEED_ZONE_FINISH is set
-
-From: Johannes Thumshirn <johannes.thumshirn@wdc.com>
-
-[ Upstream commit 26b38e28162ef4ceb1e0482299820fbbd7dbcd92 ]
-
-If BTRFS_FS_NEED_ZONE_FINISH is already set for the whole filesystem, exit
-early in btrfs_can_activate_zone(). There's no need to check if
-BTRFS_FS_NEED_ZONE_FINISH needs to be set if it is already set.
-
-Reviewed-by: Naohiro Aota <naohiro.aota@wdc.com>
-Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
-Reviewed-by: David Sterba <dsterba@suse.com>
-Signed-off-by: David Sterba <dsterba@suse.com>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- fs/btrfs/zoned.c | 3 +++
- 1 file changed, 3 insertions(+)
-
-diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
-index 1dff64e62047e..bfd76a7dcfa02 100644
---- a/fs/btrfs/zoned.c
-+++ b/fs/btrfs/zoned.c
-@@ -2105,6 +2105,9 @@ bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
-       if (!btrfs_is_zoned(fs_info))
-               return true;
-+      if (test_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags))
-+              return false;
-+
-       /* Check if there is a device with active zones left */
-       mutex_lock(&fs_info->chunk_mutex);
-       list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
--- 
-2.39.5
-
diff --git a/queue-6.1/libbpf-fix-ldx-stx-st-co-re-relocation-size-adjustme.patch b/queue-6.1/libbpf-fix-ldx-stx-st-co-re-relocation-size-adjustme.patch
deleted file mode 100644
index b85b7c6..0000000
+++ /dev/null
@@ -1,92 +0,0 @@
-From b63da93267c554f593d3afc3ad77952552d34a13 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Thu, 6 Feb 2025 17:48:08 -0800
-Subject: libbpf: fix LDX/STX/ST CO-RE relocation size adjustment logic
-
-From: Andrii Nakryiko <andrii@kernel.org>
-
-[ Upstream commit 06096d19ee3897a7e70922580159607fe315da7a ]
-
-Libbpf has a somewhat obscure feature of automatically adjusting the
-"size" of LDX/STX/ST instruction (memory store and load instructions),
-based on originally recorded access size (u8, u16, u32, or u64) and the
-actual size of the field on target kernel. This is meant to facilitate
-using BPF CO-RE on 32-bit architectures (pointers are always 64-bit in
-BPF, but host kernel's BTF will have it as 32-bit type), as well as
-generally supporting safe type changes (unsigned integer type changes
-can be transparently "relocated").
-
-One issue that surfaced only now, 5 years after this logic was
-implemented, is how this all works when dealing with fields that are
-arrays. This isn't all that easy and straightforward to hit (see
-selftests that reproduce this condition), but one of sched_ext BPF
-programs did hit it with innocent looking loop.
-
-Long story short, libbpf used to calculate entire array size, instead of
-making sure to only calculate array's element size. But it's the element
-that is loaded by LDX/STX/ST instructions (1, 2, 4, or 8 bytes), so
-that's what libbpf should check. This patch adjusts the logic for
-arrays and fixed the issue.
-
-Reported-by: Emil Tsalapatis <emil@etsalapatis.com>
-Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
-Acked-by: Eduard Zingerman <eddyz87@gmail.com>
-Link: https://lore.kernel.org/r/20250207014809.1573841-1-andrii@kernel.org
-Signed-off-by: Alexei Starovoitov <ast@kernel.org>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- tools/lib/bpf/relo_core.c | 24 ++++++++++++++++++++----
- 1 file changed, 20 insertions(+), 4 deletions(-)
-
-diff --git a/tools/lib/bpf/relo_core.c b/tools/lib/bpf/relo_core.c
-index c4b0e81ae2931..84f26b36f664c 100644
---- a/tools/lib/bpf/relo_core.c
-+++ b/tools/lib/bpf/relo_core.c
-@@ -683,7 +683,7 @@ static int bpf_core_calc_field_relo(const char *prog_name,
- {
-       const struct bpf_core_accessor *acc;
-       const struct btf_type *t;
--      __u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id;
-+      __u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id, elem_id;
-       const struct btf_member *m;
-       const struct btf_type *mt;
-       bool bitfield;
-@@ -706,8 +706,14 @@ static int bpf_core_calc_field_relo(const char *prog_name,
-       if (!acc->name) {
-               if (relo->kind == BPF_CORE_FIELD_BYTE_OFFSET) {
-                       *val = spec->bit_offset / 8;
--                      /* remember field size for load/store mem size */
--                      sz = btf__resolve_size(spec->btf, acc->type_id);
-+                      /* remember field size for load/store mem size;
-+                       * note, for arrays we care about individual element
-+                       * sizes, not the overall array size
-+                       */
-+                      t = skip_mods_and_typedefs(spec->btf, acc->type_id, &elem_id);
-+                      while (btf_is_array(t))
-+                              t = skip_mods_and_typedefs(spec->btf, btf_array(t)->type, &elem_id);
-+                      sz = btf__resolve_size(spec->btf, elem_id);
-                       if (sz < 0)
-                               return -EINVAL;
-                       *field_sz = sz;
-@@ -767,7 +773,17 @@ static int bpf_core_calc_field_relo(const char *prog_name,
-       case BPF_CORE_FIELD_BYTE_OFFSET:
-               *val = byte_off;
-               if (!bitfield) {
--                      *field_sz = byte_sz;
-+                      /* remember field size for load/store mem size;
-+                       * note, for arrays we care about individual element
-+                       * sizes, not the overall array size
-+                       */
-+                      t = skip_mods_and_typedefs(spec->btf, field_type_id, &elem_id);
-+                      while (btf_is_array(t))
-+                              t = skip_mods_and_typedefs(spec->btf, btf_array(t)->type, &elem_id);
-+                      sz = btf__resolve_size(spec->btf, elem_id);
-+                      if (sz < 0)
-+                              return -EINVAL;
-+                      *field_sz = sz;
-                       *type_id = field_type_id;
-               }
-               break;
--- 
-2.39.5
-
diff --git a/queue-6.1/series b/queue-6.1/series
index 68586fab6c45f1a9b33b82fd1203c90695ebbc84..6a15155664cecc30db22b149c02bc0c97ed739b2 100644
@@ -54,7 +54,6 @@ btrfs-run-btrfs_error_commit_super-early.patch
 btrfs-fix-non-empty-delayed-iputs-list-on-unmount-du.patch
 btrfs-get-zone-unusable-bytes-while-holding-lock-at-.patch
 btrfs-send-return-enametoolong-when-attempting-a-pat.patch
-btrfs-zoned-exit-btrfs_can_activate_zone-if-btrfs_fs.patch
 drm-amd-display-guard-against-setting-dispclk-low-fo.patch
 i3c-master-svc-fix-missing-stop-for-master-request.patch
 dlm-make-tcp-still-work-in-multi-link-env.patch
@@ -153,7 +152,6 @@ wifi-rtw88-fix-rtw_desc_to_mcsrate-to-handle-mcs16-3.patch
 wifi-rtw89-fw-propagate-error-code-from-rtw89_h2c_tx.patch
 net-pktgen-fix-access-outside-of-user-given-buffer-i.patch
 edac-ie31200-work-around-false-positive-build-warnin.patch
-bpf-prevent-unsafe-access-to-the-sock-fields-in-the-.patch
 i3c-master-svc-flush-fifo-before-sending-dynamic-add.patch
 serial-mctrl_gpio-split-disable_ms-into-sync-and-no_.patch
 rdma-core-fix-best-page-size-finding-when-it-can-cro.patch
@@ -174,7 +172,6 @@ asoc-soc-dai-check-return-value-at-snd_soc_dai_set_t.patch
 pinctrl-devicetree-do-not-goto-err-when-probing-hogs.patch
 smack-recognize-ipv4-cipso-w-o-categories.patch
 kunit-tool-use-qboot-on-qemu-x86_64.patch
-libbpf-fix-ldx-stx-st-co-re-relocation-size-adjustme.patch
 net-mlx4_core-avoid-impossible-mlx4_db_alloc-order-v.patch
 clk-qcom-clk-alpha-pll-do-not-use-random-stack-value.patch
 serial-sh-sci-update-the-suspend-resume-support.patch
diff --git a/queue-6.12/bpf-prevent-unsafe-access-to-the-sock-fields-in-the-.patch b/queue-6.12/bpf-prevent-unsafe-access-to-the-sock-fields-in-the-.patch
deleted file mode 100644
index 808743b..0000000
+++ /dev/null
@@ -1,150 +0,0 @@
-From 27d32b043247672b9ee03864ffd7cdc7ef959691 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Thu, 20 Feb 2025 15:29:31 +0800
-Subject: bpf: Prevent unsafe access to the sock fields in the BPF timestamping
- callback
-
-From: Jason Xing <kerneljasonxing@gmail.com>
-
-[ Upstream commit fd93eaffb3f977b23bc0a48d4c8616e654fcf133 ]
-
-The subsequent patch will implement BPF TX timestamping. It will
-call the sockops BPF program without holding the sock lock.
-
-This breaks the current assumption that all sock ops programs will
-hold the sock lock. The sock's fields of the uapi's bpf_sock_ops
-requires this assumption.
-
-To address this, a new "u8 is_locked_tcp_sock;" field is added. This
-patch sets it in the current sock_ops callbacks. The "is_fullsock"
-test is then replaced by the "is_locked_tcp_sock" test during
-sock_ops_convert_ctx_access().
-
-The new TX timestamping callbacks added in the subsequent patch will
-not have this set. This will prevent unsafe access from the new
-timestamping callbacks.
-
-Potentially, we could allow read-only access. However, this would
-require identifying which callback is read-safe-only and also requires
-additional BPF instruction rewrites in the covert_ctx. Since the BPF
-program can always read everything from a socket (e.g., by using
-bpf_core_cast), this patch keeps it simple and disables all read
-and write access to any socket fields through the bpf_sock_ops
-UAPI from the new TX timestamping callback.
-
-Moreover, note that some of the fields in bpf_sock_ops are specific
-to tcp_sock, and sock_ops currently only supports tcp_sock. In
-the future, UDP timestamping will be added, which will also break
-this assumption. The same idea used in this patch will be reused.
-Considering that the current sock_ops only supports tcp_sock, the
-variable is named is_locked_"tcp"_sock.
-
-Signed-off-by: Jason Xing <kerneljasonxing@gmail.com>
-Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
-Link: https://patch.msgid.link/20250220072940.99994-4-kerneljasonxing@gmail.com
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- include/linux/filter.h | 1 +
- include/net/tcp.h      | 1 +
- net/core/filter.c      | 8 ++++----
- net/ipv4/tcp_input.c   | 2 ++
- net/ipv4/tcp_output.c  | 2 ++
- 5 files changed, 10 insertions(+), 4 deletions(-)
-
-diff --git a/include/linux/filter.h b/include/linux/filter.h
-index 5118caf8aa1c7..2b1029aeb36ae 100644
---- a/include/linux/filter.h
-+++ b/include/linux/filter.h
-@@ -1506,6 +1506,7 @@ struct bpf_sock_ops_kern {
-       void    *skb_data_end;
-       u8      op;
-       u8      is_fullsock;
-+      u8      is_locked_tcp_sock;
-       u8      remaining_opt_len;
-       u64     temp;                   /* temp and everything after is not
-                                        * initialized to 0 before calling
-diff --git a/include/net/tcp.h b/include/net/tcp.h
-index 3255a199ef60d..c4820759ee0c3 100644
---- a/include/net/tcp.h
-+++ b/include/net/tcp.h
-@@ -2667,6 +2667,7 @@ static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
-       memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
-       if (sk_fullsock(sk)) {
-               sock_ops.is_fullsock = 1;
-+              sock_ops.is_locked_tcp_sock = 1;
-               sock_owned_by_me(sk);
-       }
-diff --git a/net/core/filter.c b/net/core/filter.c
-index 99b23fd2f509c..b5de0a192258c 100644
---- a/net/core/filter.c
-+++ b/net/core/filter.c
-@@ -10379,10 +10379,10 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
-               }                                                             \
-               *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
-                                               struct bpf_sock_ops_kern,     \
--                                              is_fullsock),                 \
-+                                              is_locked_tcp_sock),          \
-                                     fullsock_reg, si->src_reg,              \
-                                     offsetof(struct bpf_sock_ops_kern,      \
--                                             is_fullsock));                 \
-+                                             is_locked_tcp_sock));          \
-               *insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp);         \
-               if (si->dst_reg == si->src_reg)                               \
-                       *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg,       \
-@@ -10467,10 +10467,10 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
-                                              temp));                        \
-               *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
-                                               struct bpf_sock_ops_kern,     \
--                                              is_fullsock),                 \
-+                                              is_locked_tcp_sock),          \
-                                     reg, si->dst_reg,                       \
-                                     offsetof(struct bpf_sock_ops_kern,      \
--                                             is_fullsock));                 \
-+                                             is_locked_tcp_sock));          \
-               *insn++ = BPF_JMP_IMM(BPF_JEQ, reg, 0, 2);                    \
-               *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
-                                               struct bpf_sock_ops_kern, sk),\
-diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
-index d29219e067b7f..f5690085a2ac5 100644
---- a/net/ipv4/tcp_input.c
-+++ b/net/ipv4/tcp_input.c
-@@ -169,6 +169,7 @@ static void bpf_skops_parse_hdr(struct sock *sk, struct sk_buff *skb)
-       memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
-       sock_ops.op = BPF_SOCK_OPS_PARSE_HDR_OPT_CB;
-       sock_ops.is_fullsock = 1;
-+      sock_ops.is_locked_tcp_sock = 1;
-       sock_ops.sk = sk;
-       bpf_skops_init_skb(&sock_ops, skb, tcp_hdrlen(skb));
-@@ -185,6 +186,7 @@ static void bpf_skops_established(struct sock *sk, int bpf_op,
-       memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
-       sock_ops.op = bpf_op;
-       sock_ops.is_fullsock = 1;
-+      sock_ops.is_locked_tcp_sock = 1;
-       sock_ops.sk = sk;
-       /* sk with TCP_REPAIR_ON does not have skb in tcp_finish_connect */
-       if (skb)
-diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
-index 6d5387811c32a..ca1e52036d4d2 100644
---- a/net/ipv4/tcp_output.c
-+++ b/net/ipv4/tcp_output.c
-@@ -525,6 +525,7 @@ static void bpf_skops_hdr_opt_len(struct sock *sk, struct sk_buff *skb,
-               sock_owned_by_me(sk);
-               sock_ops.is_fullsock = 1;
-+              sock_ops.is_locked_tcp_sock = 1;
-               sock_ops.sk = sk;
-       }
-@@ -570,6 +571,7 @@ static void bpf_skops_write_hdr_opt(struct sock *sk, struct sk_buff *skb,
-               sock_owned_by_me(sk);
-               sock_ops.is_fullsock = 1;
-+              sock_ops.is_locked_tcp_sock = 1;
-               sock_ops.sk = sk;
-       }
--- 
-2.39.5
-
diff --git a/queue-6.12/btrfs-properly-limit-inline-data-extent-according-to.patch b/queue-6.12/btrfs-properly-limit-inline-data-extent-according-to.patch
deleted file mode 100644
index 54792af..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-From a5afc96d757771c992eb3af4629a562ec52ba1dc Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Tue, 25 Feb 2025 14:30:44 +1030
-Subject: btrfs: properly limit inline data extent according to block size
-
-From: Qu Wenruo <wqu@suse.com>
-
-[ Upstream commit 23019d3e6617a8ec99a8d2f5947aa3dd8a74a1b8 ]
-
-Btrfs utilizes inline data extent for the following cases:
-
-- Regular small files
-- Symlinks
-
-And "btrfs check" detects any file extents that are too large as an
-error.
-
-It's not a problem for 4K block size, but for the incoming smaller
-block sizes (2K), it can cause problems due to bad limits:
-
-- Non-compressed inline data extents
-  We do not allow a non-compressed inline data extent to be as large as
-  block size.
-
-- Symlinks
-  Currently the only real limit on symlinks are 4K, which can be larger
-  than 2K block size.
-
-These will result btrfs-check to report too large file extents.
-
-Fix it by adding proper size checks for the above cases.
-
-Signed-off-by: Qu Wenruo <wqu@suse.com>
-Reviewed-by: David Sterba <dsterba@suse.com>
-Signed-off-by: David Sterba <dsterba@suse.com>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- fs/btrfs/inode.c | 11 ++++++++++-
- 1 file changed, 10 insertions(+), 1 deletion(-)
-
-diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
-index 9ce1270addb04..0da2611fb9c85 100644
---- a/fs/btrfs/inode.c
-+++ b/fs/btrfs/inode.c
-@@ -623,6 +623,10 @@ static bool can_cow_file_range_inline(struct btrfs_inode *inode,
-       if (size > fs_info->sectorsize)
-               return false;
-+      /* We do not allow a non-compressed extent to be as large as block size. */
-+      if (data_len >= fs_info->sectorsize)
-+              return false;
-+
-       /* We cannot exceed the maximum inline data size. */
-       if (data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
-               return false;
-@@ -8691,7 +8695,12 @@ static int btrfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
-       struct extent_buffer *leaf;
-       name_len = strlen(symname);
--      if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
-+      /*
-+       * Symlinks utilize uncompressed inline extent data, which should not
-+       * reach block size.
-+       */
-+      if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
-+          name_len >= fs_info->sectorsize)
-               return -ENAMETOOLONG;
-       inode = new_inode(dir->i_sb);
--- 
-2.39.5
-
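
The dropped btrfs patch above is about a size boundary: with the incoming 2K block size, the pre-existing BTRFS_MAX_INLINE_DATA_SIZE() limit (and the symlink limit, which the commit message notes can be as large as 4K) can admit inline data that is not smaller than the block size, which btrfs-check then flags. A small sketch of the added checks; the 2048-byte sectorsize and the max_inline value are placeholders for illustration, not values taken from the patch:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Model of the two conditions: uncompressed inline data (including a
 * symlink target) must stay strictly below the block size and within
 * the existing inline-data limit. */
static bool inline_data_ok(size_t data_len, size_t sectorsize, size_t max_inline)
{
        if (data_len >= sectorsize)    /* check added by the dropped patch */
                return false;
        return data_len <= max_inline; /* pre-existing limit */
}

int main(void)
{
        size_t sectorsize = 2048;  /* hypothetical 2K block size */
        size_t max_inline = 3000;  /* placeholder for BTRFS_MAX_INLINE_DATA_SIZE() */
        const char *symlink_target = "a/fairly/long/symlink/target";

        /* 2500 bytes: within the old limit, rejected by the new block-size check. */
        printf("2500-byte inline data ok? %d\n",
               inline_data_ok(2500, sectorsize, max_inline));
        printf("symlink target ok? %d\n",
               inline_data_ok(strlen(symlink_target), sectorsize, max_inline));
        return 0;
}
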
diff --git a/queue-6.12/btrfs-zoned-exit-btrfs_can_activate_zone-if-btrfs_fs.patch b/queue-6.12/btrfs-zoned-exit-btrfs_can_activate_zone-if-btrfs_fs.patch
deleted file mode 100644
index 63281ed..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-From 784d3a559cba3422f75db61667062152c4f14697 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 12 Feb 2025 15:05:00 +0100
-Subject: btrfs: zoned: exit btrfs_can_activate_zone if
- BTRFS_FS_NEED_ZONE_FINISH is set
-
-From: Johannes Thumshirn <johannes.thumshirn@wdc.com>
-
-[ Upstream commit 26b38e28162ef4ceb1e0482299820fbbd7dbcd92 ]
-
-If BTRFS_FS_NEED_ZONE_FINISH is already set for the whole filesystem, exit
-early in btrfs_can_activate_zone(). There's no need to check if
-BTRFS_FS_NEED_ZONE_FINISH needs to be set if it is already set.
-
-Reviewed-by: Naohiro Aota <naohiro.aota@wdc.com>
-Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
-Reviewed-by: David Sterba <dsterba@suse.com>
-Signed-off-by: David Sterba <dsterba@suse.com>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- fs/btrfs/zoned.c | 3 +++
- 1 file changed, 3 insertions(+)
-
-diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
-index 2603c9d60fd21..d1167aeb07354 100644
---- a/fs/btrfs/zoned.c
-+++ b/fs/btrfs/zoned.c
-@@ -2326,6 +2326,9 @@ bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
-       if (!btrfs_is_zoned(fs_info))
-               return true;
-+      if (test_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags))
-+              return false;
-+
-       /* Check if there is a device with active zones left */
-       mutex_lock(&fs_info->chunk_mutex);
-       spin_lock(&fs_info->zone_active_bgs_lock);
--- 
-2.39.5
-
diff --git a/queue-6.12/libbpf-fix-ldx-stx-st-co-re-relocation-size-adjustme.patch b/queue-6.12/libbpf-fix-ldx-stx-st-co-re-relocation-size-adjustme.patch
deleted file mode 100644
index f467012..0000000
+++ /dev/null
@@ -1,92 +0,0 @@
-From 12c1088c278a74c6a17b523e4cd9d975f7a0ac59 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Thu, 6 Feb 2025 17:48:08 -0800
-Subject: libbpf: fix LDX/STX/ST CO-RE relocation size adjustment logic
-
-From: Andrii Nakryiko <andrii@kernel.org>
-
-[ Upstream commit 06096d19ee3897a7e70922580159607fe315da7a ]
-
-Libbpf has a somewhat obscure feature of automatically adjusting the
-"size" of LDX/STX/ST instruction (memory store and load instructions),
-based on originally recorded access size (u8, u16, u32, or u64) and the
-actual size of the field on target kernel. This is meant to facilitate
-using BPF CO-RE on 32-bit architectures (pointers are always 64-bit in
-BPF, but host kernel's BTF will have it as 32-bit type), as well as
-generally supporting safe type changes (unsigned integer type changes
-can be transparently "relocated").
-
-One issue that surfaced only now, 5 years after this logic was
-implemented, is how this all works when dealing with fields that are
-arrays. This isn't all that easy and straightforward to hit (see
-selftests that reproduce this condition), but one of sched_ext BPF
-programs did hit it with innocent looking loop.
-
-Long story short, libbpf used to calculate entire array size, instead of
-making sure to only calculate array's element size. But it's the element
-that is loaded by LDX/STX/ST instructions (1, 2, 4, or 8 bytes), so
-that's what libbpf should check. This patch adjusts the logic for
-arrays and fixed the issue.
-
-Reported-by: Emil Tsalapatis <emil@etsalapatis.com>
-Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
-Acked-by: Eduard Zingerman <eddyz87@gmail.com>
-Link: https://lore.kernel.org/r/20250207014809.1573841-1-andrii@kernel.org
-Signed-off-by: Alexei Starovoitov <ast@kernel.org>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- tools/lib/bpf/relo_core.c | 24 ++++++++++++++++++++----
- 1 file changed, 20 insertions(+), 4 deletions(-)
-
-diff --git a/tools/lib/bpf/relo_core.c b/tools/lib/bpf/relo_core.c
-index 63a4d5ad12d1a..26cde1b27174b 100644
---- a/tools/lib/bpf/relo_core.c
-+++ b/tools/lib/bpf/relo_core.c
-@@ -683,7 +683,7 @@ static int bpf_core_calc_field_relo(const char *prog_name,
- {
-       const struct bpf_core_accessor *acc;
-       const struct btf_type *t;
--      __u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id;
-+      __u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id, elem_id;
-       const struct btf_member *m;
-       const struct btf_type *mt;
-       bool bitfield;
-@@ -706,8 +706,14 @@ static int bpf_core_calc_field_relo(const char *prog_name,
-       if (!acc->name) {
-               if (relo->kind == BPF_CORE_FIELD_BYTE_OFFSET) {
-                       *val = spec->bit_offset / 8;
--                      /* remember field size for load/store mem size */
--                      sz = btf__resolve_size(spec->btf, acc->type_id);
-+                      /* remember field size for load/store mem size;
-+                       * note, for arrays we care about individual element
-+                       * sizes, not the overall array size
-+                       */
-+                      t = skip_mods_and_typedefs(spec->btf, acc->type_id, &elem_id);
-+                      while (btf_is_array(t))
-+                              t = skip_mods_and_typedefs(spec->btf, btf_array(t)->type, &elem_id);
-+                      sz = btf__resolve_size(spec->btf, elem_id);
-                       if (sz < 0)
-                               return -EINVAL;
-                       *field_sz = sz;
-@@ -767,7 +773,17 @@ static int bpf_core_calc_field_relo(const char *prog_name,
-       case BPF_CORE_FIELD_BYTE_OFFSET:
-               *val = byte_off;
-               if (!bitfield) {
--                      *field_sz = byte_sz;
-+                      /* remember field size for load/store mem size;
-+                       * note, for arrays we care about individual element
-+                       * sizes, not the overall array size
-+                       */
-+                      t = skip_mods_and_typedefs(spec->btf, field_type_id, &elem_id);
-+                      while (btf_is_array(t))
-+                              t = skip_mods_and_typedefs(spec->btf, btf_array(t)->type, &elem_id);
-+                      sz = btf__resolve_size(spec->btf, elem_id);
-+                      if (sz < 0)
-+                              return -EINVAL;
-+                      *field_sz = sz;
-                       *type_id = field_type_id;
-               }
-               break;
--- 
-2.39.5
-
diff --git a/queue-6.12/series b/queue-6.12/series
index 77c5e9330622e01363f98e0257b3585e382b8977..3a9c131b2ec82117e7259d1caf4d01f59bafb2ba 100644
@@ -115,10 +115,8 @@ btrfs-make-btrfs_discard_workfn-block_group-ref-expl.patch
 btrfs-avoid-linker-error-in-btrfs_find_create_tree_b.patch
 btrfs-run-btrfs_error_commit_super-early.patch
 btrfs-fix-non-empty-delayed-iputs-list-on-unmount-du.patch
-btrfs-properly-limit-inline-data-extent-according-to.patch
 btrfs-get-zone-unusable-bytes-while-holding-lock-at-.patch
 btrfs-send-return-enametoolong-when-attempting-a-pat.patch
-btrfs-zoned-exit-btrfs_can_activate_zone-if-btrfs_fs.patch
 blk-cgroup-improve-policy-registration-error-handlin.patch
 drm-amdgpu-release-xcp_mgr-on-exit.patch
 drm-amd-display-guard-against-setting-dispclk-low-fo.patch
@@ -329,7 +327,6 @@ wifi-rtw89-8922a-fix-incorrect-sta-id-in-eht-mu-ppdu.patch
 net-pktgen-fix-access-outside-of-user-given-buffer-i.patch
 power-supply-axp20x_battery-update-temp-sensor-for-a.patch
 edac-ie31200-work-around-false-positive-build-warnin.patch
-bpf-prevent-unsafe-access-to-the-sock-fields-in-the-.patch
 i3c-master-svc-flush-fifo-before-sending-dynamic-add.patch
 mfd-axp20x-axp717-add-axp717_ts_pin_cfg-to-writeable.patch
 eeprom-ee1004-check-chip-before-probing.patch
@@ -371,7 +368,6 @@ smack-revert-smackfs-added-check-catlen.patch
 kunit-tool-use-qboot-on-qemu-x86_64.patch
 media-i2c-imx219-correct-the-minimum-vblanking-value.patch
 media-v4l-memset-argument-to-0-before-calling-get_mb.patch
-libbpf-fix-ldx-stx-st-co-re-relocation-size-adjustme.patch
 net-mlx4_core-avoid-impossible-mlx4_db_alloc-order-v.patch
 drm-xe-stop-ignoring-errors-from-xe_ttm_stolen_mgr_i.patch
 drm-xe-fix-xe_tile_init_noalloc-error-propagation.patch
diff --git a/queue-6.14/bpf-prevent-unsafe-access-to-the-sock-fields-in-the-.patch b/queue-6.14/bpf-prevent-unsafe-access-to-the-sock-fields-in-the-.patch
deleted file mode 100644 (file)
index 1d659b1..0000000
+++ /dev/null
@@ -1,150 +0,0 @@
-From bde15bbd09f5c71ce5cd34e322dc50311ffefa42 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Thu, 20 Feb 2025 15:29:31 +0800
-Subject: bpf: Prevent unsafe access to the sock fields in the BPF timestamping
- callback
-
-From: Jason Xing <kerneljasonxing@gmail.com>
-
-[ Upstream commit fd93eaffb3f977b23bc0a48d4c8616e654fcf133 ]
-
-The subsequent patch will implement BPF TX timestamping. It will
-call the sockops BPF program without holding the sock lock.
-
-This breaks the current assumption that all sock ops programs will
-hold the sock lock. The sock fields of the uapi's bpf_sock_ops
-require this assumption.
-
-To address this, a new "u8 is_locked_tcp_sock;" field is added. This
-patch sets it in the current sock_ops callbacks. The "is_fullsock"
-test is then replaced by the "is_locked_tcp_sock" test during
-sock_ops_convert_ctx_access().
-
-The new TX timestamping callbacks added in the subsequent patch will
-not have this set. This will prevent unsafe access from the new
-timestamping callbacks.
-
-Potentially, we could allow read-only access. However, this would
-require identifying which callbacks are read-only safe and would also
-require additional BPF instruction rewrites in the convert_ctx. Since the BPF
-program can always read everything from a socket (e.g., by using
-bpf_core_cast), this patch keeps it simple and disables all read
-and write access to any socket fields through the bpf_sock_ops
-UAPI from the new TX timestamping callback.
-
-Moreover, note that some of the fields in bpf_sock_ops are specific
-to tcp_sock, and sock_ops currently only supports tcp_sock. In
-the future, UDP timestamping will be added, which will also break
-this assumption. The same idea used in this patch will be reused.
-Considering that the current sock_ops only supports tcp_sock, the
-variable is named is_locked_"tcp"_sock.
-
-Signed-off-by: Jason Xing <kerneljasonxing@gmail.com>
-Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
-Link: https://patch.msgid.link/20250220072940.99994-4-kerneljasonxing@gmail.com
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- include/linux/filter.h | 1 +
- include/net/tcp.h      | 1 +
- net/core/filter.c      | 8 ++++----
- net/ipv4/tcp_input.c   | 2 ++
- net/ipv4/tcp_output.c  | 2 ++
- 5 files changed, 10 insertions(+), 4 deletions(-)
-
-diff --git a/include/linux/filter.h b/include/linux/filter.h
-index a3ea462815957..d36d5d5180b11 100644
---- a/include/linux/filter.h
-+++ b/include/linux/filter.h
-@@ -1508,6 +1508,7 @@ struct bpf_sock_ops_kern {
-       void    *skb_data_end;
-       u8      op;
-       u8      is_fullsock;
-+      u8      is_locked_tcp_sock;
-       u8      remaining_opt_len;
-       u64     temp;                   /* temp and everything after is not
-                                        * initialized to 0 before calling
-diff --git a/include/net/tcp.h b/include/net/tcp.h
-index 2d08473a6dc00..33c50ea976c88 100644
---- a/include/net/tcp.h
-+++ b/include/net/tcp.h
-@@ -2671,6 +2671,7 @@ static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
-       memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
-       if (sk_fullsock(sk)) {
-               sock_ops.is_fullsock = 1;
-+              sock_ops.is_locked_tcp_sock = 1;
-               sock_owned_by_me(sk);
-       }
-diff --git a/net/core/filter.c b/net/core/filter.c
-index 6c8fbc96b14a3..7e6ad73df6b17 100644
---- a/net/core/filter.c
-+++ b/net/core/filter.c
-@@ -10367,10 +10367,10 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
-               }                                                             \
-               *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
-                                               struct bpf_sock_ops_kern,     \
--                                              is_fullsock),                 \
-+                                              is_locked_tcp_sock),          \
-                                     fullsock_reg, si->src_reg,              \
-                                     offsetof(struct bpf_sock_ops_kern,      \
--                                             is_fullsock));                 \
-+                                             is_locked_tcp_sock));          \
-               *insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp);         \
-               if (si->dst_reg == si->src_reg)                               \
-                       *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg,       \
-@@ -10455,10 +10455,10 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
-                                              temp));                        \
-               *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
-                                               struct bpf_sock_ops_kern,     \
--                                              is_fullsock),                 \
-+                                              is_locked_tcp_sock),          \
-                                     reg, si->dst_reg,                       \
-                                     offsetof(struct bpf_sock_ops_kern,      \
--                                             is_fullsock));                 \
-+                                             is_locked_tcp_sock));          \
-               *insn++ = BPF_JMP_IMM(BPF_JEQ, reg, 0, 2);                    \
-               *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
-                                               struct bpf_sock_ops_kern, sk),\
-diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
-index 1b09b4d76c296..d1ed4ac74e1d0 100644
---- a/net/ipv4/tcp_input.c
-+++ b/net/ipv4/tcp_input.c
-@@ -169,6 +169,7 @@ static void bpf_skops_parse_hdr(struct sock *sk, struct sk_buff *skb)
-       memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
-       sock_ops.op = BPF_SOCK_OPS_PARSE_HDR_OPT_CB;
-       sock_ops.is_fullsock = 1;
-+      sock_ops.is_locked_tcp_sock = 1;
-       sock_ops.sk = sk;
-       bpf_skops_init_skb(&sock_ops, skb, tcp_hdrlen(skb));
-@@ -185,6 +186,7 @@ static void bpf_skops_established(struct sock *sk, int bpf_op,
-       memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
-       sock_ops.op = bpf_op;
-       sock_ops.is_fullsock = 1;
-+      sock_ops.is_locked_tcp_sock = 1;
-       sock_ops.sk = sk;
-       /* sk with TCP_REPAIR_ON does not have skb in tcp_finish_connect */
-       if (skb)
-diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
-index 6031d7f7f5198..2398b0fc62225 100644
---- a/net/ipv4/tcp_output.c
-+++ b/net/ipv4/tcp_output.c
-@@ -525,6 +525,7 @@ static void bpf_skops_hdr_opt_len(struct sock *sk, struct sk_buff *skb,
-               sock_owned_by_me(sk);
-               sock_ops.is_fullsock = 1;
-+              sock_ops.is_locked_tcp_sock = 1;
-               sock_ops.sk = sk;
-       }
-@@ -570,6 +571,7 @@ static void bpf_skops_write_hdr_opt(struct sock *sk, struct sk_buff *skb,
-               sock_owned_by_me(sk);
-               sock_ops.is_fullsock = 1;
-+              sock_ops.is_locked_tcp_sock = 1;
-               sock_ops.sk = sk;
-       }
--- 
-2.39.5
-
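For illustration only, a minimal standalone C sketch of the gating described in the patch above: rewritten sock_ops field accesses test is_locked_tcp_sock rather than is_fullsock, so a callback invoked without the sock lock reads back zero instead of dereferencing socket state. The struct and field names below merely mirror the patch; they are not the kernel definitions.

#include <stdio.h>

/* Illustrative stand-ins for bpf_sock_ops_kern; field names mirror the
 * patch, but this is not the kernel structure. */
struct sock_ops_ctx {
        int snd_cwnd;                     /* stands in for a tcp_sock field */
        unsigned char is_fullsock;
        unsigned char is_locked_tcp_sock;
};

/* Rewritten accesses branch on is_locked_tcp_sock instead of is_fullsock,
 * so a callback that runs without the sock lock reads back 0 rather than
 * touching socket state. */
static int read_snd_cwnd(const struct sock_ops_ctx *ctx)
{
        if (!ctx->is_locked_tcp_sock)
                return 0;
        return ctx->snd_cwnd;
}

int main(void)
{
        struct sock_ops_ctx locked = {
                .snd_cwnd = 10, .is_fullsock = 1, .is_locked_tcp_sock = 1,
        };
        struct sock_ops_ctx unlocked = { .snd_cwnd = 10, .is_fullsock = 1 };

        printf("locked callback:   %d\n", read_snd_cwnd(&locked));
        printf("unlocked callback: %d\n", read_snd_cwnd(&unlocked));
        return 0;
}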
diff --git a/queue-6.14/btrfs-properly-limit-inline-data-extent-according-to.patch b/queue-6.14/btrfs-properly-limit-inline-data-extent-according-to.patch
deleted file mode 100644 (file)
index 4b1e518..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-From ec02842137bdccb74ed331a1b0a335ee22eb179c Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Tue, 25 Feb 2025 14:30:44 +1030
-Subject: btrfs: properly limit inline data extent according to block size
-
-From: Qu Wenruo <wqu@suse.com>
-
-[ Upstream commit 23019d3e6617a8ec99a8d2f5947aa3dd8a74a1b8 ]
-
-Btrfs utilizes inline data extent for the following cases:
-
-- Regular small files
-- Symlinks
-
-And "btrfs check" detects any file extents that are too large as an
-error.
-
-It's not a problem for a 4K block size, but for the upcoming smaller
-block sizes (2K), it can cause problems due to bad limits:
-
-- Non-compressed inline data extents
-  We do not allow a non-compressed inline data extent to be as large as
-  block size.
-
-- Symlinks
-  Currently the only real limit on symlinks is 4K, which can be larger
-  than a 2K block size.
-
-These will cause btrfs-check to report too-large file extents.
-
-Fix it by adding proper size checks for the above cases.
-
-Signed-off-by: Qu Wenruo <wqu@suse.com>
-Reviewed-by: David Sterba <dsterba@suse.com>
-Signed-off-by: David Sterba <dsterba@suse.com>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- fs/btrfs/inode.c | 11 ++++++++++-
- 1 file changed, 10 insertions(+), 1 deletion(-)
-
-diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
-index a06fca7934d55..9a648fb130230 100644
---- a/fs/btrfs/inode.c
-+++ b/fs/btrfs/inode.c
-@@ -583,6 +583,10 @@ static bool can_cow_file_range_inline(struct btrfs_inode *inode,
-       if (size > fs_info->sectorsize)
-               return false;
-+      /* We do not allow a non-compressed extent to be as large as block size. */
-+      if (data_len >= fs_info->sectorsize)
-+              return false;
-+
-       /* We cannot exceed the maximum inline data size. */
-       if (data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
-               return false;
-@@ -8671,7 +8675,12 @@ static int btrfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
-       struct extent_buffer *leaf;
-       name_len = strlen(symname);
--      if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
-+      /*
-+       * Symlinks utilize uncompressed inline extent data, which should not
-+       * reach block size.
-+       */
-+      if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
-+          name_len >= fs_info->sectorsize)
-               return -ENAMETOOLONG;
-       inode = new_inode(dir->i_sb);
--- 
-2.39.5
-
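As a rough standalone sketch of the two limits the patch above enforces; the constants below are assumed values for a 2K block size, not the btrfs definitions (the real inline limit also depends on the node size).

#include <stdbool.h>
#include <stdio.h>

/* Assumed limits for illustration only. */
#define SECTORSIZE           2048u
#define MAX_INLINE_DATA_SIZE 2048u

/* Mirrors the two checks added above: uncompressed inline data (and
 * symlink targets) must stay strictly below the block size as well as
 * within the generic inline limit. */
static bool inline_extent_ok(unsigned int data_len, bool compressed)
{
        if (!compressed && data_len >= SECTORSIZE)
                return false;
        return data_len <= MAX_INLINE_DATA_SIZE;
}

int main(void)
{
        printf("2047 bytes, uncompressed: %d\n", inline_extent_ok(2047, false));
        printf("2048 bytes, uncompressed: %d\n", inline_extent_ok(2048, false));
        printf("2048 bytes, compressed:   %d\n", inline_extent_ok(2048, true));
        return 0;
}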
diff --git a/queue-6.14/btrfs-zoned-exit-btrfs_can_activate_zone-if-btrfs_fs.patch b/queue-6.14/btrfs-zoned-exit-btrfs_can_activate_zone-if-btrfs_fs.patch
deleted file mode 100644 (file)
index c364ff0..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-From 1136d333d91088ecf2d5189367540a84e60449a0 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 12 Feb 2025 15:05:00 +0100
-Subject: btrfs: zoned: exit btrfs_can_activate_zone if
- BTRFS_FS_NEED_ZONE_FINISH is set
-
-From: Johannes Thumshirn <johannes.thumshirn@wdc.com>
-
-[ Upstream commit 26b38e28162ef4ceb1e0482299820fbbd7dbcd92 ]
-
-If BTRFS_FS_NEED_ZONE_FINISH is already set for the whole filesystem, exit
-early in btrfs_can_activate_zone(). There's no need to check if
-BTRFS_FS_NEED_ZONE_FINISH needs to be set if it is already set.
-
-Reviewed-by: Naohiro Aota <naohiro.aota@wdc.com>
-Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
-Reviewed-by: David Sterba <dsterba@suse.com>
-Signed-off-by: David Sterba <dsterba@suse.com>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- fs/btrfs/zoned.c | 3 +++
- 1 file changed, 3 insertions(+)
-
-diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
-index f39656668967c..4a3e02b49f295 100644
---- a/fs/btrfs/zoned.c
-+++ b/fs/btrfs/zoned.c
-@@ -2344,6 +2344,9 @@ bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
-       if (!btrfs_is_zoned(fs_info))
-               return true;
-+      if (test_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags))
-+              return false;
-+
-       /* Check if there is a device with active zones left */
-       mutex_lock(&fs_info->chunk_mutex);
-       spin_lock(&fs_info->zone_active_bgs_lock);
--- 
-2.39.5
-
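A minimal sketch of the early-exit pattern above, with an assumed flag constant standing in for BTRFS_FS_NEED_ZONE_FINISH (the kernel tests it with test_bit() on fs_info->flags).

#include <stdbool.h>
#include <stdio.h>

/* Assumed flag bit for illustration. */
#define NEED_ZONE_FINISH (1u << 0)

/* Shape of the change: if the filesystem-wide condition is already
 * known, bail out before taking locks and walking the device list. */
static bool can_activate_zone(unsigned int fs_flags)
{
        if (fs_flags & NEED_ZONE_FINISH)
                return false;

        /* ... per-device active-zone scan would go here ... */
        return true;
}

int main(void)
{
        printf("no flag:  %d\n", can_activate_zone(0));
        printf("flag set: %d\n", can_activate_zone(NEED_ZONE_FINISH));
        return 0;
}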
diff --git a/queue-6.14/libbpf-fix-ldx-stx-st-co-re-relocation-size-adjustme.patch b/queue-6.14/libbpf-fix-ldx-stx-st-co-re-relocation-size-adjustme.patch
deleted file mode 100644 (file)
index 714c6ca..0000000
+++ /dev/null
@@ -1,92 +0,0 @@
-From e2547548c76d0c0e4aea2a3ba6a418d18ac58cd5 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Thu, 6 Feb 2025 17:48:08 -0800
-Subject: libbpf: fix LDX/STX/ST CO-RE relocation size adjustment logic
-
-From: Andrii Nakryiko <andrii@kernel.org>
-
-[ Upstream commit 06096d19ee3897a7e70922580159607fe315da7a ]
-
-Libbpf has a somewhat obscure feature of automatically adjusting the
-"size" of LDX/STX/ST instructions (memory load and store instructions),
-based on the originally recorded access size (u8, u16, u32, or u64) and
-the actual size of the field on the target kernel. This is meant to
-facilitate using BPF CO-RE on 32-bit architectures (pointers are always
-64-bit in BPF, but the host kernel's BTF describes them as 32-bit
-types), as well as to generally support safe type changes (unsigned
-integer type changes can be transparently "relocated").
-
-One issue that surfaced only now, 5 years after this logic was
-implemented, is how this all works when dealing with fields that are
-arrays. This isn't all that easy and straightforward to hit (see
-selftests that reproduce this condition), but one of the sched_ext BPF
-programs did hit it with an innocent-looking loop.
-
-Long story short, libbpf used to calculate the entire array size,
-instead of making sure to only calculate the array's element size. But
-it's the element that is loaded by LDX/STX/ST instructions (1, 2, 4, or
-8 bytes), so that's what libbpf should check. This patch adjusts the
-logic for arrays and fixes the issue.
-
-Reported-by: Emil Tsalapatis <emil@etsalapatis.com>
-Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
-Acked-by: Eduard Zingerman <eddyz87@gmail.com>
-Link: https://lore.kernel.org/r/20250207014809.1573841-1-andrii@kernel.org
-Signed-off-by: Alexei Starovoitov <ast@kernel.org>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- tools/lib/bpf/relo_core.c | 24 ++++++++++++++++++++----
- 1 file changed, 20 insertions(+), 4 deletions(-)
-
-diff --git a/tools/lib/bpf/relo_core.c b/tools/lib/bpf/relo_core.c
-index 7632e9d418271..2b83c98a11372 100644
---- a/tools/lib/bpf/relo_core.c
-+++ b/tools/lib/bpf/relo_core.c
-@@ -683,7 +683,7 @@ static int bpf_core_calc_field_relo(const char *prog_name,
- {
-       const struct bpf_core_accessor *acc;
-       const struct btf_type *t;
--      __u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id;
-+      __u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id, elem_id;
-       const struct btf_member *m;
-       const struct btf_type *mt;
-       bool bitfield;
-@@ -706,8 +706,14 @@ static int bpf_core_calc_field_relo(const char *prog_name,
-       if (!acc->name) {
-               if (relo->kind == BPF_CORE_FIELD_BYTE_OFFSET) {
-                       *val = spec->bit_offset / 8;
--                      /* remember field size for load/store mem size */
--                      sz = btf__resolve_size(spec->btf, acc->type_id);
-+                      /* remember field size for load/store mem size;
-+                       * note, for arrays we care about individual element
-+                       * sizes, not the overall array size
-+                       */
-+                      t = skip_mods_and_typedefs(spec->btf, acc->type_id, &elem_id);
-+                      while (btf_is_array(t))
-+                              t = skip_mods_and_typedefs(spec->btf, btf_array(t)->type, &elem_id);
-+                      sz = btf__resolve_size(spec->btf, elem_id);
-                       if (sz < 0)
-                               return -EINVAL;
-                       *field_sz = sz;
-@@ -767,7 +773,17 @@ static int bpf_core_calc_field_relo(const char *prog_name,
-       case BPF_CORE_FIELD_BYTE_OFFSET:
-               *val = byte_off;
-               if (!bitfield) {
--                      *field_sz = byte_sz;
-+                      /* remember field size for load/store mem size;
-+                       * note, for arrays we care about individual element
-+                       * sizes, not the overall array size
-+                       */
-+                      t = skip_mods_and_typedefs(spec->btf, field_type_id, &elem_id);
-+                      while (btf_is_array(t))
-+                              t = skip_mods_and_typedefs(spec->btf, btf_array(t)->type, &elem_id);
-+                      sz = btf__resolve_size(spec->btf, elem_id);
-+                      if (sz < 0)
-+                              return -EINVAL;
-+                      *field_sz = sz;
-                       *type_id = field_type_id;
-               }
-               break;
--- 
-2.39.5
-
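To illustrate why the element size matters for the relocation fix above, a small standalone C example with a hypothetical struct: the array field is 32 bytes, but each load or store moves only one 4-byte element, and that element size is what the instruction rewrite has to use.

#include <stdio.h>

/* Hypothetical layout with an array field, for illustration only. */
struct task_like {
        unsigned int cpus[8];
};

int main(void)
{
        struct task_like t = { .cpus = { 1, 2, 3, 4, 5, 6, 7, 8 } };

        /* A single LDX/STX/ST-style access touches one element, so the
         * size used for the instruction rewrite must be the element
         * size, not the size of the whole array. */
        unsigned int one = t.cpus[3];

        printf("array size:   %zu bytes\n", sizeof(t.cpus));
        printf("element size: %zu bytes\n", sizeof(t.cpus[0]));
        printf("loaded value: %u\n", one);
        return 0;
}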
index 2bf93fc3069326a9b76da132f985ce3498a9f868..7fa6f06634a343d5651ad6e513d588e94d1eef80 100644 (file)
@@ -135,10 +135,8 @@ btrfs-make-btrfs_discard_workfn-block_group-ref-expl.patch
 btrfs-avoid-linker-error-in-btrfs_find_create_tree_b.patch
 btrfs-run-btrfs_error_commit_super-early.patch
 btrfs-fix-non-empty-delayed-iputs-list-on-unmount-du.patch
-btrfs-properly-limit-inline-data-extent-according-to.patch
 btrfs-get-zone-unusable-bytes-while-holding-lock-at-.patch
 btrfs-send-return-enametoolong-when-attempting-a-pat.patch
-btrfs-zoned-exit-btrfs_can_activate_zone-if-btrfs_fs.patch
 blk-cgroup-improve-policy-registration-error-handlin.patch
 drm-amdgpu-release-xcp_mgr-on-exit.patch
 drm-amd-display-guard-against-setting-dispclk-low-fo.patch
@@ -404,7 +402,6 @@ wifi-rtw89-8922a-fix-incorrect-sta-id-in-eht-mu-ppdu.patch
 net-pktgen-fix-access-outside-of-user-given-buffer-i.patch
 power-supply-axp20x_battery-update-temp-sensor-for-a.patch
 edac-ie31200-work-around-false-positive-build-warnin.patch
-bpf-prevent-unsafe-access-to-the-sock-fields-in-the-.patch
 i3c-master-svc-flush-fifo-before-sending-dynamic-add.patch
 netdevsim-call-napi_schedule-from-a-timer-context.patch
 mfd-axp20x-axp717-add-axp717_ts_pin_cfg-to-writeable.patch
@@ -456,7 +453,6 @@ media-v4l-memset-argument-to-0-before-calling-get_mb.patch
 media-stm32-csi-use-array_size-to-search-d-phy-table.patch
 media-stm32-csi-add-missing-pm_runtime_put-on-error.patch
 media-i2c-ov2740-free-control-handler-on-error-path.patch
-libbpf-fix-ldx-stx-st-co-re-relocation-size-adjustme.patch
 bnxt_en-set-npar-1.2-support-when-registering-with-f.patch
 net-mlx4_core-avoid-impossible-mlx4_db_alloc-order-v.patch
 drm-xe-stop-ignoring-errors-from-xe_ttm_stolen_mgr_i.patch
diff --git a/queue-6.6/bpf-prevent-unsafe-access-to-the-sock-fields-in-the-.patch b/queue-6.6/bpf-prevent-unsafe-access-to-the-sock-fields-in-the-.patch
deleted file mode 100644 (file)
index 076cd16..0000000
+++ /dev/null
@@ -1,150 +0,0 @@
-From ab98c942f5101bee496c595eb35962df46d3ef3c Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Thu, 20 Feb 2025 15:29:31 +0800
-Subject: bpf: Prevent unsafe access to the sock fields in the BPF timestamping
- callback
-
-From: Jason Xing <kerneljasonxing@gmail.com>
-
-[ Upstream commit fd93eaffb3f977b23bc0a48d4c8616e654fcf133 ]
-
-The subsequent patch will implement BPF TX timestamping. It will
-call the sockops BPF program without holding the sock lock.
-
-This breaks the current assumption that all sock ops programs will
-hold the sock lock. The sock fields of the uapi's bpf_sock_ops
-require this assumption.
-
-To address this, a new "u8 is_locked_tcp_sock;" field is added. This
-patch sets it in the current sock_ops callbacks. The "is_fullsock"
-test is then replaced by the "is_locked_tcp_sock" test during
-sock_ops_convert_ctx_access().
-
-The new TX timestamping callbacks added in the subsequent patch will
-not have this set. This will prevent unsafe access from the new
-timestamping callbacks.
-
-Potentially, we could allow read-only access. However, this would
-require identifying which callbacks are read-only safe and would also
-require additional BPF instruction rewrites in the convert_ctx. Since the BPF
-program can always read everything from a socket (e.g., by using
-bpf_core_cast), this patch keeps it simple and disables all read
-and write access to any socket fields through the bpf_sock_ops
-UAPI from the new TX timestamping callback.
-
-Moreover, note that some of the fields in bpf_sock_ops are specific
-to tcp_sock, and sock_ops currently only supports tcp_sock. In
-the future, UDP timestamping will be added, which will also break
-this assumption. The same idea used in this patch will be reused.
-Considering that the current sock_ops only supports tcp_sock, the
-variable is named is_locked_"tcp"_sock.
-
-Signed-off-by: Jason Xing <kerneljasonxing@gmail.com>
-Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
-Link: https://patch.msgid.link/20250220072940.99994-4-kerneljasonxing@gmail.com
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- include/linux/filter.h | 1 +
- include/net/tcp.h      | 1 +
- net/core/filter.c      | 8 ++++----
- net/ipv4/tcp_input.c   | 2 ++
- net/ipv4/tcp_output.c  | 2 ++
- 5 files changed, 10 insertions(+), 4 deletions(-)
-
-diff --git a/include/linux/filter.h b/include/linux/filter.h
-index adf65eacade06..f7d03676e16fa 100644
---- a/include/linux/filter.h
-+++ b/include/linux/filter.h
-@@ -1303,6 +1303,7 @@ struct bpf_sock_ops_kern {
-       void    *skb_data_end;
-       u8      op;
-       u8      is_fullsock;
-+      u8      is_locked_tcp_sock;
-       u8      remaining_opt_len;
-       u64     temp;                   /* temp and everything after is not
-                                        * initialized to 0 before calling
-diff --git a/include/net/tcp.h b/include/net/tcp.h
-index a6def0aab3ed3..658551a64e2a5 100644
---- a/include/net/tcp.h
-+++ b/include/net/tcp.h
-@@ -2462,6 +2462,7 @@ static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
-       memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
-       if (sk_fullsock(sk)) {
-               sock_ops.is_fullsock = 1;
-+              sock_ops.is_locked_tcp_sock = 1;
-               sock_owned_by_me(sk);
-       }
-diff --git a/net/core/filter.c b/net/core/filter.c
-index 5143c8a9e52ca..eff342e5fd8f5 100644
---- a/net/core/filter.c
-+++ b/net/core/filter.c
-@@ -10300,10 +10300,10 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
-               }                                                             \
-               *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
-                                               struct bpf_sock_ops_kern,     \
--                                              is_fullsock),                 \
-+                                              is_locked_tcp_sock),          \
-                                     fullsock_reg, si->src_reg,              \
-                                     offsetof(struct bpf_sock_ops_kern,      \
--                                             is_fullsock));                 \
-+                                             is_locked_tcp_sock));          \
-               *insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp);         \
-               if (si->dst_reg == si->src_reg)                               \
-                       *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg,       \
-@@ -10388,10 +10388,10 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
-                                              temp));                        \
-               *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
-                                               struct bpf_sock_ops_kern,     \
--                                              is_fullsock),                 \
-+                                              is_locked_tcp_sock),          \
-                                     reg, si->dst_reg,                       \
-                                     offsetof(struct bpf_sock_ops_kern,      \
--                                             is_fullsock));                 \
-+                                             is_locked_tcp_sock));          \
-               *insn++ = BPF_JMP_IMM(BPF_JEQ, reg, 0, 2);                    \
-               *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
-                                               struct bpf_sock_ops_kern, sk),\
-diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
-index a172248b66783..4bed6f9923059 100644
---- a/net/ipv4/tcp_input.c
-+++ b/net/ipv4/tcp_input.c
-@@ -168,6 +168,7 @@ static void bpf_skops_parse_hdr(struct sock *sk, struct sk_buff *skb)
-       memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
-       sock_ops.op = BPF_SOCK_OPS_PARSE_HDR_OPT_CB;
-       sock_ops.is_fullsock = 1;
-+      sock_ops.is_locked_tcp_sock = 1;
-       sock_ops.sk = sk;
-       bpf_skops_init_skb(&sock_ops, skb, tcp_hdrlen(skb));
-@@ -184,6 +185,7 @@ static void bpf_skops_established(struct sock *sk, int bpf_op,
-       memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
-       sock_ops.op = bpf_op;
-       sock_ops.is_fullsock = 1;
-+      sock_ops.is_locked_tcp_sock = 1;
-       sock_ops.sk = sk;
-       /* sk with TCP_REPAIR_ON does not have skb in tcp_finish_connect */
-       if (skb)
-diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
-index 560273e7f7736..5d3d92e5bd42e 100644
---- a/net/ipv4/tcp_output.c
-+++ b/net/ipv4/tcp_output.c
-@@ -522,6 +522,7 @@ static void bpf_skops_hdr_opt_len(struct sock *sk, struct sk_buff *skb,
-               sock_owned_by_me(sk);
-               sock_ops.is_fullsock = 1;
-+              sock_ops.is_locked_tcp_sock = 1;
-               sock_ops.sk = sk;
-       }
-@@ -567,6 +568,7 @@ static void bpf_skops_write_hdr_opt(struct sock *sk, struct sk_buff *skb,
-               sock_owned_by_me(sk);
-               sock_ops.is_fullsock = 1;
-+              sock_ops.is_locked_tcp_sock = 1;
-               sock_ops.sk = sk;
-       }
--- 
-2.39.5
-
diff --git a/queue-6.6/btrfs-zoned-exit-btrfs_can_activate_zone-if-btrfs_fs.patch b/queue-6.6/btrfs-zoned-exit-btrfs_can_activate_zone-if-btrfs_fs.patch
deleted file mode 100644 (file)
index 02392a2..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-From 598ac7633343dd8d3e82fcafbed63b0febbc16d7 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 12 Feb 2025 15:05:00 +0100
-Subject: btrfs: zoned: exit btrfs_can_activate_zone if
- BTRFS_FS_NEED_ZONE_FINISH is set
-
-From: Johannes Thumshirn <johannes.thumshirn@wdc.com>
-
-[ Upstream commit 26b38e28162ef4ceb1e0482299820fbbd7dbcd92 ]
-
-If BTRFS_FS_NEED_ZONE_FINISH is already set for the whole filesystem, exit
-early in btrfs_can_activate_zone(). There's no need to check if
-BTRFS_FS_NEED_ZONE_FINISH needs to be set if it is already set.
-
-Reviewed-by: Naohiro Aota <naohiro.aota@wdc.com>
-Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
-Reviewed-by: David Sterba <dsterba@suse.com>
-Signed-off-by: David Sterba <dsterba@suse.com>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- fs/btrfs/zoned.c | 3 +++
- 1 file changed, 3 insertions(+)
-
-diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
-index 197dfafbf4013..6ef0b47facbf3 100644
---- a/fs/btrfs/zoned.c
-+++ b/fs/btrfs/zoned.c
-@@ -2220,6 +2220,9 @@ bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
-       if (!btrfs_is_zoned(fs_info))
-               return true;
-+      if (test_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags))
-+              return false;
-+
-       /* Check if there is a device with active zones left */
-       mutex_lock(&fs_info->chunk_mutex);
-       spin_lock(&fs_info->zone_active_bgs_lock);
--- 
-2.39.5
-
diff --git a/queue-6.6/libbpf-fix-ldx-stx-st-co-re-relocation-size-adjustme.patch b/queue-6.6/libbpf-fix-ldx-stx-st-co-re-relocation-size-adjustme.patch
deleted file mode 100644 (file)
index 2f85703..0000000
+++ /dev/null
@@ -1,92 +0,0 @@
-From dfee39a7da12f80d599393f6cd36f1d96fddd66a Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Thu, 6 Feb 2025 17:48:08 -0800
-Subject: libbpf: fix LDX/STX/ST CO-RE relocation size adjustment logic
-
-From: Andrii Nakryiko <andrii@kernel.org>
-
-[ Upstream commit 06096d19ee3897a7e70922580159607fe315da7a ]
-
-Libbpf has a somewhat obscure feature of automatically adjusting the
-"size" of LDX/STX/ST instructions (memory load and store instructions),
-based on the originally recorded access size (u8, u16, u32, or u64) and
-the actual size of the field on the target kernel. This is meant to
-facilitate using BPF CO-RE on 32-bit architectures (pointers are always
-64-bit in BPF, but the host kernel's BTF describes them as 32-bit
-types), as well as to generally support safe type changes (unsigned
-integer type changes can be transparently "relocated").
-
-One issue that surfaced only now, 5 years after this logic was
-implemented, is how this all works when dealing with fields that are
-arrays. This isn't all that easy and straightforward to hit (see
-selftests that reproduce this condition), but one of the sched_ext BPF
-programs did hit it with an innocent-looking loop.
-
-Long story short, libbpf used to calculate the entire array size,
-instead of making sure to only calculate the array's element size. But
-it's the element that is loaded by LDX/STX/ST instructions (1, 2, 4, or
-8 bytes), so that's what libbpf should check. This patch adjusts the
-logic for arrays and fixes the issue.
-
-Reported-by: Emil Tsalapatis <emil@etsalapatis.com>
-Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
-Acked-by: Eduard Zingerman <eddyz87@gmail.com>
-Link: https://lore.kernel.org/r/20250207014809.1573841-1-andrii@kernel.org
-Signed-off-by: Alexei Starovoitov <ast@kernel.org>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- tools/lib/bpf/relo_core.c | 24 ++++++++++++++++++++----
- 1 file changed, 20 insertions(+), 4 deletions(-)
-
-diff --git a/tools/lib/bpf/relo_core.c b/tools/lib/bpf/relo_core.c
-index 63a4d5ad12d1a..26cde1b27174b 100644
---- a/tools/lib/bpf/relo_core.c
-+++ b/tools/lib/bpf/relo_core.c
-@@ -683,7 +683,7 @@ static int bpf_core_calc_field_relo(const char *prog_name,
- {
-       const struct bpf_core_accessor *acc;
-       const struct btf_type *t;
--      __u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id;
-+      __u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id, elem_id;
-       const struct btf_member *m;
-       const struct btf_type *mt;
-       bool bitfield;
-@@ -706,8 +706,14 @@ static int bpf_core_calc_field_relo(const char *prog_name,
-       if (!acc->name) {
-               if (relo->kind == BPF_CORE_FIELD_BYTE_OFFSET) {
-                       *val = spec->bit_offset / 8;
--                      /* remember field size for load/store mem size */
--                      sz = btf__resolve_size(spec->btf, acc->type_id);
-+                      /* remember field size for load/store mem size;
-+                       * note, for arrays we care about individual element
-+                       * sizes, not the overall array size
-+                       */
-+                      t = skip_mods_and_typedefs(spec->btf, acc->type_id, &elem_id);
-+                      while (btf_is_array(t))
-+                              t = skip_mods_and_typedefs(spec->btf, btf_array(t)->type, &elem_id);
-+                      sz = btf__resolve_size(spec->btf, elem_id);
-                       if (sz < 0)
-                               return -EINVAL;
-                       *field_sz = sz;
-@@ -767,7 +773,17 @@ static int bpf_core_calc_field_relo(const char *prog_name,
-       case BPF_CORE_FIELD_BYTE_OFFSET:
-               *val = byte_off;
-               if (!bitfield) {
--                      *field_sz = byte_sz;
-+                      /* remember field size for load/store mem size;
-+                       * note, for arrays we care about individual element
-+                       * sizes, not the overall array size
-+                       */
-+                      t = skip_mods_and_typedefs(spec->btf, field_type_id, &elem_id);
-+                      while (btf_is_array(t))
-+                              t = skip_mods_and_typedefs(spec->btf, btf_array(t)->type, &elem_id);
-+                      sz = btf__resolve_size(spec->btf, elem_id);
-+                      if (sz < 0)
-+                              return -EINVAL;
-+                      *field_sz = sz;
-                       *type_id = field_type_id;
-               }
-               break;
--- 
-2.39.5
-
index c62d3da9809450ab44c9902b8723af60e3979846..9963e692a39cf375d7a92a5efc751ea30920e165 100644 (file)
@@ -78,7 +78,6 @@ btrfs-run-btrfs_error_commit_super-early.patch
 btrfs-fix-non-empty-delayed-iputs-list-on-unmount-du.patch
 btrfs-get-zone-unusable-bytes-while-holding-lock-at-.patch
 btrfs-send-return-enametoolong-when-attempting-a-pat.patch
-btrfs-zoned-exit-btrfs_can_activate_zone-if-btrfs_fs.patch
 drm-amd-display-guard-against-setting-dispclk-low-fo.patch
 i3c-master-svc-fix-missing-stop-for-master-request.patch
 dlm-make-tcp-still-work-in-multi-link-env.patch
@@ -207,7 +206,6 @@ wifi-rtw88-fix-rtw_desc_to_mcsrate-to-handle-mcs16-3.patch
 wifi-rtw89-fw-propagate-error-code-from-rtw89_h2c_tx.patch
 net-pktgen-fix-access-outside-of-user-given-buffer-i.patch
 edac-ie31200-work-around-false-positive-build-warnin.patch
-bpf-prevent-unsafe-access-to-the-sock-fields-in-the-.patch
 i3c-master-svc-flush-fifo-before-sending-dynamic-add.patch
 drm-amd-display-add-support-for-disconnected-edp-str.patch
 serial-mctrl_gpio-split-disable_ms-into-sync-and-no_.patch
@@ -234,7 +232,6 @@ smack-revert-smackfs-added-check-catlen.patch
 kunit-tool-use-qboot-on-qemu-x86_64.patch
 media-i2c-imx219-correct-the-minimum-vblanking-value.patch
 media-v4l-memset-argument-to-0-before-calling-get_mb.patch
-libbpf-fix-ldx-stx-st-co-re-relocation-size-adjustme.patch
 net-mlx4_core-avoid-impossible-mlx4_db_alloc-order-v.patch
 clk-qcom-ipq5018-allow-it-to-be-bulid-on-arm32.patch
 clk-qcom-clk-alpha-pll-do-not-use-random-stack-value.patch