selftests/bpf: Extend verifier and bpf_sock tests for dst_port loads
Author:     Jakub Sitnicki <jakub@cloudflare.com>
AuthorDate: Wed, 3 Aug 2022 14:50:02 +0000 (17:50 +0300)
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Thu, 11 Aug 2022 10:57:51 +0000 (12:57 +0200)
commit 8f50f16ff39dd4e2d43d1548ca66925652f8aff7 upstream.

Add coverage to the verifier tests and to the tests for reading bpf_sock
fields to ensure that 32-bit, 16-bit, and 8-bit loads from the dst_port
field are allowed only at intended offsets and produce the expected values.

While 16-bit and 8-bit access to the dst_port field is straightforward,
32-bit wide loads also need to be allowed, and must produce a zero-padded
16-bit value, for backward compatibility.
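
For illustration, here is a minimal sketch of the two load widths the new
tests exercise, as they might look from a BPF C program built against the
updated header below. The program skeleton and names are illustrative only,
not part of this patch:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("cgroup_skb/ingress")
    int read_dst_port(struct __sk_buff *skb)
    {
            struct bpf_sock *sk = skb->sk;

            if (!sk)
                    return 1;
            sk = bpf_sk_fullsock(sk);
            if (!sk)
                    return 1;

            /* New layout: dst_port is __be16, so this is a 2-byte load. */
            __u16 half = sk->dst_port;

            /* A program compiled against the old __u32 dst_port emits a
             * 4-byte load at the same offset; it must keep passing the
             * verifier and reads back as the port zero-padded to 32 bits.
             */
            __u32 word = *(__u32 *)&sk->dst_port;

            /* Equal by the zero-padding rule; also keeps both loads live. */
            return half == (__u16)word;
    }

    char _license[] SEC("license") = "GPL";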

Signed-off-by: Jakub Sitnicki <jakub@cloudflare.com>
Link: https://lore.kernel.org/r/20220130115518.213259-3-jakub@cloudflare.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
[OP: backport to 5.4: cherry-pick verifier changes only]
Signed-off-by: Ovidiu Panait <ovidiu.panait@windriver.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
tools/include/uapi/linux/bpf.h
tools/testing/selftests/bpf/verifier/sock.c

diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 0bfad86ec960a3e67db78ac105acbc30ed314ab9..cb0631098f9187b5a80cd2c5371bd118cffd7617 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -3068,7 +3068,8 @@ struct bpf_sock {
        __u32 src_ip4;
        __u32 src_ip6[4];
        __u32 src_port;         /* host byte order */
-       __u32 dst_port;         /* network byte order */
+       __be16 dst_port;        /* network byte order */
+       __u16 :16;              /* zero padding */
        __u32 dst_ip4;
        __u32 dst_ip6[4];
        __u32 state;
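
The anonymous 16-bit field exists only to preserve the old layout: dst_ip4
and every member after it keep their previous offsets. A compile-time check
against the new header could look like this (the assertions are mine, not
part of the patch):

    #include <stddef.h>
    #include <linux/bpf.h>

    _Static_assert(sizeof(((struct bpf_sock *)0)->dst_port) == 2,
                   "dst_port narrowed to 16 bits");
    _Static_assert(offsetof(struct bpf_sock, dst_ip4) ==
                   offsetof(struct bpf_sock, dst_port) + 4,
                   "anonymous padding preserves the following offsets");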
diff --git a/tools/testing/selftests/bpf/verifier/sock.c b/tools/testing/selftests/bpf/verifier/sock.c
index 9ed192e14f5fe785613b1ecf5026fd0c499aec92..b2ce50bb935b85e897edd564aa29a2cadb657fe5 100644
--- a/tools/testing/selftests/bpf/verifier/sock.c
+++ b/tools/testing/selftests/bpf/verifier/sock.c
        .result = ACCEPT,
 },
 {
-       "sk_fullsock(skb->sk): sk->dst_port [narrow load]",
+       "sk_fullsock(skb->sk): sk->dst_port [word load] (backward compatibility)",
+       .insns = {
+       BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port)),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+       .result = ACCEPT,
+},
+{
+       "sk_fullsock(skb->sk): sk->dst_port [half load]",
        .insns = {
        BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
        BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
        .result = ACCEPT,
 },
 {
-       "sk_fullsock(skb->sk): sk->dst_port [load 2nd byte]",
+       "sk_fullsock(skb->sk): sk->dst_port [half load] (invalid)",
+       .insns = {
+       BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port) + 2),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+       .result = REJECT,
+       .errstr = "invalid sock access",
+},
+{
+       "sk_fullsock(skb->sk): sk->dst_port [byte load]",
+       .insns = {
+       BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_0, offsetof(struct bpf_sock, dst_port)),
+       BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_0, offsetof(struct bpf_sock, dst_port) + 1),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+       .result = ACCEPT,
+},
+{
+       "sk_fullsock(skb->sk): sk->dst_port [byte load] (invalid)",
+       .insns = {
+       BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port) + 2),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+       .result = REJECT,
+       .errstr = "invalid sock access",
+},
+{
+       "sk_fullsock(skb->sk): past sk->dst_port [half load] (invalid)",
        .insns = {
        BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
        BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
-       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port) + 1),
+       BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_0, offsetofend(struct bpf_sock, dst_port)),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
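
Each test above shares the same preamble: load skb->sk, return 0 if it is
NULL, promote it with bpf_sk_fullsock(), return 0 if that fails, then issue
the probed load before returning 0. Only the width and offset of the probed
BPF_LDX_MEM vary from test to test.

The final hunk probes the first byte past the field via offsetofend(). The
kernel macro (include/linux/stddef.h) is equivalent to:

    #define offsetofend(TYPE, MEMBER) \
            (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

For the two-byte dst_port this resolves to dst_port + 2, which now falls in
the zero padding, so a half-word load there must be rejected with "invalid
sock access". Under the old __u32 layout the same offset was still inside
the field.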