bpf: Add PROG_TEST_RUN support for sk_lookup programs
author Lorenz Bauer <lmb@cloudflare.com>
Mon, 1 Aug 2022 07:29:15 +0000 (15:29 +0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 3 Aug 2022 10:00:52 +0000 (12:00 +0200)
commit 7c32e8f8bc33a5f4b113a630857e46634e3e143b upstream.

Allow passing sk_lookup programs to PROG_TEST_RUN. User space
provides the full bpf_sk_lookup struct as context. Since the
context includes a socket pointer that can't be exposed to
user space, we define that PROG_TEST_RUN returns the cookie
of the selected socket, or zero, in place of the socket pointer.

We don't support testing programs that select a reuseport socket,
since this would mean running another (unrelated) BPF program
from the sk_lookup test handler.

Signed-off-by: Lorenz Bauer <lmb@cloudflare.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20210303101816.36774-3-lmb@cloudflare.com
Signed-off-by: Tianchen Ding <dtcccc@linux.alibaba.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
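
For illustration, a minimal user-space sketch of the interface described
above (not part of this patch; prog_fd and all field values are assumed,
and the headers must include this change): populate struct bpf_sk_lookup
as ctx_in, run the program once via the bpf(2) syscall, and read the
selected socket's cookie back from ctx_out.

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <linux/bpf.h>

    /* Sketch: run a loaded BPF_PROG_TYPE_SK_LOOKUP program (prog_fd is
     * assumed) once and recover the cookie of the socket it selected. */
    static __u64 test_run_sk_lookup(int prog_fd)
    {
            struct bpf_sk_lookup ctx = {};
            union bpf_attr attr = {};

            /* Describe a hypothetical inbound TCP packet to 127.0.0.1:7007. */
            ctx.family      = AF_INET;
            ctx.protocol    = IPPROTO_TCP;
            ctx.local_ip4   = htonl(INADDR_LOOPBACK); /* network byte order */
            ctx.local_port  = 7007;                   /* host byte order, per UAPI */
            ctx.remote_ip4  = htonl(INADDR_LOOPBACK);
            ctx.remote_port = htons(60000);           /* network byte order */

            attr.test.prog_fd      = prog_fd;
            attr.test.ctx_in       = (unsigned long)&ctx;
            attr.test.ctx_size_in  = sizeof(ctx);
            attr.test.ctx_out      = (unsigned long)&ctx;
            attr.test.ctx_size_out = sizeof(ctx);
            attr.test.repeat       = 1;

            if (syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr)) < 0) {
                    perror("BPF_PROG_TEST_RUN");
                    return 0;
            }
            /* ctx.cookie overlays the sk pointer: non-zero iff a socket was
             * selected; attr.test.retval carries the SK_PASS/SK_DROP verdict. */
            return ctx.cookie;
    }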
include/linux/bpf.h
include/uapi/linux/bpf.h
net/bpf/test_run.c
net/core/filter.c
tools/include/uapi/linux/bpf.h

index f21bc441e3fa8dcd7f668c1f294f36d2e594d9ab..b010d45a1ecd5d68ed57d3da72e79e6fef501f14 100644 (file)
@@ -1457,6 +1457,9 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
 int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
                             const union bpf_attr *kattr,
                             union bpf_attr __user *uattr);
+int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
+                               const union bpf_attr *kattr,
+                               union bpf_attr __user *uattr);
 bool btf_ctx_access(int off, int size, enum bpf_access_type type,
                    const struct bpf_prog *prog,
                    struct bpf_insn_access_aux *info);
@@ -1671,6 +1674,13 @@ static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
        return -ENOTSUPP;
 }
 
+static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
+                                             const union bpf_attr *kattr,
+                                             union bpf_attr __user *uattr)
+{
+       return -ENOTSUPP;
+}
+
 static inline void bpf_map_put(struct bpf_map *map)
 {
 }
index 0f39fdcb2273c4ca42c7a00a88ec2a34e3faa886..2a234023821e387f2d07ec95756671721260b619 100644 (file)
@@ -5007,7 +5007,10 @@ struct bpf_pidns_info {
 
 /* User accessible data for SK_LOOKUP programs. Add new fields at the end. */
 struct bpf_sk_lookup {
-       __bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */
+       union {
+               __bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */
+               __u64 cookie; /* Non-zero if socket was selected in PROG_TEST_RUN */
+       };
 
        __u32 family;           /* Protocol family (AF_INET, AF_INET6) */
        __u32 protocol;         /* IP protocol (IPPROTO_TCP, IPPROTO_UDP) */
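
A test harness could cross-check the returned cookie against the listener
it expects the program to pick; below is a sketch (server_fd is a
hypothetical test-owned socket) using the SO_COOKIE socket option, which
reports the same identifier the union above exposes.

    #include <sys/socket.h>
    #include <linux/types.h>

    #ifndef SO_COOKIE
    #define SO_COOKIE 57 /* from asm-generic/socket.h */
    #endif

    /* Sketch: verify that the cookie reported by PROG_TEST_RUN matches
     * the server socket the program was expected to select. */
    static int cookie_matches(int server_fd, __u64 test_run_cookie)
    {
            __u64 sock_cookie = 0;
            socklen_t len = sizeof(sock_cookie);

            if (getsockopt(server_fd, SOL_SOCKET, SO_COOKIE, &sock_cookie, &len))
                    return 0;
            return sock_cookie == test_run_cookie;
    }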
index d2a4f04df1da80537dc8f2833f97cd183ca628e7..f8b231bbbe3816bfff624c1ef5f1b6dfeebf2bfc 100644 (file)
 #include <net/bpf_sk_storage.h>
 #include <net/sock.h>
 #include <net/tcp.h>
+#include <net/net_namespace.h>
 #include <linux/error-injection.h>
 #include <linux/smp.h>
+#include <linux/sock_diag.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/bpf_test_run.h>
@@ -796,3 +798,106 @@ out:
        kfree(data);
        return ret;
 }
+
+int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr,
+                               union bpf_attr __user *uattr)
+{
+       struct bpf_test_timer t = { NO_PREEMPT };
+       struct bpf_prog_array *progs = NULL;
+       struct bpf_sk_lookup_kern ctx = {};
+       u32 repeat = kattr->test.repeat;
+       struct bpf_sk_lookup *user_ctx;
+       u32 retval, duration;
+       int ret = -EINVAL;
+
+       if (prog->type != BPF_PROG_TYPE_SK_LOOKUP)
+               return -EINVAL;
+
+       if (kattr->test.flags || kattr->test.cpu)
+               return -EINVAL;
+
+       if (kattr->test.data_in || kattr->test.data_size_in || kattr->test.data_out ||
+           kattr->test.data_size_out)
+               return -EINVAL;
+
+       if (!repeat)
+               repeat = 1;
+
+       user_ctx = bpf_ctx_init(kattr, sizeof(*user_ctx));
+       if (IS_ERR(user_ctx))
+               return PTR_ERR(user_ctx);
+
+       if (!user_ctx)
+               return -EINVAL;
+
+       if (user_ctx->sk)
+               goto out;
+
+       if (!range_is_zero(user_ctx, offsetofend(typeof(*user_ctx), local_port), sizeof(*user_ctx)))
+               goto out;
+
+       if (user_ctx->local_port > U16_MAX || user_ctx->remote_port > U16_MAX) {
+               ret = -ERANGE;
+               goto out;
+       }
+
+       ctx.family = (u16)user_ctx->family;
+       ctx.protocol = (u16)user_ctx->protocol;
+       ctx.dport = (u16)user_ctx->local_port;
+       ctx.sport = (__force __be16)user_ctx->remote_port;
+
+       switch (ctx.family) {
+       case AF_INET:
+               ctx.v4.daddr = (__force __be32)user_ctx->local_ip4;
+               ctx.v4.saddr = (__force __be32)user_ctx->remote_ip4;
+               break;
+
+#if IS_ENABLED(CONFIG_IPV6)
+       case AF_INET6:
+               ctx.v6.daddr = (struct in6_addr *)user_ctx->local_ip6;
+               ctx.v6.saddr = (struct in6_addr *)user_ctx->remote_ip6;
+               break;
+#endif
+
+       default:
+               ret = -EAFNOSUPPORT;
+               goto out;
+       }
+
+       progs = bpf_prog_array_alloc(1, GFP_KERNEL);
+       if (!progs) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       progs->items[0].prog = prog;
+
+       bpf_test_timer_enter(&t);
+       do {
+               ctx.selected_sk = NULL;
+               retval = BPF_PROG_SK_LOOKUP_RUN_ARRAY(progs, ctx, BPF_PROG_RUN);
+       } while (bpf_test_timer_continue(&t, repeat, &ret, &duration));
+       bpf_test_timer_leave(&t);
+
+       if (ret < 0)
+               goto out;
+
+       user_ctx->cookie = 0;
+       if (ctx.selected_sk) {
+               if (ctx.selected_sk->sk_reuseport && !ctx.no_reuseport) {
+                       ret = -EOPNOTSUPP;
+                       goto out;
+               }
+
+               user_ctx->cookie = sock_gen_cookie(ctx.selected_sk);
+       }
+
+       ret = bpf_test_finish(kattr, uattr, NULL, 0, retval, duration);
+       if (!ret)
+               ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(*user_ctx));
+
+out:
+       bpf_prog_array_free(progs);
+       kfree(user_ctx);
+       return ret;
+}
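
For context, a sketch of a BPF-side program this runner could exercise,
modeled loosely on tools/testing/selftests/bpf/progs/test_sk_lookup.c
(redir_map, KEY and the function name are assumptions of the sketch):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
            __uint(type, BPF_MAP_TYPE_SOCKMAP);
            __uint(max_entries, 1);
            __type(key, __u32);
            __type(value, __u64);
    } redir_map SEC(".maps");

    static const __u32 KEY = 0;

    SEC("sk_lookup")
    int select_sock(struct bpf_sk_lookup *ctx)
    {
            struct bpf_sock *sk;
            long err;

            sk = bpf_map_lookup_elem(&redir_map, &KEY);
            if (!sk)
                    return SK_DROP;

            /* Steer the lookup to sk. If sk were a reuseport socket, the
             * runner above would report -EOPNOTSUPP for the test run. */
            err = bpf_sk_assign(ctx, sk, 0);
            bpf_sk_release(sk);
            return err ? SK_DROP : SK_PASS;
    }

    char _license[] SEC("license") = "GPL";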
index e2b491665775fc215da14de85d170853287de1e4..815edf7bc4390d3de861dcb8321b04b93556c98a 100644 (file)
@@ -10334,6 +10334,7 @@ static u32 sk_lookup_convert_ctx_access(enum bpf_access_type type,
 }
 
 const struct bpf_prog_ops sk_lookup_prog_ops = {
+       .test_run = bpf_prog_test_run_sk_lookup,
 };
 
 const struct bpf_verifier_ops sk_lookup_verifier_ops = {
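
This one line is what routes BPF_PROG_TEST_RUN to the new handler; a
simplified paraphrase of the dispatch in kernel/bpf/syscall.c (capability
and attribute checks omitted):

    static int bpf_prog_test_run(const union bpf_attr *attr,
                                 union bpf_attr __user *uattr)
    {
            struct bpf_prog *prog;
            int ret = -ENOTSUPP;

            prog = bpf_prog_get(attr->test.prog_fd);
            if (IS_ERR(prog))
                    return PTR_ERR(prog);

            /* Defer to the per-type ops; sk_lookup_prog_ops now provides
             * bpf_prog_test_run_sk_lookup here. */
            if (prog->aux->ops->test_run)
                    ret = prog->aux->ops->test_run(prog, attr, uattr);

            bpf_prog_put(prog);
            return ret;
    }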
index e440cd7f32a6f3b03ba91b75a0145c1cb0127a54..b9ee2ded381ab37d5ad914f82d9fb936d5c12ba0 100644 (file)
@@ -5006,7 +5006,10 @@ struct bpf_pidns_info {
 
 /* User accessible data for SK_LOOKUP programs. Add new fields at the end. */
 struct bpf_sk_lookup {
-       __bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */
+       union {
+               __bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */
+               __u64 cookie; /* Non-zero if socket was selected in PROG_TEST_RUN */
+       };
 
        __u32 family;           /* Protocol family (AF_INET, AF_INET6) */
        __u32 protocol;         /* IP protocol (IPPROTO_TCP, IPPROTO_UDP) */