From: Greg Kroah-Hartman
Date: Mon, 15 Jul 2024 12:09:06 +0000 (+0200)
Subject: 5.10-stable patches
X-Git-Tag: v4.19.318~40
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=69fdb6f668b04810bd32015564cc39498dec9217;p=thirdparty%2Fkernel%2Fstable-queue.git

5.10-stable patches

added patches:
	bpf-allow-reads-from-uninit-stack.patch
	ipv6-annotate-data-races-around-cnf.disable_ipv6.patch
	ipv6-prevent-null-dereference-in-ip6_output.patch
---

diff --git a/queue-5.10/bpf-allow-reads-from-uninit-stack.patch b/queue-5.10/bpf-allow-reads-from-uninit-stack.patch
new file mode 100644
index 00000000000..984f29be43e
--- /dev/null
+++ b/queue-5.10/bpf-allow-reads-from-uninit-stack.patch
@@ -0,0 +1,802 @@
+From 6715df8d5d24655b9fd368e904028112b54c7de1 Mon Sep 17 00:00:00 2001
+From: Eduard Zingerman
+Date: Sun, 19 Feb 2023 22:04:26 +0200
+Subject: bpf: Allow reads from uninit stack
+
+From: Eduard Zingerman
+
+commit 6715df8d5d24655b9fd368e904028112b54c7de1 upstream.
+
+This commit updates the following functions to allow reads from
+uninitialized stack locations when the env->allow_uninit_stack option
+is enabled:
+- check_stack_read_fixed_off()
+- check_stack_range_initialized(), called from:
+  - check_stack_read_var_off()
+  - check_helper_mem_access()
+
+This change makes it possible to relax the logic in stacksafe() to
+treat STACK_MISC and STACK_INVALID in the same way, which makes the
+following stack slot configurations equivalent:
+
+  | Cached state     | Current state    |
+  | stack slot       | stack slot       |
+  |------------------+------------------|
+  | STACK_INVALID or | STACK_INVALID or |
+  | STACK_MISC       | STACK_SPILL or   |
+  |                  | STACK_MISC or    |
+  |                  | STACK_ZERO or    |
+  |                  | STACK_DYNPTR     |
+
+This leads to significant verification speed gains (see below).
+
+The idea was suggested by Andrii Nakryiko [1] and an initial patch was
+created by Alexei Starovoitov [2].
+
+Currently env->allow_uninit_stack is enabled for programs loaded by
+users with CAP_PERFMON or CAP_SYS_ADMIN capabilities.
+
+A number of test cases from verifier/*.c expected uninitialized stack
+access to be an error. These test cases were updated to execute in
+unprivileged mode (thus preserving the tests).
+
+The test progs/test_global_func10.c expected the "invalid indirect read
+from stack" error message because of access to an uninitialized memory
+region. This error is no longer possible in privileged mode. The test
+is updated to provoke the error "invalid indirect access to stack"
+through access to an invalid stack address instead (such an error is
+not verified by the progs/test_global_func*.c series of tests).
+
+The following tests had to be removed because they can't be made
+unprivileged:
+- verifier/sock.c:
+  - "sk_storage_get(map, skb->sk, &stack_value, 1): partially init
+    stack_value"
+    BPF_PROG_TYPE_SCHED_CLS programs are not executed in unprivileged
+    mode.
+- verifier/var_off.c:
+  - "indirect variable-offset stack access, max_off+size > max_initialized"
+  - "indirect variable-offset stack access, uninitialized"
+    These tests verify that access to uninitialized stack values is
+    detected when the stack offset is not a constant. However, variable
+    stack access is prohibited in unprivileged mode, thus these tests
+    are no longer valid.
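+
+As a minimal illustration (a hypothetical program, not part of the
+selftests or of the original upstream message; shown here only to make
+the new behavior concrete), the following kind of fixed-offset read
+from a never-written stack slot used to be rejected with "invalid read
+from stack" and is now accepted when the loader has CAP_PERFMON or
+CAP_SYS_ADMIN:
+
+    // SPDX-License-Identifier: GPL-2.0
+    #include <linux/bpf.h>
+    #include <bpf/bpf_helpers.h>
+
+    SEC("raw_tracepoint/sys_enter")
+    int read_uninit_stack(void *ctx)
+    {
+    	volatile __u64 buf[4];	/* deliberately never written */
+
+    	/* Fixed-offset read of an uninitialized (STACK_INVALID) slot;
+    	 * accepted only when env->allow_uninit_stack is set.
+    	 */
+    	return buf[0] & 1;
+    }
+
+    char _license[] SEC("license") = "GPL";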
+
+ * * *
+
+Here is a veristat log comparing this patch with current master on a
+set of selftest binaries listed in tools/testing/selftests/bpf/veristat.cfg
+and cilium BPF binaries (see [3]):
+
+$ ./veristat -e file,prog,states -C -f 'states_pct<-30' master.log current.log
+File                        Program                     States (A)  States (B)  States (DIFF)
+--------------------------  --------------------------  ----------  ----------  ----------------
+bpf_host.o                  tail_handle_ipv6_from_host         349         244    -105 (-30.09%)
+bpf_host.o                  tail_handle_nat_fwd_ipv4          1320         895    -425 (-32.20%)
+bpf_lxc.o                   tail_handle_nat_fwd_ipv4          1320         895    -425 (-32.20%)
+bpf_sock.o                  cil_sock4_connect                   70          48     -22 (-31.43%)
+bpf_sock.o                  cil_sock4_sendmsg                   68          46     -22 (-32.35%)
+bpf_xdp.o                   tail_handle_nat_fwd_ipv4          1554         803    -751 (-48.33%)
+bpf_xdp.o                   tail_lb_ipv4                      6457        2473   -3984 (-61.70%)
+bpf_xdp.o                   tail_lb_ipv6                      7249        3908   -3341 (-46.09%)
+pyperf600_bpf_loop.bpf.o    on_event                           287         145    -142 (-49.48%)
+strobemeta.bpf.o            on_event                         15915        4772  -11143 (-70.02%)
+strobemeta_nounroll2.bpf.o  on_event                         17087        3820  -13267 (-77.64%)
+xdp_synproxy_kern.bpf.o     syncookie_tc                     21271        6635  -14636 (-68.81%)
+xdp_synproxy_kern.bpf.o     syncookie_xdp                    23122        6024  -17098 (-73.95%)
+--------------------------  --------------------------  ----------  ----------  ----------------
+
+Note: I limited the selection by states_pct<-30%.
+
+Inspection of differences in pyperf600_bpf_loop behavior shows that
+the following patch for the test removes almost all differences:
+
+    - a/tools/testing/selftests/bpf/progs/pyperf.h
+    + b/tools/testing/selftests/bpf/progs/pyperf.h
+    @ -266,8 +266,8 @ int __on_event(struct bpf_raw_tracepoint_args *ctx)
+      }
+
+      if (event->pthread_match || !pidData->use_tls) {
+    -		void* frame_ptr;
+    -		FrameData frame;
+    +		void* frame_ptr = 0;
+    +		FrameData frame = {};
+     		Symbol sym = {};
+     		int cur_cpu = bpf_get_smp_processor_id();
+
+W/o this patch the difference comes from the following pattern
+(for different variables):
+
+    static bool get_frame_data(... FrameData *frame ...)
+    {
+        ...
+        bpf_probe_read_user(&frame->f_code, ...);
+        if (!frame->f_code)
+            return false;
+        ...
+        bpf_probe_read_user(&frame->co_name, ...);
+        if (frame->co_name)
+            ...;
+    }
+
+    int __on_event(struct bpf_raw_tracepoint_args *ctx)
+    {
+        FrameData frame;
+        ...
+        get_frame_data(... &frame ...) // indirectly via a bpf_loop & callback
+        ...
+    }
+
+    SEC("raw_tracepoint/kfree_skb")
+    int on_event(struct bpf_raw_tracepoint_args* ctx)
+    {
+        ...
+        ret |= __on_event(ctx);
+        ret |= __on_event(ctx);
+        ...
+    }
+
+With regard to the value `frame->co_name` the following is important:
+- Because of the conditional `if (!frame->f_code)` each call to
+  __on_event() produces two states, one with `frame->co_name` marked
+  as STACK_MISC, another with it as is (and marked STACK_INVALID on
+  the first call).
+- The call to bpf_probe_read_user() does not mark the stack slots
+  corresponding to `&frame->co_name` as REG_LIVE_WRITTEN, but it marks
+  these slots as STACK_MISC; this happens because of the following
+  loop in check_helper_call():
+
+    for (i = 0; i < meta.access_size; i++) {
+    	err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
+    			       BPF_WRITE, -1, false);
+    	if (err)
+    		return err;
+    }
+
+  Note the size of the write: it is a one-byte write for each byte
+  touched by the helper. The BPF_B write does not lead to write marks
+  for the target stack slot.
+- This means that, without this patch, when the second __on_event()
+  call is verified, `if (frame->co_name)` will propagate read marks
+  first to a stack slot with STACK_MISC marks and then to a stack slot
+  with STACK_INVALID marks, and these states would be considered
+  different.
+
+[1] https://lore.kernel.org/bpf/CAEf4BzY3e+ZuC6HUa8dCiUovQRg2SzEk7M-dSkqNZyn=xEmnPA@mail.gmail.com/
+[2] https://lore.kernel.org/bpf/CAADnVQKs2i1iuZ5SUGuJtxWVfGYR9kDgYKhq3rNV+kBLQCu7rA@mail.gmail.com/
+[3] git@github.com:anakryiko/cilium.git
+
+Suggested-by: Andrii Nakryiko
+Co-developed-by: Alexei Starovoitov
+Signed-off-by: Eduard Zingerman
+Acked-by: Andrii Nakryiko
+Link: https://lore.kernel.org/r/20230219200427.606541-2-eddyz87@gmail.com
+Signed-off-by: Alexei Starovoitov
+Signed-off-by: Maxim Mikityanskiy
+Signed-off-by: Greg Kroah-Hartman
+---
+ kernel/bpf/verifier.c                                         |   11
+ tools/testing/selftests/bpf/progs/test_global_func10.c        |   31 +
+ tools/testing/selftests/bpf/verifier/calls.c                  |   13
+ tools/testing/selftests/bpf/verifier/helper_access_var_len.c  |  104 +++--
+ tools/testing/selftests/bpf/verifier/int_ptr.c                |    9
+ tools/testing/selftests/bpf/verifier/search_pruning.c         |   13
+ tools/testing/selftests/bpf/verifier/sock.c                   |   27 -
+ tools/testing/selftests/bpf/verifier/spill_fill.c             |  211 +++++++++++
+ tools/testing/selftests/bpf/verifier/var_off.c                |   52 --
+ 9 files changed, 342 insertions(+), 129 deletions(-)
+ create mode 100644 tools/testing/selftests/bpf/progs/test_global_func10.c
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -2807,6 +2807,8 @@ static int check_stack_read_fixed_off(st
+ 			continue;
+ 		if (type == STACK_MISC)
+ 			continue;
++		if (type == STACK_INVALID && env->allow_uninit_stack)
++			continue;
+ 		verbose(env, "invalid read from stack off %d+%d size %d\n",
+ 			off, i, size);
+ 		return -EACCES;
+@@ -2844,6 +2846,8 @@ static int check_stack_read_fixed_off(st
+ 			continue;
+ 		if (type == STACK_ZERO)
+ 			continue;
++		if (type == STACK_INVALID && env->allow_uninit_stack)
++			continue;
+ 		verbose(env, "invalid read from stack off %d+%d size %d\n",
+ 			off, i, size);
+ 		return -EACCES;
+@@ -4300,7 +4304,8 @@ static int check_stack_range_initialized
+ 		stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
+ 		if (*stype == STACK_MISC)
+ 			goto mark;
+-		if (*stype == STACK_ZERO) {
++		if ((*stype == STACK_ZERO) ||
++		    (*stype == STACK_INVALID && env->allow_uninit_stack)) {
+ 			if (clobber) {
+ 				/* helper can write anything into the stack */
+ 				*stype = STACK_MISC;
+@@ -9492,6 +9497,10 @@ static bool stacksafe(struct bpf_verifie
+ 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
+ 			continue;
+
++		if (env->allow_uninit_stack &&
++		    old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC)
++			continue;
++
+ 		/* explored stack has more populated slots than current stack
+ 		 * and these slots were used
+ 		 */
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/test_global_func10.c
+@@ -0,0 +1,31 @@
++// SPDX-License-Identifier: GPL-2.0-only
++#include <stddef.h>
++#include <linux/bpf.h>
++#include <bpf/bpf_helpers.h>
++#include "bpf_misc.h"
++
++struct Small {
++	long x;
++};
++
++struct Big {
++	long x;
++	long y;
++};
++
++__noinline int foo(const struct Big *big)
++{
++	if (!big)
++		return 0;
++
++	return bpf_get_prandom_u32() < big->y;
++}
++
++SEC("cgroup_skb/ingress")
++__failure __msg("invalid indirect access to stack")
++int global_func10(struct __sk_buff *skb)
++{
++	const struct Small small = {.x = skb->len };
++
++	return foo((struct Big *)&small) ? 1 : 0;
++}
+--- a/tools/testing/selftests/bpf/verifier/calls.c
++++ b/tools/testing/selftests/bpf/verifier/calls.c
+@@ -1948,19 +1948,22 @@
+ 	 * that fp-8 stack slot was unused in the fall-through
+ 	 * branch and will accept the program incorrectly
+ 	 */
+-	BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
++	BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
++	BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 2, 2),
+ 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ 	BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ 	BPF_LD_MAP_FD(BPF_REG_1, 0),
+ 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
++	BPF_MOV64_IMM(BPF_REG_0, 0),
+ 	BPF_EXIT_INSN(),
+ 	},
+-	.fixup_map_hash_48b = { 6 },
+-	.errstr = "invalid indirect read from stack R2 off -8+0 size 8",
+-	.result = REJECT,
+-	.prog_type = BPF_PROG_TYPE_XDP,
++	.fixup_map_hash_48b = { 7 },
++	.errstr_unpriv = "invalid indirect read from stack R2 off -8+0 size 8",
++	.result_unpriv = REJECT,
++	/* in privileged mode reads from uninitialized stack locations are permitted */
++	.result = ACCEPT,
+ },
+ {
+ 	"calls: ctx read at start of subprog",
+--- a/tools/testing/selftests/bpf/verifier/helper_access_var_len.c
++++ b/tools/testing/selftests/bpf/verifier/helper_access_var_len.c
+@@ -29,19 +29,30 @@
+ {
+ 	"helper access to variable memory: stack, bitwise AND, zero included",
+ 	.insns = {
+-	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
+-	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+-	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+-	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+-	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+-	BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
+-	BPF_MOV64_IMM(BPF_REG_3, 0),
+-	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
++	/* set max stack size */
++	BPF_ST_MEM(BPF_DW, BPF_REG_10, -128, 0),
++	/* set r3 to a random value */
++	BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
++	BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
++	/* use bitwise AND to limit r3 range to [0, 64] */
++	BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 64),
++	BPF_LD_MAP_FD(BPF_REG_1, 0),
++	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
++	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
++	BPF_MOV64_IMM(BPF_REG_4, 0),
++	/* Call bpf_ringbuf_output(), it is one of a few helper functions with
++	 * ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode.
++	 * For unpriv this should signal an error, because memory at &fp[-64] is
++	 * not initialized.
++	 */
++	BPF_EMIT_CALL(BPF_FUNC_ringbuf_output),
+ 	BPF_EXIT_INSN(),
+ 	},
+-	.errstr = "invalid indirect read from stack R1 off -64+0 size 64",
+-	.result = REJECT,
+-	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
++	.fixup_map_ringbuf = { 4 },
++	.errstr_unpriv = "invalid indirect read from stack R2 off -64+0 size 64",
++	.result_unpriv = REJECT,
++	/* in privileged mode reads from uninitialized stack locations are permitted */
++	.result = ACCEPT,
+ },
+ {
+ 	"helper access to variable memory: stack, bitwise AND + JMP, wrong max",
+@@ -183,20 +194,31 @@
+ {
+ 	"helper access to variable memory: stack, JMP, no min check",
+ 	.insns = {
+-	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
+-	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+-	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+-	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+-	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+-	BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
+-	BPF_MOV64_IMM(BPF_REG_3, 0),
+-	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
++	/* set max stack size */
++	BPF_ST_MEM(BPF_DW, BPF_REG_10, -128, 0),
++	/* set r3 to a random value */
++	BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
++	BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
++	/* use JMP to limit r3 range to [0, 64] */
++	BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 64, 6),
++	BPF_LD_MAP_FD(BPF_REG_1, 0),
++	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
++	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
++	BPF_MOV64_IMM(BPF_REG_4, 0),
++	/* Call bpf_ringbuf_output(), it is one of a few helper functions with
++	 * ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode.
++	 * For unpriv this should signal an error, because memory at &fp[-64] is
++	 * not initialized.
++	 */
++	BPF_EMIT_CALL(BPF_FUNC_ringbuf_output),
+ 	BPF_MOV64_IMM(BPF_REG_0, 0),
+ 	BPF_EXIT_INSN(),
+ 	},
+-	.errstr = "invalid indirect read from stack R1 off -64+0 size 64",
+-	.result = REJECT,
+-	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
++	.fixup_map_ringbuf = { 4 },
++	.errstr_unpriv = "invalid indirect read from stack R2 off -64+0 size 64",
++	.result_unpriv = REJECT,
++	/* in privileged mode reads from uninitialized stack locations are permitted */
++	.result = ACCEPT,
+ },
+ {
+ 	"helper access to variable memory: stack, JMP (signed), no min check",
+@@ -564,29 +586,41 @@
+ {
+ 	"helper access to variable memory: 8 bytes leak",
+ 	.insns = {
+-	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
+-	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+-	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
++	/* set max stack size */
++	BPF_ST_MEM(BPF_DW, BPF_REG_10, -128, 0),
++	/* set r3 to a random value */
++	BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
++	BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
++	BPF_LD_MAP_FD(BPF_REG_1, 0),
++	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
++	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
+ 	BPF_MOV64_IMM(BPF_REG_0, 0),
+ 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
+ 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
+ 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
+ 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
++	/* Note: fp[-32] left uninitialized */
+ 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
+ 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+ 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+-	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+-	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
+-	BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
+-	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
+-	BPF_MOV64_IMM(BPF_REG_3, 0),
+-	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
+-	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
++	/* Limit r3 range to [1, 64] */
++	BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 63),
++	BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 1),
++	BPF_MOV64_IMM(BPF_REG_4, 0),
++	/* Call bpf_ringbuf_output(), it is one of a few helper functions with
++	 * ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode.
++	 * For unpriv this should signal an error, because memory region [1, 64]
++	 * at &fp[-64] is not fully initialized.
++	 */
++	BPF_EMIT_CALL(BPF_FUNC_ringbuf_output),
++	BPF_MOV64_IMM(BPF_REG_0, 0),
+ 	BPF_EXIT_INSN(),
+ 	},
+-	.errstr = "invalid indirect read from stack R1 off -64+32 size 64",
+-	.result = REJECT,
+-	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
++	.fixup_map_ringbuf = { 3 },
++	.errstr_unpriv = "invalid indirect read from stack R2 off -64+32 size 64",
++	.result_unpriv = REJECT,
++	/* in privileged mode reads from uninitialized stack locations are permitted */
++	.result = ACCEPT,
+ },
+ {
+ 	"helper access to variable memory: 8 bytes no leak (init memory)",
+--- a/tools/testing/selftests/bpf/verifier/int_ptr.c
++++ b/tools/testing/selftests/bpf/verifier/int_ptr.c
+@@ -54,12 +54,13 @@
+ 	/* bpf_strtoul() */
+ 	BPF_EMIT_CALL(BPF_FUNC_strtoul),
+
+-	BPF_MOV64_IMM(BPF_REG_0, 1),
++	BPF_MOV64_IMM(BPF_REG_0, 0),
+ 	BPF_EXIT_INSN(),
+ 	},
+-	.result = REJECT,
+-	.prog_type = BPF_PROG_TYPE_CGROUP_SYSCTL,
+-	.errstr = "invalid indirect read from stack R4 off -16+4 size 8",
++	.result_unpriv = REJECT,
++	.errstr_unpriv = "invalid indirect read from stack R4 off -16+4 size 8",
++	/* in privileged mode reads from uninitialized stack locations are permitted */
++	.result = ACCEPT,
+ },
+ {
+ 	"ARG_PTR_TO_LONG misaligned",
+--- a/tools/testing/selftests/bpf/verifier/search_pruning.c
++++ b/tools/testing/selftests/bpf/verifier/search_pruning.c
+@@ -128,9 +128,10 @@
+ 	BPF_EXIT_INSN(),
+ 	},
+ 	.fixup_map_hash_8b = { 3 },
+-	.errstr = "invalid read from stack off -16+0 size 8",
+-	.result = REJECT,
+-	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
++	.errstr_unpriv = "invalid read from stack off -16+0 size 8",
++	.result_unpriv = REJECT,
++	/* in privileged mode reads from uninitialized stack locations are permitted */
++	.result = ACCEPT,
+ },
+ {
+ 	"allocated_stack",
+@@ -187,6 +188,8 @@
+ 	BPF_EXIT_INSN(),
+ 	},
+ 	.flags = BPF_F_TEST_STATE_FREQ,
+-	.errstr = "invalid read from stack off -8+1 size 8",
+-	.result = REJECT,
++	.errstr_unpriv = "invalid read from stack off -8+1 size 8",
++	.result_unpriv = REJECT,
++	/* in privileged mode reads from uninitialized stack locations are permitted */
++	.result = ACCEPT,
+ },
+--- a/tools/testing/selftests/bpf/verifier/sock.c
++++ b/tools/testing/selftests/bpf/verifier/sock.c
+@@ -531,33 +531,6 @@
+ 	.result = ACCEPT,
+ },
+ {
+-	"sk_storage_get(map, skb->sk, &stack_value, 1): partially init stack_value",
+-	.insns = {
+-	BPF_MOV64_IMM(BPF_REG_2, 0),
+-	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8),
+-	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+-	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+-	BPF_MOV64_IMM(BPF_REG_0, 0),
+-	BPF_EXIT_INSN(),
+-	BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+-	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+-	BPF_MOV64_IMM(BPF_REG_0, 0),
+-	BPF_EXIT_INSN(),
+-	BPF_MOV64_IMM(BPF_REG_4, 1),
+-	BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
+-	BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -8),
+-	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+-	BPF_LD_MAP_FD(BPF_REG_1, 0),
+-	BPF_EMIT_CALL(BPF_FUNC_sk_storage_get),
+-	BPF_MOV64_IMM(BPF_REG_0, 0),
+-	BPF_EXIT_INSN(),
+-	},
+-	.fixup_sk_storage_map = { 14 },
+-	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+-	.result = REJECT,
+-	.errstr = "invalid indirect read from stack",
+-},
+-{
"bpf_map_lookup_elem(smap, &key)", + .insns = { + BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0), +--- a/tools/testing/selftests/bpf/verifier/spill_fill.c ++++ b/tools/testing/selftests/bpf/verifier/spill_fill.c +@@ -104,3 +104,214 @@ + .result = ACCEPT, + .retval = POINTER_VALUE, + }, ++{ ++ "Spill and refill a u32 const scalar. Offset to skb->data", ++ .insns = { ++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, ++ offsetof(struct __sk_buff, data)), ++ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, ++ offsetof(struct __sk_buff, data_end)), ++ /* r4 = 20 */ ++ BPF_MOV32_IMM(BPF_REG_4, 20), ++ /* *(u32 *)(r10 -8) = r4 */ ++ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8), ++ /* r4 = *(u32 *)(r10 -8) */ ++ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -8), ++ /* r0 = r2 */ ++ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), ++ /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=20 */ ++ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4), ++ /* if (r0 > r3) R0=pkt,off=20 R2=pkt R3=pkt_end R4=20 */ ++ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), ++ /* r0 = *(u32 *)r2 R0=pkt,off=20,r=20 R2=pkt,r=20 R3=pkt_end R4=20 */ ++ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0), ++ BPF_MOV64_IMM(BPF_REG_0, 0), ++ BPF_EXIT_INSN(), ++ }, ++ .result = ACCEPT, ++ .prog_type = BPF_PROG_TYPE_SCHED_CLS, ++}, ++{ ++ "Spill a u32 const, refill from another half of the uninit u32 from the stack", ++ .insns = { ++ /* r4 = 20 */ ++ BPF_MOV32_IMM(BPF_REG_4, 20), ++ /* *(u32 *)(r10 -8) = r4 */ ++ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8), ++ /* r4 = *(u32 *)(r10 -4) fp-8=????rrrr*/ ++ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -4), ++ BPF_MOV64_IMM(BPF_REG_0, 0), ++ BPF_EXIT_INSN(), ++ }, ++ .result_unpriv = REJECT, ++ .errstr_unpriv = "invalid read from stack off -4+0 size 4", ++ /* in privileged mode reads from uninitialized stack locations are permitted */ ++ .result = ACCEPT, ++}, ++{ ++ "Spill a u32 const scalar. Refill as u16. Offset to skb->data", ++ .insns = { ++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, ++ offsetof(struct __sk_buff, data)), ++ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, ++ offsetof(struct __sk_buff, data_end)), ++ /* r4 = 20 */ ++ BPF_MOV32_IMM(BPF_REG_4, 20), ++ /* *(u32 *)(r10 -8) = r4 */ ++ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8), ++ /* r4 = *(u16 *)(r10 -8) */ ++ BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -8), ++ /* r0 = r2 */ ++ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), ++ /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */ ++ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4), ++ /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */ ++ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), ++ /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */ ++ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0), ++ BPF_MOV64_IMM(BPF_REG_0, 0), ++ BPF_EXIT_INSN(), ++ }, ++ .result = REJECT, ++ .errstr = "invalid access to packet", ++ .prog_type = BPF_PROG_TYPE_SCHED_CLS, ++}, ++{ ++ "Spill u32 const scalars. Refill as u64. 
Offset to skb->data", ++ .insns = { ++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, ++ offsetof(struct __sk_buff, data)), ++ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, ++ offsetof(struct __sk_buff, data_end)), ++ /* r6 = 0 */ ++ BPF_MOV32_IMM(BPF_REG_6, 0), ++ /* r7 = 20 */ ++ BPF_MOV32_IMM(BPF_REG_7, 20), ++ /* *(u32 *)(r10 -4) = r6 */ ++ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_6, -4), ++ /* *(u32 *)(r10 -8) = r7 */ ++ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, -8), ++ /* r4 = *(u64 *)(r10 -8) */ ++ BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -8), ++ /* r0 = r2 */ ++ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), ++ /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */ ++ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4), ++ /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */ ++ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), ++ /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */ ++ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0), ++ BPF_MOV64_IMM(BPF_REG_0, 0), ++ BPF_EXIT_INSN(), ++ }, ++ .result = REJECT, ++ .errstr = "invalid access to packet", ++ .prog_type = BPF_PROG_TYPE_SCHED_CLS, ++}, ++{ ++ "Spill a u32 const scalar. Refill as u16 from fp-6. Offset to skb->data", ++ .insns = { ++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, ++ offsetof(struct __sk_buff, data)), ++ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, ++ offsetof(struct __sk_buff, data_end)), ++ /* r4 = 20 */ ++ BPF_MOV32_IMM(BPF_REG_4, 20), ++ /* *(u32 *)(r10 -8) = r4 */ ++ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8), ++ /* r4 = *(u16 *)(r10 -6) */ ++ BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -6), ++ /* r0 = r2 */ ++ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), ++ /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */ ++ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4), ++ /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */ ++ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), ++ /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */ ++ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0), ++ BPF_MOV64_IMM(BPF_REG_0, 0), ++ BPF_EXIT_INSN(), ++ }, ++ .result = REJECT, ++ .errstr = "invalid access to packet", ++ .prog_type = BPF_PROG_TYPE_SCHED_CLS, ++}, ++{ ++ "Spill and refill a u32 const scalar at non 8byte aligned stack addr. Offset to skb->data", ++ .insns = { ++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, ++ offsetof(struct __sk_buff, data)), ++ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, ++ offsetof(struct __sk_buff, data_end)), ++ /* r4 = 20 */ ++ BPF_MOV32_IMM(BPF_REG_4, 20), ++ /* *(u32 *)(r10 -8) = r4 */ ++ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8), ++ /* *(u32 *)(r10 -4) = r4 */ ++ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -4), ++ /* r4 = *(u32 *)(r10 -4), */ ++ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -4), ++ /* r0 = r2 */ ++ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), ++ /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=U32_MAX */ ++ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4), ++ /* if (r0 > r3) R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4= */ ++ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), ++ /* r0 = *(u32 *)r2 R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4= */ ++ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0), ++ BPF_MOV64_IMM(BPF_REG_0, 0), ++ BPF_EXIT_INSN(), ++ }, ++ .result = REJECT, ++ .errstr = "invalid access to packet", ++ .prog_type = BPF_PROG_TYPE_SCHED_CLS, ++}, ++{ ++ "Spill and refill a umax=40 bounded scalar. 
Offset to skb->data", ++ .insns = { ++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, ++ offsetof(struct __sk_buff, data)), ++ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, ++ offsetof(struct __sk_buff, data_end)), ++ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, ++ offsetof(struct __sk_buff, tstamp)), ++ BPF_JMP_IMM(BPF_JLE, BPF_REG_4, 40, 2), ++ BPF_MOV64_IMM(BPF_REG_0, 0), ++ BPF_EXIT_INSN(), ++ /* *(u32 *)(r10 -8) = r4 R4=umax=40 */ ++ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8), ++ /* r4 = (*u32 *)(r10 - 8) */ ++ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -8), ++ /* r2 += r4 R2=pkt R4=umax=40 */ ++ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_4), ++ /* r0 = r2 R2=pkt,umax=40 R4=umax=40 */ ++ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), ++ /* r2 += 20 R0=pkt,umax=40 R2=pkt,umax=40 */ ++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 20), ++ /* if (r2 > r3) R0=pkt,umax=40 R2=pkt,off=20,umax=40 */ ++ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_3, 1), ++ /* r0 = *(u32 *)r0 R0=pkt,r=20,umax=40 R2=pkt,off=20,r=20,umax=40 */ ++ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0), ++ BPF_MOV64_IMM(BPF_REG_0, 0), ++ BPF_EXIT_INSN(), ++ }, ++ .result = ACCEPT, ++ .prog_type = BPF_PROG_TYPE_SCHED_CLS, ++}, ++{ ++ "Spill a u32 scalar at fp-4 and then at fp-8", ++ .insns = { ++ /* r4 = 4321 */ ++ BPF_MOV32_IMM(BPF_REG_4, 4321), ++ /* *(u32 *)(r10 -4) = r4 */ ++ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -4), ++ /* *(u32 *)(r10 -8) = r4 */ ++ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8), ++ /* r4 = *(u64 *)(r10 -8) */ ++ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), ++ BPF_MOV64_IMM(BPF_REG_0, 0), ++ BPF_EXIT_INSN(), ++ }, ++ .result = ACCEPT, ++ .prog_type = BPF_PROG_TYPE_SCHED_CLS, ++}, +--- a/tools/testing/selftests/bpf/verifier/var_off.c ++++ b/tools/testing/selftests/bpf/verifier/var_off.c +@@ -213,31 +213,6 @@ + .prog_type = BPF_PROG_TYPE_LWT_IN, + }, + { +- "indirect variable-offset stack access, max_off+size > max_initialized", +- .insns = { +- /* Fill only the second from top 8 bytes of the stack. */ +- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0), +- /* Get an unknown value. */ +- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), +- /* Make it small and 4-byte aligned. */ +- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4), +- BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 16), +- /* Add it to fp. We now have either fp-12 or fp-16, but we don't know +- * which. fp-12 size 8 is partially uninitialized stack. +- */ +- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10), +- /* Dereference it indirectly. */ +- BPF_LD_MAP_FD(BPF_REG_1, 0), +- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), +- BPF_MOV64_IMM(BPF_REG_0, 0), +- BPF_EXIT_INSN(), +- }, +- .fixup_map_hash_8b = { 5 }, +- .errstr = "invalid indirect read from stack R2 var_off", +- .result = REJECT, +- .prog_type = BPF_PROG_TYPE_LWT_IN, +-}, +-{ + "indirect variable-offset stack access, min_off < min_initialized", + .insns = { + /* Fill only the top 8 bytes of the stack. */ +@@ -290,33 +265,6 @@ + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, + }, + { +- "indirect variable-offset stack access, uninitialized", +- .insns = { +- BPF_MOV64_IMM(BPF_REG_2, 6), +- BPF_MOV64_IMM(BPF_REG_3, 28), +- /* Fill the top 16 bytes of the stack. */ +- BPF_ST_MEM(BPF_W, BPF_REG_10, -16, 0), +- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), +- /* Get an unknown value. */ +- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, 0), +- /* Make it small and 4-byte aligned. */ +- BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 4), +- BPF_ALU64_IMM(BPF_SUB, BPF_REG_4, 16), +- /* Add it to fp. 
+-	 * which, but either way it points to initialized stack.
+-	 */
+-	BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_10),
+-	BPF_MOV64_IMM(BPF_REG_5, 8),
+-	/* Dereference it indirectly. */
+-	BPF_EMIT_CALL(BPF_FUNC_getsockopt),
+-	BPF_MOV64_IMM(BPF_REG_0, 0),
+-	BPF_EXIT_INSN(),
+-	},
+-	.errstr = "invalid indirect read from stack R4 var_off",
+-	.result = REJECT,
+-	.prog_type = BPF_PROG_TYPE_SOCK_OPS,
+-},
+-{
+ 	"indirect variable-offset stack access, ok",
+ 	.insns = {
+ 	/* Fill the top 16 bytes of the stack. */
diff --git a/queue-5.10/ipv6-annotate-data-races-around-cnf.disable_ipv6.patch b/queue-5.10/ipv6-annotate-data-races-around-cnf.disable_ipv6.patch
new file mode 100644
index 00000000000..fe3592ce023
--- /dev/null
+++ b/queue-5.10/ipv6-annotate-data-races-around-cnf.disable_ipv6.patch
@@ -0,0 +1,89 @@
+From d289ab65b89c1d4d88417cb6c03e923f21f95fae Mon Sep 17 00:00:00 2001
+From: Eric Dumazet
+Date: Wed, 28 Feb 2024 13:54:26 +0000
+Subject: ipv6: annotate data-races around cnf.disable_ipv6
+
+From: Eric Dumazet
+
+commit d289ab65b89c1d4d88417cb6c03e923f21f95fae upstream.
+
+disable_ipv6 is read locklessly, add appropriate READ_ONCE()
+and WRITE_ONCE() annotations.
+
+v2: do not preload net before rtnl_trylock() in
+    addrconf_disable_ipv6() (Jiri)
+
+Signed-off-by: Eric Dumazet
+Reviewed-by: Jiri Pirko
+Signed-off-by: David S. Miller
+Signed-off-by: Sasha Levin
+[Ashwin: Regenerated the Patch for v5.10]
+Signed-off-by: Ashwin Dayanand Kamat
+Signed-off-by: Greg Kroah-Hartman
+---
+ net/ipv6/addrconf.c   | 9 +++++----
+ net/ipv6/ip6_input.c  | 2 +-
+ net/ipv6/ip6_output.c | 2 +-
+ 3 files changed, 7 insertions(+), 6 deletions(-)
+
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -4107,7 +4107,7 @@ static void addrconf_dad_work(struct wor
+ 		if (!ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) &&
+ 		    ipv6_addr_equal(&ifp->addr, &addr)) {
+ 			/* DAD failed for link-local based on MAC */
+-			idev->cnf.disable_ipv6 = 1;
++			WRITE_ONCE(idev->cnf.disable_ipv6, 1);
+
+ 			pr_info("%s: IPv6 being disabled!\n",
+ 				ifp->idev->dev->name);
+@@ -6220,7 +6220,8 @@ static void addrconf_disable_change(stru
+ 		idev = __in6_dev_get(dev);
+ 		if (idev) {
+ 			int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
+-			idev->cnf.disable_ipv6 = newf;
++
++			WRITE_ONCE(idev->cnf.disable_ipv6, newf);
+ 			if (changed)
+ 				dev_disable_change(idev);
+ 		}
+@@ -6237,7 +6238,7 @@ static int addrconf_disable_ipv6(struct
+
+ 	net = (struct net *)table->extra2;
+ 	old = *p;
+-	*p = newf;
++	WRITE_ONCE(*p, newf);
+
+ 	if (p == &net->ipv6.devconf_dflt->disable_ipv6) {
+ 		rtnl_unlock();
+@@ -6245,7 +6246,7 @@ static int addrconf_disable_ipv6(struct
+ 	}
+
+ 	if (p == &net->ipv6.devconf_all->disable_ipv6) {
+-		net->ipv6.devconf_dflt->disable_ipv6 = newf;
++		WRITE_ONCE(net->ipv6.devconf_dflt->disable_ipv6, newf);
+ 		addrconf_disable_change(net, newf);
+ 	} else if ((!newf) ^ (!old))
+ 		dev_disable_change((struct inet6_dev *)table->extra1);
+--- a/net/ipv6/ip6_input.c
++++ b/net/ipv6/ip6_input.c
+@@ -165,7 +165,7 @@ static struct sk_buff *ip6_rcv_core(stru
+ 	__IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_IN, skb->len);
+
+ 	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL ||
+-	    !idev || unlikely(idev->cnf.disable_ipv6)) {
++	    !idev || unlikely(READ_ONCE(idev->cnf.disable_ipv6))) {
+ 		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
+ 		goto drop;
+ 	}
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -240,7 +240,7 @@ int ip6_output(struct net *net, struct s
+ 	skb->protocol = htons(ETH_P_IPV6);
+ 	skb->dev = dev;
+
+-	if (unlikely(idev->cnf.disable_ipv6)) {
++	if (unlikely(READ_ONCE(idev->cnf.disable_ipv6))) {
+ 		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
+ 		kfree_skb(skb);
+ 		return 0;
diff --git a/queue-5.10/ipv6-prevent-null-dereference-in-ip6_output.patch b/queue-5.10/ipv6-prevent-null-dereference-in-ip6_output.patch
new file mode 100644
index 00000000000..4fb2a188e2a
--- /dev/null
+++ b/queue-5.10/ipv6-prevent-null-dereference-in-ip6_output.patch
@@ -0,0 +1,80 @@
+From 4db783d68b9b39a411a96096c10828ff5dfada7a Mon Sep 17 00:00:00 2001
+From: Eric Dumazet
+Date: Tue, 7 May 2024 16:18:42 +0000
+Subject: ipv6: prevent NULL dereference in ip6_output()
+
+From: Eric Dumazet
+
+commit 4db783d68b9b39a411a96096c10828ff5dfada7a upstream.
+
+According to syzbot, there is a chance that ip6_dst_idev()
+returns NULL in ip6_output(). Most places in the IPv6 stack
+deal with a NULL idev just fine, but not here.
+
+syzbot reported:
+
+general protection fault, probably for non-canonical address 0xdffffc00000000bc: 0000 [#1] PREEMPT SMP KASAN PTI
+KASAN: null-ptr-deref in range [0x00000000000005e0-0x00000000000005e7]
+CPU: 0 PID: 9775 Comm: syz-executor.4 Not tainted 6.9.0-rc5-syzkaller-00157-g6a30653b604a #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 03/27/2024
+ RIP: 0010:ip6_output+0x231/0x3f0 net/ipv6/ip6_output.c:237
+Code: 3c 1e 00 49 89 df 74 08 4c 89 ef e8 19 58 db f7 48 8b 44 24 20 49 89 45 00 49 89 c5 48 8d 9d e0 05 00 00 48 89 d8 48 c1 e8 03 <42> 0f b6 04 38 84 c0 4c 8b 74 24 28 0f 85 61 01 00 00 8b 1b 31 ff
+RSP: 0018:ffffc9000927f0d8 EFLAGS: 00010202
+RAX: 00000000000000bc RBX: 00000000000005e0 RCX: 0000000000040000
+RDX: ffffc900131f9000 RSI: 0000000000004f47 RDI: 0000000000004f48
+RBP: 0000000000000000 R08: ffffffff8a1f0b9a R09: 1ffffffff1f51fad
+R10: dffffc0000000000 R11: fffffbfff1f51fae R12: ffff8880293ec8c0
+R13: ffff88805d7fc000 R14: 1ffff1100527d91a R15: dffffc0000000000
+FS:  00007f135c6856c0(0000) GS:ffff8880b9400000(0000) knlGS:0000000000000000
+CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 0000000020000080 CR3: 0000000064096000 CR4: 00000000003506f0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+Call Trace:
+ <TASK>
+ NF_HOOK include/linux/netfilter.h:314 [inline]
+ ip6_xmit+0xefe/0x17f0 net/ipv6/ip6_output.c:358
+ sctp_v6_xmit+0x9f2/0x13f0 net/sctp/ipv6.c:248
+ sctp_packet_transmit+0x26ad/0x2ca0 net/sctp/output.c:653
+ sctp_packet_singleton+0x22c/0x320 net/sctp/outqueue.c:783
+ sctp_outq_flush_ctrl net/sctp/outqueue.c:914 [inline]
+ sctp_outq_flush+0x6d5/0x3e20 net/sctp/outqueue.c:1212
+ sctp_side_effects net/sctp/sm_sideeffect.c:1198 [inline]
+ sctp_do_sm+0x59cc/0x60c0 net/sctp/sm_sideeffect.c:1169
+ sctp_primitive_ASSOCIATE+0x95/0xc0 net/sctp/primitive.c:73
+ __sctp_connect+0x9cd/0xe30 net/sctp/socket.c:1234
+ sctp_connect net/sctp/socket.c:4819 [inline]
+ sctp_inet_connect+0x149/0x1f0 net/sctp/socket.c:4834
+ __sys_connect_file net/socket.c:2048 [inline]
+ __sys_connect+0x2df/0x310 net/socket.c:2065
+ __do_sys_connect net/socket.c:2075 [inline]
+ __se_sys_connect net/socket.c:2072 [inline]
+ __x64_sys_connect+0x7a/0x90 net/socket.c:2072
+ do_syscall_x64 arch/x86/entry/common.c:52 [inline]
+ do_syscall_64+0xf5/0x240 arch/x86/entry/common.c:83
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+
+Fixes: 778d80be5269 ("ipv6: Add disable_ipv6 sysctl to disable IPv6 operaion on specific interface.")
+Reported-by: syzbot
+Signed-off-by: Eric Dumazet
+Reviewed-by: Larysa Zaremba
+Link: https://lore.kernel.org/r/20240507161842.773961-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski
+[Ashwin: Regenerated the Patch for v5.10]
+Signed-off-by: Ashwin Dayanand Kamat
+Signed-off-by: Greg Kroah-Hartman
+---
+ net/ipv6/ip6_output.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -240,7 +240,7 @@ int ip6_output(struct net *net, struct s
+ 	skb->protocol = htons(ETH_P_IPV6);
+ 	skb->dev = dev;
+
+-	if (unlikely(READ_ONCE(idev->cnf.disable_ipv6))) {
++	if (unlikely(!idev || READ_ONCE(idev->cnf.disable_ipv6))) {
+ 		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
+ 		kfree_skb(skb);
+ 		return 0;
diff --git a/queue-5.10/series b/queue-5.10/series
index befe6533681..b46cd742b1d 100644
--- a/queue-5.10/series
+++ b/queue-5.10/series
@@ -93,3 +93,6 @@ wireguard-queueing-annotate-intentional-data-race-in-cpu-round-robin.patch
 wireguard-send-annotate-intentional-data-race-in-checking-empty-queue.patch
 x86-retpoline-move-a-noendbr-annotation-to-the-srso-dummy-return-thunk.patch
 efi-ia64-move-ia64-only-declarations-to-new-asm-efi.h-header.patch
+ipv6-annotate-data-races-around-cnf.disable_ipv6.patch
+ipv6-prevent-null-dereference-in-ip6_output.patch
+bpf-allow-reads-from-uninit-stack.patch