git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.1-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 15 Jul 2024 12:09:33 +0000 (14:09 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 15 Jul 2024 12:09:33 +0000 (14:09 +0200)
added patches:
bpf-allow-reads-from-uninit-stack.patch
cifs-use-origin-fullpath-for-automounts.patch

queue-6.1/bpf-allow-reads-from-uninit-stack.patch [new file with mode: 0644]
queue-6.1/cifs-use-origin-fullpath-for-automounts.patch [new file with mode: 0644]
queue-6.1/series

diff --git a/queue-6.1/bpf-allow-reads-from-uninit-stack.patch b/queue-6.1/bpf-allow-reads-from-uninit-stack.patch
new file mode 100644 (file)
index 0000000..0ce2a4b
--- /dev/null
@@ -0,0 +1,594 @@
+From 6715df8d5d24655b9fd368e904028112b54c7de1 Mon Sep 17 00:00:00 2001
+From: Eduard Zingerman <eddyz87@gmail.com>
+Date: Sun, 19 Feb 2023 22:04:26 +0200
+Subject: bpf: Allow reads from uninit stack
+
+From: Eduard Zingerman <eddyz87@gmail.com>
+
+commit 6715df8d5d24655b9fd368e904028112b54c7de1 upstream.
+
+This commit updates the following functions to allow reads from
+uninitialized stack locations when the env->allow_uninit_stack option
+is enabled:
+- check_stack_read_fixed_off()
+- check_stack_range_initialized(), called from:
+  - check_stack_read_var_off()
+  - check_helper_mem_access()
+
+This change allows the logic in stacksafe() to be relaxed to treat
+STACK_MISC and STACK_INVALID in the same way and to make the following
+stack slot configurations equivalent:
+
+  |  Cached state    |  Current state   |
+  |   stack slot     |   stack slot     |
+  |------------------+------------------|
+  | STACK_INVALID or | STACK_INVALID or |
+  | STACK_MISC       | STACK_SPILL   or |
+  |                  | STACK_MISC    or |
+  |                  | STACK_ZERO    or |
+  |                  | STACK_DYNPTR     |
+
+This leads to significant verification speed gains (see below).
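+
+For illustration only (not part of the original change), here is a
+minimal sketch of a program that a privileged loader can now verify,
+assuming the usual clang/libbpf build setup:
+
+    #include <linux/bpf.h>
+    #include <bpf/bpf_helpers.h>
+
+    char LICENSE[] SEC("license") = "GPL";
+
+    SEC("tracepoint/syscalls/sys_enter_nanosleep")
+    int read_uninit_stack(void *ctx)
+    {
+        __u64 buf[8];   /* deliberately left uninitialized */
+
+        /* The slots backing buf[0] are STACK_INVALID at this point;
+         * with env->allow_uninit_stack the read is accepted instead
+         * of triggering "invalid read from stack".  Unprivileged
+         * loads are still rejected.
+         */
+        return buf[0] & 1;
+    }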
+
+The idea was suggested by Andrii Nakryiko [1] and an initial patch was
+created by Alexei Starovoitov [2].
+
+Currently, env->allow_uninit_stack is enabled for programs loaded by
+users with the CAP_PERFMON or CAP_SYS_ADMIN capability.
+
+A number of test cases from verifier/*.c were expecting uninitialized
+stack access to be an error. These test cases were updated to execute
+in unprivileged mode (thus preserving the tests).
+
+The test progs/test_global_func10.c expected the "invalid indirect read
+from stack" error message because of an access to an uninitialized
+memory region. This error is no longer possible in privileged mode. The
+test is updated to provoke an "invalid indirect access to stack" error
+instead, caused by an access to an invalid stack address (an error not
+otherwise covered by the progs/test_global_func*.c series of tests).
+
+The following tests had to be removed because they cannot be made
+unprivileged:
+- verifier/sock.c:
+  - "sk_storage_get(map, skb->sk, &stack_value, 1): partially init
+  stack_value"
+  BPF_PROG_TYPE_SCHED_CLS programs are not executed in unprivileged mode.
+- verifier/var_off.c:
+  - "indirect variable-offset stack access, max_off+size > max_initialized"
+  - "indirect variable-offset stack access, uninitialized"
+  These tests verify that access to uninitialized stack values is
+  detected when the stack offset is not a constant. However, variable
+  stack access is prohibited in unprivileged mode, so these tests are
+  no longer valid.
+
+ * * *
+
+Here is a veristat log comparing this patch with current master on a
+set of selftest binaries listed in tools/testing/selftests/bpf/veristat.cfg
+and cilium BPF binaries (see [3]):
+
+$ ./veristat -e file,prog,states -C -f 'states_pct<-30' master.log current.log
+File                        Program                     States (A)  States (B)  States    (DIFF)
+--------------------------  --------------------------  ----------  ----------  ----------------
+bpf_host.o                  tail_handle_ipv6_from_host         349         244    -105 (-30.09%)
+bpf_host.o                  tail_handle_nat_fwd_ipv4          1320         895    -425 (-32.20%)
+bpf_lxc.o                   tail_handle_nat_fwd_ipv4          1320         895    -425 (-32.20%)
+bpf_sock.o                  cil_sock4_connect                   70          48     -22 (-31.43%)
+bpf_sock.o                  cil_sock4_sendmsg                   68          46     -22 (-32.35%)
+bpf_xdp.o                   tail_handle_nat_fwd_ipv4          1554         803    -751 (-48.33%)
+bpf_xdp.o                   tail_lb_ipv4                      6457        2473   -3984 (-61.70%)
+bpf_xdp.o                   tail_lb_ipv6                      7249        3908   -3341 (-46.09%)
+pyperf600_bpf_loop.bpf.o    on_event                           287         145    -142 (-49.48%)
+strobemeta.bpf.o            on_event                         15915        4772  -11143 (-70.02%)
+strobemeta_nounroll2.bpf.o  on_event                         17087        3820  -13267 (-77.64%)
+xdp_synproxy_kern.bpf.o     syncookie_tc                     21271        6635  -14636 (-68.81%)
+xdp_synproxy_kern.bpf.o     syncookie_xdp                    23122        6024  -17098 (-73.95%)
+--------------------------  --------------------------  ----------  ----------  ----------------
+
+Note: I limited the selection to programs with states_pct < -30%.
+
+Inspection of differences in pyperf600_bpf_loop behavior shows that
+the following patch for the test removes almost all differences:
+
+    - a/tools/testing/selftests/bpf/progs/pyperf.h
+    + b/tools/testing/selftests/bpf/progs/pyperf.h
+    @ -266,8 +266,8 @ int __on_event(struct bpf_raw_tracepoint_args *ctx)
+            }
+
+            if (event->pthread_match || !pidData->use_tls) {
+    -               void* frame_ptr;
+    -               FrameData frame;
+    +               void* frame_ptr = 0;
+    +               FrameData frame = {};
+                    Symbol sym = {};
+                    int cur_cpu = bpf_get_smp_processor_id();
+
+Without this patch the difference comes from the following pattern
+(for different variables):
+
+    static bool get_frame_data(... FrameData *frame ...)
+    {
+        ...
+        bpf_probe_read_user(&frame->f_code, ...);
+        if (!frame->f_code)
+            return false;
+        ...
+        bpf_probe_read_user(&frame->co_name, ...);
+        if (frame->co_name)
+            ...;
+    }
+
+    int __on_event(struct bpf_raw_tracepoint_args *ctx)
+    {
+        FrameData frame;
+        ...
+        get_frame_data(... &frame ...) // indirectly via a bpf_loop & callback
+        ...
+    }
+
+    SEC("raw_tracepoint/kfree_skb")
+    int on_event(struct bpf_raw_tracepoint_args* ctx)
+    {
+        ...
+        ret |= __on_event(ctx);
+        ret |= __on_event(ctx);
+        ...
+    }
+
+With regard to the value `frame->co_name`, the following is important:
+- Because of the conditional `if (!frame->f_code)`, each call to
+  __on_event() produces two states: one with `frame->co_name` marked
+  as STACK_MISC and another with it left as is (marked STACK_INVALID
+  on the first call).
+- The call to bpf_probe_read_user() does not mark the stack slots
+  corresponding to `&frame->co_name` as REG_LIVE_WRITTEN; it marks
+  these slots as STACK_MISC. This happens because of the following
+  loop in check_helper_call():
+
+       for (i = 0; i < meta.access_size; i++) {
+               err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
+                                      BPF_WRITE, -1, false);
+               if (err)
+                       return err;
+       }
+
+  Note the size of the write: it is a one-byte write for each byte
+  touched by the helper. A BPF_B write does not lead to write marks
+  for the target stack slot.
+- This means that, without this patch, when the second __on_event()
+  call is verified, `if (frame->co_name)` propagates read marks first
+  to a stack slot with STACK_MISC marks and then to a stack slot with
+  STACK_INVALID marks, and these states are considered different (the
+  relaxed comparison is sketched below).
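+
+A condensed sketch of the resulting slot comparison in stacksafe()
+(simplified; liveness propagation and spilled-pointer checks omitted):
+
+    /* a slot that is STACK_INVALID in the cached state matches anything */
+    if (old_type == STACK_INVALID)
+        continue;
+    /* with allow_uninit_stack, a cached STACK_MISC slot also matches
+     * any slot type in the current state
+     */
+    if (env->allow_uninit_stack && old_type == STACK_MISC)
+        continue;
+    /* otherwise the cached and current slot types must still agree */
+    if (old_type != cur_type)
+        return false;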
+
+[1] https://lore.kernel.org/bpf/CAEf4BzY3e+ZuC6HUa8dCiUovQRg2SzEk7M-dSkqNZyn=xEmnPA@mail.gmail.com/
+[2] https://lore.kernel.org/bpf/CAADnVQKs2i1iuZ5SUGuJtxWVfGYR9kDgYKhq3rNV+kBLQCu7rA@mail.gmail.com/
+[3] git@github.com:anakryiko/cilium.git
+
+Suggested-by: Andrii Nakryiko <andrii@kernel.org>
+Co-developed-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
+Acked-by: Andrii Nakryiko <andrii@kernel.org>
+Link: https://lore.kernel.org/r/20230219200427.606541-2-eddyz87@gmail.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Maxim Mikityanskiy <maxim@isovalent.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/verifier.c                                        |   11 +
+ tools/testing/selftests/bpf/progs/test_global_func10.c       |    9 
+ tools/testing/selftests/bpf/verifier/calls.c                 |   13 -
+ tools/testing/selftests/bpf/verifier/helper_access_var_len.c |  104 +++++++----
+ tools/testing/selftests/bpf/verifier/int_ptr.c               |    9 
+ tools/testing/selftests/bpf/verifier/search_pruning.c        |   13 -
+ tools/testing/selftests/bpf/verifier/sock.c                  |   27 --
+ tools/testing/selftests/bpf/verifier/spill_fill.c            |    7 
+ tools/testing/selftests/bpf/verifier/var_off.c               |   52 -----
+ 9 files changed, 109 insertions(+), 136 deletions(-)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -3599,6 +3599,8 @@ static int check_stack_read_fixed_off(st
+                                               continue;
+                                       if (type == STACK_MISC)
+                                               continue;
++                                      if (type == STACK_INVALID && env->allow_uninit_stack)
++                                              continue;
+                                       verbose(env, "invalid read from stack off %d+%d size %d\n",
+                                               off, i, size);
+                                       return -EACCES;
+@@ -3636,6 +3638,8 @@ static int check_stack_read_fixed_off(st
+                               continue;
+                       if (type == STACK_ZERO)
+                               continue;
++                      if (type == STACK_INVALID && env->allow_uninit_stack)
++                              continue;
+                       verbose(env, "invalid read from stack off %d+%d size %d\n",
+                               off, i, size);
+                       return -EACCES;
+@@ -5426,7 +5430,8 @@ static int check_stack_range_initialized
+               stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
+               if (*stype == STACK_MISC)
+                       goto mark;
+-              if (*stype == STACK_ZERO) {
++              if ((*stype == STACK_ZERO) ||
++                  (*stype == STACK_INVALID && env->allow_uninit_stack)) {
+                       if (clobber) {
+                               /* helper can write anything into the stack */
+                               *stype = STACK_MISC;
+@@ -11967,6 +11972,10 @@ static bool stacksafe(struct bpf_verifie
+               if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
+                       continue;
++              if (env->allow_uninit_stack &&
++                  old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC)
++                      continue;
++
+               /* explored stack has more populated slots than current stack
+                * and these slots were used
+                */
+--- a/tools/testing/selftests/bpf/progs/test_global_func10.c
++++ b/tools/testing/selftests/bpf/progs/test_global_func10.c
+@@ -4,12 +4,12 @@
+ #include <bpf/bpf_helpers.h>
+ struct Small {
+-      int x;
++      long x;
+ };
+ struct Big {
+-      int x;
+-      int y;
++      long x;
++      long y;
+ };
+ __noinline int foo(const struct Big *big)
+@@ -21,7 +21,8 @@ __noinline int foo(const struct Big *big
+ }
+ SEC("cgroup_skb/ingress")
+-int test_cls(struct __sk_buff *skb)
++__failure __msg("invalid indirect access to stack")
++int global_func10(struct __sk_buff *skb)
+ {
+       const struct Small small = {.x = skb->len };
+--- a/tools/testing/selftests/bpf/verifier/calls.c
++++ b/tools/testing/selftests/bpf/verifier/calls.c
+@@ -2221,19 +2221,22 @@
+        * that fp-8 stack slot was unused in the fall-through
+        * branch and will accept the program incorrectly
+        */
+-      BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
++      BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
++      BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 2, 2),
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
++      BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+-      .fixup_map_hash_48b = { 6 },
+-      .errstr = "invalid indirect read from stack R2 off -8+0 size 8",
+-      .result = REJECT,
+-      .prog_type = BPF_PROG_TYPE_XDP,
++      .fixup_map_hash_48b = { 7 },
++      .errstr_unpriv = "invalid indirect read from stack R2 off -8+0 size 8",
++      .result_unpriv = REJECT,
++      /* in privileged mode reads from uninitialized stack locations are permitted */
++      .result = ACCEPT,
+ },
+ {
+       "calls: ctx read at start of subprog",
+--- a/tools/testing/selftests/bpf/verifier/helper_access_var_len.c
++++ b/tools/testing/selftests/bpf/verifier/helper_access_var_len.c
+@@ -29,19 +29,30 @@
+ {
+       "helper access to variable memory: stack, bitwise AND, zero included",
+       .insns = {
+-      BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
+-      BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+-      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+-      BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+-      BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+-      BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
+-      BPF_MOV64_IMM(BPF_REG_3, 0),
+-      BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
++      /* set max stack size */
++      BPF_ST_MEM(BPF_DW, BPF_REG_10, -128, 0),
++      /* set r3 to a random value */
++      BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
++      BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
++      /* use bitwise AND to limit r3 range to [0, 64] */
++      BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 64),
++      BPF_LD_MAP_FD(BPF_REG_1, 0),
++      BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
++      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
++      BPF_MOV64_IMM(BPF_REG_4, 0),
++      /* Call bpf_ringbuf_output(), it is one of a few helper functions with
++       * ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode.
++       * For unpriv this should signal an error, because memory at &fp[-64] is
++       * not initialized.
++       */
++      BPF_EMIT_CALL(BPF_FUNC_ringbuf_output),
+       BPF_EXIT_INSN(),
+       },
+-      .errstr = "invalid indirect read from stack R1 off -64+0 size 64",
+-      .result = REJECT,
+-      .prog_type = BPF_PROG_TYPE_TRACEPOINT,
++      .fixup_map_ringbuf = { 4 },
++      .errstr_unpriv = "invalid indirect read from stack R2 off -64+0 size 64",
++      .result_unpriv = REJECT,
++      /* in privileged mode reads from uninitialized stack locations are permitted */
++      .result = ACCEPT,
+ },
+ {
+       "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
+@@ -183,20 +194,31 @@
+ {
+       "helper access to variable memory: stack, JMP, no min check",
+       .insns = {
+-      BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
+-      BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+-      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+-      BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+-      BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+-      BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
+-      BPF_MOV64_IMM(BPF_REG_3, 0),
+-      BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
++      /* set max stack size */
++      BPF_ST_MEM(BPF_DW, BPF_REG_10, -128, 0),
++      /* set r3 to a random value */
++      BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
++      BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
++      /* use JMP to limit r3 range to [0, 64] */
++      BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 64, 6),
++      BPF_LD_MAP_FD(BPF_REG_1, 0),
++      BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
++      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
++      BPF_MOV64_IMM(BPF_REG_4, 0),
++      /* Call bpf_ringbuf_output(), it is one of a few helper functions with
++       * ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode.
++       * For unpriv this should signal an error, because memory at &fp[-64] is
++       * not initialized.
++       */
++      BPF_EMIT_CALL(BPF_FUNC_ringbuf_output),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+-      .errstr = "invalid indirect read from stack R1 off -64+0 size 64",
+-      .result = REJECT,
+-      .prog_type = BPF_PROG_TYPE_TRACEPOINT,
++      .fixup_map_ringbuf = { 4 },
++      .errstr_unpriv = "invalid indirect read from stack R2 off -64+0 size 64",
++      .result_unpriv = REJECT,
++      /* in privileged mode reads from uninitialized stack locations are permitted */
++      .result = ACCEPT,
+ },
+ {
+       "helper access to variable memory: stack, JMP (signed), no min check",
+@@ -564,29 +586,41 @@
+ {
+       "helper access to variable memory: 8 bytes leak",
+       .insns = {
+-      BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
+-      BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+-      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
++      /* set max stack size */
++      BPF_ST_MEM(BPF_DW, BPF_REG_10, -128, 0),
++      /* set r3 to a random value */
++      BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
++      BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
++      BPF_LD_MAP_FD(BPF_REG_1, 0),
++      BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
++      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
+       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
+       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
+       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
++      /* Note: fp[-32] left uninitialized */
+       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
+       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+-      BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+-      BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
+-      BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
+-      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
+-      BPF_MOV64_IMM(BPF_REG_3, 0),
+-      BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
+-      BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
++      /* Limit r3 range to [1, 64] */
++      BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 63),
++      BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 1),
++      BPF_MOV64_IMM(BPF_REG_4, 0),
++      /* Call bpf_ringbuf_output(), it is one of a few helper functions with
++       * ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode.
++       * For unpriv this should signal an error, because memory region [1, 64]
++       * at &fp[-64] is not fully initialized.
++       */
++      BPF_EMIT_CALL(BPF_FUNC_ringbuf_output),
++      BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+-      .errstr = "invalid indirect read from stack R1 off -64+32 size 64",
+-      .result = REJECT,
+-      .prog_type = BPF_PROG_TYPE_TRACEPOINT,
++      .fixup_map_ringbuf = { 3 },
++      .errstr_unpriv = "invalid indirect read from stack R2 off -64+32 size 64",
++      .result_unpriv = REJECT,
++      /* in privileged mode reads from uninitialized stack locations are permitted */
++      .result = ACCEPT,
+ },
+ {
+       "helper access to variable memory: 8 bytes no leak (init memory)",
+--- a/tools/testing/selftests/bpf/verifier/int_ptr.c
++++ b/tools/testing/selftests/bpf/verifier/int_ptr.c
+@@ -54,12 +54,13 @@
+               /* bpf_strtoul() */
+               BPF_EMIT_CALL(BPF_FUNC_strtoul),
+-              BPF_MOV64_IMM(BPF_REG_0, 1),
++              BPF_MOV64_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+-      .result = REJECT,
+-      .prog_type = BPF_PROG_TYPE_CGROUP_SYSCTL,
+-      .errstr = "invalid indirect read from stack R4 off -16+4 size 8",
++      .result_unpriv = REJECT,
++      .errstr_unpriv = "invalid indirect read from stack R4 off -16+4 size 8",
++      /* in privileged mode reads from uninitialized stack locations are permitted */
++      .result = ACCEPT,
+ },
+ {
+       "ARG_PTR_TO_LONG misaligned",
+--- a/tools/testing/selftests/bpf/verifier/search_pruning.c
++++ b/tools/testing/selftests/bpf/verifier/search_pruning.c
+@@ -128,9 +128,10 @@
+               BPF_EXIT_INSN(),
+       },
+       .fixup_map_hash_8b = { 3 },
+-      .errstr = "invalid read from stack off -16+0 size 8",
+-      .result = REJECT,
+-      .prog_type = BPF_PROG_TYPE_TRACEPOINT,
++      .errstr_unpriv = "invalid read from stack off -16+0 size 8",
++      .result_unpriv = REJECT,
++      /* in privileged mode reads from uninitialized stack locations are permitted */
++      .result = ACCEPT,
+ },
+ {
+       "precision tracking for u32 spill/fill",
+@@ -258,6 +259,8 @@
+       BPF_EXIT_INSN(),
+       },
+       .flags = BPF_F_TEST_STATE_FREQ,
+-      .errstr = "invalid read from stack off -8+1 size 8",
+-      .result = REJECT,
++      .errstr_unpriv = "invalid read from stack off -8+1 size 8",
++      .result_unpriv = REJECT,
++      /* in privileged mode reads from uninitialized stack locations are permitted */
++      .result = ACCEPT,
+ },
+--- a/tools/testing/selftests/bpf/verifier/sock.c
++++ b/tools/testing/selftests/bpf/verifier/sock.c
+@@ -531,33 +531,6 @@
+       .result = ACCEPT,
+ },
+ {
+-      "sk_storage_get(map, skb->sk, &stack_value, 1): partially init stack_value",
+-      .insns = {
+-      BPF_MOV64_IMM(BPF_REG_2, 0),
+-      BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8),
+-      BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+-      BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+-      BPF_MOV64_IMM(BPF_REG_0, 0),
+-      BPF_EXIT_INSN(),
+-      BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+-      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+-      BPF_MOV64_IMM(BPF_REG_0, 0),
+-      BPF_EXIT_INSN(),
+-      BPF_MOV64_IMM(BPF_REG_4, 1),
+-      BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
+-      BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -8),
+-      BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+-      BPF_LD_MAP_FD(BPF_REG_1, 0),
+-      BPF_EMIT_CALL(BPF_FUNC_sk_storage_get),
+-      BPF_MOV64_IMM(BPF_REG_0, 0),
+-      BPF_EXIT_INSN(),
+-      },
+-      .fixup_sk_storage_map = { 14 },
+-      .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+-      .result = REJECT,
+-      .errstr = "invalid indirect read from stack",
+-},
+-{
+       "bpf_map_lookup_elem(smap, &key)",
+       .insns = {
+       BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
+--- a/tools/testing/selftests/bpf/verifier/spill_fill.c
++++ b/tools/testing/selftests/bpf/verifier/spill_fill.c
+@@ -171,9 +171,10 @@
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+-      .result = REJECT,
+-      .errstr = "invalid read from stack off -4+0 size 4",
+-      .prog_type = BPF_PROG_TYPE_SCHED_CLS,
++      .result_unpriv = REJECT,
++      .errstr_unpriv = "invalid read from stack off -4+0 size 4",
++      /* in privileged mode reads from uninitialized stack locations are permitted */
++      .result = ACCEPT,
+ },
+ {
+       "Spill a u32 const scalar.  Refill as u16.  Offset to skb->data",
+--- a/tools/testing/selftests/bpf/verifier/var_off.c
++++ b/tools/testing/selftests/bpf/verifier/var_off.c
+@@ -213,31 +213,6 @@
+       .prog_type = BPF_PROG_TYPE_LWT_IN,
+ },
+ {
+-      "indirect variable-offset stack access, max_off+size > max_initialized",
+-      .insns = {
+-      /* Fill only the second from top 8 bytes of the stack. */
+-      BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
+-      /* Get an unknown value. */
+-      BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+-      /* Make it small and 4-byte aligned. */
+-      BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
+-      BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 16),
+-      /* Add it to fp.  We now have either fp-12 or fp-16, but we don't know
+-       * which. fp-12 size 8 is partially uninitialized stack.
+-       */
+-      BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
+-      /* Dereference it indirectly. */
+-      BPF_LD_MAP_FD(BPF_REG_1, 0),
+-      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+-      BPF_MOV64_IMM(BPF_REG_0, 0),
+-      BPF_EXIT_INSN(),
+-      },
+-      .fixup_map_hash_8b = { 5 },
+-      .errstr = "invalid indirect read from stack R2 var_off",
+-      .result = REJECT,
+-      .prog_type = BPF_PROG_TYPE_LWT_IN,
+-},
+-{
+       "indirect variable-offset stack access, min_off < min_initialized",
+       .insns = {
+       /* Fill only the top 8 bytes of the stack. */
+@@ -290,33 +265,6 @@
+       .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ },
+ {
+-      "indirect variable-offset stack access, uninitialized",
+-      .insns = {
+-      BPF_MOV64_IMM(BPF_REG_2, 6),
+-      BPF_MOV64_IMM(BPF_REG_3, 28),
+-      /* Fill the top 16 bytes of the stack. */
+-      BPF_ST_MEM(BPF_W, BPF_REG_10, -16, 0),
+-      BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+-      /* Get an unknown value. */
+-      BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, 0),
+-      /* Make it small and 4-byte aligned. */
+-      BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 4),
+-      BPF_ALU64_IMM(BPF_SUB, BPF_REG_4, 16),
+-      /* Add it to fp.  We now have either fp-12 or fp-16, we don't know
+-       * which, but either way it points to initialized stack.
+-       */
+-      BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_10),
+-      BPF_MOV64_IMM(BPF_REG_5, 8),
+-      /* Dereference it indirectly. */
+-      BPF_EMIT_CALL(BPF_FUNC_getsockopt),
+-      BPF_MOV64_IMM(BPF_REG_0, 0),
+-      BPF_EXIT_INSN(),
+-      },
+-      .errstr = "invalid indirect read from stack R4 var_off",
+-      .result = REJECT,
+-      .prog_type = BPF_PROG_TYPE_SOCK_OPS,
+-},
+-{
+       "indirect variable-offset stack access, ok",
+       .insns = {
+       /* Fill the top 16 bytes of the stack. */
diff --git a/queue-6.1/cifs-use-origin-fullpath-for-automounts.patch b/queue-6.1/cifs-use-origin-fullpath-for-automounts.patch
new file mode 100644 (file)
index 0000000..849cf9a
--- /dev/null
@@ -0,0 +1,167 @@
+From 7ad54b98fc1f141cfb70cfe2a3d6def5a85169ff Mon Sep 17 00:00:00 2001
+From: Paulo Alcantara <pc@cjr.nz>
+Date: Sun, 18 Dec 2022 14:37:32 -0300
+Subject: cifs: use origin fullpath for automounts
+
+From: Paulo Alcantara <pc@cjr.nz>
+
+commit 7ad54b98fc1f141cfb70cfe2a3d6def5a85169ff upstream.
+
+Use TCP_Server_Info::origin_fullpath instead of cifs_tcon::tree_name
+when building source paths for automounts, as this will be useful for
+domain-based DFS referrals, where the connections and referrals are
+either re-used from the cache or re-created when chasing the DFS
+link.
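+
+For illustration (hypothetical names): with origin_fullpath set to
+"\\dom.example.com\dfsroot", the automount device name for a child
+directory "dir1" is now built as
+
+    \\dom.example.com\dfsroot\dir1
+
+rather than from cifs_tcon::tree_name (the resolved target, e.g.
+"\\fileserver1\share"), so the submount resolves the DFS referral from
+the originally mounted namespace path again.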
+
+Signed-off-by: Paulo Alcantara (SUSE) <pc@cjr.nz>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+[apanyaki: backport to v6.1-stable]
+Signed-off-by: Andrew Paniakin <apanyaki@amazon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/cifs_dfs_ref.c |   34 ++++++++++++++++++++++++++++++++--
+ fs/smb/client/cifsproto.h    |   18 ++++++++++++++++++
+ fs/smb/client/dir.c          |   21 +++++++++++++++------
+ 3 files changed, 65 insertions(+), 8 deletions(-)
+
+--- a/fs/smb/client/cifs_dfs_ref.c
++++ b/fs/smb/client/cifs_dfs_ref.c
+@@ -258,6 +258,31 @@ compose_mount_options_err:
+       goto compose_mount_options_out;
+ }
++static int set_dest_addr(struct smb3_fs_context *ctx, const char *full_path)
++{
++      struct sockaddr *addr = (struct sockaddr *)&ctx->dstaddr;
++      char *str_addr = NULL;
++      int rc;
++
++      rc = dns_resolve_server_name_to_ip(full_path, &str_addr, NULL);
++      if (rc < 0)
++              goto out;
++
++      rc = cifs_convert_address(addr, str_addr, strlen(str_addr));
++      if (!rc) {
++              cifs_dbg(FYI, "%s: failed to convert ip address\n", __func__);
++              rc = -EINVAL;
++              goto out;
++      }
++
++      cifs_set_port(addr, ctx->port);
++      rc = 0;
++
++out:
++      kfree(str_addr);
++      return rc;
++}
++
+ /*
+  * Create a vfsmount that we can automount
+  */
+@@ -295,8 +320,7 @@ static struct vfsmount *cifs_dfs_do_auto
+       ctx = smb3_fc2context(fc);
+       page = alloc_dentry_path();
+-      /* always use tree name prefix */
+-      full_path = build_path_from_dentry_optional_prefix(mntpt, page, true);
++      full_path = dfs_get_automount_devname(mntpt, page);
+       if (IS_ERR(full_path)) {
+               mnt = ERR_CAST(full_path);
+               goto out;
+@@ -313,6 +337,12 @@ static struct vfsmount *cifs_dfs_do_auto
+       if (rc) {
+               mnt = ERR_PTR(rc);
+               goto out;
++      }
++
++      rc = set_dest_addr(ctx, full_path);
++      if (rc) {
++              mnt = ERR_PTR(rc);
++              goto out;
+       }
+       rc = smb3_parse_devname(full_path, ctx);
+--- a/fs/smb/client/cifsproto.h
++++ b/fs/smb/client/cifsproto.h
+@@ -57,8 +57,26 @@ extern void exit_cifs_idmap(void);
+ extern int init_cifs_spnego(void);
+ extern void exit_cifs_spnego(void);
+ extern const char *build_path_from_dentry(struct dentry *, void *);
++char *__build_path_from_dentry_optional_prefix(struct dentry *direntry, void *page,
++                                             const char *tree, int tree_len,
++                                             bool prefix);
+ extern char *build_path_from_dentry_optional_prefix(struct dentry *direntry,
+                                                   void *page, bool prefix);
++static inline char *dfs_get_automount_devname(struct dentry *dentry, void *page)
++{
++      struct cifs_sb_info *cifs_sb = CIFS_SB(dentry->d_sb);
++      struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
++      struct TCP_Server_Info *server = tcon->ses->server;
++
++      if (unlikely(!server->origin_fullpath))
++              return ERR_PTR(-EREMOTE);
++
++      return __build_path_from_dentry_optional_prefix(dentry, page,
++                                                      server->origin_fullpath,
++                                                      strlen(server->origin_fullpath),
++                                                      true);
++}
++
+ static inline void *alloc_dentry_path(void)
+ {
+       return __getname();
+--- a/fs/smb/client/dir.c
++++ b/fs/smb/client/dir.c
+@@ -78,14 +78,13 @@ build_path_from_dentry(struct dentry *di
+                                                     prefix);
+ }
+-char *
+-build_path_from_dentry_optional_prefix(struct dentry *direntry, void *page,
+-                                     bool prefix)
++char *__build_path_from_dentry_optional_prefix(struct dentry *direntry, void *page,
++                                             const char *tree, int tree_len,
++                                             bool prefix)
+ {
+       int dfsplen;
+       int pplen = 0;
+       struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
+-      struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+       char dirsep = CIFS_DIR_SEP(cifs_sb);
+       char *s;
+@@ -93,7 +92,7 @@ build_path_from_dentry_optional_prefix(s
+               return ERR_PTR(-ENOMEM);
+       if (prefix)
+-              dfsplen = strnlen(tcon->tree_name, MAX_TREE_SIZE + 1);
++              dfsplen = strnlen(tree, tree_len + 1);
+       else
+               dfsplen = 0;
+@@ -123,7 +122,7 @@ build_path_from_dentry_optional_prefix(s
+       }
+       if (dfsplen) {
+               s -= dfsplen;
+-              memcpy(s, tcon->tree_name, dfsplen);
++              memcpy(s, tree, dfsplen);
+               if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) {
+                       int i;
+                       for (i = 0; i < dfsplen; i++) {
+@@ -135,6 +134,16 @@ build_path_from_dentry_optional_prefix(s
+       return s;
+ }
++char *build_path_from_dentry_optional_prefix(struct dentry *direntry, void *page,
++                                           bool prefix)
++{
++      struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
++      struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
++
++      return __build_path_from_dentry_optional_prefix(direntry, page, tcon->tree_name,
++                                                      MAX_TREE_SIZE, prefix);
++}
++
+ /*
+  * Don't allow path components longer than the server max.
+  * Don't allow the separator character in a path component.
diff --git a/queue-6.1/series b/queue-6.1/series
index ce0a8be0f417f1f08b56ef82509141f69c4a90f3..0f364c30167a576d53a8f60da119017dcf37ff96 100644 (file)
--- a/queue-6.1/series
@@ -78,3 +78,5 @@ misc-fastrpc-fix-dsp-capabilities-request.patch
 misc-fastrpc-avoid-updating-pd-type-for-capability-request.patch
 misc-fastrpc-copy-the-complete-capability-structure-to-user.patch
 x86-retpoline-move-a-noendbr-annotation-to-the-srso-dummy-return-thunk.patch
+cifs-use-origin-fullpath-for-automounts.patch
+bpf-allow-reads-from-uninit-stack.patch