git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.9-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 29 Jan 2018 12:34:54 +0000 (13:34 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 29 Jan 2018 12:34:54 +0000 (13:34 +0100)
added patches:
bpf-arsh-is-not-supported-in-32-bit-alu-thus-reject-it.patch
bpf-avoid-false-sharing-of-map-refcount-with-max_entries.patch
bpf-fix-32-bit-divide-by-zero.patch
bpf-fix-bpf_tail_call-x64-jit.patch
bpf-fix-divides-by-zero.patch
bpf-introduce-bpf_jit_always_on-config.patch
bpf-reject-stores-into-ctx-via-st-and-xadd.patch
x86-bpf_jit-small-optimization-in-emit_bpf_tail_call.patch

queue-4.9/bpf-arsh-is-not-supported-in-32-bit-alu-thus-reject-it.patch [new file with mode: 0644]
queue-4.9/bpf-avoid-false-sharing-of-map-refcount-with-max_entries.patch [new file with mode: 0644]
queue-4.9/bpf-fix-32-bit-divide-by-zero.patch [new file with mode: 0644]
queue-4.9/bpf-fix-bpf_tail_call-x64-jit.patch [new file with mode: 0644]
queue-4.9/bpf-fix-divides-by-zero.patch [new file with mode: 0644]
queue-4.9/bpf-introduce-bpf_jit_always_on-config.patch [new file with mode: 0644]
queue-4.9/bpf-reject-stores-into-ctx-via-st-and-xadd.patch [new file with mode: 0644]
queue-4.9/series
queue-4.9/x86-bpf_jit-small-optimization-in-emit_bpf_tail_call.patch [new file with mode: 0644]

diff --git a/queue-4.9/bpf-arsh-is-not-supported-in-32-bit-alu-thus-reject-it.patch b/queue-4.9/bpf-arsh-is-not-supported-in-32-bit-alu-thus-reject-it.patch
new file mode 100644 (file)
index 0000000..9ae0283
--- /dev/null
@@ -0,0 +1,49 @@
+From foo@baz Mon Jan 29 13:22:08 CET 2018
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Mon, 29 Jan 2018 02:48:57 +0100
+Subject: bpf: arsh is not supported in 32 bit alu thus reject it
+To: gregkh@linuxfoundation.org
+Cc: ast@kernel.org, stable@vger.kernel.org, Daniel Borkmann <daniel@iogearbox.net>
+Message-ID: <f236d9b875aa2ab63ab155cafa0f10376883da00.1517190206.git.daniel@iogearbox.net>
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+[ upstream commit 7891a87efc7116590eaba57acc3c422487802c6f ]
+
+The following snippet was throwing an 'unknown opcode cc' warning
+in the BPF interpreter:
+
+  0: (18) r0 = 0x0
+  2: (7b) *(u64 *)(r10 -16) = r0
+  3: (cc) (u32) r0 s>>= (u32) r0
+  4: (95) exit
+
+Although a number of JITs do support BPF_ALU | BPF_ARSH | BPF_{K,X}
+generation, not all of them do, and neither does the interpreter. We
+can leave the existing ones as they are and implement it later in
+bpf-next for the remaining ones, but reject this properly in the
+verifier for the time being.
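+
+For illustration only (a sketch, not part of the upstream commit), such
+a program could be built from C with the insn helper macros from
+tools/include/linux/filter.h and should now fail to load with
+"BPF_ARSH not supported for 32 bit ALU" instead of reaching the
+interpreter's 'unknown opcode cc' path:
+
+  struct bpf_insn prog[] = {
+          BPF_LD_IMM64(BPF_REG_0, 0),                      /* r0 = 0x0             */
+          BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), /* *(u64 *)(r10-16) = r0 */
+          BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_0),   /* opcode 0xcc          */
+          BPF_EXIT_INSN(),
+  };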
+
+Fixes: 17a5267067f3 ("bpf: verifier (add verifier core)")
+Reported-by: syzbot+93c4904c5c70348a6890@syzkaller.appspotmail.com
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/verifier.c |    5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -1843,6 +1843,11 @@ static int check_alu_op(struct bpf_verif
+                       return -EINVAL;
+               }
++              if (opcode == BPF_ARSH && BPF_CLASS(insn->code) != BPF_ALU64) {
++                      verbose("BPF_ARSH not supported for 32 bit ALU\n");
++                      return -EINVAL;
++              }
++
+               if ((opcode == BPF_LSH || opcode == BPF_RSH ||
+                    opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
+                       int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
diff --git a/queue-4.9/bpf-avoid-false-sharing-of-map-refcount-with-max_entries.patch b/queue-4.9/bpf-avoid-false-sharing-of-map-refcount-with-max_entries.patch
new file mode 100644 (file)
index 0000000..d990d28
--- /dev/null
@@ -0,0 +1,127 @@
+From foo@baz Mon Jan 29 13:22:08 CET 2018
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Mon, 29 Jan 2018 02:48:58 +0100
+Subject: bpf: avoid false sharing of map refcount with max_entries
+To: gregkh@linuxfoundation.org
+Cc: ast@kernel.org, stable@vger.kernel.org, Daniel Borkmann <daniel@iogearbox.net>
+Message-ID: <1ce4bdf60f6950b69dd527c41fb93f92cd3cfedc.1517190207.git.daniel@iogearbox.net>
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+[ upstream commit be95a845cc4402272994ce290e3ad928aff06cb9 ]
+
+In addition to commit b2157399cc98 ("bpf: prevent out-of-bounds
+speculation"), also change the layout of struct bpf_map such that
+false sharing of fast-path members like max_entries is avoided
+when the map's reference counter is altered. Therefore enforce
+them to be placed into separate cachelines.
+
+pahole dump after change:
+
+  struct bpf_map {
+        const struct bpf_map_ops  * ops;                 /*     0     8 */
+        struct bpf_map *           inner_map_meta;       /*     8     8 */
+        void *                     security;             /*    16     8 */
+        enum bpf_map_type          map_type;             /*    24     4 */
+        u32                        key_size;             /*    28     4 */
+        u32                        value_size;           /*    32     4 */
+        u32                        max_entries;          /*    36     4 */
+        u32                        map_flags;            /*    40     4 */
+        u32                        pages;                /*    44     4 */
+        u32                        id;                   /*    48     4 */
+        int                        numa_node;            /*    52     4 */
+        bool                       unpriv_array;         /*    56     1 */
+
+        /* XXX 7 bytes hole, try to pack */
+
+        /* --- cacheline 1 boundary (64 bytes) --- */
+        struct user_struct *       user;                 /*    64     8 */
+        atomic_t                   refcnt;               /*    72     4 */
+        atomic_t                   usercnt;              /*    76     4 */
+        struct work_struct         work;                 /*    80    32 */
+        char                       name[16];             /*   112    16 */
+        /* --- cacheline 2 boundary (128 bytes) --- */
+
+        /* size: 128, cachelines: 2, members: 17 */
+        /* sum members: 121, holes: 1, sum holes: 7 */
+  };
+
+Now all entries in the first cacheline are read-only throughout the
+lifetime of the map and are set up once during map creation. The
+overall struct size and number of cachelines don't change from the
+reordering. struct bpf_map is usually the first member embedded in the
+map structs of specific map implementations, so this also avoids having
+those members sit at the end, where they could potentially share a
+cacheline with the first map values (e.g. in the array map), since
+remote CPUs could trigger map updates just as well for those
+(intentionally dirtying members like max_entries) while having
+subsequent values in cache.
+
+Quoting from Google's Project Zero blog [1]:
+
+  Additionally, at least on the Intel machine on which this was
+  tested, bouncing modified cache lines between cores is slow,
+  apparently because the MESI protocol is used for cache coherence
+  [8]. Changing the reference counter of an eBPF array on one
+  physical CPU core causes the cache line containing the reference
+  counter to be bounced over to that CPU core, making reads of the
+  reference counter on all other CPU cores slow until the changed
+  reference counter has been written back to memory. Because the
+  length and the reference counter of an eBPF array are stored in
+  the same cache line, this also means that changing the reference
+  counter on one physical CPU core causes reads of the eBPF array's
+  length to be slow on other physical CPU cores (intentional false
+  sharing).
+
+While this doesn't 'control' the out-of-bounds speculation through
+masking the index as in commit b2157399cc98, triggering a manipulation
+of the map's reference counter is really trivial, so let's not allow
+max_entries to be easily affected through it.
+
+Splitting into separate cachelines also generally makes sense from a
+performance perspective, in that the fast path won't take a cache miss
+if the map gets pinned, reused in other progs, etc. from the control
+path, and it thus also avoids unintentional false sharing.
+
+  [1] https://googleprojectzero.blogspot.ch/2018/01/reading-privileged-memory-with-side.html
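+
+As an illustrative sketch (not part of the upstream commit, and the
+helper name is made up), the intended split could be asserted at build
+time along these lines:
+
+  /* refcnt (dirtied on every map ref/unref) must not share a cacheline
+   * with the read-mostly fast-path member max_entries.
+   */
+  static void __maybe_unused bpf_map_layout_check(void)
+  {
+          BUILD_BUG_ON(offsetof(struct bpf_map, refcnt) / SMP_CACHE_BYTES ==
+                       offsetof(struct bpf_map, max_entries) / SMP_CACHE_BYTES);
+  }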
+
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/bpf.h |   16 ++++++++++++----
+ 1 file changed, 12 insertions(+), 4 deletions(-)
+
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -36,7 +36,10 @@ struct bpf_map_ops {
+ };
+ struct bpf_map {
+-      atomic_t refcnt;
++      /* 1st cacheline with read-mostly members of which some
++       * are also accessed in fast-path (e.g. ops, max_entries).
++       */
++      const struct bpf_map_ops *ops ____cacheline_aligned;
+       enum bpf_map_type map_type;
+       u32 key_size;
+       u32 value_size;
+@@ -44,10 +47,15 @@ struct bpf_map {
+       u32 map_flags;
+       u32 pages;
+       bool unpriv_array;
+-      struct user_struct *user;
+-      const struct bpf_map_ops *ops;
+-      struct work_struct work;
++      /* 7 bytes hole */
++
++      /* 2nd cacheline with misc members to avoid false sharing
++       * particularly with refcounting.
++       */
++      struct user_struct *user ____cacheline_aligned;
++      atomic_t refcnt;
+       atomic_t usercnt;
++      struct work_struct work;
+ };
+ struct bpf_map_type_list {
diff --git a/queue-4.9/bpf-fix-32-bit-divide-by-zero.patch b/queue-4.9/bpf-fix-32-bit-divide-by-zero.patch
new file mode 100644 (file)
index 0000000..2eff0e7
--- /dev/null
@@ -0,0 +1,67 @@
+From foo@baz Mon Jan 29 13:22:08 CET 2018
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Mon, 29 Jan 2018 02:49:00 +0100
+Subject: bpf: fix 32-bit divide by zero
+To: gregkh@linuxfoundation.org
+Cc: ast@kernel.org, stable@vger.kernel.org, Daniel Borkmann <daniel@iogearbox.net>
+Message-ID: <0fe1fff7b023f38a11f9bebd975e29b78e5c788b.1517190207.git.daniel@iogearbox.net>
+
+From: Alexei Starovoitov <ast@kernel.org>
+
+[ upstream commit 68fda450a7df51cff9e5a4d4a4d9d0d5f2589153 ]
+
+Due to some JITs doing the if (src_reg == 0) check in 64-bit mode
+for div/mod operations, mask the upper 32 bits of the src register
+before doing the check.
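+
+As a sketch (not part of the commit): a 32-bit BPF_DIV/BPF_MOD only
+divides by the lower 32 bits of the source register, but the affected
+JITs test the full 64-bit register for zero. A source value such as
+0x100000000 therefore passes the JIT's zero check while the actual
+divide uses 0. The verifier now patches a 32-bit move in front, which
+zero-extends and so clears the upper half before the check:
+
+  /* original insn:  BPF_ALU | BPF_DIV | BPF_X  (32-bit r0 /= r3)        */
+  /* patched sequence handed to the JIT:                                 */
+  BPF_MOV32_REG(BPF_REG_3, BPF_REG_3),          /* r3 = (u32) r3         */
+  BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_3), /* 64-bit zero check in  */
+                                                /* the JIT is now correct */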
+
+Fixes: 622582786c9e ("net: filter: x86: internal BPF JIT")
+Fixes: 7a12b5031c6b ("sparc64: Add eBPF JIT.")
+Reported-by: syzbot+48340bb518e88849e2e3@syzkaller.appspotmail.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/verifier.c |   18 ++++++++++++++++++
+ net/core/filter.c     |    4 ++++
+ 2 files changed, 22 insertions(+)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -3391,6 +3391,24 @@ static int fixup_bpf_calls(struct bpf_ve
+       for (i = 0; i < insn_cnt; i++, insn++) {
++              if (insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
++                  insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
++                      /* due to JIT bugs clear upper 32-bits of src register
++                       * before div/mod operation
++                       */
++                      insn_buf[0] = BPF_MOV32_REG(insn->src_reg, insn->src_reg);
++                      insn_buf[1] = *insn;
++                      cnt = 2;
++                      new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
++                      if (!new_prog)
++                              return -ENOMEM;
++
++                      delta    += cnt - 1;
++                      env->prog = prog = new_prog;
++                      insn      = new_prog->insnsi + i + delta;
++                      continue;
++              }
++
+               if (insn->code != (BPF_JMP | BPF_CALL))
+                       continue;
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -441,6 +441,10 @@ do_pass:
+                           convert_bpf_extensions(fp, &insn))
+                               break;
++                      if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) ||
++                          fp->code == (BPF_ALU | BPF_MOD | BPF_X))
++                              *insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X);
++
+                       *insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
+                       break;
diff --git a/queue-4.9/bpf-fix-bpf_tail_call-x64-jit.patch b/queue-4.9/bpf-fix-bpf_tail_call-x64-jit.patch
new file mode 100644 (file)
index 0000000..952fdc6
--- /dev/null
@@ -0,0 +1,60 @@
+From foo@baz Mon Jan 29 13:22:08 CET 2018
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Mon, 29 Jan 2018 02:48:55 +0100
+Subject: bpf: fix bpf_tail_call() x64 JIT
+To: gregkh@linuxfoundation.org
+Cc: ast@kernel.org, stable@vger.kernel.org, Alexei Starovoitov <ast@fb.com>, "David S . Miller" <davem@davemloft.net>
+Message-ID: <b7bd813935a7bc6a5f4fe4a3f199034f571c9b70.1517190206.git.daniel@iogearbox.net>
+
+From: Alexei Starovoitov <ast@fb.com>
+
+[ upstream commit 90caccdd8cc0215705f18b92771b449b01e2474a ]
+
+- bpf prog_array, just like all other types of bpf arrays, accepts a 32-bit index.
+  Clarify that in the comment.
+- fix the x64 JIT of bpf_tail_call, which was incorrectly loading 8 instead of 4 bytes
+- tighten the corresponding check in the interpreter to stay consistent
+
+The JIT bug can be triggered after the introduction of the BPF_F_NUMA_NODE flag
+in commit 96eabe7a40aa in 4.14. Before that, map_flags would stay zero and,
+although the JIT code is wrong, it would still check bounds correctly.
+Hence the two Fixes tags. All other JITs don't have this problem.
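+
+Conceptually (an illustrative sketch, not kernel code as-is, with a
+made-up helper name), the bounds check in both the interpreter and the
+JIT has to treat the index as u32:
+
+  static struct bpf_prog *tail_call_lookup(struct bpf_array *array, u64 r3)
+  {
+          u32 index = r3;                        /* upper 32 bits of R3 ignored */
+
+          if (index >= array->map.max_entries)   /* 32-bit compare              */
+                  return NULL;
+          return READ_ONCE(array->ptrs[index]);
+  }
+
+The old x64 JIT instead loaded 8 bytes at map.max_entries (picking up the
+neighboring map_flags bits) and compared against the full 64-bit index.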
+
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Fixes: 96eabe7a40aa ("bpf: Allow selecting numa node during map creation")
+Fixes: b52f00e6a715 ("x86: bpf_jit: implement bpf_tail_call() helper")
+Acked-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Martin KaFai Lau <kafai@fb.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/net/bpf_jit_comp.c |    4 ++--
+ kernel/bpf/core.c           |    2 +-
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -278,9 +278,9 @@ static void emit_bpf_tail_call(u8 **ppro
+       /* if (index >= array->map.max_entries)
+        *   goto out;
+        */
+-      EMIT4(0x48, 0x8B, 0x46,                   /* mov rax, qword ptr [rsi + 16] */
++      EMIT2(0x89, 0xD2);                        /* mov edx, edx */
++      EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
+             offsetof(struct bpf_array, map.max_entries));
+-      EMIT3(0x48, 0x39, 0xD0);                  /* cmp rax, rdx */
+ #define OFFSET1 43 /* number of bytes to jump */
+       EMIT2(X86_JBE, OFFSET1);                  /* jbe out */
+       label1 = cnt;
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -715,7 +715,7 @@ select_insn:
+               struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
+               struct bpf_array *array = container_of(map, struct bpf_array, map);
+               struct bpf_prog *prog;
+-              u64 index = BPF_R3;
++              u32 index = BPF_R3;
+               if (unlikely(index >= array->map.max_entries))
+                       goto out;
diff --git a/queue-4.9/bpf-fix-divides-by-zero.patch b/queue-4.9/bpf-fix-divides-by-zero.patch
new file mode 100644 (file)
index 0000000..6602ca6
--- /dev/null
@@ -0,0 +1,46 @@
+From foo@baz Mon Jan 29 13:22:08 CET 2018
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Mon, 29 Jan 2018 02:48:59 +0100
+Subject: bpf: fix divides by zero
+To: gregkh@linuxfoundation.org
+Cc: ast@kernel.org, stable@vger.kernel.org, Eric Dumazet <edumazet@google.com>
+Message-ID: <aacafad0b7353aa4acc993dc74ea5168b31d85ab.1517190207.git.daniel@iogearbox.net>
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ upstream commit c366287ebd698ef5e3de300d90cd62ee9ee7373e ]
+
+Divides by zero are not nice, let's avoid them if possible.
+
+Also, do_div() does not seem to be needed when dealing with 32-bit
+operands, but that is a minor detail.
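+
+To spell it out (illustration only, mirroring the interpreter code in
+the hunk below): ALU_DIV_X / ALU_MOD_X divide by the lower 32 bits of
+SRC, so the zero test must look at those same 32 bits:
+
+  /* SRC = 0x100000000: non-zero as u64, zero as u32 */
+  if (SRC == 0)            /* old check: not taken          */
+          return 0;
+  tmp = (u32) DST;
+  do_div(tmp, (u32) SRC);  /* divides by (u32) SRC == 0     */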
+
+Fixes: bd4cf0ed331a ("net: filter: rework/optimize internal BPF interpreter's instruction set")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/core.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -642,7 +642,7 @@ select_insn:
+               DST = tmp;
+               CONT;
+       ALU_MOD_X:
+-              if (unlikely(SRC == 0))
++              if (unlikely((u32)SRC == 0))
+                       return 0;
+               tmp = (u32) DST;
+               DST = do_div(tmp, (u32) SRC);
+@@ -661,7 +661,7 @@ select_insn:
+               DST = div64_u64(DST, SRC);
+               CONT;
+       ALU_DIV_X:
+-              if (unlikely(SRC == 0))
++              if (unlikely((u32)SRC == 0))
+                       return 0;
+               tmp = (u32) DST;
+               do_div(tmp, (u32) SRC);
diff --git a/queue-4.9/bpf-introduce-bpf_jit_always_on-config.patch b/queue-4.9/bpf-introduce-bpf_jit_always_on-config.patch
new file mode 100644 (file)
index 0000000..6098b34
--- /dev/null
@@ -0,0 +1,212 @@
+From foo@baz Mon Jan 29 13:22:08 CET 2018
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Mon, 29 Jan 2018 02:48:56 +0100
+Subject: bpf: introduce BPF_JIT_ALWAYS_ON config
+To: gregkh@linuxfoundation.org
+Cc: ast@kernel.org, stable@vger.kernel.org, Daniel Borkmann <daniel@iogearbox.net>
+Message-ID: <e2902a6193fc8672f097d4181fe9573e64171d4e.1517190206.git.daniel@iogearbox.net>
+
+From: Alexei Starovoitov <ast@kernel.org>
+
+[ upstream commit 290af86629b25ffd1ed6232c4e9107da031705cb ]
+
+The BPF interpreter has been used as part of the Spectre variant 2 attack (CVE-2017-5715).
+
+A quote from the Google Project Zero blog:
+"At this point, it would normally be necessary to locate gadgets in
+the host kernel code that can be used to actually leak data by reading
+from an attacker-controlled location, shifting and masking the result
+appropriately and then using the result of that as offset to an
+attacker-controlled address for a load. But piecing gadgets together
+and figuring out which ones work in a speculation context seems annoying.
+So instead, we decided to use the eBPF interpreter, which is built into
+the host kernel - while there is no legitimate way to invoke it from inside
+a VM, the presence of the code in the host kernel's text section is sufficient
+to make it usable for the attack, just like with ordinary ROP gadgets."
+
+To make the attacker's job harder, introduce a BPF_JIT_ALWAYS_ON config
+option that removes the interpreter from the kernel in favor of JIT-only mode.
+So far the eBPF JIT is supported by:
+x64, arm64, arm32, sparc64, s390, powerpc64, mips64
+
+The start of the JITed program is randomized and the code page is marked
+as read-only. In addition, "constant blinding" can be turned on with
+net.core.bpf_jit_harden.
+
+v2->v3:
+- move __bpf_prog_ret0 under ifdef (Daniel)
+
+v1->v2:
+- fix init order, test_bpf and cBPF (Daniel's feedback)
+- fix offloaded bpf (Jakub's feedback)
+- add 'return 0' dummy in case something can invoke prog->bpf_func
+- retarget bpf tree. For bpf-next the patch would need one extra hunk.
+  It will be sent when the trees are merged back to net-next
+
+Considered doing:
+  int bpf_jit_enable __read_mostly = BPF_EBPF_JIT_DEFAULT;
+but it seems better to land the patch as-is and in bpf-next remove
+bpf_jit_enable global variable from all JITs, consolidate in one place
+and remove this jit_init() function.
+
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ init/Kconfig               |    7 +++++++
+ kernel/bpf/core.c          |   18 ++++++++++++++++++
+ lib/test_bpf.c             |   11 +++++++----
+ net/core/filter.c          |    6 ++----
+ net/core/sysctl_net_core.c |    6 ++++++
+ net/socket.c               |    9 +++++++++
+ 6 files changed, 49 insertions(+), 8 deletions(-)
+
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -1609,6 +1609,13 @@ config BPF_SYSCALL
+         Enable the bpf() system call that allows to manipulate eBPF
+         programs and maps via file descriptors.
++config BPF_JIT_ALWAYS_ON
++      bool "Permanently enable BPF JIT and remove BPF interpreter"
++      depends on BPF_SYSCALL && HAVE_EBPF_JIT && BPF_JIT
++      help
++        Enables BPF JIT and removes BPF interpreter to avoid
++        speculative execution of BPF instructions by the interpreter
++
+ config SHMEM
+       bool "Use full shmem filesystem" if EXPERT
+       default y
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -458,6 +458,7 @@ noinline u64 __bpf_call_base(u64 r1, u64
+ }
+ EXPORT_SYMBOL_GPL(__bpf_call_base);
++#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+ /**
+  *    __bpf_prog_run - run eBPF program on a given context
+  *    @ctx: is the data we are operating on
+@@ -923,6 +924,13 @@ load_byte:
+ }
+ STACK_FRAME_NON_STANDARD(__bpf_prog_run); /* jump table */
++#else
++static unsigned int __bpf_prog_ret0(void *ctx, const struct bpf_insn *insn)
++{
++      return 0;
++}
++#endif
++
+ bool bpf_prog_array_compatible(struct bpf_array *array,
+                              const struct bpf_prog *fp)
+ {
+@@ -970,7 +978,11 @@ static int bpf_check_tail_call(const str
+  */
+ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
+ {
++#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+       fp->bpf_func = (void *) __bpf_prog_run;
++#else
++      fp->bpf_func = (void *) __bpf_prog_ret0;
++#endif
+       /* eBPF JITs can rewrite the program in case constant
+        * blinding is active. However, in case of error during
+@@ -979,6 +991,12 @@ struct bpf_prog *bpf_prog_select_runtime
+        * be JITed, but falls back to the interpreter.
+        */
+       fp = bpf_int_jit_compile(fp);
++#ifdef CONFIG_BPF_JIT_ALWAYS_ON
++      if (!fp->jited) {
++              *err = -ENOTSUPP;
++              return fp;
++      }
++#endif
+       bpf_prog_lock_ro(fp);
+       /* The tail call compatibility check can only be done at
+--- a/lib/test_bpf.c
++++ b/lib/test_bpf.c
+@@ -5646,9 +5646,8 @@ static struct bpf_prog *generate_filter(
+                               return NULL;
+                       }
+               }
+-              /* We don't expect to fail. */
+               if (*err) {
+-                      pr_cont("FAIL to attach err=%d len=%d\n",
++                      pr_cont("FAIL to prog_create err=%d len=%d\n",
+                               *err, fprog.len);
+                       return NULL;
+               }
+@@ -5671,6 +5670,10 @@ static struct bpf_prog *generate_filter(
+                * checks.
+                */
+               fp = bpf_prog_select_runtime(fp, err);
++              if (*err) {
++                      pr_cont("FAIL to select_runtime err=%d\n", *err);
++                      return NULL;
++              }
+               break;
+       }
+@@ -5856,8 +5859,8 @@ static __init int test_bpf(void)
+                               pass_cnt++;
+                               continue;
+                       }
+-
+-                      return err;
++                      err_cnt++;
++                      continue;
+               }
+               pr_cont("jited:%u ", fp->jited);
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -1005,11 +1005,9 @@ static struct bpf_prog *bpf_migrate_filt
+                */
+               goto out_err_free;
+-      /* We are guaranteed to never error here with cBPF to eBPF
+-       * transitions, since there's no issue with type compatibility
+-       * checks on program arrays.
+-       */
+       fp = bpf_prog_select_runtime(fp, &err);
++      if (err)
++              goto out_err_free;
+       kfree(old_prog);
+       return fp;
+--- a/net/core/sysctl_net_core.c
++++ b/net/core/sysctl_net_core.c
+@@ -292,7 +292,13 @@ static struct ctl_table net_core_table[]
+               .data           = &bpf_jit_enable,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
++#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+               .proc_handler   = proc_dointvec
++#else
++              .proc_handler   = proc_dointvec_minmax,
++              .extra1         = &one,
++              .extra2         = &one,
++#endif
+       },
+ # ifdef CONFIG_HAVE_EBPF_JIT
+       {
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -2548,6 +2548,15 @@ out_fs:
+ core_initcall(sock_init);     /* early initcall */
++static int __init jit_init(void)
++{
++#ifdef CONFIG_BPF_JIT_ALWAYS_ON
++      bpf_jit_enable = 1;
++#endif
++      return 0;
++}
++pure_initcall(jit_init);
++
+ #ifdef CONFIG_PROC_FS
+ void socket_seq_show(struct seq_file *seq)
+ {
diff --git a/queue-4.9/bpf-reject-stores-into-ctx-via-st-and-xadd.patch b/queue-4.9/bpf-reject-stores-into-ctx-via-st-and-xadd.patch
new file mode 100644 (file)
index 0000000..c8d4676
--- /dev/null
@@ -0,0 +1,72 @@
+From foo@baz Mon Jan 29 13:22:08 CET 2018
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Mon, 29 Jan 2018 02:49:01 +0100
+Subject: bpf: reject stores into ctx via st and xadd
+To: gregkh@linuxfoundation.org
+Cc: ast@kernel.org, stable@vger.kernel.org, Daniel Borkmann <daniel@iogearbox.net>
+Message-ID: <bf502ba70d86b457adcbd695412af739a1d036f5.1517190207.git.daniel@iogearbox.net>
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+[ upstream commit f37a8cb84cce18762e8f86a70bd6a49a66ab964c ]
+
+Alexei found that the verifier does not reject stores into the context
+via BPF_ST (as opposed to BPF_STX). And while looking at it, we should
+also not allow the XADD variant of BPF_STX.
+
+The context rewriter only assumes BPF_LDX_MEM- or BPF_STX_MEM-type
+operations, thus reject anything other than that so that the
+assumptions in the rewriter properly hold. Add test cases for the BPF
+selftests as well.
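+
+For illustration only (a sketch using the insn helper macros from
+tools/include/linux/filter.h, not part of the upstream commit), program
+fragments like these are now rejected at verification time:
+
+  /* BPF_ST: store immediate into the context (r1 = ctx on entry) */
+  BPF_ST_MEM(BPF_W, BPF_REG_1, offsetof(struct __sk_buff, mark), 42),
+  BPF_MOV64_IMM(BPF_REG_0, 0),
+  BPF_EXIT_INSN(),
+
+  /* BPF_STX | BPF_XADD: atomic add into the context */
+  BPF_MOV64_IMM(BPF_REG_0, 1),
+  BPF_STX_XADD(BPF_W, BPF_REG_1, BPF_REG_0, offsetof(struct __sk_buff, mark)),
+  BPF_EXIT_INSN(),
+
+with errors along the lines of "BPF_ST stores into R1 context is not
+allowed" and "BPF_XADD stores into R1 context is not allowed".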
+
+Fixes: d691f9e8d440 ("bpf: allow programs to write to certain skb fields")
+Reported-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/verifier.c |   19 +++++++++++++++++++
+ 1 file changed, 19 insertions(+)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -702,6 +702,13 @@ static bool is_pointer_value(struct bpf_
+       return __is_pointer_value(env->allow_ptr_leaks, &env->cur_state.regs[regno]);
+ }
++static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
++{
++      const struct bpf_reg_state *reg = &env->cur_state.regs[regno];
++
++      return reg->type == PTR_TO_CTX;
++}
++
+ static int check_ptr_alignment(struct bpf_verifier_env *env,
+                              struct bpf_reg_state *reg, int off, int size)
+ {
+@@ -896,6 +903,12 @@ static int check_xadd(struct bpf_verifie
+               return -EACCES;
+       }
++      if (is_ctx_reg(env, insn->dst_reg)) {
++              verbose("BPF_XADD stores into R%d context is not allowed\n",
++                      insn->dst_reg);
++              return -EACCES;
++      }
++
+       /* check whether atomic_add can read the memory */
+       err = check_mem_access(env, insn->dst_reg, insn->off,
+                              BPF_SIZE(insn->code), BPF_READ, -1);
+@@ -3012,6 +3025,12 @@ static int do_check(struct bpf_verifier_
+                       if (err)
+                               return err;
++                      if (is_ctx_reg(env, insn->dst_reg)) {
++                              verbose("BPF_ST stores into R%d context is not allowed\n",
++                                      insn->dst_reg);
++                              return -EACCES;
++                      }
++
+                       /* check that memory (dst_reg + off) is writeable */
+                       err = check_mem_access(env, insn->dst_reg, insn->off,
+                                              BPF_SIZE(insn->code), BPF_WRITE,
diff --git a/queue-4.9/series b/queue-4.9/series
index 29776a1d4e32d263b1d00ef650fba03dab862dcc..6748886e487b60b8ac8c384262d3000e21354cba 100644 (file)
@@ -56,3 +56,11 @@ flow_dissector-properly-cap-thoff-field.patch
 perf-x86-amd-power-do-not-load-amd-power-module-on-amd-platforms.patch
 x86-microcode-intel-extend-bdw-late-loading-further-with-llc-size-check.patch
 hrtimer-reset-hrtimer-cpu-base-proper-on-cpu-hotplug.patch
+x86-bpf_jit-small-optimization-in-emit_bpf_tail_call.patch
+bpf-fix-bpf_tail_call-x64-jit.patch
+bpf-introduce-bpf_jit_always_on-config.patch
+bpf-arsh-is-not-supported-in-32-bit-alu-thus-reject-it.patch
+bpf-avoid-false-sharing-of-map-refcount-with-max_entries.patch
+bpf-fix-divides-by-zero.patch
+bpf-fix-32-bit-divide-by-zero.patch
+bpf-reject-stores-into-ctx-via-st-and-xadd.patch
diff --git a/queue-4.9/x86-bpf_jit-small-optimization-in-emit_bpf_tail_call.patch b/queue-4.9/x86-bpf_jit-small-optimization-in-emit_bpf_tail_call.patch
new file mode 100644 (file)
index 0000000..aadfd37
--- /dev/null
@@ -0,0 +1,70 @@
+From foo@baz Mon Jan 29 13:22:08 CET 2018
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Mon, 29 Jan 2018 02:48:54 +0100
+Subject: x86: bpf_jit: small optimization in emit_bpf_tail_call()
+To: gregkh@linuxfoundation.org
+Cc: ast@kernel.org, stable@vger.kernel.org, Eric Dumazet <edumazet@google.com>, Daniel Borkmann <daniel@iogearbox.net>, "David S . Miller" <davem@davemloft.net>
+Message-ID: <0f909080f2ef055783fc7b394e8111e0df3c4971.1517190206.git.daniel@iogearbox.net>
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ upstream commit 84ccac6e7854ebbfb56d2fc6d5bef9be49bb304c ]
+
+Saves 4 bytes by replacing the following instructions:
+
+lea rax, [rsi + rdx * 8 + offsetof(...)]
+mov rax, qword ptr [rax]
+cmp rax, 0
+
+with:
+
+mov rax, [rsi + rdx * 8 + offsetof(...)]
+test rax, rax
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Alexei Starovoitov <ast@kernel.org>
+Cc: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/net/bpf_jit_comp.c |    9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -281,7 +281,7 @@ static void emit_bpf_tail_call(u8 **ppro
+       EMIT4(0x48, 0x8B, 0x46,                   /* mov rax, qword ptr [rsi + 16] */
+             offsetof(struct bpf_array, map.max_entries));
+       EMIT3(0x48, 0x39, 0xD0);                  /* cmp rax, rdx */
+-#define OFFSET1 47 /* number of bytes to jump */
++#define OFFSET1 43 /* number of bytes to jump */
+       EMIT2(X86_JBE, OFFSET1);                  /* jbe out */
+       label1 = cnt;
+@@ -290,21 +290,20 @@ static void emit_bpf_tail_call(u8 **ppro
+        */
+       EMIT2_off32(0x8B, 0x85, -STACKSIZE + 36); /* mov eax, dword ptr [rbp - 516] */
+       EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
+-#define OFFSET2 36
++#define OFFSET2 32
+       EMIT2(X86_JA, OFFSET2);                   /* ja out */
+       label2 = cnt;
+       EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
+       EMIT2_off32(0x89, 0x85, -STACKSIZE + 36); /* mov dword ptr [rbp - 516], eax */
+       /* prog = array->ptrs[index]; */
+-      EMIT4_off32(0x48, 0x8D, 0x84, 0xD6,       /* lea rax, [rsi + rdx * 8 + offsetof(...)] */
++      EMIT4_off32(0x48, 0x8B, 0x84, 0xD6,       /* mov rax, [rsi + rdx * 8 + offsetof(...)] */
+                   offsetof(struct bpf_array, ptrs));
+-      EMIT3(0x48, 0x8B, 0x00);                  /* mov rax, qword ptr [rax] */
+       /* if (prog == NULL)
+        *   goto out;
+        */
+-      EMIT4(0x48, 0x83, 0xF8, 0x00);            /* cmp rax, 0 */
++      EMIT3(0x48, 0x85, 0xC0);                  /* test rax,rax */
+ #define OFFSET3 10
+       EMIT2(X86_JE, OFFSET3);                   /* je out */
+       label3 = cnt;