git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.9-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 13 Jan 2018 15:46:42 +0000 (16:46 +0100)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 13 Jan 2018 15:46:42 +0000 (16:46 +0100)
added patches:
bpf-move-fixup_bpf_calls-function.patch
bpf-refactor-fixup_bpf_calls.patch

queue-4.9/bpf-move-fixup_bpf_calls-function.patch [new file with mode: 0644]
queue-4.9/bpf-refactor-fixup_bpf_calls.patch [new file with mode: 0644]
queue-4.9/series

diff --git a/queue-4.9/bpf-move-fixup_bpf_calls-function.patch b/queue-4.9/bpf-move-fixup_bpf_calls-function.patch
new file mode 100644 (file)
index 0000000..cd52e85
--- /dev/null
@@ -0,0 +1,166 @@
+From e245c5c6a5656e4d61aa7bb08e9694fd6e5b2b9d Mon Sep 17 00:00:00 2001
+From: Alexei Starovoitov <ast@fb.com>
+Date: Wed, 15 Mar 2017 18:26:39 -0700
+Subject: bpf: move fixup_bpf_calls() function
+
+From: Alexei Starovoitov <ast@fb.com>
+
+commit e245c5c6a5656e4d61aa7bb08e9694fd6e5b2b9d upstream.
+
+no functional change.
+move fixup_bpf_calls() to verifier.c
+it's being refactored in the next patch
+
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Daniel Borkmann <daniel@iogearbox.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Cc: Jiri Slaby <jslaby@suse.cz>
+[backported to 4.9 - gregkh]
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ kernel/bpf/syscall.c  |   54 --------------------------------------------------
+ kernel/bpf/verifier.c |   54 ++++++++++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 54 insertions(+), 54 deletions(-)
+
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -565,57 +565,6 @@ void bpf_register_prog_type(struct bpf_p
+       list_add(&tl->list_node, &bpf_prog_types);
+ }
+-/* fixup insn->imm field of bpf_call instructions:
+- * if (insn->imm == BPF_FUNC_map_lookup_elem)
+- *      insn->imm = bpf_map_lookup_elem - __bpf_call_base;
+- * else if (insn->imm == BPF_FUNC_map_update_elem)
+- *      insn->imm = bpf_map_update_elem - __bpf_call_base;
+- * else ...
+- *
+- * this function is called after eBPF program passed verification
+- */
+-static void fixup_bpf_calls(struct bpf_prog *prog)
+-{
+-      const struct bpf_func_proto *fn;
+-      int i;
+-
+-      for (i = 0; i < prog->len; i++) {
+-              struct bpf_insn *insn = &prog->insnsi[i];
+-
+-              if (insn->code == (BPF_JMP | BPF_CALL)) {
+-                      /* we reach here when program has bpf_call instructions
+-                       * and it passed bpf_check(), means that
+-                       * ops->get_func_proto must have been supplied, check it
+-                       */
+-                      BUG_ON(!prog->aux->ops->get_func_proto);
+-
+-                      if (insn->imm == BPF_FUNC_get_route_realm)
+-                              prog->dst_needed = 1;
+-                      if (insn->imm == BPF_FUNC_get_prandom_u32)
+-                              bpf_user_rnd_init_once();
+-                      if (insn->imm == BPF_FUNC_tail_call) {
+-                              /* mark bpf_tail_call as different opcode
+-                               * to avoid conditional branch in
+-                               * interpeter for every normal call
+-                               * and to prevent accidental JITing by
+-                               * JIT compiler that doesn't support
+-                               * bpf_tail_call yet
+-                               */
+-                              insn->imm = 0;
+-                              insn->code |= BPF_X;
+-                              continue;
+-                      }
+-
+-                      fn = prog->aux->ops->get_func_proto(insn->imm);
+-                      /* all functions that have prototype and verifier allowed
+-                       * programs to call them, must be real in-kernel functions
+-                       */
+-                      BUG_ON(!fn->func);
+-                      insn->imm = fn->func - __bpf_call_base;
+-              }
+-      }
+-}
+-
+ /* drop refcnt on maps used by eBPF program and free auxilary data */
+ static void free_used_maps(struct bpf_prog_aux *aux)
+ {
+@@ -808,9 +757,6 @@ static int bpf_prog_load(union bpf_attr
+       if (err < 0)
+               goto free_used_maps;
+-      /* fixup BPF_CALL->imm field */
+-      fixup_bpf_calls(prog);
+-
+       /* eBPF program is ready to be JITed */
+       prog = bpf_prog_select_runtime(prog, &err);
+       if (err < 0)
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -3362,6 +3362,57 @@ static int convert_ctx_accesses(struct b
+       return 0;
+ }
++/* fixup insn->imm field of bpf_call instructions:
++ * if (insn->imm == BPF_FUNC_map_lookup_elem)
++ *      insn->imm = bpf_map_lookup_elem - __bpf_call_base;
++ * else if (insn->imm == BPF_FUNC_map_update_elem)
++ *      insn->imm = bpf_map_update_elem - __bpf_call_base;
++ * else ...
++ *
++ * this function is called after eBPF program passed verification
++ */
++static void fixup_bpf_calls(struct bpf_prog *prog)
++{
++      const struct bpf_func_proto *fn;
++      int i;
++
++      for (i = 0; i < prog->len; i++) {
++              struct bpf_insn *insn = &prog->insnsi[i];
++
++              if (insn->code == (BPF_JMP | BPF_CALL)) {
++                      /* we reach here when program has bpf_call instructions
++                       * and it passed bpf_check(), means that
++                       * ops->get_func_proto must have been supplied, check it
++                       */
++                      BUG_ON(!prog->aux->ops->get_func_proto);
++
++                      if (insn->imm == BPF_FUNC_get_route_realm)
++                              prog->dst_needed = 1;
++                      if (insn->imm == BPF_FUNC_get_prandom_u32)
++                              bpf_user_rnd_init_once();
++                      if (insn->imm == BPF_FUNC_tail_call) {
++                              /* mark bpf_tail_call as different opcode
++                               * to avoid conditional branch in
++                               * interpeter for every normal call
++                               * and to prevent accidental JITing by
++                               * JIT compiler that doesn't support
++                               * bpf_tail_call yet
++                               */
++                              insn->imm = 0;
++                              insn->code |= BPF_X;
++                              continue;
++                      }
++
++                      fn = prog->aux->ops->get_func_proto(insn->imm);
++                      /* all functions that have prototype and verifier allowed
++                       * programs to call them, must be real in-kernel functions
++                       */
++                      BUG_ON(!fn->func);
++                      insn->imm = fn->func - __bpf_call_base;
++              }
++      }
++}
++
+ static void free_states(struct bpf_verifier_env *env)
+ {
+       struct bpf_verifier_state_list *sl, *sln;
+@@ -3463,6 +3514,9 @@ skip_full_check:
+               /* program is valid, convert *(u32*)(ctx + off) accesses */
+               ret = convert_ctx_accesses(env);
++      if (ret == 0)
++              fixup_bpf_calls(env->prog);
++
+       if (log_level && log_len >= log_size - 1) {
+               BUG_ON(log_len >= log_size);
+               /* verifier log exceeded user supplied buffer */
diff --git a/queue-4.9/bpf-refactor-fixup_bpf_calls.patch b/queue-4.9/bpf-refactor-fixup_bpf_calls.patch
new file mode 100644 (file)
index 0000000..f51cebc
--- /dev/null
@@ -0,0 +1,124 @@
+From 79741b3bdec01a8628368fbcfccc7d189ed606cb Mon Sep 17 00:00:00 2001
+From: Alexei Starovoitov <ast@fb.com>
+Date: Wed, 15 Mar 2017 18:26:40 -0700
+Subject: bpf: refactor fixup_bpf_calls()
+
+From: Alexei Starovoitov <ast@fb.com>
+
+commit 79741b3bdec01a8628368fbcfccc7d189ed606cb upstream.
+
+reduce indent and make it iterate over instructions similar to
+convert_ctx_accesses(). Also convert hard BUG_ON into soft verifier error.
+
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Daniel Borkmann <daniel@iogearbox.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Cc: Jiri Slaby <jslaby@suse.cz>
+[Backported to 4.9.y - gregkh]
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/bpf/verifier.c |   75 +++++++++++++++++++++++---------------------------
+ 1 file changed, 35 insertions(+), 40 deletions(-)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -3362,55 +3362,50 @@ static int convert_ctx_accesses(struct b
+       return 0;
+ }
+-/* fixup insn->imm field of bpf_call instructions:
+- * if (insn->imm == BPF_FUNC_map_lookup_elem)
+- *      insn->imm = bpf_map_lookup_elem - __bpf_call_base;
+- * else if (insn->imm == BPF_FUNC_map_update_elem)
+- *      insn->imm = bpf_map_update_elem - __bpf_call_base;
+- * else ...
++/* fixup insn->imm field of bpf_call instructions
+  *
+  * this function is called after eBPF program passed verification
+  */
+-static void fixup_bpf_calls(struct bpf_prog *prog)
++static int fixup_bpf_calls(struct bpf_verifier_env *env)
+ {
++      struct bpf_prog *prog = env->prog;
++      struct bpf_insn *insn = prog->insnsi;
+       const struct bpf_func_proto *fn;
++      const int insn_cnt = prog->len;
+       int i;
+-      for (i = 0; i < prog->len; i++) {
+-              struct bpf_insn *insn = &prog->insnsi[i];
+-
+-              if (insn->code == (BPF_JMP | BPF_CALL)) {
+-                      /* we reach here when program has bpf_call instructions
+-                       * and it passed bpf_check(), means that
+-                       * ops->get_func_proto must have been supplied, check it
+-                       */
+-                      BUG_ON(!prog->aux->ops->get_func_proto);
+-
+-                      if (insn->imm == BPF_FUNC_get_route_realm)
+-                              prog->dst_needed = 1;
+-                      if (insn->imm == BPF_FUNC_get_prandom_u32)
+-                              bpf_user_rnd_init_once();
+-                      if (insn->imm == BPF_FUNC_tail_call) {
+-                              /* mark bpf_tail_call as different opcode
+-                               * to avoid conditional branch in
+-                               * interpeter for every normal call
+-                               * and to prevent accidental JITing by
+-                               * JIT compiler that doesn't support
+-                               * bpf_tail_call yet
+-                               */
+-                              insn->imm = 0;
+-                              insn->code |= BPF_X;
+-                              continue;
+-                      }
++      for (i = 0; i < insn_cnt; i++, insn++) {
++              if (insn->code != (BPF_JMP | BPF_CALL))
++                      continue;
++
++              if (insn->imm == BPF_FUNC_get_route_realm)
++                      prog->dst_needed = 1;
++              if (insn->imm == BPF_FUNC_get_prandom_u32)
++                      bpf_user_rnd_init_once();
++              if (insn->imm == BPF_FUNC_tail_call) {
++                      /* mark bpf_tail_call as different opcode to avoid
++                       * conditional branch in the interpeter for every normal
++                       * call and to prevent accidental JITing by JIT compiler
++                       * that doesn't support bpf_tail_call yet
++                       */
++                      insn->imm = 0;
++                      insn->code |= BPF_X;
++                      continue;
++              }
+-                      fn = prog->aux->ops->get_func_proto(insn->imm);
+-                      /* all functions that have prototype and verifier allowed
+-                       * programs to call them, must be real in-kernel functions
+-                       */
+-                      BUG_ON(!fn->func);
+-                      insn->imm = fn->func - __bpf_call_base;
++              fn = prog->aux->ops->get_func_proto(insn->imm);
++              /* all functions that have prototype and verifier allowed
++               * programs to call them, must be real in-kernel functions
++               */
++              if (!fn->func) {
++                      verbose("kernel subsystem misconfigured func %d\n",
++                              insn->imm);
++                      return -EFAULT;
+               }
++              insn->imm = fn->func - __bpf_call_base;
+       }
++
++      return 0;
+ }
+ static void free_states(struct bpf_verifier_env *env)
+@@ -3515,7 +3510,7 @@ skip_full_check:
+               ret = convert_ctx_accesses(env);
+       if (ret == 0)
+-              fixup_bpf_calls(env->prog);
++              ret = fixup_bpf_calls(env);
+       if (log_level && log_len >= log_size - 1) {
+               BUG_ON(log_len >= log_size);
index 8bf1a03fd7e2ff56f5d1e4689e07425472c3fa00..ad7d8aab42667fbc345397013b7487b4a17b18bb 100644 (file)
@@ -47,3 +47,5 @@ x86-microcode-intel-extend-bdw-late-loading-with-a-revision-check.patch
 kvm-x86-add-memory-barrier-on-vmcs-field-lookup.patch
 drm-vmwgfx-potential-off-by-one-in-vmw_view_add.patch
 kaiser-set-_page_nx-only-if-supported.patch
+bpf-move-fixup_bpf_calls-function.patch
+bpf-refactor-fixup_bpf_calls.patch