From: Greg Kroah-Hartman
Date: Fri, 3 Jun 2022 17:23:18 +0000 (+0200)
Subject: 5.18-stable patches
X-Git-Tag: v4.9.317~5
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=43cda9feb09eb302ace73723d90f5a6fa479d6e7;p=thirdparty%2Fkernel%2Fstable-queue.git

5.18-stable patches

added patches:
	bpf-check-ptr_to_mem-mem_rdonly-in-check_helper_mem_access.patch
	bpf-do-write-access-check-for-kfunc-and-global-func.patch
	bpf-enlarge-offset-check-value-to-int_max-in-bpf_skb_-load-store-_bytes.patch
	bpf-fill-new-bpf_prog_pack-with-illegal-instructions.patch
	bpf-fix-combination-of-jit-blinding-and-pointers-to-bpf-subprogs.patch
	bpf-fix-excessive-memory-allocation-in-stack_map_alloc.patch
	bpf-fix-potential-array-overflow-in-bpf_trampoline_get_progs.patch
	bpf-fix-usage-of-trace-rcu-in-local-storage.patch
	bpf-reject-writes-for-ptr_to_map_key-in-check_helper_mem_access.patch
	docs-submitting-patches-fix-crossref-to-the-canonical-patch-format.patch
	nfs-memory-allocation-failures-are-not-server-fatal-errors.patch
	nfsd-fix-possible-sleep-during-nfsd4_release_lockowner.patch
---

diff --git a/queue-5.18/bpf-check-ptr_to_mem-mem_rdonly-in-check_helper_mem_access.patch b/queue-5.18/bpf-check-ptr_to_mem-mem_rdonly-in-check_helper_mem_access.patch
new file mode 100644
index 00000000000..d89f2fe5380
--- /dev/null
+++ b/queue-5.18/bpf-check-ptr_to_mem-mem_rdonly-in-check_helper_mem_access.patch
@@ -0,0 +1,66 @@
+From 97e6d7dab1ca4648821c790a2b7913d6d5d549db Mon Sep 17 00:00:00 2001
+From: Kumar Kartikeya Dwivedi
+Date: Sat, 19 Mar 2022 13:38:24 +0530
+Subject: bpf: Check PTR_TO_MEM | MEM_RDONLY in check_helper_mem_access
+
+From: Kumar Kartikeya Dwivedi
+
+commit 97e6d7dab1ca4648821c790a2b7913d6d5d549db upstream.
+
+The commit being fixed was aiming to disallow users from incorrectly
+obtaining writable pointer to memory that is only meant to be read. This
+is enforced now using a MEM_RDONLY flag.
+
+For instance, in case of global percpu variables, when the BTF type is
+not struct (e.g. bpf_prog_active), the verifier marks register type as
+PTR_TO_MEM | MEM_RDONLY from bpf_this_cpu_ptr or bpf_per_cpu_ptr
+helpers. However, when passing such pointer to kfunc, global funcs, or
+BPF helpers, in check_helper_mem_access, there is no expectation
+MEM_RDONLY flag will be set, hence it is checked as pointer to writable
+memory. Later, verifier sets up argument type of global func as
+PTR_TO_MEM | PTR_MAYBE_NULL, so user can use a global func to get around
+the limitations imposed by this flag.
+
+This check will also cover global non-percpu variables that may be
+introduced in kernel BTF in future.
+
+Also, we update the log message for PTR_TO_BUF case to be similar to
+PTR_TO_MEM case, so that the reason for error is clear to user.
+
+Fixes: 34d3a78c681e ("bpf: Make per_cpu_ptr return rdonly PTR_TO_MEM.")
+Reviewed-by: Hao Luo
+Signed-off-by: Kumar Kartikeya Dwivedi
+Link: https://lore.kernel.org/r/20220319080827.73251-3-memxor@gmail.com
+Signed-off-by: Alexei Starovoitov
+Signed-off-by: Greg Kroah-Hartman
+---
+ kernel/bpf/verifier.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -4876,13 +4876,23 @@ static int check_helper_mem_access(struc
+ 		return check_map_access(env, regno, reg->off, access_size,
+ 					zero_size_allowed);
+ 	case PTR_TO_MEM:
++		if (type_is_rdonly_mem(reg->type)) {
++			if (meta && meta->raw_mode) {
++				verbose(env, "R%d cannot write into %s\n", regno,
++					reg_type_str(env, reg->type));
++				return -EACCES;
++			}
++		}
+ 		return check_mem_region_access(env, regno, reg->off,
+ 					       access_size, reg->mem_size,
+ 					       zero_size_allowed);
+ 	case PTR_TO_BUF:
+ 		if (type_is_rdonly_mem(reg->type)) {
+-			if (meta && meta->raw_mode)
++			if (meta && meta->raw_mode) {
++				verbose(env, "R%d cannot write into %s\n", regno,
++					reg_type_str(env, reg->type));
+ 				return -EACCES;
++			}
+ 
+ 			max_access = &env->prog->aux->max_rdonly_access;
+ 		} else {
diff --git a/queue-5.18/bpf-do-write-access-check-for-kfunc-and-global-func.patch b/queue-5.18/bpf-do-write-access-check-for-kfunc-and-global-func.patch
new file mode 100644
index 00000000000..7fc183b5862
--- /dev/null
+++ b/queue-5.18/bpf-do-write-access-check-for-kfunc-and-global-func.patch
@@ -0,0 +1,113 @@
+From be77354a3d7ebd4897ee18eca26dca6df9224c76 Mon Sep 17 00:00:00 2001
+From: Kumar Kartikeya Dwivedi
+Date: Sat, 19 Mar 2022 13:38:23 +0530
+Subject: bpf: Do write access check for kfunc and global func
+
+From: Kumar Kartikeya Dwivedi
+
+commit be77354a3d7ebd4897ee18eca26dca6df9224c76 upstream.
+
+When passing pointer to some map value to kfunc or global func, in
+verifier we are passing meta as NULL to various functions, which uses
+meta->raw_mode to check whether memory is being written to. Since some
+kfunc or global funcs may also write to memory pointers they receive as
+arguments, we must check for write access to memory. E.g. in some case
+map may be read only and this will be missed by current checks.
+
+However meta->raw_mode allows for uninitialized memory (e.g. on stack),
+since there is not enough info available through BTF, we must perform
+one call for read access (raw_mode = false), and one for write access
+(raw_mode = true).
+
+Fixes: e5069b9c23b3 ("bpf: Support pointers in global func args")
+Fixes: d583691c47dc ("bpf: Introduce mem, size argument pair support for kfunc")
+Signed-off-by: Kumar Kartikeya Dwivedi
+Link: https://lore.kernel.org/r/20220319080827.73251-2-memxor@gmail.com
+Signed-off-by: Alexei Starovoitov
+Signed-off-by: Greg Kroah-Hartman
+---
+ kernel/bpf/verifier.c | 44 +++++++++++++++++++++++++++++---------------
+ 1 file changed, 29 insertions(+), 15 deletions(-)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -4934,8 +4934,7 @@ static int check_mem_size_reg(struct bpf
+ 	 * out. Only upper bounds can be learned because retval is an
+ 	 * int type and negative retvals are allowed.
+ 	 */
+-	if (meta)
+-		meta->msize_max_value = reg->umax_value;
++	meta->msize_max_value = reg->umax_value;
+ 
+ 	/* The register is SCALAR_VALUE; the access check
+ 	 * happens using its boundaries.
+@@ -4978,24 +4977,33 @@ static int check_mem_size_reg(struct bpf
+ int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
+ 		  u32 regno, u32 mem_size)
+ {
++	bool may_be_null = type_may_be_null(reg->type);
++	struct bpf_reg_state saved_reg;
++	struct bpf_call_arg_meta meta;
++	int err;
++
+ 	if (register_is_null(reg))
+ 		return 0;
+ 
+-	if (type_may_be_null(reg->type)) {
+-		/* Assuming that the register contains a value check if the memory
+-		 * access is safe. Temporarily save and restore the register's state as
+-		 * the conversion shouldn't be visible to a caller.
+-		 */
+-		const struct bpf_reg_state saved_reg = *reg;
+-		int rv;
+-
++	memset(&meta, 0, sizeof(meta));
++	/* Assuming that the register contains a value check if the memory
++	 * access is safe. Temporarily save and restore the register's state as
++	 * the conversion shouldn't be visible to a caller.
++	 */
++	if (may_be_null) {
++		saved_reg = *reg;
+ 		mark_ptr_not_null_reg(reg);
+-		rv = check_helper_mem_access(env, regno, mem_size, true, NULL);
+-		*reg = saved_reg;
+-		return rv;
+ 	}
+ 
+-	return check_helper_mem_access(env, regno, mem_size, true, NULL);
++	err = check_helper_mem_access(env, regno, mem_size, true, &meta);
++	/* Check access for BPF_WRITE */
++	meta.raw_mode = true;
++	err = err ?: check_helper_mem_access(env, regno, mem_size, true, &meta);
++
++	if (may_be_null)
++		*reg = saved_reg;
++
++	return err;
+ }
+ 
+ int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
+@@ -5004,16 +5012,22 @@ int check_kfunc_mem_size_reg(struct bpf_
+ 	struct bpf_reg_state *mem_reg = &cur_regs(env)[regno - 1];
+ 	bool may_be_null = type_may_be_null(mem_reg->type);
+ 	struct bpf_reg_state saved_reg;
++	struct bpf_call_arg_meta meta;
+ 	int err;
+ 
+ 	WARN_ON_ONCE(regno < BPF_REG_2 || regno > BPF_REG_5);
+ 
++	memset(&meta, 0, sizeof(meta));
++
+ 	if (may_be_null) {
+ 		saved_reg = *mem_reg;
+ 		mark_ptr_not_null_reg(mem_reg);
+ 	}
+ 
+-	err = check_mem_size_reg(env, reg, regno, true, NULL);
++	err = check_mem_size_reg(env, reg, regno, true, &meta);
++	/* Check access for BPF_WRITE */
++	meta.raw_mode = true;
++	err = err ?: check_mem_size_reg(env, reg, regno, true, &meta);
+ 
+ 	if (may_be_null)
+ 		*mem_reg = saved_reg;
diff --git a/queue-5.18/bpf-enlarge-offset-check-value-to-int_max-in-bpf_skb_-load-store-_bytes.patch b/queue-5.18/bpf-enlarge-offset-check-value-to-int_max-in-bpf_skb_-load-store-_bytes.patch
new file mode 100644
index 00000000000..5c6eb7cc13e
--- /dev/null
+++ b/queue-5.18/bpf-enlarge-offset-check-value-to-int_max-in-bpf_skb_-load-store-_bytes.patch
@@ -0,0 +1,44 @@
+From 45969b4152c1752089351cd6836a42a566d49bcf Mon Sep 17 00:00:00 2001
+From: Liu Jian
+Date: Sat, 16 Apr 2022 18:57:59 +0800
+Subject: bpf: Enlarge offset check value to INT_MAX in bpf_skb_{load,store}_bytes
+
+From: Liu Jian
+
+commit 45969b4152c1752089351cd6836a42a566d49bcf upstream.
+
+The data length of skb frags + frag_list may be greater than 0xffff, and
+skb_header_pointer can not handle negative offset. So, here INT_MAX is used
+to check the validity of offset. Add the same change to the related function
+skb_store_bytes.
+
+Fixes: 05c74e5e53f6 ("bpf: add bpf_skb_load_bytes helper")
+Signed-off-by: Liu Jian
+Signed-off-by: Daniel Borkmann
+Acked-by: Song Liu
+Link: https://lore.kernel.org/bpf/20220416105801.88708-2-liujian56@huawei.com
+Signed-off-by: Greg Kroah-Hartman
+---
+ net/core/filter.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -1687,7 +1687,7 @@ BPF_CALL_5(bpf_skb_store_bytes, struct s
+ 
+ 	if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
+ 		return -EINVAL;
+-	if (unlikely(offset > 0xffff))
++	if (unlikely(offset > INT_MAX))
+ 		return -EFAULT;
+ 	if (unlikely(bpf_try_make_writable(skb, offset + len)))
+ 		return -EFAULT;
+@@ -1722,7 +1722,7 @@ BPF_CALL_4(bpf_skb_load_bytes, const str
+ {
+ 	void *ptr;
+ 
+-	if (unlikely(offset > 0xffff))
++	if (unlikely(offset > INT_MAX))
+ 		goto err_clear;
+ 
+ 	ptr = skb_header_pointer(skb, offset, len, to);
diff --git a/queue-5.18/bpf-fill-new-bpf_prog_pack-with-illegal-instructions.patch b/queue-5.18/bpf-fill-new-bpf_prog_pack-with-illegal-instructions.patch
new file mode 100644
index 00000000000..37b4feadd9b
--- /dev/null
+++ b/queue-5.18/bpf-fill-new-bpf_prog_pack-with-illegal-instructions.patch
@@ -0,0 +1,79 @@
+From d88bb5eed04ce50cc20e7f9282977841728be798 Mon Sep 17 00:00:00 2001
+From: Song Liu
+Date: Fri, 20 May 2022 16:57:51 -0700
+Subject: bpf: Fill new bpf_prog_pack with illegal instructions
+
+From: Song Liu
+
+commit d88bb5eed04ce50cc20e7f9282977841728be798 upstream.
+
+bpf_prog_pack enables sharing huge pages among multiple BPF programs.
+These pages are marked as executable before the JIT engine fill it with
+BPF programs. To make these pages safe, fill the hole bpf_prog_pack with
+illegal instructions before making it executable.
+
+Fixes: 57631054fae6 ("bpf: Introduce bpf_prog_pack allocator")
+Fixes: 33c9805860e5 ("bpf: Introduce bpf_jit_binary_pack_[alloc|finalize|free]")
+Reported-by: Linus Torvalds
+Signed-off-by: Song Liu
+Signed-off-by: Daniel Borkmann
+Link: https://lore.kernel.org/bpf/20220520235758.1858153-2-song@kernel.org
+Signed-off-by: Greg Kroah-Hartman
+---
+ kernel/bpf/core.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -873,7 +873,7 @@ static size_t select_bpf_prog_pack_size(
+ 	return size;
+ }
+ 
+-static struct bpf_prog_pack *alloc_new_pack(void)
++static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_insns)
+ {
+ 	struct bpf_prog_pack *pack;
+ 
+@@ -886,6 +886,7 @@ static struct bpf_prog_pack *alloc_new_p
+ 		kfree(pack);
+ 		return NULL;
+ 	}
++	bpf_fill_ill_insns(pack->ptr, bpf_prog_pack_size);
+ 	bitmap_zero(pack->bitmap, bpf_prog_pack_size / BPF_PROG_CHUNK_SIZE);
+ 	list_add_tail(&pack->list, &pack_list);
+ 
+@@ -895,7 +896,7 @@ static struct bpf_prog_pack *alloc_new_p
+ 	return pack;
+ }
+ 
+-static void *bpf_prog_pack_alloc(u32 size)
++static void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns)
+ {
+ 	unsigned int nbits = BPF_PROG_SIZE_TO_NBITS(size);
+ 	struct bpf_prog_pack *pack;
+@@ -910,6 +911,7 @@ static void *bpf_prog_pack_alloc(u32 siz
+ 		size = round_up(size, PAGE_SIZE);
+ 		ptr = module_alloc(size);
+ 		if (ptr) {
++			bpf_fill_ill_insns(ptr, size);
+ 			set_vm_flush_reset_perms(ptr);
+ 			set_memory_ro((unsigned long)ptr, size / PAGE_SIZE);
+ 			set_memory_x((unsigned long)ptr, size / PAGE_SIZE);
+@@ -923,7 +925,7 @@ static void *bpf_prog_pack_alloc(u32 siz
+ 			goto found_free_area;
+ 	}
+ 
+-	pack = alloc_new_pack();
++	pack = alloc_new_pack(bpf_fill_ill_insns);
+ 	if (!pack)
+ 		goto out;
+ 
+@@ -1102,7 +1104,7 @@ bpf_jit_binary_pack_alloc(unsigned int p
+ 
+ 	if (bpf_jit_charge_modmem(size))
+ 		return NULL;
+-	ro_header = bpf_prog_pack_alloc(size);
++	ro_header = bpf_prog_pack_alloc(size, bpf_fill_ill_insns);
+ 	if (!ro_header) {
+ 		bpf_jit_uncharge_modmem(size);
+ 		return NULL;
diff --git a/queue-5.18/bpf-fix-combination-of-jit-blinding-and-pointers-to-bpf-subprogs.patch b/queue-5.18/bpf-fix-combination-of-jit-blinding-and-pointers-to-bpf-subprogs.patch
new file mode 100644
index 00000000000..11dd2fbf819
--- /dev/null
+++ b/queue-5.18/bpf-fix-combination-of-jit-blinding-and-pointers-to-bpf-subprogs.patch
@@ -0,0 +1,62 @@
+From 4b6313cf99b0d51b49aeaea98ec76ca8161ecb80 Mon Sep 17 00:00:00 2001
+From: Alexei Starovoitov
+Date: Thu, 12 May 2022 18:10:24 -0700
+Subject: bpf: Fix combination of jit blinding and pointers to bpf subprogs.
+
+From: Alexei Starovoitov
+
+commit 4b6313cf99b0d51b49aeaea98ec76ca8161ecb80 upstream.
+
+The combination of jit blinding and pointers to bpf subprogs causes:
+[   36.989548] BUG: unable to handle page fault for address: 0000000100000001
+[   36.990342] #PF: supervisor instruction fetch in kernel mode
+[   36.990968] #PF: error_code(0x0010) - not-present page
+[   36.994859] RIP: 0010:0x100000001
+[   36.995209] Code: Unable to access opcode bytes at RIP 0xffffffd7.
+[   37.004091] Call Trace:
+[   37.004351]  <TASK>
+[   37.004576]  ? bpf_loop+0x4d/0x70
+[   37.004932]  ? bpf_prog_3899083f75e4c5de_F+0xe3/0x13b
+
+The jit blinding logic didn't recognize that ld_imm64 with an address
+of bpf subprogram is a special instruction and proceeded to randomize it.
+By itself it wouldn't have been an issue, but jit_subprogs() logic
+relies on two step process to JIT all subprogs and then JIT them
+again when addresses of all subprogs are known.
+Blinding process in the first JIT phase caused second JIT to miss
+adjustment of special ld_imm64.
+
+Fix this issue by ignoring special ld_imm64 instructions that don't have
+user controlled constants and shouldn't be blinded.
+
+Fixes: 69c087ba6225 ("bpf: Add bpf_for_each_map_elem() helper")
+Reported-by: Andrii Nakryiko
+Signed-off-by: Alexei Starovoitov
+Signed-off-by: Daniel Borkmann
+Acked-by: Andrii Nakryiko
+Acked-by: Martin KaFai Lau
+Link: https://lore.kernel.org/bpf/20220513011025.13344-1-alexei.starovoitov@gmail.com
+Signed-off-by: Greg Kroah-Hartman
+---
+ kernel/bpf/core.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -1436,6 +1436,16 @@ struct bpf_prog *bpf_jit_blind_constants
+ 	insn = clone->insnsi;
+ 
+ 	for (i = 0; i < insn_cnt; i++, insn++) {
++		if (bpf_pseudo_func(insn)) {
++			/* ld_imm64 with an address of bpf subprog is not
++			 * a user controlled constant. Don't randomize it,
++			 * since it will conflict with jit_subprogs() logic.
++			 */
++			insn++;
++			i++;
++			continue;
++		}
++
+ 		/* We temporarily need to hold the original ld64 insn
+ 		 * so that we can still access the first part in the
+ 		 * second blinding run.
diff --git a/queue-5.18/bpf-fix-excessive-memory-allocation-in-stack_map_alloc.patch b/queue-5.18/bpf-fix-excessive-memory-allocation-in-stack_map_alloc.patch
new file mode 100644
index 00000000000..dfce9d32225
--- /dev/null
+++ b/queue-5.18/bpf-fix-excessive-memory-allocation-in-stack_map_alloc.patch
@@ -0,0 +1,43 @@
+From b45043192b3e481304062938a6561da2ceea46a6 Mon Sep 17 00:00:00 2001
+From: Yuntao Wang
+Date: Thu, 7 Apr 2022 21:04:23 +0800
+Subject: bpf: Fix excessive memory allocation in stack_map_alloc()
+
+From: Yuntao Wang
+
+commit b45043192b3e481304062938a6561da2ceea46a6 upstream.
+
+The 'n_buckets * (value_size + sizeof(struct stack_map_bucket))' part of the
+allocated memory for 'smap' is never used after the memlock accounting was
+removed, thus get rid of it.
+
+[ Note, Daniel:
+
+Commit b936ca643ade ("bpf: rework memlock-based memory accounting for maps")
+moved `cost += n_buckets * (value_size + sizeof(struct stack_map_bucket))`
+up and therefore before the bpf_map_area_alloc() allocation, sigh. In a later
+step commit c85d69135a91 ("bpf: move memory size checks to bpf_map_charge_init()"),
+and the overflow checks of `cost >= U32_MAX - PAGE_SIZE` moved into
+bpf_map_charge_init(). And then 370868107bf6 ("bpf: Eliminate rlimit-based
+memory accounting for stackmap maps") finally removed the bpf_map_charge_init().
+Anyway, the original code did the allocation same way as /after/ this fix. ]
+
+Fixes: b936ca643ade ("bpf: rework memlock-based memory accounting for maps")
+Signed-off-by: Yuntao Wang
+Signed-off-by: Daniel Borkmann
+Link: https://lore.kernel.org/bpf/20220407130423.798386-1-ytcoode@gmail.com
+Signed-off-by: Greg Kroah-Hartman
+---
+ kernel/bpf/stackmap.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/kernel/bpf/stackmap.c
++++ b/kernel/bpf/stackmap.c
+@@ -100,7 +100,6 @@ static struct bpf_map *stack_map_alloc(u
+ 		return ERR_PTR(-E2BIG);
+ 
+ 	cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
+-	cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
+ 	smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr));
+ 	if (!smap)
+ 		return ERR_PTR(-ENOMEM);
diff --git a/queue-5.18/bpf-fix-potential-array-overflow-in-bpf_trampoline_get_progs.patch b/queue-5.18/bpf-fix-potential-array-overflow-in-bpf_trampoline_get_progs.patch
new file mode 100644
index 00000000000..fe7aa227589
--- /dev/null
+++ b/queue-5.18/bpf-fix-potential-array-overflow-in-bpf_trampoline_get_progs.patch
@@ -0,0 +1,75 @@
+From a2aa95b71c9bbec793b5c5fa50f0a80d882b3e8d Mon Sep 17 00:00:00 2001
+From: Yuntao Wang
+Date: Sat, 30 Apr 2022 21:08:03 +0800
+Subject: bpf: Fix potential array overflow in bpf_trampoline_get_progs()
+
+From: Yuntao Wang
+
+commit a2aa95b71c9bbec793b5c5fa50f0a80d882b3e8d upstream.
+
+The cnt value in the 'cnt >= BPF_MAX_TRAMP_PROGS' check does not
+include BPF_TRAMP_MODIFY_RETURN bpf programs, so the number of
+the attached BPF_TRAMP_MODIFY_RETURN bpf programs in a trampoline
+can exceed BPF_MAX_TRAMP_PROGS.
+
+When this happens, the assignment '*progs++ = aux->prog' in
+bpf_trampoline_get_progs() will cause progs array overflow as the
+progs field in the bpf_tramp_progs struct can only hold at most
+BPF_MAX_TRAMP_PROGS bpf programs.
+
+Fixes: 88fd9e5352fe ("bpf: Refactor trampoline update code")
+Signed-off-by: Yuntao Wang
+Link: https://lore.kernel.org/r/20220430130803.210624-1-ytcoode@gmail.com
+Signed-off-by: Alexei Starovoitov
+Signed-off-by: Greg Kroah-Hartman
+---
+ kernel/bpf/trampoline.c | 18 ++++++++++++------
+ 1 file changed, 12 insertions(+), 6 deletions(-)
+
+--- a/kernel/bpf/trampoline.c
++++ b/kernel/bpf/trampoline.c
+@@ -411,7 +411,7 @@ int bpf_trampoline_link_prog(struct bpf_
+ {
+ 	enum bpf_tramp_prog_type kind;
+ 	int err = 0;
+-	int cnt;
++	int cnt = 0, i;
+ 
+ 	kind = bpf_attach_type_to_tramp(prog);
+ 	mutex_lock(&tr->mutex);
+@@ -422,7 +422,10 @@ int bpf_trampoline_link_prog(struct bpf_
+ 		err = -EBUSY;
+ 		goto out;
+ 	}
+-	cnt = tr->progs_cnt[BPF_TRAMP_FENTRY] + tr->progs_cnt[BPF_TRAMP_FEXIT];
++
++	for (i = 0; i < BPF_TRAMP_MAX; i++)
++		cnt += tr->progs_cnt[i];
++
+ 	if (kind == BPF_TRAMP_REPLACE) {
+ 		/* Cannot attach extension if fentry/fexit are in use. */
+ 		if (cnt) {
+@@ -500,16 +503,19 @@ out:
+ 
+ void bpf_trampoline_put(struct bpf_trampoline *tr)
+ {
++	int i;
++
+ 	if (!tr)
+ 		return;
+ 	mutex_lock(&trampoline_mutex);
+ 	if (!refcount_dec_and_test(&tr->refcnt))
+ 		goto out;
+ 	WARN_ON_ONCE(mutex_is_locked(&tr->mutex));
+-	if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FENTRY])))
+-		goto out;
+-	if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FEXIT])))
+-		goto out;
++
++	for (i = 0; i < BPF_TRAMP_MAX; i++)
++		if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[i])))
++			goto out;
++
+ 	/* This code will be executed even when the last bpf_tramp_image
+ 	 * is alive. All progs are detached from the trampoline and the
+ 	 * trampoline image is patched with jmp into epilogue to skip
diff --git a/queue-5.18/bpf-fix-usage-of-trace-rcu-in-local-storage.patch b/queue-5.18/bpf-fix-usage-of-trace-rcu-in-local-storage.patch
new file mode 100644
index 00000000000..c3c39028f28
--- /dev/null
+++ b/queue-5.18/bpf-fix-usage-of-trace-rcu-in-local-storage.patch
@@ -0,0 +1,194 @@
+From dcf456c9a095a6e71f53d6f6f004133ee851ee70 Mon Sep 17 00:00:00 2001
+From: KP Singh
+Date: Mon, 18 Apr 2022 15:51:58 +0000
+Subject: bpf: Fix usage of trace RCU in local storage.
+
+From: KP Singh
+
+commit dcf456c9a095a6e71f53d6f6f004133ee851ee70 upstream.
+
+bpf_{sk,task,inode}_storage_free() do not need to use
+call_rcu_tasks_trace as no BPF program should be accessing the owner
+as it's being destroyed. The only other reader at this point is
+bpf_local_storage_map_free() which uses normal RCU.
+
+The only path that needs trace RCU are:
+
+* bpf_local_storage_{delete,update} helpers
+* map_{delete,update}_elem() syscalls
+
+Fixes: 0fe4b381a59e ("bpf: Allow bpf_local_storage to be used by sleepable programs")
+Signed-off-by: KP Singh
+Signed-off-by: Alexei Starovoitov
+Acked-by: Martin KaFai Lau
+Link: https://lore.kernel.org/bpf/20220418155158.2865678-1-kpsingh@kernel.org
+Signed-off-by: Greg Kroah-Hartman
+---
+ include/linux/bpf_local_storage.h | 4 ++--
+ kernel/bpf/bpf_inode_storage.c | 4 ++--
+ kernel/bpf/bpf_local_storage.c | 29 +++++++++++++++++++----------
+ kernel/bpf/bpf_task_storage.c | 4 ++--
+ net/core/bpf_sk_storage.c | 6 +++---
+ 5 files changed, 28 insertions(+), 19 deletions(-)
+
+--- a/include/linux/bpf_local_storage.h
++++ b/include/linux/bpf_local_storage.h
+@@ -143,9 +143,9 @@ void bpf_selem_link_storage_nolock(struc
+ 
+ bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
+ 				     struct bpf_local_storage_elem *selem,
+-				     bool uncharge_omem);
++				     bool uncharge_omem, bool use_trace_rcu);
+ 
+-void bpf_selem_unlink(struct bpf_local_storage_elem *selem);
++void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool use_trace_rcu);
+ 
+ void bpf_selem_link_map(struct bpf_local_storage_map *smap,
+ 			struct bpf_local_storage_elem *selem);
+--- a/kernel/bpf/bpf_inode_storage.c
++++ b/kernel/bpf/bpf_inode_storage.c
+@@ -90,7 +90,7 @@ void bpf_inode_storage_free(struct inode
+ 		 */
+ 		bpf_selem_unlink_map(selem);
+ 		free_inode_storage = bpf_selem_unlink_storage_nolock(
+-			local_storage, selem, false);
++			local_storage, selem, false, false);
+ 	}
+ 	raw_spin_unlock_bh(&local_storage->lock);
+ 	rcu_read_unlock();
+@@ -149,7 +149,7 @@ static int inode_storage_delete(struct i
+ 	if (!sdata)
+ 		return -ENOENT;
+ 
+-	bpf_selem_unlink(SELEM(sdata));
++	bpf_selem_unlink(SELEM(sdata), true);
+ 
+ 	return 0;
+ }
+--- a/kernel/bpf/bpf_local_storage.c
++++ b/kernel/bpf/bpf_local_storage.c
+@@ -106,7 +106,7 @@ static void bpf_selem_free_rcu(struct rc
+  */
+ bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
+ 				     struct bpf_local_storage_elem *selem,
+-				     bool uncharge_mem)
++				     bool uncharge_mem, bool use_trace_rcu)
+ {
+ 	struct bpf_local_storage_map *smap;
+ 	bool free_local_storage;
+@@ -150,11 +150,16 @@ bool bpf_selem_unlink_storage_nolock(str
+ 	    SDATA(selem))
+ 		RCU_INIT_POINTER(local_storage->cache[smap->cache_idx], NULL);
+ 
+-	call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_rcu);
++	if (use_trace_rcu)
++		call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_rcu);
++	else
++		kfree_rcu(selem, rcu);
++
+ 	return free_local_storage;
+ }
+ 
+-static void __bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem)
++static void __bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem,
++				       bool use_trace_rcu)
+ {
+ 	struct bpf_local_storage *local_storage;
+ 	bool free_local_storage = false;
+@@ -169,12 +174,16 @@ static void __bpf_selem_unlink_storage(s
+ 	raw_spin_lock_irqsave(&local_storage->lock, flags);
+ 	if (likely(selem_linked_to_storage(selem)))
+ 		free_local_storage = bpf_selem_unlink_storage_nolock(
+-			local_storage, selem, true);
++			local_storage, selem, true, use_trace_rcu);
+ 	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
+ 
+-	if (free_local_storage)
+-		call_rcu_tasks_trace(&local_storage->rcu,
++	if (free_local_storage) {
++		if (use_trace_rcu)
++			call_rcu_tasks_trace(&local_storage->rcu,
+ 				     bpf_local_storage_free_rcu);
++		else
++			kfree_rcu(local_storage, rcu);
++	}
+ }
+ 
+ void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
+@@ -214,14 +223,14 @@ void bpf_selem_link_map(struct bpf_local
+ 	raw_spin_unlock_irqrestore(&b->lock, flags);
+ }
+ 
+-void bpf_selem_unlink(struct bpf_local_storage_elem *selem)
++void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool use_trace_rcu)
+ {
+ 	/* Always unlink from map before unlinking from local_storage
+ 	 * because selem will be freed after successfully unlinked from
+ 	 * the local_storage.
+ 	 */
+ 	bpf_selem_unlink_map(selem);
+-	__bpf_selem_unlink_storage(selem);
++	__bpf_selem_unlink_storage(selem, use_trace_rcu);
+ }
+ 
+ struct bpf_local_storage_data *
+@@ -466,7 +475,7 @@ bpf_local_storage_update(void *owner, st
+ 		if (old_sdata) {
+ 			bpf_selem_unlink_map(SELEM(old_sdata));
+ 			bpf_selem_unlink_storage_nolock(local_storage, SELEM(old_sdata),
+-							false);
++							false, true);
+ 		}
+ 
+ unlock:
+@@ -548,7 +557,7 @@ void bpf_local_storage_map_free(struct b
+ 				migrate_disable();
+ 				__this_cpu_inc(*busy_counter);
+ 			}
+-			bpf_selem_unlink(selem);
++			bpf_selem_unlink(selem, false);
+ 			if (busy_counter) {
+ 				__this_cpu_dec(*busy_counter);
+ 				migrate_enable();
+--- a/kernel/bpf/bpf_task_storage.c
++++ b/kernel/bpf/bpf_task_storage.c
+@@ -102,7 +102,7 @@ void bpf_task_storage_free(struct task_s
+ 		 */
+ 		bpf_selem_unlink_map(selem);
+ 		free_task_storage = bpf_selem_unlink_storage_nolock(
+-			local_storage, selem, false);
++			local_storage, selem, false, false);
+ 	}
+ 	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
+ 	bpf_task_storage_unlock();
+@@ -192,7 +192,7 @@ static int task_storage_delete(struct ta
+ 	if (!sdata)
+ 		return -ENOENT;
+ 
+-	bpf_selem_unlink(SELEM(sdata));
++	bpf_selem_unlink(SELEM(sdata), true);
+ 
+ 	return 0;
+ }
+--- a/net/core/bpf_sk_storage.c
++++ b/net/core/bpf_sk_storage.c
+@@ -40,7 +40,7 @@ static int bpf_sk_storage_del(struct soc
+ 	if (!sdata)
+ 		return -ENOENT;
+ 
+-	bpf_selem_unlink(SELEM(sdata));
++	bpf_selem_unlink(SELEM(sdata), true);
+ 
+ 	return 0;
+ }
+@@ -75,8 +75,8 @@ void bpf_sk_storage_free(struct sock *sk
+ 		 * sk_storage.
+ 		 */
+ 		bpf_selem_unlink_map(selem);
+-		free_sk_storage = bpf_selem_unlink_storage_nolock(sk_storage,
+-								  selem, true);
++		free_sk_storage = bpf_selem_unlink_storage_nolock(
++			sk_storage, selem, true, false);
+ 	}
+ 	raw_spin_unlock_bh(&sk_storage->lock);
+ 	rcu_read_unlock();
diff --git a/queue-5.18/bpf-reject-writes-for-ptr_to_map_key-in-check_helper_mem_access.patch b/queue-5.18/bpf-reject-writes-for-ptr_to_map_key-in-check_helper_mem_access.patch
new file mode 100644
index 00000000000..049aa3b5ee1
--- /dev/null
+++ b/queue-5.18/bpf-reject-writes-for-ptr_to_map_key-in-check_helper_mem_access.patch
@@ -0,0 +1,36 @@
+From 7b3552d3f9f6897851fc453b5131a967167e43c2 Mon Sep 17 00:00:00 2001
+From: Kumar Kartikeya Dwivedi
+Date: Sat, 19 Mar 2022 13:38:25 +0530
+Subject: bpf: Reject writes for PTR_TO_MAP_KEY in check_helper_mem_access
+
+From: Kumar Kartikeya Dwivedi
+
+commit 7b3552d3f9f6897851fc453b5131a967167e43c2 upstream.
+
+It is not permitted to write to PTR_TO_MAP_KEY, but the current code in
+check_helper_mem_access would allow for it, reject this case as well, as
+helpers taking ARG_PTR_TO_UNINIT_MEM also take PTR_TO_MAP_KEY.
+
+Fixes: 69c087ba6225 ("bpf: Add bpf_for_each_map_elem() helper")
+Signed-off-by: Kumar Kartikeya Dwivedi
+Link: https://lore.kernel.org/r/20220319080827.73251-4-memxor@gmail.com
+Signed-off-by: Alexei Starovoitov
+Signed-off-by: Greg Kroah-Hartman
+---
+ kernel/bpf/verifier.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -4861,6 +4861,11 @@ static int check_helper_mem_access(struc
+ 		return check_packet_access(env, regno, reg->off, access_size,
+ 					   zero_size_allowed);
+ 	case PTR_TO_MAP_KEY:
++		if (meta && meta->raw_mode) {
++			verbose(env, "R%d cannot write into %s\n", regno,
++				reg_type_str(env, reg->type));
++			return -EACCES;
++		}
+ 		return check_mem_region_access(env, regno, reg->off, access_size,
+ 					       reg->map_ptr->key_size, false);
+ 	case PTR_TO_MAP_VALUE:
diff --git a/queue-5.18/docs-submitting-patches-fix-crossref-to-the-canonical-patch-format.patch b/queue-5.18/docs-submitting-patches-fix-crossref-to-the-canonical-patch-format.patch
new file mode 100644
index 00000000000..7de57806169
--- /dev/null
+++ b/queue-5.18/docs-submitting-patches-fix-crossref-to-the-canonical-patch-format.patch
@@ -0,0 +1,43 @@
+From 6d5aa418b3bd42cdccc36e94ee199af423ef7c84 Mon Sep 17 00:00:00 2001
+From: Akira Yokosawa
+Date: Wed, 27 Apr 2022 18:28:39 +0900
+Subject: docs: submitting-patches: Fix crossref to 'The canonical patch format'
+
+From: Akira Yokosawa
+
+commit 6d5aa418b3bd42cdccc36e94ee199af423ef7c84 upstream.
+
+The reference to `explicit_in_reply_to` is pointless as when the
+reference was added in the form of "#15" [1], Section 15) was "The
+canonical patch format".
+The reference of "#15" had not been properly updated in a couple of
+reorganizations during the plain-text SubmittingPatches era.
+
+Fix it by using `the_canonical_patch_format`.
+
+[1]: 2ae19acaa50a ("Documentation: Add "how to write a good patch summary" to SubmittingPatches")
+
+Signed-off-by: Akira Yokosawa
+Fixes: 5903019b2a5e ("Documentation/SubmittingPatches: convert it to ReST markup")
+Fixes: 9b2c76777acc ("Documentation/SubmittingPatches: enrich the Sphinx output")
+Cc: Jonathan Corbet
+Cc: Mauro Carvalho Chehab
+Cc: stable@vger.kernel.org # v4.9+
+Link: https://lore.kernel.org/r/64e105a5-50be-23f2-6cae-903a2ea98e18@gmail.com
+Signed-off-by: Jonathan Corbet
+Signed-off-by: Greg Kroah-Hartman
+---
+ Documentation/process/submitting-patches.rst | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/Documentation/process/submitting-patches.rst
++++ b/Documentation/process/submitting-patches.rst
+@@ -77,7 +77,7 @@ as you intend it to.
+ 
+ The maintainer will thank you if you write your patch description in a
+ form which can be easily pulled into Linux's source code management
+-system, ``git``, as a "commit log". See :ref:`explicit_in_reply_to`.
++system, ``git``, as a "commit log". See :ref:`the_canonical_patch_format`.
+ 
+ Solve only one problem per patch.  If your description starts to get
+ long, that's a sign that you probably need to split up your patch.
diff --git a/queue-5.18/nfs-memory-allocation-failures-are-not-server-fatal-errors.patch b/queue-5.18/nfs-memory-allocation-failures-are-not-server-fatal-errors.patch
new file mode 100644
index 00000000000..f7086fe830b
--- /dev/null
+++ b/queue-5.18/nfs-memory-allocation-failures-are-not-server-fatal-errors.patch
@@ -0,0 +1,32 @@
+From 452284407c18d8a522c3039339b1860afa0025a8 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust
+Date: Sat, 14 May 2022 10:08:10 -0400
+Subject: NFS: Memory allocation failures are not server fatal errors
+
+From: Trond Myklebust
+
+commit 452284407c18d8a522c3039339b1860afa0025a8 upstream.
+
+We need to filter out ENOMEM in nfs_error_is_fatal_on_server(), because
+running out of memory on our client is not a server error.
+
+Reported-by: Olga Kornievskaia
+Fixes: 2dc23afffbca ("NFS: ENOMEM should also be a fatal error.")
+Cc: stable@vger.kernel.org
+Signed-off-by: Trond Myklebust
+Signed-off-by: Anna Schumaker
+Signed-off-by: Greg Kroah-Hartman
+---
+ fs/nfs/internal.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -841,6 +841,7 @@ static inline bool nfs_error_is_fatal_on
+ 	case 0:
+ 	case -ERESTARTSYS:
+ 	case -EINTR:
++	case -ENOMEM:
+ 		return false;
+ 	}
+ 	return nfs_error_is_fatal(err);
diff --git a/queue-5.18/nfsd-fix-possible-sleep-during-nfsd4_release_lockowner.patch b/queue-5.18/nfsd-fix-possible-sleep-during-nfsd4_release_lockowner.patch
new file mode 100644
index 00000000000..d707b3982c4
--- /dev/null
+++ b/queue-5.18/nfsd-fix-possible-sleep-during-nfsd4_release_lockowner.patch
@@ -0,0 +1,51 @@
+From ce3c4ad7f4ce5db7b4f08a1e237d8dd94b39180b Mon Sep 17 00:00:00 2001
+From: Chuck Lever
+Date: Sat, 21 May 2022 19:06:13 -0400
+Subject: NFSD: Fix possible sleep during nfsd4_release_lockowner()
+
+From: Chuck Lever
+
+commit ce3c4ad7f4ce5db7b4f08a1e237d8dd94b39180b upstream.
+
+nfsd4_release_lockowner() holds clp->cl_lock when it calls
+check_for_locks(). However, check_for_locks() calls nfsd_file_get()
+/ nfsd_file_put() to access the backing inode's flc_posix list, and
+nfsd_file_put() can sleep if the inode was recently removed.
+
+Let's instead rely on the stateowner's reference count to gate
+whether the release is permitted. This should be a reliable
+indication of locks-in-use since file lock operations and
+->lm_get_owner take appropriate references, which are released
+appropriately when file locks are removed.
+
+Reported-by: Dai Ngo
+Signed-off-by: Chuck Lever
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman
+---
+ fs/nfsd/nfs4state.c | 12 ++++--------
+ 1 file changed, 4 insertions(+), 8 deletions(-)
+
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -7330,16 +7330,12 @@ nfsd4_release_lockowner(struct svc_rqst
+ 		if (sop->so_is_open_owner || !same_owner_str(sop, owner))
+ 			continue;
+ 
+-		/* see if there are still any locks associated with it */
+-		lo = lockowner(sop);
+-		list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
+-			if (check_for_locks(stp->st_stid.sc_file, lo)) {
+-				status = nfserr_locks_held;
+-				spin_unlock(&clp->cl_lock);
+-				return status;
+-			}
++		if (atomic_read(&sop->so_count) != 1) {
++			spin_unlock(&clp->cl_lock);
++			return nfserr_locks_held;
+ 		}
+ 
++		lo = lockowner(sop);
+ 		nfs4_get_stateowner(sop);
+ 		break;
+ 	}
diff --git a/queue-5.18/series b/queue-5.18/series
index eac0a7e6743..af2566b274f 100644
--- a/queue-5.18/series
+++ b/queue-5.18/series
@@ -53,3 +53,15 @@ media-i2c-imx412-fix-reset-gpio-polarity.patch
 media-i2c-imx412-fix-power_off-ordering.patch
 tpm-fix-buffer-access-in-tpm2_get_tpm_pt.patch
 tpm-ibmvtpm-correct-the-return-value-in-tpm_ibmvtpm_probe.patch
+docs-submitting-patches-fix-crossref-to-the-canonical-patch-format.patch
+nfs-memory-allocation-failures-are-not-server-fatal-errors.patch
+nfsd-fix-possible-sleep-during-nfsd4_release_lockowner.patch
+bpf-fill-new-bpf_prog_pack-with-illegal-instructions.patch
+bpf-fix-potential-array-overflow-in-bpf_trampoline_get_progs.patch
+bpf-fix-combination-of-jit-blinding-and-pointers-to-bpf-subprogs.patch
+bpf-enlarge-offset-check-value-to-int_max-in-bpf_skb_-load-store-_bytes.patch
+bpf-fix-usage-of-trace-rcu-in-local-storage.patch
+bpf-fix-excessive-memory-allocation-in-stack_map_alloc.patch
+bpf-reject-writes-for-ptr_to_map_key-in-check_helper_mem_access.patch
+bpf-check-ptr_to_mem-mem_rdonly-in-check_helper_mem_access.patch
+bpf-do-write-access-check-for-kfunc-and-global-func.patch