From: Greg Kroah-Hartman Date: Thu, 17 Feb 2022 19:34:30 +0000 (+0100) Subject: 5.16-stable patches X-Git-Tag: v4.9.303~82 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=2fcac6bfe7f4cd58c174ed3754b32af8f4012205;p=thirdparty%2Fkernel%2Fstable-queue.git 5.16-stable patches added patches: bpf-add-mem_rdonly-for-helper-args-that-are-pointers-to-rdonly-mem.patch bpf-convert-ptr_to_mem_or_null-to-composable-types.patch bpf-introduce-composable-reg-ret-and-arg-types.patch bpf-introduce-mem_rdonly-flag.patch bpf-make-per_cpu_ptr-return-rdonly-ptr_to_mem.patch bpf-replace-arg_xxx_or_null-with-arg_xxx-ptr_maybe_null.patch bpf-replace-ptr_to_xxx_or_null-with-ptr_to_xxx-ptr_maybe_null.patch bpf-replace-ret_xxx_or_null-with-ret_xxx-ptr_maybe_null.patch bpf-selftests-test-ptr_to_rdonly_mem.patch btrfs-defrag-don-t-try-to-defrag-extents-which-are-under-writeback.patch btrfs-don-t-hold-cpu-for-too-long-when-defragging-a-file.patch btrfs-send-in-case-of-io-error-log-it.patch drm-nouveau-pmu-gm200-use-alternate-falcon-reset-sequence.patch hid-add-support-for-ugtablet-wp5540.patch hid-amd_sfh-add-illuminance-mask-to-limit-als-max-value.patch hid-amd_sfh-correct-the-structure-field-name.patch hid-amd_sfh-increase-sensor-command-timeout.patch hid-apple-set-the-tilde-quirk-flag-on-the-wellspring-5-and-later.patch hid-i2c-hid-goodix-fix-a-lockdep-splat.patch mm-don-t-try-to-numa-migrate-cow-pages-that-have-other-uses.patch mmc-block-fix-read-single-on-recovery-logic.patch parisc-add-ioread64_lo_hi-and-iowrite64_lo_hi.patch parisc-drop-__init-from-map_pages-declaration.patch parisc-fix-data-tlb-miss-in-sba_unmap_sg.patch parisc-fix-sglist-access-in-ccio-dma.c.patch parisc-show-error-if-wrong-32-64-bit-compiler-is-being-used.patch pci-hv-fix-numa-node-assignment-when-kernel-boots-with-custom-numa-topology.patch revert-svm-add-warning-message-for-avic-ipi-invalid-target.patch selftests-kvm-remove-absent-target-file.patch serial-parisc-gsc-fix-build-when-iosapic-is-not-set.patch --- diff --git a/queue-5.16/bpf-add-mem_rdonly-for-helper-args-that-are-pointers-to-rdonly-mem.patch b/queue-5.16/bpf-add-mem_rdonly-for-helper-args-that-are-pointers-to-rdonly-mem.patch new file mode 100644 index 00000000000..7ae252ff12c --- /dev/null +++ b/queue-5.16/bpf-add-mem_rdonly-for-helper-args-that-are-pointers-to-rdonly-mem.patch @@ -0,0 +1,566 @@ +From foo@baz Thu Feb 17 08:07:01 PM CET 2022 +From: Hao Luo +Date: Wed, 16 Feb 2022 14:52:08 -0800 +Subject: bpf: Add MEM_RDONLY for helper args that are pointers to rdonly mem. +To: Greg KH +Cc: Alexei Starovoitov , Andrii Nakryiko , Daniel Borkmann , laura@labbott.name, stable@vger.kernel.org, Hao Luo +Message-ID: <20220216225209.2196865-9-haoluo@google.com> + +From: Hao Luo + +commit 216e3cd2f28dbbf1fe86848e0e29e6693b9f0a20 upstream. + +Some helper functions may modify its arguments, for example, +bpf_d_path, bpf_get_stack etc. Previously, their argument types +were marked as ARG_PTR_TO_MEM, which is compatible with read-only +mem types, such as PTR_TO_RDONLY_BUF. Therefore it's legitimate, +but technically incorrect, to modify a read-only memory by passing +it into one of such helper functions. + +This patch tags the bpf_args compatible with immutable memory with +MEM_RDONLY flag. The arguments that don't have this flag will be +only compatible with mutable memory types, preventing the helper +from modifying a read-only memory. The bpf_args that have +MEM_RDONLY are compatible with both mutable memory and immutable +memory. 
+ +Signed-off-by: Hao Luo +Signed-off-by: Alexei Starovoitov +Link: https://lore.kernel.org/bpf/20211217003152.48334-9-haoluo@google.com +Cc: stable@vger.kernel.org # 5.16.x +Signed-off-by: Greg Kroah-Hartman +--- + include/linux/bpf.h | 4 ++ + kernel/bpf/btf.c | 2 - + kernel/bpf/cgroup.c | 2 - + kernel/bpf/helpers.c | 8 ++--- + kernel/bpf/ringbuf.c | 2 - + kernel/bpf/syscall.c | 2 - + kernel/bpf/verifier.c | 20 ++++++++++++-- + kernel/trace/bpf_trace.c | 26 +++++++++---------- + net/core/filter.c | 64 +++++++++++++++++++++++------------------------ + 9 files changed, 73 insertions(+), 57 deletions(-) + +--- a/include/linux/bpf.h ++++ b/include/linux/bpf.h +@@ -311,7 +311,9 @@ enum bpf_type_flag { + /* PTR may be NULL. */ + PTR_MAYBE_NULL = BIT(0 + BPF_BASE_TYPE_BITS), + +- /* MEM is read-only. */ ++ /* MEM is read-only. When applied on bpf_arg, it indicates the arg is ++ * compatible with both mutable and immutable memory. ++ */ + MEM_RDONLY = BIT(1 + BPF_BASE_TYPE_BITS), + + __BPF_TYPE_LAST_FLAG = MEM_RDONLY, +--- a/kernel/bpf/btf.c ++++ b/kernel/bpf/btf.c +@@ -6337,7 +6337,7 @@ const struct bpf_func_proto bpf_btf_find + .func = bpf_btf_find_by_name_kind, + .gpl_only = false, + .ret_type = RET_INTEGER, +- .arg1_type = ARG_PTR_TO_MEM, ++ .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg2_type = ARG_CONST_SIZE, + .arg3_type = ARG_ANYTHING, + .arg4_type = ARG_ANYTHING, +--- a/kernel/bpf/cgroup.c ++++ b/kernel/bpf/cgroup.c +@@ -1789,7 +1789,7 @@ static const struct bpf_func_proto bpf_s + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, +- .arg2_type = ARG_PTR_TO_MEM, ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg3_type = ARG_CONST_SIZE, + }; + +--- a/kernel/bpf/helpers.c ++++ b/kernel/bpf/helpers.c +@@ -530,7 +530,7 @@ const struct bpf_func_proto bpf_strtol_p + .func = bpf_strtol, + .gpl_only = false, + .ret_type = RET_INTEGER, +- .arg1_type = ARG_PTR_TO_MEM, ++ .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg2_type = ARG_CONST_SIZE, + .arg3_type = ARG_ANYTHING, + .arg4_type = ARG_PTR_TO_LONG, +@@ -558,7 +558,7 @@ const struct bpf_func_proto bpf_strtoul_ + .func = bpf_strtoul, + .gpl_only = false, + .ret_type = RET_INTEGER, +- .arg1_type = ARG_PTR_TO_MEM, ++ .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg2_type = ARG_CONST_SIZE, + .arg3_type = ARG_ANYTHING, + .arg4_type = ARG_PTR_TO_LONG, +@@ -630,7 +630,7 @@ const struct bpf_func_proto bpf_event_ou + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_CONST_MAP_PTR, + .arg3_type = ARG_ANYTHING, +- .arg4_type = ARG_PTR_TO_MEM, ++ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg5_type = ARG_CONST_SIZE_OR_ZERO, + }; + +@@ -1011,7 +1011,7 @@ const struct bpf_func_proto bpf_snprintf + .arg1_type = ARG_PTR_TO_MEM_OR_NULL, + .arg2_type = ARG_CONST_SIZE_OR_ZERO, + .arg3_type = ARG_PTR_TO_CONST_STR, +- .arg4_type = ARG_PTR_TO_MEM_OR_NULL, ++ .arg4_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY, + .arg5_type = ARG_CONST_SIZE_OR_ZERO, + }; + +--- a/kernel/bpf/ringbuf.c ++++ b/kernel/bpf/ringbuf.c +@@ -444,7 +444,7 @@ const struct bpf_func_proto bpf_ringbuf_ + .func = bpf_ringbuf_output, + .ret_type = RET_INTEGER, + .arg1_type = ARG_CONST_MAP_PTR, +- .arg2_type = ARG_PTR_TO_MEM, ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg3_type = ARG_CONST_SIZE_OR_ZERO, + .arg4_type = ARG_ANYTHING, + }; +--- a/kernel/bpf/syscall.c ++++ b/kernel/bpf/syscall.c +@@ -4772,7 +4772,7 @@ static const struct bpf_func_proto bpf_s + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_ANYTHING, +- .arg2_type = ARG_PTR_TO_MEM, 
++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg3_type = ARG_CONST_SIZE, + }; + +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -5060,7 +5060,6 @@ static const struct bpf_reg_types mem_ty + PTR_TO_MAP_VALUE, + PTR_TO_MEM, + PTR_TO_BUF, +- PTR_TO_BUF | MEM_RDONLY, + }, + }; + +@@ -5130,6 +5129,21 @@ static int check_reg_type(struct bpf_ver + return -EFAULT; + } + ++ /* ARG_PTR_TO_MEM + RDONLY is compatible with PTR_TO_MEM and PTR_TO_MEM + RDONLY, ++ * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM and NOT with PTR_TO_MEM + RDONLY ++ * ++ * Same for MAYBE_NULL: ++ * ++ * ARG_PTR_TO_MEM + MAYBE_NULL is compatible with PTR_TO_MEM and PTR_TO_MEM + MAYBE_NULL, ++ * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM but NOT with PTR_TO_MEM + MAYBE_NULL ++ * ++ * Therefore we fold these flags depending on the arg_type before comparison. ++ */ ++ if (arg_type & MEM_RDONLY) ++ type &= ~MEM_RDONLY; ++ if (arg_type & PTR_MAYBE_NULL) ++ type &= ~PTR_MAYBE_NULL; ++ + for (i = 0; i < ARRAY_SIZE(compatible->types); i++) { + expected = compatible->types[i]; + if (expected == NOT_INIT) +@@ -5139,14 +5153,14 @@ static int check_reg_type(struct bpf_ver + goto found; + } + +- verbose(env, "R%d type=%s expected=", regno, reg_type_str(env, type)); ++ verbose(env, "R%d type=%s expected=", regno, reg_type_str(env, reg->type)); + for (j = 0; j + 1 < i; j++) + verbose(env, "%s, ", reg_type_str(env, compatible->types[j])); + verbose(env, "%s\n", reg_type_str(env, compatible->types[j])); + return -EACCES; + + found: +- if (type == PTR_TO_BTF_ID) { ++ if (reg->type == PTR_TO_BTF_ID) { + if (!arg_btf_id) { + if (!compatible->btf_id) { + verbose(env, "verifier internal error: missing arg compatible BTF ID\n"); +--- a/kernel/trace/bpf_trace.c ++++ b/kernel/trace/bpf_trace.c +@@ -345,7 +345,7 @@ static const struct bpf_func_proto bpf_p + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_ANYTHING, +- .arg2_type = ARG_PTR_TO_MEM, ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg3_type = ARG_CONST_SIZE, + }; + +@@ -394,7 +394,7 @@ static const struct bpf_func_proto bpf_t + .func = bpf_trace_printk, + .gpl_only = true, + .ret_type = RET_INTEGER, +- .arg1_type = ARG_PTR_TO_MEM, ++ .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg2_type = ARG_CONST_SIZE, + }; + +@@ -450,9 +450,9 @@ static const struct bpf_func_proto bpf_t + .func = bpf_trace_vprintk, + .gpl_only = true, + .ret_type = RET_INTEGER, +- .arg1_type = ARG_PTR_TO_MEM, ++ .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg2_type = ARG_CONST_SIZE, +- .arg3_type = ARG_PTR_TO_MEM_OR_NULL, ++ .arg3_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY, + .arg4_type = ARG_CONST_SIZE_OR_ZERO, + }; + +@@ -492,9 +492,9 @@ static const struct bpf_func_proto bpf_s + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_BTF_ID, + .arg1_btf_id = &btf_seq_file_ids[0], +- .arg2_type = ARG_PTR_TO_MEM, ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg3_type = ARG_CONST_SIZE, +- .arg4_type = ARG_PTR_TO_MEM_OR_NULL, ++ .arg4_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY, + .arg5_type = ARG_CONST_SIZE_OR_ZERO, + }; + +@@ -509,7 +509,7 @@ static const struct bpf_func_proto bpf_s + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_BTF_ID, + .arg1_btf_id = &btf_seq_file_ids[0], +- .arg2_type = ARG_PTR_TO_MEM, ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg3_type = ARG_CONST_SIZE_OR_ZERO, + }; + +@@ -533,7 +533,7 @@ static const struct bpf_func_proto bpf_s + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_BTF_ID, + .arg1_btf_id = 
&btf_seq_file_ids[0], +- .arg2_type = ARG_PTR_TO_MEM, ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg3_type = ARG_CONST_SIZE_OR_ZERO, + .arg4_type = ARG_ANYTHING, + }; +@@ -694,7 +694,7 @@ static const struct bpf_func_proto bpf_p + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_CONST_MAP_PTR, + .arg3_type = ARG_ANYTHING, +- .arg4_type = ARG_PTR_TO_MEM, ++ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg5_type = ARG_CONST_SIZE_OR_ZERO, + }; + +@@ -1004,7 +1004,7 @@ const struct bpf_func_proto bpf_snprintf + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_MEM, + .arg2_type = ARG_CONST_SIZE, +- .arg3_type = ARG_PTR_TO_MEM, ++ .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg4_type = ARG_CONST_SIZE, + .arg5_type = ARG_ANYTHING, + }; +@@ -1285,7 +1285,7 @@ static const struct bpf_func_proto bpf_p + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_CONST_MAP_PTR, + .arg3_type = ARG_ANYTHING, +- .arg4_type = ARG_PTR_TO_MEM, ++ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg5_type = ARG_CONST_SIZE_OR_ZERO, + }; + +@@ -1507,7 +1507,7 @@ static const struct bpf_func_proto bpf_p + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_CONST_MAP_PTR, + .arg3_type = ARG_ANYTHING, +- .arg4_type = ARG_PTR_TO_MEM, ++ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg5_type = ARG_CONST_SIZE_OR_ZERO, + }; + +@@ -1561,7 +1561,7 @@ static const struct bpf_func_proto bpf_g + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, +- .arg2_type = ARG_PTR_TO_MEM, ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg3_type = ARG_CONST_SIZE_OR_ZERO, + .arg4_type = ARG_ANYTHING, + }; +--- a/net/core/filter.c ++++ b/net/core/filter.c +@@ -1713,7 +1713,7 @@ static const struct bpf_func_proto bpf_s + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_ANYTHING, +- .arg3_type = ARG_PTR_TO_MEM, ++ .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg4_type = ARG_CONST_SIZE, + .arg5_type = ARG_ANYTHING, + }; +@@ -2018,9 +2018,9 @@ static const struct bpf_func_proto bpf_c + .gpl_only = false, + .pkt_access = true, + .ret_type = RET_INTEGER, +- .arg1_type = ARG_PTR_TO_MEM_OR_NULL, ++ .arg1_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY, + .arg2_type = ARG_CONST_SIZE_OR_ZERO, +- .arg3_type = ARG_PTR_TO_MEM_OR_NULL, ++ .arg3_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY, + .arg4_type = ARG_CONST_SIZE_OR_ZERO, + .arg5_type = ARG_ANYTHING, + }; +@@ -2541,7 +2541,7 @@ static const struct bpf_func_proto bpf_r + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_ANYTHING, +- .arg2_type = ARG_PTR_TO_MEM_OR_NULL, ++ .arg2_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY, + .arg3_type = ARG_CONST_SIZE_OR_ZERO, + .arg4_type = ARG_ANYTHING, + }; +@@ -4174,7 +4174,7 @@ static const struct bpf_func_proto bpf_s + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_CONST_MAP_PTR, + .arg3_type = ARG_ANYTHING, +- .arg4_type = ARG_PTR_TO_MEM, ++ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg5_type = ARG_CONST_SIZE_OR_ZERO, + }; + +@@ -4188,7 +4188,7 @@ const struct bpf_func_proto bpf_skb_outp + .arg1_btf_id = &bpf_skb_output_btf_ids[0], + .arg2_type = ARG_CONST_MAP_PTR, + .arg3_type = ARG_ANYTHING, +- .arg4_type = ARG_PTR_TO_MEM, ++ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg5_type = ARG_CONST_SIZE_OR_ZERO, + }; + +@@ -4371,7 +4371,7 @@ static const struct bpf_func_proto bpf_s + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, +- .arg2_type = ARG_PTR_TO_MEM, ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg3_type = ARG_CONST_SIZE, + .arg4_type = 
ARG_ANYTHING, + }; +@@ -4397,7 +4397,7 @@ static const struct bpf_func_proto bpf_s + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, +- .arg2_type = ARG_PTR_TO_MEM, ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg3_type = ARG_CONST_SIZE, + }; + +@@ -4567,7 +4567,7 @@ static const struct bpf_func_proto bpf_x + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_CONST_MAP_PTR, + .arg3_type = ARG_ANYTHING, +- .arg4_type = ARG_PTR_TO_MEM, ++ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg5_type = ARG_CONST_SIZE_OR_ZERO, + }; + +@@ -4581,7 +4581,7 @@ const struct bpf_func_proto bpf_xdp_outp + .arg1_btf_id = &bpf_xdp_output_btf_ids[0], + .arg2_type = ARG_CONST_MAP_PTR, + .arg3_type = ARG_ANYTHING, +- .arg4_type = ARG_PTR_TO_MEM, ++ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg5_type = ARG_CONST_SIZE_OR_ZERO, + }; + +@@ -5069,7 +5069,7 @@ const struct bpf_func_proto bpf_sk_setso + .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, + .arg2_type = ARG_ANYTHING, + .arg3_type = ARG_ANYTHING, +- .arg4_type = ARG_PTR_TO_MEM, ++ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg5_type = ARG_CONST_SIZE, + }; + +@@ -5103,7 +5103,7 @@ static const struct bpf_func_proto bpf_s + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_ANYTHING, + .arg3_type = ARG_ANYTHING, +- .arg4_type = ARG_PTR_TO_MEM, ++ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg5_type = ARG_CONST_SIZE, + }; + +@@ -5137,7 +5137,7 @@ static const struct bpf_func_proto bpf_s + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_ANYTHING, + .arg3_type = ARG_ANYTHING, +- .arg4_type = ARG_PTR_TO_MEM, ++ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg5_type = ARG_CONST_SIZE, + }; + +@@ -5312,7 +5312,7 @@ static const struct bpf_func_proto bpf_b + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, +- .arg2_type = ARG_PTR_TO_MEM, ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg3_type = ARG_CONST_SIZE, + }; + +@@ -5900,7 +5900,7 @@ static const struct bpf_func_proto bpf_l + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_ANYTHING, +- .arg3_type = ARG_PTR_TO_MEM, ++ .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg4_type = ARG_CONST_SIZE + }; + +@@ -5910,7 +5910,7 @@ static const struct bpf_func_proto bpf_l + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_ANYTHING, +- .arg3_type = ARG_PTR_TO_MEM, ++ .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg4_type = ARG_CONST_SIZE + }; + +@@ -5953,7 +5953,7 @@ static const struct bpf_func_proto bpf_l + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_ANYTHING, +- .arg3_type = ARG_PTR_TO_MEM, ++ .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg4_type = ARG_CONST_SIZE + }; + +@@ -6041,7 +6041,7 @@ static const struct bpf_func_proto bpf_l + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_ANYTHING, +- .arg3_type = ARG_PTR_TO_MEM, ++ .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg4_type = ARG_CONST_SIZE + }; + +@@ -6266,7 +6266,7 @@ static const struct bpf_func_proto bpf_s + .pkt_access = true, + .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL, + .arg1_type = ARG_PTR_TO_CTX, +- .arg2_type = ARG_PTR_TO_MEM, ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg3_type = ARG_CONST_SIZE, + .arg4_type = ARG_ANYTHING, + .arg5_type = ARG_ANYTHING, +@@ -6285,7 +6285,7 @@ static const struct bpf_func_proto bpf_s + .pkt_access = true, + .ret_type = RET_PTR_TO_SOCKET_OR_NULL, + .arg1_type = ARG_PTR_TO_CTX, +- .arg2_type = ARG_PTR_TO_MEM, ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 
+ .arg3_type = ARG_CONST_SIZE, + .arg4_type = ARG_ANYTHING, + .arg5_type = ARG_ANYTHING, +@@ -6304,7 +6304,7 @@ static const struct bpf_func_proto bpf_s + .pkt_access = true, + .ret_type = RET_PTR_TO_SOCKET_OR_NULL, + .arg1_type = ARG_PTR_TO_CTX, +- .arg2_type = ARG_PTR_TO_MEM, ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg3_type = ARG_CONST_SIZE, + .arg4_type = ARG_ANYTHING, + .arg5_type = ARG_ANYTHING, +@@ -6341,7 +6341,7 @@ static const struct bpf_func_proto bpf_x + .pkt_access = true, + .ret_type = RET_PTR_TO_SOCKET_OR_NULL, + .arg1_type = ARG_PTR_TO_CTX, +- .arg2_type = ARG_PTR_TO_MEM, ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg3_type = ARG_CONST_SIZE, + .arg4_type = ARG_ANYTHING, + .arg5_type = ARG_ANYTHING, +@@ -6364,7 +6364,7 @@ static const struct bpf_func_proto bpf_x + .pkt_access = true, + .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL, + .arg1_type = ARG_PTR_TO_CTX, +- .arg2_type = ARG_PTR_TO_MEM, ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg3_type = ARG_CONST_SIZE, + .arg4_type = ARG_ANYTHING, + .arg5_type = ARG_ANYTHING, +@@ -6387,7 +6387,7 @@ static const struct bpf_func_proto bpf_x + .pkt_access = true, + .ret_type = RET_PTR_TO_SOCKET_OR_NULL, + .arg1_type = ARG_PTR_TO_CTX, +- .arg2_type = ARG_PTR_TO_MEM, ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg3_type = ARG_CONST_SIZE, + .arg4_type = ARG_ANYTHING, + .arg5_type = ARG_ANYTHING, +@@ -6406,7 +6406,7 @@ static const struct bpf_func_proto bpf_s + .gpl_only = false, + .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL, + .arg1_type = ARG_PTR_TO_CTX, +- .arg2_type = ARG_PTR_TO_MEM, ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg3_type = ARG_CONST_SIZE, + .arg4_type = ARG_ANYTHING, + .arg5_type = ARG_ANYTHING, +@@ -6425,7 +6425,7 @@ static const struct bpf_func_proto bpf_s + .gpl_only = false, + .ret_type = RET_PTR_TO_SOCKET_OR_NULL, + .arg1_type = ARG_PTR_TO_CTX, +- .arg2_type = ARG_PTR_TO_MEM, ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg3_type = ARG_CONST_SIZE, + .arg4_type = ARG_ANYTHING, + .arg5_type = ARG_ANYTHING, +@@ -6444,7 +6444,7 @@ static const struct bpf_func_proto bpf_s + .gpl_only = false, + .ret_type = RET_PTR_TO_SOCKET_OR_NULL, + .arg1_type = ARG_PTR_TO_CTX, +- .arg2_type = ARG_PTR_TO_MEM, ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg3_type = ARG_CONST_SIZE, + .arg4_type = ARG_ANYTHING, + .arg5_type = ARG_ANYTHING, +@@ -6757,9 +6757,9 @@ static const struct bpf_func_proto bpf_t + .pkt_access = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, +- .arg2_type = ARG_PTR_TO_MEM, ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg3_type = ARG_CONST_SIZE, +- .arg4_type = ARG_PTR_TO_MEM, ++ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg5_type = ARG_CONST_SIZE, + }; + +@@ -6826,9 +6826,9 @@ static const struct bpf_func_proto bpf_t + .pkt_access = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, +- .arg2_type = ARG_PTR_TO_MEM, ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg3_type = ARG_CONST_SIZE, +- .arg4_type = ARG_PTR_TO_MEM, ++ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg5_type = ARG_CONST_SIZE, + }; + +@@ -7057,7 +7057,7 @@ static const struct bpf_func_proto bpf_s + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, +- .arg2_type = ARG_PTR_TO_MEM, ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg3_type = ARG_CONST_SIZE, + .arg4_type = ARG_ANYTHING, + }; diff --git a/queue-5.16/bpf-convert-ptr_to_mem_or_null-to-composable-types.patch 
b/queue-5.16/bpf-convert-ptr_to_mem_or_null-to-composable-types.patch new file mode 100644 index 00000000000..3c41f43e031 --- /dev/null +++ b/queue-5.16/bpf-convert-ptr_to_mem_or_null-to-composable-types.patch @@ -0,0 +1,58 @@ +From foo@baz Thu Feb 17 08:07:01 PM CET 2022 +From: Hao Luo +Date: Wed, 16 Feb 2022 14:52:06 -0800 +Subject: bpf: Convert PTR_TO_MEM_OR_NULL to composable types. +To: Greg KH +Cc: Alexei Starovoitov , Andrii Nakryiko , Daniel Borkmann , laura@labbott.name, stable@vger.kernel.org, Hao Luo +Message-ID: <20220216225209.2196865-7-haoluo@google.com> + +From: Hao Luo + +commit cf9f2f8d62eca810afbd1ee6cc0800202b000e57 upstream. + +Remove PTR_TO_MEM_OR_NULL and replace it with PTR_TO_MEM combined with +flag PTR_MAYBE_NULL. + +Signed-off-by: Hao Luo +Signed-off-by: Alexei Starovoitov +Link: https://lore.kernel.org/bpf/20211217003152.48334-7-haoluo@google.com +Cc: stable@vger.kernel.org # 5.16.x +Signed-off-by: Greg Kroah-Hartman +--- + include/linux/bpf.h | 1 - + kernel/bpf/btf.c | 2 +- + kernel/bpf/verifier.c | 2 +- + 3 files changed, 2 insertions(+), 3 deletions(-) + +--- a/include/linux/bpf.h ++++ b/include/linux/bpf.h +@@ -506,7 +506,6 @@ enum bpf_reg_type { + PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCK_COMMON, + PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | PTR_TO_TCP_SOCK, + PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | PTR_TO_BTF_ID, +- PTR_TO_MEM_OR_NULL = PTR_MAYBE_NULL | PTR_TO_MEM, + + /* This must be the last entry. Its purpose is to ensure the enum is + * wide enough to hold the higher bits reserved for bpf_type_flag. +--- a/kernel/bpf/btf.c ++++ b/kernel/bpf/btf.c +@@ -5847,7 +5847,7 @@ int btf_prepare_func_args(struct bpf_ver + return -EINVAL; + } + +- reg->type = PTR_TO_MEM_OR_NULL; ++ reg->type = PTR_TO_MEM | PTR_MAYBE_NULL; + reg->id = ++env->id_gen; + + continue; +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -13336,7 +13336,7 @@ static int do_check_common(struct bpf_ve + mark_reg_known_zero(env, regs, i); + else if (regs[i].type == SCALAR_VALUE) + mark_reg_unknown(env, regs, i); +- else if (regs[i].type == PTR_TO_MEM_OR_NULL) { ++ else if (base_type(regs[i].type) == PTR_TO_MEM) { + const u32 mem_size = regs[i].mem_size; + + mark_reg_known_zero(env, regs, i); diff --git a/queue-5.16/bpf-introduce-composable-reg-ret-and-arg-types.patch b/queue-5.16/bpf-introduce-composable-reg-ret-and-arg-types.patch new file mode 100644 index 00000000000..e9f4a9b3aa6 --- /dev/null +++ b/queue-5.16/bpf-introduce-composable-reg-ret-and-arg-types.patch @@ -0,0 +1,145 @@ +From foo@baz Thu Feb 17 08:07:01 PM CET 2022 +From: Hao Luo +Date: Wed, 16 Feb 2022 14:52:01 -0800 +Subject: bpf: Introduce composable reg, ret and arg types. +To: Greg KH +Cc: Alexei Starovoitov , Andrii Nakryiko , Daniel Borkmann , laura@labbott.name, stable@vger.kernel.org, Hao Luo +Message-ID: <20220216225209.2196865-2-haoluo@google.com> + +From: Hao Luo + +commit d639b9d13a39cf15639cbe6e8b2c43eb60148a73 upstream. + +There are some common properties shared between bpf reg, ret and arg +values. For instance, a value may be a NULL pointer, or a pointer to +a read-only memory. Previously, to express these properties, enumeration +was used. For example, in order to test whether a reg value can be NULL, +reg_type_may_be_null() simply enumerates all types that are possibly +NULL. The problem of this approach is that it's not scalable and causes +a lot of duplication. These properties can be combined, for example, a +type could be either MAYBE_NULL or RDONLY, or both. 
+ +This patch series rewrites the layout of reg_type, arg_type and +ret_type, so that common properties can be extracted and represented as +composable flag. For example, one can write + + ARG_PTR_TO_MEM | PTR_MAYBE_NULL + +which is equivalent to the previous + + ARG_PTR_TO_MEM_OR_NULL + +The type ARG_PTR_TO_MEM are called "base type" in this patch. Base +types can be extended with flags. A flag occupies the higher bits while +base types sits in the lower bits. + +This patch in particular sets up a set of macro for this purpose. The +following patches will rewrite arg_types, ret_types and reg_types +respectively. + +Signed-off-by: Hao Luo +Signed-off-by: Alexei Starovoitov +Link: https://lore.kernel.org/bpf/20211217003152.48334-2-haoluo@google.com +Cc: stable@vger.kernel.org # 5.16.x +Signed-off-by: Greg Kroah-Hartman +--- + include/linux/bpf.h | 42 ++++++++++++++++++++++++++++++++++++++++++ + include/linux/bpf_verifier.h | 13 +++++++++++++ + 2 files changed, 55 insertions(+) + +--- a/include/linux/bpf.h ++++ b/include/linux/bpf.h +@@ -297,6 +297,29 @@ bool bpf_map_meta_equal(const struct bpf + + extern const struct bpf_map_ops bpf_map_offload_ops; + ++/* bpf_type_flag contains a set of flags that are applicable to the values of ++ * arg_type, ret_type and reg_type. For example, a pointer value may be null, ++ * or a memory is read-only. We classify types into two categories: base types ++ * and extended types. Extended types are base types combined with a type flag. ++ * ++ * Currently there are no more than 32 base types in arg_type, ret_type and ++ * reg_types. ++ */ ++#define BPF_BASE_TYPE_BITS 8 ++ ++enum bpf_type_flag { ++ /* PTR may be NULL. */ ++ PTR_MAYBE_NULL = BIT(0 + BPF_BASE_TYPE_BITS), ++ ++ __BPF_TYPE_LAST_FLAG = PTR_MAYBE_NULL, ++}; ++ ++/* Max number of base types. */ ++#define BPF_BASE_TYPE_LIMIT (1UL << BPF_BASE_TYPE_BITS) ++ ++/* Max number of all types. */ ++#define BPF_TYPE_LIMIT (__BPF_TYPE_LAST_FLAG | (__BPF_TYPE_LAST_FLAG - 1)) ++ + /* function argument constraints */ + enum bpf_arg_type { + ARG_DONTCARE = 0, /* unused argument in helper function */ +@@ -343,7 +366,13 @@ enum bpf_arg_type { + ARG_PTR_TO_CONST_STR, /* pointer to a null terminated read-only string */ + ARG_PTR_TO_TIMER, /* pointer to bpf_timer */ + __BPF_ARG_TYPE_MAX, ++ ++ /* This must be the last entry. Its purpose is to ensure the enum is ++ * wide enough to hold the higher bits reserved for bpf_type_flag. ++ */ ++ __BPF_ARG_TYPE_LIMIT = BPF_TYPE_LIMIT, + }; ++static_assert(__BPF_ARG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT); + + /* type of values returned from helper functions */ + enum bpf_return_type { +@@ -359,7 +388,14 @@ enum bpf_return_type { + RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL, /* returns a pointer to a valid memory or a btf_id or NULL */ + RET_PTR_TO_MEM_OR_BTF_ID, /* returns a pointer to a valid memory or a btf_id */ + RET_PTR_TO_BTF_ID, /* returns a pointer to a btf_id */ ++ __BPF_RET_TYPE_MAX, ++ ++ /* This must be the last entry. Its purpose is to ensure the enum is ++ * wide enough to hold the higher bits reserved for bpf_type_flag. 
++ */ ++ __BPF_RET_TYPE_LIMIT = BPF_TYPE_LIMIT, + }; ++static_assert(__BPF_RET_TYPE_MAX <= BPF_BASE_TYPE_LIMIT); + + /* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs + * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL +@@ -461,7 +497,13 @@ enum bpf_reg_type { + PTR_TO_FUNC, /* reg points to a bpf program function */ + PTR_TO_MAP_KEY, /* reg points to a map element key */ + __BPF_REG_TYPE_MAX, ++ ++ /* This must be the last entry. Its purpose is to ensure the enum is ++ * wide enough to hold the higher bits reserved for bpf_type_flag. ++ */ ++ __BPF_REG_TYPE_LIMIT = BPF_TYPE_LIMIT, + }; ++static_assert(__BPF_REG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT); + + /* The information passed from prog-specific *_is_valid_access + * back to the verifier. +--- a/include/linux/bpf_verifier.h ++++ b/include/linux/bpf_verifier.h +@@ -536,5 +536,18 @@ int bpf_check_attach_target(struct bpf_v + struct bpf_attach_target_info *tgt_info); + void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab); + ++#define BPF_BASE_TYPE_MASK GENMASK(BPF_BASE_TYPE_BITS - 1, 0) ++ ++/* extract base type from bpf_{arg, return, reg}_type. */ ++static inline u32 base_type(u32 type) ++{ ++ return type & BPF_BASE_TYPE_MASK; ++} ++ ++/* extract flags from an extended type. See bpf_type_flag in bpf.h. */ ++static inline u32 type_flag(u32 type) ++{ ++ return type & ~BPF_BASE_TYPE_MASK; ++} + + #endif /* _LINUX_BPF_VERIFIER_H */ diff --git a/queue-5.16/bpf-introduce-mem_rdonly-flag.patch b/queue-5.16/bpf-introduce-mem_rdonly-flag.patch new file mode 100644 index 00000000000..8505da23aff --- /dev/null +++ b/queue-5.16/bpf-introduce-mem_rdonly-flag.patch @@ -0,0 +1,258 @@ +From foo@baz Thu Feb 17 08:07:01 PM CET 2022 +From: Hao Luo +Date: Wed, 16 Feb 2022 14:52:05 -0800 +Subject: bpf: Introduce MEM_RDONLY flag +To: Greg KH +Cc: Alexei Starovoitov , Andrii Nakryiko , Daniel Borkmann , laura@labbott.name, stable@vger.kernel.org, Hao Luo +Message-ID: <20220216225209.2196865-6-haoluo@google.com> + +From: Hao Luo + +commit 20b2aff4bc15bda809f994761d5719827d66c0b4 upstream. + +This patch introduce a flag MEM_RDONLY to tag a reg value +pointing to read-only memory. It makes the following changes: + +1. PTR_TO_RDWR_BUF -> PTR_TO_BUF +2. PTR_TO_RDONLY_BUF -> PTR_TO_BUF | MEM_RDONLY + +Signed-off-by: Hao Luo +Signed-off-by: Alexei Starovoitov +Link: https://lore.kernel.org/bpf/20211217003152.48334-6-haoluo@google.com +Cc: stable@vger.kernel.org # 5.16.x +Signed-off-by: Greg Kroah-Hartman +--- + include/linux/bpf.h | 8 ++-- + kernel/bpf/btf.c | 3 - + kernel/bpf/map_iter.c | 4 +- + kernel/bpf/verifier.c | 84 +++++++++++++++++++++++++++------------------- + net/core/bpf_sk_storage.c | 2 - + net/core/sock_map.c | 2 - + 6 files changed, 60 insertions(+), 43 deletions(-) + +--- a/include/linux/bpf.h ++++ b/include/linux/bpf.h +@@ -311,7 +311,10 @@ enum bpf_type_flag { + /* PTR may be NULL. */ + PTR_MAYBE_NULL = BIT(0 + BPF_BASE_TYPE_BITS), + +- __BPF_TYPE_LAST_FLAG = PTR_MAYBE_NULL, ++ /* MEM is read-only. */ ++ MEM_RDONLY = BIT(1 + BPF_BASE_TYPE_BITS), ++ ++ __BPF_TYPE_LAST_FLAG = MEM_RDONLY, + }; + + /* Max number of base types. */ +@@ -492,8 +495,7 @@ enum bpf_reg_type { + * an explicit null check is required for this struct. 
+ */ + PTR_TO_MEM, /* reg points to valid memory region */ +- PTR_TO_RDONLY_BUF, /* reg points to a readonly buffer */ +- PTR_TO_RDWR_BUF, /* reg points to a read/write buffer */ ++ PTR_TO_BUF, /* reg points to a read/write buffer */ + PTR_TO_PERCPU_BTF_ID, /* reg points to a percpu kernel variable */ + PTR_TO_FUNC, /* reg points to a bpf program function */ + __BPF_REG_TYPE_MAX, +--- a/kernel/bpf/btf.c ++++ b/kernel/bpf/btf.c +@@ -4932,8 +4932,7 @@ bool btf_ctx_access(int off, int size, e + + type = base_type(ctx_arg_info->reg_type); + flag = type_flag(ctx_arg_info->reg_type); +- if (ctx_arg_info->offset == off && +- (type == PTR_TO_RDWR_BUF || type == PTR_TO_RDONLY_BUF) && ++ if (ctx_arg_info->offset == off && type == PTR_TO_BUF && + (flag & PTR_MAYBE_NULL)) { + info->reg_type = ctx_arg_info->reg_type; + return true; +--- a/kernel/bpf/map_iter.c ++++ b/kernel/bpf/map_iter.c +@@ -174,9 +174,9 @@ static const struct bpf_iter_reg bpf_map + .ctx_arg_info_size = 2, + .ctx_arg_info = { + { offsetof(struct bpf_iter__bpf_map_elem, key), +- PTR_TO_RDONLY_BUF | PTR_MAYBE_NULL }, ++ PTR_TO_BUF | PTR_MAYBE_NULL | MEM_RDONLY }, + { offsetof(struct bpf_iter__bpf_map_elem, value), +- PTR_TO_RDWR_BUF | PTR_MAYBE_NULL }, ++ PTR_TO_BUF | PTR_MAYBE_NULL }, + }, + }; + +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -452,6 +452,11 @@ static bool reg_type_may_be_refcounted_o + base_type(type) == PTR_TO_MEM; + } + ++static bool type_is_rdonly_mem(u32 type) ++{ ++ return type & MEM_RDONLY; ++} ++ + static bool arg_type_may_be_refcounted(enum bpf_arg_type type) + { + return type == ARG_PTR_TO_SOCK_COMMON; +@@ -527,7 +532,7 @@ static bool is_cmpxchg_insn(const struct + static const char *reg_type_str(struct bpf_verifier_env *env, + enum bpf_reg_type type) + { +- char postfix[16] = {0}; ++ char postfix[16] = {0}, prefix[16] = {0}; + static const char * const str[] = { + [NOT_INIT] = "?", + [SCALAR_VALUE] = "inv", +@@ -547,8 +552,7 @@ static const char *reg_type_str(struct b + [PTR_TO_BTF_ID] = "ptr_", + [PTR_TO_PERCPU_BTF_ID] = "percpu_ptr_", + [PTR_TO_MEM] = "mem", +- [PTR_TO_RDONLY_BUF] = "rdonly_buf", +- [PTR_TO_RDWR_BUF] = "rdwr_buf", ++ [PTR_TO_BUF] = "buf", + [PTR_TO_FUNC] = "func", + [PTR_TO_MAP_KEY] = "map_key", + }; +@@ -561,8 +565,11 @@ static const char *reg_type_str(struct b + strncpy(postfix, "_or_null", 16); + } + +- snprintf(env->type_str_buf, TYPE_STR_BUF_LEN, "%s%s", +- str[base_type(type)], postfix); ++ if (type & MEM_RDONLY) ++ strncpy(prefix, "rdonly_", 16); ++ ++ snprintf(env->type_str_buf, TYPE_STR_BUF_LEN, "%s%s%s", ++ prefix, str[base_type(type)], postfix); + return env->type_str_buf; + } + +@@ -2688,8 +2695,7 @@ static bool is_spillable_regtype(enum bp + case PTR_TO_TCP_SOCK: + case PTR_TO_XDP_SOCK: + case PTR_TO_BTF_ID: +- case PTR_TO_RDONLY_BUF: +- case PTR_TO_RDWR_BUF: ++ case PTR_TO_BUF: + case PTR_TO_PERCPU_BTF_ID: + case PTR_TO_MEM: + case PTR_TO_FUNC: +@@ -4442,22 +4448,28 @@ static int check_mem_access(struct bpf_v + } else if (reg->type == CONST_PTR_TO_MAP) { + err = check_ptr_to_map_access(env, regs, regno, off, size, t, + value_regno); +- } else if (reg->type == PTR_TO_RDONLY_BUF) { +- if (t == BPF_WRITE) { +- verbose(env, "R%d cannot write into %s\n", +- regno, reg_type_str(env, reg->type)); +- return -EACCES; ++ } else if (base_type(reg->type) == PTR_TO_BUF) { ++ bool rdonly_mem = type_is_rdonly_mem(reg->type); ++ const char *buf_info; ++ u32 *max_access; ++ ++ if (rdonly_mem) { ++ if (t == BPF_WRITE) { ++ verbose(env, "R%d cannot write into %s\n", ++ regno, 
reg_type_str(env, reg->type)); ++ return -EACCES; ++ } ++ buf_info = "rdonly"; ++ max_access = &env->prog->aux->max_rdonly_access; ++ } else { ++ buf_info = "rdwr"; ++ max_access = &env->prog->aux->max_rdwr_access; + } ++ + err = check_buffer_access(env, reg, regno, off, size, false, +- "rdonly", +- &env->prog->aux->max_rdonly_access); +- if (!err && value_regno >= 0) +- mark_reg_unknown(env, regs, value_regno); +- } else if (reg->type == PTR_TO_RDWR_BUF) { +- err = check_buffer_access(env, reg, regno, off, size, false, +- "rdwr", +- &env->prog->aux->max_rdwr_access); +- if (!err && t == BPF_READ && value_regno >= 0) ++ buf_info, max_access); ++ ++ if (!err && value_regno >= 0 && (rdonly_mem || t == BPF_READ)) + mark_reg_unknown(env, regs, value_regno); + } else { + verbose(env, "R%d invalid mem access '%s'\n", regno, +@@ -4718,8 +4730,10 @@ static int check_helper_mem_access(struc + struct bpf_call_arg_meta *meta) + { + struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; ++ const char *buf_info; ++ u32 *max_access; + +- switch (reg->type) { ++ switch (base_type(reg->type)) { + case PTR_TO_PACKET: + case PTR_TO_PACKET_META: + return check_packet_access(env, regno, reg->off, access_size, +@@ -4738,18 +4752,20 @@ static int check_helper_mem_access(struc + return check_mem_region_access(env, regno, reg->off, + access_size, reg->mem_size, + zero_size_allowed); +- case PTR_TO_RDONLY_BUF: +- if (meta && meta->raw_mode) +- return -EACCES; +- return check_buffer_access(env, reg, regno, reg->off, +- access_size, zero_size_allowed, +- "rdonly", +- &env->prog->aux->max_rdonly_access); +- case PTR_TO_RDWR_BUF: ++ case PTR_TO_BUF: ++ if (type_is_rdonly_mem(reg->type)) { ++ if (meta && meta->raw_mode) ++ return -EACCES; ++ ++ buf_info = "rdonly"; ++ max_access = &env->prog->aux->max_rdonly_access; ++ } else { ++ buf_info = "rdwr"; ++ max_access = &env->prog->aux->max_rdwr_access; ++ } + return check_buffer_access(env, reg, regno, reg->off, + access_size, zero_size_allowed, +- "rdwr", +- &env->prog->aux->max_rdwr_access); ++ buf_info, max_access); + case PTR_TO_STACK: + return check_stack_range_initialized( + env, +@@ -5028,8 +5044,8 @@ static const struct bpf_reg_types mem_ty + PTR_TO_MAP_KEY, + PTR_TO_MAP_VALUE, + PTR_TO_MEM, +- PTR_TO_RDONLY_BUF, +- PTR_TO_RDWR_BUF, ++ PTR_TO_BUF, ++ PTR_TO_BUF | MEM_RDONLY, + }, + }; + +--- a/net/core/bpf_sk_storage.c ++++ b/net/core/bpf_sk_storage.c +@@ -929,7 +929,7 @@ static struct bpf_iter_reg bpf_sk_storag + { offsetof(struct bpf_iter__bpf_sk_storage_map, sk), + PTR_TO_BTF_ID_OR_NULL }, + { offsetof(struct bpf_iter__bpf_sk_storage_map, value), +- PTR_TO_RDWR_BUF | PTR_MAYBE_NULL }, ++ PTR_TO_BUF | PTR_MAYBE_NULL }, + }, + .seq_info = &iter_seq_info, + }; +--- a/net/core/sock_map.c ++++ b/net/core/sock_map.c +@@ -1569,7 +1569,7 @@ static struct bpf_iter_reg sock_map_iter + .ctx_arg_info_size = 2, + .ctx_arg_info = { + { offsetof(struct bpf_iter__sockmap, key), +- PTR_TO_RDONLY_BUF | PTR_MAYBE_NULL }, ++ PTR_TO_BUF | PTR_MAYBE_NULL | MEM_RDONLY }, + { offsetof(struct bpf_iter__sockmap, sk), + PTR_TO_BTF_ID_OR_NULL }, + }, diff --git a/queue-5.16/bpf-make-per_cpu_ptr-return-rdonly-ptr_to_mem.patch b/queue-5.16/bpf-make-per_cpu_ptr-return-rdonly-ptr_to_mem.patch new file mode 100644 index 00000000000..c93a2362949 --- /dev/null +++ b/queue-5.16/bpf-make-per_cpu_ptr-return-rdonly-ptr_to_mem.patch @@ -0,0 +1,119 @@ +From foo@baz Thu Feb 17 08:07:01 PM CET 2022 +From: Hao Luo +Date: Wed, 16 Feb 2022 14:52:07 -0800 +Subject: bpf: Make per_cpu_ptr return rdonly 
PTR_TO_MEM. +To: Greg KH +Cc: Alexei Starovoitov , Andrii Nakryiko , Daniel Borkmann , laura@labbott.name, stable@vger.kernel.org, Hao Luo +Message-ID: <20220216225209.2196865-8-haoluo@google.com> + +From: Hao Luo + +commit 34d3a78c681e8e7844b43d1a2f4671a04249c821 upstream. + +Tag the return type of {per, this}_cpu_ptr with RDONLY_MEM. The +returned value of this pair of helpers is kernel object, which +can not be updated by bpf programs. Previously these two helpers +return PTR_OT_MEM for kernel objects of scalar type, which allows +one to directly modify the memory. Now with RDONLY_MEM tagging, +the verifier will reject programs that write into RDONLY_MEM. + +Fixes: 63d9b80dcf2c ("bpf: Introducte bpf_this_cpu_ptr()") +Fixes: eaa6bcb71ef6 ("bpf: Introduce bpf_per_cpu_ptr()") +Fixes: 4976b718c355 ("bpf: Introduce pseudo_btf_id") +Signed-off-by: Hao Luo +Signed-off-by: Alexei Starovoitov +Link: https://lore.kernel.org/bpf/20211217003152.48334-8-haoluo@google.com +Cc: stable@vger.kernel.org # 5.16.x +Signed-off-by: Greg Kroah-Hartman +--- + kernel/bpf/helpers.c | 4 ++-- + kernel/bpf/verifier.c | 30 ++++++++++++++++++++++++++---- + 2 files changed, 28 insertions(+), 6 deletions(-) + +--- a/kernel/bpf/helpers.c ++++ b/kernel/bpf/helpers.c +@@ -667,7 +667,7 @@ BPF_CALL_2(bpf_per_cpu_ptr, const void * + const struct bpf_func_proto bpf_per_cpu_ptr_proto = { + .func = bpf_per_cpu_ptr, + .gpl_only = false, +- .ret_type = RET_PTR_TO_MEM_OR_BTF_ID | PTR_MAYBE_NULL, ++ .ret_type = RET_PTR_TO_MEM_OR_BTF_ID | PTR_MAYBE_NULL | MEM_RDONLY, + .arg1_type = ARG_PTR_TO_PERCPU_BTF_ID, + .arg2_type = ARG_ANYTHING, + }; +@@ -680,7 +680,7 @@ BPF_CALL_1(bpf_this_cpu_ptr, const void + const struct bpf_func_proto bpf_this_cpu_ptr_proto = { + .func = bpf_this_cpu_ptr, + .gpl_only = false, +- .ret_type = RET_PTR_TO_MEM_OR_BTF_ID, ++ .ret_type = RET_PTR_TO_MEM_OR_BTF_ID | MEM_RDONLY, + .arg1_type = ARG_PTR_TO_PERCPU_BTF_ID, + }; + +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -4333,15 +4333,30 @@ static int check_mem_access(struct bpf_v + mark_reg_unknown(env, regs, value_regno); + } + } +- } else if (reg->type == PTR_TO_MEM) { ++ } else if (base_type(reg->type) == PTR_TO_MEM) { ++ bool rdonly_mem = type_is_rdonly_mem(reg->type); ++ ++ if (type_may_be_null(reg->type)) { ++ verbose(env, "R%d invalid mem access '%s'\n", regno, ++ reg_type_str(env, reg->type)); ++ return -EACCES; ++ } ++ ++ if (t == BPF_WRITE && rdonly_mem) { ++ verbose(env, "R%d cannot write into %s\n", ++ regno, reg_type_str(env, reg->type)); ++ return -EACCES; ++ } ++ + if (t == BPF_WRITE && value_regno >= 0 && + is_pointer_value(env, value_regno)) { + verbose(env, "R%d leaks addr into mem\n", value_regno); + return -EACCES; + } ++ + err = check_mem_region_access(env, regno, off, size, + reg->mem_size, false); +- if (!err && t == BPF_READ && value_regno >= 0) ++ if (!err && value_regno >= 0 && (t == BPF_READ || rdonly_mem)) + mark_reg_unknown(env, regs, value_regno); + } else if (reg->type == PTR_TO_CTX) { + enum bpf_reg_type reg_type = SCALAR_VALUE; +@@ -6550,6 +6565,13 @@ static int check_helper_call(struct bpf_ + regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag; + regs[BPF_REG_0].mem_size = tsize; + } else { ++ /* MEM_RDONLY may be carried from ret_flag, but it ++ * doesn't apply on PTR_TO_BTF_ID. Fold it, otherwise ++ * it will confuse the check of PTR_TO_BTF_ID in ++ * check_mem_access(). 
++ */ ++ ret_flag &= ~MEM_RDONLY; ++ + regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag; + regs[BPF_REG_0].btf = meta.ret_btf; + regs[BPF_REG_0].btf_id = meta.ret_btf_id; +@@ -9362,7 +9384,7 @@ static int check_ld_imm(struct bpf_verif + + if (insn->src_reg == BPF_PSEUDO_BTF_ID) { + dst_reg->type = aux->btf_var.reg_type; +- switch (dst_reg->type) { ++ switch (base_type(dst_reg->type)) { + case PTR_TO_MEM: + dst_reg->mem_size = aux->btf_var.mem_size; + break; +@@ -11505,7 +11527,7 @@ static int check_pseudo_btf_id(struct bp + err = -EINVAL; + goto err_put; + } +- aux->btf_var.reg_type = PTR_TO_MEM; ++ aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY; + aux->btf_var.mem_size = tsize; + } else { + aux->btf_var.reg_type = PTR_TO_BTF_ID; diff --git a/queue-5.16/bpf-replace-arg_xxx_or_null-with-arg_xxx-ptr_maybe_null.patch b/queue-5.16/bpf-replace-arg_xxx_or_null-with-arg_xxx-ptr_maybe_null.patch new file mode 100644 index 00000000000..4fbc1f617a6 --- /dev/null +++ b/queue-5.16/bpf-replace-arg_xxx_or_null-with-arg_xxx-ptr_maybe_null.patch @@ -0,0 +1,201 @@ +From foo@baz Thu Feb 17 08:07:01 PM CET 2022 +From: Hao Luo +Date: Wed, 16 Feb 2022 14:52:02 -0800 +Subject: bpf: Replace ARG_XXX_OR_NULL with ARG_XXX | PTR_MAYBE_NULL +To: Greg KH +Cc: Alexei Starovoitov , Andrii Nakryiko , Daniel Borkmann , laura@labbott.name, stable@vger.kernel.org, Hao Luo +Message-ID: <20220216225209.2196865-3-haoluo@google.com> + +From: Hao Luo + +commit 48946bd6a5d695c50b34546864b79c1f910a33c1 upstream. + +We have introduced a new type to make bpf_arg composable, by +reserving high bits of bpf_arg to represent flags of a type. + +One of the flags is PTR_MAYBE_NULL which indicates a pointer +may be NULL. When applying this flag to an arg_type, it means +the arg can take NULL pointer. This patch switches the +qualified arg_types to use this flag. The arg_types changed +in this patch include: + +1. ARG_PTR_TO_MAP_VALUE_OR_NULL +2. ARG_PTR_TO_MEM_OR_NULL +3. ARG_PTR_TO_CTX_OR_NULL +4. ARG_PTR_TO_SOCKET_OR_NULL +5. ARG_PTR_TO_ALLOC_MEM_OR_NULL +6. ARG_PTR_TO_STACK_OR_NULL + +This patch does not eliminate the use of these arg_types, instead +it makes them an alias to the 'ARG_XXX | PTR_MAYBE_NULL'. + +Signed-off-by: Hao Luo +Signed-off-by: Alexei Starovoitov +Link: https://lore.kernel.org/bpf/20211217003152.48334-3-haoluo@google.com +Cc: stable@vger.kernel.org # 5.16.x +Signed-off-by: Greg Kroah-Hartman +--- + include/linux/bpf.h | 15 +++++++++------ + kernel/bpf/verifier.c | 39 ++++++++++++++------------------------- + 2 files changed, 23 insertions(+), 31 deletions(-) + +--- a/include/linux/bpf.h ++++ b/include/linux/bpf.h +@@ -331,13 +331,11 @@ enum bpf_arg_type { + ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */ + ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */ + ARG_PTR_TO_UNINIT_MAP_VALUE, /* pointer to valid memory used to store a map value */ +- ARG_PTR_TO_MAP_VALUE_OR_NULL, /* pointer to stack used as map value or NULL */ + + /* the following constraints used to prototype bpf_memcmp() and other + * functions that access data on eBPF program stack + */ + ARG_PTR_TO_MEM, /* pointer to valid memory (stack, packet, map value) */ +- ARG_PTR_TO_MEM_OR_NULL, /* pointer to valid memory or NULL */ + ARG_PTR_TO_UNINIT_MEM, /* pointer to memory does not need to be initialized, + * helper function must fill all bytes or clear + * them in error case. 
+@@ -347,26 +345,31 @@ enum bpf_arg_type { + ARG_CONST_SIZE_OR_ZERO, /* number of bytes accessed from memory or 0 */ + + ARG_PTR_TO_CTX, /* pointer to context */ +- ARG_PTR_TO_CTX_OR_NULL, /* pointer to context or NULL */ + ARG_ANYTHING, /* any (initialized) argument is ok */ + ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */ + ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */ + ARG_PTR_TO_INT, /* pointer to int */ + ARG_PTR_TO_LONG, /* pointer to long */ + ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */ +- ARG_PTR_TO_SOCKET_OR_NULL, /* pointer to bpf_sock (fullsock) or NULL */ + ARG_PTR_TO_BTF_ID, /* pointer to in-kernel struct */ + ARG_PTR_TO_ALLOC_MEM, /* pointer to dynamically allocated memory */ +- ARG_PTR_TO_ALLOC_MEM_OR_NULL, /* pointer to dynamically allocated memory or NULL */ + ARG_CONST_ALLOC_SIZE_OR_ZERO, /* number of allocated bytes requested */ + ARG_PTR_TO_BTF_ID_SOCK_COMMON, /* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */ + ARG_PTR_TO_PERCPU_BTF_ID, /* pointer to in-kernel percpu type */ + ARG_PTR_TO_FUNC, /* pointer to a bpf program function */ +- ARG_PTR_TO_STACK_OR_NULL, /* pointer to stack or NULL */ ++ ARG_PTR_TO_STACK, /* pointer to stack */ + ARG_PTR_TO_CONST_STR, /* pointer to a null terminated read-only string */ + ARG_PTR_TO_TIMER, /* pointer to bpf_timer */ + __BPF_ARG_TYPE_MAX, + ++ /* Extended arg_types. */ ++ ARG_PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_MAP_VALUE, ++ ARG_PTR_TO_MEM_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_MEM, ++ ARG_PTR_TO_CTX_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_CTX, ++ ARG_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_SOCKET, ++ ARG_PTR_TO_ALLOC_MEM_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_ALLOC_MEM, ++ ARG_PTR_TO_STACK_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_STACK, ++ + /* This must be the last entry. Its purpose is to ensure the enum is + * wide enough to hold the higher bits reserved for bpf_type_flag. 
+ */ +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -472,14 +472,9 @@ static bool arg_type_may_be_refcounted(e + return type == ARG_PTR_TO_SOCK_COMMON; + } + +-static bool arg_type_may_be_null(enum bpf_arg_type type) ++static bool type_may_be_null(u32 type) + { +- return type == ARG_PTR_TO_MAP_VALUE_OR_NULL || +- type == ARG_PTR_TO_MEM_OR_NULL || +- type == ARG_PTR_TO_CTX_OR_NULL || +- type == ARG_PTR_TO_SOCKET_OR_NULL || +- type == ARG_PTR_TO_ALLOC_MEM_OR_NULL || +- type == ARG_PTR_TO_STACK_OR_NULL; ++ return type & PTR_MAYBE_NULL; + } + + /* Determine whether the function releases some resources allocated by another +@@ -4963,9 +4958,8 @@ static int process_timer_func(struct bpf + + static bool arg_type_is_mem_ptr(enum bpf_arg_type type) + { +- return type == ARG_PTR_TO_MEM || +- type == ARG_PTR_TO_MEM_OR_NULL || +- type == ARG_PTR_TO_UNINIT_MEM; ++ return base_type(type) == ARG_PTR_TO_MEM || ++ base_type(type) == ARG_PTR_TO_UNINIT_MEM; + } + + static bool arg_type_is_mem_size(enum bpf_arg_type type) +@@ -5102,31 +5096,26 @@ static const struct bpf_reg_types *compa + [ARG_PTR_TO_MAP_KEY] = &map_key_value_types, + [ARG_PTR_TO_MAP_VALUE] = &map_key_value_types, + [ARG_PTR_TO_UNINIT_MAP_VALUE] = &map_key_value_types, +- [ARG_PTR_TO_MAP_VALUE_OR_NULL] = &map_key_value_types, + [ARG_CONST_SIZE] = &scalar_types, + [ARG_CONST_SIZE_OR_ZERO] = &scalar_types, + [ARG_CONST_ALLOC_SIZE_OR_ZERO] = &scalar_types, + [ARG_CONST_MAP_PTR] = &const_map_ptr_types, + [ARG_PTR_TO_CTX] = &context_types, +- [ARG_PTR_TO_CTX_OR_NULL] = &context_types, + [ARG_PTR_TO_SOCK_COMMON] = &sock_types, + #ifdef CONFIG_NET + [ARG_PTR_TO_BTF_ID_SOCK_COMMON] = &btf_id_sock_common_types, + #endif + [ARG_PTR_TO_SOCKET] = &fullsock_types, +- [ARG_PTR_TO_SOCKET_OR_NULL] = &fullsock_types, + [ARG_PTR_TO_BTF_ID] = &btf_ptr_types, + [ARG_PTR_TO_SPIN_LOCK] = &spin_lock_types, + [ARG_PTR_TO_MEM] = &mem_types, +- [ARG_PTR_TO_MEM_OR_NULL] = &mem_types, + [ARG_PTR_TO_UNINIT_MEM] = &mem_types, + [ARG_PTR_TO_ALLOC_MEM] = &alloc_mem_types, +- [ARG_PTR_TO_ALLOC_MEM_OR_NULL] = &alloc_mem_types, + [ARG_PTR_TO_INT] = &int_ptr_types, + [ARG_PTR_TO_LONG] = &int_ptr_types, + [ARG_PTR_TO_PERCPU_BTF_ID] = &percpu_btf_ptr_types, + [ARG_PTR_TO_FUNC] = &func_ptr_types, +- [ARG_PTR_TO_STACK_OR_NULL] = &stack_ptr_types, ++ [ARG_PTR_TO_STACK] = &stack_ptr_types, + [ARG_PTR_TO_CONST_STR] = &const_str_ptr_types, + [ARG_PTR_TO_TIMER] = &timer_types, + }; +@@ -5140,7 +5129,7 @@ static int check_reg_type(struct bpf_ver + const struct bpf_reg_types *compatible; + int i, j; + +- compatible = compatible_reg_types[arg_type]; ++ compatible = compatible_reg_types[base_type(arg_type)]; + if (!compatible) { + verbose(env, "verifier internal error: unsupported arg type %d\n", arg_type); + return -EFAULT; +@@ -5221,15 +5210,14 @@ static int check_func_arg(struct bpf_ver + return -EACCES; + } + +- if (arg_type == ARG_PTR_TO_MAP_VALUE || +- arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE || +- arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL) { ++ if (base_type(arg_type) == ARG_PTR_TO_MAP_VALUE || ++ base_type(arg_type) == ARG_PTR_TO_UNINIT_MAP_VALUE) { + err = resolve_map_arg_type(env, meta, &arg_type); + if (err) + return err; + } + +- if (register_is_null(reg) && arg_type_may_be_null(arg_type)) ++ if (register_is_null(reg) && type_may_be_null(arg_type)) + /* A NULL register has a SCALAR_VALUE type, so skip + * type checking. 
+ */ +@@ -5298,10 +5286,11 @@ skip_type_check: + err = check_helper_mem_access(env, regno, + meta->map_ptr->key_size, false, + NULL); +- } else if (arg_type == ARG_PTR_TO_MAP_VALUE || +- (arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL && +- !register_is_null(reg)) || +- arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE) { ++ } else if (base_type(arg_type) == ARG_PTR_TO_MAP_VALUE || ++ base_type(arg_type) == ARG_PTR_TO_UNINIT_MAP_VALUE) { ++ if (type_may_be_null(arg_type) && register_is_null(reg)) ++ return 0; ++ + /* bpf_map_xxx(..., map_ptr, ..., value) call: + * check [value, value + map->value_size) validity + */ diff --git a/queue-5.16/bpf-replace-ptr_to_xxx_or_null-with-ptr_to_xxx-ptr_maybe_null.patch b/queue-5.16/bpf-replace-ptr_to_xxx_or_null-with-ptr_to_xxx-ptr_maybe_null.patch new file mode 100644 index 00000000000..7f502d50751 --- /dev/null +++ b/queue-5.16/bpf-replace-ptr_to_xxx_or_null-with-ptr_to_xxx-ptr_maybe_null.patch @@ -0,0 +1,820 @@ +From foo@baz Thu Feb 17 08:07:01 PM CET 2022 +From: Hao Luo +Date: Wed, 16 Feb 2022 14:52:04 -0800 +Subject: bpf: Replace PTR_TO_XXX_OR_NULL with PTR_TO_XXX | PTR_MAYBE_NULL +To: Greg KH +Cc: Alexei Starovoitov , Andrii Nakryiko , Daniel Borkmann , laura@labbott.name, stable@vger.kernel.org, Hao Luo +Message-ID: <20220216225209.2196865-5-haoluo@google.com> + +From: Hao Luo + +commit c25b2ae136039ffa820c26138ed4a5e5f3ab3841 upstream. + +We have introduced a new type to make bpf_reg composable, by +allocating bits in the type to represent flags. + +One of the flags is PTR_MAYBE_NULL which indicates a pointer +may be NULL. This patch switches the qualified reg_types to +use this flag. The reg_types changed in this patch include: + +1. PTR_TO_MAP_VALUE_OR_NULL +2. PTR_TO_SOCKET_OR_NULL +3. PTR_TO_SOCK_COMMON_OR_NULL +4. PTR_TO_TCP_SOCK_OR_NULL +5. PTR_TO_BTF_ID_OR_NULL +6. PTR_TO_MEM_OR_NULL +7. PTR_TO_RDONLY_BUF_OR_NULL +8. PTR_TO_RDWR_BUF_OR_NULL + +[haoluo: backport notes + There was a reg_type_may_be_null() in adjust_ptr_min_max_vals() in + 5.16.x, but didn't exist in the upstream commit. This backport + converted that reg_type_may_be_null() to type_may_be_null() as well.] 
+ +Signed-off-by: Hao Luo +Signed-off-by: Alexei Starovoitov +Link: https://lore.kernel.org/r/20211217003152.48334-5-haoluo@google.com +Cc: stable@vger.kernel.org # 5.16.x +Signed-off-by: Greg Kroah-Hartman +--- + include/linux/bpf.h | 18 +- + include/linux/bpf_verifier.h | 4 + kernel/bpf/btf.c | 7 - + kernel/bpf/map_iter.c | 4 + kernel/bpf/verifier.c | 297 ++++++++++++++++++------------------------- + net/core/bpf_sk_storage.c | 2 + net/core/sock_map.c | 2 + 7 files changed, 148 insertions(+), 186 deletions(-) + +--- a/include/linux/bpf.h ++++ b/include/linux/bpf.h +@@ -465,18 +465,15 @@ enum bpf_reg_type { + PTR_TO_CTX, /* reg points to bpf_context */ + CONST_PTR_TO_MAP, /* reg points to struct bpf_map */ + PTR_TO_MAP_VALUE, /* reg points to map element value */ +- PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */ ++ PTR_TO_MAP_KEY, /* reg points to a map element key */ + PTR_TO_STACK, /* reg == frame_pointer + offset */ + PTR_TO_PACKET_META, /* skb->data - meta_len */ + PTR_TO_PACKET, /* reg points to skb->data */ + PTR_TO_PACKET_END, /* skb->data + headlen */ + PTR_TO_FLOW_KEYS, /* reg points to bpf_flow_keys */ + PTR_TO_SOCKET, /* reg points to struct bpf_sock */ +- PTR_TO_SOCKET_OR_NULL, /* reg points to struct bpf_sock or NULL */ + PTR_TO_SOCK_COMMON, /* reg points to sock_common */ +- PTR_TO_SOCK_COMMON_OR_NULL, /* reg points to sock_common or NULL */ + PTR_TO_TCP_SOCK, /* reg points to struct tcp_sock */ +- PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */ + PTR_TO_TP_BUFFER, /* reg points to a writable raw tp's buffer */ + PTR_TO_XDP_SOCK, /* reg points to struct xdp_sock */ + /* PTR_TO_BTF_ID points to a kernel struct that does not need +@@ -494,18 +491,21 @@ enum bpf_reg_type { + * been checked for null. Used primarily to inform the verifier + * an explicit null check is required for this struct. + */ +- PTR_TO_BTF_ID_OR_NULL, + PTR_TO_MEM, /* reg points to valid memory region */ +- PTR_TO_MEM_OR_NULL, /* reg points to valid memory region or NULL */ + PTR_TO_RDONLY_BUF, /* reg points to a readonly buffer */ +- PTR_TO_RDONLY_BUF_OR_NULL, /* reg points to a readonly buffer or NULL */ + PTR_TO_RDWR_BUF, /* reg points to a read/write buffer */ +- PTR_TO_RDWR_BUF_OR_NULL, /* reg points to a read/write buffer or NULL */ + PTR_TO_PERCPU_BTF_ID, /* reg points to a percpu kernel variable */ + PTR_TO_FUNC, /* reg points to a bpf program function */ +- PTR_TO_MAP_KEY, /* reg points to a map element key */ + __BPF_REG_TYPE_MAX, + ++ /* Extended reg_types. */ ++ PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | PTR_TO_MAP_VALUE, ++ PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCKET, ++ PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCK_COMMON, ++ PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | PTR_TO_TCP_SOCK, ++ PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | PTR_TO_BTF_ID, ++ PTR_TO_MEM_OR_NULL = PTR_MAYBE_NULL | PTR_TO_MEM, ++ + /* This must be the last entry. Its purpose is to ensure the enum is + * wide enough to hold the higher bits reserved for bpf_type_flag. + */ +--- a/include/linux/bpf_verifier.h ++++ b/include/linux/bpf_verifier.h +@@ -18,6 +18,8 @@ + * that converting umax_value to int cannot overflow. + */ + #define BPF_MAX_VAR_SIZ (1 << 29) ++/* size of type_str_buf in bpf_verifier. */ ++#define TYPE_STR_BUF_LEN 64 + + /* Liveness marks, used for registers and spilled-regs (in stack slots). 
+ * Read marks propagate upwards until they find a write mark; they record that +@@ -474,6 +476,8 @@ struct bpf_verifier_env { + /* longest register parentage chain walked for liveness marking */ + u32 longest_mark_read_walk; + bpfptr_t fd_array; ++ /* buffer used in reg_type_str() to generate reg_type string */ ++ char type_str_buf[TYPE_STR_BUF_LEN]; + }; + + __printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log, +--- a/kernel/bpf/btf.c ++++ b/kernel/bpf/btf.c +@@ -4928,10 +4928,13 @@ bool btf_ctx_access(int off, int size, e + /* check for PTR_TO_RDONLY_BUF_OR_NULL or PTR_TO_RDWR_BUF_OR_NULL */ + for (i = 0; i < prog->aux->ctx_arg_info_size; i++) { + const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i]; ++ u32 type, flag; + ++ type = base_type(ctx_arg_info->reg_type); ++ flag = type_flag(ctx_arg_info->reg_type); + if (ctx_arg_info->offset == off && +- (ctx_arg_info->reg_type == PTR_TO_RDONLY_BUF_OR_NULL || +- ctx_arg_info->reg_type == PTR_TO_RDWR_BUF_OR_NULL)) { ++ (type == PTR_TO_RDWR_BUF || type == PTR_TO_RDONLY_BUF) && ++ (flag & PTR_MAYBE_NULL)) { + info->reg_type = ctx_arg_info->reg_type; + return true; + } +--- a/kernel/bpf/map_iter.c ++++ b/kernel/bpf/map_iter.c +@@ -174,9 +174,9 @@ static const struct bpf_iter_reg bpf_map + .ctx_arg_info_size = 2, + .ctx_arg_info = { + { offsetof(struct bpf_iter__bpf_map_elem, key), +- PTR_TO_RDONLY_BUF_OR_NULL }, ++ PTR_TO_RDONLY_BUF | PTR_MAYBE_NULL }, + { offsetof(struct bpf_iter__bpf_map_elem, value), +- PTR_TO_RDWR_BUF_OR_NULL }, ++ PTR_TO_RDWR_BUF | PTR_MAYBE_NULL }, + }, + }; + +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -439,18 +439,6 @@ static bool reg_type_not_null(enum bpf_r + type == PTR_TO_SOCK_COMMON; + } + +-static bool reg_type_may_be_null(enum bpf_reg_type type) +-{ +- return type == PTR_TO_MAP_VALUE_OR_NULL || +- type == PTR_TO_SOCKET_OR_NULL || +- type == PTR_TO_SOCK_COMMON_OR_NULL || +- type == PTR_TO_TCP_SOCK_OR_NULL || +- type == PTR_TO_BTF_ID_OR_NULL || +- type == PTR_TO_MEM_OR_NULL || +- type == PTR_TO_RDONLY_BUF_OR_NULL || +- type == PTR_TO_RDWR_BUF_OR_NULL; +-} +- + static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg) + { + return reg->type == PTR_TO_MAP_VALUE && +@@ -459,12 +447,9 @@ static bool reg_may_point_to_spin_lock(c + + static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type) + { +- return type == PTR_TO_SOCKET || +- type == PTR_TO_SOCKET_OR_NULL || +- type == PTR_TO_TCP_SOCK || +- type == PTR_TO_TCP_SOCK_OR_NULL || +- type == PTR_TO_MEM || +- type == PTR_TO_MEM_OR_NULL; ++ return base_type(type) == PTR_TO_SOCKET || ++ base_type(type) == PTR_TO_TCP_SOCK || ++ base_type(type) == PTR_TO_MEM; + } + + static bool arg_type_may_be_refcounted(enum bpf_arg_type type) +@@ -534,39 +519,52 @@ static bool is_cmpxchg_insn(const struct + insn->imm == BPF_CMPXCHG; + } + +-/* string representation of 'enum bpf_reg_type' */ +-static const char * const reg_type_str[] = { +- [NOT_INIT] = "?", +- [SCALAR_VALUE] = "inv", +- [PTR_TO_CTX] = "ctx", +- [CONST_PTR_TO_MAP] = "map_ptr", +- [PTR_TO_MAP_VALUE] = "map_value", +- [PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null", +- [PTR_TO_STACK] = "fp", +- [PTR_TO_PACKET] = "pkt", +- [PTR_TO_PACKET_META] = "pkt_meta", +- [PTR_TO_PACKET_END] = "pkt_end", +- [PTR_TO_FLOW_KEYS] = "flow_keys", +- [PTR_TO_SOCKET] = "sock", +- [PTR_TO_SOCKET_OR_NULL] = "sock_or_null", +- [PTR_TO_SOCK_COMMON] = "sock_common", +- [PTR_TO_SOCK_COMMON_OR_NULL] = "sock_common_or_null", +- [PTR_TO_TCP_SOCK] = "tcp_sock", +- 
[PTR_TO_TCP_SOCK_OR_NULL] = "tcp_sock_or_null", +- [PTR_TO_TP_BUFFER] = "tp_buffer", +- [PTR_TO_XDP_SOCK] = "xdp_sock", +- [PTR_TO_BTF_ID] = "ptr_", +- [PTR_TO_BTF_ID_OR_NULL] = "ptr_or_null_", +- [PTR_TO_PERCPU_BTF_ID] = "percpu_ptr_", +- [PTR_TO_MEM] = "mem", +- [PTR_TO_MEM_OR_NULL] = "mem_or_null", +- [PTR_TO_RDONLY_BUF] = "rdonly_buf", +- [PTR_TO_RDONLY_BUF_OR_NULL] = "rdonly_buf_or_null", +- [PTR_TO_RDWR_BUF] = "rdwr_buf", +- [PTR_TO_RDWR_BUF_OR_NULL] = "rdwr_buf_or_null", +- [PTR_TO_FUNC] = "func", +- [PTR_TO_MAP_KEY] = "map_key", +-}; ++/* string representation of 'enum bpf_reg_type' ++ * ++ * Note that reg_type_str() can not appear more than once in a single verbose() ++ * statement. ++ */ ++static const char *reg_type_str(struct bpf_verifier_env *env, ++ enum bpf_reg_type type) ++{ ++ char postfix[16] = {0}; ++ static const char * const str[] = { ++ [NOT_INIT] = "?", ++ [SCALAR_VALUE] = "inv", ++ [PTR_TO_CTX] = "ctx", ++ [CONST_PTR_TO_MAP] = "map_ptr", ++ [PTR_TO_MAP_VALUE] = "map_value", ++ [PTR_TO_STACK] = "fp", ++ [PTR_TO_PACKET] = "pkt", ++ [PTR_TO_PACKET_META] = "pkt_meta", ++ [PTR_TO_PACKET_END] = "pkt_end", ++ [PTR_TO_FLOW_KEYS] = "flow_keys", ++ [PTR_TO_SOCKET] = "sock", ++ [PTR_TO_SOCK_COMMON] = "sock_common", ++ [PTR_TO_TCP_SOCK] = "tcp_sock", ++ [PTR_TO_TP_BUFFER] = "tp_buffer", ++ [PTR_TO_XDP_SOCK] = "xdp_sock", ++ [PTR_TO_BTF_ID] = "ptr_", ++ [PTR_TO_PERCPU_BTF_ID] = "percpu_ptr_", ++ [PTR_TO_MEM] = "mem", ++ [PTR_TO_RDONLY_BUF] = "rdonly_buf", ++ [PTR_TO_RDWR_BUF] = "rdwr_buf", ++ [PTR_TO_FUNC] = "func", ++ [PTR_TO_MAP_KEY] = "map_key", ++ }; ++ ++ if (type & PTR_MAYBE_NULL) { ++ if (base_type(type) == PTR_TO_BTF_ID || ++ base_type(type) == PTR_TO_PERCPU_BTF_ID) ++ strncpy(postfix, "or_null_", 16); ++ else ++ strncpy(postfix, "_or_null", 16); ++ } ++ ++ snprintf(env->type_str_buf, TYPE_STR_BUF_LEN, "%s%s", ++ str[base_type(type)], postfix); ++ return env->type_str_buf; ++} + + static char slot_type_char[] = { + [STACK_INVALID] = '?', +@@ -631,7 +629,7 @@ static void print_verifier_state(struct + continue; + verbose(env, " R%d", i); + print_liveness(env, reg->live); +- verbose(env, "=%s", reg_type_str[t]); ++ verbose(env, "=%s", reg_type_str(env, t)); + if (t == SCALAR_VALUE && reg->precise) + verbose(env, "P"); + if ((t == SCALAR_VALUE || t == PTR_TO_STACK) && +@@ -639,9 +637,8 @@ static void print_verifier_state(struct + /* reg->off should be 0 for SCALAR_VALUE */ + verbose(env, "%lld", reg->var_off.value + reg->off); + } else { +- if (t == PTR_TO_BTF_ID || +- t == PTR_TO_BTF_ID_OR_NULL || +- t == PTR_TO_PERCPU_BTF_ID) ++ if (base_type(t) == PTR_TO_BTF_ID || ++ base_type(t) == PTR_TO_PERCPU_BTF_ID) + verbose(env, "%s", kernel_type_name(reg->btf, reg->btf_id)); + verbose(env, "(id=%d", reg->id); + if (reg_type_may_be_refcounted_or_null(t)) +@@ -650,10 +647,9 @@ static void print_verifier_state(struct + verbose(env, ",off=%d", reg->off); + if (type_is_pkt_pointer(t)) + verbose(env, ",r=%d", reg->range); +- else if (t == CONST_PTR_TO_MAP || +- t == PTR_TO_MAP_KEY || +- t == PTR_TO_MAP_VALUE || +- t == PTR_TO_MAP_VALUE_OR_NULL) ++ else if (base_type(t) == CONST_PTR_TO_MAP || ++ base_type(t) == PTR_TO_MAP_KEY || ++ base_type(t) == PTR_TO_MAP_VALUE) + verbose(env, ",ks=%d,vs=%d", + reg->map_ptr->key_size, + reg->map_ptr->value_size); +@@ -723,7 +719,7 @@ static void print_verifier_state(struct + if (is_spilled_reg(&state->stack[i])) { + reg = &state->stack[i].spilled_ptr; + t = reg->type; +- verbose(env, "=%s", reg_type_str[t]); ++ verbose(env, "=%s", reg_type_str(env, 
t)); + if (t == SCALAR_VALUE && reg->precise) + verbose(env, "P"); + if (t == SCALAR_VALUE && tnum_is_const(reg->var_off)) +@@ -1136,8 +1132,7 @@ static void mark_reg_known_zero(struct b + + static void mark_ptr_not_null_reg(struct bpf_reg_state *reg) + { +- switch (reg->type) { +- case PTR_TO_MAP_VALUE_OR_NULL: { ++ if (base_type(reg->type) == PTR_TO_MAP_VALUE) { + const struct bpf_map *map = reg->map_ptr; + + if (map->inner_map_meta) { +@@ -1156,32 +1151,10 @@ static void mark_ptr_not_null_reg(struct + } else { + reg->type = PTR_TO_MAP_VALUE; + } +- break; +- } +- case PTR_TO_SOCKET_OR_NULL: +- reg->type = PTR_TO_SOCKET; +- break; +- case PTR_TO_SOCK_COMMON_OR_NULL: +- reg->type = PTR_TO_SOCK_COMMON; +- break; +- case PTR_TO_TCP_SOCK_OR_NULL: +- reg->type = PTR_TO_TCP_SOCK; +- break; +- case PTR_TO_BTF_ID_OR_NULL: +- reg->type = PTR_TO_BTF_ID; +- break; +- case PTR_TO_MEM_OR_NULL: +- reg->type = PTR_TO_MEM; +- break; +- case PTR_TO_RDONLY_BUF_OR_NULL: +- reg->type = PTR_TO_RDONLY_BUF; +- break; +- case PTR_TO_RDWR_BUF_OR_NULL: +- reg->type = PTR_TO_RDWR_BUF; +- break; +- default: +- WARN_ONCE(1, "unknown nullable register type"); ++ return; + } ++ ++ reg->type &= ~PTR_MAYBE_NULL; + } + + static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg) +@@ -2042,7 +2015,7 @@ static int mark_reg_read(struct bpf_veri + break; + if (parent->live & REG_LIVE_DONE) { + verbose(env, "verifier BUG type %s var_off %lld off %d\n", +- reg_type_str[parent->type], ++ reg_type_str(env, parent->type), + parent->var_off.value, parent->off); + return -EFAULT; + } +@@ -2701,9 +2674,8 @@ static int mark_chain_precision_stack(st + + static bool is_spillable_regtype(enum bpf_reg_type type) + { +- switch (type) { ++ switch (base_type(type)) { + case PTR_TO_MAP_VALUE: +- case PTR_TO_MAP_VALUE_OR_NULL: + case PTR_TO_STACK: + case PTR_TO_CTX: + case PTR_TO_PACKET: +@@ -2712,21 +2684,14 @@ static bool is_spillable_regtype(enum bp + case PTR_TO_FLOW_KEYS: + case CONST_PTR_TO_MAP: + case PTR_TO_SOCKET: +- case PTR_TO_SOCKET_OR_NULL: + case PTR_TO_SOCK_COMMON: +- case PTR_TO_SOCK_COMMON_OR_NULL: + case PTR_TO_TCP_SOCK: +- case PTR_TO_TCP_SOCK_OR_NULL: + case PTR_TO_XDP_SOCK: + case PTR_TO_BTF_ID: +- case PTR_TO_BTF_ID_OR_NULL: + case PTR_TO_RDONLY_BUF: +- case PTR_TO_RDONLY_BUF_OR_NULL: + case PTR_TO_RDWR_BUF: +- case PTR_TO_RDWR_BUF_OR_NULL: + case PTR_TO_PERCPU_BTF_ID: + case PTR_TO_MEM: +- case PTR_TO_MEM_OR_NULL: + case PTR_TO_FUNC: + case PTR_TO_MAP_KEY: + return true; +@@ -3567,7 +3532,7 @@ static int check_ctx_access(struct bpf_v + */ + *reg_type = info.reg_type; + +- if (*reg_type == PTR_TO_BTF_ID || *reg_type == PTR_TO_BTF_ID_OR_NULL) { ++ if (base_type(*reg_type) == PTR_TO_BTF_ID) { + *btf = info.btf; + *btf_id = info.btf_id; + } else { +@@ -3635,7 +3600,7 @@ static int check_sock_access(struct bpf_ + } + + verbose(env, "R%d invalid %s access off=%d size=%d\n", +- regno, reg_type_str[reg->type], off, size); ++ regno, reg_type_str(env, reg->type), off, size); + + return -EACCES; + } +@@ -4400,7 +4365,7 @@ static int check_mem_access(struct bpf_v + } else { + mark_reg_known_zero(env, regs, + value_regno); +- if (reg_type_may_be_null(reg_type)) ++ if (type_may_be_null(reg_type)) + regs[value_regno].id = ++env->id_gen; + /* A load of ctx field could have different + * actual load size with the one encoded in the +@@ -4408,8 +4373,7 @@ static int check_mem_access(struct bpf_v + * a sub-register. 
+ */ + regs[value_regno].subreg_def = DEF_NOT_SUBREG; +- if (reg_type == PTR_TO_BTF_ID || +- reg_type == PTR_TO_BTF_ID_OR_NULL) { ++ if (base_type(reg_type) == PTR_TO_BTF_ID) { + regs[value_regno].btf = btf; + regs[value_regno].btf_id = btf_id; + } +@@ -4462,7 +4426,7 @@ static int check_mem_access(struct bpf_v + } else if (type_is_sk_pointer(reg->type)) { + if (t == BPF_WRITE) { + verbose(env, "R%d cannot write into %s\n", +- regno, reg_type_str[reg->type]); ++ regno, reg_type_str(env, reg->type)); + return -EACCES; + } + err = check_sock_access(env, insn_idx, regno, off, size, t); +@@ -4481,7 +4445,7 @@ static int check_mem_access(struct bpf_v + } else if (reg->type == PTR_TO_RDONLY_BUF) { + if (t == BPF_WRITE) { + verbose(env, "R%d cannot write into %s\n", +- regno, reg_type_str[reg->type]); ++ regno, reg_type_str(env, reg->type)); + return -EACCES; + } + err = check_buffer_access(env, reg, regno, off, size, false, +@@ -4497,7 +4461,7 @@ static int check_mem_access(struct bpf_v + mark_reg_unknown(env, regs, value_regno); + } else { + verbose(env, "R%d invalid mem access '%s'\n", regno, +- reg_type_str[reg->type]); ++ reg_type_str(env, reg->type)); + return -EACCES; + } + +@@ -4571,7 +4535,7 @@ static int check_atomic(struct bpf_verif + is_sk_reg(env, insn->dst_reg)) { + verbose(env, "BPF_ATOMIC stores into R%d %s is not allowed\n", + insn->dst_reg, +- reg_type_str[reg_state(env, insn->dst_reg)->type]); ++ reg_type_str(env, reg_state(env, insn->dst_reg)->type)); + return -EACCES; + } + +@@ -4797,9 +4761,9 @@ static int check_helper_mem_access(struc + register_is_null(reg)) + return 0; + +- verbose(env, "R%d type=%s expected=%s\n", regno, +- reg_type_str[reg->type], +- reg_type_str[PTR_TO_STACK]); ++ verbose(env, "R%d type=%s ", regno, ++ reg_type_str(env, reg->type)); ++ verbose(env, "expected=%s\n", reg_type_str(env, PTR_TO_STACK)); + return -EACCES; + } + } +@@ -4810,7 +4774,7 @@ int check_mem_reg(struct bpf_verifier_en + if (register_is_null(reg)) + return 0; + +- if (reg_type_may_be_null(reg->type)) { ++ if (type_may_be_null(reg->type)) { + /* Assuming that the register contains a value check if the memory + * access is safe. Temporarily save and restore the register's state as + * the conversion shouldn't be visible to a caller. 
+@@ -5144,10 +5108,10 @@ static int check_reg_type(struct bpf_ver + goto found; + } + +- verbose(env, "R%d type=%s expected=", regno, reg_type_str[type]); ++ verbose(env, "R%d type=%s expected=", regno, reg_type_str(env, type)); + for (j = 0; j + 1 < i; j++) +- verbose(env, "%s, ", reg_type_str[compatible->types[j]]); +- verbose(env, "%s\n", reg_type_str[compatible->types[j]]); ++ verbose(env, "%s, ", reg_type_str(env, compatible->types[j])); ++ verbose(env, "%s\n", reg_type_str(env, compatible->types[j])); + return -EACCES; + + found: +@@ -6376,6 +6340,7 @@ static int check_helper_call(struct bpf_ + { + const struct bpf_func_proto *fn = NULL; + enum bpf_return_type ret_type; ++ enum bpf_type_flag ret_flag; + struct bpf_reg_state *regs; + struct bpf_call_arg_meta meta; + int insn_idx = *insn_idx_p; +@@ -6510,6 +6475,7 @@ static int check_helper_call(struct bpf_ + + /* update return register (already marked as written above) */ + ret_type = fn->ret_type; ++ ret_flag = type_flag(fn->ret_type); + if (ret_type == RET_INTEGER) { + /* sets type to SCALAR_VALUE */ + mark_reg_unknown(env, regs, BPF_REG_0); +@@ -6529,25 +6495,23 @@ static int check_helper_call(struct bpf_ + } + regs[BPF_REG_0].map_ptr = meta.map_ptr; + regs[BPF_REG_0].map_uid = meta.map_uid; +- if (type_may_be_null(ret_type)) { +- regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; +- } else { +- regs[BPF_REG_0].type = PTR_TO_MAP_VALUE; +- if (map_value_has_spin_lock(meta.map_ptr)) +- regs[BPF_REG_0].id = ++env->id_gen; ++ regs[BPF_REG_0].type = PTR_TO_MAP_VALUE | ret_flag; ++ if (!type_may_be_null(ret_type) && ++ map_value_has_spin_lock(meta.map_ptr)) { ++ regs[BPF_REG_0].id = ++env->id_gen; + } + } else if (base_type(ret_type) == RET_PTR_TO_SOCKET) { + mark_reg_known_zero(env, regs, BPF_REG_0); +- regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL; ++ regs[BPF_REG_0].type = PTR_TO_SOCKET | ret_flag; + } else if (base_type(ret_type) == RET_PTR_TO_SOCK_COMMON) { + mark_reg_known_zero(env, regs, BPF_REG_0); +- regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON_OR_NULL; ++ regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON | ret_flag; + } else if (base_type(ret_type) == RET_PTR_TO_TCP_SOCK) { + mark_reg_known_zero(env, regs, BPF_REG_0); +- regs[BPF_REG_0].type = PTR_TO_TCP_SOCK_OR_NULL; ++ regs[BPF_REG_0].type = PTR_TO_TCP_SOCK | ret_flag; + } else if (base_type(ret_type) == RET_PTR_TO_ALLOC_MEM) { + mark_reg_known_zero(env, regs, BPF_REG_0); +- regs[BPF_REG_0].type = PTR_TO_MEM_OR_NULL; ++ regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag; + regs[BPF_REG_0].mem_size = meta.mem_size; + } else if (base_type(ret_type) == RET_PTR_TO_MEM_OR_BTF_ID) { + const struct btf_type *t; +@@ -6567,14 +6531,10 @@ static int check_helper_call(struct bpf_ + tname, PTR_ERR(ret)); + return -EINVAL; + } +- regs[BPF_REG_0].type = +- (ret_type & PTR_MAYBE_NULL) ? +- PTR_TO_MEM_OR_NULL : PTR_TO_MEM; ++ regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag; + regs[BPF_REG_0].mem_size = tsize; + } else { +- regs[BPF_REG_0].type = +- (ret_type & PTR_MAYBE_NULL) ? +- PTR_TO_BTF_ID_OR_NULL : PTR_TO_BTF_ID; ++ regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag; + regs[BPF_REG_0].btf = meta.ret_btf; + regs[BPF_REG_0].btf_id = meta.ret_btf_id; + } +@@ -6582,9 +6542,7 @@ static int check_helper_call(struct bpf_ + int ret_btf_id; + + mark_reg_known_zero(env, regs, BPF_REG_0); +- regs[BPF_REG_0].type = (ret_type & PTR_MAYBE_NULL) ? 
+- PTR_TO_BTF_ID_OR_NULL : +- PTR_TO_BTF_ID; ++ regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag; + ret_btf_id = *fn->ret_btf_id; + if (ret_btf_id == 0) { + verbose(env, "invalid return type %u of func %s#%d\n", +@@ -6603,7 +6561,7 @@ static int check_helper_call(struct bpf_ + return -EINVAL; + } + +- if (reg_type_may_be_null(regs[BPF_REG_0].type)) ++ if (type_may_be_null(regs[BPF_REG_0].type)) + regs[BPF_REG_0].id = ++env->id_gen; + + if (is_ptr_cast_function(func_id)) { +@@ -6812,25 +6770,25 @@ static bool check_reg_sane_offset(struct + + if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) { + verbose(env, "math between %s pointer and %lld is not allowed\n", +- reg_type_str[type], val); ++ reg_type_str(env, type), val); + return false; + } + + if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) { + verbose(env, "%s pointer offset %d is not allowed\n", +- reg_type_str[type], reg->off); ++ reg_type_str(env, type), reg->off); + return false; + } + + if (smin == S64_MIN) { + verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n", +- reg_type_str[type]); ++ reg_type_str(env, type)); + return false; + } + + if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) { + verbose(env, "value %lld makes %s pointer be out of bounds\n", +- smin, reg_type_str[type]); ++ smin, reg_type_str(env, type)); + return false; + } + +@@ -7207,11 +7165,13 @@ static int adjust_ptr_min_max_vals(struc + return -EACCES; + } + +- switch (ptr_reg->type) { +- case PTR_TO_MAP_VALUE_OR_NULL: ++ if (ptr_reg->type & PTR_MAYBE_NULL) { + verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n", +- dst, reg_type_str[ptr_reg->type]); ++ dst, reg_type_str(env, ptr_reg->type)); + return -EACCES; ++ } ++ ++ switch (base_type(ptr_reg->type)) { + case CONST_PTR_TO_MAP: + /* smin_val represents the known value */ + if (known && smin_val == 0 && opcode == BPF_ADD) +@@ -7224,10 +7184,10 @@ static int adjust_ptr_min_max_vals(struc + case PTR_TO_XDP_SOCK: + reject: + verbose(env, "R%d pointer arithmetic on %s prohibited\n", +- dst, reg_type_str[ptr_reg->type]); ++ dst, reg_type_str(env, ptr_reg->type)); + return -EACCES; + default: +- if (reg_type_may_be_null(ptr_reg->type)) ++ if (type_may_be_null(ptr_reg->type)) + goto reject; + break; + } +@@ -8949,7 +8909,7 @@ static void mark_ptr_or_null_reg(struct + struct bpf_reg_state *reg, u32 id, + bool is_null) + { +- if (reg_type_may_be_null(reg->type) && reg->id == id && ++ if (type_may_be_null(reg->type) && reg->id == id && + !WARN_ON_ONCE(!reg->id)) { + if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || + !tnum_equals_const(reg->var_off, 0) || +@@ -9327,7 +9287,7 @@ static int check_cond_jmp_op(struct bpf_ + */ + if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K && + insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && +- reg_type_may_be_null(dst_reg->type)) { ++ type_may_be_null(dst_reg->type)) { + /* Mark all identical registers in each branch as either + * safe or unknown depending R == 0 or R != 0 conditional. 
+ */ +@@ -9584,7 +9544,7 @@ static int check_return_code(struct bpf_ + /* enforce return zero from async callbacks like timer */ + if (reg->type != SCALAR_VALUE) { + verbose(env, "In async callback the register R0 is not a known value (%s)\n", +- reg_type_str[reg->type]); ++ reg_type_str(env, reg->type)); + return -EINVAL; + } + +@@ -9598,7 +9558,7 @@ static int check_return_code(struct bpf_ + if (is_subprog) { + if (reg->type != SCALAR_VALUE) { + verbose(env, "At subprogram exit the register R0 is not a scalar value (%s)\n", +- reg_type_str[reg->type]); ++ reg_type_str(env, reg->type)); + return -EINVAL; + } + return 0; +@@ -9662,7 +9622,7 @@ static int check_return_code(struct bpf_ + + if (reg->type != SCALAR_VALUE) { + verbose(env, "At program exit the register R0 is not a known value (%s)\n", +- reg_type_str[reg->type]); ++ reg_type_str(env, reg->type)); + return -EINVAL; + } + +@@ -10443,7 +10403,7 @@ static bool regsafe(struct bpf_verifier_ + return true; + if (rcur->type == NOT_INIT) + return false; +- switch (rold->type) { ++ switch (base_type(rold->type)) { + case SCALAR_VALUE: + if (env->explore_alu_limits) + return false; +@@ -10465,6 +10425,22 @@ static bool regsafe(struct bpf_verifier_ + } + case PTR_TO_MAP_KEY: + case PTR_TO_MAP_VALUE: ++ /* a PTR_TO_MAP_VALUE could be safe to use as a ++ * PTR_TO_MAP_VALUE_OR_NULL into the same map. ++ * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL- ++ * checked, doing so could have affected others with the same ++ * id, and we can't check for that because we lost the id when ++ * we converted to a PTR_TO_MAP_VALUE. ++ */ ++ if (type_may_be_null(rold->type)) { ++ if (!type_may_be_null(rcur->type)) ++ return false; ++ if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id))) ++ return false; ++ /* Check our ids match any regs they're supposed to */ ++ return check_ids(rold->id, rcur->id, idmap); ++ } ++ + /* If the new min/max/var_off satisfy the old ones and + * everything else matches, we are OK. + * 'id' is not compared, since it's only used for maps with +@@ -10476,20 +10452,6 @@ static bool regsafe(struct bpf_verifier_ + return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 && + range_within(rold, rcur) && + tnum_in(rold->var_off, rcur->var_off); +- case PTR_TO_MAP_VALUE_OR_NULL: +- /* a PTR_TO_MAP_VALUE could be safe to use as a +- * PTR_TO_MAP_VALUE_OR_NULL into the same map. +- * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL- +- * checked, doing so could have affected others with the same +- * id, and we can't check for that because we lost the id when +- * we converted to a PTR_TO_MAP_VALUE. +- */ +- if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL) +- return false; +- if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id))) +- return false; +- /* Check our ids match any regs they're supposed to */ +- return check_ids(rold->id, rcur->id, idmap); + case PTR_TO_PACKET_META: + case PTR_TO_PACKET: + if (rcur->type != rold->type) +@@ -10518,11 +10480,8 @@ static bool regsafe(struct bpf_verifier_ + case PTR_TO_PACKET_END: + case PTR_TO_FLOW_KEYS: + case PTR_TO_SOCKET: +- case PTR_TO_SOCKET_OR_NULL: + case PTR_TO_SOCK_COMMON: +- case PTR_TO_SOCK_COMMON_OR_NULL: + case PTR_TO_TCP_SOCK: +- case PTR_TO_TCP_SOCK_OR_NULL: + case PTR_TO_XDP_SOCK: + /* Only valid matches are exact, which memcmp() above + * would have accepted +@@ -11048,17 +11007,13 @@ next: + /* Return true if it's OK to have the same insn return a different type. 
*/ + static bool reg_type_mismatch_ok(enum bpf_reg_type type) + { +- switch (type) { ++ switch (base_type(type)) { + case PTR_TO_CTX: + case PTR_TO_SOCKET: +- case PTR_TO_SOCKET_OR_NULL: + case PTR_TO_SOCK_COMMON: +- case PTR_TO_SOCK_COMMON_OR_NULL: + case PTR_TO_TCP_SOCK: +- case PTR_TO_TCP_SOCK_OR_NULL: + case PTR_TO_XDP_SOCK: + case PTR_TO_BTF_ID: +- case PTR_TO_BTF_ID_OR_NULL: + return false; + default: + return true; +@@ -11282,7 +11237,7 @@ static int do_check(struct bpf_verifier_ + if (is_ctx_reg(env, insn->dst_reg)) { + verbose(env, "BPF_ST stores into R%d %s is not allowed\n", + insn->dst_reg, +- reg_type_str[reg_state(env, insn->dst_reg)->type]); ++ reg_type_str(env, reg_state(env, insn->dst_reg)->type)); + return -EACCES; + } + +--- a/net/core/bpf_sk_storage.c ++++ b/net/core/bpf_sk_storage.c +@@ -929,7 +929,7 @@ static struct bpf_iter_reg bpf_sk_storag + { offsetof(struct bpf_iter__bpf_sk_storage_map, sk), + PTR_TO_BTF_ID_OR_NULL }, + { offsetof(struct bpf_iter__bpf_sk_storage_map, value), +- PTR_TO_RDWR_BUF_OR_NULL }, ++ PTR_TO_RDWR_BUF | PTR_MAYBE_NULL }, + }, + .seq_info = &iter_seq_info, + }; +--- a/net/core/sock_map.c ++++ b/net/core/sock_map.c +@@ -1569,7 +1569,7 @@ static struct bpf_iter_reg sock_map_iter + .ctx_arg_info_size = 2, + .ctx_arg_info = { + { offsetof(struct bpf_iter__sockmap, key), +- PTR_TO_RDONLY_BUF_OR_NULL }, ++ PTR_TO_RDONLY_BUF | PTR_MAYBE_NULL }, + { offsetof(struct bpf_iter__sockmap, sk), + PTR_TO_BTF_ID_OR_NULL }, + }, diff --git a/queue-5.16/bpf-replace-ret_xxx_or_null-with-ret_xxx-ptr_maybe_null.patch b/queue-5.16/bpf-replace-ret_xxx_or_null-with-ret_xxx-ptr_maybe_null.patch new file mode 100644 index 00000000000..179a57cfd35 --- /dev/null +++ b/queue-5.16/bpf-replace-ret_xxx_or_null-with-ret_xxx-ptr_maybe_null.patch @@ -0,0 +1,202 @@ +From foo@baz Thu Feb 17 08:07:01 PM CET 2022 +From: Hao Luo +Date: Wed, 16 Feb 2022 14:52:03 -0800 +Subject: bpf: Replace RET_XXX_OR_NULL with RET_XXX | PTR_MAYBE_NULL +To: Greg KH +Cc: Alexei Starovoitov , Andrii Nakryiko , Daniel Borkmann , laura@labbott.name, stable@vger.kernel.org, Hao Luo +Message-ID: <20220216225209.2196865-4-haoluo@google.com> + +From: Hao Luo + +commit 3c4807322660d4290ac9062c034aed6b87243861 upstream. + +We have introduced a new type to make bpf_ret composable, by +reserving high bits to represent flags. + +One of the flag is PTR_MAYBE_NULL, which indicates a pointer +may be NULL. When applying this flag to ret_types, it means +the returned value could be a NULL pointer. This patch +switches the qualified arg_types to use this flag. +The ret_types changed in this patch include: + +1. RET_PTR_TO_MAP_VALUE_OR_NULL +2. RET_PTR_TO_SOCKET_OR_NULL +3. RET_PTR_TO_TCP_SOCK_OR_NULL +4. RET_PTR_TO_SOCK_COMMON_OR_NULL +5. RET_PTR_TO_ALLOC_MEM_OR_NULL +6. RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL +7. RET_PTR_TO_BTF_ID_OR_NULL + +This patch doesn't eliminate the use of these names, instead +it makes them aliases to 'RET_PTR_TO_XXX | PTR_MAYBE_NULL'. 
+ +Signed-off-by: Hao Luo +Signed-off-by: Alexei Starovoitov +Link: https://lore.kernel.org/bpf/20211217003152.48334-4-haoluo@google.com +Cc: stable@vger.kernel.org # 5.16.x +Signed-off-by: Greg Kroah-Hartman +--- + include/linux/bpf.h | 19 +++++++++++------- + kernel/bpf/helpers.c | 2 - + kernel/bpf/verifier.c | 52 +++++++++++++++++++++++++------------------------- + 3 files changed, 39 insertions(+), 34 deletions(-) + +--- a/include/linux/bpf.h ++++ b/include/linux/bpf.h +@@ -382,17 +382,22 @@ enum bpf_return_type { + RET_INTEGER, /* function returns integer */ + RET_VOID, /* function doesn't return anything */ + RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */ +- RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */ +- RET_PTR_TO_SOCKET_OR_NULL, /* returns a pointer to a socket or NULL */ +- RET_PTR_TO_TCP_SOCK_OR_NULL, /* returns a pointer to a tcp_sock or NULL */ +- RET_PTR_TO_SOCK_COMMON_OR_NULL, /* returns a pointer to a sock_common or NULL */ +- RET_PTR_TO_ALLOC_MEM_OR_NULL, /* returns a pointer to dynamically allocated memory or NULL */ +- RET_PTR_TO_BTF_ID_OR_NULL, /* returns a pointer to a btf_id or NULL */ +- RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL, /* returns a pointer to a valid memory or a btf_id or NULL */ ++ RET_PTR_TO_SOCKET, /* returns a pointer to a socket */ ++ RET_PTR_TO_TCP_SOCK, /* returns a pointer to a tcp_sock */ ++ RET_PTR_TO_SOCK_COMMON, /* returns a pointer to a sock_common */ ++ RET_PTR_TO_ALLOC_MEM, /* returns a pointer to dynamically allocated memory */ + RET_PTR_TO_MEM_OR_BTF_ID, /* returns a pointer to a valid memory or a btf_id */ + RET_PTR_TO_BTF_ID, /* returns a pointer to a btf_id */ + __BPF_RET_TYPE_MAX, + ++ /* Extended ret_types. */ ++ RET_PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_MAP_VALUE, ++ RET_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCKET, ++ RET_PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_TCP_SOCK, ++ RET_PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCK_COMMON, ++ RET_PTR_TO_ALLOC_MEM_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_ALLOC_MEM, ++ RET_PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_BTF_ID, ++ + /* This must be the last entry. Its purpose is to ensure the enum is + * wide enough to hold the higher bits reserved for bpf_type_flag. 
+ */ +--- a/kernel/bpf/helpers.c ++++ b/kernel/bpf/helpers.c +@@ -667,7 +667,7 @@ BPF_CALL_2(bpf_per_cpu_ptr, const void * + const struct bpf_func_proto bpf_per_cpu_ptr_proto = { + .func = bpf_per_cpu_ptr, + .gpl_only = false, +- .ret_type = RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL, ++ .ret_type = RET_PTR_TO_MEM_OR_BTF_ID | PTR_MAYBE_NULL, + .arg1_type = ARG_PTR_TO_PERCPU_BTF_ID, + .arg2_type = ARG_ANYTHING, + }; +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -6375,6 +6375,7 @@ static int check_helper_call(struct bpf_ + int *insn_idx_p) + { + const struct bpf_func_proto *fn = NULL; ++ enum bpf_return_type ret_type; + struct bpf_reg_state *regs; + struct bpf_call_arg_meta meta; + int insn_idx = *insn_idx_p; +@@ -6508,13 +6509,13 @@ static int check_helper_call(struct bpf_ + regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG; + + /* update return register (already marked as written above) */ +- if (fn->ret_type == RET_INTEGER) { ++ ret_type = fn->ret_type; ++ if (ret_type == RET_INTEGER) { + /* sets type to SCALAR_VALUE */ + mark_reg_unknown(env, regs, BPF_REG_0); +- } else if (fn->ret_type == RET_VOID) { ++ } else if (ret_type == RET_VOID) { + regs[BPF_REG_0].type = NOT_INIT; +- } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL || +- fn->ret_type == RET_PTR_TO_MAP_VALUE) { ++ } else if (base_type(ret_type) == RET_PTR_TO_MAP_VALUE) { + /* There is no offset yet applied, variable or fixed */ + mark_reg_known_zero(env, regs, BPF_REG_0); + /* remember map_ptr, so that check_map_access() +@@ -6528,28 +6529,27 @@ static int check_helper_call(struct bpf_ + } + regs[BPF_REG_0].map_ptr = meta.map_ptr; + regs[BPF_REG_0].map_uid = meta.map_uid; +- if (fn->ret_type == RET_PTR_TO_MAP_VALUE) { ++ if (type_may_be_null(ret_type)) { ++ regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; ++ } else { + regs[BPF_REG_0].type = PTR_TO_MAP_VALUE; + if (map_value_has_spin_lock(meta.map_ptr)) + regs[BPF_REG_0].id = ++env->id_gen; +- } else { +- regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; + } +- } else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) { ++ } else if (base_type(ret_type) == RET_PTR_TO_SOCKET) { + mark_reg_known_zero(env, regs, BPF_REG_0); + regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL; +- } else if (fn->ret_type == RET_PTR_TO_SOCK_COMMON_OR_NULL) { ++ } else if (base_type(ret_type) == RET_PTR_TO_SOCK_COMMON) { + mark_reg_known_zero(env, regs, BPF_REG_0); + regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON_OR_NULL; +- } else if (fn->ret_type == RET_PTR_TO_TCP_SOCK_OR_NULL) { ++ } else if (base_type(ret_type) == RET_PTR_TO_TCP_SOCK) { + mark_reg_known_zero(env, regs, BPF_REG_0); + regs[BPF_REG_0].type = PTR_TO_TCP_SOCK_OR_NULL; +- } else if (fn->ret_type == RET_PTR_TO_ALLOC_MEM_OR_NULL) { ++ } else if (base_type(ret_type) == RET_PTR_TO_ALLOC_MEM) { + mark_reg_known_zero(env, regs, BPF_REG_0); + regs[BPF_REG_0].type = PTR_TO_MEM_OR_NULL; + regs[BPF_REG_0].mem_size = meta.mem_size; +- } else if (fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL || +- fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID) { ++ } else if (base_type(ret_type) == RET_PTR_TO_MEM_OR_BTF_ID) { + const struct btf_type *t; + + mark_reg_known_zero(env, regs, BPF_REG_0); +@@ -6568,28 +6568,28 @@ static int check_helper_call(struct bpf_ + return -EINVAL; + } + regs[BPF_REG_0].type = +- fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID ? +- PTR_TO_MEM : PTR_TO_MEM_OR_NULL; ++ (ret_type & PTR_MAYBE_NULL) ? ++ PTR_TO_MEM_OR_NULL : PTR_TO_MEM; + regs[BPF_REG_0].mem_size = tsize; + } else { + regs[BPF_REG_0].type = +- fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID ? 
+- PTR_TO_BTF_ID : PTR_TO_BTF_ID_OR_NULL; ++ (ret_type & PTR_MAYBE_NULL) ? ++ PTR_TO_BTF_ID_OR_NULL : PTR_TO_BTF_ID; + regs[BPF_REG_0].btf = meta.ret_btf; + regs[BPF_REG_0].btf_id = meta.ret_btf_id; + } +- } else if (fn->ret_type == RET_PTR_TO_BTF_ID_OR_NULL || +- fn->ret_type == RET_PTR_TO_BTF_ID) { ++ } else if (base_type(ret_type) == RET_PTR_TO_BTF_ID) { + int ret_btf_id; + + mark_reg_known_zero(env, regs, BPF_REG_0); +- regs[BPF_REG_0].type = fn->ret_type == RET_PTR_TO_BTF_ID ? +- PTR_TO_BTF_ID : +- PTR_TO_BTF_ID_OR_NULL; ++ regs[BPF_REG_0].type = (ret_type & PTR_MAYBE_NULL) ? ++ PTR_TO_BTF_ID_OR_NULL : ++ PTR_TO_BTF_ID; + ret_btf_id = *fn->ret_btf_id; + if (ret_btf_id == 0) { +- verbose(env, "invalid return type %d of func %s#%d\n", +- fn->ret_type, func_id_name(func_id), func_id); ++ verbose(env, "invalid return type %u of func %s#%d\n", ++ base_type(ret_type), func_id_name(func_id), ++ func_id); + return -EINVAL; + } + /* current BPF helper definitions are only coming from +@@ -6598,8 +6598,8 @@ static int check_helper_call(struct bpf_ + regs[BPF_REG_0].btf = btf_vmlinux; + regs[BPF_REG_0].btf_id = ret_btf_id; + } else { +- verbose(env, "unknown return type %d of func %s#%d\n", +- fn->ret_type, func_id_name(func_id), func_id); ++ verbose(env, "unknown return type %u of func %s#%d\n", ++ base_type(ret_type), func_id_name(func_id), func_id); + return -EINVAL; + } + diff --git a/queue-5.16/bpf-selftests-test-ptr_to_rdonly_mem.patch b/queue-5.16/bpf-selftests-test-ptr_to_rdonly_mem.patch new file mode 100644 index 00000000000..c707030e64d --- /dev/null +++ b/queue-5.16/bpf-selftests-test-ptr_to_rdonly_mem.patch @@ -0,0 +1,94 @@ +From foo@baz Thu Feb 17 08:07:01 PM CET 2022 +From: Hao Luo +Date: Wed, 16 Feb 2022 14:52:09 -0800 +Subject: bpf/selftests: Test PTR_TO_RDONLY_MEM +To: Greg KH +Cc: Alexei Starovoitov , Andrii Nakryiko , Daniel Borkmann , laura@labbott.name, stable@vger.kernel.org, Hao Luo +Message-ID: <20220216225209.2196865-10-haoluo@google.com> + +From: Hao Luo + +commit 9497c458c10b049438ef6e6ddda898edbc3ec6a8 upstream. + +This test verifies that a ksym of non-struct can not be directly +updated. 
+ +Signed-off-by: Hao Luo +Signed-off-by: Alexei Starovoitov +Acked-by: Andrii Nakryiko +Link: https://lore.kernel.org/bpf/20211217003152.48334-10-haoluo@google.com +Cc: stable@vger.kernel.org # 5.16.x +Signed-off-by: Greg Kroah-Hartman +--- + tools/testing/selftests/bpf/prog_tests/ksyms_btf.c | 14 ++++ + tools/testing/selftests/bpf/progs/test_ksyms_btf_write_check.c | 29 ++++++++++ + 2 files changed, 43 insertions(+) + create mode 100644 tools/testing/selftests/bpf/progs/test_ksyms_btf_write_check.c + +--- a/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c ++++ b/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c +@@ -8,6 +8,7 @@ + #include "test_ksyms_btf_null_check.skel.h" + #include "test_ksyms_weak.skel.h" + #include "test_ksyms_weak.lskel.h" ++#include "test_ksyms_btf_write_check.skel.h" + + static int duration; + +@@ -137,6 +138,16 @@ cleanup: + test_ksyms_weak_lskel__destroy(skel); + } + ++static void test_write_check(void) ++{ ++ struct test_ksyms_btf_write_check *skel; ++ ++ skel = test_ksyms_btf_write_check__open_and_load(); ++ ASSERT_ERR_PTR(skel, "unexpected load of a prog writing to ksym memory\n"); ++ ++ test_ksyms_btf_write_check__destroy(skel); ++} ++ + void test_ksyms_btf(void) + { + int percpu_datasec; +@@ -167,4 +178,7 @@ void test_ksyms_btf(void) + + if (test__start_subtest("weak_ksyms_lskel")) + test_weak_syms_lskel(); ++ ++ if (test__start_subtest("write_check")) ++ test_write_check(); + } +--- /dev/null ++++ b/tools/testing/selftests/bpf/progs/test_ksyms_btf_write_check.c +@@ -0,0 +1,29 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* Copyright (c) 2021 Google */ ++ ++#include "vmlinux.h" ++ ++#include ++ ++extern const int bpf_prog_active __ksym; /* int type global var. */ ++ ++SEC("raw_tp/sys_enter") ++int handler(const void *ctx) ++{ ++ int *active; ++ __u32 cpu; ++ ++ cpu = bpf_get_smp_processor_id(); ++ active = (int *)bpf_per_cpu_ptr(&bpf_prog_active, cpu); ++ if (active) { ++ /* Kernel memory obtained from bpf_{per,this}_cpu_ptr ++ * is read-only, should _not_ pass verification. ++ */ ++ /* WRITE_ONCE */ ++ *(volatile int *)active = -1; ++ } ++ ++ return 0; ++} ++ ++char _license[] SEC("license") = "GPL"; diff --git a/queue-5.16/btrfs-defrag-don-t-try-to-defrag-extents-which-are-under-writeback.patch b/queue-5.16/btrfs-defrag-don-t-try-to-defrag-extents-which-are-under-writeback.patch new file mode 100644 index 00000000000..86fbbd45411 --- /dev/null +++ b/queue-5.16/btrfs-defrag-don-t-try-to-defrag-extents-which-are-under-writeback.patch @@ -0,0 +1,49 @@ +From 0d1ffa2228cb34f485f8fe927f134b82a0ea62ae Mon Sep 17 00:00:00 2001 +From: Qu Wenruo +Date: Tue, 8 Feb 2022 14:54:05 +0800 +Subject: btrfs: defrag: don't try to defrag extents which are under writeback + +From: Qu Wenruo + +commit 0d1ffa2228cb34f485f8fe927f134b82a0ea62ae upstream. + +Once we start writeback (have called btrfs_run_delalloc_range()), we +allocate an extent, create an extent map point to that extent, with a +generation of (u64)-1, created the ordered extent and then clear the +DELALLOC bit from the range in the inode's io tree. + +Such extent map can pass the first call of defrag_collect_targets(), as +its generation is (u64)-1, meets any possible minimal generation check. +And the range will not have DELALLOC bit, also passing the DELALLOC bit +check. + +It will only be re-checked in the second call of +defrag_collect_targets(), which will wait for writeback. + +But at that stage we have already spent our time waiting for some IO we +may or may not want to defrag. 
+ +Let's reject such extents early so we won't waste our time. + +CC: stable@vger.kernel.org # 5.16 +Reviewed-by: Filipe Manana +Signed-off-by: Qu Wenruo +Signed-off-by: David Sterba +Signed-off-by: Greg Kroah-Hartman +--- + fs/btrfs/ioctl.c | 4 ++++ + 1 file changed, 4 insertions(+) + +--- a/fs/btrfs/ioctl.c ++++ b/fs/btrfs/ioctl.c +@@ -1184,6 +1184,10 @@ static int defrag_collect_targets(struct + if (em->generation < newer_than) + goto next; + ++ /* This em is under writeback, no need to defrag */ ++ if (em->generation == (u64)-1) ++ goto next; ++ + /* + * Our start offset might be in the middle of an existing extent + * map, so take that into account. diff --git a/queue-5.16/btrfs-don-t-hold-cpu-for-too-long-when-defragging-a-file.patch b/queue-5.16/btrfs-don-t-hold-cpu-for-too-long-when-defragging-a-file.patch new file mode 100644 index 00000000000..4f6d31797b3 --- /dev/null +++ b/queue-5.16/btrfs-don-t-hold-cpu-for-too-long-when-defragging-a-file.patch @@ -0,0 +1,39 @@ +From ea0eba69a2a8125229b1b6011644598039bc53aa Mon Sep 17 00:00:00 2001 +From: Qu Wenruo +Date: Sun, 30 Jan 2022 20:53:15 +0800 +Subject: btrfs: don't hold CPU for too long when defragging a file + +From: Qu Wenruo + +commit ea0eba69a2a8125229b1b6011644598039bc53aa upstream. + +There is a user report about "btrfs filesystem defrag" causing 120s +timeout problem. + +For btrfs_defrag_file() it will iterate all file extents if called from +defrag ioctl, thus it can take a long time. + +There is no reason not to release the CPU during such a long operation. + +Add cond_resched() after defragged one cluster. + +CC: stable@vger.kernel.org # 5.16 +Link: https://lore.kernel.org/linux-btrfs/10e51417-2203-f0a4-2021-86c8511cc367@gmx.com +Signed-off-by: Qu Wenruo +Reviewed-by: David Sterba +Signed-off-by: David Sterba +Signed-off-by: Greg Kroah-Hartman +--- + fs/btrfs/ioctl.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/fs/btrfs/ioctl.c ++++ b/fs/btrfs/ioctl.c +@@ -1603,6 +1603,7 @@ int btrfs_defrag_file(struct inode *inod + ret = 0; + break; + } ++ cond_resched(); + } + + if (ra_allocated) diff --git a/queue-5.16/btrfs-send-in-case-of-io-error-log-it.patch b/queue-5.16/btrfs-send-in-case-of-io-error-log-it.patch new file mode 100644 index 00000000000..89c3cf6de92 --- /dev/null +++ b/queue-5.16/btrfs-send-in-case-of-io-error-log-it.patch @@ -0,0 +1,38 @@ +From 2e7be9db125a0bf940c5d65eb5c40d8700f738b5 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?D=C4=81vis=20Mos=C4=81ns?= +Date: Sat, 5 Feb 2022 20:48:23 +0200 +Subject: btrfs: send: in case of IO error log it +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Dāvis Mosāns + +commit 2e7be9db125a0bf940c5d65eb5c40d8700f738b5 upstream. + +Currently if we get IO error while doing send then we abort without +logging information about which file caused issue. So log it to help +with debugging. 
+ +CC: stable@vger.kernel.org # 4.9+ +Signed-off-by: Dāvis Mosāns +Reviewed-by: David Sterba +Signed-off-by: David Sterba +Signed-off-by: Greg Kroah-Hartman +--- + fs/btrfs/send.c | 4 ++++ + 1 file changed, 4 insertions(+) + +--- a/fs/btrfs/send.c ++++ b/fs/btrfs/send.c +@@ -4983,6 +4983,10 @@ static int put_file_data(struct send_ctx + lock_page(page); + if (!PageUptodate(page)) { + unlock_page(page); ++ btrfs_err(fs_info, ++ "send: IO error at offset %llu for inode %llu root %llu", ++ page_offset(page), sctx->cur_ino, ++ sctx->send_root->root_key.objectid); + put_page(page); + ret = -EIO; + break; diff --git a/queue-5.16/drm-nouveau-pmu-gm200-use-alternate-falcon-reset-sequence.patch b/queue-5.16/drm-nouveau-pmu-gm200-use-alternate-falcon-reset-sequence.patch new file mode 100644 index 00000000000..3ea24c844f1 --- /dev/null +++ b/queue-5.16/drm-nouveau-pmu-gm200-use-alternate-falcon-reset-sequence.patch @@ -0,0 +1,126 @@ +From 4cdd2450bf739bada353e82d27b00db9af8c3001 Mon Sep 17 00:00:00 2001 +From: Ben Skeggs +Date: Thu, 25 Feb 2021 14:54:59 +1000 +Subject: drm/nouveau/pmu/gm200-: use alternate falcon reset sequence + +From: Ben Skeggs + +commit 4cdd2450bf739bada353e82d27b00db9af8c3001 upstream. + +Signed-off-by: Ben Skeggs +Reviewed-by: Karol Herbst +Signed-off-by: Karol Herbst +Link: https://gitlab.freedesktop.org/drm/nouveau/-/merge_requests/10 +Signed-off-by: Greg Kroah-Hartman +--- + drivers/gpu/drm/nouveau/nvkm/falcon/base.c | 8 ++++-- + drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c | 31 +++++++++++++++++++++++- + drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c | 2 - + drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c | 2 - + drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c | 2 - + drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h | 2 + + 6 files changed, 41 insertions(+), 6 deletions(-) + +--- a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c ++++ b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c +@@ -117,8 +117,12 @@ nvkm_falcon_disable(struct nvkm_falcon * + int + nvkm_falcon_reset(struct nvkm_falcon *falcon) + { +- nvkm_falcon_disable(falcon); +- return nvkm_falcon_enable(falcon); ++ if (!falcon->func->reset) { ++ nvkm_falcon_disable(falcon); ++ return nvkm_falcon_enable(falcon); ++ } ++ ++ return falcon->func->reset(falcon); + } + + int +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c +@@ -23,9 +23,38 @@ + */ + #include "priv.h" + ++static int ++gm200_pmu_flcn_reset(struct nvkm_falcon *falcon) ++{ ++ struct nvkm_pmu *pmu = container_of(falcon, typeof(*pmu), falcon); ++ ++ nvkm_falcon_wr32(falcon, 0x014, 0x0000ffff); ++ pmu->func->reset(pmu); ++ return nvkm_falcon_enable(falcon); ++} ++ ++const struct nvkm_falcon_func ++gm200_pmu_flcn = { ++ .debug = 0xc08, ++ .fbif = 0xe00, ++ .load_imem = nvkm_falcon_v1_load_imem, ++ .load_dmem = nvkm_falcon_v1_load_dmem, ++ .read_dmem = nvkm_falcon_v1_read_dmem, ++ .bind_context = nvkm_falcon_v1_bind_context, ++ .wait_for_halt = nvkm_falcon_v1_wait_for_halt, ++ .clear_interrupt = nvkm_falcon_v1_clear_interrupt, ++ .set_start_addr = nvkm_falcon_v1_set_start_addr, ++ .start = nvkm_falcon_v1_start, ++ .enable = nvkm_falcon_v1_enable, ++ .disable = nvkm_falcon_v1_disable, ++ .reset = gm200_pmu_flcn_reset, ++ .cmdq = { 0x4a0, 0x4b0, 4 }, ++ .msgq = { 0x4c8, 0x4cc, 0 }, ++}; ++ + static const struct nvkm_pmu_func + gm200_pmu = { +- .flcn = >215_pmu_flcn, ++ .flcn = &gm200_pmu_flcn, + .enabled = gf100_pmu_enabled, + .reset = gf100_pmu_reset, + }; +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c 
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c +@@ -211,7 +211,7 @@ gm20b_pmu_recv(struct nvkm_pmu *pmu) + + static const struct nvkm_pmu_func + gm20b_pmu = { +- .flcn = >215_pmu_flcn, ++ .flcn = &gm200_pmu_flcn, + .enabled = gf100_pmu_enabled, + .intr = gt215_pmu_intr, + .recv = gm20b_pmu_recv, +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c +@@ -39,7 +39,7 @@ gp102_pmu_enabled(struct nvkm_pmu *pmu) + + static const struct nvkm_pmu_func + gp102_pmu = { +- .flcn = >215_pmu_flcn, ++ .flcn = &gm200_pmu_flcn, + .enabled = gp102_pmu_enabled, + .reset = gp102_pmu_reset, + }; +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c +@@ -78,7 +78,7 @@ gp10b_pmu_acr = { + + static const struct nvkm_pmu_func + gp10b_pmu = { +- .flcn = >215_pmu_flcn, ++ .flcn = &gm200_pmu_flcn, + .enabled = gf100_pmu_enabled, + .intr = gt215_pmu_intr, + .recv = gm20b_pmu_recv, +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h +@@ -44,6 +44,8 @@ void gf100_pmu_reset(struct nvkm_pmu *); + + void gk110_pmu_pgob(struct nvkm_pmu *, bool); + ++extern const struct nvkm_falcon_func gm200_pmu_flcn; ++ + void gm20b_pmu_acr_bld_patch(struct nvkm_acr *, u32, s64); + void gm20b_pmu_acr_bld_write(struct nvkm_acr *, u32, struct nvkm_acr_lsfw *); + int gm20b_pmu_acr_boot(struct nvkm_falcon *); diff --git a/queue-5.16/hid-add-support-for-ugtablet-wp5540.patch b/queue-5.16/hid-add-support-for-ugtablet-wp5540.patch new file mode 100644 index 00000000000..071e61b77fa --- /dev/null +++ b/queue-5.16/hid-add-support-for-ugtablet-wp5540.patch @@ -0,0 +1,43 @@ +From fd5dd6acd8f823ea804f76d3af64fa1be9d5fb78 Mon Sep 17 00:00:00 2001 +From: Sergio Costas +Date: Fri, 4 Feb 2022 10:01:17 +0100 +Subject: HID:Add support for UGTABLET WP5540 + +From: Sergio Costas + +commit fd5dd6acd8f823ea804f76d3af64fa1be9d5fb78 upstream. + +This patch adds support for the UGTABLET WP5540 digitizer tablet +devices. Without it, the pen moves the cursor, but neither the +buttons nor the tap sensor in the tip do work. 
+ +Signed-off-by: Sergio Costas +Link: https://lore.kernel.org/r/63dece1d-91ca-1b1b-d90d-335be66896be@gmail.com +Cc: stable@vger.kernel.org +Signed-off-by: Benjamin Tissoires +Signed-off-by: Greg Kroah-Hartman +--- + drivers/hid/hid-ids.h | 1 + + drivers/hid/hid-quirks.c | 1 + + 2 files changed, 2 insertions(+) + +--- a/drivers/hid/hid-ids.h ++++ b/drivers/hid/hid-ids.h +@@ -1365,6 +1365,7 @@ + #define USB_VENDOR_ID_UGTIZER 0x2179 + #define USB_DEVICE_ID_UGTIZER_TABLET_GP0610 0x0053 + #define USB_DEVICE_ID_UGTIZER_TABLET_GT5040 0x0077 ++#define USB_DEVICE_ID_UGTIZER_TABLET_WP5540 0x0004 + + #define USB_VENDOR_ID_VIEWSONIC 0x0543 + #define USB_DEVICE_ID_VIEWSONIC_PD1011 0xe621 +--- a/drivers/hid/hid-quirks.c ++++ b/drivers/hid/hid-quirks.c +@@ -187,6 +187,7 @@ static const struct hid_device_id hid_qu + { HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD), HID_QUIRK_NOGET }, + { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5), HID_QUIRK_MULTI_INPUT }, + { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60), HID_QUIRK_MULTI_INPUT }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_WP5540), HID_QUIRK_MULTI_INPUT }, + { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH), HID_QUIRK_MULTI_INPUT }, + { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH), HID_QUIRK_MULTI_INPUT }, + { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET), HID_QUIRK_MULTI_INPUT }, diff --git a/queue-5.16/hid-amd_sfh-add-illuminance-mask-to-limit-als-max-value.patch b/queue-5.16/hid-amd_sfh-add-illuminance-mask-to-limit-als-max-value.patch new file mode 100644 index 00000000000..f56e5e19a99 --- /dev/null +++ b/queue-5.16/hid-amd_sfh-add-illuminance-mask-to-limit-als-max-value.patch @@ -0,0 +1,41 @@ +From 91aaea527bc3b707c5d3208cde035421ed54f79c Mon Sep 17 00:00:00 2001 +From: Basavaraj Natikar +Date: Mon, 31 Jan 2022 22:48:33 +0530 +Subject: HID: amd_sfh: Add illuminance mask to limit ALS max value + +From: Basavaraj Natikar + +commit 91aaea527bc3b707c5d3208cde035421ed54f79c upstream. + +ALS illuminance value present only in first 15 bits from SFH firmware +for V2 platforms. Hence added a mask of 15 bit to limit ALS max +illuminance values to get correct illuminance value. 
+ +Fixes: 0aad9c95eb9a ("HID: amd_sfh: Extend ALS support for newer AMD platform") +Signed-off-by: Basavaraj Natikar +Signed-off-by: Jiri Kosina +Signed-off-by: Greg Kroah-Hartman +--- + drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +--- a/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c ++++ b/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c +@@ -27,6 +27,7 @@ + #define HID_USAGE_SENSOR_STATE_READY_ENUM 0x02 + #define HID_USAGE_SENSOR_STATE_INITIALIZING_ENUM 0x05 + #define HID_USAGE_SENSOR_EVENT_DATA_UPDATED_ENUM 0x04 ++#define ILLUMINANCE_MASK GENMASK(14, 0) + + int get_report_descriptor(int sensor_idx, u8 *rep_desc) + { +@@ -246,7 +247,8 @@ u8 get_input_report(u8 current_index, in + get_common_inputs(&als_input.common_property, report_id); + /* For ALS ,V2 Platforms uses C2P_MSG5 register instead of DRAM access method */ + if (supported_input == V2_STATUS) +- als_input.illuminance_value = (int)readl(privdata->mmio + AMD_C2P_MSG(5)); ++ als_input.illuminance_value = ++ readl(privdata->mmio + AMD_C2P_MSG(5)) & ILLUMINANCE_MASK; + else + als_input.illuminance_value = + (int)sensor_virt_addr[0] / AMD_SFH_FW_MULTIPLIER; diff --git a/queue-5.16/hid-amd_sfh-correct-the-structure-field-name.patch b/queue-5.16/hid-amd_sfh-correct-the-structure-field-name.patch new file mode 100644 index 00000000000..65bfbd052c4 --- /dev/null +++ b/queue-5.16/hid-amd_sfh-correct-the-structure-field-name.patch @@ -0,0 +1,31 @@ +From aa0b724a2bf041036e56cbb3b4b3afde7c5e7c9e Mon Sep 17 00:00:00 2001 +From: Basavaraj Natikar +Date: Tue, 8 Feb 2022 17:51:09 +0530 +Subject: HID: amd_sfh: Correct the structure field name + +From: Basavaraj Natikar + +commit aa0b724a2bf041036e56cbb3b4b3afde7c5e7c9e upstream. + +Misinterpreted intr_enable field name. Hence correct the structure +field name accordingly to reflect the functionality. + +Fixes: f264481ad614 ("HID: amd_sfh: Extend driver capabilities for multi-generation support") +Signed-off-by: Basavaraj Natikar +Signed-off-by: Jiri Kosina +Signed-off-by: Greg Kroah-Hartman +--- + drivers/hid/amd-sfh-hid/amd_sfh_pcie.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h ++++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h +@@ -49,7 +49,7 @@ union sfh_cmd_base { + } s; + struct { + u32 cmd_id : 4; +- u32 intr_enable : 1; ++ u32 intr_disable : 1; + u32 rsvd1 : 3; + u32 length : 7; + u32 mem_type : 1; diff --git a/queue-5.16/hid-amd_sfh-increase-sensor-command-timeout.patch b/queue-5.16/hid-amd_sfh-increase-sensor-command-timeout.patch new file mode 100644 index 00000000000..ace21e3ea88 --- /dev/null +++ b/queue-5.16/hid-amd_sfh-increase-sensor-command-timeout.patch @@ -0,0 +1,36 @@ +From a7072c01c3ac3ae6ecd08fa7b43431cfc8ed331f Mon Sep 17 00:00:00 2001 +From: Basavaraj Natikar +Date: Mon, 31 Jan 2022 22:48:32 +0530 +Subject: HID: amd_sfh: Increase sensor command timeout + +From: Basavaraj Natikar + +commit a7072c01c3ac3ae6ecd08fa7b43431cfc8ed331f upstream. + +HPD sensors take more time to initialize. Hence increasing sensor +command timeout to get response with status within a max timeout. 
+ +Fixes: 173709f50e98 ("HID: amd_sfh: Add command response to check command status") +Signed-off-by: Basavaraj Natikar +Signed-off-by: Jiri Kosina +Signed-off-by: Greg Kroah-Hartman +--- + drivers/hid/amd-sfh-hid/amd_sfh_pcie.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c ++++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c +@@ -37,11 +37,11 @@ static int amd_sfh_wait_response_v2(stru + { + union cmd_response cmd_resp; + +- /* Get response with status within a max of 800 ms timeout */ ++ /* Get response with status within a max of 1600 ms timeout */ + if (!readl_poll_timeout(mp2->mmio + AMD_P2C_MSG(0), cmd_resp.resp, + (cmd_resp.response_v2.response == sensor_sts && + cmd_resp.response_v2.status == 0 && (sid == 0xff || +- cmd_resp.response_v2.sensor_id == sid)), 500, 800000)) ++ cmd_resp.response_v2.sensor_id == sid)), 500, 1600000)) + return cmd_resp.response_v2.response; + + return SENSOR_DISABLED; diff --git a/queue-5.16/hid-apple-set-the-tilde-quirk-flag-on-the-wellspring-5-and-later.patch b/queue-5.16/hid-apple-set-the-tilde-quirk-flag-on-the-wellspring-5-and-later.patch new file mode 100644 index 00000000000..47610f17499 --- /dev/null +++ b/queue-5.16/hid-apple-set-the-tilde-quirk-flag-on-the-wellspring-5-and-later.patch @@ -0,0 +1,84 @@ +From e26a78057c25dd56f112d536319c38735ed92ba4 Mon Sep 17 00:00:00 2001 +From: Alex Henrie +Date: Sun, 16 Jan 2022 16:01:58 -0700 +Subject: HID: apple: Set the tilde quirk flag on the Wellspring 5 and later + +From: Alex Henrie + +commit e26a78057c25dd56f112d536319c38735ed92ba4 upstream. + +Markus reports that his 2011 MacBook with a German ISO keyboard (USB +product code 05ac:0246, HID country code 13) has the tilde key quirk. +Seeing as all of the standalone Apple ISO keyboards since about 2008 +have the quirk, it seems reasonable to assume that once the integrated +laptop keyboards started having the quirk, they likewise never stopped +having it. 
+ +Reported-by: Markus Wageringel +Signed-off-by: Alex Henrie +Signed-off-by: Jiri Kosina +Signed-off-by: Greg Kroah-Hartman +--- + drivers/hid/hid-apple.c | 16 ++++++++-------- + 1 file changed, 8 insertions(+), 8 deletions(-) + +--- a/drivers/hid/hid-apple.c ++++ b/drivers/hid/hid-apple.c +@@ -580,49 +580,49 @@ static const struct hid_device_id apple_ + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI), + .driver_data = APPLE_HAS_FN }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO), +- .driver_data = APPLE_HAS_FN }, ++ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS), + .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI), + .driver_data = APPLE_HAS_FN }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO), +- .driver_data = APPLE_HAS_FN }, ++ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS), + .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI), + .driver_data = APPLE_HAS_FN }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO), +- .driver_data = APPLE_HAS_FN }, ++ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS), + .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI), + .driver_data = APPLE_HAS_FN }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO), +- .driver_data = APPLE_HAS_FN }, ++ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS), + .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI), + .driver_data = APPLE_HAS_FN }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO), +- .driver_data = APPLE_HAS_FN }, ++ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS), + .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI), + .driver_data = APPLE_HAS_FN }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO), +- .driver_data = APPLE_HAS_FN }, ++ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS), + .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI), + .driver_data = APPLE_HAS_FN }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO), +- .driver_data = APPLE_HAS_FN }, ++ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS), + .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI), + .driver_data = APPLE_HAS_FN }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO), +- .driver_data = APPLE_HAS_FN }, ++ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK }, + { 
HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS), + .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI), diff --git a/queue-5.16/hid-i2c-hid-goodix-fix-a-lockdep-splat.patch b/queue-5.16/hid-i2c-hid-goodix-fix-a-lockdep-splat.patch new file mode 100644 index 00000000000..9448e8a8471 --- /dev/null +++ b/queue-5.16/hid-i2c-hid-goodix-fix-a-lockdep-splat.patch @@ -0,0 +1,176 @@ +From 2787710f73fcce4a9bdab540aaf1aef778a27462 Mon Sep 17 00:00:00 2001 +From: Daniel Thompson +Date: Fri, 28 Jan 2022 17:46:25 +0000 +Subject: HID: i2c-hid: goodix: Fix a lockdep splat + +From: Daniel Thompson + +commit 2787710f73fcce4a9bdab540aaf1aef778a27462 upstream. + +I'm was on the receiving end of a lockdep splat from this driver and after +scratching my head I couldn't be entirely sure it was a false positive +given we would also have to think about whether the regulator locking is +safe (since the notifier is called whilst holding regulator locks which +are also needed for regulator_is_enabled() ). + +Regardless of whether it is a real bug or not, the mutex isn't needed. +We can use reference counting tricks instead to avoid races with the +notifier calls. + +The observed splat follows: + +------------------------------------------------------ +kworker/u16:3/127 is trying to acquire lock: +ffff00008021fb20 (&ihid_goodix->regulator_mutex){+.+.}-{4:4}, at: ihid_goodix_vdd_notify+0x30/0x94 + +but task is already holding lock: +ffff0000835c60c0 (&(&rdev->notifier)->rwsem){++++}-{4:4}, at: blocking_notifier_call_chain+0x30/0x70 + +which lock already depends on the new lock. + +the existing dependency chain (in reverse order) is: + +-> #1 (&(&rdev->notifier)->rwsem){++++}-{4:4}: + down_write+0x68/0x8c + blocking_notifier_chain_register+0x54/0x70 + regulator_register_notifier+0x1c/0x24 + devm_regulator_register_notifier+0x58/0x98 + i2c_hid_of_goodix_probe+0xdc/0x158 + i2c_device_probe+0x25d/0x270 + really_probe+0x174/0x2cc + __driver_probe_device+0xc0/0xd8 + driver_probe_device+0x50/0xe4 + __device_attach_driver+0xa8/0xc0 + bus_for_each_drv+0x9c/0xc0 + __device_attach_async_helper+0x6c/0xbc + async_run_entry_fn+0x38/0x100 + process_one_work+0x294/0x438 + worker_thread+0x180/0x258 + kthread+0x120/0x130 + ret_from_fork+0x10/0x20 + +-> #0 (&ihid_goodix->regulator_mutex){+.+.}-{4:4}: + __lock_acquire+0xd24/0xfe8 + lock_acquire+0x288/0x2f4 + __mutex_lock+0xa0/0x338 + mutex_lock_nested+0x3c/0x5c + ihid_goodix_vdd_notify+0x30/0x94 + notifier_call_chain+0x6c/0x8c + blocking_notifier_call_chain+0x48/0x70 + _notifier_call_chain.isra.0+0x18/0x20 + _regulator_enable+0xc0/0x178 + regulator_enable+0x40/0x7c + goodix_i2c_hid_power_up+0x18/0x20 + i2c_hid_core_power_up.isra.0+0x1c/0x2c + i2c_hid_core_probe+0xd8/0x3d4 + i2c_hid_of_goodix_probe+0x14c/0x158 + i2c_device_probe+0x25c/0x270 + really_probe+0x174/0x2cc + __driver_probe_device+0xc0/0xd8 + driver_probe_device+0x50/0xe4 + __device_attach_driver+0xa8/0xc0 + bus_for_each_drv+0x9c/0xc0 + __device_attach_async_helper+0x6c/0xbc + async_run_entry_fn+0x38/0x100 + process_one_work+0x294/0x438 + worker_thread+0x180/0x258 + kthread+0x120/0x130 + ret_from_fork+0x10/0x20 + +other info that might help us debug this: + + Possible unsafe locking scenario: + + CPU0 CPU1 + ---- ---- + lock(&(&rdev->notifier)->rwsem); + lock(&ihid_goodix->regulator_mutex); + lock(&(&rdev->notifier)->rwsem); + lock(&ihid_goodix->regulator_mutex); + + *** DEADLOCK *** + +Signed-off-by: Daniel Thompson 
+Fixes: 18eeef46d359 ("HID: i2c-hid: goodix: Tie the reset line to true state of the regulator") +Reviewed-by: Douglas Anderson +Signed-off-by: Jiri Kosina +Signed-off-by: Greg Kroah-Hartman +--- + drivers/hid/i2c-hid/i2c-hid-of-goodix.c | 28 ++++++++++++---------------- + 1 file changed, 12 insertions(+), 16 deletions(-) + +--- a/drivers/hid/i2c-hid/i2c-hid-of-goodix.c ++++ b/drivers/hid/i2c-hid/i2c-hid-of-goodix.c +@@ -27,7 +27,6 @@ struct i2c_hid_of_goodix { + + struct regulator *vdd; + struct notifier_block nb; +- struct mutex regulator_mutex; + struct gpio_desc *reset_gpio; + const struct goodix_i2c_hid_timing_data *timings; + }; +@@ -67,8 +66,6 @@ static int ihid_goodix_vdd_notify(struct + container_of(nb, struct i2c_hid_of_goodix, nb); + int ret = NOTIFY_OK; + +- mutex_lock(&ihid_goodix->regulator_mutex); +- + switch (event) { + case REGULATOR_EVENT_PRE_DISABLE: + gpiod_set_value_cansleep(ihid_goodix->reset_gpio, 1); +@@ -87,8 +84,6 @@ static int ihid_goodix_vdd_notify(struct + break; + } + +- mutex_unlock(&ihid_goodix->regulator_mutex); +- + return ret; + } + +@@ -102,8 +97,6 @@ static int i2c_hid_of_goodix_probe(struc + if (!ihid_goodix) + return -ENOMEM; + +- mutex_init(&ihid_goodix->regulator_mutex); +- + ihid_goodix->ops.power_up = goodix_i2c_hid_power_up; + ihid_goodix->ops.power_down = goodix_i2c_hid_power_down; + +@@ -130,25 +123,28 @@ static int i2c_hid_of_goodix_probe(struc + * long. Holding the controller in reset apparently draws extra + * power. + */ +- mutex_lock(&ihid_goodix->regulator_mutex); + ihid_goodix->nb.notifier_call = ihid_goodix_vdd_notify; + ret = devm_regulator_register_notifier(ihid_goodix->vdd, &ihid_goodix->nb); +- if (ret) { +- mutex_unlock(&ihid_goodix->regulator_mutex); ++ if (ret) + return dev_err_probe(&client->dev, ret, + "regulator notifier request failed\n"); +- } + + /* + * If someone else is holding the regulator on (or the regulator is + * an always-on one) we might never be told to deassert reset. Do it +- * now. Here we'll assume that someone else might have _just +- * barely_ turned the regulator on so we'll do the full +- * "post_power_delay" just in case. ++ * now... and temporarily bump the regulator reference count just to ++ * make sure it is impossible for this to race with our own notifier! ++ * We also assume that someone else might have _just barely_ turned ++ * the regulator on so we'll do the full "post_power_delay" just in ++ * case. + */ +- if (ihid_goodix->reset_gpio && regulator_is_enabled(ihid_goodix->vdd)) ++ if (ihid_goodix->reset_gpio && regulator_is_enabled(ihid_goodix->vdd)) { ++ ret = regulator_enable(ihid_goodix->vdd); ++ if (ret) ++ return ret; + goodix_i2c_hid_deassert_reset(ihid_goodix, true); +- mutex_unlock(&ihid_goodix->regulator_mutex); ++ regulator_disable(ihid_goodix->vdd); ++ } + + return i2c_hid_core_probe(client, &ihid_goodix->ops, 0x0001, 0); + } diff --git a/queue-5.16/mm-don-t-try-to-numa-migrate-cow-pages-that-have-other-uses.patch b/queue-5.16/mm-don-t-try-to-numa-migrate-cow-pages-that-have-other-uses.patch new file mode 100644 index 00000000000..73c9725d99b --- /dev/null +++ b/queue-5.16/mm-don-t-try-to-numa-migrate-cow-pages-that-have-other-uses.patch @@ -0,0 +1,75 @@ +From 80d47f5de5e311cbc0d01ebb6ee684e8f4c196c6 Mon Sep 17 00:00:00 2001 +From: Linus Torvalds +Date: Thu, 17 Feb 2022 08:57:47 -0800 +Subject: mm: don't try to NUMA-migrate COW pages that have other uses + +From: Linus Torvalds + +commit 80d47f5de5e311cbc0d01ebb6ee684e8f4c196c6 upstream. 
+ +Oded Gabbay reports that enabling NUMA balancing causes corruption with +his Gaudi accelerator test load: + + "All the details are in the bug, but the bottom line is that somehow, + this patch causes corruption when the numa balancing feature is + enabled AND we don't use process affinity AND we use GUP to pin pages + so our accelerator can DMA to/from system memory. + + Either disabling numa balancing, using process affinity to bind to + specific numa-node or reverting this patch causes the bug to + disappear" + +and Oded bisected the issue to commit 09854ba94c6a ("mm: do_wp_page() +simplification"). + +Now, the NUMA balancing shouldn't actually be changing the writability +of a page, and as such shouldn't matter for COW. But it appears it +does. Suspicious. + +However, regardless of that, the condition for enabling NUMA faults in +change_pte_range() is nonsensical. It uses "page_mapcount(page)" to +decide if a COW page should be NUMA-protected or not, and that makes +absolutely no sense. + +The number of mappings a page has is irrelevant: not only does GUP get a +reference to a page as in Oded's case, but the other mappings migth be +paged out and the only reference to them would be in the page count. + +Since we should never try to NUMA-balance a page that we can't move +anyway due to other references, just fix the code to use 'page_count()'. +Oded confirms that that fixes his issue. + +Now, this does imply that something in NUMA balancing ends up changing +page protections (other than the obvious one of making the page +inaccessible to get the NUMA faulting information). Otherwise the COW +simplification wouldn't matter - since doing the GUP on the page would +make sure it's writable. + +The cause of that permission change would be good to figure out too, +since it clearly results in spurious COW events - but fixing the +nonsensical test that just happened to work before is obviously the +CorrectThing(tm) to do regardless. + +Fixes: 09854ba94c6a ("mm: do_wp_page() simplification") +Link: https://bugzilla.kernel.org/show_bug.cgi?id=215616 +Link: https://lore.kernel.org/all/CAFCwf10eNmwq2wD71xjUhqkvv5+_pJMR1nPug2RqNDcFT4H86Q@mail.gmail.com/ +Reported-and-tested-by: Oded Gabbay +Cc: David Hildenbrand +Cc: Peter Xu +Signed-off-by: Linus Torvalds +Signed-off-by: Greg Kroah-Hartman +--- + mm/mprotect.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/mm/mprotect.c ++++ b/mm/mprotect.c +@@ -94,7 +94,7 @@ static unsigned long change_pte_range(st + + /* Also skip shared copy-on-write pages */ + if (is_cow_mapping(vma->vm_flags) && +- page_mapcount(page) != 1) ++ page_count(page) != 1) + continue; + + /* diff --git a/queue-5.16/mmc-block-fix-read-single-on-recovery-logic.patch b/queue-5.16/mmc-block-fix-read-single-on-recovery-logic.patch new file mode 100644 index 00000000000..f2d614bf6e3 --- /dev/null +++ b/queue-5.16/mmc-block-fix-read-single-on-recovery-logic.patch @@ -0,0 +1,83 @@ +From 54309fde1a352ad2674ebba004a79f7d20b9f037 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Christian=20L=C3=B6hle?= +Date: Fri, 4 Feb 2022 15:11:37 +0000 +Subject: mmc: block: fix read single on recovery logic + +From: Christian Löhle + +commit 54309fde1a352ad2674ebba004a79f7d20b9f037 upstream. + +On reads with MMC_READ_MULTIPLE_BLOCK that fail, +the recovery handler will use MMC_READ_SINGLE_BLOCK for +each of the blocks, up to MMC_READ_SINGLE_RETRIES times each. +The logic for this is fixed to never report unsuccessful reads +as success to the block layer. 
+ +On command error with retries remaining, blk_update_request was +called with whatever value error was set last to. +In case it was last set to BLK_STS_OK (default), the read will be +reported as success, even though there was no data read from the device. +This could happen on a CRC mismatch for the response, +a card rejecting the command (e.g. again due to a CRC mismatch). +In case it was last set to BLK_STS_IOERR, the error is reported correctly, +but no retries will be attempted. + +Fixes: 81196976ed946c ("mmc: block: Add blk-mq support") +Cc: stable@vger.kernel.org +Signed-off-by: Christian Loehle +Reviewed-by: Adrian Hunter +Link: https://lore.kernel.org/r/bc706a6ab08c4fe2834ba0c05a804672@hyperstone.com +Signed-off-by: Ulf Hansson +Signed-off-by: Greg Kroah-Hartman +--- + drivers/mmc/core/block.c | 28 ++++++++++++++-------------- + 1 file changed, 14 insertions(+), 14 deletions(-) + +--- a/drivers/mmc/core/block.c ++++ b/drivers/mmc/core/block.c +@@ -1682,31 +1682,31 @@ static void mmc_blk_read_single(struct m + struct mmc_card *card = mq->card; + struct mmc_host *host = card->host; + blk_status_t error = BLK_STS_OK; +- int retries = 0; + + do { + u32 status; + int err; ++ int retries = 0; + +- mmc_blk_rw_rq_prep(mqrq, card, 1, mq); ++ while (retries++ <= MMC_READ_SINGLE_RETRIES) { ++ mmc_blk_rw_rq_prep(mqrq, card, 1, mq); + +- mmc_wait_for_req(host, mrq); ++ mmc_wait_for_req(host, mrq); + +- err = mmc_send_status(card, &status); +- if (err) +- goto error_exit; +- +- if (!mmc_host_is_spi(host) && +- !mmc_ready_for_data(status)) { +- err = mmc_blk_fix_state(card, req); ++ err = mmc_send_status(card, &status); + if (err) + goto error_exit; +- } + +- if (mrq->cmd->error && retries++ < MMC_READ_SINGLE_RETRIES) +- continue; ++ if (!mmc_host_is_spi(host) && ++ !mmc_ready_for_data(status)) { ++ err = mmc_blk_fix_state(card, req); ++ if (err) ++ goto error_exit; ++ } + +- retries = 0; ++ if (!mrq->cmd->error) ++ break; ++ } + + if (mrq->cmd->error || + mrq->data->error || diff --git a/queue-5.16/parisc-add-ioread64_lo_hi-and-iowrite64_lo_hi.patch b/queue-5.16/parisc-add-ioread64_lo_hi-and-iowrite64_lo_hi.patch new file mode 100644 index 00000000000..73cbb376806 --- /dev/null +++ b/queue-5.16/parisc-add-ioread64_lo_hi-and-iowrite64_lo_hi.patch @@ -0,0 +1,70 @@ +From 18a1d5e1945385d9b5adc3fe11427ce4a9d2826e Mon Sep 17 00:00:00 2001 +From: Andy Shevchenko +Date: Mon, 7 Feb 2022 17:16:39 +0200 +Subject: parisc: Add ioread64_lo_hi() and iowrite64_lo_hi() + +From: Andy Shevchenko + +commit 18a1d5e1945385d9b5adc3fe11427ce4a9d2826e upstream. + +It's a followup to the previous commit f15309d7ad5d ("parisc: Add +ioread64_hi_lo() and iowrite64_hi_lo()") which does only half of +the job. Add the rest, so we won't get a new kernel test robot +reports. 
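+
+As a point of reference, the split access can be sketched in plain C
+(an illustrative user-space model, not the kernel code: read32, write32
+and the reg array below are made-up stand-ins for ioread32(), iowrite32()
+and a real MMIO register):
+
+  #include <stdint.h>
+  #include <stdio.h>
+
+  /* stand-ins for ioread32()/iowrite32(), backed by ordinary memory */
+  static uint32_t read32(const volatile uint32_t *addr) { return *addr; }
+  static void write32(volatile uint32_t *addr, uint32_t val) { *addr = val; }
+
+  /* access a 64-bit register as two 32-bit halves, low word first */
+  static uint64_t read64_lo_hi(const volatile uint32_t *addr)
+  {
+          uint32_t low = read32(addr);
+          uint32_t high = read32(addr + 1);
+
+          return low | ((uint64_t)high << 32);
+  }
+
+  static void write64_lo_hi(volatile uint32_t *addr, uint64_t val)
+  {
+          write32(addr, (uint32_t)val);
+          write32(addr + 1, (uint32_t)(val >> 32));
+  }
+
+  int main(void)
+  {
+          volatile uint32_t reg[2];
+
+          write64_lo_hi(reg, 0x1122334455667788ULL);
+          printf("0x%016llx\n", (unsigned long long)read64_lo_hi(reg));
+          return 0;
+  }
+
+The _hi_lo helpers added by f15309d7ad5d do the same thing with the two
+halves in the opposite order; which order is appropriate depends on how
+the device latches the 64-bit value.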
+ +Fixes: f15309d7ad5d ("parisc: Add ioread64_hi_lo() and iowrite64_hi_lo()") +Signed-off-by: Andy Shevchenko +Signed-off-by: Helge Deller +Signed-off-by: Greg Kroah-Hartman +--- + arch/parisc/lib/iomap.c | 18 ++++++++++++++++++ + 1 file changed, 18 insertions(+) + +--- a/arch/parisc/lib/iomap.c ++++ b/arch/parisc/lib/iomap.c +@@ -346,6 +346,16 @@ u64 ioread64be(const void __iomem *addr) + return *((u64 *)addr); + } + ++u64 ioread64_lo_hi(const void __iomem *addr) ++{ ++ u32 low, high; ++ ++ low = ioread32(addr); ++ high = ioread32(addr + sizeof(u32)); ++ ++ return low + ((u64)high << 32); ++} ++ + u64 ioread64_hi_lo(const void __iomem *addr) + { + u32 low, high; +@@ -419,6 +429,12 @@ void iowrite64be(u64 datum, void __iomem + } + } + ++void iowrite64_lo_hi(u64 val, void __iomem *addr) ++{ ++ iowrite32(val, addr); ++ iowrite32(val >> 32, addr + sizeof(u32)); ++} ++ + void iowrite64_hi_lo(u64 val, void __iomem *addr) + { + iowrite32(val >> 32, addr + sizeof(u32)); +@@ -530,6 +546,7 @@ EXPORT_SYMBOL(ioread32); + EXPORT_SYMBOL(ioread32be); + EXPORT_SYMBOL(ioread64); + EXPORT_SYMBOL(ioread64be); ++EXPORT_SYMBOL(ioread64_lo_hi); + EXPORT_SYMBOL(ioread64_hi_lo); + EXPORT_SYMBOL(iowrite8); + EXPORT_SYMBOL(iowrite16); +@@ -538,6 +555,7 @@ EXPORT_SYMBOL(iowrite32); + EXPORT_SYMBOL(iowrite32be); + EXPORT_SYMBOL(iowrite64); + EXPORT_SYMBOL(iowrite64be); ++EXPORT_SYMBOL(iowrite64_lo_hi); + EXPORT_SYMBOL(iowrite64_hi_lo); + EXPORT_SYMBOL(ioread8_rep); + EXPORT_SYMBOL(ioread16_rep); diff --git a/queue-5.16/parisc-drop-__init-from-map_pages-declaration.patch b/queue-5.16/parisc-drop-__init-from-map_pages-declaration.patch new file mode 100644 index 00000000000..e53e1b7bede --- /dev/null +++ b/queue-5.16/parisc-drop-__init-from-map_pages-declaration.patch @@ -0,0 +1,61 @@ +From 9129886b88185962538180625ca8051362b01327 Mon Sep 17 00:00:00 2001 +From: John David Anglin +Date: Sat, 22 Jan 2022 18:19:49 +0000 +Subject: parisc: Drop __init from map_pages declaration + +From: John David Anglin + +commit 9129886b88185962538180625ca8051362b01327 upstream. + +With huge kernel pages, we randomly eat a SPARC in map_pages(). This +is fixed by dropping __init from the declaration. + +However, map_pages references the __init routine memblock_alloc_try_nid +via memblock_alloc. Thus, it needs to be marked with __ref. + +memblock_alloc is only called before the kernel text is set to readonly. + +The __ref on free_initmem is no longer needed. + +Comment regarding map_pages being in the init section is removed. 
+ +Signed-off-by: John David Anglin +Cc: stable@vger.kernel.org # v5.4+ +Signed-off-by: Helge Deller +Signed-off-by: Greg Kroah-Hartman +--- + arch/parisc/mm/init.c | 9 ++++----- + 1 file changed, 4 insertions(+), 5 deletions(-) + +--- a/arch/parisc/mm/init.c ++++ b/arch/parisc/mm/init.c +@@ -337,9 +337,9 @@ static void __init setup_bootmem(void) + + static bool kernel_set_to_readonly; + +-static void __init map_pages(unsigned long start_vaddr, +- unsigned long start_paddr, unsigned long size, +- pgprot_t pgprot, int force) ++static void __ref map_pages(unsigned long start_vaddr, ++ unsigned long start_paddr, unsigned long size, ++ pgprot_t pgprot, int force) + { + pmd_t *pmd; + pte_t *pg_table; +@@ -449,7 +449,7 @@ void __init set_kernel_text_rw(int enabl + flush_tlb_all(); + } + +-void __ref free_initmem(void) ++void free_initmem(void) + { + unsigned long init_begin = (unsigned long)__init_begin; + unsigned long init_end = (unsigned long)__init_end; +@@ -463,7 +463,6 @@ void __ref free_initmem(void) + /* The init text pages are marked R-X. We have to + * flush the icache and mark them RW- + * +- * This is tricky, because map_pages is in the init section. + * Do a dummy remap of the data section first (the data + * section is already PAGE_KERNEL) to pull in the TLB entries + * for map_kernel */ diff --git a/queue-5.16/parisc-fix-data-tlb-miss-in-sba_unmap_sg.patch b/queue-5.16/parisc-fix-data-tlb-miss-in-sba_unmap_sg.patch new file mode 100644 index 00000000000..fe311d21443 --- /dev/null +++ b/queue-5.16/parisc-fix-data-tlb-miss-in-sba_unmap_sg.patch @@ -0,0 +1,86 @@ +From b7d6f44a0fa716a82969725516dc0b16bc7cd514 Mon Sep 17 00:00:00 2001 +From: John David Anglin +Date: Wed, 26 Jan 2022 20:39:05 +0000 +Subject: parisc: Fix data TLB miss in sba_unmap_sg + +From: John David Anglin + +commit b7d6f44a0fa716a82969725516dc0b16bc7cd514 upstream. 
+ +Rolf Eike Beer reported the following bug: + +[1274934.746891] Bad Address (null pointer deref?): Code=15 (Data TLB miss fault) at addr 0000004140000018 +[1274934.746891] CPU: 3 PID: 5549 Comm: cmake Not tainted 5.15.4-gentoo-parisc64 #4 +[1274934.746891] Hardware name: 9000/785/C8000 +[1274934.746891] +[1274934.746891] YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI +[1274934.746891] PSW: 00001000000001001111111000001110 Not tainted +[1274934.746891] r00-03 000000ff0804fe0e 0000000040bc9bc0 00000000406760e4 0000004140000000 +[1274934.746891] r04-07 0000000040b693c0 0000004140000000 000000004a2b08b0 0000000000000001 +[1274934.746891] r08-11 0000000041f98810 0000000000000000 000000004a0a7000 0000000000000001 +[1274934.746891] r12-15 0000000040bddbc0 0000000040c0cbc0 0000000040bddbc0 0000000040bddbc0 +[1274934.746891] r16-19 0000000040bde3c0 0000000040bddbc0 0000000040bde3c0 0000000000000007 +[1274934.746891] r20-23 0000000000000006 000000004a368950 0000000000000000 0000000000000001 +[1274934.746891] r24-27 0000000000001fff 000000000800000e 000000004a1710f0 0000000040b693c0 +[1274934.746891] r28-31 0000000000000001 0000000041f988b0 0000000041f98840 000000004a171118 +[1274934.746891] sr00-03 00000000066e5800 0000000000000000 0000000000000000 00000000066e5800 +[1274934.746891] sr04-07 0000000000000000 0000000000000000 0000000000000000 0000000000000000 +[1274934.746891] +[1274934.746891] IASQ: 0000000000000000 0000000000000000 IAOQ: 00000000406760e8 00000000406760ec +[1274934.746891] IIR: 48780030 ISR: 0000000000000000 IOR: 0000004140000018 +[1274934.746891] CPU: 3 CR30: 00000040e3a9c000 CR31: ffffffffffffffff +[1274934.746891] ORIG_R28: 0000000040acdd58 +[1274934.746891] IAOQ[0]: sba_unmap_sg+0xb0/0x118 +[1274934.746891] IAOQ[1]: sba_unmap_sg+0xb4/0x118 +[1274934.746891] RP(r2): sba_unmap_sg+0xac/0x118 +[1274934.746891] Backtrace: +[1274934.746891] [<00000000402740cc>] dma_unmap_sg_attrs+0x6c/0x70 +[1274934.746891] [<000000004074d6bc>] scsi_dma_unmap+0x54/0x60 +[1274934.746891] [<00000000407a3488>] mptscsih_io_done+0x150/0xd70 +[1274934.746891] [<0000000040798600>] mpt_interrupt+0x168/0xa68 +[1274934.746891] [<0000000040255a48>] __handle_irq_event_percpu+0xc8/0x278 +[1274934.746891] [<0000000040255c34>] handle_irq_event_percpu+0x3c/0xd8 +[1274934.746891] [<000000004025ecb4>] handle_percpu_irq+0xb4/0xf0 +[1274934.746891] [<00000000402548e0>] generic_handle_irq+0x50/0x70 +[1274934.746891] [<000000004019a254>] call_on_stack+0x18/0x24 +[1274934.746891] +[1274934.746891] Kernel panic - not syncing: Bad Address (null pointer deref?) + +The bug is caused by overrunning the sglist and incorrectly testing +sg_dma_len(sglist) before nents. Normally this doesn't cause a crash, +but in this case sglist crossed a page boundary. This occurs in the +following code: + + while (sg_dma_len(sglist) && nents--) { + +The fix is simply to test nents first and move the decrement of nents +into the loop. 
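+
+The order matters because && evaluates left to right: with the old
+condition the current sglist entry is dereferenced before the remaining
+count is looked at, so the entry one past the mapped range could be read.
+A stand-alone sketch of the corrected loop shape (fake_sg, dma_len,
+unmap_one and unmap_list are invented names for illustration only):
+
+  #include <stdio.h>
+
+  struct fake_sg { unsigned int dma_len; };
+
+  static void unmap_one(const struct fake_sg *sg)
+  {
+          printf("unmap %u bytes\n", sg->dma_len);
+  }
+
+  static void unmap_list(struct fake_sg *sglist, int nents)
+  {
+          /* test the remaining count first: once nents reaches 0 the
+           * entry past the end of the mapped range is never read
+           */
+          while (nents && sglist->dma_len) {
+                  unmap_one(sglist);
+                  ++sglist;
+                  nents--;
+          }
+  }
+
+  int main(void)
+  {
+          struct fake_sg list[2] = { { 4096 }, { 8192 } };
+
+          unmap_list(list, 2);  /* list[2] does not exist and is never read */
+          return 0;
+  }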
+ +Reported-by: Rolf Eike Beer +Signed-off-by: John David Anglin +Cc: stable@vger.kernel.org +Signed-off-by: Helge Deller +Signed-off-by: Greg Kroah-Hartman +--- + drivers/parisc/sba_iommu.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +--- a/drivers/parisc/sba_iommu.c ++++ b/drivers/parisc/sba_iommu.c +@@ -1047,7 +1047,7 @@ sba_unmap_sg(struct device *dev, struct + spin_unlock_irqrestore(&ioc->res_lock, flags); + #endif + +- while (sg_dma_len(sglist) && nents--) { ++ while (nents && sg_dma_len(sglist)) { + + sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist), + direction, 0); +@@ -1056,6 +1056,7 @@ sba_unmap_sg(struct device *dev, struct + ioc->usingle_calls--; /* kluge since call is unmap_sg() */ + #endif + ++sglist; ++ nents--; + } + + DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents); diff --git a/queue-5.16/parisc-fix-sglist-access-in-ccio-dma.c.patch b/queue-5.16/parisc-fix-sglist-access-in-ccio-dma.c.patch new file mode 100644 index 00000000000..b0ae31b66dd --- /dev/null +++ b/queue-5.16/parisc-fix-sglist-access-in-ccio-dma.c.patch @@ -0,0 +1,39 @@ +From d7da660cab47183cded65e11b64497d0f56c6edf Mon Sep 17 00:00:00 2001 +From: John David Anglin +Date: Thu, 27 Jan 2022 22:33:41 +0000 +Subject: parisc: Fix sglist access in ccio-dma.c + +From: John David Anglin + +commit d7da660cab47183cded65e11b64497d0f56c6edf upstream. + +This patch implements the same bug fix to ccio-dma.c as to sba_iommu.c. +It ensures that only the allocated entries of the sglist are accessed. + +Signed-off-by: John David Anglin +Cc: stable@vger.kernel.org +Signed-off-by: Helge Deller +Signed-off-by: Greg Kroah-Hartman +--- + drivers/parisc/ccio-dma.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +--- a/drivers/parisc/ccio-dma.c ++++ b/drivers/parisc/ccio-dma.c +@@ -1003,7 +1003,7 @@ ccio_unmap_sg(struct device *dev, struct + ioc->usg_calls++; + #endif + +- while(sg_dma_len(sglist) && nents--) { ++ while (nents && sg_dma_len(sglist)) { + + #ifdef CCIO_COLLECT_STATS + ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT; +@@ -1011,6 +1011,7 @@ ccio_unmap_sg(struct device *dev, struct + ccio_unmap_page(dev, sg_dma_address(sglist), + sg_dma_len(sglist), direction, 0); + ++sglist; ++ nents--; + } + + DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents); diff --git a/queue-5.16/parisc-show-error-if-wrong-32-64-bit-compiler-is-being-used.patch b/queue-5.16/parisc-show-error-if-wrong-32-64-bit-compiler-is-being-used.patch new file mode 100644 index 00000000000..ee1d36a2534 --- /dev/null +++ b/queue-5.16/parisc-show-error-if-wrong-32-64-bit-compiler-is-being-used.patch @@ -0,0 +1,42 @@ +From b160628e9ebcdc85d0db9d7f423c26b3c7c179d0 Mon Sep 17 00:00:00 2001 +From: Helge Deller +Date: Sun, 13 Feb 2022 22:29:25 +0100 +Subject: parisc: Show error if wrong 32/64-bit compiler is being used + +From: Helge Deller + +commit b160628e9ebcdc85d0db9d7f423c26b3c7c179d0 upstream. + +It happens quite often that people use the wrong compiler to build the +kernel: + +make ARCH=parisc -> builds the 32-bit kernel +make ARCH=parisc64 -> builds the 64-bit kernel + +This patch adds a sanity check which errors out with an instruction how +use the correct ARCH= option. 
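+
+The check works because the compiler predefines __LP64__ whenever it
+targets a 64-bit (LP64) data model, so a mismatch between the configured
+word size and the toolchain turns into a hard #error. The same pattern in
+stand-alone C (WANT_64BIT is an invented stand-in for CONFIG_64BIT):
+
+  #include <stdio.h>
+
+  /* fail the build if the requested word size and the compiler disagree */
+  #if defined(WANT_64BIT) && !defined(__LP64__)
+  #error "64-bit build requested, but the compiler generates 32-bit code"
+  #endif
+  #if !defined(WANT_64BIT) && defined(__LP64__)
+  #error "32-bit build requested, but the compiler generates 64-bit code"
+  #endif
+
+  int main(void)
+  {
+          printf("word size matches: %zu-bit longs\n", sizeof(long) * 8);
+          return 0;
+  }
+
+Building the sketch with e.g. "cc -m64 -DWANT_64BIT" or "cc -m32" (on a
+toolchain that supports both) succeeds, while mixing the two trips the
+corresponding #error, which is the behaviour the hunk below adds for
+ARCH=parisc vs ARCH=parisc64.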
+ +Signed-off-by: Helge Deller +Cc: stable@vger.kernel.org # v5.15+ +Signed-off-by: Greg Kroah-Hartman +--- + arch/parisc/include/asm/bitops.h | 8 ++++++++ + 1 file changed, 8 insertions(+) + +--- a/arch/parisc/include/asm/bitops.h ++++ b/arch/parisc/include/asm/bitops.h +@@ -12,6 +12,14 @@ + #include + #include + ++/* compiler build environment sanity checks: */ ++#if !defined(CONFIG_64BIT) && defined(__LP64__) ++#error "Please use 'ARCH=parisc' to build the 32-bit kernel." ++#endif ++#if defined(CONFIG_64BIT) && !defined(__LP64__) ++#error "Please use 'ARCH=parisc64' to build the 64-bit kernel." ++#endif ++ + /* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion + * on use of volatile and __*_bit() (set/clear/change): + * *_bit() want use of volatile. diff --git a/queue-5.16/pci-hv-fix-numa-node-assignment-when-kernel-boots-with-custom-numa-topology.patch b/queue-5.16/pci-hv-fix-numa-node-assignment-when-kernel-boots-with-custom-numa-topology.patch new file mode 100644 index 00000000000..06e0d44b43e --- /dev/null +++ b/queue-5.16/pci-hv-fix-numa-node-assignment-when-kernel-boots-with-custom-numa-topology.patch @@ -0,0 +1,49 @@ +From 3149efcdf2c6314420c418dfc94de53bfd076b1f Mon Sep 17 00:00:00 2001 +From: Long Li +Date: Wed, 26 Jan 2022 17:43:34 -0800 +Subject: PCI: hv: Fix NUMA node assignment when kernel boots with custom NUMA topology + +From: Long Li + +commit 3149efcdf2c6314420c418dfc94de53bfd076b1f upstream. + +When kernel boots with a NUMA topology with some NUMA nodes offline, the PCI +driver should only set an online NUMA node on the device. This can happen +during KDUMP where some NUMA nodes are not made online by the KDUMP kernel. + +This patch also fixes the case where kernel is booting with "numa=off". + +Fixes: 999dd956d838 ("PCI: hv: Add support for protocol 1.3 and support PCI_BUS_RELATIONS2") +Signed-off-by: Long Li +Reviewed-by: Michael Kelley +Tested-by: Purna Pavan Chandra Aekkaladevi +Acked-by: Lorenzo Pieralisi +Link: https://lore.kernel.org/r/1643247814-15184-1-git-send-email-longli@linuxonhyperv.com +Signed-off-by: Wei Liu +Signed-off-by: Greg Kroah-Hartman +--- + drivers/pci/controller/pci-hyperv.c | 13 +++++++++++-- + 1 file changed, 11 insertions(+), 2 deletions(-) + +--- a/drivers/pci/controller/pci-hyperv.c ++++ b/drivers/pci/controller/pci-hyperv.c +@@ -1899,8 +1899,17 @@ static void hv_pci_assign_numa_node(stru + if (!hv_dev) + continue; + +- if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY) +- set_dev_node(&dev->dev, hv_dev->desc.virtual_numa_node); ++ if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY && ++ hv_dev->desc.virtual_numa_node < num_possible_nodes()) ++ /* ++ * The kernel may boot with some NUMA nodes offline ++ * (e.g. in a KDUMP kernel) or with NUMA disabled via ++ * "numa=off". In those cases, adjust the host provided ++ * NUMA node to a valid NUMA node used by the kernel. 
++ */ ++ set_dev_node(&dev->dev, ++ numa_map_to_online_node( ++ hv_dev->desc.virtual_numa_node)); + + put_pcichild(hv_dev); + } diff --git a/queue-5.16/revert-svm-add-warning-message-for-avic-ipi-invalid-target.patch b/queue-5.16/revert-svm-add-warning-message-for-avic-ipi-invalid-target.patch new file mode 100644 index 00000000000..93a9a2ea4c9 --- /dev/null +++ b/queue-5.16/revert-svm-add-warning-message-for-avic-ipi-invalid-target.patch @@ -0,0 +1,38 @@ +From dd4589eee99db8f61f7b8f7df1531cad3f74a64d Mon Sep 17 00:00:00 2001 +From: Sean Christopherson +Date: Fri, 4 Feb 2022 21:41:55 +0000 +Subject: Revert "svm: Add warning message for AVIC IPI invalid target" + +From: Sean Christopherson + +commit dd4589eee99db8f61f7b8f7df1531cad3f74a64d upstream. + +Remove a WARN on an "AVIC IPI invalid target" exit, the WARN is trivial +to trigger from guest as it will fail on any destination APIC ID that +doesn't exist from the guest's perspective. + +Don't bother recording anything in the kernel log, the common tracepoint +for kvm_avic_incomplete_ipi() is sufficient for debugging. + +This reverts commit 37ef0c4414c9743ba7f1af4392f0a27a99649f2a. + +Cc: stable@vger.kernel.org +Signed-off-by: Sean Christopherson +Message-Id: <20220204214205.3306634-2-seanjc@google.com> +Signed-off-by: Paolo Bonzini +Signed-off-by: Greg Kroah-Hartman +--- + arch/x86/kvm/svm/avic.c | 2 -- + 1 file changed, 2 deletions(-) + +--- a/arch/x86/kvm/svm/avic.c ++++ b/arch/x86/kvm/svm/avic.c +@@ -342,8 +342,6 @@ int avic_incomplete_ipi_interception(str + avic_kick_target_vcpus(vcpu->kvm, apic, icrl, icrh); + break; + case AVIC_IPI_FAILURE_INVALID_TARGET: +- WARN_ONCE(1, "Invalid IPI target: index=%u, vcpu=%d, icr=%#0x:%#0x\n", +- index, vcpu->vcpu_id, icrh, icrl); + break; + case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE: + WARN_ONCE(1, "Invalid backing page\n"); diff --git a/queue-5.16/selftests-kvm-remove-absent-target-file.patch b/queue-5.16/selftests-kvm-remove-absent-target-file.patch new file mode 100644 index 00000000000..768b1c0c8f0 --- /dev/null +++ b/queue-5.16/selftests-kvm-remove-absent-target-file.patch @@ -0,0 +1,35 @@ +From 0316dbb9a017d3231f86e0188376f067ec26a59c Mon Sep 17 00:00:00 2001 +From: Muhammad Usama Anjum +Date: Thu, 10 Feb 2022 22:23:51 +0500 +Subject: selftests: kvm: Remove absent target file + +From: Muhammad Usama Anjum + +commit 0316dbb9a017d3231f86e0188376f067ec26a59c upstream. + +There is no vmx_pi_mmio_test file. 
Remove it to get rid of error while +creation of selftest archive: + +rsync: [sender] link_stat "/kselftest/kvm/x86_64/vmx_pi_mmio_test" failed: No such file or directory (2) +rsync error: some files/attrs were not transferred (see previous errors) (code 23) at main.c(1333) [sender=3.2.3] + +Fixes: 6a58150859fd ("selftest: KVM: Add intra host migration tests") +Reported-by: "kernelci.org bot" +Signed-off-by: Muhammad Usama Anjum +Message-Id: <20220210172352.1317554-1-usama.anjum@collabora.com> +Signed-off-by: Paolo Bonzini +Signed-off-by: Greg Kroah-Hartman +--- + tools/testing/selftests/kvm/Makefile | 1 - + 1 file changed, 1 deletion(-) + +--- a/tools/testing/selftests/kvm/Makefile ++++ b/tools/testing/selftests/kvm/Makefile +@@ -75,7 +75,6 @@ TEST_GEN_PROGS_x86_64 += x86_64/tsc_msrs + TEST_GEN_PROGS_x86_64 += x86_64/vmx_pmu_msrs_test + TEST_GEN_PROGS_x86_64 += x86_64/xen_shinfo_test + TEST_GEN_PROGS_x86_64 += x86_64/xen_vmcall_test +-TEST_GEN_PROGS_x86_64 += x86_64/vmx_pi_mmio_test + TEST_GEN_PROGS_x86_64 += x86_64/sev_migrate_tests + TEST_GEN_PROGS_x86_64 += access_tracking_perf_test + TEST_GEN_PROGS_x86_64 += demand_paging_test diff --git a/queue-5.16/serial-parisc-gsc-fix-build-when-iosapic-is-not-set.patch b/queue-5.16/serial-parisc-gsc-fix-build-when-iosapic-is-not-set.patch new file mode 100644 index 00000000000..10812286ca8 --- /dev/null +++ b/queue-5.16/serial-parisc-gsc-fix-build-when-iosapic-is-not-set.patch @@ -0,0 +1,54 @@ +From 6e8793674bb0d1135ca0e5c9f7e16fecbf815926 Mon Sep 17 00:00:00 2001 +From: Randy Dunlap +Date: Mon, 14 Feb 2022 10:00:19 -0800 +Subject: serial: parisc: GSC: fix build when IOSAPIC is not set + +From: Randy Dunlap + +commit 6e8793674bb0d1135ca0e5c9f7e16fecbf815926 upstream. + +There is a build error when using a kernel .config file from +'kernel test robot' for a different build problem: + +hppa64-linux-ld: drivers/tty/serial/8250/8250_gsc.o: in function `.LC3': +(.data.rel.ro+0x18): undefined reference to `iosapic_serial_irq' + +when: + CONFIG_GSC=y + CONFIG_SERIO_GSCPS2=y + CONFIG_SERIAL_8250_GSC=y + CONFIG_PCI is not set + and hence PCI_LBA is not set. + IOSAPIC depends on PCI_LBA, so IOSAPIC is not set/enabled. + +Make the use of iosapic_serial_irq() conditional to fix the build error. + +Signed-off-by: Randy Dunlap +Reported-by: kernel test robot +Cc: "James E.J. 
Bottomley" +Cc: Helge Deller +Cc: linux-parisc@vger.kernel.org +Cc: Greg Kroah-Hartman +Cc: linux-serial@vger.kernel.org +Cc: Jiri Slaby +Cc: Johan Hovold +Suggested-by: Helge Deller +Signed-off-by: Helge Deller +Cc: stable@vger.kernel.org +Signed-off-by: Helge Deller +Signed-off-by: Greg Kroah-Hartman +--- + drivers/tty/serial/8250/8250_gsc.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/tty/serial/8250/8250_gsc.c ++++ b/drivers/tty/serial/8250/8250_gsc.c +@@ -26,7 +26,7 @@ static int __init serial_init_chip(struc + unsigned long address; + int err; + +-#ifdef CONFIG_64BIT ++#if defined(CONFIG_64BIT) && defined(CONFIG_IOSAPIC) + if (!dev->irq && (dev->id.sversion == 0xad)) + dev->irq = iosapic_serial_irq(dev); + #endif diff --git a/queue-5.16/series b/queue-5.16/series new file mode 100644 index 00000000000..6a563faa66c --- /dev/null +++ b/queue-5.16/series @@ -0,0 +1,30 @@ +drm-nouveau-pmu-gm200-use-alternate-falcon-reset-sequence.patch +bpf-introduce-composable-reg-ret-and-arg-types.patch +bpf-replace-arg_xxx_or_null-with-arg_xxx-ptr_maybe_null.patch +bpf-replace-ret_xxx_or_null-with-ret_xxx-ptr_maybe_null.patch +bpf-replace-ptr_to_xxx_or_null-with-ptr_to_xxx-ptr_maybe_null.patch +bpf-introduce-mem_rdonly-flag.patch +bpf-convert-ptr_to_mem_or_null-to-composable-types.patch +bpf-make-per_cpu_ptr-return-rdonly-ptr_to_mem.patch +bpf-add-mem_rdonly-for-helper-args-that-are-pointers-to-rdonly-mem.patch +bpf-selftests-test-ptr_to_rdonly_mem.patch +hid-add-support-for-ugtablet-wp5540.patch +revert-svm-add-warning-message-for-avic-ipi-invalid-target.patch +parisc-show-error-if-wrong-32-64-bit-compiler-is-being-used.patch +serial-parisc-gsc-fix-build-when-iosapic-is-not-set.patch +parisc-drop-__init-from-map_pages-declaration.patch +parisc-fix-data-tlb-miss-in-sba_unmap_sg.patch +parisc-fix-sglist-access-in-ccio-dma.c.patch +mmc-block-fix-read-single-on-recovery-logic.patch +mm-don-t-try-to-numa-migrate-cow-pages-that-have-other-uses.patch +hid-amd_sfh-add-illuminance-mask-to-limit-als-max-value.patch +hid-i2c-hid-goodix-fix-a-lockdep-splat.patch +hid-amd_sfh-increase-sensor-command-timeout.patch +selftests-kvm-remove-absent-target-file.patch +hid-amd_sfh-correct-the-structure-field-name.patch +pci-hv-fix-numa-node-assignment-when-kernel-boots-with-custom-numa-topology.patch +parisc-add-ioread64_lo_hi-and-iowrite64_lo_hi.patch +hid-apple-set-the-tilde-quirk-flag-on-the-wellspring-5-and-later.patch +btrfs-don-t-hold-cpu-for-too-long-when-defragging-a-file.patch +btrfs-send-in-case-of-io-error-log-it.patch +btrfs-defrag-don-t-try-to-defrag-extents-which-are-under-writeback.patch