--- /dev/null
+From 97e6d7dab1ca4648821c790a2b7913d6d5d549db Mon Sep 17 00:00:00 2001
+From: Kumar Kartikeya Dwivedi <memxor@gmail.com>
+Date: Sat, 19 Mar 2022 13:38:24 +0530
+Subject: bpf: Check PTR_TO_MEM | MEM_RDONLY in check_helper_mem_access
+
+From: Kumar Kartikeya Dwivedi <memxor@gmail.com>
+
+commit 97e6d7dab1ca4648821c790a2b7913d6d5d549db upstream.
+
+The commit being fixed aimed to disallow users from incorrectly
+obtaining a writable pointer to memory that is only meant to be read.
+This is now enforced using the MEM_RDONLY flag.
+
+For instance, in the case of global percpu variables whose BTF type is
+not a struct (e.g. bpf_prog_active), the verifier marks the register
+type as PTR_TO_MEM | MEM_RDONLY for the return value of the
+bpf_this_cpu_ptr and bpf_per_cpu_ptr helpers. However, when such a
+pointer is passed to a kfunc, global func, or BPF helper,
+check_helper_mem_access does not expect the MEM_RDONLY flag to be set,
+so the pointer is checked as a pointer to writable memory. Later, the
+verifier sets up the argument type of a global func as
+PTR_TO_MEM | PTR_MAYBE_NULL, so a user can use a global func to get
+around the limitations imposed by this flag.
+
+This check will also cover global non-percpu variables that may be
+introduced in kernel BTF in the future.
+
+Also, update the log message for the PTR_TO_BUF case to match the
+PTR_TO_MEM case, so that the reason for the error is clear to the user.
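+
+To illustrate, a minimal hypothetical sketch (not taken from the patch;
+assumes the usual vmlinux.h/bpf_helpers.h includes) of the pattern that
+this change rejects:
+
+  extern const int bpf_prog_active __ksym;  /* non-struct percpu ksym */
+
+  SEC("raw_tp/sys_enter")
+  int write_to_rdonly(void *ctx)
+  {
+          /* PTR_TO_MEM | MEM_RDONLY for non-struct BTF types */
+          int *p = bpf_this_cpu_ptr(&bpf_prog_active);
+
+          /* bpf_get_current_comm() writes into its buffer argument
+           * (raw_mode); the verifier now fails this with
+           * "R1 cannot write into rdonly_mem"
+           */
+          return bpf_get_current_comm(p, sizeof(*p));
+  }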
+
+Fixes: 34d3a78c681e ("bpf: Make per_cpu_ptr return rdonly PTR_TO_MEM.")
+Reviewed-by: Hao Luo <haoluo@google.com>
+Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
+Link: https://lore.kernel.org/r/20220319080827.73251-3-memxor@gmail.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/verifier.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -4847,13 +4847,23 @@ static int check_helper_mem_access(struc
+ return check_map_access(env, regno, reg->off, access_size,
+ zero_size_allowed);
+ case PTR_TO_MEM:
++ if (type_is_rdonly_mem(reg->type)) {
++ if (meta && meta->raw_mode) {
++ verbose(env, "R%d cannot write into %s\n", regno,
++ reg_type_str(env, reg->type));
++ return -EACCES;
++ }
++ }
+ return check_mem_region_access(env, regno, reg->off,
+ access_size, reg->mem_size,
+ zero_size_allowed);
+ case PTR_TO_BUF:
+ if (type_is_rdonly_mem(reg->type)) {
+- if (meta && meta->raw_mode)
++ if (meta && meta->raw_mode) {
++ verbose(env, "R%d cannot write into %s\n", regno,
++ reg_type_str(env, reg->type));
+ return -EACCES;
++ }
+
+ buf_info = "rdonly";
+ max_access = &env->prog->aux->max_rdonly_access;
--- /dev/null
+From 45969b4152c1752089351cd6836a42a566d49bcf Mon Sep 17 00:00:00 2001
+From: Liu Jian <liujian56@huawei.com>
+Date: Sat, 16 Apr 2022 18:57:59 +0800
+Subject: bpf: Enlarge offset check value to INT_MAX in bpf_skb_{load,store}_bytes
+
+From: Liu Jian <liujian56@huawei.com>
+
+commit 45969b4152c1752089351cd6836a42a566d49bcf upstream.
+
+The data length of skb frags + frag_list may be greater than 0xffff, and
+skb_header_pointer() cannot handle a negative offset. So use INT_MAX here
+to check the validity of the offset. Apply the same change to the related
+function skb_store_bytes().
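+
+As a sketch of why the bound is INT_MAX (plain user-space C, not from
+this patch): skb_header_pointer() takes a signed int offset, so a u32
+offset above INT_MAX turns negative on the way down:
+
+  #include <limits.h>
+  #include <stdio.h>
+
+  int main(void)
+  {
+          unsigned int offset = (unsigned int)INT_MAX + 1; /* 0x80000000 */
+
+          /* same bit pattern reinterpreted as a signed int */
+          printf("u32 %u becomes int %d\n", offset, (int)offset);
+          return 0;
+  }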
+
+Fixes: 05c74e5e53f6 ("bpf: add bpf_skb_load_bytes helper")
+Signed-off-by: Liu Jian <liujian56@huawei.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Song Liu <songliubraving@fb.com>
+Link: https://lore.kernel.org/bpf/20220416105801.88708-2-liujian56@huawei.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/filter.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -1687,7 +1687,7 @@ BPF_CALL_5(bpf_skb_store_bytes, struct s
+
+ if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
+ return -EINVAL;
+- if (unlikely(offset > 0xffff))
++ if (unlikely(offset > INT_MAX))
+ return -EFAULT;
+ if (unlikely(bpf_try_make_writable(skb, offset + len)))
+ return -EFAULT;
+@@ -1722,7 +1722,7 @@ BPF_CALL_4(bpf_skb_load_bytes, const str
+ {
+ void *ptr;
+
+- if (unlikely(offset > 0xffff))
++ if (unlikely(offset > INT_MAX))
+ goto err_clear;
+
+ ptr = skb_header_pointer(skb, offset, len, to);
--- /dev/null
+From 4b6313cf99b0d51b49aeaea98ec76ca8161ecb80 Mon Sep 17 00:00:00 2001
+From: Alexei Starovoitov <ast@kernel.org>
+Date: Thu, 12 May 2022 18:10:24 -0700
+Subject: bpf: Fix combination of jit blinding and pointers to bpf subprogs.
+
+From: Alexei Starovoitov <ast@kernel.org>
+
+commit 4b6313cf99b0d51b49aeaea98ec76ca8161ecb80 upstream.
+
+The combination of jit blinding and pointers to bpf subprogs causes:
+[ 36.989548] BUG: unable to handle page fault for address: 0000000100000001
+[ 36.990342] #PF: supervisor instruction fetch in kernel mode
+[ 36.990968] #PF: error_code(0x0010) - not-present page
+[ 36.994859] RIP: 0010:0x100000001
+[ 36.995209] Code: Unable to access opcode bytes at RIP 0xffffffd7.
+[ 37.004091] Call Trace:
+[ 37.004351] <TASK>
+[ 37.004576] ? bpf_loop+0x4d/0x70
+[ 37.004932] ? bpf_prog_3899083f75e4c5de_F+0xe3/0x13b
+
+The jit blinding logic didn't recognize that a ld_imm64 holding the
+address of a bpf subprogram is a special instruction, and proceeded to
+randomize it. By itself that wouldn't have been an issue, but the
+jit_subprogs() logic relies on a two-step process: JIT all subprogs
+first, then JIT them again once the addresses of all subprogs are known.
+Blinding in the first JIT phase caused the second JIT to miss the
+adjustment of the special ld_imm64.
+
+Fix this issue by ignoring special ld_imm64 instructions that don't have
+user-controlled constants and shouldn't be blinded.
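+
+For reference, the check used to skip these instructions looks roughly
+like this in the affected trees (paraphrased, not part of this diff):
+
+  static inline bool bpf_pseudo_func(const struct bpf_insn *insn)
+  {
+          /* ld_imm64 whose imm is a subprog offset, fixed up to the
+           * subprog's address by jit_subprogs() in the second pass
+           */
+          return insn->code == (BPF_LD | BPF_IMM | BPF_DW) &&
+                 insn->src_reg == BPF_PSEUDO_FUNC;
+  }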
+
+Fixes: 69c087ba6225 ("bpf: Add bpf_for_each_map_elem() helper")
+Reported-by: Andrii Nakryiko <andrii@kernel.org>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Andrii Nakryiko <andrii@kernel.org>
+Acked-by: Martin KaFai Lau <kafai@fb.com>
+Link: https://lore.kernel.org/bpf/20220513011025.13344-1-alexei.starovoitov@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/core.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -1157,6 +1157,16 @@ struct bpf_prog *bpf_jit_blind_constants
+ insn = clone->insnsi;
+
+ for (i = 0; i < insn_cnt; i++, insn++) {
++ if (bpf_pseudo_func(insn)) {
++ /* ld_imm64 with an address of bpf subprog is not
++ * a user controlled constant. Don't randomize it,
++ * since it will conflict with jit_subprogs() logic.
++ */
++ insn++;
++ i++;
++ continue;
++ }
++
+ /* We temporarily need to hold the original ld64 insn
+ * so that we can still access the first part in the
+ * second blinding run.
--- /dev/null
+From b45043192b3e481304062938a6561da2ceea46a6 Mon Sep 17 00:00:00 2001
+From: Yuntao Wang <ytcoode@gmail.com>
+Date: Thu, 7 Apr 2022 21:04:23 +0800
+Subject: bpf: Fix excessive memory allocation in stack_map_alloc()
+
+From: Yuntao Wang <ytcoode@gmail.com>
+
+commit b45043192b3e481304062938a6561da2ceea46a6 upstream.
+
+The 'n_buckets * (value_size + sizeof(struct stack_map_bucket))' part of
+the memory allocated for 'smap' has been unused ever since the
+memlock-based accounting was removed, so get rid of it.
+
+[ Note, Daniel:
+
+Commit b936ca643ade ("bpf: rework memlock-based memory accounting for maps")
+moved `cost += n_buckets * (value_size + sizeof(struct stack_map_bucket))`
+up, and therefore before the bpf_map_area_alloc() allocation, sigh. In a
+later step, commit c85d69135a91 ("bpf: move memory size checks to
+bpf_map_charge_init()") moved the overflow checks of
+`cost >= U32_MAX - PAGE_SIZE` into bpf_map_charge_init(). And then
+370868107bf6 ("bpf: Eliminate rlimit-based memory accounting for stackmap
+maps") finally removed the bpf_map_charge_init() call. Anyway, the
+original code did the allocation the same way as /after/ this fix. ]
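+
+For context, bucket storage is carved out of a separate allocation;
+paraphrasing prealloc_elems_and_freelist() in kernel/bpf/stackmap.c
+(not part of this diff), which is why the smap area never needed that
+extra room:
+
+  u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size;
+
+  /* buckets live here, not in the area sized by 'cost' above */
+  smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries,
+                                   smap->map.numa_node);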
+
+Fixes: b936ca643ade ("bpf: rework memlock-based memory accounting for maps")
+Signed-off-by: Yuntao Wang <ytcoode@gmail.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/bpf/20220407130423.798386-1-ytcoode@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/stackmap.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/kernel/bpf/stackmap.c
++++ b/kernel/bpf/stackmap.c
+@@ -100,7 +100,6 @@ static struct bpf_map *stack_map_alloc(u
+ return ERR_PTR(-E2BIG);
+
+ cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
+- cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
+ smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr));
+ if (!smap)
+ return ERR_PTR(-ENOMEM);
--- /dev/null
+From a2aa95b71c9bbec793b5c5fa50f0a80d882b3e8d Mon Sep 17 00:00:00 2001
+From: Yuntao Wang <ytcoode@gmail.com>
+Date: Sat, 30 Apr 2022 21:08:03 +0800
+Subject: bpf: Fix potential array overflow in bpf_trampoline_get_progs()
+
+From: Yuntao Wang <ytcoode@gmail.com>
+
+commit a2aa95b71c9bbec793b5c5fa50f0a80d882b3e8d upstream.
+
+The cnt value in the 'cnt >= BPF_MAX_TRAMP_PROGS' check does not
+include BPF_TRAMP_MODIFY_RETURN bpf programs, so the number of attached
+BPF_TRAMP_MODIFY_RETURN bpf programs in a trampoline can exceed
+BPF_MAX_TRAMP_PROGS.
+
+When this happens, the assignment '*progs++ = aux->prog' in
+bpf_trampoline_get_progs() will overflow the progs array, as the progs
+field in the bpf_tramp_progs struct can hold at most
+BPF_MAX_TRAMP_PROGS bpf programs.
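+
+For reference, the trampoline program kinds in the affected trees
+(paraphrased from include/linux/bpf.h); looping up to BPF_TRAMP_MAX
+covers all three attach kinds, whereas the old sum skipped
+BPF_TRAMP_MODIFY_RETURN:
+
+  enum bpf_tramp_prog_type {
+          BPF_TRAMP_FENTRY,
+          BPF_TRAMP_MODIFY_RETURN,  /* missing from the old cnt */
+          BPF_TRAMP_FEXIT,
+          BPF_TRAMP_MAX,
+          BPF_TRAMP_REPLACE,        /* more than MAX, last one */
+  };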
+
+Fixes: 88fd9e5352fe ("bpf: Refactor trampoline update code")
+Signed-off-by: Yuntao Wang <ytcoode@gmail.com>
+Link: https://lore.kernel.org/r/20220430130803.210624-1-ytcoode@gmail.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/trampoline.c | 18 ++++++++++++------
+ 1 file changed, 12 insertions(+), 6 deletions(-)
+
+--- a/kernel/bpf/trampoline.c
++++ b/kernel/bpf/trampoline.c
+@@ -423,7 +423,7 @@ int bpf_trampoline_link_prog(struct bpf_
+ {
+ enum bpf_tramp_prog_type kind;
+ int err = 0;
+- int cnt;
++ int cnt = 0, i;
+
+ kind = bpf_attach_type_to_tramp(prog);
+ mutex_lock(&tr->mutex);
+@@ -434,7 +434,10 @@ int bpf_trampoline_link_prog(struct bpf_
+ err = -EBUSY;
+ goto out;
+ }
+- cnt = tr->progs_cnt[BPF_TRAMP_FENTRY] + tr->progs_cnt[BPF_TRAMP_FEXIT];
++
++ for (i = 0; i < BPF_TRAMP_MAX; i++)
++ cnt += tr->progs_cnt[i];
++
+ if (kind == BPF_TRAMP_REPLACE) {
+ /* Cannot attach extension if fentry/fexit are in use. */
+ if (cnt) {
+@@ -512,16 +515,19 @@ out:
+
+ void bpf_trampoline_put(struct bpf_trampoline *tr)
+ {
++ int i;
++
+ if (!tr)
+ return;
+ mutex_lock(&trampoline_mutex);
+ if (!refcount_dec_and_test(&tr->refcnt))
+ goto out;
+ WARN_ON_ONCE(mutex_is_locked(&tr->mutex));
+- if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FENTRY])))
+- goto out;
+- if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FEXIT])))
+- goto out;
++
++ for (i = 0; i < BPF_TRAMP_MAX; i++)
++ if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[i])))
++ goto out;
++
+ /* This code will be executed even when the last bpf_tramp_image
+ * is alive. All progs are detached from the trampoline and the
+ * trampoline image is patched with jmp into epilogue to skip
--- /dev/null
+From dcf456c9a095a6e71f53d6f6f004133ee851ee70 Mon Sep 17 00:00:00 2001
+From: KP Singh <kpsingh@kernel.org>
+Date: Mon, 18 Apr 2022 15:51:58 +0000
+Subject: bpf: Fix usage of trace RCU in local storage.
+
+From: KP Singh <kpsingh@kernel.org>
+
+commit dcf456c9a095a6e71f53d6f6f004133ee851ee70 upstream.
+
+bpf_{sk,task,inode}_storage_free() do not need to use
+call_rcu_tasks_trace, since no BPF program should be accessing the owner
+while it is being destroyed. The only other reader at this point is
+bpf_local_storage_map_free(), which uses normal RCU.
+
+The only paths that need trace RCU are (see the sketch below):
+
+* bpf_local_storage_{delete,update} helpers
+* map_{delete,update}_elem() syscalls
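+
+These paths can race with sleepable BPF programs, which hold their
+storage pointers under rcu_read_lock_trace(). A minimal hypothetical
+sketch (map name and hook are illustrative assumptions):
+
+  #include <vmlinux.h>
+  #include <bpf/bpf_helpers.h>
+  #include <bpf/bpf_tracing.h>
+
+  struct val { int cnt; };
+
+  struct {
+          __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+          __uint(map_flags, BPF_F_NO_PREALLOC);
+          __type(key, int);
+          __type(value, struct val);
+  } task_map SEC(".maps");
+
+  SEC("lsm.s/file_open") /* sleepable: runs under rcu_read_lock_trace() */
+  int BPF_PROG(file_open, struct file *file)
+  {
+          struct val *v;
+
+          v = bpf_task_storage_get(&task_map, bpf_get_current_task_btf(),
+                                   NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
+          /* a concurrent map_delete_elem() must defer the free with
+           * call_rcu_tasks_trace() so 'v' stays valid here
+           */
+          if (v)
+                  __sync_fetch_and_add(&v->cnt, 1);
+          return 0;
+  }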
+
+Fixes: 0fe4b381a59e ("bpf: Allow bpf_local_storage to be used by sleepable programs")
+Signed-off-by: KP Singh <kpsingh@kernel.org>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Martin KaFai Lau <kafai@fb.com>
+Link: https://lore.kernel.org/bpf/20220418155158.2865678-1-kpsingh@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/bpf_local_storage.h | 4 ++--
+ kernel/bpf/bpf_inode_storage.c | 4 ++--
+ kernel/bpf/bpf_local_storage.c | 29 +++++++++++++++++++----------
+ kernel/bpf/bpf_task_storage.c | 4 ++--
+ net/core/bpf_sk_storage.c | 6 +++---
+ 5 files changed, 28 insertions(+), 19 deletions(-)
+
+--- a/include/linux/bpf_local_storage.h
++++ b/include/linux/bpf_local_storage.h
+@@ -143,9 +143,9 @@ void bpf_selem_link_storage_nolock(struc
+
+ bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
+ struct bpf_local_storage_elem *selem,
+- bool uncharge_omem);
++ bool uncharge_omem, bool use_trace_rcu);
+
+-void bpf_selem_unlink(struct bpf_local_storage_elem *selem);
++void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool use_trace_rcu);
+
+ void bpf_selem_link_map(struct bpf_local_storage_map *smap,
+ struct bpf_local_storage_elem *selem);
+--- a/kernel/bpf/bpf_inode_storage.c
++++ b/kernel/bpf/bpf_inode_storage.c
+@@ -90,7 +90,7 @@ void bpf_inode_storage_free(struct inode
+ */
+ bpf_selem_unlink_map(selem);
+ free_inode_storage = bpf_selem_unlink_storage_nolock(
+- local_storage, selem, false);
++ local_storage, selem, false, false);
+ }
+ raw_spin_unlock_bh(&local_storage->lock);
+ rcu_read_unlock();
+@@ -149,7 +149,7 @@ static int inode_storage_delete(struct i
+ if (!sdata)
+ return -ENOENT;
+
+- bpf_selem_unlink(SELEM(sdata));
++ bpf_selem_unlink(SELEM(sdata), true);
+
+ return 0;
+ }
+--- a/kernel/bpf/bpf_local_storage.c
++++ b/kernel/bpf/bpf_local_storage.c
+@@ -106,7 +106,7 @@ static void bpf_selem_free_rcu(struct rc
+ */
+ bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
+ struct bpf_local_storage_elem *selem,
+- bool uncharge_mem)
++ bool uncharge_mem, bool use_trace_rcu)
+ {
+ struct bpf_local_storage_map *smap;
+ bool free_local_storage;
+@@ -150,11 +150,16 @@ bool bpf_selem_unlink_storage_nolock(str
+ SDATA(selem))
+ RCU_INIT_POINTER(local_storage->cache[smap->cache_idx], NULL);
+
+- call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_rcu);
++ if (use_trace_rcu)
++ call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_rcu);
++ else
++ kfree_rcu(selem, rcu);
++
+ return free_local_storage;
+ }
+
+-static void __bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem)
++static void __bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem,
++ bool use_trace_rcu)
+ {
+ struct bpf_local_storage *local_storage;
+ bool free_local_storage = false;
+@@ -169,12 +174,16 @@ static void __bpf_selem_unlink_storage(s
+ raw_spin_lock_irqsave(&local_storage->lock, flags);
+ if (likely(selem_linked_to_storage(selem)))
+ free_local_storage = bpf_selem_unlink_storage_nolock(
+- local_storage, selem, true);
++ local_storage, selem, true, use_trace_rcu);
+ raw_spin_unlock_irqrestore(&local_storage->lock, flags);
+
+- if (free_local_storage)
+- call_rcu_tasks_trace(&local_storage->rcu,
++ if (free_local_storage) {
++ if (use_trace_rcu)
++ call_rcu_tasks_trace(&local_storage->rcu,
+ bpf_local_storage_free_rcu);
++ else
++ kfree_rcu(local_storage, rcu);
++ }
+ }
+
+ void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
+@@ -214,14 +223,14 @@ void bpf_selem_link_map(struct bpf_local
+ raw_spin_unlock_irqrestore(&b->lock, flags);
+ }
+
+-void bpf_selem_unlink(struct bpf_local_storage_elem *selem)
++void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool use_trace_rcu)
+ {
+ /* Always unlink from map before unlinking from local_storage
+ * because selem will be freed after successfully unlinked from
+ * the local_storage.
+ */
+ bpf_selem_unlink_map(selem);
+- __bpf_selem_unlink_storage(selem);
++ __bpf_selem_unlink_storage(selem, use_trace_rcu);
+ }
+
+ struct bpf_local_storage_data *
+@@ -454,7 +463,7 @@ bpf_local_storage_update(void *owner, st
+ if (old_sdata) {
+ bpf_selem_unlink_map(SELEM(old_sdata));
+ bpf_selem_unlink_storage_nolock(local_storage, SELEM(old_sdata),
+- false);
++ false, true);
+ }
+
+ unlock:
+@@ -532,7 +541,7 @@ void bpf_local_storage_map_free(struct b
+ migrate_disable();
+ __this_cpu_inc(*busy_counter);
+ }
+- bpf_selem_unlink(selem);
++ bpf_selem_unlink(selem, false);
+ if (busy_counter) {
+ __this_cpu_dec(*busy_counter);
+ migrate_enable();
+--- a/kernel/bpf/bpf_task_storage.c
++++ b/kernel/bpf/bpf_task_storage.c
+@@ -102,7 +102,7 @@ void bpf_task_storage_free(struct task_s
+ */
+ bpf_selem_unlink_map(selem);
+ free_task_storage = bpf_selem_unlink_storage_nolock(
+- local_storage, selem, false);
++ local_storage, selem, false, false);
+ }
+ raw_spin_unlock_irqrestore(&local_storage->lock, flags);
+ bpf_task_storage_unlock();
+@@ -191,7 +191,7 @@ static int task_storage_delete(struct ta
+ if (!sdata)
+ return -ENOENT;
+
+- bpf_selem_unlink(SELEM(sdata));
++ bpf_selem_unlink(SELEM(sdata), true);
+
+ return 0;
+ }
+--- a/net/core/bpf_sk_storage.c
++++ b/net/core/bpf_sk_storage.c
+@@ -40,7 +40,7 @@ static int bpf_sk_storage_del(struct soc
+ if (!sdata)
+ return -ENOENT;
+
+- bpf_selem_unlink(SELEM(sdata));
++ bpf_selem_unlink(SELEM(sdata), true);
+
+ return 0;
+ }
+@@ -75,8 +75,8 @@ void bpf_sk_storage_free(struct sock *sk
+ * sk_storage.
+ */
+ bpf_selem_unlink_map(selem);
+- free_sk_storage = bpf_selem_unlink_storage_nolock(sk_storage,
+- selem, true);
++ free_sk_storage = bpf_selem_unlink_storage_nolock(
++ sk_storage, selem, true, false);
+ }
+ raw_spin_unlock_bh(&sk_storage->lock);
+ rcu_read_unlock();
--- /dev/null
+From 7b3552d3f9f6897851fc453b5131a967167e43c2 Mon Sep 17 00:00:00 2001
+From: Kumar Kartikeya Dwivedi <memxor@gmail.com>
+Date: Sat, 19 Mar 2022 13:38:25 +0530
+Subject: bpf: Reject writes for PTR_TO_MAP_KEY in check_helper_mem_access
+
+From: Kumar Kartikeya Dwivedi <memxor@gmail.com>
+
+commit 7b3552d3f9f6897851fc453b5131a967167e43c2 upstream.
+
+It is not permitted to write to PTR_TO_MAP_KEY, but the current code in
+check_helper_mem_access would allow it. Reject this case as well, since
+helpers taking ARG_PTR_TO_UNINIT_MEM can also be passed a PTR_TO_MAP_KEY.
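+
+A hypothetical sketch of the now-rejected pattern (callback and
+register numbers are illustrative): the key pointer seen by a
+bpf_for_each_map_elem() callback is PTR_TO_MAP_KEY, and passing it as a
+helper's ARG_PTR_TO_UNINIT_MEM buffer would have allowed a write
+through it:
+
+  static long cb(struct bpf_map *map, __u32 *key, __u64 *val, void *ctx)
+  {
+          /* bpf_get_current_comm() writes into its first argument;
+           * the verifier now rejects this with
+           * "R1 cannot write into map_key"
+           */
+          bpf_get_current_comm(key, sizeof(*key));
+          return 0;
+  }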
+
+Fixes: 69c087ba6225 ("bpf: Add bpf_for_each_map_elem() helper")
+Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
+Link: https://lore.kernel.org/r/20220319080827.73251-4-memxor@gmail.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/verifier.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -4832,6 +4832,11 @@ static int check_helper_mem_access(struc
+ return check_packet_access(env, regno, reg->off, access_size,
+ zero_size_allowed);
+ case PTR_TO_MAP_KEY:
++ if (meta && meta->raw_mode) {
++ verbose(env, "R%d cannot write into %s\n", regno,
++ reg_type_str(env, reg->type));
++ return -EACCES;
++ }
+ return check_mem_region_access(env, regno, reg->off, access_size,
+ reg->map_ptr->key_size, false);
+ case PTR_TO_MAP_VALUE:
--- /dev/null
+From 6d5aa418b3bd42cdccc36e94ee199af423ef7c84 Mon Sep 17 00:00:00 2001
+From: Akira Yokosawa <akiyks@gmail.com>
+Date: Wed, 27 Apr 2022 18:28:39 +0900
+Subject: docs: submitting-patches: Fix crossref to 'The canonical patch format'
+
+From: Akira Yokosawa <akiyks@gmail.com>
+
+commit 6d5aa418b3bd42cdccc36e94ee199af423ef7c84 upstream.
+
+The reference to `explicit_in_reply_to` points at the wrong target: when
+the reference was added in the form of "#15" [1], section 15 was "The
+canonical patch format". The "#15" reference was then not properly
+updated through a couple of reorganizations during the plain-text
+SubmittingPatches era.
+
+Fix it by using `the_canonical_patch_format`.
+
+[1]: 2ae19acaa50a ("Documentation: Add "how to write a good patch summary" to SubmittingPatches")
+
+Signed-off-by: Akira Yokosawa <akiyks@gmail.com>
+Fixes: 5903019b2a5e ("Documentation/SubmittingPatches: convert it to ReST markup")
+Fixes: 9b2c76777acc ("Documentation/SubmittingPatches: enrich the Sphinx output")
+Cc: Jonathan Corbet <corbet@lwn.net>
+Cc: Mauro Carvalho Chehab <mchehab@kernel.org>
+Cc: stable@vger.kernel.org # v4.9+
+Link: https://lore.kernel.org/r/64e105a5-50be-23f2-6cae-903a2ea98e18@gmail.com
+Signed-off-by: Jonathan Corbet <corbet@lwn.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/process/submitting-patches.rst | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/Documentation/process/submitting-patches.rst
++++ b/Documentation/process/submitting-patches.rst
+@@ -77,7 +77,7 @@ as you intend it to.
+
+ The maintainer will thank you if you write your patch description in a
+ form which can be easily pulled into Linux's source code management
+-system, ``git``, as a "commit log". See :ref:`explicit_in_reply_to`.
++system, ``git``, as a "commit log". See :ref:`the_canonical_patch_format`.
+
+ Solve only one problem per patch. If your description starts to get
+ long, that's a sign that you probably need to split up your patch.
--- /dev/null
+From 452284407c18d8a522c3039339b1860afa0025a8 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+Date: Sat, 14 May 2022 10:08:10 -0400
+Subject: NFS: Memory allocation failures are not server fatal errors
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+commit 452284407c18d8a522c3039339b1860afa0025a8 upstream.
+
+We need to filter out ENOMEM in nfs_error_is_fatal_on_server(), because
+running out of memory on our client is not a server error.
+
+Reported-by: Olga Kornievskaia <aglo@umich.edu>
+Fixes: 2dc23afffbca ("NFS: ENOMEM should also be a fatal error.")
+Cc: stable@vger.kernel.org
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfs/internal.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -827,6 +827,7 @@ static inline bool nfs_error_is_fatal_on
+ case 0:
+ case -ERESTARTSYS:
+ case -EINTR:
++ case -ENOMEM:
+ return false;
+ }
+ return nfs_error_is_fatal(err);
--- /dev/null
+From ce3c4ad7f4ce5db7b4f08a1e237d8dd94b39180b Mon Sep 17 00:00:00 2001
+From: Chuck Lever <chuck.lever@oracle.com>
+Date: Sat, 21 May 2022 19:06:13 -0400
+Subject: NFSD: Fix possible sleep during nfsd4_release_lockowner()
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+commit ce3c4ad7f4ce5db7b4f08a1e237d8dd94b39180b upstream.
+
+nfsd4_release_lockowner() holds clp->cl_lock when it calls
+check_for_locks(). However, check_for_locks() calls nfsd_file_get()
+/ nfsd_file_put() to access the backing inode's flc_posix list, and
+nfsd_file_put() can sleep if the inode was recently removed.
+
+Let's instead rely on the stateowner's reference count to gate
+whether the release is permitted. This should be a reliable
+indication of locks-in-use since file lock operations and
+->lm_get_owner take appropriate references, which are released
+appropriately when file locks are removed.
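+
+For context, each file lock pins the stateowner through ->lm_get_owner;
+paraphrasing fs/nfsd/nfs4state.c (not part of this diff), this is why
+an so_count of 1 implies no locks remain:
+
+  static void nfsd4_fl_get_owner(struct file_lock *dst, struct file_lock *src)
+  {
+          struct nfs4_lockowner *lo = (struct nfs4_lockowner *)src->fl_owner;
+
+          /* every copied lock takes a stateowner reference */
+          dst->fl_owner = (fl_owner_t)lockowner(
+                                  nfs4_get_stateowner(&lo->lo_owner));
+  }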
+
+Reported-by: Dai Ngo <dai.ngo@oracle.com>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfsd/nfs4state.c | 12 ++++--------
+ 1 file changed, 4 insertions(+), 8 deletions(-)
+
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -7330,16 +7330,12 @@ nfsd4_release_lockowner(struct svc_rqst
+ if (sop->so_is_open_owner || !same_owner_str(sop, owner))
+ continue;
+
+- /* see if there are still any locks associated with it */
+- lo = lockowner(sop);
+- list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
+- if (check_for_locks(stp->st_stid.sc_file, lo)) {
+- status = nfserr_locks_held;
+- spin_unlock(&clp->cl_lock);
+- return status;
+- }
++ if (atomic_read(&sop->so_count) != 1) {
++ spin_unlock(&clp->cl_lock);
++ return nfserr_locks_held;
+ }
+
++ lo = lockowner(sop);
+ nfs4_get_stateowner(sop);
+ break;
+ }
media-i2c-imx412-fix-power_off-ordering.patch
tpm-fix-buffer-access-in-tpm2_get_tpm_pt.patch
tpm-ibmvtpm-correct-the-return-value-in-tpm_ibmvtpm_probe.patch
+docs-submitting-patches-fix-crossref-to-the-canonical-patch-format.patch
+nfs-memory-allocation-failures-are-not-server-fatal-errors.patch
+nfsd-fix-possible-sleep-during-nfsd4_release_lockowner.patch
+bpf-fix-potential-array-overflow-in-bpf_trampoline_get_progs.patch
+bpf-fix-combination-of-jit-blinding-and-pointers-to-bpf-subprogs.patch
+bpf-enlarge-offset-check-value-to-int_max-in-bpf_skb_-load-store-_bytes.patch
+bpf-fix-usage-of-trace-rcu-in-local-storage.patch
+bpf-fix-excessive-memory-allocation-in-stack_map_alloc.patch
+bpf-reject-writes-for-ptr_to_map_key-in-check_helper_mem_access.patch
+bpf-check-ptr_to_mem-mem_rdonly-in-check_helper_mem_access.patch