--- /dev/null
+From ee2a098851bfbe8bcdd964c0121f4246f00ff41e Mon Sep 17 00:00:00 2001
+From: Namhyung Kim <namhyung@kernel.org>
+Date: Mon, 14 Mar 2022 11:20:41 -0700
+Subject: bpf: Adjust BPF stack helper functions to accommodate skip > 0
+
+From: Namhyung Kim <namhyung@kernel.org>
+
+commit ee2a098851bfbe8bcdd964c0121f4246f00ff41e upstream.
+
+Let's say that the caller has storage for num_elem stack frames. Then,
+the BPF stack helper functions walk the stack for only num_elem frames.
+This means that if skip > 0, one keeps only 'num_elem - skip' frames.
+
+This is because the helpers set init_nr in the perf_callchain_entry
+so that entries are stored at the end of the buffer, saving only
+num_elem entries. I believe this was done because the perf callchain
+code used to unwind the stack frames until it reached the global max
+size (sysctl_perf_event_max_stack).
+
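+For example, with num_elem = 4 and sysctl_perf_event_max_stack = 127
+(the default), init_nr was 127 - 4 = 123, so the unwinder stored at
+most 4 entries. A call with skip = 2 then handed back only
+4 - 2 = 2 frames, although the caller's buffer had room for 4.
+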
+However, it now has perf_callchain_entry_ctx.max_stack to limit the
+iteration locally. This simplifies the init_nr handling in the BPF
+callstack code and removes the confusion with perf_event's
+__PERF_SAMPLE_CALLCHAIN_EARLY, which sets init_nr to 0.
+
+Also change the comment on bpf_get_stack() in the header file to be
+more explicit about what the return value means.
+
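+In short, the new logic becomes (a simplified sketch, not the
+verbatim kernel source):
+
+    /* unwind enough frames to cover both the skipped frames and
+     * the caller's buffer, capped by the global limit
+     */
+    u32 max_depth = num_elem + skip;
+
+    if (max_depth > sysctl_perf_event_max_stack)
+        max_depth = sysctl_perf_event_max_stack;
+
+    /* frames [0, skip) are dropped; up to num_elem are copied */
+    trace_nr = min(trace->nr - skip, num_elem);
+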
+Fixes: c195651e565a ("bpf: add bpf_get_stack helper")
+Signed-off-by: Namhyung Kim <namhyung@kernel.org>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Yonghong Song <yhs@fb.com>
+Link: https://lore.kernel.org/bpf/30a7b5d5-6726-1cc2-eaee-8da2828a9a9c@oracle.com
+Link: https://lore.kernel.org/bpf/20220314182042.71025-1-namhyung@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+Based-on-patch-by: Eugene Loh <eugene.loh@oracle.com>
+---
+ include/uapi/linux/bpf.h | 8 +++---
+ kernel/bpf/stackmap.c | 56 ++++++++++++++++++++---------------------------
+ 2 files changed, 28 insertions(+), 36 deletions(-)
+
+--- a/include/uapi/linux/bpf.h
++++ b/include/uapi/linux/bpf.h
+@@ -2163,8 +2163,8 @@ union bpf_attr {
+ *
+ * # sysctl kernel.perf_event_max_stack=<new value>
+ * Return
+- * A non-negative value equal to or less than *size* on success,
+- * or a negative error in case of failure.
++ * The non-negative copied *buf* length equal to or less than
++ * *size* on success, or a negative error in case of failure.
+ *
+ * long bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header)
+ * Description
+@@ -3448,8 +3448,8 @@ union bpf_attr {
+ *
+ * # sysctl kernel.perf_event_max_stack=<new value>
+ * Return
+- * A non-negative value equal to or less than *size* on success,
+- * or a negative error in case of failure.
++ * The non-negative copied *buf* length equal to or less than
++ * *size* on success, or a negative error in case of failure.
+ *
+ * long bpf_load_hdr_opt(struct bpf_sock_ops *skops, void *searchby_res, u32 len, u64 flags)
+ * Description
+--- a/kernel/bpf/stackmap.c
++++ b/kernel/bpf/stackmap.c
+@@ -358,7 +358,7 @@ static void stack_map_get_build_id_offse
+ }
+
+ static struct perf_callchain_entry *
+-get_callchain_entry_for_task(struct task_struct *task, u32 init_nr)
++get_callchain_entry_for_task(struct task_struct *task, u32 max_depth)
+ {
+ #ifdef CONFIG_STACKTRACE
+ struct perf_callchain_entry *entry;
+@@ -369,9 +369,8 @@ get_callchain_entry_for_task(struct task
+ if (!entry)
+ return NULL;
+
+- entry->nr = init_nr +
+- stack_trace_save_tsk(task, (unsigned long *)(entry->ip + init_nr),
+- sysctl_perf_event_max_stack - init_nr, 0);
++ entry->nr = stack_trace_save_tsk(task, (unsigned long *)entry->ip,
++ max_depth, 0);
+
+ /* stack_trace_save_tsk() works on unsigned long array, while
+ * perf_callchain_entry uses u64 array. For 32-bit systems, it is
+@@ -383,7 +382,7 @@ get_callchain_entry_for_task(struct task
+ int i;
+
+ /* copy data from the end to avoid using extra buffer */
+- for (i = entry->nr - 1; i >= (int)init_nr; i--)
++ for (i = entry->nr - 1; i >= 0; i--)
+ to[i] = (u64)(from[i]);
+ }
+
+@@ -400,27 +399,19 @@ static long __bpf_get_stackid(struct bpf
+ {
+ struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
+ struct stack_map_bucket *bucket, *new_bucket, *old_bucket;
+- u32 max_depth = map->value_size / stack_map_data_size(map);
+- /* stack_map_alloc() checks that max_depth <= sysctl_perf_event_max_stack */
+- u32 init_nr = sysctl_perf_event_max_stack - max_depth;
+ u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
+ u32 hash, id, trace_nr, trace_len;
+ bool user = flags & BPF_F_USER_STACK;
+ u64 *ips;
+ bool hash_matches;
+
+- /* get_perf_callchain() guarantees that trace->nr >= init_nr
+- * and trace-nr <= sysctl_perf_event_max_stack, so trace_nr <= max_depth
+- */
+- trace_nr = trace->nr - init_nr;
+-
+- if (trace_nr <= skip)
++ if (trace->nr <= skip)
+ /* skipping more than usable stack trace */
+ return -EFAULT;
+
+- trace_nr -= skip;
++ trace_nr = trace->nr - skip;
+ trace_len = trace_nr * sizeof(u64);
+- ips = trace->ip + skip + init_nr;
++ ips = trace->ip + skip;
+ hash = jhash2((u32 *)ips, trace_len / sizeof(u32), 0);
+ id = hash & (smap->n_buckets - 1);
+ bucket = READ_ONCE(smap->buckets[id]);
+@@ -477,8 +468,7 @@ BPF_CALL_3(bpf_get_stackid, struct pt_re
+ u64, flags)
+ {
+ u32 max_depth = map->value_size / stack_map_data_size(map);
+- /* stack_map_alloc() checks that max_depth <= sysctl_perf_event_max_stack */
+- u32 init_nr = sysctl_perf_event_max_stack - max_depth;
++ u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
+ bool user = flags & BPF_F_USER_STACK;
+ struct perf_callchain_entry *trace;
+ bool kernel = !user;
+@@ -487,8 +477,12 @@ BPF_CALL_3(bpf_get_stackid, struct pt_re
+ BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
+ return -EINVAL;
+
+- trace = get_perf_callchain(regs, init_nr, kernel, user,
+- sysctl_perf_event_max_stack, false, false);
++ max_depth += skip;
++ if (max_depth > sysctl_perf_event_max_stack)
++ max_depth = sysctl_perf_event_max_stack;
++
++ trace = get_perf_callchain(regs, 0, kernel, user, max_depth,
++ false, false);
+
+ if (unlikely(!trace))
+ /* couldn't fetch the stack trace */
+@@ -579,7 +573,7 @@ static long __bpf_get_stack(struct pt_re
+ struct perf_callchain_entry *trace_in,
+ void *buf, u32 size, u64 flags)
+ {
+- u32 init_nr, trace_nr, copy_len, elem_size, num_elem;
++ u32 trace_nr, copy_len, elem_size, num_elem, max_depth;
+ bool user_build_id = flags & BPF_F_USER_BUILD_ID;
+ u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
+ bool user = flags & BPF_F_USER_STACK;
+@@ -604,30 +598,28 @@ static long __bpf_get_stack(struct pt_re
+ goto err_fault;
+
+ num_elem = size / elem_size;
+- if (sysctl_perf_event_max_stack < num_elem)
+- init_nr = 0;
+- else
+- init_nr = sysctl_perf_event_max_stack - num_elem;
++ max_depth = num_elem + skip;
++ if (sysctl_perf_event_max_stack < max_depth)
++ max_depth = sysctl_perf_event_max_stack;
+
+ if (trace_in)
+ trace = trace_in;
+ else if (kernel && task)
+- trace = get_callchain_entry_for_task(task, init_nr);
++ trace = get_callchain_entry_for_task(task, max_depth);
+ else
+- trace = get_perf_callchain(regs, init_nr, kernel, user,
+- sysctl_perf_event_max_stack,
++ trace = get_perf_callchain(regs, 0, kernel, user, max_depth,
+ false, false);
+ if (unlikely(!trace))
+ goto err_fault;
+
+- trace_nr = trace->nr - init_nr;
+- if (trace_nr < skip)
++ if (trace->nr < skip)
+ goto err_fault;
+
+- trace_nr -= skip;
++ trace_nr = trace->nr - skip;
+ trace_nr = (trace_nr <= num_elem) ? trace_nr : num_elem;
+ copy_len = trace_nr * elem_size;
+- ips = trace->ip + skip + init_nr;
++
++ ips = trace->ip + skip;
+ if (user && user_build_id)
+ stack_map_get_build_id_offset(buf, ips, trace_nr, user);
+ else
--- /dev/null
+From 58617014405ad5c9f94f464444f4972dabb71ca7 Mon Sep 17 00:00:00 2001
+From: Hengqi Chen <hengqi.chen@gmail.com>
+Date: Thu, 10 Mar 2022 23:53:35 +0800
+Subject: bpf: Fix comment for helper bpf_current_task_under_cgroup()
+
+From: Hengqi Chen <hengqi.chen@gmail.com>
+
+commit 58617014405ad5c9f94f464444f4972dabb71ca7 upstream.
+
+Fix the descriptions of the return values of helper bpf_current_task_under_cgroup().
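+
+For reference, a typical check in a BPF program might look like this
+(a sketch; cgroup_map is a hypothetical BPF_MAP_TYPE_CGROUP_ARRAY
+populated by user space):
+
+    long ret = bpf_current_task_under_cgroup(&cgroup_map, 0);
+
+    if (ret == 1) {
+        /* current task belongs to the cgroup2 */
+    } else if (ret == 0) {
+        /* current task does not belong to the cgroup2 */
+    } else {
+        /* ret is a negative error code */
+    }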
+
+Fixes: c6b5fb8690fa ("bpf: add documentation for eBPF helpers (42-50)")
+Signed-off-by: Hengqi Chen <hengqi.chen@gmail.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Yonghong Song <yhs@fb.com>
+Link: https://lore.kernel.org/bpf/20220310155335.1278783-1-hengqi.chen@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/uapi/linux/bpf.h | 4 ++--
+ tools/include/uapi/linux/bpf.h | 4 ++--
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+--- a/include/uapi/linux/bpf.h
++++ b/include/uapi/linux/bpf.h
+@@ -1490,8 +1490,8 @@ union bpf_attr {
+ * Return
+ * The return value depends on the result of the test, and can be:
+ *
+- * * 0, if current task belongs to the cgroup2.
+- * * 1, if current task does not belong to the cgroup2.
++ * * 1, if current task belongs to the cgroup2.
++ * * 0, if current task does not belong to the cgroup2.
+ * * A negative error code, if an error occurred.
+ *
+ * long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags)
+--- a/tools/include/uapi/linux/bpf.h
++++ b/tools/include/uapi/linux/bpf.h
+@@ -1490,8 +1490,8 @@ union bpf_attr {
+ * Return
+ * The return value depends on the result of the test, and can be:
+ *
+- * * 0, if current task belongs to the cgroup2.
+- * * 1, if current task does not belong to the cgroup2.
++ * * 1, if current task belongs to the cgroup2.
++ * * 0, if current task does not belong to the cgroup2.
+ * * A negative error code, if an error occurred.
+ *
+ * long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags)
--- /dev/null
+From 0e7f1b557974ce297e5e4c9d4245720fbb489886 Mon Sep 17 00:00:00 2001
+From: Miquel Raynal <miquel.raynal@bootlin.com>
+Date: Thu, 16 Dec 2021 12:16:28 +0100
+Subject: dt-bindings: mtd: nand-controller: Fix a comment in the examples
+
+From: Miquel Raynal <miquel.raynal@bootlin.com>
+
+commit 0e7f1b557974ce297e5e4c9d4245720fbb489886 upstream.
+
+The controller properties should be in the controller 'parent' node,
+while the properties in the child nodes are specific to the NAND
+*chip*. This error was already present before the yaml conversion.
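+
+A minimal sketch of the intended split (illustrative only):
+
+    nand-controller {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        /* controller specific properties belong here */
+
+        nand@0 {
+            reg = <0>;
+
+            /* NAND chip specific properties belong here,
+             * e.g. nand-ecc-mode, nand-ecc-algo
+             */
+        };
+    };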
+
+Fixes: 2d472aba15ff ("mtd: nand: document the NAND controller/NAND chip DT representation")
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Acked-by: Rob Herring <robh@kernel.org>
+Link: https://lore.kernel.org/linux-mtd/20211216111654.238086-3-miquel.raynal@bootlin.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/devicetree/bindings/mtd/nand-controller.yaml | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/Documentation/devicetree/bindings/mtd/nand-controller.yaml
++++ b/Documentation/devicetree/bindings/mtd/nand-controller.yaml
+@@ -174,6 +174,6 @@ examples:
+ nand-ecc-mode = "soft";
+ nand-ecc-algo = "bch";
+
+- /* controller specific properties */
++ /* NAND chip specific properties */
+ };
+ };
--- /dev/null
+From 93f2ec9e401276fb4ea9903194a5bfcf175f9a2c Mon Sep 17 00:00:00 2001
+From: Miquel Raynal <miquel.raynal@bootlin.com>
+Date: Thu, 16 Dec 2021 12:16:27 +0100
+Subject: dt-bindings: mtd: nand-controller: Fix the reg property description
+
+From: Miquel Raynal <miquel.raynal@bootlin.com>
+
+commit 93f2ec9e401276fb4ea9903194a5bfcf175f9a2c upstream.
+
+The reg property of a NAND device always references the chip-selects.
+The ready/busy lines are described in the nand-rb property. I believe
+this was a harmless copy/paste error during the conversion to yaml.
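+
+For instance, a chip wired to chip-select 0, with its ready/busy
+signal on the controller's first native R/B line, would be described
+as (illustrative sketch):
+
+    nand@0 {
+        reg = <0>;        /* CS 0 */
+        nand-rb = <0>;    /* native R/B line 0 */
+    };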
+
+Fixes: 212e49693592 ("dt-bindings: mtd: Add YAML schemas for the generic NAND options")
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Acked-by: Rob Herring <robh@kernel.org>
+Link: https://lore.kernel.org/linux-mtd/20211216111654.238086-2-miquel.raynal@bootlin.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/devicetree/bindings/mtd/nand-controller.yaml | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/Documentation/devicetree/bindings/mtd/nand-controller.yaml
++++ b/Documentation/devicetree/bindings/mtd/nand-controller.yaml
+@@ -44,7 +44,7 @@ patternProperties:
+ properties:
+ reg:
+ description:
+- Contains the native Ready/Busy IDs.
++ Contains the chip-select IDs.
+
+ nand-ecc-mode:
+ description:
--- /dev/null
+From 90c204d3195a795f77f5bce767e311dd1c59ca17 Mon Sep 17 00:00:00 2001
+From: Miquel Raynal <miquel.raynal@bootlin.com>
+Date: Thu, 16 Dec 2021 12:16:33 +0100
+Subject: dt-bindings: spi: mxic: The interrupt property is not mandatory
+
+From: Miquel Raynal <miquel.raynal@bootlin.com>
+
+commit 90c204d3195a795f77f5bce767e311dd1c59ca17 upstream.
+
+The interrupt property is not mandatory at all, so it should not be
+part of the required properties list; move it into the optional
+properties list instead.
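+
+A controller node that omits the interrupt line is therefore valid,
+e.g. (illustrative sketch following the binding's own example):
+
+    spi@43c30000 {
+        compatible = "mxicy,mx25f0a-spi";
+        reg = <0x43c30000 0x10000>, <0xa0000000 0x4000000>;
+        reg-names = "regs", "dirmap";
+        clocks = <&clkwizard 0>, <&clkwizard 1>, <&clkc 18>;
+        clock-names = "send_clk", "send_dly_clk", "ps_clk";
+        /* no interrupts property required */
+    };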
+
+Fixes: 326e5c8d4a87 ("dt-binding: spi: Document Macronix controller bindings")
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Reviewed-by: Rob Herring <robh@kernel.org>
+Reviewed-by: Mark Brown <broonie@kernel.org>
+Link: https://lore.kernel.org/linux-mtd/20211216111654.238086-8-miquel.raynal@bootlin.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/devicetree/bindings/spi/spi-mxic.txt | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/Documentation/devicetree/bindings/spi/spi-mxic.txt
++++ b/Documentation/devicetree/bindings/spi/spi-mxic.txt
+@@ -8,11 +8,13 @@ Required properties:
+ - reg: should contain 2 entries, one for the registers and one for the direct
+ mapping area
+ - reg-names: should contain "regs" and "dirmap"
+-- interrupts: interrupt line connected to the SPI controller
+ - clock-names: should contain "ps_clk", "send_clk" and "send_dly_clk"
+ - clocks: should contain 3 entries for the "ps_clk", "send_clk" and
+ "send_dly_clk" clocks
+
++Optional properties:
++- interrupts: interrupt line connected to the SPI controller
++
+ Example:
+
+ spi@43c30000 {
arm-9187-1-jive-fix-return-value-of-__setup-handler.patch
mm-memcontrol-return-1-from-cgroup.memory-__setup-handler.patch
mm-usercopy-return-1-from-hardened_usercopy-__setup-handler.patch
+bpf-adjust-bpf-stack-helper-functions-to-accommodate-skip-0.patch
+bpf-fix-comment-for-helper-bpf_current_task_under_cgroup.patch
+dt-bindings-mtd-nand-controller-fix-the-reg-property-description.patch
+dt-bindings-mtd-nand-controller-fix-a-comment-in-the-examples.patch
+dt-bindings-spi-mxic-the-interrupt-property-is-not-mandatory.patch
+ubi-fastmap-return-error-code-if-memory-allocation-fails-in-add_aeb.patch
--- /dev/null
+From c3c07fc25f37c157fde041b3a0c3dfcb1590cbce Mon Sep 17 00:00:00 2001
+From: Zhihao Cheng <chengzhihao1@huawei.com>
+Date: Mon, 27 Dec 2021 11:22:42 +0800
+Subject: ubi: fastmap: Return error code if memory allocation fails in add_aeb()
+
+From: Zhihao Cheng <chengzhihao1@huawei.com>
+
+commit c3c07fc25f37c157fde041b3a0c3dfcb1590cbce upstream.
+
+Abort fastmap scanning and return an error code if memory allocation
+fails in add_aeb(). Otherwise, UBI will end up with incorrect PEB
+statistics after scanning.
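+
+Conceptually, the change is (a simplified sketch, not the verbatim
+kernel code):
+
+    static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
+                       int pnum, int ec, int scrub)
+    {
+        struct ubi_ainf_peb *aeb;
+
+        aeb = ubi_alloc_aeb(ai, pnum, ec);
+        if (!aeb)
+            return -ENOMEM;    /* previously void, failure was lost */
+
+        aeb->lnum = -1;
+        aeb->scrub = scrub;
+        list_add_tail(&aeb->u.list, list);
+
+        return 0;
+    }
+
+All call sites now check the return value and bail out on error, as
+the diff below shows.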
+
+Fixes: dbb7d2a88d2a7b ("UBI: Add fastmap core")
+Signed-off-by: Zhihao Cheng <chengzhihao1@huawei.com>
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mtd/ubi/fastmap.c | 28 +++++++++++++++++++---------
+ 1 file changed, 19 insertions(+), 9 deletions(-)
+
+--- a/drivers/mtd/ubi/fastmap.c
++++ b/drivers/mtd/ubi/fastmap.c
+@@ -468,7 +468,9 @@ static int scan_pool(struct ubi_device *
+ if (err == UBI_IO_FF_BITFLIPS)
+ scrub = 1;
+
+- add_aeb(ai, free, pnum, ec, scrub);
++ ret = add_aeb(ai, free, pnum, ec, scrub);
++ if (ret)
++ goto out;
+ continue;
+ } else if (err == 0 || err == UBI_IO_BITFLIPS) {
+ dbg_bld("Found non empty PEB:%i in pool", pnum);
+@@ -638,8 +640,10 @@ static int ubi_attach_fastmap(struct ubi
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+- add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
+- be32_to_cpu(fmec->ec), 0);
++ ret = add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
++ be32_to_cpu(fmec->ec), 0);
++ if (ret)
++ goto fail;
+ }
+
+ /* read EC values from used list */
+@@ -649,8 +653,10 @@ static int ubi_attach_fastmap(struct ubi
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+- add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
+- be32_to_cpu(fmec->ec), 0);
++ ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
++ be32_to_cpu(fmec->ec), 0);
++ if (ret)
++ goto fail;
+ }
+
+ /* read EC values from scrub list */
+@@ -660,8 +666,10 @@ static int ubi_attach_fastmap(struct ubi
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+- add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
+- be32_to_cpu(fmec->ec), 1);
++ ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
++ be32_to_cpu(fmec->ec), 1);
++ if (ret)
++ goto fail;
+ }
+
+ /* read EC values from erase list */
+@@ -671,8 +679,10 @@ static int ubi_attach_fastmap(struct ubi
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+- add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
+- be32_to_cpu(fmec->ec), 1);
++ ret = add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
++ be32_to_cpu(fmec->ec), 1);
++ if (ret)
++ goto fail;
+ }
+
+ ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);