--- /dev/null
+From ca46ad2214473df1a6a9496be17156d65ba89b9f Mon Sep 17 00:00:00 2001
+From: Joel Stanley <joel@jms.id.au>
+Date: Thu, 24 Jun 2021 18:37:42 +0930
+Subject: ARM: dts: aspeed: Fix AST2600 machines line names
+
+From: Joel Stanley <joel@jms.id.au>
+
+commit ca46ad2214473df1a6a9496be17156d65ba89b9f upstream.
+
+Tacoma and Rainier both have a line-names array that is too long:
+
+ gpio gpiochip0: gpio-line-names is length 232 but should be at most length 208
+
+This was probably copied from an AST2500 device tree that did have more
+GPIOs on the controller.
+
+Fixes: e9b24b55ca4f ("ARM: dts: aspeed: rainier: Add gpio line names")
+Fixes: 2f68e4e7df67 ("ARM: dts: aspeed: tacoma: Add gpio line names")
+Link: https://lore.kernel.org/r/20210624090742.56640-1-joel@jms.id.au
+Signed-off-by: Joel Stanley <joel@jms.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts | 5 +----
+ arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts | 5 +----
+ 2 files changed, 2 insertions(+), 8 deletions(-)
+
+--- a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
++++ b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
+@@ -280,10 +280,7 @@
+ /*W0-W7*/ "","","","","","","","",
+ /*X0-X7*/ "","","","","","","","",
+ /*Y0-Y7*/ "","","","","","","","",
+- /*Z0-Z7*/ "","","","","","","","",
+- /*AA0-AA7*/ "","","","","","","","",
+- /*AB0-AB7*/ "","","","","","","","",
+- /*AC0-AC7*/ "","","","","","","","";
++ /*Z0-Z7*/ "","","","","","","","";
+
+ pin_mclr_vpp {
+ gpio-hog;
+--- a/arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts
++++ b/arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts
+@@ -136,10 +136,7 @@
+ /*W0-W7*/ "","","","","","","","",
+ /*X0-X7*/ "","","","","","","","",
+ /*Y0-Y7*/ "","","","","","","","",
+- /*Z0-Z7*/ "","","","","","","","",
+- /*AA0-AA7*/ "","","","","","","","",
+- /*AB0-AB7*/ "","","","","","","","",
+- /*AC0-AC7*/ "","","","","","","","";
++ /*Z0-Z7*/ "","","","","","","","";
+ };
+
+ &fmc {
--- /dev/null
+From faffd1b2bde3ee428d6891961f6a60f8e08749d6 Mon Sep 17 00:00:00 2001
+From: Andrew Jeffery <andrew@aj.id.au>
+Date: Mon, 28 Jun 2021 11:06:05 +0930
+Subject: ARM: dts: everest: Add phase corrections for eMMC
+
+From: Andrew Jeffery <andrew@aj.id.au>
+
+commit faffd1b2bde3ee428d6891961f6a60f8e08749d6 upstream.
+
+The values were determined experimentally via boot tests, not by
+measuring the bus behaviour with a scope. We plan to do scope
+measurements to confirm or refine the values and will update the
+devicetree if necessary once these have been obtained.
+
+However, with the patch we can write and read data without issue, whereas
+booting the system without the patch failed at the point of mounting
+the rootfs.
+
+Signed-off-by: Andrew Jeffery <andrew@aj.id.au>
+Link: https://lore.kernel.org/r/20210628013605.1257346-1-andrew@aj.id.au
+Fixes: 2fc88f92359d ("mmc: sdhci-of-aspeed: Expose clock phase controls")
+Fixes: a5c5168478d7 ("ARM: dts: aspeed: Add Everest BMC machine")
+Signed-off-by: Joel Stanley <joel@jms.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts
++++ b/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts
+@@ -1068,6 +1068,7 @@
+
+ &emmc {
+ status = "okay";
++ clk-phase-mmc-hs200 = <180>, <180>;
+ };
+
+ &fsim0 {
--- /dev/null
+From 2d6608b57c50c54c3e46649110e8ea5a40959c30 Mon Sep 17 00:00:00 2001
+From: Andrew Jeffery <andrew@aj.id.au>
+Date: Fri, 25 Jun 2021 15:40:17 +0930
+Subject: ARM: dts: tacoma: Add phase corrections for eMMC
+
+From: Andrew Jeffery <andrew@aj.id.au>
+
+commit 2d6608b57c50c54c3e46649110e8ea5a40959c30 upstream.
+
+The degree values were reverse-engineered from the magic tap values of 7 (in)
+and 15 + inversion (out) initially suggested by Aspeed.
+
+With the patch, Tacoma survives several gigabytes of reads and writes
+using dd, while without it the system locks up randomly during the
+boot process.
+
+Signed-off-by: Andrew Jeffery <andrew@aj.id.au>
+Link: https://lore.kernel.org/r/20210625061017.1149942-1-andrew@aj.id.au
+Fixes: 2fc88f92359d ("mmc: sdhci-of-aspeed: Expose clock phase controls")
+Fixes: 961216c135a8 ("ARM: dts: aspeed: Add Rainier system")
+Signed-off-by: Joel Stanley <joel@jms.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts
++++ b/arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts
+@@ -186,6 +186,7 @@
+
+ &emmc {
+ status = "okay";
++ clk-phase-mmc-hs200 = <36>, <270>;
+ };
+
+ &fsim0 {
--- /dev/null
+From f263a81451c12da5a342d90572e317e611846f2c Mon Sep 17 00:00:00 2001
+From: John Fastabend <john.fastabend@gmail.com>
+Date: Wed, 7 Jul 2021 15:38:47 -0700
+Subject: bpf: Track subprog poke descriptors correctly and fix use-after-free
+
+From: John Fastabend <john.fastabend@gmail.com>
+
+commit f263a81451c12da5a342d90572e317e611846f2c upstream.
+
+Subprograms are calling map_poke_track(), but on program release there is no
+hook to call map_poke_untrack(). However, on program release, the aux memory
+(and poke descriptor table) is freed even though we still have a reference to
+it in the element list of the map aux data. When we run map_poke_run(), we then
+end up accessing free'd memory, triggering KASAN in prog_array_map_poke_run():
+
+ [...]
+ [ 402.824689] BUG: KASAN: use-after-free in prog_array_map_poke_run+0xc2/0x34e
+ [ 402.824698] Read of size 4 at addr ffff8881905a7940 by task hubble-fgs/4337
+ [ 402.824705] CPU: 1 PID: 4337 Comm: hubble-fgs Tainted: G I 5.12.0+ #399
+ [ 402.824715] Call Trace:
+ [ 402.824719] dump_stack+0x93/0xc2
+ [ 402.824727] print_address_description.constprop.0+0x1a/0x140
+ [ 402.824736] ? prog_array_map_poke_run+0xc2/0x34e
+ [ 402.824740] ? prog_array_map_poke_run+0xc2/0x34e
+ [ 402.824744] kasan_report.cold+0x7c/0xd8
+ [ 402.824752] ? prog_array_map_poke_run+0xc2/0x34e
+ [ 402.824757] prog_array_map_poke_run+0xc2/0x34e
+ [ 402.824765] bpf_fd_array_map_update_elem+0x124/0x1a0
+ [...]
+
+The elements concerned are walked as follows:
+
+ for (i = 0; i < elem->aux->size_poke_tab; i++) {
+ poke = &elem->aux->poke_tab[i];
+ [...]
+
+The access to size_poke_tab is a 4 byte read, verified by checking offsets
+in the KASAN dump:
+
+ [ 402.825004] The buggy address belongs to the object at ffff8881905a7800
+ which belongs to the cache kmalloc-1k of size 1024
+ [ 402.825008] The buggy address is located 320 bytes inside of
+ 1024-byte region [ffff8881905a7800, ffff8881905a7c00)
+
+The pahole output of bpf_prog_aux:
+
+ struct bpf_prog_aux {
+ [...]
+ /* --- cacheline 5 boundary (320 bytes) --- */
+ u32 size_poke_tab; /* 320 4 */
+ [...]
+
+In general, subprograms do not necessarily manage their own data structures.
+For example, BTF func_info and linfo are just pointers to the main program
+structure. This allows reference counting and cleanup to be done on the latter
+which simplifies their management a bit. The aux->poke_tab struct, however,
+did not follow this logic. The initial proposed fix for this use-after-free
+bug further embedded poke data tracking into the subprogram with proper
+reference counting. However, Daniel and Alexei questioned why we were
+treating these objects specially; I agree, it's unnecessary. The fix here
+removes the per subprogram poke table allocation and map tracking and
+instead simply points the aux->poke_tab pointer at the main program's
+poke table. This way, map
+tracking is simplified to the main program and we do not need to manage them
+per subprogram.
+
+This also means bpf_prog_free_deferred(), which unwinds the program reference
+counting and kfrees objects, needs to ensure that we don't try to double free
+the poke_tab when free'ing the subprog structures. This is easily solved by
+NULL'ing the poke_tab pointer. The second detail is to ensure that per
+subprogram JIT logic only does fixups on poke_tab[] entries it owns. To do
+this, we add a pointer in the poke structure to point at the subprogram value
+so JITs can easily check while walking the poke_tab structure if the current
+entry belongs to the current program. The aux pointer is stable and therefore
+suitable for such comparison. On the jit_subprogs() error path, we omit
+cleaning up the poke->aux field because these are only ever referenced from
+the JIT side, but on error we will never make it to the JIT, so it's fine to
+leave them dangling. Removing these pointers would complicate the error path
+for no reason. However, we do need to untrack all poke descriptors from the
+main program as otherwise they could race with the freeing of JIT memory from
+the subprograms. Lastly, a748c6975dea3 ("bpf: propagate poke descriptors to
+subprograms") had an off-by-one on the subprogram instruction index range
+check as it was testing 'insn_idx >= subprog_start && insn_idx <= subprog_end'.
+However, subprog_end is the next subprogram's start instruction.
+
+Fixes: a748c6975dea3 ("bpf: propagate poke descriptors to subprograms")
+Signed-off-by: John Fastabend <john.fastabend@gmail.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Co-developed-by: Daniel Borkmann <daniel@iogearbox.net>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/bpf/20210707223848.14580-2-john.fastabend@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/net/bpf_jit_comp.c | 3 ++
+ include/linux/bpf.h | 1
+ kernel/bpf/core.c | 8 +++++
+ kernel/bpf/verifier.c | 60 +++++++++++++++-----------------------------
+ 4 files changed, 32 insertions(+), 40 deletions(-)
+
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -576,6 +576,9 @@ static void bpf_tail_call_direct_fixup(s
+
+ for (i = 0; i < prog->aux->size_poke_tab; i++) {
+ poke = &prog->aux->poke_tab[i];
++ if (poke->aux && poke->aux != prog->aux)
++ continue;
++
+ WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));
+
+ if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -777,6 +777,7 @@ struct bpf_jit_poke_descriptor {
+ void *tailcall_target;
+ void *tailcall_bypass;
+ void *bypass_addr;
++ void *aux;
+ union {
+ struct {
+ struct bpf_map *map;
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -2236,8 +2236,14 @@ static void bpf_prog_free_deferred(struc
+ #endif
+ if (aux->dst_trampoline)
+ bpf_trampoline_put(aux->dst_trampoline);
+- for (i = 0; i < aux->func_cnt; i++)
++ for (i = 0; i < aux->func_cnt; i++) {
++ /* We can just unlink the subprog poke descriptor table as
++ * it was originally linked to the main program and is also
++ * released along with it.
++ */
++ aux->func[i]->aux->poke_tab = NULL;
+ bpf_jit_free(aux->func[i]);
++ }
+ if (aux->func_cnt) {
+ kfree(aux->func);
+ bpf_prog_unlock_free(aux->prog);
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -12106,33 +12106,19 @@ static int jit_subprogs(struct bpf_verif
+ goto out_free;
+ func[i]->is_func = 1;
+ func[i]->aux->func_idx = i;
+- /* the btf and func_info will be freed only at prog->aux */
++ /* Below members will be freed only at prog->aux */
+ func[i]->aux->btf = prog->aux->btf;
+ func[i]->aux->func_info = prog->aux->func_info;
++ func[i]->aux->poke_tab = prog->aux->poke_tab;
++ func[i]->aux->size_poke_tab = prog->aux->size_poke_tab;
+
+ for (j = 0; j < prog->aux->size_poke_tab; j++) {
+- u32 insn_idx = prog->aux->poke_tab[j].insn_idx;
+- int ret;
++ struct bpf_jit_poke_descriptor *poke;
+
+- if (!(insn_idx >= subprog_start &&
+- insn_idx <= subprog_end))
+- continue;
+-
+- ret = bpf_jit_add_poke_descriptor(func[i],
+- &prog->aux->poke_tab[j]);
+- if (ret < 0) {
+- verbose(env, "adding tail call poke descriptor failed\n");
+- goto out_free;
+- }
+-
+- func[i]->insnsi[insn_idx - subprog_start].imm = ret + 1;
+-
+- map_ptr = func[i]->aux->poke_tab[ret].tail_call.map;
+- ret = map_ptr->ops->map_poke_track(map_ptr, func[i]->aux);
+- if (ret < 0) {
+- verbose(env, "tracking tail call prog failed\n");
+- goto out_free;
+- }
++ poke = &prog->aux->poke_tab[j];
++ if (poke->insn_idx < subprog_end &&
++ poke->insn_idx >= subprog_start)
++ poke->aux = func[i]->aux;
+ }
+
+ /* Use bpf_prog_F_tag to indicate functions in stack traces.
+@@ -12163,18 +12149,6 @@ static int jit_subprogs(struct bpf_verif
+ cond_resched();
+ }
+
+- /* Untrack main program's aux structs so that during map_poke_run()
+- * we will not stumble upon the unfilled poke descriptors; each
+- * of the main program's poke descs got distributed across subprogs
+- * and got tracked onto map, so we are sure that none of them will
+- * be missed after the operation below
+- */
+- for (i = 0; i < prog->aux->size_poke_tab; i++) {
+- map_ptr = prog->aux->poke_tab[i].tail_call.map;
+-
+- map_ptr->ops->map_poke_untrack(map_ptr, prog->aux);
+- }
+-
+ /* at this point all bpf functions were successfully JITed
+ * now populate all bpf_calls with correct addresses and
+ * run last pass of JIT
+@@ -12252,14 +12226,22 @@ static int jit_subprogs(struct bpf_verif
+ bpf_prog_jit_attempt_done(prog);
+ return 0;
+ out_free:
++ /* We failed JIT'ing, so at this point we need to unregister poke
++ * descriptors from subprogs, so that kernel is not attempting to
++ * patch it anymore as we're freeing the subprog JIT memory.
++ */
++ for (i = 0; i < prog->aux->size_poke_tab; i++) {
++ map_ptr = prog->aux->poke_tab[i].tail_call.map;
++ map_ptr->ops->map_poke_untrack(map_ptr, prog->aux);
++ }
++ /* At this point we're guaranteed that poke descriptors are not
++ * live anymore. We can just unlink its descriptor table as it's
++ * released with the main prog.
++ */
+ for (i = 0; i < env->subprog_cnt; i++) {
+ if (!func[i])
+ continue;
+-
+- for (j = 0; j < func[i]->aux->size_poke_tab; j++) {
+- map_ptr = func[i]->aux->poke_tab[j].tail_call.map;
+- map_ptr->ops->map_poke_untrack(map_ptr, func[i]->aux);
+- }
++ func[i]->aux->poke_tab = NULL;
+ bpf_jit_free(func[i]);
+ }
+ kfree(func);
--- /dev/null
+From bc832065b60f973771ff3e657214bb21b559833c Mon Sep 17 00:00:00 2001
+From: Gu Shengxian <gushengxian@yulong.com>
+Date: Mon, 5 Jul 2021 18:35:43 -0700
+Subject: bpftool: Properly close va_list 'ap' by va_end() on error
+
+From: Gu Shengxian <gushengxian@yulong.com>
+
+commit bc832065b60f973771ff3e657214bb21b559833c upstream.
+
+va_list 'ap' was opened but not closed by va_end() in the error case. It should
+be closed by va_end() before the return.
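+
+A minimal standalone sketch (not the bpftool code itself, just the same
+pattern): run va_end() on every path and only check the vasprintf()
+result afterwards:
+
+  #define _GNU_SOURCE
+  #include <stdarg.h>
+  #include <stdio.h>
+  #include <stdlib.h>
+
+  /* printf-style helper that heap-formats its arguments */
+  static int format_and_print(const char *fmt, ...)
+  {
+          va_list ap;
+          char *s;
+          int err;
+
+          va_start(ap, fmt);
+          err = vasprintf(&s, fmt, ap);
+          va_end(ap);        /* closed on both the success and error paths */
+          if (err < 0)
+                  return -1;
+
+          puts(s);
+          free(s);
+          return 0;
+  }
+
+  int main(void)
+  {
+          return format_and_print("%s: %d", "value", 42) < 0;
+  }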
+
+Fixes: aa52bcbe0e72 ("tools: bpftool: Fix json dump crash on powerpc")
+Signed-off-by: Gu Shengxian <gushengxian@yulong.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Link: https://lore.kernel.org/bpf/20210706013543.671114-1-gushengxian507419@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/bpf/bpftool/jit_disasm.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/tools/bpf/bpftool/jit_disasm.c
++++ b/tools/bpf/bpftool/jit_disasm.c
+@@ -43,11 +43,13 @@ static int fprintf_json(void *out, const
+ {
+ va_list ap;
+ char *s;
++ int err;
+
+ va_start(ap, fmt);
+- if (vasprintf(&s, fmt, ap) < 0)
+- return -1;
++ err = vasprintf(&s, fmt, ap);
+ va_end(ap);
++ if (err < 0)
++ return -1;
+
+ if (!oper_count) {
+ int i;
--- /dev/null
+From 1988e0d84161dabd99d1c27033fbd6ee439bf432 Mon Sep 17 00:00:00 2001
+From: Linus Walleij <linus.walleij@linaro.org>
+Date: Fri, 4 Jun 2021 01:18:30 +0200
+Subject: drm/panel: nt35510: Do not fail if DSI read fails
+
+From: Linus Walleij <linus.walleij@linaro.org>
+
+commit 1988e0d84161dabd99d1c27033fbd6ee439bf432 upstream.
+
+Failing to read the MTP over DSI should not bring down the
+system and make us bail out from using the display. It turns
+out that this happens when toggling the display off and on,
+and that writes often still work, so the display output
+is just fine. Printing an error is enough.
+
+Tested by killing the Gnome session repeatedly on the
+Samsung Skomer.
+
+Fixes: 899f24ed8d3a ("drm/panel: Add driver for Novatek NT35510-based panels")
+Cc: Stephan Gerhold <stephan@gerhold.net>
+Reported-by: newbyte@disroot.org
+Acked-by: Stefan Hansson <newbyte@disroot.org>
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20210603231830.3200040-1-linus.walleij@linaro.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/panel/panel-novatek-nt35510.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/panel/panel-novatek-nt35510.c
++++ b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
+@@ -706,9 +706,7 @@ static int nt35510_power_on(struct nt355
+ if (ret)
+ return ret;
+
+- ret = nt35510_read_id(nt);
+- if (ret)
+- return ret;
++ nt35510_read_id(nt);
+
+ /* Set up stuff in manufacturer control, page 1 */
+ ret = nt35510_send_long(nt, dsi, MCS_CMD_MAUCCTR,
--- /dev/null
+From 187a002b07e8089f0b5657eafec50b5d05625569 Mon Sep 17 00:00:00 2001
+From: Cristian Marussi <cristian.marussi@arm.com>
+Date: Mon, 28 Jun 2021 18:00:42 +0100
+Subject: firmware: arm_scmi: Avoid padding in sensor message structure
+
+From: Cristian Marussi <cristian.marussi@arm.com>
+
+commit 187a002b07e8089f0b5657eafec50b5d05625569 upstream.
+
+The scmi_resp_sensor_reading_complete structure is meant to represent an
+SCMI asynchronous reading-complete message. The readings field with
+a 64-bit type forces padding and breaks reads in scmi_sensor_reading_get.
+
+Split it into two adjacent 32-bit readings_low/high subfields to avoid the
+padding within the structure. Alternatively, we could mark the structure
+packed.
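+
+As a hedged illustration (plain uint32_t/uint64_t stand-ins for the
+kernel's __le32/__le64 types), this standalone program shows the hole
+the 64-bit member introduces on ABIs that 8-byte-align u64, and how the
+split layout avoids it:
+
+  #include <stddef.h>
+  #include <stdint.h>
+  #include <stdio.h>
+
+  struct resp_with_u64 {          /* mirrors the old layout */
+          uint32_t id;
+          uint64_t readings;      /* 8-byte alignment forces a 4-byte hole */
+  };
+
+  struct resp_split {             /* mirrors the fixed layout */
+          uint32_t id;
+          uint32_t readings_low;
+          uint32_t readings_high;
+  };
+
+  int main(void)
+  {
+          /* typically prints offset 8/size 16 vs offset 4/size 12 */
+          printf("u64:   offset %zu, size %zu\n",
+                 offsetof(struct resp_with_u64, readings),
+                 sizeof(struct resp_with_u64));
+          printf("split: offset %zu, size %zu\n",
+                 offsetof(struct resp_split, readings_low),
+                 sizeof(struct resp_split));
+          return 0;
+  }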
+
+Link: https://lore.kernel.org/r/20210628170042.34105-1-cristian.marussi@arm.com
+Fixes: e2083d3673916 ("firmware: arm_scmi: Add SCMI v3.0 sensors timestamped reads")
+Signed-off-by: Cristian Marussi <cristian.marussi@arm.com>
+Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/firmware/arm_scmi/sensors.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/firmware/arm_scmi/sensors.c
++++ b/drivers/firmware/arm_scmi/sensors.c
+@@ -166,7 +166,8 @@ struct scmi_msg_sensor_reading_get {
+
+ struct scmi_resp_sensor_reading_complete {
+ __le32 id;
+- __le64 readings;
++ __le32 readings_low;
++ __le32 readings_high;
+ };
+
+ struct scmi_sensor_reading_resp {
+@@ -717,7 +718,8 @@ static int scmi_sensor_reading_get(const
+
+ resp = t->rx.buf;
+ if (le32_to_cpu(resp->id) == sensor_id)
+- *value = get_unaligned_le64(&resp->readings);
++ *value =
++ get_unaligned_le64(&resp->readings_low);
+ else
+ ret = -EPROTO;
+ }
--- /dev/null
+From c7bb4b89033b764eb07db4e060548a6311d801ee Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 8 Jul 2021 00:21:09 -0700
+Subject: ipv6: tcp: drop silly ICMPv6 packet too big messages
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Eric Dumazet <edumazet@google.com>
+
+commit c7bb4b89033b764eb07db4e060548a6311d801ee upstream.
+
+While TCP stack scales reasonably well, there is still one part that
+can be used to DDOS it.
+
+IPv6 Packet too big messages have to lookup/insert a new route,
+and if abused by attackers, can easily put hosts under high stress,
+with many cpus contending on a spinlock while one is stuck in fib6_run_gc()
+
+ip6_protocol_deliver_rcu()
+ icmpv6_rcv()
+ icmpv6_notify()
+ tcp_v6_err()
+ tcp_v6_mtu_reduced()
+ inet6_csk_update_pmtu()
+ ip6_rt_update_pmtu()
+ __ip6_rt_update_pmtu()
+ ip6_rt_cache_alloc()
+ ip6_dst_alloc()
+ dst_alloc()
+ ip6_dst_gc()
+ fib6_run_gc()
+ spin_lock_bh() ...
+
+Some of our servers have been hit by malicious ICMPv6 packets
+trying to _increase_ the MTU/MSS of TCP flows.
+
+We believe these ICMPv6 packets are a result of a bug in one ISP stack,
+since they were blindly sent back for _every_ (small) packet sent to them.
+
+These packets are for one TCP flow:
+09:24:36.266491 IP6 Addr1 > Victim ICMP6, packet too big, mtu 1460, length 1240
+09:24:36.266509 IP6 Addr1 > Victim ICMP6, packet too big, mtu 1460, length 1240
+09:24:36.316688 IP6 Addr1 > Victim ICMP6, packet too big, mtu 1460, length 1240
+09:24:36.316704 IP6 Addr1 > Victim ICMP6, packet too big, mtu 1460, length 1240
+09:24:36.608151 IP6 Addr1 > Victim ICMP6, packet too big, mtu 1460, length 1240
+
+The TCP stack can filter some silly requests:
+
+1) MTU below IPV6_MIN_MTU can be filtered early in tcp_v6_err()
+2) tcp_v6_mtu_reduced() can drop requests trying to increase current MSS.
+
+These tests happen before the IPv6 routing stack is entered, thus
+removing the potential contention and route exhaustion.
+
+Note that the IPv6 stack was performing these checks, but too late
+(i.e. after the route has been added, and after the potential
+garbage collection war).
+
+v2: fix typo caught by Martin, thanks !
+v3: exports tcp_mtu_to_mss(), caught by David, thanks !
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Maciej Żenczykowski <maze@google.com>
+Cc: Martin KaFai Lau <kafai@fb.com>
+Acked-by: Martin KaFai Lau <kafai@fb.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_output.c | 1 +
+ net/ipv6/tcp_ipv6.c | 19 +++++++++++++++++--
+ 2 files changed, 18 insertions(+), 2 deletions(-)
+
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1732,6 +1732,7 @@ int tcp_mtu_to_mss(struct sock *sk, int
+ return __tcp_mtu_to_mss(sk, pmtu) -
+ (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
+ }
++EXPORT_SYMBOL(tcp_mtu_to_mss);
+
+ /* Inverse of above */
+ int tcp_mss_to_mtu(struct sock *sk, int mss)
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -348,11 +348,20 @@ failure:
+ static void tcp_v6_mtu_reduced(struct sock *sk)
+ {
+ struct dst_entry *dst;
++ u32 mtu;
+
+ if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
+ return;
+
+- dst = inet6_csk_update_pmtu(sk, READ_ONCE(tcp_sk(sk)->mtu_info));
++ mtu = READ_ONCE(tcp_sk(sk)->mtu_info);
++
++ /* Drop requests trying to increase our current mss.
++ * Check done in __ip6_rt_update_pmtu() is too late.
++ */
++ if (tcp_mtu_to_mss(sk, mtu) >= tcp_sk(sk)->mss_cache)
++ return;
++
++ dst = inet6_csk_update_pmtu(sk, mtu);
+ if (!dst)
+ return;
+
+@@ -433,6 +442,8 @@ static int tcp_v6_err(struct sk_buff *sk
+ }
+
+ if (type == ICMPV6_PKT_TOOBIG) {
++ u32 mtu = ntohl(info);
++
+ /* We are not interested in TCP_LISTEN and open_requests
+ * (SYN-ACKs send out by Linux are always <576bytes so
+ * they should go through unfragmented).
+@@ -443,7 +454,11 @@ static int tcp_v6_err(struct sk_buff *sk
+ if (!ip6_sk_accept_pmtu(sk))
+ goto out;
+
+- WRITE_ONCE(tp->mtu_info, ntohl(info));
++ if (mtu < IPV6_MIN_MTU)
++ goto out;
++
++ WRITE_ONCE(tp->mtu_info, mtu);
++
+ if (!sock_owned_by_user(sk))
+ tcp_v6_mtu_reduced(sk);
+ else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
--- /dev/null
+From d952cfaf0cffdbbb0433c67206b645131f17ca5f Mon Sep 17 00:00:00 2001
+From: Masahiro Yamada <masahiroy@kernel.org>
+Date: Wed, 14 Jul 2021 13:23:49 +0900
+Subject: kbuild: do not suppress Kconfig prompts for silent build
+
+From: Masahiro Yamada <masahiroy@kernel.org>
+
+commit d952cfaf0cffdbbb0433c67206b645131f17ca5f upstream.
+
+When a new CONFIG option is available, Kbuild shows a prompt to get
+the user input.
+
+ $ make
+ [ snip ]
+ Core Scheduling for SMT (SCHED_CORE) [N/y/?] (NEW)
+
+This is the only interactive place in the build process.
+
+Commit 174a1dcc9642 ("kbuild: sink stdout from cmd for silent build")
+suppressed Kconfig prompts as well because syncconfig is invoked by
+the 'cmd' macro, so you cannot notice that Kconfig is waiting
+for user input.
+
+Use 'kecho' to show the equivalent short log without suppressing stdout
+from sub-make.
+
+Fixes: 174a1dcc9642 ("kbuild: sink stdout from cmd for silent build")
+Reported-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Signed-off-by: Masahiro Yamada <masahiroy@kernel.org>
+Tested-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Makefile | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/Makefile
++++ b/Makefile
+@@ -721,11 +721,12 @@ $(KCONFIG_CONFIG):
+ # This exploits the 'multi-target pattern rule' trick.
+ # The syncconfig should be executed only once to make all the targets.
+ # (Note: use the grouped target '&:' when we bump to GNU Make 4.3)
+-quiet_cmd_syncconfig = SYNC $@
+- cmd_syncconfig = $(MAKE) -f $(srctree)/Makefile syncconfig
+-
++#
++# Do not use $(call cmd,...) here. That would suppress prompts from syncconfig,
++# so you cannot notice that Kconfig is waiting for the user input.
+ %/config/auto.conf %/config/auto.conf.cmd %/generated/autoconf.h: $(KCONFIG_CONFIG)
+- +$(call cmd,syncconfig)
++ $(Q)$(kecho) " SYNC $@"
++ $(Q)$(MAKE) -f $(srctree)/Makefile syncconfig
+ else # !may-sync-config
+ # External modules and some install targets need include/generated/autoconf.h
+ # and include/config/auto.conf but do not care if they are up-to-date.
--- /dev/null
+From c34269041185dad1bab7a34f42ef9fab967a1684 Mon Sep 17 00:00:00 2001
+From: Aaron Ma <aaron.ma@canonical.com>
+Date: Thu, 8 Jul 2021 21:17:10 +0800
+Subject: mt76: mt7921: continue to probe driver when fw already downloaded
+
+From: Aaron Ma <aaron.ma@canonical.com>
+
+commit c34269041185dad1bab7a34f42ef9fab967a1684 upstream.
+
+When the system is rebooted without a power cycle, the firmware is already
+downloaded, and returning -EIO breaks the driver probe with the error:
+mt7921e: probe of 0000:03:00.0 failed with error -5
+
+Skip the firmware download and continue to probe.
+
+Signed-off-by: Aaron Ma <aaron.ma@canonical.com>
+Fixes: 1c099ab44727c ("mt76: mt7921: add MCU support")
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/mediatek/mt76/mt7921/mcu.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+@@ -921,7 +921,7 @@ static int mt7921_load_firmware(struct m
+ ret = mt76_get_field(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY);
+ if (ret) {
+ dev_dbg(dev->mt76.dev, "Firmware is already download\n");
+- return -EIO;
++ goto fw_loaded;
+ }
+
+ ret = mt7921_load_patch(dev);
+@@ -939,6 +939,7 @@ static int mt7921_load_firmware(struct m
+ return -EIO;
+ }
+
++fw_loaded:
+ mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_FWDL], false);
+
+ #ifdef CONFIG_PM
--- /dev/null
+From 937654ce497fb6e977a8c52baee5f7d9616302d9 Mon Sep 17 00:00:00 2001
+From: Riccardo Mancini <rickyman7@gmail.com>
+Date: Thu, 15 Jul 2021 18:07:24 +0200
+Subject: perf test bpf: Free obj_buf
+
+From: Riccardo Mancini <rickyman7@gmail.com>
+
+commit 937654ce497fb6e977a8c52baee5f7d9616302d9 upstream.
+
+ASan reports some memory leaks when running:
+
+ # perf test "42: BPF filter"
+
+The first of these leaks is caused by obj_buf never being deallocated in
+__test__bpf.
+
+This patch adds the missing free.
+
+Signed-off-by: Riccardo Mancini <rickyman7@gmail.com>
+Fixes: ba1fae431e74bb42 ("perf test: Add 'perf test BPF'")
+Cc: Ian Rogers <irogers@google.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Wang Nan <wangnan0@huawei.com>
+Link: http://lore.kernel.org/lkml/60f3ca935fe6672e7e866276ce6264c9e26e4c87.1626343282.git.rickyman7@gmail.com
+[ Added missing stdlib.h include ]
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/perf/tests/bpf.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/tools/perf/tests/bpf.c
++++ b/tools/perf/tests/bpf.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include <errno.h>
+ #include <stdio.h>
++#include <stdlib.h>
+ #include <sys/epoll.h>
+ #include <sys/types.h>
+ #include <sys/stat.h>
+@@ -276,6 +277,7 @@ static int __test__bpf(int idx)
+ }
+
+ out:
++ free(obj_buf);
+ bpf__clear();
+ return ret;
+ }
net-dsa-properly-check-for-the-bridge_leave-methods-in-dsa_switch_bridge_leave.patch
net-fddi-fix-uaf-in-fza_probe.patch
dma-buf-sync_file-don-t-leak-fences-on-merge-failure.patch
+kbuild-do-not-suppress-kconfig-prompts-for-silent-build.patch
+vboxsf-honor-excl-flag-to-the-dir-inode-create-op.patch
+vboxsf-make-vboxsf_dir_create-return-the-handle-for-the-created-file.patch
+vboxsf-add-vboxsf__sf_handle-helpers.patch
+vboxsf-add-support-for-the-atomic_open-directory-inode-op.patch
+arm-dts-aspeed-fix-ast2600-machines-line-names.patch
+arm-dts-tacoma-add-phase-corrections-for-emmc.patch
+arm-dts-everest-add-phase-corrections-for-emmc.patch
+tcp-consistently-disable-header-prediction-for-mptcp.patch
+tcp-annotate-data-races-around-tp-mtu_info.patch
+tcp-fix-tcp_init_transfer-to-not-reset-icsk_ca_initialized.patch
+ipv6-tcp-drop-silly-icmpv6-packet-too-big-messages.patch
+tcp-call-sk_wmem_schedule-before-sk_mem_charge-in-zerocopy-path.patch
+tools-bpf-fix-error-in-make-c-tools-bpf_install.patch
+bpftool-properly-close-va_list-ap-by-va_end-on-error.patch
+bpf-track-subprog-poke-descriptors-correctly-and-fix-use-after-free.patch
+perf-test-bpf-free-obj_buf.patch
+drm-panel-nt35510-do-not-fail-if-dsi-read-fails.patch
+firmware-arm_scmi-avoid-padding-in-sensor-message-structure.patch
+udp-annotate-data-races-around-unix_sk-sk-gso_size.patch
+udp-properly-flush-normal-packet-at-gro-time.patch
+mt76-mt7921-continue-to-probe-driver-when-fw-already-downloaded.patch
--- /dev/null
+From 561022acb1ce62e50f7a8258687a21b84282a4cb Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Fri, 2 Jul 2021 13:09:03 -0700
+Subject: tcp: annotate data races around tp->mtu_info
+
+From: Eric Dumazet <edumazet@google.com>
+
+commit 561022acb1ce62e50f7a8258687a21b84282a4cb upstream.
+
+While tp->mtu_info is read while the socket is owned, the write
+sides happen from err handlers (tcp_v[46]_mtu_reduced)
+which only own the socket spinlock.
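+
+The fix below simply wraps those accesses in READ_ONCE()/WRITE_ONCE().
+As a hedged, standalone illustration (a rough C11 userspace analogue of
+that annotation, not kernel code), relaxed atomics add no ordering; they
+only make the concurrent 32-bit accesses well-defined and tear-free:
+
+  #include <stdatomic.h>
+  #include <stdint.h>
+  #include <stdio.h>
+
+  /* stand-in for the tcp_sock field */
+  struct pmtu_state {
+          _Atomic uint32_t mtu_info;
+  };
+
+  /* error-handler side: writes without holding the full socket lock */
+  void mtu_reduced_event(struct pmtu_state *st, uint32_t mtu)
+  {
+          atomic_store_explicit(&st->mtu_info, mtu, memory_order_relaxed);
+  }
+
+  /* socket-owned side: reads the value exactly once */
+  uint32_t read_mtu_info(struct pmtu_state *st)
+  {
+          return atomic_load_explicit(&st->mtu_info, memory_order_relaxed);
+  }
+
+  int main(void)
+  {
+          struct pmtu_state st = { .mtu_info = 1500 };
+
+          mtu_reduced_event(&st, 1280);
+          printf("mtu_info = %u\n", (unsigned)read_mtu_info(&st));
+          return 0;
+  }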
+
+Fixes: 563d34d05786 ("tcp: dont drop MTU reduction indications")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_ipv4.c | 4 ++--
+ net/ipv6/tcp_ipv6.c | 4 ++--
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -342,7 +342,7 @@ void tcp_v4_mtu_reduced(struct sock *sk)
+
+ if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
+ return;
+- mtu = tcp_sk(sk)->mtu_info;
++ mtu = READ_ONCE(tcp_sk(sk)->mtu_info);
+ dst = inet_csk_update_pmtu(sk, mtu);
+ if (!dst)
+ return;
+@@ -546,7 +546,7 @@ int tcp_v4_err(struct sk_buff *skb, u32
+ if (sk->sk_state == TCP_LISTEN)
+ goto out;
+
+- tp->mtu_info = info;
++ WRITE_ONCE(tp->mtu_info, info);
+ if (!sock_owned_by_user(sk)) {
+ tcp_v4_mtu_reduced(sk);
+ } else {
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -352,7 +352,7 @@ static void tcp_v6_mtu_reduced(struct so
+ if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
+ return;
+
+- dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
++ dst = inet6_csk_update_pmtu(sk, READ_ONCE(tcp_sk(sk)->mtu_info));
+ if (!dst)
+ return;
+
+@@ -443,7 +443,7 @@ static int tcp_v6_err(struct sk_buff *sk
+ if (!ip6_sk_accept_pmtu(sk))
+ goto out;
+
+- tp->mtu_info = ntohl(info);
++ WRITE_ONCE(tp->mtu_info, ntohl(info));
+ if (!sock_owned_by_user(sk))
+ tcp_v6_mtu_reduced(sk);
+ else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
--- /dev/null
+From 358ed624207012f03318235017ac6fb41f8af592 Mon Sep 17 00:00:00 2001
+From: Talal Ahmad <talalahmad@google.com>
+Date: Fri, 9 Jul 2021 11:43:06 -0400
+Subject: tcp: call sk_wmem_schedule before sk_mem_charge in zerocopy path
+
+From: Talal Ahmad <talalahmad@google.com>
+
+commit 358ed624207012f03318235017ac6fb41f8af592 upstream.
+
+sk_wmem_schedule makes sure that sk_forward_alloc has enough
+bytes for charging that is going to be done by sk_mem_charge.
+
+In the transmit zerocopy path, there is sk_mem_charge but there was
+no call to sk_wmem_schedule. This change adds that call.
+
+Without this call to sk_wmem_schedule, sk_forward_alloc can go
+negative, which is a bug because sk_forward_alloc is a per-socket
+space that has been forward charged so this can't be negative.
+
+Fixes: f214f915e7db ("tcp: enable MSG_ZEROCOPY")
+Signed-off-by: Talal Ahmad <talalahmad@google.com>
+Reviewed-by: Willem de Bruijn <willemb@google.com>
+Reviewed-by: Wei Wang <weiwan@google.com>
+Reviewed-by: Soheil Hassas Yeganeh <soheil@google.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -1375,6 +1375,9 @@ new_segment:
+ }
+ pfrag->offset += copy;
+ } else {
++ if (!sk_wmem_schedule(sk, copy))
++ goto wait_for_space;
++
+ err = skb_zerocopy_iter_stream(sk, skb, msg, copy, uarg);
+ if (err == -EMSGSIZE || err == -EEXIST) {
+ tcp_mark_push(tp, skb);
--- /dev/null
+From 71158bb1f2d2da61385c58fc1114e1a1c19984ba Mon Sep 17 00:00:00 2001
+From: Paolo Abeni <pabeni@redhat.com>
+Date: Wed, 30 Jun 2021 13:42:13 +0200
+Subject: tcp: consistently disable header prediction for mptcp
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+commit 71158bb1f2d2da61385c58fc1114e1a1c19984ba upstream.
+
+The MPTCP receive path is hooked only into the TCP slow-path.
+The DSS presence allows plain MPTCP traffic to hit that
+consistently.
+
+Since commit e1ff9e82e2ea ("net: mptcp: improve fallback to TCP"),
+when an MPTCP socket falls back to TCP, it can hit the TCP receive
+fast-path, and delay or stop triggering the event notification.
+
+Address the issue by explicitly disabling header prediction
+for MPTCP sockets.
+
+Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/200
+Fixes: e1ff9e82e2ea ("net: mptcp: improve fallback to TCP")
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/tcp.h | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -682,6 +682,10 @@ static inline u32 __tcp_set_rto(const st
+
+ static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
+ {
++ /* mptcp hooks are only on the slow path */
++ if (sk_is_mptcp((struct sock *)tp))
++ return;
++
+ tp->pred_flags = htonl((tp->tcp_header_len << 26) |
+ ntohl(TCP_FLAG_ACK) |
+ snd_wnd);
--- /dev/null
+From be5d1b61a2ad28c7e57fe8bfa277373e8ecffcdc Mon Sep 17 00:00:00 2001
+From: Nguyen Dinh Phi <phind.uet@gmail.com>
+Date: Tue, 6 Jul 2021 07:19:12 +0800
+Subject: tcp: fix tcp_init_transfer() to not reset icsk_ca_initialized
+
+From: Nguyen Dinh Phi <phind.uet@gmail.com>
+
+commit be5d1b61a2ad28c7e57fe8bfa277373e8ecffcdc upstream.
+
+This commit fixes a bug (found by syzkaller) that could cause spurious
+double-initializations for congestion control modules, which could cause
+memory leaks or other problems for congestion control modules (like CDG)
+that allocate memory in their init functions.
+
+The buggy scenario constructed by syzkaller was something like:
+
+(1) create a TCP socket
+(2) initiate a TFO connect via sendto()
+(3) while socket is in TCP_SYN_SENT, call setsockopt(TCP_CONGESTION),
+ which calls:
+ tcp_set_congestion_control() ->
+ tcp_reinit_congestion_control() ->
+ tcp_init_congestion_control()
+(4) receive ACK, connection is established, call tcp_init_transfer(),
+ set icsk_ca_initialized=0 (without first calling cc->release()),
+ call tcp_init_congestion_control() again.
+
+Note that in this sequence tcp_init_congestion_control() is called
+twice without a cc->release() call in between. Thus, for CC modules
+that allocate memory in their init() function, e.g, CDG, a memory leak
+may occur. The syzkaller tool managed to find a reproducer that
+triggered such a leak in CDG.
+
+The bug was introduced when commit 8919a9b31eb4 ("tcp: Only init
+congestion control if not initialized already")
+introduced icsk_ca_initialized and set icsk_ca_initialized to 0 in
+tcp_init_transfer(), missing the possibility for a sequence like the
+one above, where a process could call setsockopt(TCP_CONGESTION) in
+state TCP_SYN_SENT (i.e. after the connect() or TFO open sendmsg()),
+which would call tcp_init_congestion_control(). It did not intend to
+reset any initialization that the user had already explicitly made;
+it just missed the possibility of that particular sequence (which
+syzkaller managed to find).
+
+Fixes: 8919a9b31eb4 ("tcp: Only init congestion control if not initialized already")
+Reported-by: syzbot+f1e24a0594d4e3a895d3@syzkaller.appspotmail.com
+Signed-off-by: Nguyen Dinh Phi <phind.uet@gmail.com>
+Acked-by: Neal Cardwell <ncardwell@google.com>
+Tested-by: Neal Cardwell <ncardwell@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_input.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -5921,8 +5921,8 @@ void tcp_init_transfer(struct sock *sk,
+ tp->snd_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
+ tp->snd_cwnd_stamp = tcp_jiffies32;
+
+- icsk->icsk_ca_initialized = 0;
+ bpf_skops_established(sk, bpf_op, skb);
++ /* Initialize congestion control unless BPF initialized it already: */
+ if (!icsk->icsk_ca_initialized)
+ tcp_init_congestion_control(sk);
+ tcp_init_buffer_space(sk);
--- /dev/null
+From 1d719254c139fb62fb8056fb496b6fd007e71550 Mon Sep 17 00:00:00 2001
+From: Wei Li <liwei391@huawei.com>
+Date: Mon, 28 Jun 2021 11:04:09 +0800
+Subject: tools: bpf: Fix error in 'make -C tools/ bpf_install'
+
+From: Wei Li <liwei391@huawei.com>
+
+commit 1d719254c139fb62fb8056fb496b6fd007e71550 upstream.
+
+make[2]: *** No rule to make target 'install'. Stop.
+make[1]: *** [Makefile:122: runqslower_install] Error 2
+make: *** [Makefile:116: bpf_install] Error 2
+
+There is no rule for target 'install' in tools/bpf/runqslower/Makefile,
+and there is no need to install it, so just remove 'runqslower_install'.
+
+Fixes: 9c01546d26d2 ("tools/bpf: Add runqslower tool to tools/bpf")
+Signed-off-by: Wei Li <liwei391@huawei.com>
+Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
+Link: https://lore.kernel.org/bpf/20210628030409.3459095-1-liwei391@huawei.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/bpf/Makefile | 7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+--- a/tools/bpf/Makefile
++++ b/tools/bpf/Makefile
+@@ -97,7 +97,7 @@ clean: bpftool_clean runqslower_clean re
+ $(Q)$(RM) -- $(OUTPUT)FEATURE-DUMP.bpf
+ $(Q)$(RM) -r -- $(OUTPUT)feature
+
+-install: $(PROGS) bpftool_install runqslower_install
++install: $(PROGS) bpftool_install
+ $(call QUIET_INSTALL, bpf_jit_disasm)
+ $(Q)$(INSTALL) -m 0755 -d $(DESTDIR)$(prefix)/bin
+ $(Q)$(INSTALL) $(OUTPUT)bpf_jit_disasm $(DESTDIR)$(prefix)/bin/bpf_jit_disasm
+@@ -118,9 +118,6 @@ bpftool_clean:
+ runqslower:
+ $(call descend,runqslower)
+
+-runqslower_install:
+- $(call descend,runqslower,install)
+-
+ runqslower_clean:
+ $(call descend,runqslower,clean)
+
+@@ -131,5 +128,5 @@ resolve_btfids_clean:
+ $(call descend,resolve_btfids,clean)
+
+ .PHONY: all install clean bpftool bpftool_install bpftool_clean \
+- runqslower runqslower_install runqslower_clean \
++ runqslower runqslower_clean \
+ resolve_btfids resolve_btfids_clean
--- /dev/null
+From 18a419bad63b7f68a1979e28459782518e7b6bbe Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 30 Jun 2021 09:42:44 -0700
+Subject: udp: annotate data races around unix_sk(sk)->gso_size
+
+From: Eric Dumazet <edumazet@google.com>
+
+commit 18a419bad63b7f68a1979e28459782518e7b6bbe upstream.
+
+Accesses to unix_sk(sk)->gso_size are lockless.
+Add READ_ONCE()/WRITE_ONCE() around them.
+
+BUG: KCSAN: data-race in udp_lib_setsockopt / udpv6_sendmsg
+
+write to 0xffff88812d78f47c of 2 bytes by task 10849 on cpu 1:
+ udp_lib_setsockopt+0x3b3/0x710 net/ipv4/udp.c:2696
+ udpv6_setsockopt+0x63/0x90 net/ipv6/udp.c:1630
+ sock_common_setsockopt+0x5d/0x70 net/core/sock.c:3265
+ __sys_setsockopt+0x18f/0x200 net/socket.c:2104
+ __do_sys_setsockopt net/socket.c:2115 [inline]
+ __se_sys_setsockopt net/socket.c:2112 [inline]
+ __x64_sys_setsockopt+0x62/0x70 net/socket.c:2112
+ do_syscall_64+0x4a/0x90 arch/x86/entry/common.c:47
+ entry_SYSCALL_64_after_hwframe+0x44/0xae
+
+read to 0xffff88812d78f47c of 2 bytes by task 10852 on cpu 0:
+ udpv6_sendmsg+0x161/0x16b0 net/ipv6/udp.c:1299
+ inet6_sendmsg+0x5f/0x80 net/ipv6/af_inet6.c:642
+ sock_sendmsg_nosec net/socket.c:654 [inline]
+ sock_sendmsg net/socket.c:674 [inline]
+ ____sys_sendmsg+0x360/0x4d0 net/socket.c:2337
+ ___sys_sendmsg net/socket.c:2391 [inline]
+ __sys_sendmmsg+0x315/0x4b0 net/socket.c:2477
+ __do_sys_sendmmsg net/socket.c:2506 [inline]
+ __se_sys_sendmmsg net/socket.c:2503 [inline]
+ __x64_sys_sendmmsg+0x53/0x60 net/socket.c:2503
+ do_syscall_64+0x4a/0x90 arch/x86/entry/common.c:47
+ entry_SYSCALL_64_after_hwframe+0x44/0xae
+
+value changed: 0x0000 -> 0x0005
+
+Reported by Kernel Concurrency Sanitizer on:
+CPU: 0 PID: 10852 Comm: syz-executor.0 Not tainted 5.13.0-syzkaller #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+
+Fixes: bec1f6f69736 ("udp: generate gso with UDP_SEGMENT")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Willem de Bruijn <willemb@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/udp.c | 6 +++---
+ net/ipv6/udp.c | 2 +-
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1102,7 +1102,7 @@ int udp_sendmsg(struct sock *sk, struct
+ }
+
+ ipcm_init_sk(&ipc, inet);
+- ipc.gso_size = up->gso_size;
++ ipc.gso_size = READ_ONCE(up->gso_size);
+
+ if (msg->msg_controllen) {
+ err = udp_cmsg_send(sk, msg, &ipc.gso_size);
+@@ -2695,7 +2695,7 @@ int udp_lib_setsockopt(struct sock *sk,
+ case UDP_SEGMENT:
+ if (val < 0 || val > USHRT_MAX)
+ return -EINVAL;
+- up->gso_size = val;
++ WRITE_ONCE(up->gso_size, val);
+ break;
+
+ case UDP_GRO:
+@@ -2790,7 +2790,7 @@ int udp_lib_getsockopt(struct sock *sk,
+ break;
+
+ case UDP_SEGMENT:
+- val = up->gso_size;
++ val = READ_ONCE(up->gso_size);
+ break;
+
+ case UDP_GRO:
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1296,7 +1296,7 @@ int udpv6_sendmsg(struct sock *sk, struc
+ int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
+
+ ipcm6_init(&ipc6);
+- ipc6.gso_size = up->gso_size;
++ ipc6.gso_size = READ_ONCE(up->gso_size);
+ ipc6.sockc.tsflags = sk->sk_tsflags;
+ ipc6.sockc.mark = sk->sk_mark;
+
--- /dev/null
+From b43c8909be52f2baca8884f967b418a88424494a Mon Sep 17 00:00:00 2001
+From: Paolo Abeni <pabeni@redhat.com>
+Date: Sat, 3 Jul 2021 00:38:43 +0200
+Subject: udp: properly flush normal packet at GRO time
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+commit b43c8909be52f2baca8884f967b418a88424494a upstream.
+
+If a UDP packet enters the GRO engine but is not eligible
+for aggregation and is not targeting a UDP tunnel,
+udp_gro_receive() will not set the flush bit, and the packet
+could be delayed until the next napi flush.
+
+Fix the issue by ensuring that non-GROed packets traverse
+skb_gro_flush_final().
+
+Reported-and-tested-by: Matthias Treydte <mt@waldheinz.de>
+Fixes: 18f25dc39990 ("udp: skip L4 aggregation for UDP tunnel packets")
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/udp_offload.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/net/ipv4/udp_offload.c
++++ b/net/ipv4/udp_offload.c
+@@ -525,8 +525,10 @@ struct sk_buff *udp_gro_receive(struct l
+
+ if ((!sk && (skb->dev->features & NETIF_F_GRO_UDP_FWD)) ||
+ (sk && udp_sk(sk)->gro_enabled) || NAPI_GRO_CB(skb)->is_flist)
+- pp = call_gro_receive(udp_gro_receive_segment, head, skb);
+- return pp;
++ return call_gro_receive(udp_gro_receive_segment, head, skb);
++
++ /* no GRO, be sure flush the current packet */
++ goto out;
+ }
+
+ if (NAPI_GRO_CB(skb)->encap_mark ||
--- /dev/null
+From 52dfd86aa568e433b24357bb5fc725560f1e22d8 Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Thu, 21 Jan 2021 12:54:18 +0100
+Subject: vboxsf: Add support for the atomic_open directory-inode op
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+commit 52dfd86aa568e433b24357bb5fc725560f1e22d8 upstream.
+
+Opening a new file is done in 2 steps on regular filesystems:
+
+1. Call the create inode-op on the parent-dir to create an inode
+to hold the meta-data related to the file.
+2. Call the open file-op to get a handle for the file.
+
+vboxsf however does not really use disk-backed inodes because it
+is based on passing file-related system-calls through to
+the hypervisor. So both steps translate to an open(2) call being
+passed through to the hypervisor, with the handle returned by
+the first call immediately being closed again.
+
+Making 2 open calls for a single open(..., O_CREAT, ...) call
+has 2 problems:
+
+a) It is not really efficient.
+b) It actually breaks some apps.
+
+An example of b) is doing a git clone inside a vboxsf mount.
+When git clone tries to create a tempfile to store the pack
+files which it is downloading, the following happens:
+
+1. vboxsf_dir_mkfile() gets called with a mode of 0444 and succeeds.
+2. vboxsf_file_open() gets called with file->f_flags containing
+O_RDWR. When the host is a Linux machine this fails because doing
+an open(..., O_RDWR) on a file which exists and has mode 0444 results
+in an -EPERM error.
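+
+A hedged userspace illustration of why the split into a separate create
+call plus open call goes wrong here (a standalone sketch, not vboxsf
+code; run as a regular user, and note the exact errno seen through the
+hypervisor differs from a local filesystem, but the second open fails
+either way):
+
+  #include <fcntl.h>
+  #include <stdio.h>
+  #include <unistd.h>
+
+  int main(void)
+  {
+          const char *path = "pack-tmp-demo";  /* hypothetical temp file */
+          int fd;
+
+          /* step 1: the create op -- the file comes into existence as
+           * mode 0444 and the handle is immediately closed again */
+          fd = open(path, O_CREAT | O_WRONLY, 0444);
+          if (fd >= 0)
+                  close(fd);
+
+          /* step 2: the open op -- reopening the now-existing 0444 file
+           * read-write is refused by the permission check */
+          fd = open(path, O_RDWR);
+          if (fd < 0)
+                  perror("reopen O_RDWR");
+          else
+                  close(fd);
+
+          unlink(path);
+          return 0;
+  }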
+
+Other network-filesystems and fuse avoid the problem of needing to
+pass 2 open() calls to the other side by using the atomic_open
+directory-inode op.
+
+This commit fixes git clone not working inside a vboxsf mount,
+by adding support for the atomic_open directory-inode op.
+As an added bonus this should also make opening new files faster.
+
+The atomic_open implementation is modelled after the atomic_open
+implementations from the 9p and fuse code.
+
+Fixes: 0fd169576648 ("fs: Add VirtualBox guest shared folder (vboxsf) support")
+Reported-by: Ludovic Pouzenc <bugreports@pouzenc.fr>
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/vboxsf/dir.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 48 insertions(+)
+
+--- a/fs/vboxsf/dir.c
++++ b/fs/vboxsf/dir.c
+@@ -308,6 +308,53 @@ static int vboxsf_dir_mkdir(struct user_
+ return vboxsf_dir_create(parent, dentry, mode, true, true, NULL);
+ }
+
++static int vboxsf_dir_atomic_open(struct inode *parent, struct dentry *dentry,
++ struct file *file, unsigned int flags, umode_t mode)
++{
++ struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb);
++ struct vboxsf_handle *sf_handle;
++ struct dentry *res = NULL;
++ u64 handle;
++ int err;
++
++ if (d_in_lookup(dentry)) {
++ res = vboxsf_dir_lookup(parent, dentry, 0);
++ if (IS_ERR(res))
++ return PTR_ERR(res);
++
++ if (res)
++ dentry = res;
++ }
++
++ /* Only creates */
++ if (!(flags & O_CREAT) || d_really_is_positive(dentry))
++ return finish_no_open(file, res);
++
++ err = vboxsf_dir_create(parent, dentry, mode, false, flags & O_EXCL, &handle);
++ if (err)
++ goto out;
++
++ sf_handle = vboxsf_create_sf_handle(d_inode(dentry), handle, SHFL_CF_ACCESS_READWRITE);
++ if (IS_ERR(sf_handle)) {
++ vboxsf_close(sbi->root, handle);
++ err = PTR_ERR(sf_handle);
++ goto out;
++ }
++
++ err = finish_open(file, dentry, generic_file_open);
++ if (err) {
++ /* This also closes the handle passed to vboxsf_create_sf_handle() */
++ vboxsf_release_sf_handle(d_inode(dentry), sf_handle);
++ goto out;
++ }
++
++ file->private_data = sf_handle;
++ file->f_mode |= FMODE_CREATED;
++out:
++ dput(res);
++ return err;
++}
++
+ static int vboxsf_dir_unlink(struct inode *parent, struct dentry *dentry)
+ {
+ struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb);
+@@ -428,6 +475,7 @@ const struct inode_operations vboxsf_dir
+ .lookup = vboxsf_dir_lookup,
+ .create = vboxsf_dir_mkfile,
+ .mkdir = vboxsf_dir_mkdir,
++ .atomic_open = vboxsf_dir_atomic_open,
+ .rmdir = vboxsf_dir_unlink,
+ .unlink = vboxsf_dir_unlink,
+ .rename = vboxsf_dir_rename,
--- /dev/null
+From 02f840f90764f22f5c898901849bdbf0cee752ba Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Thu, 21 Jan 2021 10:55:03 +0100
+Subject: vboxsf: Add vboxsf_[create|release]_sf_handle() helpers
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+commit 02f840f90764f22f5c898901849bdbf0cee752ba upstream.
+
+Factor out the code to create / release a struct vboxsf_handle into
+2 new helper functions.
+
+This is a preparation patch for adding atomic_open support.
+
+Fixes: 0fd169576648 ("fs: Add VirtualBox guest shared folder (vboxsf) support")
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/vboxsf/file.c | 71 ++++++++++++++++++++++++++++++++---------------------
+ fs/vboxsf/vfsmod.h | 7 +++++
+ 2 files changed, 51 insertions(+), 27 deletions(-)
+
+--- a/fs/vboxsf/file.c
++++ b/fs/vboxsf/file.c
+@@ -20,17 +20,39 @@ struct vboxsf_handle {
+ struct list_head head;
+ };
+
+-static int vboxsf_file_open(struct inode *inode, struct file *file)
++struct vboxsf_handle *vboxsf_create_sf_handle(struct inode *inode,
++ u64 handle, u32 access_flags)
+ {
+ struct vboxsf_inode *sf_i = VBOXSF_I(inode);
+- struct shfl_createparms params = {};
+ struct vboxsf_handle *sf_handle;
+- u32 access_flags = 0;
+- int err;
+
+ sf_handle = kmalloc(sizeof(*sf_handle), GFP_KERNEL);
+ if (!sf_handle)
+- return -ENOMEM;
++ return ERR_PTR(-ENOMEM);
++
++ /* the host may have given us different attr then requested */
++ sf_i->force_restat = 1;
++
++ /* init our handle struct and add it to the inode's handles list */
++ sf_handle->handle = handle;
++ sf_handle->root = VBOXSF_SBI(inode->i_sb)->root;
++ sf_handle->access_flags = access_flags;
++ kref_init(&sf_handle->refcount);
++
++ mutex_lock(&sf_i->handle_list_mutex);
++ list_add(&sf_handle->head, &sf_i->handle_list);
++ mutex_unlock(&sf_i->handle_list_mutex);
++
++ return sf_handle;
++}
++
++static int vboxsf_file_open(struct inode *inode, struct file *file)
++{
++ struct vboxsf_sbi *sbi = VBOXSF_SBI(inode->i_sb);
++ struct shfl_createparms params = {};
++ struct vboxsf_handle *sf_handle;
++ u32 access_flags = 0;
++ int err;
+
+ /*
+ * We check the value of params.handle afterwards to find out if
+@@ -83,23 +105,14 @@ static int vboxsf_file_open(struct inode
+ err = vboxsf_create_at_dentry(file_dentry(file), ¶ms);
+ if (err == 0 && params.handle == SHFL_HANDLE_NIL)
+ err = (params.result == SHFL_FILE_EXISTS) ? -EEXIST : -ENOENT;
+- if (err) {
+- kfree(sf_handle);
++ if (err)
+ return err;
+- }
+
+- /* the host may have given us different attr then requested */
+- sf_i->force_restat = 1;
+-
+- /* init our handle struct and add it to the inode's handles list */
+- sf_handle->handle = params.handle;
+- sf_handle->root = VBOXSF_SBI(inode->i_sb)->root;
+- sf_handle->access_flags = access_flags;
+- kref_init(&sf_handle->refcount);
+-
+- mutex_lock(&sf_i->handle_list_mutex);
+- list_add(&sf_handle->head, &sf_i->handle_list);
+- mutex_unlock(&sf_i->handle_list_mutex);
++ sf_handle = vboxsf_create_sf_handle(inode, params.handle, access_flags);
++ if (IS_ERR(sf_handle)) {
++ vboxsf_close(sbi->root, params.handle);
++ return PTR_ERR(sf_handle);
++ }
+
+ file->private_data = sf_handle;
+ return 0;
+@@ -114,22 +127,26 @@ static void vboxsf_handle_release(struct
+ kfree(sf_handle);
+ }
+
+-static int vboxsf_file_release(struct inode *inode, struct file *file)
++void vboxsf_release_sf_handle(struct inode *inode, struct vboxsf_handle *sf_handle)
+ {
+ struct vboxsf_inode *sf_i = VBOXSF_I(inode);
+- struct vboxsf_handle *sf_handle = file->private_data;
+
++ mutex_lock(&sf_i->handle_list_mutex);
++ list_del(&sf_handle->head);
++ mutex_unlock(&sf_i->handle_list_mutex);
++
++ kref_put(&sf_handle->refcount, vboxsf_handle_release);
++}
++
++static int vboxsf_file_release(struct inode *inode, struct file *file)
++{
+ /*
+ * When a file is closed on our (the guest) side, we want any subsequent
+ * accesses done on the host side to see all changes done from our side.
+ */
+ filemap_write_and_wait(inode->i_mapping);
+
+- mutex_lock(&sf_i->handle_list_mutex);
+- list_del(&sf_handle->head);
+- mutex_unlock(&sf_i->handle_list_mutex);
+-
+- kref_put(&sf_handle->refcount, vboxsf_handle_release);
++ vboxsf_release_sf_handle(inode, file->private_data);
+ return 0;
+ }
+
+--- a/fs/vboxsf/vfsmod.h
++++ b/fs/vboxsf/vfsmod.h
+@@ -18,6 +18,8 @@
+ #define VBOXSF_SBI(sb) ((struct vboxsf_sbi *)(sb)->s_fs_info)
+ #define VBOXSF_I(i) container_of(i, struct vboxsf_inode, vfs_inode)
+
++struct vboxsf_handle;
++
+ struct vboxsf_options {
+ unsigned long ttl;
+ kuid_t uid;
+@@ -80,6 +82,11 @@ extern const struct file_operations vbox
+ extern const struct address_space_operations vboxsf_reg_aops;
+ extern const struct dentry_operations vboxsf_dentry_ops;
+
++/* from file.c */
++struct vboxsf_handle *vboxsf_create_sf_handle(struct inode *inode,
++ u64 handle, u32 access_flags);
++void vboxsf_release_sf_handle(struct inode *inode, struct vboxsf_handle *sf_handle);
++
+ /* from utils.c */
+ struct inode *vboxsf_new_inode(struct super_block *sb);
+ int vboxsf_init_inode(struct vboxsf_sbi *sbi, struct inode *inode,
--- /dev/null
+From cc3ddee97cff034cea4d095de4a484c92a219bf5 Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Thu, 21 Jan 2021 10:08:59 +0100
+Subject: vboxsf: Honor excl flag to the dir-inode create op
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+commit cc3ddee97cff034cea4d095de4a484c92a219bf5 upstream.
+
+Honor the excl flag to the dir-inode create op, instead of behaving
+as if it is always set.
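+
+For reference, the userspace semantics the excl flag forwards (a minimal
+standalone sketch, not vboxsf code): with O_CREAT alone, opening an
+existing file succeeds, while O_CREAT|O_EXCL must fail with EEXIST,
+which is what mapping excl to SHFL_CF_ACT_FAIL_IF_EXISTS preserves:
+
+  #include <errno.h>
+  #include <fcntl.h>
+  #include <stdio.h>
+  #include <unistd.h>
+
+  int main(void)
+  {
+          const char *path = "excl-demo";      /* hypothetical test file */
+          int fd;
+
+          fd = open(path, O_CREAT | O_WRONLY, 0644);  /* file now exists */
+          if (fd >= 0)
+                  close(fd);
+
+          /* non-exclusive create: succeeds on an existing file */
+          fd = open(path, O_CREAT | O_WRONLY, 0644);
+          printf("O_CREAT:        %s\n", fd >= 0 ? "ok" : "failed");
+          if (fd >= 0)
+                  close(fd);
+
+          /* exclusive create: must fail with EEXIST on an existing file */
+          fd = open(path, O_CREAT | O_EXCL | O_WRONLY, 0644);
+          printf("O_CREAT|O_EXCL: %s\n",
+                 fd < 0 && errno == EEXIST ? "EEXIST (expected)" : "unexpected");
+          if (fd >= 0)
+                  close(fd);
+
+          unlink(path);
+          return 0;
+  }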
+
+Note that the old behavior still worked most of the time, since a
+non-exclusive open only calls the create op if there is a race and the
+file is created between the dentry lookup and the call to the create op.
+
+While at it, change the type of the is_dir parameter to the
+vboxsf_dir_create() helper from an int to a bool, to be consistent with
+the use of bool for the excl parameter.
+
+Fixes: 0fd169576648 ("fs: Add VirtualBox guest shared folder (vboxsf) support")
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/vboxsf/dir.c | 16 +++++++++-------
+ 1 file changed, 9 insertions(+), 7 deletions(-)
+
+--- a/fs/vboxsf/dir.c
++++ b/fs/vboxsf/dir.c
+@@ -253,7 +253,7 @@ static int vboxsf_dir_instantiate(struct
+ }
+
+ static int vboxsf_dir_create(struct inode *parent, struct dentry *dentry,
+- umode_t mode, int is_dir)
++ umode_t mode, bool is_dir, bool excl)
+ {
+ struct vboxsf_inode *sf_parent_i = VBOXSF_I(parent);
+ struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb);
+@@ -261,10 +261,12 @@ static int vboxsf_dir_create(struct inod
+ int err;
+
+ params.handle = SHFL_HANDLE_NIL;
+- params.create_flags = SHFL_CF_ACT_CREATE_IF_NEW |
+- SHFL_CF_ACT_FAIL_IF_EXISTS |
+- SHFL_CF_ACCESS_READWRITE |
+- (is_dir ? SHFL_CF_DIRECTORY : 0);
++ params.create_flags = SHFL_CF_ACT_CREATE_IF_NEW | SHFL_CF_ACCESS_READWRITE;
++ if (is_dir)
++ params.create_flags |= SHFL_CF_DIRECTORY;
++ if (excl)
++ params.create_flags |= SHFL_CF_ACT_FAIL_IF_EXISTS;
++
+ params.info.attr.mode = (mode & 0777) |
+ (is_dir ? SHFL_TYPE_DIRECTORY : SHFL_TYPE_FILE);
+ params.info.attr.additional = SHFLFSOBJATTRADD_NOTHING;
+@@ -292,14 +294,14 @@ static int vboxsf_dir_mkfile(struct user
+ struct inode *parent, struct dentry *dentry,
+ umode_t mode, bool excl)
+ {
+- return vboxsf_dir_create(parent, dentry, mode, 0);
++ return vboxsf_dir_create(parent, dentry, mode, false, excl);
+ }
+
+ static int vboxsf_dir_mkdir(struct user_namespace *mnt_userns,
+ struct inode *parent, struct dentry *dentry,
+ umode_t mode)
+ {
+- return vboxsf_dir_create(parent, dentry, mode, 1);
++ return vboxsf_dir_create(parent, dentry, mode, true, true);
+ }
+
+ static int vboxsf_dir_unlink(struct inode *parent, struct dentry *dentry)
--- /dev/null
+From ab0c29687bc7a890d1a86ac376b0b0fd78b2d9b6 Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Thu, 21 Jan 2021 10:22:27 +0100
+Subject: vboxsf: Make vboxsf_dir_create() return the handle for the created file
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+commit ab0c29687bc7a890d1a86ac376b0b0fd78b2d9b6 upstream.
+
+Make vboxsf_dir_create() optionally return the vboxsf-handle for
+the created file. This is a preparation patch for adding atomic_open
+support.
+
+Fixes: 0fd169576648 ("fs: Add VirtualBox guest shared folder (vboxsf) support")
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/vboxsf/dir.c | 18 +++++++++++-------
+ 1 file changed, 11 insertions(+), 7 deletions(-)
+
+--- a/fs/vboxsf/dir.c
++++ b/fs/vboxsf/dir.c
+@@ -253,7 +253,7 @@ static int vboxsf_dir_instantiate(struct
+ }
+
+ static int vboxsf_dir_create(struct inode *parent, struct dentry *dentry,
+- umode_t mode, bool is_dir, bool excl)
++ umode_t mode, bool is_dir, bool excl, u64 *handle_ret)
+ {
+ struct vboxsf_inode *sf_parent_i = VBOXSF_I(parent);
+ struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb);
+@@ -278,30 +278,34 @@ static int vboxsf_dir_create(struct inod
+ if (params.result != SHFL_FILE_CREATED)
+ return -EPERM;
+
+- vboxsf_close(sbi->root, params.handle);
+-
+ err = vboxsf_dir_instantiate(parent, dentry, ¶ms.info);
+ if (err)
+- return err;
++ goto out;
+
+ /* parent directory access/change time changed */
+ sf_parent_i->force_restat = 1;
+
+- return 0;
++out:
++ if (err == 0 && handle_ret)
++ *handle_ret = params.handle;
++ else
++ vboxsf_close(sbi->root, params.handle);
++
++ return err;
+ }
+
+ static int vboxsf_dir_mkfile(struct user_namespace *mnt_userns,
+ struct inode *parent, struct dentry *dentry,
+ umode_t mode, bool excl)
+ {
+- return vboxsf_dir_create(parent, dentry, mode, false, excl);
++ return vboxsf_dir_create(parent, dentry, mode, false, excl, NULL);
+ }
+
+ static int vboxsf_dir_mkdir(struct user_namespace *mnt_userns,
+ struct inode *parent, struct dentry *dentry,
+ umode_t mode)
+ {
+- return vboxsf_dir_create(parent, dentry, mode, true, true);
++ return vboxsf_dir_create(parent, dentry, mode, true, true, NULL);
+ }
+
+ static int vboxsf_dir_unlink(struct inode *parent, struct dentry *dentry)