--- /dev/null
+From ca46ad2214473df1a6a9496be17156d65ba89b9f Mon Sep 17 00:00:00 2001
+From: Joel Stanley <joel@jms.id.au>
+Date: Thu, 24 Jun 2021 18:37:42 +0930
+Subject: ARM: dts: aspeed: Fix AST2600 machines line names
+
+From: Joel Stanley <joel@jms.id.au>
+
+commit ca46ad2214473df1a6a9496be17156d65ba89b9f upstream.
+
+Tacoma and Rainier both have a line-names array that is too long:
+
+ gpio gpiochip0: gpio-line-names is length 232 but should be at most length 208
+
+This was probably copied from an AST2500 device tree that did have more
+GPIOs on the controller.
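+
+For reference, the arithmetic behind the warning: the trimmed list covers
+banks A-Z only, i.e. 26 banks x 8 lines = 208 names, matching the
+controller's 208 GPIO lines, while the copied AST2500-style list with the
+extra AA-AC banks carried 29 banks x 8 lines = 232 names.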
+
+Fixes: e9b24b55ca4f ("ARM: dts: aspeed: rainier: Add gpio line names")
+Fixes: 2f68e4e7df67 ("ARM: dts: aspeed: tacoma: Add gpio line names")
+Link: https://lore.kernel.org/r/20210624090742.56640-1-joel@jms.id.au
+Signed-off-by: Joel Stanley <joel@jms.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts | 5 +----
+ arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts | 5 +----
+ 2 files changed, 2 insertions(+), 8 deletions(-)
+
+--- a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
++++ b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
+@@ -156,10 +156,7 @@
+ /*W0-W7*/ "","","","","","","","",
+ /*X0-X7*/ "","","","","","","","",
+ /*Y0-Y7*/ "","","","","","","","",
+- /*Z0-Z7*/ "","","","","","","","",
+- /*AA0-AA7*/ "","","","","","","","",
+- /*AB0-AB7*/ "","","","","","","","",
+- /*AC0-AC7*/ "","","","","","","","";
++ /*Z0-Z7*/ "","","","","","","","";
+
+ pin_mclr_vpp {
+ gpio-hog;
+--- a/arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts
++++ b/arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts
+@@ -127,10 +127,7 @@
+ /*W0-W7*/ "","","","","","","","",
+ /*X0-X7*/ "","","","","","","","",
+ /*Y0-Y7*/ "","","","","","","","",
+- /*Z0-Z7*/ "","","","","","","","",
+- /*AA0-AA7*/ "","","","","","","","",
+- /*AB0-AB7*/ "","","","","","","","",
+- /*AC0-AC7*/ "","","","","","","","";
++ /*Z0-Z7*/ "","","","","","","","";
+ };
+
+ &fmc {
--- /dev/null
+From 2d6608b57c50c54c3e46649110e8ea5a40959c30 Mon Sep 17 00:00:00 2001
+From: Andrew Jeffery <andrew@aj.id.au>
+Date: Fri, 25 Jun 2021 15:40:17 +0930
+Subject: ARM: dts: tacoma: Add phase corrections for eMMC
+
+From: Andrew Jeffery <andrew@aj.id.au>
+
+commit 2d6608b57c50c54c3e46649110e8ea5a40959c30 upstream.
+
+The degree values were reversed out from the magic tap values of 7 (in)
+and 15 + inversion (out) initially suggested by Aspeed.
+
+With this patch, Tacoma survives several gigabytes of reads and writes
+using dd; without it, the machine locks up randomly during the boot
+process.
+
+Signed-off-by: Andrew Jeffery <andrew@aj.id.au>
+Link: https://lore.kernel.org/r/20210625061017.1149942-1-andrew@aj.id.au
+Fixes: 2fc88f92359d ("mmc: sdhci-of-aspeed: Expose clock phase controls")
+Fixes: 961216c135a8 ("ARM: dts: aspeed: Add Rainier system")
+Signed-off-by: Joel Stanley <joel@jms.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts
++++ b/arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts
+@@ -177,6 +177,7 @@
+
+ &emmc {
+ status = "okay";
++ clk-phase-mmc-hs200 = <36>, <270>;
+ };
+
+ &fsim0 {
--- /dev/null
+From f263a81451c12da5a342d90572e317e611846f2c Mon Sep 17 00:00:00 2001
+From: John Fastabend <john.fastabend@gmail.com>
+Date: Wed, 7 Jul 2021 15:38:47 -0700
+Subject: bpf: Track subprog poke descriptors correctly and fix use-after-free
+
+From: John Fastabend <john.fastabend@gmail.com>
+
+commit f263a81451c12da5a342d90572e317e611846f2c upstream.
+
+Subprograms are calling map_poke_track(), but on program release there is no
+hook to call map_poke_untrack(). The aux memory (and poke descriptor table) is
+nevertheless freed on release even though we still have a reference to it in
+the element list of the map aux data. When we run map_poke_run(), we then end
+up accessing freed memory, triggering KASAN in prog_array_map_poke_run():
+
+ [...]
+ [ 402.824689] BUG: KASAN: use-after-free in prog_array_map_poke_run+0xc2/0x34e
+ [ 402.824698] Read of size 4 at addr ffff8881905a7940 by task hubble-fgs/4337
+ [ 402.824705] CPU: 1 PID: 4337 Comm: hubble-fgs Tainted: G I 5.12.0+ #399
+ [ 402.824715] Call Trace:
+ [ 402.824719] dump_stack+0x93/0xc2
+ [ 402.824727] print_address_description.constprop.0+0x1a/0x140
+ [ 402.824736] ? prog_array_map_poke_run+0xc2/0x34e
+ [ 402.824740] ? prog_array_map_poke_run+0xc2/0x34e
+ [ 402.824744] kasan_report.cold+0x7c/0xd8
+ [ 402.824752] ? prog_array_map_poke_run+0xc2/0x34e
+ [ 402.824757] prog_array_map_poke_run+0xc2/0x34e
+ [ 402.824765] bpf_fd_array_map_update_elem+0x124/0x1a0
+ [...]
+
+The elements concerned are walked as follows:
+
+ for (i = 0; i < elem->aux->size_poke_tab; i++) {
+ poke = &elem->aux->poke_tab[i];
+ [...]
+
+The access to size_poke_tab is a 4 byte read, verified by checking offsets
+in the KASAN dump:
+
+ [ 402.825004] The buggy address belongs to the object at ffff8881905a7800
+ which belongs to the cache kmalloc-1k of size 1024
+ [ 402.825008] The buggy address is located 320 bytes inside of
+ 1024-byte region [ffff8881905a7800, ffff8881905a7c00)
+
+The pahole output of bpf_prog_aux:
+
+ struct bpf_prog_aux {
+ [...]
+ /* --- cacheline 5 boundary (320 bytes) --- */
+ u32 size_poke_tab; /* 320 4 */
+ [...]
+
+In general, subprograms do not necessarily manage their own data structures.
+For example, BTF func_info and linfo are just pointers to the main program
+structure. This allows reference counting and cleanup to be done on the latter,
+which simplifies their management a bit. The aux->poke_tab struct, however,
+did not follow this logic. The initially proposed fix for this use-after-free
+bug further embedded poke data tracking into the subprogram with proper
+reference counting. However, Daniel and Alexei questioned why we were treating
+these objects specially; I agree, it's unnecessary. The fix here removes the
+per-subprogram poke table allocation and map tracking and instead simply points
+the aux->poke_tab pointer at the main program's poke table. This way, map
+tracking is confined to the main program and we do not need to manage the
+descriptors per subprogram.
+
+This also means that bpf_prog_free_deferred(), which unwinds the program
+reference counting and kfrees objects, needs to ensure that we don't try to
+double-free the poke_tab when freeing the subprog structures. This is easily
+solved by NULLing the poke_tab pointer. The second detail is to ensure that per
+subprogram JIT logic only does fixups on poke_tab[] entries it owns. To do
+this, we add a pointer in the poke structure to point at the subprogram value
+so JITs can easily check while walking the poke_tab structure if the current
+entry belongs to the current program. The aux pointer is stable and therefore
+suitable for such comparison. On the jit_subprogs() error path, we omit
+cleaning up the poke->aux field because these are only ever referenced from
+the JIT side, but on error we will never make it to the JIT, so it's fine to
+leave them dangling. Removing these pointers would complicate the error path
+for no reason. However, we do need to untrack all poke descriptors from the
+main program as otherwise they could race with the freeing of JIT memory from
+the subprograms. Lastly, a748c6975dea3 ("bpf: propagate poke descriptors to
+subprograms") had an off-by-one on the subprogram instruction index range
+check as it was testing 'insn_idx >= subprog_start && insn_idx <= subprog_end'.
+However, subprog_end is the next subprogram's start instruction.
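+
+A minimal sketch of the corrected ownership check (illustrative only,
+simplified from the verifier change below, assuming kernel types such as
+u32 and bool from <linux/types.h>): subprogram i spans the half-open
+instruction range [subprog_start, subprog_end), so the test must exclude
+subprog_end itself.
+
+ /* Does this poke descriptor's instruction index belong to subprog i,
+  * whose instructions span [subprog_start, subprog_end)?
+  */
+ static bool poke_belongs_to_subprog(u32 insn_idx, u32 subprog_start,
+                                     u32 subprog_end)
+ {
+         return insn_idx >= subprog_start && insn_idx < subprog_end;
+ }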
+
+Fixes: a748c6975dea3 ("bpf: propagate poke descriptors to subprograms")
+Signed-off-by: John Fastabend <john.fastabend@gmail.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Co-developed-by: Daniel Borkmann <daniel@iogearbox.net>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/bpf/20210707223848.14580-2-john.fastabend@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/net/bpf_jit_comp.c | 3 ++
+ include/linux/bpf.h | 1
+ kernel/bpf/core.c | 8 +++++
+ kernel/bpf/verifier.c | 60 +++++++++++++++-----------------------------
+ 4 files changed, 32 insertions(+), 40 deletions(-)
+
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -564,6 +564,9 @@ static void bpf_tail_call_direct_fixup(s
+
+ for (i = 0; i < prog->aux->size_poke_tab; i++) {
+ poke = &prog->aux->poke_tab[i];
++ if (poke->aux && poke->aux != prog->aux)
++ continue;
++
+ WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));
+
+ if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -752,6 +752,7 @@ struct bpf_jit_poke_descriptor {
+ void *tailcall_target;
+ void *tailcall_bypass;
+ void *bypass_addr;
++ void *aux;
+ union {
+ struct {
+ struct bpf_map *map;
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -2167,8 +2167,14 @@ static void bpf_prog_free_deferred(struc
+ #endif
+ if (aux->dst_trampoline)
+ bpf_trampoline_put(aux->dst_trampoline);
+- for (i = 0; i < aux->func_cnt; i++)
++ for (i = 0; i < aux->func_cnt; i++) {
++ /* We can just unlink the subprog poke descriptor table as
++ * it was originally linked to the main program and is also
++ * released along with it.
++ */
++ aux->func[i]->aux->poke_tab = NULL;
+ bpf_jit_free(aux->func[i]);
++ }
+ if (aux->func_cnt) {
+ kfree(aux->func);
+ bpf_prog_unlock_free(aux->prog);
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -11157,33 +11157,19 @@ static int jit_subprogs(struct bpf_verif
+ goto out_free;
+ func[i]->is_func = 1;
+ func[i]->aux->func_idx = i;
+- /* the btf and func_info will be freed only at prog->aux */
++ /* Below members will be freed only at prog->aux */
+ func[i]->aux->btf = prog->aux->btf;
+ func[i]->aux->func_info = prog->aux->func_info;
++ func[i]->aux->poke_tab = prog->aux->poke_tab;
++ func[i]->aux->size_poke_tab = prog->aux->size_poke_tab;
+
+ for (j = 0; j < prog->aux->size_poke_tab; j++) {
+- u32 insn_idx = prog->aux->poke_tab[j].insn_idx;
+- int ret;
++ struct bpf_jit_poke_descriptor *poke;
+
+- if (!(insn_idx >= subprog_start &&
+- insn_idx <= subprog_end))
+- continue;
+-
+- ret = bpf_jit_add_poke_descriptor(func[i],
+- &prog->aux->poke_tab[j]);
+- if (ret < 0) {
+- verbose(env, "adding tail call poke descriptor failed\n");
+- goto out_free;
+- }
+-
+- func[i]->insnsi[insn_idx - subprog_start].imm = ret + 1;
+-
+- map_ptr = func[i]->aux->poke_tab[ret].tail_call.map;
+- ret = map_ptr->ops->map_poke_track(map_ptr, func[i]->aux);
+- if (ret < 0) {
+- verbose(env, "tracking tail call prog failed\n");
+- goto out_free;
+- }
++ poke = &prog->aux->poke_tab[j];
++ if (poke->insn_idx < subprog_end &&
++ poke->insn_idx >= subprog_start)
++ poke->aux = func[i]->aux;
+ }
+
+ /* Use bpf_prog_F_tag to indicate functions in stack traces.
+@@ -11213,18 +11199,6 @@ static int jit_subprogs(struct bpf_verif
+ cond_resched();
+ }
+
+- /* Untrack main program's aux structs so that during map_poke_run()
+- * we will not stumble upon the unfilled poke descriptors; each
+- * of the main program's poke descs got distributed across subprogs
+- * and got tracked onto map, so we are sure that none of them will
+- * be missed after the operation below
+- */
+- for (i = 0; i < prog->aux->size_poke_tab; i++) {
+- map_ptr = prog->aux->poke_tab[i].tail_call.map;
+-
+- map_ptr->ops->map_poke_untrack(map_ptr, prog->aux);
+- }
+-
+ /* at this point all bpf functions were successfully JITed
+ * now populate all bpf_calls with correct addresses and
+ * run last pass of JIT
+@@ -11293,14 +11267,22 @@ static int jit_subprogs(struct bpf_verif
+ bpf_prog_free_unused_jited_linfo(prog);
+ return 0;
+ out_free:
++ /* We failed JIT'ing, so at this point we need to unregister poke
++ * descriptors from subprogs, so that kernel is not attempting to
++ * patch it anymore as we're freeing the subprog JIT memory.
++ */
++ for (i = 0; i < prog->aux->size_poke_tab; i++) {
++ map_ptr = prog->aux->poke_tab[i].tail_call.map;
++ map_ptr->ops->map_poke_untrack(map_ptr, prog->aux);
++ }
++ /* At this point we're guaranteed that poke descriptors are not
++ * live anymore. We can just unlink its descriptor table as it's
++ * released with the main prog.
++ */
+ for (i = 0; i < env->subprog_cnt; i++) {
+ if (!func[i])
+ continue;
+-
+- for (j = 0; j < func[i]->aux->size_poke_tab; j++) {
+- map_ptr = func[i]->aux->poke_tab[j].tail_call.map;
+- map_ptr->ops->map_poke_untrack(map_ptr, func[i]->aux);
+- }
++ func[i]->aux->poke_tab = NULL;
+ bpf_jit_free(func[i]);
+ }
+ kfree(func);
--- /dev/null
+From bc832065b60f973771ff3e657214bb21b559833c Mon Sep 17 00:00:00 2001
+From: Gu Shengxian <gushengxian@yulong.com>
+Date: Mon, 5 Jul 2021 18:35:43 -0700
+Subject: bpftool: Properly close va_list 'ap' by va_end() on error
+
+From: Gu Shengxian <gushengxian@yulong.com>
+
+commit bc832065b60f973771ff3e657214bb21b559833c upstream.
+
+va_list 'ap' was opened but not closed by va_end() in the error case. It
+should be closed by va_end() before the return.
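+
+A small self-contained illustration of the pattern (a hypothetical helper,
+not the bpftool code itself): call va_end() on every path, including the
+vasprintf() failure path, before returning.
+
+ #define _GNU_SOURCE
+ #include <stdarg.h>
+ #include <stdio.h>
+
+ static int format_alloc(char **out, const char *fmt, ...)
+ {
+         va_list ap;
+         int err;
+
+         va_start(ap, fmt);
+         err = vasprintf(out, fmt, ap);
+         va_end(ap);                     /* closed on success and error alike */
+         return err < 0 ? -1 : 0;
+ }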
+
+Fixes: aa52bcbe0e72 ("tools: bpftool: Fix json dump crash on powerpc")
+Signed-off-by: Gu Shengxian <gushengxian@yulong.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Link: https://lore.kernel.org/bpf/20210706013543.671114-1-gushengxian507419@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/bpf/bpftool/jit_disasm.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/tools/bpf/bpftool/jit_disasm.c
++++ b/tools/bpf/bpftool/jit_disasm.c
+@@ -43,11 +43,13 @@ static int fprintf_json(void *out, const
+ {
+ va_list ap;
+ char *s;
++ int err;
+
+ va_start(ap, fmt);
+- if (vasprintf(&s, fmt, ap) < 0)
+- return -1;
++ err = vasprintf(&s, fmt, ap);
+ va_end(ap);
++ if (err < 0)
++ return -1;
+
+ if (!oper_count) {
+ int i;
--- /dev/null
+From 1988e0d84161dabd99d1c27033fbd6ee439bf432 Mon Sep 17 00:00:00 2001
+From: Linus Walleij <linus.walleij@linaro.org>
+Date: Fri, 4 Jun 2021 01:18:30 +0200
+Subject: drm/panel: nt35510: Do not fail if DSI read fails
+
+From: Linus Walleij <linus.walleij@linaro.org>
+
+commit 1988e0d84161dabd99d1c27033fbd6ee439bf432 upstream.
+
+Failing to read the MTP over DSI should not bring down the
+system and make us bail out from using the display. It turns
+out that this read failure happens when toggling the display
+off and on, while writes often still work, so the display
+output is just fine. Printing an error is enough.
+
+Tested by killing the Gnome session repeatedly on the
+Samsung Skomer.
+
+Fixes: 899f24ed8d3a ("drm/panel: Add driver for Novatek NT35510-based panels")
+Cc: Stephan Gerhold <stephan@gerhold.net>
+Reported-by: newbyte@disroot.org
+Acked-by: Stefan Hansson <newbyte@disroot.org>
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20210603231830.3200040-1-linus.walleij@linaro.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/panel/panel-novatek-nt35510.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/panel/panel-novatek-nt35510.c
++++ b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
+@@ -706,9 +706,7 @@ static int nt35510_power_on(struct nt355
+ if (ret)
+ return ret;
+
+- ret = nt35510_read_id(nt);
+- if (ret)
+- return ret;
++ nt35510_read_id(nt);
+
+ /* Set up stuff in manufacturer control, page 1 */
+ ret = nt35510_send_long(nt, dsi, MCS_CMD_MAUCCTR,
--- /dev/null
+From c7bb4b89033b764eb07db4e060548a6311d801ee Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 8 Jul 2021 00:21:09 -0700
+Subject: ipv6: tcp: drop silly ICMPv6 packet too big messages
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Eric Dumazet <edumazet@google.com>
+
+commit c7bb4b89033b764eb07db4e060548a6311d801ee upstream.
+
+While the TCP stack scales reasonably well, there is still one path that
+can be used to DDoS it.
+
+IPv6 Packet Too Big messages have to look up or insert a new route,
+and if abused by attackers, they can easily put hosts under high stress,
+with many CPUs contending on a spinlock while one is stuck in fib6_run_gc():
+
+ip6_protocol_deliver_rcu()
+ icmpv6_rcv()
+ icmpv6_notify()
+ tcp_v6_err()
+ tcp_v6_mtu_reduced()
+ inet6_csk_update_pmtu()
+ ip6_rt_update_pmtu()
+ __ip6_rt_update_pmtu()
+ ip6_rt_cache_alloc()
+ ip6_dst_alloc()
+ dst_alloc()
+ ip6_dst_gc()
+ fib6_run_gc()
+ spin_lock_bh() ...
+
+Some of our servers have been hit by malicious ICMPv6 packets
+trying to _increase_ the MTU/MSS of TCP flows.
+
+We believe these ICMPv6 packets are a result of a bug in one ISP stack,
+since they were blindly sent back for _every_ (small) packet sent to them.
+
+These packets are for one TCP flow:
+09:24:36.266491 IP6 Addr1 > Victim ICMP6, packet too big, mtu 1460, length 1240
+09:24:36.266509 IP6 Addr1 > Victim ICMP6, packet too big, mtu 1460, length 1240
+09:24:36.316688 IP6 Addr1 > Victim ICMP6, packet too big, mtu 1460, length 1240
+09:24:36.316704 IP6 Addr1 > Victim ICMP6, packet too big, mtu 1460, length 1240
+09:24:36.608151 IP6 Addr1 > Victim ICMP6, packet too big, mtu 1460, length 1240
+
+The TCP stack can filter out some of these silly requests:
+
+1) An MTU below IPV6_MIN_MTU can be filtered early in tcp_v6_err().
+2) tcp_v6_mtu_reduced() can drop requests trying to increase the current MSS.
+
+These tests happen before the IPv6 routing stack is entered, thus
+removing the potential contention and route exhaustion.
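+
+Illustrative only (the real checks are split across tcp_v6_err() and
+tcp_v6_mtu_reduced() as shown in the hunks below; the helper name is
+hypothetical), the two filters amount to:
+
+ /* hypothetical helper combining both early filters */
+ static bool pmtu_msg_is_silly(struct sock *sk, u32 mtu)
+ {
+         if (mtu < IPV6_MIN_MTU)         /* below the IPv6 minimum MTU */
+                 return true;
+         /* would not reduce the current MSS, nothing to update */
+         return tcp_mtu_to_mss(sk, (int)mtu) >= tcp_sk(sk)->mss_cache;
+ }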
+
+Note that the IPv6 stack was already performing these checks, but too late
+(i.e. after the route has been added, and after the potential
+garbage collection war).
+
+v2: fix typo caught by Martin, thanks !
+v3: exports tcp_mtu_to_mss(), caught by David, thanks !
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Maciej Żenczykowski <maze@google.com>
+Cc: Martin KaFai Lau <kafai@fb.com>
+Acked-by: Martin KaFai Lau <kafai@fb.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_output.c | 1 +
+ net/ipv6/tcp_ipv6.c | 19 +++++++++++++++++--
+ 2 files changed, 18 insertions(+), 2 deletions(-)
+
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1730,6 +1730,7 @@ int tcp_mtu_to_mss(struct sock *sk, int
+ return __tcp_mtu_to_mss(sk, pmtu) -
+ (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
+ }
++EXPORT_SYMBOL(tcp_mtu_to_mss);
+
+ /* Inverse of above */
+ int tcp_mss_to_mtu(struct sock *sk, int mss)
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -348,11 +348,20 @@ failure:
+ static void tcp_v6_mtu_reduced(struct sock *sk)
+ {
+ struct dst_entry *dst;
++ u32 mtu;
+
+ if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
+ return;
+
+- dst = inet6_csk_update_pmtu(sk, READ_ONCE(tcp_sk(sk)->mtu_info));
++ mtu = READ_ONCE(tcp_sk(sk)->mtu_info);
++
++ /* Drop requests trying to increase our current mss.
++ * Check done in __ip6_rt_update_pmtu() is too late.
++ */
++ if (tcp_mtu_to_mss(sk, mtu) >= tcp_sk(sk)->mss_cache)
++ return;
++
++ dst = inet6_csk_update_pmtu(sk, mtu);
+ if (!dst)
+ return;
+
+@@ -433,6 +442,8 @@ static int tcp_v6_err(struct sk_buff *sk
+ }
+
+ if (type == ICMPV6_PKT_TOOBIG) {
++ u32 mtu = ntohl(info);
++
+ /* We are not interested in TCP_LISTEN and open_requests
+ * (SYN-ACKs send out by Linux are always <576bytes so
+ * they should go through unfragmented).
+@@ -443,7 +454,11 @@ static int tcp_v6_err(struct sk_buff *sk
+ if (!ip6_sk_accept_pmtu(sk))
+ goto out;
+
+- WRITE_ONCE(tp->mtu_info, ntohl(info));
++ if (mtu < IPV6_MIN_MTU)
++ goto out;
++
++ WRITE_ONCE(tp->mtu_info, mtu);
++
+ if (!sock_owned_by_user(sk))
+ tcp_v6_mtu_reduced(sk);
+ else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
--- /dev/null
+From d952cfaf0cffdbbb0433c67206b645131f17ca5f Mon Sep 17 00:00:00 2001
+From: Masahiro Yamada <masahiroy@kernel.org>
+Date: Wed, 14 Jul 2021 13:23:49 +0900
+Subject: kbuild: do not suppress Kconfig prompts for silent build
+
+From: Masahiro Yamada <masahiroy@kernel.org>
+
+commit d952cfaf0cffdbbb0433c67206b645131f17ca5f upstream.
+
+When a new CONFIG option is available, Kbuild shows a prompt to get
+the user input.
+
+ $ make
+ [ snip ]
+ Core Scheduling for SMT (SCHED_CORE) [N/y/?] (NEW)
+
+This is the only interactive place in the build process.
+
+Commit 174a1dcc9642 ("kbuild: sink stdout from cmd for silent build")
+suppressed Kconfig prompts as well because syncconfig is invoked by
+the 'cmd' macro. As a result, you cannot notice that Kconfig is waiting
+for user input.
+
+Use 'kecho' to show the equivalent short log without suppressing stdout
+from sub-make.
+
+Fixes: 174a1dcc9642 ("kbuild: sink stdout from cmd for silent build")
+Reported-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Signed-off-by: Masahiro Yamada <masahiroy@kernel.org>
+Tested-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Makefile | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/Makefile
++++ b/Makefile
+@@ -704,11 +704,12 @@ $(KCONFIG_CONFIG):
+ # This exploits the 'multi-target pattern rule' trick.
+ # The syncconfig should be executed only once to make all the targets.
+ # (Note: use the grouped target '&:' when we bump to GNU Make 4.3)
+-quiet_cmd_syncconfig = SYNC $@
+- cmd_syncconfig = $(MAKE) -f $(srctree)/Makefile syncconfig
+-
++#
++# Do not use $(call cmd,...) here. That would suppress prompts from syncconfig,
++# so you cannot notice that Kconfig is waiting for the user input.
+ %/config/auto.conf %/config/auto.conf.cmd %/generated/autoconf.h: $(KCONFIG_CONFIG)
+- +$(call cmd,syncconfig)
++ $(Q)$(kecho) " SYNC $@"
++ $(Q)$(MAKE) -f $(srctree)/Makefile syncconfig
+ else # !may-sync-config
+ # External modules and some install targets need include/generated/autoconf.h
+ # and include/config/auto.conf but do not care if they are up-to-date.
--- /dev/null
+From 937654ce497fb6e977a8c52baee5f7d9616302d9 Mon Sep 17 00:00:00 2001
+From: Riccardo Mancini <rickyman7@gmail.com>
+Date: Thu, 15 Jul 2021 18:07:24 +0200
+Subject: perf test bpf: Free obj_buf
+
+From: Riccardo Mancini <rickyman7@gmail.com>
+
+commit 937654ce497fb6e977a8c52baee5f7d9616302d9 upstream.
+
+ASan reports some memory leaks when running:
+
+ # perf test "42: BPF filter"
+
+The first of these leaks is caused by obj_buf never being deallocated in
+__test__bpf.
+
+This patch adds the missing free.
+
+Signed-off-by: Riccardo Mancini <rickyman7@gmail.com>
+Fixes: ba1fae431e74bb42 ("perf test: Add 'perf test BPF'")
+Cc: Ian Rogers <irogers@google.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Wang Nan <wangnan0@huawei.com>
+Link: http://lore.kernel.org/lkml/60f3ca935fe6672e7e866276ce6264c9e26e4c87.1626343282.git.rickyman7@gmail.com
+[ Added missing stdlib.h include ]
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/perf/tests/bpf.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/tools/perf/tests/bpf.c
++++ b/tools/perf/tests/bpf.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include <errno.h>
+ #include <stdio.h>
++#include <stdlib.h>
+ #include <sys/epoll.h>
+ #include <sys/types.h>
+ #include <sys/stat.h>
+@@ -283,6 +284,7 @@ static int __test__bpf(int idx)
+ }
+
+ out:
++ free(obj_buf);
+ bpf__clear();
+ return ret;
+ }
net-dsa-properly-check-for-the-bridge_leave-methods-in-dsa_switch_bridge_leave.patch
net-fddi-fix-uaf-in-fza_probe.patch
dma-buf-sync_file-don-t-leak-fences-on-merge-failure.patch
+kbuild-do-not-suppress-kconfig-prompts-for-silent-build.patch
+arm-dts-aspeed-fix-ast2600-machines-line-names.patch
+arm-dts-tacoma-add-phase-corrections-for-emmc.patch
+tcp-consistently-disable-header-prediction-for-mptcp.patch
+tcp-annotate-data-races-around-tp-mtu_info.patch
+tcp-fix-tcp_init_transfer-to-not-reset-icsk_ca_initialized.patch
+ipv6-tcp-drop-silly-icmpv6-packet-too-big-messages.patch
+tcp-call-sk_wmem_schedule-before-sk_mem_charge-in-zerocopy-path.patch
+tools-bpf-fix-error-in-make-c-tools-bpf_install.patch
+bpftool-properly-close-va_list-ap-by-va_end-on-error.patch
+bpf-track-subprog-poke-descriptors-correctly-and-fix-use-after-free.patch
+perf-test-bpf-free-obj_buf.patch
+drm-panel-nt35510-do-not-fail-if-dsi-read-fails.patch
+udp-annotate-data-races-around-unix_sk-sk-gso_size.patch
--- /dev/null
+From 561022acb1ce62e50f7a8258687a21b84282a4cb Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Fri, 2 Jul 2021 13:09:03 -0700
+Subject: tcp: annotate data races around tp->mtu_info
+
+From: Eric Dumazet <edumazet@google.com>
+
+commit 561022acb1ce62e50f7a8258687a21b84282a4cb upstream.
+
+While tp->mtu_info is read with the socket owned, the writes
+happen from the err handlers (tcp_v[46]_err), which only hold
+the socket spinlock.
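+
+A generic kernel-context sketch of the annotation pattern applied below
+(assumes <linux/compiler.h> and <linux/types.h>; the structure and
+function names are hypothetical): the lockless writer and the reader are
+paired with WRITE_ONCE()/READ_ONCE() so the access cannot be torn or
+fused and KCSAN knows the race is intentional.
+
+ struct shared_state { u32 value; };            /* hypothetical structure */
+
+ static void lockless_writer(struct shared_state *s, u32 v)
+ {
+         WRITE_ONCE(s->value, v);                /* e.g. from an err handler */
+ }
+
+ static u32 paired_reader(const struct shared_state *s)
+ {
+         return READ_ONCE(s->value);             /* e.g. with the socket owned */
+ }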
+
+Fixes: 563d34d05786 ("tcp: dont drop MTU reduction indications")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_ipv4.c | 4 ++--
+ net/ipv6/tcp_ipv6.c | 4 ++--
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -342,7 +342,7 @@ void tcp_v4_mtu_reduced(struct sock *sk)
+
+ if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
+ return;
+- mtu = tcp_sk(sk)->mtu_info;
++ mtu = READ_ONCE(tcp_sk(sk)->mtu_info);
+ dst = inet_csk_update_pmtu(sk, mtu);
+ if (!dst)
+ return;
+@@ -546,7 +546,7 @@ int tcp_v4_err(struct sk_buff *skb, u32
+ if (sk->sk_state == TCP_LISTEN)
+ goto out;
+
+- tp->mtu_info = info;
++ WRITE_ONCE(tp->mtu_info, info);
+ if (!sock_owned_by_user(sk)) {
+ tcp_v4_mtu_reduced(sk);
+ } else {
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -352,7 +352,7 @@ static void tcp_v6_mtu_reduced(struct so
+ if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
+ return;
+
+- dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
++ dst = inet6_csk_update_pmtu(sk, READ_ONCE(tcp_sk(sk)->mtu_info));
+ if (!dst)
+ return;
+
+@@ -443,7 +443,7 @@ static int tcp_v6_err(struct sk_buff *sk
+ if (!ip6_sk_accept_pmtu(sk))
+ goto out;
+
+- tp->mtu_info = ntohl(info);
++ WRITE_ONCE(tp->mtu_info, ntohl(info));
+ if (!sock_owned_by_user(sk))
+ tcp_v6_mtu_reduced(sk);
+ else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
--- /dev/null
+From 358ed624207012f03318235017ac6fb41f8af592 Mon Sep 17 00:00:00 2001
+From: Talal Ahmad <talalahmad@google.com>
+Date: Fri, 9 Jul 2021 11:43:06 -0400
+Subject: tcp: call sk_wmem_schedule before sk_mem_charge in zerocopy path
+
+From: Talal Ahmad <talalahmad@google.com>
+
+commit 358ed624207012f03318235017ac6fb41f8af592 upstream.
+
+sk_wmem_schedule makes sure that sk_forward_alloc has enough
+bytes for charging that is going to be done by sk_mem_charge.
+
+In the transmit zerocopy path, there is sk_mem_charge but there was
+no call to sk_wmem_schedule. This change adds that call.
+
+Without this call to sk_wmem_schedule, sk_forward_alloc can go
+negative, which is a bug: sk_forward_alloc is per-socket space that has
+already been forward charged, so it can't legitimately go negative.
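+
+The intended ordering, condensed from the hunk below (illustrative only,
+not the complete tcp_sendmsg() zerocopy branch; the exact position of the
+later sk_mem_charge() call is summarised):
+
+ if (!sk_wmem_schedule(sk, copy))        /* reserve forward-alloc space first */
+         goto wait_for_space;            /* back off under memory pressure */
+
+ /* ... zerocopy data is attached to the skb here ... */
+
+ sk_mem_charge(sk, copy);                /* charges only what was scheduled */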
+
+Fixes: f214f915e7db ("tcp: enable MSG_ZEROCOPY")
+Signed-off-by: Talal Ahmad <talalahmad@google.com>
+Reviewed-by: Willem de Bruijn <willemb@google.com>
+Reviewed-by: Wei Wang <weiwan@google.com>
+Reviewed-by: Soheil Hassas Yeganeh <soheil@google.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -1361,6 +1361,9 @@ new_segment:
+ }
+ pfrag->offset += copy;
+ } else {
++ if (!sk_wmem_schedule(sk, copy))
++ goto wait_for_space;
++
+ err = skb_zerocopy_iter_stream(sk, skb, msg, copy, uarg);
+ if (err == -EMSGSIZE || err == -EEXIST) {
+ tcp_mark_push(tp, skb);
--- /dev/null
+From 71158bb1f2d2da61385c58fc1114e1a1c19984ba Mon Sep 17 00:00:00 2001
+From: Paolo Abeni <pabeni@redhat.com>
+Date: Wed, 30 Jun 2021 13:42:13 +0200
+Subject: tcp: consistently disable header prediction for mptcp
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+commit 71158bb1f2d2da61385c58fc1114e1a1c19984ba upstream.
+
+The MPTCP receive path is hooked only into the TCP slow-path.
+The DSS presence allows plain MPTCP traffic to hit that
+consistently.
+
+Since commit e1ff9e82e2ea ("net: mptcp: improve fallback to TCP"),
+when an MPTCP socket falls back to TCP, it can hit the TCP receive
+fast-path, and delay or stop triggering the event notification.
+
+Address the issue by explicitly disabling header prediction
+for MPTCP sockets.
+
+Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/200
+Fixes: e1ff9e82e2ea ("net: mptcp: improve fallback to TCP")
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/tcp.h | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -676,6 +676,10 @@ static inline u32 __tcp_set_rto(const st
+
+ static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
+ {
++ /* mptcp hooks are only on the slow path */
++ if (sk_is_mptcp((struct sock *)tp))
++ return;
++
+ tp->pred_flags = htonl((tp->tcp_header_len << 26) |
+ ntohl(TCP_FLAG_ACK) |
+ snd_wnd);
--- /dev/null
+From be5d1b61a2ad28c7e57fe8bfa277373e8ecffcdc Mon Sep 17 00:00:00 2001
+From: Nguyen Dinh Phi <phind.uet@gmail.com>
+Date: Tue, 6 Jul 2021 07:19:12 +0800
+Subject: tcp: fix tcp_init_transfer() to not reset icsk_ca_initialized
+
+From: Nguyen Dinh Phi <phind.uet@gmail.com>
+
+commit be5d1b61a2ad28c7e57fe8bfa277373e8ecffcdc upstream.
+
+This commit fixes a bug (found by syzkaller) that could cause spurious
+double-initializations of congestion control modules, which could in turn
+cause memory leaks or other problems for modules (like CDG) that allocate
+memory in their init functions.
+
+The buggy scenario constructed by syzkaller was something like:
+
+(1) create a TCP socket
+(2) initiate a TFO connect via sendto()
+(3) while socket is in TCP_SYN_SENT, call setsockopt(TCP_CONGESTION),
+ which calls:
+ tcp_set_congestion_control() ->
+ tcp_reinit_congestion_control() ->
+ tcp_init_congestion_control()
+(4) receive ACK, connection is established, call tcp_init_transfer(),
+ set icsk_ca_initialized=0 (without first calling cc->release()),
+ call tcp_init_congestion_control() again.
+
+Note that in this sequence tcp_init_congestion_control() is called
+twice without a cc->release() call in between. Thus, for CC modules
+that allocate memory in their init() function, e.g, CDG, a memory leak
+may occur. The syzkaller tool managed to find a reproducer that
+triggered such a leak in CDG.
+
+The bug was introduced when commit 8919a9b31eb4 ("tcp: Only init
+congestion control if not initialized already")
+introduced icsk_ca_initialized and set icsk_ca_initialized to 0 in
+tcp_init_transfer(), missing the possibility for a sequence like the
+one above, where a process could call setsockopt(TCP_CONGESTION) in
+state TCP_SYN_SENT (i.e. after the connect() or TFO open sendmsg()),
+which would call tcp_init_congestion_control(). It did not intend to
+reset any initialization that the user had already explicitly made;
+it just missed the possibility of that particular sequence (which
+syzkaller managed to find).
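+
+Condensed view of the fixed behaviour (illustrative only, see the
+one-line hunk below): tcp_init_transfer() no longer clears the flag and
+only initializes congestion control when nothing has done so yet.
+
+ if (!inet_csk(sk)->icsk_ca_initialized)   /* left intact by the setsockopt() path */
+         tcp_init_congestion_control(sk);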
+
+Fixes: 8919a9b31eb4 ("tcp: Only init congestion control if not initialized already")
+Reported-by: syzbot+f1e24a0594d4e3a895d3@syzkaller.appspotmail.com
+Signed-off-by: Nguyen Dinh Phi <phind.uet@gmail.com>
+Acked-by: Neal Cardwell <ncardwell@google.com>
+Tested-by: Neal Cardwell <ncardwell@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_input.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -5911,8 +5911,8 @@ void tcp_init_transfer(struct sock *sk,
+ tp->snd_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
+ tp->snd_cwnd_stamp = tcp_jiffies32;
+
+- icsk->icsk_ca_initialized = 0;
+ bpf_skops_established(sk, bpf_op, skb);
++ /* Initialize congestion control unless BPF initialized it already: */
+ if (!icsk->icsk_ca_initialized)
+ tcp_init_congestion_control(sk);
+ tcp_init_buffer_space(sk);
--- /dev/null
+From 1d719254c139fb62fb8056fb496b6fd007e71550 Mon Sep 17 00:00:00 2001
+From: Wei Li <liwei391@huawei.com>
+Date: Mon, 28 Jun 2021 11:04:09 +0800
+Subject: tools: bpf: Fix error in 'make -C tools/ bpf_install'
+
+From: Wei Li <liwei391@huawei.com>
+
+commit 1d719254c139fb62fb8056fb496b6fd007e71550 upstream.
+
+make[2]: *** No rule to make target 'install'. Stop.
+make[1]: *** [Makefile:122: runqslower_install] Error 2
+make: *** [Makefile:116: bpf_install] Error 2
+
+There is no rule for target 'install' in tools/bpf/runqslower/Makefile,
+and there is no need to install it, so just remove 'runqslower_install'.
+
+Fixes: 9c01546d26d2 ("tools/bpf: Add runqslower tool to tools/bpf")
+Signed-off-by: Wei Li <liwei391@huawei.com>
+Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
+Link: https://lore.kernel.org/bpf/20210628030409.3459095-1-liwei391@huawei.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/bpf/Makefile | 7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+--- a/tools/bpf/Makefile
++++ b/tools/bpf/Makefile
+@@ -97,7 +97,7 @@ clean: bpftool_clean runqslower_clean re
+ $(Q)$(RM) -- $(OUTPUT)FEATURE-DUMP.bpf
+ $(Q)$(RM) -r -- $(OUTPUT)feature
+
+-install: $(PROGS) bpftool_install runqslower_install
++install: $(PROGS) bpftool_install
+ $(call QUIET_INSTALL, bpf_jit_disasm)
+ $(Q)$(INSTALL) -m 0755 -d $(DESTDIR)$(prefix)/bin
+ $(Q)$(INSTALL) $(OUTPUT)bpf_jit_disasm $(DESTDIR)$(prefix)/bin/bpf_jit_disasm
+@@ -118,9 +118,6 @@ bpftool_clean:
+ runqslower:
+ $(call descend,runqslower)
+
+-runqslower_install:
+- $(call descend,runqslower,install)
+-
+ runqslower_clean:
+ $(call descend,runqslower,clean)
+
+@@ -131,5 +128,5 @@ resolve_btfids_clean:
+ $(call descend,resolve_btfids,clean)
+
+ .PHONY: all install clean bpftool bpftool_install bpftool_clean \
+- runqslower runqslower_install runqslower_clean \
++ runqslower runqslower_clean \
+ resolve_btfids resolve_btfids_clean
--- /dev/null
+From 18a419bad63b7f68a1979e28459782518e7b6bbe Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 30 Jun 2021 09:42:44 -0700
+Subject: udp: annotate data races around unix_sk(sk)->gso_size
+
+From: Eric Dumazet <edumazet@google.com>
+
+commit 18a419bad63b7f68a1979e28459782518e7b6bbe upstream.
+
+Accesses to udp_sk(sk)->gso_size are lockless.
+Add READ_ONCE()/WRITE_ONCE() around them.
+
+BUG: KCSAN: data-race in udp_lib_setsockopt / udpv6_sendmsg
+
+write to 0xffff88812d78f47c of 2 bytes by task 10849 on cpu 1:
+ udp_lib_setsockopt+0x3b3/0x710 net/ipv4/udp.c:2696
+ udpv6_setsockopt+0x63/0x90 net/ipv6/udp.c:1630
+ sock_common_setsockopt+0x5d/0x70 net/core/sock.c:3265
+ __sys_setsockopt+0x18f/0x200 net/socket.c:2104
+ __do_sys_setsockopt net/socket.c:2115 [inline]
+ __se_sys_setsockopt net/socket.c:2112 [inline]
+ __x64_sys_setsockopt+0x62/0x70 net/socket.c:2112
+ do_syscall_64+0x4a/0x90 arch/x86/entry/common.c:47
+ entry_SYSCALL_64_after_hwframe+0x44/0xae
+
+read to 0xffff88812d78f47c of 2 bytes by task 10852 on cpu 0:
+ udpv6_sendmsg+0x161/0x16b0 net/ipv6/udp.c:1299
+ inet6_sendmsg+0x5f/0x80 net/ipv6/af_inet6.c:642
+ sock_sendmsg_nosec net/socket.c:654 [inline]
+ sock_sendmsg net/socket.c:674 [inline]
+ ____sys_sendmsg+0x360/0x4d0 net/socket.c:2337
+ ___sys_sendmsg net/socket.c:2391 [inline]
+ __sys_sendmmsg+0x315/0x4b0 net/socket.c:2477
+ __do_sys_sendmmsg net/socket.c:2506 [inline]
+ __se_sys_sendmmsg net/socket.c:2503 [inline]
+ __x64_sys_sendmmsg+0x53/0x60 net/socket.c:2503
+ do_syscall_64+0x4a/0x90 arch/x86/entry/common.c:47
+ entry_SYSCALL_64_after_hwframe+0x44/0xae
+
+value changed: 0x0000 -> 0x0005
+
+Reported by Kernel Concurrency Sanitizer on:
+CPU: 0 PID: 10852 Comm: syz-executor.0 Not tainted 5.13.0-syzkaller #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+
+Fixes: bec1f6f69736 ("udp: generate gso with UDP_SEGMENT")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Willem de Bruijn <willemb@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/udp.c | 6 +++---
+ net/ipv6/udp.c | 2 +-
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1097,7 +1097,7 @@ int udp_sendmsg(struct sock *sk, struct
+ }
+
+ ipcm_init_sk(&ipc, inet);
+- ipc.gso_size = up->gso_size;
++ ipc.gso_size = READ_ONCE(up->gso_size);
+
+ if (msg->msg_controllen) {
+ err = udp_cmsg_send(sk, msg, &ipc.gso_size);
+@@ -2655,7 +2655,7 @@ int udp_lib_setsockopt(struct sock *sk,
+ case UDP_SEGMENT:
+ if (val < 0 || val > USHRT_MAX)
+ return -EINVAL;
+- up->gso_size = val;
++ WRITE_ONCE(up->gso_size, val);
+ break;
+
+ case UDP_GRO:
+@@ -2750,7 +2750,7 @@ int udp_lib_getsockopt(struct sock *sk,
+ break;
+
+ case UDP_SEGMENT:
+- val = up->gso_size;
++ val = READ_ONCE(up->gso_size);
+ break;
+
+ case UDP_GRO:
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1294,7 +1294,7 @@ int udpv6_sendmsg(struct sock *sk, struc
+ int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
+
+ ipcm6_init(&ipc6);
+- ipc6.gso_size = up->gso_size;
++ ipc6.gso_size = READ_ONCE(up->gso_size);
+ ipc6.sockc.tsflags = sk->sk_tsflags;
+ ipc6.sockc.mark = sk->sk_mark;
+