git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.9-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Fri, 17 Jul 2020 09:26:59 +0000 (11:26 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Fri, 17 Jul 2020 09:26:59 +0000 (11:26 +0200)
added patches:
cgroup-fix-cgroup_sk_alloc-for-sk_clone_lock.patch
cgroup-fix-sock_cgroup_data-on-big-endian.patch
genetlink-remove-genl_bind.patch
ipv4-fill-fl4_icmp_-type-code-in-ping_v4_sendmsg.patch
l2tp-remove-skb_dst_set-from-l2tp_xmit_skb.patch
llc-make-sure-applications-use-arphrd_ether.patch
net-added-pointer-check-for-dst-ops-neigh_lookup-in-dst_neigh_lookup_skb.patch
net-usb-qmi_wwan-add-support-for-quectel-eg95-lte-modem.patch
tcp-make-sure-listeners-don-t-initialize-congestion-control-state.patch
tcp-md5-add-missing-memory-barriers-in-tcp_md5_do_add-tcp_md5_hash_key.patch
tcp-md5-allow-changing-md5-keys-in-all-socket-states.patch
tcp-md5-do-not-send-silly-options-in-syncookies.patch
tcp-md5-refine-tcp_md5_do_add-tcp_md5_hash_key-barriers.patch

14 files changed:
queue-4.9/cgroup-fix-cgroup_sk_alloc-for-sk_clone_lock.patch [new file with mode: 0644]
queue-4.9/cgroup-fix-sock_cgroup_data-on-big-endian.patch [new file with mode: 0644]
queue-4.9/genetlink-remove-genl_bind.patch [new file with mode: 0644]
queue-4.9/ipv4-fill-fl4_icmp_-type-code-in-ping_v4_sendmsg.patch [new file with mode: 0644]
queue-4.9/l2tp-remove-skb_dst_set-from-l2tp_xmit_skb.patch [new file with mode: 0644]
queue-4.9/llc-make-sure-applications-use-arphrd_ether.patch [new file with mode: 0644]
queue-4.9/net-added-pointer-check-for-dst-ops-neigh_lookup-in-dst_neigh_lookup_skb.patch [new file with mode: 0644]
queue-4.9/net-usb-qmi_wwan-add-support-for-quectel-eg95-lte-modem.patch [new file with mode: 0644]
queue-4.9/series
queue-4.9/tcp-make-sure-listeners-don-t-initialize-congestion-control-state.patch [new file with mode: 0644]
queue-4.9/tcp-md5-add-missing-memory-barriers-in-tcp_md5_do_add-tcp_md5_hash_key.patch [new file with mode: 0644]
queue-4.9/tcp-md5-allow-changing-md5-keys-in-all-socket-states.patch [new file with mode: 0644]
queue-4.9/tcp-md5-do-not-send-silly-options-in-syncookies.patch [new file with mode: 0644]
queue-4.9/tcp-md5-refine-tcp_md5_do_add-tcp_md5_hash_key-barriers.patch [new file with mode: 0644]

diff --git a/queue-4.9/cgroup-fix-cgroup_sk_alloc-for-sk_clone_lock.patch b/queue-4.9/cgroup-fix-cgroup_sk_alloc-for-sk_clone_lock.patch
new file mode 100644 (file)
index 0000000..359b055
--- /dev/null
@@ -0,0 +1,159 @@
+From foo@baz Fri 17 Jul 2020 09:29:01 AM CEST
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Thu, 2 Jul 2020 11:52:56 -0700
+Subject: cgroup: fix cgroup_sk_alloc() for sk_clone_lock()
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+[ Upstream commit ad0f75e5f57ccbceec13274e1e242f2b5a6397ed ]
+
+When we clone a socket in sk_clone_lock(), its sk_cgrp_data is
+copied, so the cgroup refcnt must be taken too. And, unlike the
+sk_alloc() path, sock_update_netprioidx() is not called here.
+Therefore, it is safe and necessary to grab the cgroup refcnt
+even when cgroup_sk_alloc is disabled.
+
+sk_clone_lock() is in BH context anyway, so the in_interrupt() check
+would terminate this function if it were called there. And for sk_alloc()
+skcd->val is always zero. So it's safe to factor out the code
+to make it more readable.
+
+The global variable 'cgroup_sk_alloc_disabled' is used to determine
+whether to take these reference counts. It is impossible to make
+the reference counting correct unless we save this bit of information
+in skcd->val. So, add a new bit there to record whether the socket
+has already taken the reference counts. This obviously relies on
+kmalloc() aligning cgroup pointers to at least 4 bytes;
+ARCH_KMALLOC_MINALIGN is certainly larger than that.
+
+This bug seems to have been present since the beginning; commit
+d979a39d7242 ("cgroup: duplicate cgroup reference when cloning sockets")
+tried to fix it, but not completely. It did not seem easy to trigger until
+the recent commit 090e28b229af
+("netprio_cgroup: Fix unlimited memory leak of v2 cgroups") was merged.
+
+Fixes: bd1060a1d671 ("sock, cgroup: add sock->sk_cgroup")
+Reported-by: Cameron Berkenpas <cam@neo-zeon.de>
+Reported-by: Peter Geis <pgwipeout@gmail.com>
+Reported-by: Lu Fengqi <lufq.fnst@cn.fujitsu.com>
+Reported-by: Daniël Sonck <dsonck92@gmail.com>
+Reported-by: Zhang Qiang <qiang.zhang@windriver.com>
+Tested-by: Cameron Berkenpas <cam@neo-zeon.de>
+Tested-by: Peter Geis <pgwipeout@gmail.com>
+Tested-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
+Cc: Daniel Borkmann <daniel@iogearbox.net>
+Cc: Zefan Li <lizefan@huawei.com>
+Cc: Tejun Heo <tj@kernel.org>
+Cc: Roman Gushchin <guro@fb.com>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/cgroup-defs.h |    6 ++++--
+ include/linux/cgroup.h      |    4 +++-
+ kernel/cgroup.c             |   24 ++++++++++++++++++------
+ net/core/sock.c             |    2 +-
+ 4 files changed, 26 insertions(+), 10 deletions(-)
+
+--- a/include/linux/cgroup-defs.h
++++ b/include/linux/cgroup-defs.h
+@@ -587,7 +587,8 @@ struct sock_cgroup_data {
+       union {
+ #ifdef __LITTLE_ENDIAN
+               struct {
+-                      u8      is_data;
++                      u8      is_data : 1;
++                      u8      no_refcnt : 1;
+                       u8      padding;
+                       u16     prioidx;
+                       u32     classid;
+@@ -597,7 +598,8 @@ struct sock_cgroup_data {
+                       u32     classid;
+                       u16     prioidx;
+                       u8      padding;
+-                      u8      is_data;
++                      u8      no_refcnt : 1;
++                      u8      is_data : 1;
+               } __packed;
+ #endif
+               u64             val;
+--- a/include/linux/cgroup.h
++++ b/include/linux/cgroup.h
+@@ -657,6 +657,7 @@ extern spinlock_t cgroup_sk_update_lock;
+ void cgroup_sk_alloc_disable(void);
+ void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
++void cgroup_sk_clone(struct sock_cgroup_data *skcd);
+ void cgroup_sk_free(struct sock_cgroup_data *skcd);
+ static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
+@@ -670,7 +671,7 @@ static inline struct cgroup *sock_cgroup
+        */
+       v = READ_ONCE(skcd->val);
+-      if (v & 1)
++      if (v & 3)
+               return &cgrp_dfl_root.cgrp;
+       return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
+@@ -682,6 +683,7 @@ static inline struct cgroup *sock_cgroup
+ #else /* CONFIG_CGROUP_DATA */
+ static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
++static inline void cgroup_sk_clone(struct sock_cgroup_data *skcd) {}
+ static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}
+ #endif        /* CONFIG_CGROUP_DATA */
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -6326,12 +6326,8 @@ void cgroup_sk_alloc_disable(void)
+ void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
+ {
+-      if (cgroup_sk_alloc_disabled)
+-              return;
+-
+-      /* Socket clone path */
+-      if (skcd->val) {
+-              cgroup_get(sock_cgroup_ptr(skcd));
++      if (cgroup_sk_alloc_disabled) {
++              skcd->no_refcnt = 1;
+               return;
+       }
+@@ -6355,8 +6351,24 @@ void cgroup_sk_alloc(struct sock_cgroup_
+       rcu_read_unlock();
+ }
++void cgroup_sk_clone(struct sock_cgroup_data *skcd)
++{
++      /* Socket clone path */
++      if (skcd->val) {
++              /*
++               * We might be cloning a socket which is left in an empty
++               * cgroup and the cgroup might have already been rmdir'd.
++               * Don't use cgroup_get_live().
++               */
++              cgroup_get(sock_cgroup_ptr(skcd));
++      }
++}
++
+ void cgroup_sk_free(struct sock_cgroup_data *skcd)
+ {
++      if (skcd->no_refcnt)
++              return;
++
+       cgroup_put(sock_cgroup_ptr(skcd));
+ }
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1533,7 +1533,7 @@ struct sock *sk_clone_lock(const struct
+               newsk->sk_userlocks     = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
+               sock_reset_flag(newsk, SOCK_DONE);
+-              cgroup_sk_alloc(&newsk->sk_cgrp_data);
++              cgroup_sk_clone(&newsk->sk_cgrp_data);
+               skb_queue_head_init(&newsk->sk_error_queue);
+               filter = rcu_dereference_protected(newsk->sk_filter, 1);
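
The fix above relies on the low two bits of skcd->val never being set for a
real cgroup pointer, which is why sock_cgroup_ptr() can simply test 'v & 3'.
A minimal user-space sketch of that tagged-value idea (plain C illustration
only, not kernel code; the struct name and allocator are stand-ins):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct fake_cgroup { int dummy; };      /* stand-in for struct cgroup */

    int main(void)
    {
        /* malloc() returns storage aligned for any object type, i.e. at
         * least 4 bytes, mirroring the ARCH_KMALLOC_MINALIGN guarantee the
         * changelog relies on. */
        struct fake_cgroup *cgrp = malloc(sizeof(*cgrp));
        uint64_t val = (uint64_t)(uintptr_t)cgrp;

        assert((val & 3) == 0);             /* low two bits free for flags */

        val |= 2;                           /* set a "no_refcnt"-style bit */

        /* mirrors the 'if (v & 3)' test in sock_cgroup_ptr(): a set low bit
         * means val no longer holds a plain cgroup pointer */
        if (val & 3)
            printf("flag bit set: val now carries flags, not a pointer\n");

        free(cgrp);
        return 0;
    }
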
diff --git a/queue-4.9/cgroup-fix-sock_cgroup_data-on-big-endian.patch b/queue-4.9/cgroup-fix-sock_cgroup_data-on-big-endian.patch
new file mode 100644 (file)
index 0000000..0ca4585
--- /dev/null
@@ -0,0 +1,38 @@
+From foo@baz Fri 17 Jul 2020 11:04:44 AM CEST
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Thu, 9 Jul 2020 16:28:44 -0700
+Subject: cgroup: Fix sock_cgroup_data on big-endian.
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+[ Upstream commit 14b032b8f8fce03a546dcf365454bec8c4a58d7d ]
+
+In order for no_refcnt and is_data to be the lowest-order two
+bits in 'val', we have to pad out the bitfield to a full u8.
+
+Fixes: ad0f75e5f57c ("cgroup: fix cgroup_sk_alloc() for sk_clone_lock()")
+Reported-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/cgroup-defs.h |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/include/linux/cgroup-defs.h
++++ b/include/linux/cgroup-defs.h
+@@ -589,6 +589,7 @@ struct sock_cgroup_data {
+               struct {
+                       u8      is_data : 1;
+                       u8      no_refcnt : 1;
++                      u8      unused : 6;
+                       u8      padding;
+                       u16     prioidx;
+                       u32     classid;
+@@ -598,6 +599,7 @@ struct sock_cgroup_data {
+                       u32     classid;
+                       u16     prioidx;
+                       u8      padding;
++                      u8      unused : 6;
+                       u8      no_refcnt : 1;
+                       u8      is_data : 1;
+               } __packed;
diff --git a/queue-4.9/genetlink-remove-genl_bind.patch b/queue-4.9/genetlink-remove-genl_bind.patch
new file mode 100644 (file)
index 0000000..73554e7
--- /dev/null
@@ -0,0 +1,137 @@
+From foo@baz Fri 17 Jul 2020 11:04:44 AM CEST
+From: Sean Tranchetti <stranche@codeaurora.org>
+Date: Tue, 30 Jun 2020 11:50:17 -0600
+Subject: genetlink: remove genl_bind
+
+From: Sean Tranchetti <stranche@codeaurora.org>
+
+[ Upstream commit 1e82a62fec613844da9e558f3493540a5b7a7b67 ]
+
+A potential deadlock can occur when registering or unregistering a
+new generic netlink family, between the main nl_table_lock and the
+cb_lock, where each thread wants the lock held by the other, as
+demonstrated below.
+
+1) Thread 1 is performing a netlink_bind() operation on a socket. As part
+   of this call, it will call netlink_lock_table(), incrementing the
+   nl_table_users count to 1.
+2) Thread 2 is registering (or unregistering) a genl_family via the
+   genl_(un)register_family() API. The cb_lock semaphore will be taken for
+   writing.
+3) Thread 1 will call genl_bind() as part of the bind operation to handle
+   subscribing to GENL multicast groups at the request of the user. It will
+   attempt to take the cb_lock semaphore for reading, but it will fail and
+   be scheduled away, waiting for Thread 2 to finish the write.
+4) Thread 2 will call netlink_table_grab() during the (un)registration
+   call. However, as Thread 1 has incremented nl_table_users, it will not
+   be able to proceed, and both threads will be stuck waiting for the
+   other.
+
+genl_bind() is a noop, unless a genl_family implements the mcast_bind()
+function to handle setting up family-specific multicast operations. Since
+no one in-tree uses this functionality as Cong pointed out, simply removing
+the genl_bind() function will remove the possibility for deadlock, as there
+is no attempt by Thread 1 above to take the cb_lock semaphore.
+
+Fixes: c380d9a7afff ("genetlink: pass multicast bind/unbind to families")
+Suggested-by: Cong Wang <xiyou.wangcong@gmail.com>
+Acked-by: Johannes Berg <johannes.berg@intel.com>
+Reported-by: kernel test robot <lkp@intel.com>
+Signed-off-by: Sean Tranchetti <stranche@codeaurora.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/genetlink.h |    8 -------
+ net/netlink/genetlink.c |   52 ------------------------------------------------
+ 2 files changed, 60 deletions(-)
+
+--- a/include/net/genetlink.h
++++ b/include/net/genetlink.h
+@@ -33,12 +33,6 @@ struct genl_info;
+  *    do additional, common, filtering and return an error
+  * @post_doit: called after an operation's doit callback, it may
+  *    undo operations done by pre_doit, for example release locks
+- * @mcast_bind: a socket bound to the given multicast group (which
+- *    is given as the offset into the groups array)
+- * @mcast_unbind: a socket was unbound from the given multicast group.
+- *    Note that unbind() will not be called symmetrically if the
+- *    generic netlink family is removed while there are still open
+- *    sockets.
+  * @attrbuf: buffer to store parsed attributes
+  * @family_list: family list
+  * @mcgrps: multicast groups used by this family (private)
+@@ -61,8 +55,6 @@ struct genl_family {
+       void                    (*post_doit)(const struct genl_ops *ops,
+                                            struct sk_buff *skb,
+                                            struct genl_info *info);
+-      int                     (*mcast_bind)(struct net *net, int group);
+-      void                    (*mcast_unbind)(struct net *net, int group);
+       struct nlattr **        attrbuf;        /* private */
+       const struct genl_ops * ops;            /* private */
+       const struct genl_multicast_group *mcgrps; /* private */
+--- a/net/netlink/genetlink.c
++++ b/net/netlink/genetlink.c
+@@ -992,63 +992,11 @@ static const struct genl_multicast_group
+       { .name = "notify", },
+ };
+-static int genl_bind(struct net *net, int group)
+-{
+-      int i, err = -ENOENT;
+-
+-      down_read(&cb_lock);
+-      for (i = 0; i < GENL_FAM_TAB_SIZE; i++) {
+-              struct genl_family *f;
+-
+-              list_for_each_entry(f, genl_family_chain(i), family_list) {
+-                      if (group >= f->mcgrp_offset &&
+-                          group < f->mcgrp_offset + f->n_mcgrps) {
+-                              int fam_grp = group - f->mcgrp_offset;
+-
+-                              if (!f->netnsok && net != &init_net)
+-                                      err = -ENOENT;
+-                              else if (f->mcast_bind)
+-                                      err = f->mcast_bind(net, fam_grp);
+-                              else
+-                                      err = 0;
+-                              break;
+-                      }
+-              }
+-      }
+-      up_read(&cb_lock);
+-
+-      return err;
+-}
+-
+-static void genl_unbind(struct net *net, int group)
+-{
+-      int i;
+-
+-      down_read(&cb_lock);
+-      for (i = 0; i < GENL_FAM_TAB_SIZE; i++) {
+-              struct genl_family *f;
+-
+-              list_for_each_entry(f, genl_family_chain(i), family_list) {
+-                      if (group >= f->mcgrp_offset &&
+-                          group < f->mcgrp_offset + f->n_mcgrps) {
+-                              int fam_grp = group - f->mcgrp_offset;
+-
+-                              if (f->mcast_unbind)
+-                                      f->mcast_unbind(net, fam_grp);
+-                              break;
+-                      }
+-              }
+-      }
+-      up_read(&cb_lock);
+-}
+-
+ static int __net_init genl_pernet_init(struct net *net)
+ {
+       struct netlink_kernel_cfg cfg = {
+               .input          = genl_rcv,
+               .flags          = NL_CFG_F_NONROOT_RECV,
+-              .bind           = genl_bind,
+-              .unbind         = genl_unbind,
+       };
+       /* we'll bump the group number right afterwards */
diff --git a/queue-4.9/ipv4-fill-fl4_icmp_-type-code-in-ping_v4_sendmsg.patch b/queue-4.9/ipv4-fill-fl4_icmp_-type-code-in-ping_v4_sendmsg.patch
new file mode 100644 (file)
index 0000000..f877860
--- /dev/null
@@ -0,0 +1,50 @@
+From foo@baz Fri 17 Jul 2020 11:04:44 AM CEST
+From: Sabrina Dubroca <sd@queasysnail.net>
+Date: Fri, 3 Jul 2020 17:00:32 +0200
+Subject: ipv4: fill fl4_icmp_{type,code} in ping_v4_sendmsg
+
+From: Sabrina Dubroca <sd@queasysnail.net>
+
+[ Upstream commit 5eff06902394425c722f0a44d9545909a8800f79 ]
+
+IPv4 ping sockets don't set fl4.fl4_icmp_{type,code}, which leads to
+incomplete IPsec ACQUIRE messages being sent to userspace. Currently,
+both raw sockets and IPv6 ping sockets set those fields.
+
+Expected output of "ip xfrm monitor":
+    acquire proto esp
+      sel src 10.0.2.15/32 dst 8.8.8.8/32 proto icmp type 8 code 0 dev ens4
+      policy src 10.0.2.15/32 dst 8.8.8.8/32
+        <snip>
+
+Currently with ping sockets:
+    acquire proto esp
+      sel src 10.0.2.15/32 dst 8.8.8.8/32 proto icmp type 0 code 0 dev ens4
+      policy src 10.0.2.15/32 dst 8.8.8.8/32
+        <snip>
+
+The Libreswan test suite found this problem after Fedora changed the
+value for the sysctl net.ipv4.ping_group_range.
+
+Fixes: c319b4d76b9e ("net: ipv4: add IPPROTO_ICMP socket kind")
+Reported-by: Paul Wouters <pwouters@redhat.com>
+Tested-by: Paul Wouters <pwouters@redhat.com>
+Signed-off-by: Sabrina Dubroca <sd@queasysnail.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/ping.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -800,6 +800,9 @@ static int ping_v4_sendmsg(struct sock *
+                          RT_SCOPE_UNIVERSE, sk->sk_protocol,
+                          inet_sk_flowi_flags(sk), faddr, saddr, 0, 0);
++      fl4.fl4_icmp_type = user_icmph.type;
++      fl4.fl4_icmp_code = user_icmph.code;
++
+       security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
+       rt = ip_route_output_flow(net, &fl4, sk);
+       if (IS_ERR(rt)) {
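
For context, the code path touched here is the unprivileged ICMP "ping
socket". A rough user-space sketch of exercising it (it assumes
net.ipv4.ping_group_range covers the caller's group, and 8.8.8.8 is just an
example destination):

    #include <stdio.h>
    #include <unistd.h>
    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <netinet/ip_icmp.h>
    #include <sys/socket.h>

    int main(void)
    {
        /* SOCK_DGRAM + IPPROTO_ICMP is the "ping socket" whose sendmsg()
         * path is ping_v4_sendmsg(). */
        int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_ICMP);
        if (fd < 0) {
            perror("socket");   /* likely ping_group_range does not allow us */
            return 1;
        }

        struct icmphdr icmp = {0};
        icmp.type = ICMP_ECHO;            /* the value fl4_icmp_type now carries */
        icmp.code = 0;                    /* ... and fl4_icmp_code */
        icmp.un.echo.sequence = htons(1);
        /* echo id and checksum are filled in by the kernel for ping sockets */

        struct sockaddr_in dst = {0};
        dst.sin_family = AF_INET;
        inet_pton(AF_INET, "8.8.8.8", &dst.sin_addr);

        if (sendto(fd, &icmp, sizeof(icmp), 0,
                   (struct sockaddr *)&dst, sizeof(dst)) < 0)
            perror("sendto");

        close(fd);
        return 0;
    }
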
diff --git a/queue-4.9/l2tp-remove-skb_dst_set-from-l2tp_xmit_skb.patch b/queue-4.9/l2tp-remove-skb_dst_set-from-l2tp_xmit_skb.patch
new file mode 100644 (file)
index 0000000..cae7789
--- /dev/null
@@ -0,0 +1,60 @@
+From foo@baz Fri 17 Jul 2020 11:04:44 AM CEST
+From: Xin Long <lucien.xin@gmail.com>
+Date: Tue, 7 Jul 2020 02:02:32 +0800
+Subject: l2tp: remove skb_dst_set() from l2tp_xmit_skb()
+
+From: Xin Long <lucien.xin@gmail.com>
+
+[ Upstream commit 27d53323664c549b5bb2dfaaf6f7ad6e0376a64e ]
+
+In the tx path of l2tp, l2tp_xmit_skb() calls skb_dst_set() to set
+skb's dst. However, it will eventually call inet6_csk_xmit() or
+ip_queue_xmit() where skb's dst will be overwritten by:
+
+   skb_dst_set_noref(skb, dst);
+
+without releasing the old dst in the skb. This causes a dst/dev refcnt leak:
+
+  unregister_netdevice: waiting for eth0 to become free. Usage count = 1
+
+This can be reproduced by simply running:
+
+  # modprobe l2tp_eth && modprobe l2tp_ip
+  # sh ./tools/testing/selftests/net/l2tp.sh
+
+So before going to inet6_csk_xmit() or ip_queue_xmit(), skb's dst
+should be dropped. This patch is to fix it by removing skb_dst_set()
+from l2tp_xmit_skb() and moving skb_dst_drop() into l2tp_xmit_core().
+
+Fixes: 3557baabf280 ("[L2TP]: PPP over L2TP driver core")
+Reported-by: Hangbin Liu <liuhangbin@gmail.com>
+Signed-off-by: Xin Long <lucien.xin@gmail.com>
+Acked-by: James Chapman <jchapman@katalix.com>
+Tested-by: James Chapman <jchapman@katalix.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/l2tp/l2tp_core.c |    5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -1134,6 +1134,7 @@ static int l2tp_xmit_core(struct l2tp_se
+       /* Queue the packet to IP for output */
+       skb->ignore_df = 1;
++      skb_dst_drop(skb);
+ #if IS_ENABLED(CONFIG_IPV6)
+       if (tunnel->sock->sk_family == PF_INET6 && !tunnel->v4mapped)
+               error = inet6_csk_xmit(tunnel->sock, skb, NULL);
+@@ -1198,10 +1199,6 @@ int l2tp_xmit_skb(struct l2tp_session *s
+               goto out_unlock;
+       }
+-      /* Get routing info from the tunnel socket */
+-      skb_dst_drop(skb);
+-      skb_dst_set(skb, sk_dst_check(sk, 0));
+-
+       inet = inet_sk(sk);
+       fl = &inet->cork.fl;
+       switch (tunnel->encap) {
diff --git a/queue-4.9/llc-make-sure-applications-use-arphrd_ether.patch b/queue-4.9/llc-make-sure-applications-use-arphrd_ether.patch
new file mode 100644 (file)
index 0000000..8e62424
--- /dev/null
@@ -0,0 +1,159 @@
+From foo@baz Fri 17 Jul 2020 11:04:44 AM CEST
+From: Eric Dumazet <edumazet@google.com>
+Date: Sat, 27 Jun 2020 13:31:50 -0700
+Subject: llc: make sure applications use ARPHRD_ETHER
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit a9b1110162357689a34992d5c925852948e5b9fd ]
+
+syzbot was able to trigger a bug by tricking AF_LLC with
+a nonsensical addr->sllc_arphrd.
+
+It seems clear LLC requires an Ethernet device.
+
+Back in commit abf9d537fea2 ("llc: add support for SO_BINDTODEVICE"),
+Octavian Purdila added the possibility for applications to use a zero
+value for sllc_arphrd; we convert it to ARPHRD_ETHER so as not to cause
+regressions in existing applications.
+
+BUG: KASAN: use-after-free in __read_once_size include/linux/compiler.h:199 [inline]
+BUG: KASAN: use-after-free in list_empty include/linux/list.h:268 [inline]
+BUG: KASAN: use-after-free in waitqueue_active include/linux/wait.h:126 [inline]
+BUG: KASAN: use-after-free in wq_has_sleeper include/linux/wait.h:160 [inline]
+BUG: KASAN: use-after-free in skwq_has_sleeper include/net/sock.h:2092 [inline]
+BUG: KASAN: use-after-free in sock_def_write_space+0x642/0x670 net/core/sock.c:2813
+Read of size 8 at addr ffff88801e0b4078 by task ksoftirqd/3/27
+
+CPU: 3 PID: 27 Comm: ksoftirqd/3 Not tainted 5.5.0-rc1-syzkaller #0
+Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.12.0-59-gc9ba5276e321-prebuilt.qemu.org 04/01/2014
+Call Trace:
+ __dump_stack lib/dump_stack.c:77 [inline]
+ dump_stack+0x197/0x210 lib/dump_stack.c:118
+ print_address_description.constprop.0.cold+0xd4/0x30b mm/kasan/report.c:374
+ __kasan_report.cold+0x1b/0x41 mm/kasan/report.c:506
+ kasan_report+0x12/0x20 mm/kasan/common.c:639
+ __asan_report_load8_noabort+0x14/0x20 mm/kasan/generic_report.c:135
+ __read_once_size include/linux/compiler.h:199 [inline]
+ list_empty include/linux/list.h:268 [inline]
+ waitqueue_active include/linux/wait.h:126 [inline]
+ wq_has_sleeper include/linux/wait.h:160 [inline]
+ skwq_has_sleeper include/net/sock.h:2092 [inline]
+ sock_def_write_space+0x642/0x670 net/core/sock.c:2813
+ sock_wfree+0x1e1/0x260 net/core/sock.c:1958
+ skb_release_head_state+0xeb/0x260 net/core/skbuff.c:652
+ skb_release_all+0x16/0x60 net/core/skbuff.c:663
+ __kfree_skb net/core/skbuff.c:679 [inline]
+ consume_skb net/core/skbuff.c:838 [inline]
+ consume_skb+0xfb/0x410 net/core/skbuff.c:832
+ __dev_kfree_skb_any+0xa4/0xd0 net/core/dev.c:2967
+ dev_kfree_skb_any include/linux/netdevice.h:3650 [inline]
+ e1000_unmap_and_free_tx_resource.isra.0+0x21b/0x3a0 drivers/net/ethernet/intel/e1000/e1000_main.c:1963
+ e1000_clean_tx_irq drivers/net/ethernet/intel/e1000/e1000_main.c:3854 [inline]
+ e1000_clean+0x4cc/0x1d10 drivers/net/ethernet/intel/e1000/e1000_main.c:3796
+ napi_poll net/core/dev.c:6532 [inline]
+ net_rx_action+0x508/0x1120 net/core/dev.c:6600
+ __do_softirq+0x262/0x98c kernel/softirq.c:292
+ run_ksoftirqd kernel/softirq.c:603 [inline]
+ run_ksoftirqd+0x8e/0x110 kernel/softirq.c:595
+ smpboot_thread_fn+0x6a3/0xa40 kernel/smpboot.c:165
+ kthread+0x361/0x430 kernel/kthread.c:255
+ ret_from_fork+0x24/0x30 arch/x86/entry/entry_64.S:352
+
+Allocated by task 8247:
+ save_stack+0x23/0x90 mm/kasan/common.c:72
+ set_track mm/kasan/common.c:80 [inline]
+ __kasan_kmalloc mm/kasan/common.c:513 [inline]
+ __kasan_kmalloc.constprop.0+0xcf/0xe0 mm/kasan/common.c:486
+ kasan_slab_alloc+0xf/0x20 mm/kasan/common.c:521
+ slab_post_alloc_hook mm/slab.h:584 [inline]
+ slab_alloc mm/slab.c:3320 [inline]
+ kmem_cache_alloc+0x121/0x710 mm/slab.c:3484
+ sock_alloc_inode+0x1c/0x1d0 net/socket.c:240
+ alloc_inode+0x68/0x1e0 fs/inode.c:230
+ new_inode_pseudo+0x19/0xf0 fs/inode.c:919
+ sock_alloc+0x41/0x270 net/socket.c:560
+ __sock_create+0xc2/0x730 net/socket.c:1384
+ sock_create net/socket.c:1471 [inline]
+ __sys_socket+0x103/0x220 net/socket.c:1513
+ __do_sys_socket net/socket.c:1522 [inline]
+ __se_sys_socket net/socket.c:1520 [inline]
+ __ia32_sys_socket+0x73/0xb0 net/socket.c:1520
+ do_syscall_32_irqs_on arch/x86/entry/common.c:337 [inline]
+ do_fast_syscall_32+0x27b/0xe16 arch/x86/entry/common.c:408
+ entry_SYSENTER_compat+0x70/0x7f arch/x86/entry/entry_64_compat.S:139
+
+Freed by task 17:
+ save_stack+0x23/0x90 mm/kasan/common.c:72
+ set_track mm/kasan/common.c:80 [inline]
+ kasan_set_free_info mm/kasan/common.c:335 [inline]
+ __kasan_slab_free+0x102/0x150 mm/kasan/common.c:474
+ kasan_slab_free+0xe/0x10 mm/kasan/common.c:483
+ __cache_free mm/slab.c:3426 [inline]
+ kmem_cache_free+0x86/0x320 mm/slab.c:3694
+ sock_free_inode+0x20/0x30 net/socket.c:261
+ i_callback+0x44/0x80 fs/inode.c:219
+ __rcu_reclaim kernel/rcu/rcu.h:222 [inline]
+ rcu_do_batch kernel/rcu/tree.c:2183 [inline]
+ rcu_core+0x570/0x1540 kernel/rcu/tree.c:2408
+ rcu_core_si+0x9/0x10 kernel/rcu/tree.c:2417
+ __do_softirq+0x262/0x98c kernel/softirq.c:292
+
+The buggy address belongs to the object at ffff88801e0b4000
+ which belongs to the cache sock_inode_cache of size 1152
+The buggy address is located 120 bytes inside of
+ 1152-byte region [ffff88801e0b4000, ffff88801e0b4480)
+The buggy address belongs to the page:
+page:ffffea0000782d00 refcount:1 mapcount:0 mapping:ffff88807aa59c40 index:0xffff88801e0b4ffd
+raw: 00fffe0000000200 ffffea00008e6c88 ffffea0000782d48 ffff88807aa59c40
+raw: ffff88801e0b4ffd ffff88801e0b4000 0000000100000003 0000000000000000
+page dumped because: kasan: bad access detected
+
+Memory state around the buggy address:
+ ffff88801e0b3f00: fb fb fb fb fb fb fb fb fb fb fb fb fc fc fc fc
+ ffff88801e0b3f80: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+>ffff88801e0b4000: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+                                                                ^
+ ffff88801e0b4080: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+ ffff88801e0b4100: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+
+Fixes: abf9d537fea2 ("llc: add support for SO_BINDTODEVICE")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/llc/af_llc.c |   10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+--- a/net/llc/af_llc.c
++++ b/net/llc/af_llc.c
+@@ -271,6 +271,10 @@ static int llc_ui_autobind(struct socket
+       if (!sock_flag(sk, SOCK_ZAPPED))
+               goto out;
++      if (!addr->sllc_arphrd)
++              addr->sllc_arphrd = ARPHRD_ETHER;
++      if (addr->sllc_arphrd != ARPHRD_ETHER)
++              goto out;
+       rc = -ENODEV;
+       if (sk->sk_bound_dev_if) {
+               llc->dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if);
+@@ -328,15 +332,15 @@ static int llc_ui_bind(struct socket *so
+       if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr)))
+               goto out;
+       rc = -EAFNOSUPPORT;
+-      if (unlikely(addr->sllc_family != AF_LLC))
++      if (!addr->sllc_arphrd)
++              addr->sllc_arphrd = ARPHRD_ETHER;
++      if (unlikely(addr->sllc_family != AF_LLC || addr->sllc_arphrd != ARPHRD_ETHER))
+               goto out;
+       rc = -ENODEV;
+       rcu_read_lock();
+       if (sk->sk_bound_dev_if) {
+               llc->dev = dev_get_by_index_rcu(&init_net, sk->sk_bound_dev_if);
+               if (llc->dev) {
+-                      if (!addr->sllc_arphrd)
+-                              addr->sllc_arphrd = llc->dev->type;
+                       if (is_zero_ether_addr(addr->sllc_mac))
+                               memcpy(addr->sllc_mac, llc->dev->dev_addr,
+                                      IFHWADDRLEN);
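
A sketch of the bind() call that llc_ui_bind() validates after this change:
sllc_arphrd must now be ARPHRD_ETHER (a zero value is still normalized to
ARPHRD_ETHER). Creating AF_LLC sockets typically requires CAP_NET_RAW, and
the SAP value below is an arbitrary placeholder:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/llc.h>          /* struct sockaddr_llc */
    #include <net/if_arp.h>         /* ARPHRD_ETHER */

    int main(void)
    {
        int fd = socket(AF_LLC, SOCK_DGRAM, 0);
        if (fd < 0) {
            perror("socket(AF_LLC)");       /* needs CAP_NET_RAW */
            return 1;
        }

        struct sockaddr_llc addr;
        memset(&addr, 0, sizeof(addr));
        addr.sllc_family = AF_LLC;
        addr.sllc_arphrd = ARPHRD_ETHER;    /* anything else is now rejected */
        addr.sllc_sap    = 0x42;            /* placeholder SAP */
        /* addr.sllc_mac would carry a local interface MAC if needed */

        if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
            perror("bind");

        close(fd);
        return 0;
    }
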
diff --git a/queue-4.9/net-added-pointer-check-for-dst-ops-neigh_lookup-in-dst_neigh_lookup_skb.patch b/queue-4.9/net-added-pointer-check-for-dst-ops-neigh_lookup-in-dst_neigh_lookup_skb.patch
new file mode 100644 (file)
index 0000000..3fe5c45
--- /dev/null
@@ -0,0 +1,121 @@
+From foo@baz Fri 17 Jul 2020 11:04:44 AM CEST
+From: Martin Varghese <martin.varghese@nokia.com>
+Date: Sun, 5 Jul 2020 14:23:49 +0530
+Subject: net: Added pointer check for dst->ops->neigh_lookup in dst_neigh_lookup_skb
+
+From: Martin Varghese <martin.varghese@nokia.com>
+
+[ Upstream commit 394de110a73395de2ca4516b0de435e91b11b604 ]
+
+The packets from tunnel devices (e.g. bareudp) may have only
+metadata in the skb's dst pointer. Hence a pointer check of
+neigh_lookup is needed in dst_neigh_lookup_skb().
+
+The kernel crashes when packets from a bareudp device are processed in
+the kernel neighbour subsystem.
+
+[  133.384484] BUG: kernel NULL pointer dereference, address: 0000000000000000
+[  133.385240] #PF: supervisor instruction fetch in kernel mode
+[  133.385828] #PF: error_code(0x0010) - not-present page
+[  133.386603] PGD 0 P4D 0
+[  133.386875] Oops: 0010 [#1] SMP PTI
+[  133.387275] CPU: 0 PID: 5045 Comm: ping Tainted: G        W         5.8.0-rc2+ #15
+[  133.388052] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2011
+[  133.391076] RIP: 0010:0x0
+[  133.392401] Code: Bad RIP value.
+[  133.394029] RSP: 0018:ffffb79980003d50 EFLAGS: 00010246
+[  133.396656] RAX: 0000000080000102 RBX: ffff9de2fe0d6600 RCX: ffff9de2fe5e9d00
+[  133.399018] RDX: 0000000000000000 RSI: ffff9de2fe5e9d00 RDI: ffff9de2fc21b400
+[  133.399685] RBP: ffff9de2fe5e9d00 R08: 0000000000000000 R09: 0000000000000000
+[  133.400350] R10: ffff9de2fbc6be22 R11: ffff9de2fe0d6600 R12: ffff9de2fc21b400
+[  133.401010] R13: ffff9de2fe0d6628 R14: 0000000000000001 R15: 0000000000000003
+[  133.401667] FS:  00007fe014918740(0000) GS:ffff9de2fec00000(0000) knlGS:0000000000000000
+[  133.402412] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[  133.402948] CR2: ffffffffffffffd6 CR3: 000000003bb72000 CR4: 00000000000006f0
+[  133.403611] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[  133.404270] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+[  133.404933] Call Trace:
+[  133.405169]  <IRQ>
+[  133.405367]  __neigh_update+0x5a4/0x8f0
+[  133.405734]  arp_process+0x294/0x820
+[  133.406076]  ? __netif_receive_skb_core+0x866/0xe70
+[  133.406557]  arp_rcv+0x129/0x1c0
+[  133.406882]  __netif_receive_skb_one_core+0x95/0xb0
+[  133.407340]  process_backlog+0xa7/0x150
+[  133.407705]  net_rx_action+0x2af/0x420
+[  133.408457]  __do_softirq+0xda/0x2a8
+[  133.408813]  asm_call_on_stack+0x12/0x20
+[  133.409290]  </IRQ>
+[  133.409519]  do_softirq_own_stack+0x39/0x50
+[  133.410036]  do_softirq+0x50/0x60
+[  133.410401]  __local_bh_enable_ip+0x50/0x60
+[  133.410871]  ip_finish_output2+0x195/0x530
+[  133.411288]  ip_output+0x72/0xf0
+[  133.411673]  ? __ip_finish_output+0x1f0/0x1f0
+[  133.412122]  ip_send_skb+0x15/0x40
+[  133.412471]  raw_sendmsg+0x853/0xab0
+[  133.412855]  ? insert_pfn+0xfe/0x270
+[  133.413827]  ? vvar_fault+0xec/0x190
+[  133.414772]  sock_sendmsg+0x57/0x80
+[  133.415685]  __sys_sendto+0xdc/0x160
+[  133.416605]  ? syscall_trace_enter+0x1d4/0x2b0
+[  133.417679]  ? __audit_syscall_exit+0x1d9/0x280
+[  133.418753]  ? __prepare_exit_to_usermode+0x5d/0x1a0
+[  133.419819]  __x64_sys_sendto+0x24/0x30
+[  133.420848]  do_syscall_64+0x4d/0x90
+[  133.421768]  entry_SYSCALL_64_after_hwframe+0x44/0xa9
+[  133.422833] RIP: 0033:0x7fe013689c03
+[  133.423749] Code: Bad RIP value.
+[  133.424624] RSP: 002b:00007ffc7288f418 EFLAGS: 00000246 ORIG_RAX: 000000000000002c
+[  133.425940] RAX: ffffffffffffffda RBX: 000056151fc63720 RCX: 00007fe013689c03
+[  133.427225] RDX: 0000000000000040 RSI: 000056151fc63720 RDI: 0000000000000003
+[  133.428481] RBP: 00007ffc72890b30 R08: 000056151fc60500 R09: 0000000000000010
+[  133.429757] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000040
+[  133.431041] R13: 000056151fc636e0 R14: 000056151fc616bc R15: 0000000000000080
+[  133.432481] Modules linked in: mpls_iptunnel act_mirred act_tunnel_key cls_flower sch_ingress veth mpls_router ip_tunnel bareudp ip6_udp_tunnel udp_tunnel macsec udp_diag inet_diag unix_diag af_packet_diag netlink_diag binfmt_misc xt_MASQUERADE iptable_nat xt_addrtype xt_conntrack nf_nat nf_conntrack nf_defrag_ipv6 nf_defrag_ipv4 br_netfilter bridge stp llc ebtable_filter ebtables overlay ip6table_filter ip6_tables iptable_filter sunrpc ext4 mbcache jbd2 pcspkr i2c_piix4 virtio_balloon joydev ip_tables xfs libcrc32c ata_generic qxl pata_acpi drm_ttm_helper ttm drm_kms_helper syscopyarea sysfillrect sysimgblt fb_sys_fops drm ata_piix libata virtio_net net_failover virtio_console failover virtio_blk i2c_core virtio_pci virtio_ring serio_raw floppy virtio dm_mirror dm_region_hash dm_log dm_mod
+[  133.444045] CR2: 0000000000000000
+[  133.445082] ---[ end trace f4aeee1958fd1638 ]---
+[  133.446236] RIP: 0010:0x0
+[  133.447180] Code: Bad RIP value.
+[  133.448152] RSP: 0018:ffffb79980003d50 EFLAGS: 00010246
+[  133.449363] RAX: 0000000080000102 RBX: ffff9de2fe0d6600 RCX: ffff9de2fe5e9d00
+[  133.450835] RDX: 0000000000000000 RSI: ffff9de2fe5e9d00 RDI: ffff9de2fc21b400
+[  133.452237] RBP: ffff9de2fe5e9d00 R08: 0000000000000000 R09: 0000000000000000
+[  133.453722] R10: ffff9de2fbc6be22 R11: ffff9de2fe0d6600 R12: ffff9de2fc21b400
+[  133.455149] R13: ffff9de2fe0d6628 R14: 0000000000000001 R15: 0000000000000003
+[  133.456520] FS:  00007fe014918740(0000) GS:ffff9de2fec00000(0000) knlGS:0000000000000000
+[  133.458046] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[  133.459342] CR2: ffffffffffffffd6 CR3: 000000003bb72000 CR4: 00000000000006f0
+[  133.460782] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[  133.462240] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+[  133.463697] Kernel panic - not syncing: Fatal exception in interrupt
+[  133.465226] Kernel Offset: 0xfa00000 from 0xffffffff81000000 (relocation range: 0xffffffff80000000-0xffffffffbfffffff)
+[  133.467025] ---[ end Kernel panic - not syncing: Fatal exception in interrupt ]---
+
+Fixes: aaa0c23cb901 ("Fix dst_neigh_lookup/dst_neigh_lookup_skb return value handling bug")
+Signed-off-by: Martin Varghese <martin.varghese@nokia.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/dst.h |   10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/include/net/dst.h
++++ b/include/net/dst.h
+@@ -479,7 +479,15 @@ static inline struct neighbour *dst_neig
+ static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst,
+                                                    struct sk_buff *skb)
+ {
+-      struct neighbour *n =  dst->ops->neigh_lookup(dst, skb, NULL);
++      struct neighbour *n = NULL;
++
++      /* The packets from tunnel devices (eg bareudp) may have only
++       * metadata in the dst pointer of skb. Hence a pointer check of
++       * neigh_lookup is needed.
++       */
++      if (dst->ops->neigh_lookup)
++              n = dst->ops->neigh_lookup(dst, skb, NULL);
++
+       return IS_ERR(n) ? NULL : n;
+ }
diff --git a/queue-4.9/net-usb-qmi_wwan-add-support-for-quectel-eg95-lte-modem.patch b/queue-4.9/net-usb-qmi_wwan-add-support-for-quectel-eg95-lte-modem.patch
new file mode 100644 (file)
index 0000000..dd462d7
--- /dev/null
@@ -0,0 +1,41 @@
+From foo@baz Fri 17 Jul 2020 11:04:44 AM CEST
+From: AceLan Kao <acelan.kao@canonical.com>
+Date: Tue, 7 Jul 2020 16:14:45 +0800
+Subject: net: usb: qmi_wwan: add support for Quectel EG95 LTE modem
+
+From: AceLan Kao <acelan.kao@canonical.com>
+
+[ Upstream commit f815dd5cf48b905eeecf0a2b990e9b7ab048b4f1 ]
+
+Add support for Quectel Wireless Solutions Co., Ltd. EG95 LTE modem
+
+T:  Bus=01 Lev=01 Prnt=01 Port=02 Cnt=02 Dev#=  5 Spd=480 MxCh= 0
+D:  Ver= 2.00 Cls=ef(misc ) Sub=02 Prot=01 MxPS=64 #Cfgs=  1
+P:  Vendor=2c7c ProdID=0195 Rev=03.18
+S:  Manufacturer=Android
+S:  Product=Android
+C:  #Ifs= 5 Cfg#= 1 Atr=a0 MxPwr=500mA
+I:  If#=0x0 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=ff Driver=(none)
+I:  If#=0x1 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=(none)
+I:  If#=0x2 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=(none)
+I:  If#=0x3 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=(none)
+I:  If#=0x4 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=ff Driver=(none)
+
+Signed-off-by: AceLan Kao <acelan.kao@canonical.com>
+Acked-by: Bjørn Mork <bjorn@mork.no>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/qmi_wwan.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -962,6 +962,7 @@ static const struct usb_device_id produc
+       {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0  Mini PCIe */
+       {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
+       {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
++      {QMI_QUIRK_SET_DTR(0x2c7c, 0x0195, 4)}, /* Quectel EG95 */
+       {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)},    /* Quectel BG96 */
+       {QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)}, /* Quectel EP06 Mini PCIe */
diff --git a/queue-4.9/series b/queue-4.9/series
index 13ac3b5bb884873497cea0912890ed08d806fba5..77d01b3d477657c607e984137938519de5cc3402 100644 (file)
--- a/queue-4.9/series
@@ -24,3 +24,16 @@ drm-radeon-fix-double-free.patch
 arc-entry-fix-potential-efa-clobber-when-tif_syscall_trace.patch
 arc-elf-use-right-elf_arch.patch
 s390-mm-fix-huge-pte-soft-dirty-copying.patch
+ipv4-fill-fl4_icmp_-type-code-in-ping_v4_sendmsg.patch
+l2tp-remove-skb_dst_set-from-l2tp_xmit_skb.patch
+llc-make-sure-applications-use-arphrd_ether.patch
+net-added-pointer-check-for-dst-ops-neigh_lookup-in-dst_neigh_lookup_skb.patch
+net-usb-qmi_wwan-add-support-for-quectel-eg95-lte-modem.patch
+tcp-md5-add-missing-memory-barriers-in-tcp_md5_do_add-tcp_md5_hash_key.patch
+tcp-md5-refine-tcp_md5_do_add-tcp_md5_hash_key-barriers.patch
+genetlink-remove-genl_bind.patch
+tcp-make-sure-listeners-don-t-initialize-congestion-control-state.patch
+tcp-md5-do-not-send-silly-options-in-syncookies.patch
+tcp-md5-allow-changing-md5-keys-in-all-socket-states.patch
+cgroup-fix-cgroup_sk_alloc-for-sk_clone_lock.patch
+cgroup-fix-sock_cgroup_data-on-big-endian.patch
diff --git a/queue-4.9/tcp-make-sure-listeners-don-t-initialize-congestion-control-state.patch b/queue-4.9/tcp-make-sure-listeners-don-t-initialize-congestion-control-state.patch
new file mode 100644 (file)
index 0000000..73edd18
--- /dev/null
@@ -0,0 +1,145 @@
+From foo@baz Fri 17 Jul 2020 10:23:31 AM CEST
+From: Christoph Paasch <cpaasch@apple.com>
+Date: Wed, 8 Jul 2020 16:18:34 -0700
+Subject: tcp: make sure listeners don't initialize congestion-control state
+
+From: Christoph Paasch <cpaasch@apple.com>
+
+[ Upstream commit ce69e563b325f620863830c246a8698ccea52048 ]
+
+syzkaller found its way into setsockopt with TCP_CONGESTION "cdg".
+tcp_cdg_init() does a kcalloc to store the gradients. As sk_clone_lock
+just copies all the memory, the allocated pointer will be copied as
+well, if the app called setsockopt(..., TCP_CONGESTION) on the listener.
+If the socket is then destroyed before the congestion control
+has properly been initialized (through a call to tcp_init_transfer), we
+will end up freeing memory that does not belong to that particular
+socket, opening the door to a double-free:
+
+[   11.413102] ==================================================================
+[   11.414181] BUG: KASAN: double-free or invalid-free in tcp_cleanup_congestion_control+0x58/0xd0
+[   11.415329]
+[   11.415560] CPU: 3 PID: 4884 Comm: syz-executor.5 Not tainted 5.8.0-rc2 #80
+[   11.416544] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.12.1-0-ga5cab58e9a3f-prebuilt.qemu.org 04/01/2014
+[   11.418148] Call Trace:
+[   11.418534]  <IRQ>
+[   11.418834]  dump_stack+0x7d/0xb0
+[   11.419297]  print_address_description.constprop.0+0x1a/0x210
+[   11.422079]  kasan_report_invalid_free+0x51/0x80
+[   11.423433]  __kasan_slab_free+0x15e/0x170
+[   11.424761]  kfree+0x8c/0x230
+[   11.425157]  tcp_cleanup_congestion_control+0x58/0xd0
+[   11.425872]  tcp_v4_destroy_sock+0x57/0x5a0
+[   11.426493]  inet_csk_destroy_sock+0x153/0x2c0
+[   11.427093]  tcp_v4_syn_recv_sock+0xb29/0x1100
+[   11.427731]  tcp_get_cookie_sock+0xc3/0x4a0
+[   11.429457]  cookie_v4_check+0x13d0/0x2500
+[   11.433189]  tcp_v4_do_rcv+0x60e/0x780
+[   11.433727]  tcp_v4_rcv+0x2869/0x2e10
+[   11.437143]  ip_protocol_deliver_rcu+0x23/0x190
+[   11.437810]  ip_local_deliver+0x294/0x350
+[   11.439566]  __netif_receive_skb_one_core+0x15d/0x1a0
+[   11.441995]  process_backlog+0x1b1/0x6b0
+[   11.443148]  net_rx_action+0x37e/0xc40
+[   11.445361]  __do_softirq+0x18c/0x61a
+[   11.445881]  asm_call_on_stack+0x12/0x20
+[   11.446409]  </IRQ>
+[   11.446716]  do_softirq_own_stack+0x34/0x40
+[   11.447259]  do_softirq.part.0+0x26/0x30
+[   11.447827]  __local_bh_enable_ip+0x46/0x50
+[   11.448406]  ip_finish_output2+0x60f/0x1bc0
+[   11.450109]  __ip_queue_xmit+0x71c/0x1b60
+[   11.451861]  __tcp_transmit_skb+0x1727/0x3bb0
+[   11.453789]  tcp_rcv_state_process+0x3070/0x4d3a
+[   11.456810]  tcp_v4_do_rcv+0x2ad/0x780
+[   11.457995]  __release_sock+0x14b/0x2c0
+[   11.458529]  release_sock+0x4a/0x170
+[   11.459005]  __inet_stream_connect+0x467/0xc80
+[   11.461435]  inet_stream_connect+0x4e/0xa0
+[   11.462043]  __sys_connect+0x204/0x270
+[   11.465515]  __x64_sys_connect+0x6a/0xb0
+[   11.466088]  do_syscall_64+0x3e/0x70
+[   11.466617]  entry_SYSCALL_64_after_hwframe+0x44/0xa9
+[   11.467341] RIP: 0033:0x7f56046dc469
+[   11.467844] Code: Bad RIP value.
+[   11.468282] RSP: 002b:00007f5604dccdd8 EFLAGS: 00000246 ORIG_RAX: 000000000000002a
+[   11.469326] RAX: ffffffffffffffda RBX: 000000000068bf00 RCX: 00007f56046dc469
+[   11.470379] RDX: 0000000000000010 RSI: 0000000020000000 RDI: 0000000000000004
+[   11.471311] RBP: 00000000ffffffff R08: 0000000000000000 R09: 0000000000000000
+[   11.472286] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000
+[   11.473341] R13: 000000000041427c R14: 00007f5604dcd5c0 R15: 0000000000000003
+[   11.474321]
+[   11.474527] Allocated by task 4884:
+[   11.475031]  save_stack+0x1b/0x40
+[   11.475548]  __kasan_kmalloc.constprop.0+0xc2/0xd0
+[   11.476182]  tcp_cdg_init+0xf0/0x150
+[   11.476744]  tcp_init_congestion_control+0x9b/0x3a0
+[   11.477435]  tcp_set_congestion_control+0x270/0x32f
+[   11.478088]  do_tcp_setsockopt.isra.0+0x521/0x1a00
+[   11.478744]  __sys_setsockopt+0xff/0x1e0
+[   11.479259]  __x64_sys_setsockopt+0xb5/0x150
+[   11.479895]  do_syscall_64+0x3e/0x70
+[   11.480395]  entry_SYSCALL_64_after_hwframe+0x44/0xa9
+[   11.481097]
+[   11.481321] Freed by task 4872:
+[   11.481783]  save_stack+0x1b/0x40
+[   11.482230]  __kasan_slab_free+0x12c/0x170
+[   11.482839]  kfree+0x8c/0x230
+[   11.483240]  tcp_cleanup_congestion_control+0x58/0xd0
+[   11.483948]  tcp_v4_destroy_sock+0x57/0x5a0
+[   11.484502]  inet_csk_destroy_sock+0x153/0x2c0
+[   11.485144]  tcp_close+0x932/0xfe0
+[   11.485642]  inet_release+0xc1/0x1c0
+[   11.486131]  __sock_release+0xc0/0x270
+[   11.486697]  sock_close+0xc/0x10
+[   11.487145]  __fput+0x277/0x780
+[   11.487632]  task_work_run+0xeb/0x180
+[   11.488118]  __prepare_exit_to_usermode+0x15a/0x160
+[   11.488834]  do_syscall_64+0x4a/0x70
+[   11.489326]  entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+Wei Wang fixed a part of these CDG-malloc issues with commit c12014440750
+("tcp: memset ca_priv data to 0 properly").
+
+This patch fixes the listener scenario: we make sure that listeners
+setting the congestion control through setsockopt won't initialize it
+(thus CDG never allocates on listeners). For those who use AF_UNSPEC to
+reuse a socket, tcp_disconnect() is changed to clean up afterwards.
+
+(The issue can be reproduced at least down to v4.4.x.)
+
+Cc: Wei Wang <weiwan@google.com>
+Cc: Eric Dumazet <edumazet@google.com>
+Fixes: 2b0a8c9eee81 ("tcp: add CDG congestion control")
+Signed-off-by: Christoph Paasch <cpaasch@apple.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp.c      |    3 +++
+ net/ipv4/tcp_cong.c |    2 +-
+ 2 files changed, 4 insertions(+), 1 deletion(-)
+
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2299,6 +2299,9 @@ int tcp_disconnect(struct sock *sk, int
+       tp->snd_cwnd_cnt = 0;
+       tp->window_clamp = 0;
+       tp->delivered = 0;
++      if (icsk->icsk_ca_ops->release)
++              icsk->icsk_ca_ops->release(sk);
++      memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
+       tcp_set_ca_state(sk, TCP_CA_Open);
+       tp->is_sack_reneg = 0;
+       tcp_clear_retrans(tp);
+--- a/net/ipv4/tcp_cong.c
++++ b/net/ipv4/tcp_cong.c
+@@ -198,7 +198,7 @@ static void tcp_reinit_congestion_contro
+       icsk->icsk_ca_setsockopt = 1;
+       memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
+-      if (sk->sk_state != TCP_CLOSE)
++      if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
+               tcp_init_congestion_control(sk);
+ }
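
To make the trigger scenario concrete, here is a rough user-space sketch:
selecting a congestion control such as "cdg" on a listening socket. With this
patch the listener itself no longer initializes private CC state, so cdg's
kcalloc()'ed gradients cannot end up double-freed through cloned sockets.
(Using "cdg" assumes the tcp_cdg module is available and that the caller has
CAP_NET_ADMIN or the algorithm is in net.ipv4.tcp_allowed_congestion_control.)

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <netinet/in.h>
    #include <netinet/tcp.h>        /* TCP_CONGESTION */
    #include <sys/socket.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        if (fd < 0) {
            perror("socket");
            return 1;
        }

        const char cc[] = "cdg";
        if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, cc, strlen(cc)) < 0)
            perror("setsockopt(TCP_CONGESTION)");

        struct sockaddr_in addr = {0};
        addr.sin_family = AF_INET;
        addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
        addr.sin_port = 0;                  /* any free port */

        if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
            listen(fd, 8) < 0)
            perror("bind/listen");

        /* Sockets returned by accept() inherit the chosen congestion control
         * and initialize it themselves; the listener's icsk_ca_priv stays
         * untouched. */

        close(fd);
        return 0;
    }
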
diff --git a/queue-4.9/tcp-md5-add-missing-memory-barriers-in-tcp_md5_do_add-tcp_md5_hash_key.patch b/queue-4.9/tcp-md5-add-missing-memory-barriers-in-tcp_md5_do_add-tcp_md5_hash_key.patch
new file mode 100644 (file)
index 0000000..38bf37d
--- /dev/null
@@ -0,0 +1,64 @@
+From foo@baz Fri 17 Jul 2020 11:04:44 AM CEST
+From: Eric Dumazet <edumazet@google.com>
+Date: Tue, 30 Jun 2020 16:41:01 -0700
+Subject: tcp: md5: add missing memory barriers in tcp_md5_do_add()/tcp_md5_hash_key()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 6a2febec338df7e7699a52d00b2e1207dcf65b28 ]
+
+MD5 keys are read with RCU protection, and tcp_md5_do_add()
+might update a prior key in place.
+
+Normally, typical RCU updates would allocate a new piece
+of memory. In this case only key->key and key->keylen might
+be updated, and we do not care if an incoming packet could
+see the old key, the new one, or some intermediate value,
+since changing the key on a live flow is known to be problematic
+anyway.
+
+We only want to make sure that, in case key->keylen
+is changed, CPUs in tcp_md5_hash_key() won't try to use
+uninitialized data, or crash because key->keylen was
+read twice to feed sg_init_one() and ahash_request_set_crypt().
+
+Fixes: 9ea88a153001 ("tcp: md5: check md5 signature without socket lock")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp.c      |    7 +++++--
+ net/ipv4/tcp_ipv4.c |    3 +++
+ 2 files changed, 8 insertions(+), 2 deletions(-)
+
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -3207,10 +3207,13 @@ EXPORT_SYMBOL(tcp_md5_hash_skb_data);
+ int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key)
+ {
++      u8 keylen = key->keylen;
+       struct scatterlist sg;
+-      sg_init_one(&sg, key->key, key->keylen);
+-      ahash_request_set_crypt(hp->md5_req, &sg, NULL, key->keylen);
++      smp_rmb(); /* paired with smp_wmb() in tcp_md5_do_add() */
++
++      sg_init_one(&sg, key->key, keylen);
++      ahash_request_set_crypt(hp->md5_req, &sg, NULL, keylen);
+       return crypto_ahash_update(hp->md5_req);
+ }
+ EXPORT_SYMBOL(tcp_md5_hash_key);
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -938,6 +938,9 @@ int tcp_md5_do_add(struct sock *sk, cons
+       if (key) {
+               /* Pre-existing entry - just update that one. */
+               memcpy(key->key, newkey, newkeylen);
++
++              smp_wmb(); /* pairs with smp_rmb() in tcp_md5_hash_key() */
++
+               key->keylen = newkeylen;
+               return 0;
+       }
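
The ordering the two hunks above enforce can be pictured with a small
user-space analogue; the C11 release/acquire pair below plays the role of the
smp_wmb()/smp_rmb() pair (illustration only, not the kernel code, and the
struct is a stand-in for tcp_md5sig_key):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <string.h>

    #define MAXKEYLEN 80

    struct md5_key {
        uint8_t key[MAXKEYLEN];
        _Atomic uint8_t keylen;
    };

    /* writer side, mirroring tcp_md5_do_add(): key bytes are published
     * before the new length */
    void key_update(struct md5_key *k, const void *newkey, uint8_t newkeylen)
    {
        memcpy(k->key, newkey, newkeylen);
        atomic_store_explicit(&k->keylen, newkeylen, memory_order_release);
    }

    /* reader side, mirroring tcp_md5_hash_key(): the length is sampled
     * exactly once, so every byte it covers has been written, even if the
     * result is a mix of the old and the new key (which the changelog
     * explicitly tolerates) */
    uint8_t key_snapshot(struct md5_key *k, uint8_t *out)
    {
        uint8_t len = atomic_load_explicit(&k->keylen, memory_order_acquire);

        memcpy(out, k->key, len);
        return len;
    }
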
diff --git a/queue-4.9/tcp-md5-allow-changing-md5-keys-in-all-socket-states.patch b/queue-4.9/tcp-md5-allow-changing-md5-keys-in-all-socket-states.patch
new file mode 100644 (file)
index 0000000..d1a21d7
--- /dev/null
@@ -0,0 +1,66 @@
+From foo@baz Fri 17 Jul 2020 11:04:44 AM CEST
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 1 Jul 2020 18:39:33 -0700
+Subject: tcp: md5: allow changing MD5 keys in all socket states
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 1ca0fafd73c5268e8fc4b997094b8bb2bfe8deea ]
+
+This essentially reverts commit 721230326891 ("tcp: md5: reject TCP_MD5SIG
+or TCP_MD5SIG_EXT on established sockets")
+
+Mathieu reported that many vendors' BGP implementations can
+actually switch TCP MD5 on established flows.
+
+Quoting Mathieu :
+   Here is a list of a few network vendors along with their behavior
+   with respect to TCP MD5:
+
+   - Cisco: Allows for password to be changed, but within the hold-down
+     timer (~180 seconds).
+   - Juniper: When password is initially set on active connection it will
+     reset, but after that any subsequent password changes no network
+     resets.
+   - Nokia: No notes on if they flap the tcp connection or not.
+   - Ericsson/RedBack: Allows for 2 password (old/new) to co-exist until
+     both sides are ok with new passwords.
+   - Meta-Switch: Expects the password to be set before a connection is
+     attempted, but no further info on whether they reset the TCP
+     connection on a change.
+   - Avaya: Disable the neighbor, then set password, then re-enable.
+   - Zebos: Would normally allow the change when socket connected.
+
+We can revert my prior change because commit 9424e2e7ad93 ("tcp: md5: fix potential
+overestimation of TCP option space") removed the leak of 4 kernel bytes to
+the wire that was the main reason for my patch.
+
+While doing my investigations, I found a bug when an MD5 key is changed, leading
+to these commits that stable teams want to consider before backporting this revert:
+
+ Commit 6a2febec338d ("tcp: md5: add missing memory barriers in tcp_md5_do_add()/tcp_md5_hash_key()")
+ Commit e6ced831ef11 ("tcp: md5: refine tcp_md5_do_add()/tcp_md5_hash_key() barriers")
+
+Fixes: 721230326891 ("tcp: md5: reject TCP_MD5SIG or TCP_MD5SIG_EXT on established sockets")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp.c |    5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2672,10 +2672,7 @@ static int do_tcp_setsockopt(struct sock
+ #ifdef CONFIG_TCP_MD5SIG
+       case TCP_MD5SIG:
+-              if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
+-                      err = tp->af_specific->md5_parse(sk, optval, optlen);
+-              else
+-                      err = -EINVAL;
++              err = tp->af_specific->md5_parse(sk, optval, optlen);
+               break;
+ #endif
+       case TCP_USER_TIMEOUT:
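
What this revert re-enables, sketched from user space: installing or changing
a TCP-MD5 key on an already established socket via setsockopt(TCP_MD5SIG).
The helper below sticks to the long-standing struct tcp_md5sig fields
(tcpm_addr, tcpm_keylen, tcpm_key); fd is assumed to be a connected TCP
socket, peer its remote IPv4 address, and the password a placeholder:

    #include <stdio.h>
    #include <string.h>
    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <linux/tcp.h>          /* struct tcp_md5sig, TCP_MD5SIG */

    int set_tcp_md5_key(int fd, const struct sockaddr_in *peer, const char *pass)
    {
        struct tcp_md5sig md5;
        size_t len = strlen(pass);

        if (len > sizeof(md5.tcpm_key))
            return -1;

        memset(&md5, 0, sizeof(md5));
        memcpy(&md5.tcpm_addr, peer, sizeof(*peer));  /* key is bound to the peer */
        md5.tcpm_keylen = len;
        memcpy(md5.tcpm_key, pass, len);

        /* Before this revert the kernel rejected this with -EINVAL unless
         * the socket was in CLOSE or LISTEN state. */
        if (setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5)) < 0) {
            perror("setsockopt(TCP_MD5SIG)");
            return -1;
        }
        return 0;
    }

Both ends have to install the same key, keyed to each other's address, for
segments to keep being accepted after the switch.
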
diff --git a/queue-4.9/tcp-md5-do-not-send-silly-options-in-syncookies.patch b/queue-4.9/tcp-md5-do-not-send-silly-options-in-syncookies.patch
new file mode 100644 (file)
index 0000000..669a69e
--- /dev/null
@@ -0,0 +1,84 @@
+From foo@baz Fri 17 Jul 2020 10:23:31 AM CEST
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 1 Jul 2020 12:41:23 -0700
+Subject: tcp: md5: do not send silly options in SYNCOOKIES
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit e114e1e8ac9d31f25b9dd873bab5d80c1fc482ca ]
+
+Whenever cookie_init_timestamp() has been used to encode the
+ECN, SACK, and WSCALE options, we cannot remove the TS option in the SYNACK.
+
+Otherwise, tcp_synack_options() will still advertise options like WSCALE
+that we cannot deduce later when receiving the packet from the client
+to complete the 3WHS.
+
+Note that modern Linux TCP stacks won't use MD5+TS+SACK in a SYN packet,
+but we cannot know for sure that all TCP stacks have the same logic.
+
+Before the fix, a tcpdump would exhibit this wrong exchange:
+
+10:12:15.464591 IP C > S: Flags [S], seq 4202415601, win 65535, options [nop,nop,md5 valid,mss 1400,sackOK,TS val 456965269 ecr 0,nop,wscale 8], length 0
+10:12:15.464602 IP S > C: Flags [S.], seq 253516766, ack 4202415602, win 65535, options [nop,nop,md5 valid,mss 1400,nop,nop,sackOK,nop,wscale 8], length 0
+10:12:15.464611 IP C > S: Flags [.], ack 1, win 256, options [nop,nop,md5 valid], length 0
+10:12:15.464678 IP C > S: Flags [P.], seq 1:13, ack 1, win 256, options [nop,nop,md5 valid], length 12
+10:12:15.464685 IP S > C: Flags [.], ack 13, win 65535, options [nop,nop,md5 valid], length 0
+
+After this patch, the exchange looks saner:
+
+11:59:59.882990 IP C > S: Flags [S], seq 517075944, win 65535, options [nop,nop,md5 valid,mss 1400,sackOK,TS val 1751508483 ecr 0,nop,wscale 8], length 0
+11:59:59.883002 IP S > C: Flags [S.], seq 1902939253, ack 517075945, win 65535, options [nop,nop,md5 valid,mss 1400,sackOK,TS val 1751508479 ecr 1751508483,nop,wscale 8], length 0
+11:59:59.883012 IP C > S: Flags [.], ack 1, win 256, options [nop,nop,md5 valid,nop,nop,TS val 1751508483 ecr 1751508479], length 0
+11:59:59.883114 IP C > S: Flags [P.], seq 1:13, ack 1, win 256, options [nop,nop,md5 valid,nop,nop,TS val 1751508483 ecr 1751508479], length 12
+11:59:59.883122 IP S > C: Flags [.], ack 13, win 256, options [nop,nop,md5 valid,nop,nop,TS val 1751508483 ecr 1751508483], length 0
+11:59:59.883152 IP S > C: Flags [P.], seq 1:13, ack 13, win 256, options [nop,nop,md5 valid,nop,nop,TS val 1751508484 ecr 1751508483], length 12
+11:59:59.883170 IP C > S: Flags [.], ack 13, win 256, options [nop,nop,md5 valid,nop,nop,TS val 1751508484 ecr 1751508484], length 0
+
+Of course, no SACK block will ever be added later, but nothing should break.
+Technically, we could remove the 4 nops included in the MD5+TS options,
+but again some stacks could break on seeing unconventional alignment.
+
+Fixes: 4957faade11b ("TCPCT part 1g: Responder Cookie => Initiator")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Florian Westphal <fw@strlen.de>
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_output.c |   10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -615,7 +615,8 @@ static unsigned int tcp_synack_options(s
+                                      unsigned int mss, struct sk_buff *skb,
+                                      struct tcp_out_options *opts,
+                                      const struct tcp_md5sig_key *md5,
+-                                     struct tcp_fastopen_cookie *foc)
++                                     struct tcp_fastopen_cookie *foc,
++                                     enum tcp_synack_type synack_type)
+ {
+       struct inet_request_sock *ireq = inet_rsk(req);
+       unsigned int remaining = MAX_TCP_OPTION_SPACE;
+@@ -630,7 +631,8 @@ static unsigned int tcp_synack_options(s
+                * rather than TS in order to fit in better with old,
+                * buggy kernels, but that was deemed to be unnecessary.
+                */
+-              ireq->tstamp_ok &= !ireq->sack_ok;
++              if (synack_type != TCP_SYNACK_COOKIE)
++                      ireq->tstamp_ok &= !ireq->sack_ok;
+       }
+ #endif
+@@ -3165,8 +3167,8 @@ struct sk_buff *tcp_make_synack(const st
+       md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req));
+ #endif
+       skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4);
+-      tcp_header_size = tcp_synack_options(req, mss, skb, &opts, md5, foc) +
+-                        sizeof(*th);
++      tcp_header_size = tcp_synack_options(req, mss, skb, &opts, md5,
++                                           foc, synack_type) + sizeof(*th);
+       skb_push(skb, tcp_header_size);
+       skb_reset_transport_header(skb);
diff --git a/queue-4.9/tcp-md5-refine-tcp_md5_do_add-tcp_md5_hash_key-barriers.patch b/queue-4.9/tcp-md5-refine-tcp_md5_do_add-tcp_md5_hash_key-barriers.patch
new file mode 100644 (file)
index 0000000..7b125ef
--- /dev/null
@@ -0,0 +1,90 @@
+From foo@baz Fri 17 Jul 2020 11:04:44 AM CEST
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 1 Jul 2020 11:43:04 -0700
+Subject: tcp: md5: refine tcp_md5_do_add()/tcp_md5_hash_key() barriers
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit e6ced831ef11a2a06e8d00aad9d4fc05b610bf38 ]
+
+My prior fix went a bit too far, according to Herbert and Mathieu.
+
+Since we accept that concurrent TCP MD5 lookups might see inconsistent
+keys, we can use READ_ONCE()/WRITE_ONCE() instead of smp_rmb()/smp_wmb().
+
+Clearing all of key->key[] is needed to avoid possible KMSAN reports
+if key->keylen is increased. Since tcp_md5_do_add() is not a fast path,
+using __GFP_ZERO to clear the whole struct tcp_md5sig_key is simpler.
+
+data_race() was added in linux-5.8 and will prevent KCSAN reports;
+it can safely be removed in stable backports if data_race() is
+not yet backported.
+
+v2: use data_race() both in tcp_md5_hash_key() and tcp_md5_do_add()
+
+Fixes: 6a2febec338d ("tcp: md5: add missing memory barriers in tcp_md5_do_add()/tcp_md5_hash_key()")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Cc: Marco Elver <elver@google.com>
+Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp.c      |    6 +++---
+ net/ipv4/tcp_ipv4.c |   14 ++++++++++----
+ 2 files changed, 13 insertions(+), 7 deletions(-)
+
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -3207,13 +3207,13 @@ EXPORT_SYMBOL(tcp_md5_hash_skb_data);
+ int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key)
+ {
+-      u8 keylen = key->keylen;
++      u8 keylen = READ_ONCE(key->keylen); /* paired with WRITE_ONCE() in tcp_md5_do_add */
+       struct scatterlist sg;
+-      smp_rmb(); /* paired with smp_wmb() in tcp_md5_do_add() */
+-
+       sg_init_one(&sg, key->key, keylen);
+       ahash_request_set_crypt(hp->md5_req, &sg, NULL, keylen);
++
++      /* tcp_md5_do_add() might change key->key under us */
+       return crypto_ahash_update(hp->md5_req);
+ }
+ EXPORT_SYMBOL(tcp_md5_hash_key);
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -936,12 +936,18 @@ int tcp_md5_do_add(struct sock *sk, cons
+       key = tcp_md5_do_lookup(sk, addr, family);
+       if (key) {
+-              /* Pre-existing entry - just update that one. */
++              /* Pre-existing entry - just update that one.
++               * Note that the key might be used concurrently.
++               */
+               memcpy(key->key, newkey, newkeylen);
+-              smp_wmb(); /* pairs with smp_rmb() in tcp_md5_hash_key() */
++              /* Pairs with READ_ONCE() in tcp_md5_hash_key().
++               * Also note that a reader could catch new key->keylen value
++               * but old key->key[], this is the reason we use __GFP_ZERO
++               * at sock_kmalloc() time below these lines.
++               */
++              WRITE_ONCE(key->keylen, newkeylen);
+-              key->keylen = newkeylen;
+               return 0;
+       }
+@@ -957,7 +963,7 @@ int tcp_md5_do_add(struct sock *sk, cons
+               rcu_assign_pointer(tp->md5sig_info, md5sig);
+       }
+-      key = sock_kmalloc(sk, sizeof(*key), gfp);
++      key = sock_kmalloc(sk, sizeof(*key), gfp | __GFP_ZERO);
+       if (!key)
+               return -ENOMEM;
+       if (!tcp_alloc_md5sig_pool()) {
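
As a follow-up to the C11 analogue shown after the earlier barrier patch:
with this refinement the fence pair becomes a single READ_ONCE()/WRITE_ONCE()
on keylen (modelled with relaxed atomics below), and allocating the key
zeroed (the __GFP_ZERO part) means a reader that catches a grown keylen
together with old key bytes still reads initialized memory. A sketch under
those assumptions, not kernel code:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    #define MAXKEYLEN 80

    struct md5_key {
        uint8_t key[MAXKEYLEN];
        _Atomic uint8_t keylen;
    };

    /* __GFP_ZERO analogue: hand out fully zeroed key structures */
    struct md5_key *key_alloc(void)
    {
        return calloc(1, sizeof(struct md5_key));
    }

    void key_update(struct md5_key *k, const void *newkey, uint8_t newkeylen)
    {
        memcpy(k->key, newkey, newkeylen);          /* may race with readers */
        /* WRITE_ONCE() analogue: one untorn store of the new length */
        atomic_store_explicit(&k->keylen, newkeylen, memory_order_relaxed);
    }

    uint8_t key_snapshot(struct md5_key *k, uint8_t *out)
    {
        /* READ_ONCE() analogue: the length is sampled exactly once */
        uint8_t len = atomic_load_explicit(&k->keylen, memory_order_relaxed);

        /* old bytes, new bytes or zero padding, but never uninitialized */
        memcpy(out, k->key, len);
        return len;
    }
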