git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.14-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 21 Jun 2018 00:57:25 +0000 (09:57 +0900)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 21 Jun 2018 00:57:25 +0000 (09:57 +0900)
added patches:
bonding-re-evaluate-force_primary-when-the-primary-slave-name-changes.patch
cdc_ncm-avoid-padding-beyond-end-of-skb.patch
hv_netvsc-fix-a-network-regression-after-ifdown-ifup.patch
ipv6-allow-pmtu-exceptions-to-local-routes.patch
net-dsa-add-error-handling-for-pskb_trim_rcsum.patch
net-in-virtio_net_hdr-only-add-vlan_hlen-to-csum_start-if-payload-holds-vlan.patch
net-sched-act_simple-fix-parsing-of-tca_def_data.patch
socket-close-race-condition-between-sock_close-and-sockfs_setattr.patch
tcp-verify-the-checksum-of-the-first-data-segment-in-a-new-connection.patch
tls-fix-use-after-free-in-tls_push_record.patch
udp-fix-rx-queue-len-reported-by-diag-and-proc-interface.patch

12 files changed:
queue-4.14/bonding-re-evaluate-force_primary-when-the-primary-slave-name-changes.patch [new file with mode: 0644]
queue-4.14/cdc_ncm-avoid-padding-beyond-end-of-skb.patch [new file with mode: 0644]
queue-4.14/hv_netvsc-fix-a-network-regression-after-ifdown-ifup.patch [new file with mode: 0644]
queue-4.14/ipv6-allow-pmtu-exceptions-to-local-routes.patch [new file with mode: 0644]
queue-4.14/net-dsa-add-error-handling-for-pskb_trim_rcsum.patch [new file with mode: 0644]
queue-4.14/net-in-virtio_net_hdr-only-add-vlan_hlen-to-csum_start-if-payload-holds-vlan.patch [new file with mode: 0644]
queue-4.14/net-sched-act_simple-fix-parsing-of-tca_def_data.patch [new file with mode: 0644]
queue-4.14/series [new file with mode: 0644]
queue-4.14/socket-close-race-condition-between-sock_close-and-sockfs_setattr.patch [new file with mode: 0644]
queue-4.14/tcp-verify-the-checksum-of-the-first-data-segment-in-a-new-connection.patch [new file with mode: 0644]
queue-4.14/tls-fix-use-after-free-in-tls_push_record.patch [new file with mode: 0644]
queue-4.14/udp-fix-rx-queue-len-reported-by-diag-and-proc-interface.patch [new file with mode: 0644]

diff --git a/queue-4.14/bonding-re-evaluate-force_primary-when-the-primary-slave-name-changes.patch b/queue-4.14/bonding-re-evaluate-force_primary-when-the-primary-slave-name-changes.patch
new file mode 100644 (file)
index 0000000..0ae34b6
--- /dev/null
@@ -0,0 +1,32 @@
+From foo@baz Thu Jun 21 09:50:19 JST 2018
+From: Xiangning Yu <yuxiangning@gmail.com>
+Date: Thu, 7 Jun 2018 13:39:59 +0800
+Subject: bonding: re-evaluate force_primary when the primary slave name changes
+
+From: Xiangning Yu <yuxiangning@gmail.com>
+
+[ Upstream commit eb55bbf865d9979098c6a7a17cbdb41237ece951 ]
+
+There is a timing issue under active-standby mode: when bond_enslave() is
+called, bond->params.primary might not be initialized yet.
+
+Any time the primary slave string changes, bond->force_primary should be
+set to true to make sure the primary becomes the active slave.
+
+Signed-off-by: Xiangning Yu <yuxiangning@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/bonding/bond_options.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/bonding/bond_options.c
++++ b/drivers/net/bonding/bond_options.c
+@@ -1142,6 +1142,7 @@ static int bond_option_primary_set(struc
+                                  slave->dev->name);
+                       rcu_assign_pointer(bond->primary_slave, slave);
+                       strcpy(bond->params.primary, slave->dev->name);
++                      bond->force_primary = true;
+                       bond_select_active_slave(bond);
+                       goto out;
+               }
diff --git a/queue-4.14/cdc_ncm-avoid-padding-beyond-end-of-skb.patch b/queue-4.14/cdc_ncm-avoid-padding-beyond-end-of-skb.patch
new file mode 100644 (file)
index 0000000..d63427f
--- /dev/null
@@ -0,0 +1,111 @@
+From foo@baz Thu Jun 21 09:50:19 JST 2018
+From: "Bjørn Mork" <bjorn@mork.no>
+Date: Fri, 8 Jun 2018 09:15:24 +0200
+Subject: cdc_ncm: avoid padding beyond end of skb
+
+From: "Bjørn Mork" <bjorn@mork.no>
+
+[ Upstream commit 49c2c3f246e2fc3009039e31a826333dcd0283cd ]
+
+Commit 4a0e3e989d66 ("cdc_ncm: Add support for moving NDP to end
+of NCM frame") added logic to reserve space for the NDP at the
+end of the NTB/skb.  This reservation did not take the final
+alignment of the NDP into account, causing us to reserve too
+little space. Additionally the padding prior to NDP addition did
+not ensure there was enough space for the NDP.
+
+The NTB/skb with the NDP appended would then exceed the configured
+max size. This caused the final padding of the NTB to use a
+negative count, padding to almost INT_MAX, and resulting in:
+
+[60103.825970] BUG: unable to handle kernel paging request at ffff9641f2004000
+[60103.825998] IP: __memset+0x24/0x30
+[60103.826001] PGD a6a06067 P4D a6a06067 PUD 4f65a063 PMD 72003063 PTE 0
+[60103.826013] Oops: 0002 [#1] SMP NOPTI
+[60103.826018] Modules linked in: (removed)
+[60103.826158] CPU: 0 PID: 5990 Comm: Chrome_DevTools Tainted: G           O 4.14.0-3-amd64 #1 Debian 4.14.17-1
+[60103.826162] Hardware name: LENOVO 20081 BIOS 41CN28WW(V2.04) 05/03/2012
+[60103.826166] task: ffff964193484fc0 task.stack: ffffb2890137c000
+[60103.826171] RIP: 0010:__memset+0x24/0x30
+[60103.826174] RSP: 0000:ffff964316c03b68 EFLAGS: 00010216
+[60103.826178] RAX: 0000000000000000 RBX: 00000000fffffffd RCX: 000000001ffa5000
+[60103.826181] RDX: 0000000000000005 RSI: 0000000000000000 RDI: ffff9641f2003ffc
+[60103.826184] RBP: ffff964192f6c800 R08: 00000000304d434e R09: ffff9641f1d2c004
+[60103.826187] R10: 0000000000000002 R11: 00000000000005ae R12: ffff9642e6957a80
+[60103.826190] R13: ffff964282ff2ee8 R14: 000000000000000d R15: ffff9642e4843900
+[60103.826194] FS:  00007f395aaf6700(0000) GS:ffff964316c00000(0000) knlGS:0000000000000000
+[60103.826197] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[60103.826200] CR2: ffff9641f2004000 CR3: 0000000013b0c000 CR4: 00000000000006f0
+[60103.826204] Call Trace:
+[60103.826212]  <IRQ>
+[60103.826225]  cdc_ncm_fill_tx_frame+0x5e3/0x740 [cdc_ncm]
+[60103.826236]  cdc_ncm_tx_fixup+0x57/0x70 [cdc_ncm]
+[60103.826246]  usbnet_start_xmit+0x5d/0x710 [usbnet]
+[60103.826254]  ? netif_skb_features+0x119/0x250
+[60103.826259]  dev_hard_start_xmit+0xa1/0x200
+[60103.826267]  sch_direct_xmit+0xf2/0x1b0
+[60103.826273]  __dev_queue_xmit+0x5e3/0x7c0
+[60103.826280]  ? ip_finish_output2+0x263/0x3c0
+[60103.826284]  ip_finish_output2+0x263/0x3c0
+[60103.826289]  ? ip_output+0x6c/0xe0
+[60103.826293]  ip_output+0x6c/0xe0
+[60103.826298]  ? ip_forward_options+0x1a0/0x1a0
+[60103.826303]  tcp_transmit_skb+0x516/0x9b0
+[60103.826309]  tcp_write_xmit+0x1aa/0xee0
+[60103.826313]  ? sch_direct_xmit+0x71/0x1b0
+[60103.826318]  tcp_tasklet_func+0x177/0x180
+[60103.826325]  tasklet_action+0x5f/0x110
+[60103.826332]  __do_softirq+0xde/0x2b3
+[60103.826337]  irq_exit+0xae/0xb0
+[60103.826342]  do_IRQ+0x81/0xd0
+[60103.826347]  common_interrupt+0x98/0x98
+[60103.826351]  </IRQ>
+[60103.826355] RIP: 0033:0x7f397bdf2282
+[60103.826358] RSP: 002b:00007f395aaf57d8 EFLAGS: 00000206 ORIG_RAX: ffffffffffffff6e
+[60103.826362] RAX: 0000000000000000 RBX: 00002f07bc6d0900 RCX: 00007f39752d7fe7
+[60103.826365] RDX: 0000000000000022 RSI: 0000000000000147 RDI: 00002f07baea02c0
+[60103.826368] RBP: 0000000000000001 R08: 0000000000000000 R09: 0000000000000000
+[60103.826371] R10: 00000000ffffffff R11: 0000000000000000 R12: 00002f07baea02c0
+[60103.826373] R13: 00002f07bba227a0 R14: 00002f07bc6d090c R15: 0000000000000000
+[60103.826377] Code: 90 90 90 90 90 90 90 0f 1f 44 00 00 49 89 f9 48 89 d1 83
+e2 07 48 c1 e9 03 40 0f b6 f6 48 b8 01 01 01 01 01 01 01 01 48 0f af c6 <f3> 48
+ab 89 d1 f3 aa 4c 89 c8 c3 90 49 89 f9 40 88 f0 48 89 d1
+[60103.826442] RIP: __memset+0x24/0x30 RSP: ffff964316c03b68
+[60103.826444] CR2: ffff9641f2004000
+
+Commit e1069bbfcf3b ("net: cdc_ncm: Reduce memory use when kernel
+memory low") made this bug much more likely to trigger by reducing
+the NTB size under memory pressure.
+
+Link: https://bugs.debian.org/893393
+Reported-by: Горбешко Богдан <bodqhrohro@gmail.com>
+Reported-and-tested-by: Dennis Wassenberg <dennis.wassenberg@secunet.com>
+Cc: Enrico Mioso <mrkiko.rs@gmail.com>
+Fixes: 4a0e3e989d66 ("cdc_ncm: Add support for moving NDP to end of NCM frame")
+Signed-off-by: Bjørn Mork <bjorn@mork.no>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/cdc_ncm.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/usb/cdc_ncm.c
++++ b/drivers/net/usb/cdc_ncm.c
+@@ -1124,7 +1124,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev
+        * accordingly. Otherwise, we should check here.
+        */
+       if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)
+-              delayed_ndp_size = ctx->max_ndp_size;
++              delayed_ndp_size = ALIGN(ctx->max_ndp_size, ctx->tx_ndp_modulus);
+       else
+               delayed_ndp_size = 0;
+@@ -1285,7 +1285,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev
+       /* If requested, put NDP at end of frame. */
+       if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
+               nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
+-              cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_curr_size);
++              cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_curr_size - ctx->max_ndp_size);
+               nth16->wNdpIndex = cpu_to_le16(skb_out->len);
+               skb_put_data(skb_out, ctx->delayed_ndp16, ctx->max_ndp_size);
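
A minimal userspace sketch of the alignment arithmetic the cdc_ncm fix above
relies on (plain C, not kernel code; ALIGN() is re-implemented here and the
sizes are made-up example values):

#include <stdio.h>
#include <stddef.h>

/* Same rounding the kernel's ALIGN() macro performs for a power-of-two a. */
static size_t align_up(size_t x, size_t a)
{
        return (x + a - 1) & ~(a - 1);
}

int main(void)
{
        size_t max_ndp_size   = 12;  /* hypothetical NDP size in bytes */
        size_t tx_ndp_modulus = 8;   /* hypothetical NDP alignment     */
        size_t tx_curr_size   = 64;  /* hypothetical max NTB size      */

        /* Before the fix: reserve only max_ndp_size; once the NDP start is
         * aligned up inside the NTB, that reservation can come up short. */
        size_t reserved_old = max_ndp_size;
        /* After the fix: reserve the worst case, i.e. the aligned size. */
        size_t reserved_new = align_up(max_ndp_size, tx_ndp_modulus);

        printf("old reservation: %zu, new reservation: %zu\n",
               reserved_old, reserved_new);

        /* The second hunk caps the pre-NDP padding so that max_ndp_size
         * bytes always stay free at the tail of the NTB. */
        printf("padding limit: %zu (was %zu)\n",
               tx_curr_size - max_ndp_size, tx_curr_size);
        return 0;
}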
diff --git a/queue-4.14/hv_netvsc-fix-a-network-regression-after-ifdown-ifup.patch b/queue-4.14/hv_netvsc-fix-a-network-regression-after-ifdown-ifup.patch
new file mode 100644 (file)
index 0000000..bdacdd4
--- /dev/null
@@ -0,0 +1,43 @@
+From foo@baz Thu Jun 21 09:50:19 JST 2018
+From: Dexuan Cui <decui@microsoft.com>
+Date: Wed, 6 Jun 2018 21:32:51 +0000
+Subject: hv_netvsc: Fix a network regression after ifdown/ifup
+
+From: Dexuan Cui <decui@microsoft.com>
+
+[ Upstream commit 52acf73b6e9a6962045feb2ba5a8921da2201915 ]
+
+Recently people reported that the NIC stops working after
+"ifdown eth0; ifup eth0". It turns out that in this case the TX queues are
+not enabled after the refactoring of the common detach logic: when the NIC
+has sub-channels, usually we enable all the TX queues after all
+sub-channels are set up: see rndis_set_subchannel() ->
+netif_device_attach(), but in the case of "ifdown eth0; ifup eth0" where
+the number of channels doesn't change, we also must make sure the TX queues
+are enabled. The patch fixes the regression.
+
+Fixes: 7b2ee50c0cd5 ("hv_netvsc: common detach logic")
+Signed-off-by: Dexuan Cui <decui@microsoft.com>
+Cc: Stephen Hemminger <sthemmin@microsoft.com>
+Cc: K. Y. Srinivasan <kys@microsoft.com>
+Cc: Haiyang Zhang <haiyangz@microsoft.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/hyperv/netvsc_drv.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -123,8 +123,10 @@ static int netvsc_open(struct net_device
+       }
+       rdev = nvdev->extension;
+-      if (!rdev->link_state)
++      if (!rdev->link_state) {
+               netif_carrier_on(net);
++              netif_tx_wake_all_queues(net);
++      }
+       if (vf_netdev) {
+               /* Setting synthetic device up transparently sets
diff --git a/queue-4.14/ipv6-allow-pmtu-exceptions-to-local-routes.patch b/queue-4.14/ipv6-allow-pmtu-exceptions-to-local-routes.patch
new file mode 100644 (file)
index 0000000..ab17616
--- /dev/null
@@ -0,0 +1,37 @@
+From foo@baz Thu Jun 21 09:50:19 JST 2018
+From: Julian Anastasov <ja@ssi.bg>
+Date: Mon, 11 Jun 2018 02:02:54 +0300
+Subject: ipv6: allow PMTU exceptions to local routes
+
+From: Julian Anastasov <ja@ssi.bg>
+
+[ Upstream commit 0975764684487bf3f7a47eef009e750ea41bd514 ]
+
+IPVS setups with local client and remote tunnel server need
+to create an exception for the local virtual IP. What we do is to
+change PMTU from 64KB (on "lo") to 1460 in the common case.
+
+Suggested-by: Martin KaFai Lau <kafai@fb.com>
+Fixes: 45e4fd26683c ("ipv6: Only create RTF_CACHE routes after encountering pmtu exception")
+Fixes: 7343ff31ebf0 ("ipv6: Don't create clones of host routes.")
+Signed-off-by: Julian Anastasov <ja@ssi.bg>
+Acked-by: David Ahern <dsahern@gmail.com>
+Acked-by: Martin KaFai Lau <kafai@fb.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/route.c |    3 ---
+ 1 file changed, 3 deletions(-)
+
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1476,9 +1476,6 @@ static void __ip6_rt_update_pmtu(struct
+       const struct in6_addr *daddr, *saddr;
+       struct rt6_info *rt6 = (struct rt6_info *)dst;
+-      if (rt6->rt6i_flags & RTF_LOCAL)
+-              return;
+-
+       if (dst_metric_locked(dst, RTAX_MTU))
+               return;
diff --git a/queue-4.14/net-dsa-add-error-handling-for-pskb_trim_rcsum.patch b/queue-4.14/net-dsa-add-error-handling-for-pskb_trim_rcsum.patch
new file mode 100644 (file)
index 0000000..c4f1df0
--- /dev/null
@@ -0,0 +1,33 @@
+From foo@baz Thu Jun 21 09:50:19 JST 2018
+From: Zhouyang Jia <jiazhouyang09@gmail.com>
+Date: Mon, 11 Jun 2018 13:26:35 +0800
+Subject: net: dsa: add error handling for pskb_trim_rcsum
+
+From: Zhouyang Jia <jiazhouyang09@gmail.com>
+
+[ Upstream commit 349b71d6f427ff8211adf50839dbbff3f27c1805 ]
+
+When pskb_trim_rcsum fails, the lack of error-handling code may
+cause unexpected results.
+
+This patch adds error-handling code after calling pskb_trim_rcsum.
+
+Signed-off-by: Zhouyang Jia <jiazhouyang09@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/dsa/tag_trailer.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/dsa/tag_trailer.c
++++ b/net/dsa/tag_trailer.c
+@@ -79,7 +79,8 @@ static struct sk_buff *trailer_rcv(struc
+       if (unlikely(ds->cpu_port_mask & BIT(source_port)))
+               return NULL;
+-      pskb_trim_rcsum(skb, skb->len - 4);
++      if (pskb_trim_rcsum(skb, skb->len - 4))
++              return NULL;
+       skb->dev = ds->ports[source_port].netdev;
diff --git a/queue-4.14/net-in-virtio_net_hdr-only-add-vlan_hlen-to-csum_start-if-payload-holds-vlan.patch b/queue-4.14/net-in-virtio_net_hdr-only-add-vlan_hlen-to-csum_start-if-payload-holds-vlan.patch
new file mode 100644 (file)
index 0000000..9655ea7
--- /dev/null
@@ -0,0 +1,135 @@
+From foo@baz Thu Jun 21 09:50:19 JST 2018
+From: Willem de Bruijn <willemb@google.com>
+Date: Wed, 6 Jun 2018 11:23:01 -0400
+Subject: net: in virtio_net_hdr only add VLAN_HLEN to csum_start if payload holds vlan
+
+From: Willem de Bruijn <willemb@google.com>
+
+[ Upstream commit fd3a88625844907151737fc3b4201676effa6d27 ]
+
+Tun, tap, virtio, packet and uml vector all use struct virtio_net_hdr
+to communicate packet metadata to userspace.
+
+For skbuffs with vlan, the first two return the packet as it may have
+existed on the wire, inserting the VLAN tag in the user buffer.  Then
+virtio_net_hdr.csum_start needs to be adjusted by VLAN_HLEN bytes.
+
+Commit f09e2249c4f5 ("macvtap: restore vlan header on user read")
+added this feature to macvtap. Commit 3ce9b20f1971 ("macvtap: Fix
+csum_start when VLAN tags are present") then fixed up csum_start.
+
+Virtio, packet and uml do not insert the vlan header in the user
+buffer.
+
+When introducing virtio_net_hdr_from_skb to deduplicate filling in
+the virtio_net_hdr, the variant from macvtap which adds VLAN_HLEN was
+applied uniformly, breaking csum offset for packets with vlan on
+virtio and packet.
+
+Make insertion of VLAN_HLEN optional. Convert the callers to pass it
+when needed.
+
+Fixes: e858fae2b0b8f4 ("virtio_net: use common code for virtio_net_hdr and skb GSO conversion")
+Fixes: 1276f24eeef2 ("packet: use common code for virtio_net_hdr and skb GSO conversion")
+Signed-off-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/tap.c          |    5 ++++-
+ drivers/net/tun.c          |    3 ++-
+ drivers/net/virtio_net.c   |    3 ++-
+ include/linux/virtio_net.h |   11 ++++-------
+ net/packet/af_packet.c     |    4 ++--
+ 5 files changed, 14 insertions(+), 12 deletions(-)
+
+--- a/drivers/net/tap.c
++++ b/drivers/net/tap.c
+@@ -777,13 +777,16 @@ static ssize_t tap_put_user(struct tap_q
+       int total;
+       if (q->flags & IFF_VNET_HDR) {
++              int vlan_hlen = skb_vlan_tag_present(skb) ? VLAN_HLEN : 0;
+               struct virtio_net_hdr vnet_hdr;
++
+               vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
+               if (iov_iter_count(iter) < vnet_hdr_len)
+                       return -EINVAL;
+               if (virtio_net_hdr_from_skb(skb, &vnet_hdr,
+-                                          tap_is_little_endian(q), true))
++                                          tap_is_little_endian(q), true,
++                                          vlan_hlen))
+                       BUG();
+               if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1648,7 +1648,8 @@ static ssize_t tun_put_user(struct tun_s
+                       return -EINVAL;
+               if (virtio_net_hdr_from_skb(skb, &gso,
+-                                          tun_is_little_endian(tun), true)) {
++                                          tun_is_little_endian(tun), true,
++                                          vlan_hlen)) {
+                       struct skb_shared_info *sinfo = skb_shinfo(skb);
+                       pr_err("unexpected GSO type: "
+                              "0x%x, gso_size %d, hdr_len %d\n",
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -1237,7 +1237,8 @@ static int xmit_skb(struct send_queue *s
+               hdr = skb_vnet_hdr(skb);
+       if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
+-                                  virtio_is_little_endian(vi->vdev), false))
++                                  virtio_is_little_endian(vi->vdev), false,
++                                  0))
+               BUG();
+       if (vi->mergeable_rx_bufs)
+--- a/include/linux/virtio_net.h
++++ b/include/linux/virtio_net.h
+@@ -58,7 +58,8 @@ static inline int virtio_net_hdr_to_skb(
+ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
+                                         struct virtio_net_hdr *hdr,
+                                         bool little_endian,
+-                                        bool has_data_valid)
++                                        bool has_data_valid,
++                                        int vlan_hlen)
+ {
+       memset(hdr, 0, sizeof(*hdr));   /* no info leak */
+@@ -83,12 +84,8 @@ static inline int virtio_net_hdr_from_sk
+       if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+-              if (skb_vlan_tag_present(skb))
+-                      hdr->csum_start = __cpu_to_virtio16(little_endian,
+-                              skb_checksum_start_offset(skb) + VLAN_HLEN);
+-              else
+-                      hdr->csum_start = __cpu_to_virtio16(little_endian,
+-                              skb_checksum_start_offset(skb));
++              hdr->csum_start = __cpu_to_virtio16(little_endian,
++                      skb_checksum_start_offset(skb) + vlan_hlen);
+               hdr->csum_offset = __cpu_to_virtio16(little_endian,
+                               skb->csum_offset);
+       } else if (has_data_valid &&
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2046,7 +2046,7 @@ static int packet_rcv_vnet(struct msghdr
+               return -EINVAL;
+       *len -= sizeof(vnet_hdr);
+-      if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true))
++      if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0))
+               return -EINVAL;
+       return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
+@@ -2313,7 +2313,7 @@ static int tpacket_rcv(struct sk_buff *s
+       if (do_vnet) {
+               if (virtio_net_hdr_from_skb(skb, h.raw + macoff -
+                                           sizeof(struct virtio_net_hdr),
+-                                          vio_le(), true)) {
++                                          vio_le(), true, 0)) {
+                       spin_lock(&sk->sk_receive_queue.lock);
+                       goto drop_n_account;
+               }
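
A small sketch of the csum_start arithmetic that the patch above makes
conditional (plain C; VLAN_HLEN is the 4-byte 802.1Q tag, the offset is a
made-up example value):

#include <stdio.h>

#define VLAN_HLEN 4

/* csum_start must point into the buffer as the consumer will see it: only
 * callers that re-insert the VLAN tag into the user buffer (tun/tap) shift
 * it by VLAN_HLEN; virtio and packet hand the frame over untagged, so they
 * pass 0. */
static unsigned int csum_start(unsigned int checksum_start_offset,
                               unsigned int vlan_hlen)
{
        return checksum_start_offset + vlan_hlen;
}

int main(void)
{
        unsigned int off = 34;  /* hypothetical checksum start offset */

        printf("tap/tun (tag re-inserted): %u\n", csum_start(off, VLAN_HLEN));
        printf("virtio/packet (no tag):    %u\n", csum_start(off, 0));
        return 0;
}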
diff --git a/queue-4.14/net-sched-act_simple-fix-parsing-of-tca_def_data.patch b/queue-4.14/net-sched-act_simple-fix-parsing-of-tca_def_data.patch
new file mode 100644 (file)
index 0000000..9455ee7
--- /dev/null
@@ -0,0 +1,88 @@
+From foo@baz Thu Jun 21 09:50:19 JST 2018
+From: Davide Caratti <dcaratti@redhat.com>
+Date: Fri, 8 Jun 2018 05:02:31 +0200
+Subject: net/sched: act_simple: fix parsing of TCA_DEF_DATA
+
+From: Davide Caratti <dcaratti@redhat.com>
+
+[ Upstream commit 8d499533e0bc02d44283dbdab03142b599b8ba16 ]
+
+use nla_strlcpy() to avoid copying data beyond the length of TCA_DEF_DATA
+netlink attribute, in case it is less than SIMP_MAX_DATA and it does not
+end with '\0' character.
+
+v2: fix errors in the commit message, thanks Hangbin Liu
+
+Fixes: fa1b1cff3d06 ("net_cls_act: Make act_simple use of netlink policy.")
+Signed-off-by: Davide Caratti <dcaratti@redhat.com>
+Reviewed-by: Simon Horman <simon.horman@netronome.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/act_simple.c |   15 ++++++---------
+ 1 file changed, 6 insertions(+), 9 deletions(-)
+
+--- a/net/sched/act_simple.c
++++ b/net/sched/act_simple.c
+@@ -53,22 +53,22 @@ static void tcf_simp_release(struct tc_a
+       kfree(d->tcfd_defdata);
+ }
+-static int alloc_defdata(struct tcf_defact *d, char *defdata)
++static int alloc_defdata(struct tcf_defact *d, const struct nlattr *defdata)
+ {
+       d->tcfd_defdata = kzalloc(SIMP_MAX_DATA, GFP_KERNEL);
+       if (unlikely(!d->tcfd_defdata))
+               return -ENOMEM;
+-      strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
++      nla_strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
+       return 0;
+ }
+-static void reset_policy(struct tcf_defact *d, char *defdata,
++static void reset_policy(struct tcf_defact *d, const struct nlattr *defdata,
+                        struct tc_defact *p)
+ {
+       spin_lock_bh(&d->tcf_lock);
+       d->tcf_action = p->action;
+       memset(d->tcfd_defdata, 0, SIMP_MAX_DATA);
+-      strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
++      nla_strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
+       spin_unlock_bh(&d->tcf_lock);
+ }
+@@ -87,7 +87,6 @@ static int tcf_simp_init(struct net *net
+       struct tcf_defact *d;
+       bool exists = false;
+       int ret = 0, err;
+-      char *defdata;
+       if (nla == NULL)
+               return -EINVAL;
+@@ -110,8 +109,6 @@ static int tcf_simp_init(struct net *net
+               return -EINVAL;
+       }
+-      defdata = nla_data(tb[TCA_DEF_DATA]);
+-
+       if (!exists) {
+               ret = tcf_idr_create(tn, parm->index, est, a,
+                                    &act_simp_ops, bind, false);
+@@ -119,7 +116,7 @@ static int tcf_simp_init(struct net *net
+                       return ret;
+               d = to_defact(*a);
+-              ret = alloc_defdata(d, defdata);
++              ret = alloc_defdata(d, tb[TCA_DEF_DATA]);
+               if (ret < 0) {
+                       tcf_idr_release(*a, bind);
+                       return ret;
+@@ -133,7 +130,7 @@ static int tcf_simp_init(struct net *net
+               if (!ovr)
+                       return -EEXIST;
+-              reset_policy(d, defdata, parm);
++              reset_policy(d, tb[TCA_DEF_DATA], parm);
+       }
+       if (ret == ACT_P_CREATED)
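
A simplified userspace model of why the act_simple patch above switches from
strlcpy() to nla_strlcpy(): an attribute payload shorter than SIMP_MAX_DATA
and not NUL-terminated must be bounded by the attribute length, not only by
the destination size. This is a rough sketch, not the real struct nlattr
handling:

#include <stdio.h>
#include <string.h>

#define SIMP_MAX_DATA 32

/* Rough model of nla_strlcpy(): copy at most dstsize - 1 bytes, never more
 * than the payload length, and always NUL-terminate the destination. */
static void attr_strlcpy(char *dst, const char *payload, size_t payload_len,
                         size_t dstsize)
{
        size_t n = payload_len < dstsize - 1 ? payload_len : dstsize - 1;

        memcpy(dst, payload, n);
        dst[n] = '\0';
}

int main(void)
{
        /* A 4-byte attribute payload that is not NUL-terminated; the bytes
         * after it belong to the next attribute in the netlink message. */
        char msg[] = { 'd', 'a', 't', 'a', 'X', 'Y', 'Z', '\0' };
        char dst[SIMP_MAX_DATA];

        /* strlcpy() keys off the source NUL and would copy "dataXYZ",
         * reading past the 4-byte payload; the bounded copy stops at 4. */
        attr_strlcpy(dst, msg, 4, sizeof(dst));
        printf("bounded copy: \"%s\"\n", dst);
        return 0;
}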
diff --git a/queue-4.14/series b/queue-4.14/series
new file mode 100644 (file)
index 0000000..87be79a
--- /dev/null
@@ -0,0 +1,11 @@
+bonding-re-evaluate-force_primary-when-the-primary-slave-name-changes.patch
+cdc_ncm-avoid-padding-beyond-end-of-skb.patch
+ipv6-allow-pmtu-exceptions-to-local-routes.patch
+net-dsa-add-error-handling-for-pskb_trim_rcsum.patch
+net-sched-act_simple-fix-parsing-of-tca_def_data.patch
+tcp-verify-the-checksum-of-the-first-data-segment-in-a-new-connection.patch
+socket-close-race-condition-between-sock_close-and-sockfs_setattr.patch
+udp-fix-rx-queue-len-reported-by-diag-and-proc-interface.patch
+net-in-virtio_net_hdr-only-add-vlan_hlen-to-csum_start-if-payload-holds-vlan.patch
+hv_netvsc-fix-a-network-regression-after-ifdown-ifup.patch
+tls-fix-use-after-free-in-tls_push_record.patch
diff --git a/queue-4.14/socket-close-race-condition-between-sock_close-and-sockfs_setattr.patch b/queue-4.14/socket-close-race-condition-between-sock_close-and-sockfs_setattr.patch
new file mode 100644 (file)
index 0000000..2bf77a2
--- /dev/null
@@ -0,0 +1,90 @@
+From foo@baz Thu Jun 21 09:50:19 JST 2018
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Thu, 7 Jun 2018 13:39:49 -0700
+Subject: socket: close race condition between sock_close() and sockfs_setattr()
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+[ Upstream commit 6d8c50dcb029872b298eea68cc6209c866fd3e14 ]
+
+fchownat() doesn't even hold refcnt of fd until it figures out
+fd is really needed (otherwise it is ignored) and releases it after
+it resolves the path. This means sock_close() could race with
+sockfs_setattr(), which leads to a NULL pointer dereference
+since typically we set sock->sk to NULL in ->release().
+
+As pointed out by Al, this is unique to sockfs. So we can fix this
+in socket layer by acquiring inode_lock in sock_close() and
+checking against NULL in sockfs_setattr().
+
+sock_release() is called in many places, only the sock_close()
+path matters here. And fortunately, this should not affect normal
+sock_close() as it is only called when the last fd refcnt is gone.
+It only affects sock_close() with a parallel sockfs_setattr() in
+progress, which is not common.
+
+Fixes: 86741ec25462 ("net: core: Add a UID field to struct sock.")
+Reported-by: shankarapailoor <shankarapailoor@gmail.com>
+Cc: Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp>
+Cc: Lorenzo Colitti <lorenzo@google.com>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/socket.c |   18 +++++++++++++++---
+ 1 file changed, 15 insertions(+), 3 deletions(-)
+
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -538,7 +538,10 @@ static int sockfs_setattr(struct dentry
+       if (!err && (iattr->ia_valid & ATTR_UID)) {
+               struct socket *sock = SOCKET_I(d_inode(dentry));
+-              sock->sk->sk_uid = iattr->ia_uid;
++              if (sock->sk)
++                      sock->sk->sk_uid = iattr->ia_uid;
++              else
++                      err = -ENOENT;
+       }
+       return err;
+@@ -588,12 +591,16 @@ EXPORT_SYMBOL(sock_alloc);
+  *    an inode not a file.
+  */
+-void sock_release(struct socket *sock)
++static void __sock_release(struct socket *sock, struct inode *inode)
+ {
+       if (sock->ops) {
+               struct module *owner = sock->ops->owner;
++              if (inode)
++                      inode_lock(inode);
+               sock->ops->release(sock);
++              if (inode)
++                      inode_unlock(inode);
+               sock->ops = NULL;
+               module_put(owner);
+       }
+@@ -608,6 +615,11 @@ void sock_release(struct socket *sock)
+       }
+       sock->file = NULL;
+ }
++
++void sock_release(struct socket *sock)
++{
++      __sock_release(sock, NULL);
++}
+ EXPORT_SYMBOL(sock_release);
+ void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags)
+@@ -1122,7 +1134,7 @@ static int sock_mmap(struct file *file,
+ static int sock_close(struct inode *inode, struct file *filp)
+ {
+-      sock_release(SOCKET_I(inode));
++      __sock_release(SOCKET_I(inode), inode);
+       return 0;
+ }
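
A toy illustration of the locking pattern the socket patch above applies,
using plain C with pthreads rather than kernel code. In the kernel the VFS
already holds the inode lock while ->setattr() runs; the fix adds the
matching inode_lock() around ->release() plus a NULL re-check, which the
sketch below collapses into one mutex:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for struct socket / struct sock and the inode lock. */
struct toy_sock { int uid; };
struct toy_socket {
        pthread_mutex_t lock;     /* plays the role of inode_lock() */
        struct toy_sock *sk;
};

static void toy_release(struct toy_socket *sock)
{
        pthread_mutex_lock(&sock->lock);
        free(sock->sk);
        sock->sk = NULL;          /* what ->release() does to sock->sk */
        pthread_mutex_unlock(&sock->lock);
}

static int toy_setattr(struct toy_socket *sock, int uid)
{
        int err = 0;

        pthread_mutex_lock(&sock->lock);
        if (sock->sk)             /* re-check under the lock */
                sock->sk->uid = uid;
        else
                err = -ENOENT;    /* socket already torn down */
        pthread_mutex_unlock(&sock->lock);
        return err;
}

int main(void)
{
        struct toy_sock *sk = calloc(1, sizeof(*sk));
        struct toy_socket s = { PTHREAD_MUTEX_INITIALIZER, sk };

        toy_release(&s);
        printf("setattr after close: %d\n", toy_setattr(&s, 1000));
        return 0;
}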
diff --git a/queue-4.14/tcp-verify-the-checksum-of-the-first-data-segment-in-a-new-connection.patch b/queue-4.14/tcp-verify-the-checksum-of-the-first-data-segment-in-a-new-connection.patch
new file mode 100644 (file)
index 0000000..30d5f83
--- /dev/null
@@ -0,0 +1,60 @@
+From foo@baz Thu Jun 21 09:50:19 JST 2018
+From: Frank van der Linden <fllinden@amazon.com>
+Date: Tue, 12 Jun 2018 23:09:37 +0000
+Subject: tcp: verify the checksum of the first data segment in a new connection
+
+From: Frank van der Linden <fllinden@amazon.com>
+
+[ Upstream commit 4fd44a98ffe0d048246efef67ed640fdf2098a62 ]
+
+commit 079096f103fa ("tcp/dccp: install syn_recv requests into ehash
+table") introduced an optimization for the handling of child sockets
+created for a new TCP connection.
+
+But this optimization passes any data associated with the last ACK of the
+connection handshake up the stack without verifying its checksum, because it
+calls tcp_child_process(), which in turn calls tcp_rcv_state_process()
+directly.  These lower-level processing functions do not do any checksum
+verification.
+
+Insert a tcp_checksum_complete call in the TCP_NEW_SYN_RECEIVE path to
+fix this.
+
+Fixes: 079096f103fa ("tcp/dccp: install syn_recv requests into ehash table")
+Signed-off-by: Frank van der Linden <fllinden@amazon.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Tested-by: Balbir Singh <bsingharora@gmail.com>
+Reviewed-by: Balbir Singh <bsingharora@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_ipv4.c |    4 ++++
+ net/ipv6/tcp_ipv6.c |    4 ++++
+ 2 files changed, 8 insertions(+)
+
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -1675,6 +1675,10 @@ process:
+                       reqsk_put(req);
+                       goto discard_it;
+               }
++              if (tcp_checksum_complete(skb)) {
++                      reqsk_put(req);
++                      goto csum_error;
++              }
+               if (unlikely(sk->sk_state != TCP_LISTEN)) {
+                       inet_csk_reqsk_queue_drop_and_put(sk, req);
+                       goto lookup;
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1453,6 +1453,10 @@ process:
+                       reqsk_put(req);
+                       goto discard_it;
+               }
++              if (tcp_checksum_complete(skb)) {
++                      reqsk_put(req);
++                      goto csum_error;
++              }
+               if (unlikely(sk->sk_state != TCP_LISTEN)) {
+                       inet_csk_reqsk_queue_drop_and_put(sk, req);
+                       goto lookup;
diff --git a/queue-4.14/tls-fix-use-after-free-in-tls_push_record.patch b/queue-4.14/tls-fix-use-after-free-in-tls_push_record.patch
new file mode 100644 (file)
index 0000000..b7ca123
--- /dev/null
@@ -0,0 +1,150 @@
+From foo@baz Thu Jun 21 09:50:19 JST 2018
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Fri, 15 Jun 2018 03:07:45 +0200
+Subject: tls: fix use-after-free in tls_push_record
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+[ Upstream commit a447da7d00410278c90d3576782a43f8b675d7be ]
+
+syzkaller managed to trigger a use-after-free in tls like the
+following:
+
+  BUG: KASAN: use-after-free in tls_push_record.constprop.15+0x6a2/0x810 [tls]
+  Write of size 1 at addr ffff88037aa08000 by task a.out/2317
+
+  CPU: 3 PID: 2317 Comm: a.out Not tainted 4.17.0+ #144
+  Hardware name: LENOVO 20FBCTO1WW/20FBCTO1WW, BIOS N1FET47W (1.21 ) 11/28/2016
+  Call Trace:
+   dump_stack+0x71/0xab
+   print_address_description+0x6a/0x280
+   kasan_report+0x258/0x380
+   ? tls_push_record.constprop.15+0x6a2/0x810 [tls]
+   tls_push_record.constprop.15+0x6a2/0x810 [tls]
+   tls_sw_push_pending_record+0x2e/0x40 [tls]
+   tls_sk_proto_close+0x3fe/0x710 [tls]
+   ? tcp_check_oom+0x4c0/0x4c0
+   ? tls_write_space+0x260/0x260 [tls]
+   ? kmem_cache_free+0x88/0x1f0
+   inet_release+0xd6/0x1b0
+   __sock_release+0xc0/0x240
+   sock_close+0x11/0x20
+   __fput+0x22d/0x660
+   task_work_run+0x114/0x1a0
+   do_exit+0x71a/0x2780
+   ? mm_update_next_owner+0x650/0x650
+   ? handle_mm_fault+0x2f5/0x5f0
+   ? __do_page_fault+0x44f/0xa50
+   ? mm_fault_error+0x2d0/0x2d0
+   do_group_exit+0xde/0x300
+   __x64_sys_exit_group+0x3a/0x50
+   do_syscall_64+0x9a/0x300
+   ? page_fault+0x8/0x30
+   entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+This happened through fault injection where aead_req allocation in
+tls_do_encryption() eventually failed and we returned -ENOMEM from
+the function. Turns out that the use-after-free is triggered from
+tls_sw_sendmsg() in the second tls_push_record(). The error then
+triggers a jump to waiting for memory in sk_stream_wait_memory()
+resp. returning immediately in case of MSG_DONTWAIT. What follows is
+the trim_both_sgl(sk, orig_size), which drops elements from the sg
+list added via tls_sw_sendmsg(). Now the use-after-free gets triggered
+when the socket is being closed, where tls_sk_proto_close() callback
+is invoked. The tls_complete_pending_work() will figure that there's
+a pending closed tls record to be flushed and thus calls into the
+tls_push_pending_closed_record() from there. ctx->push_pending_record()
+is called from the latter, which is the tls_sw_push_pending_record()
+from sw path. This again calls into tls_push_record(). And here the
+tls_fill_prepend() will panic since the buffer address has been freed
+earlier via trim_both_sgl(). One way to fix it is to move the aead
+request allocation out of tls_do_encryption() early into tls_push_record().
+This means we don't prep the tls header and advance state to
+TLS_PENDING_CLOSED_RECORD before the allocation that could potentially
+fail has happened. That fixes the issue on my side.
+
+Fixes: 3c4d7559159b ("tls: kernel TLS support")
+Reported-by: syzbot+5c74af81c547738e1684@syzkaller.appspotmail.com
+Reported-by: syzbot+709f2810a6a05f11d4d3@syzkaller.appspotmail.com
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Dave Watson <davejwatson@fb.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tls/tls_sw.c |   26 +++++++++++++-------------
+ 1 file changed, 13 insertions(+), 13 deletions(-)
+
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -211,18 +211,12 @@ static void tls_free_both_sg(struct sock
+ }
+ static int tls_do_encryption(struct tls_context *tls_ctx,
+-                           struct tls_sw_context *ctx, size_t data_len,
+-                           gfp_t flags)
++                           struct tls_sw_context *ctx,
++                           struct aead_request *aead_req,
++                           size_t data_len)
+ {
+-      unsigned int req_size = sizeof(struct aead_request) +
+-              crypto_aead_reqsize(ctx->aead_send);
+-      struct aead_request *aead_req;
+       int rc;
+-      aead_req = kzalloc(req_size, flags);
+-      if (!aead_req)
+-              return -ENOMEM;
+-
+       ctx->sg_encrypted_data[0].offset += tls_ctx->prepend_size;
+       ctx->sg_encrypted_data[0].length -= tls_ctx->prepend_size;
+@@ -235,7 +229,6 @@ static int tls_do_encryption(struct tls_
+       ctx->sg_encrypted_data[0].offset -= tls_ctx->prepend_size;
+       ctx->sg_encrypted_data[0].length += tls_ctx->prepend_size;
+-      kfree(aead_req);
+       return rc;
+ }
+@@ -244,8 +237,14 @@ static int tls_push_record(struct sock *
+ {
+       struct tls_context *tls_ctx = tls_get_ctx(sk);
+       struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
++      struct aead_request *req;
+       int rc;
++      req = kzalloc(sizeof(struct aead_request) +
++                    crypto_aead_reqsize(ctx->aead_send), sk->sk_allocation);
++      if (!req)
++              return -ENOMEM;
++
+       sg_mark_end(ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem - 1);
+       sg_mark_end(ctx->sg_encrypted_data + ctx->sg_encrypted_num_elem - 1);
+@@ -261,15 +260,14 @@ static int tls_push_record(struct sock *
+       tls_ctx->pending_open_record_frags = 0;
+       set_bit(TLS_PENDING_CLOSED_RECORD, &tls_ctx->flags);
+-      rc = tls_do_encryption(tls_ctx, ctx, ctx->sg_plaintext_size,
+-                             sk->sk_allocation);
++      rc = tls_do_encryption(tls_ctx, ctx, req, ctx->sg_plaintext_size);
+       if (rc < 0) {
+               /* If we are called from write_space and
+                * we fail, we need to set this SOCK_NOSPACE
+                * to trigger another write_space in the future.
+                */
+               set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+-              return rc;
++              goto out_req;
+       }
+       free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
+@@ -284,6 +282,8 @@ static int tls_push_record(struct sock *
+               tls_err_abort(sk);
+       tls_advance_record_sn(sk, tls_ctx);
++out_req:
++      kfree(req);
+       return rc;
+ }
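
A compact sketch of the ordering the tls fix above enforces, in plain C
rather than kernel code: the allocation that can fail is done up front, so
an -ENOMEM return never leaves the record state half advanced (the struct
and field names here are invented for illustration):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_record_ctx {
        int pending_closed;  /* stands in for TLS_PENDING_CLOSED_RECORD */
};

static int toy_push_record(struct toy_record_ctx *ctx, size_t req_size)
{
        /* Allocate the fallible resource first ... */
        void *req = calloc(1, req_size);

        if (!req)
                return -ENOMEM;  /* nothing was mutated, nothing to undo */

        /* ... and only then advance the state that later paths (the socket
         * close callback in the TLS case) will act on. */
        ctx->pending_closed = 1;

        /* the encryption work would go here, using req */

        free(req);
        return 0;
}

int main(void)
{
        struct toy_record_ctx ctx = { 0 };

        printf("push_record: %d\n", toy_push_record(&ctx, 128));
        return 0;
}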
diff --git a/queue-4.14/udp-fix-rx-queue-len-reported-by-diag-and-proc-interface.patch b/queue-4.14/udp-fix-rx-queue-len-reported-by-diag-and-proc-interface.patch
new file mode 100644 (file)
index 0000000..12ad4d6
--- /dev/null
@@ -0,0 +1,124 @@
+From foo@baz Thu Jun 21 09:50:19 JST 2018
+From: Paolo Abeni <pabeni@redhat.com>
+Date: Fri, 8 Jun 2018 11:35:40 +0200
+Subject: udp: fix rx queue len reported by diag and proc interface
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+[ Upstream commit 6c206b20092a3623184cff9470dba75d21507874 ]
+
+After commit 6b229cf77d68 ("udp: add batching to udp_rmem_release()")
+the sk_rmem_alloc field does not measure exactly anymore the
+receive queue length, because we batch the rmem release. The issue
+is really apparent only after commit 0d4a6608f68c ("udp: do rmem bulk
+free even if the rx sk queue is empty"): the user space can easily
+check for an empty socket with a non-zero queue length reported by the 'ss'
+tool or the procfs interface.
+
+We need to use a custom UDP helper to report the correct queue length,
+taking into account the forward allocation deficit.
+
+Reported-by: trevor.francis@46labs.com
+Fixes: 6b229cf77d68 ("UDP: add batching to udp_rmem_release()")
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/transp_v6.h |   11 +++++++++--
+ include/net/udp.h       |    5 +++++
+ net/ipv4/udp.c          |    2 +-
+ net/ipv4/udp_diag.c     |    2 +-
+ net/ipv6/datagram.c     |    6 +++---
+ net/ipv6/udp.c          |    3 ++-
+ 6 files changed, 21 insertions(+), 8 deletions(-)
+
+--- a/include/net/transp_v6.h
++++ b/include/net/transp_v6.h
+@@ -45,8 +45,15 @@ int ip6_datagram_send_ctl(struct net *ne
+                         struct flowi6 *fl6, struct ipcm6_cookie *ipc6,
+                         struct sockcm_cookie *sockc);
+-void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
+-                           __u16 srcp, __u16 destp, int bucket);
++void __ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
++                             __u16 srcp, __u16 destp, int rqueue, int bucket);
++static inline void
++ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, __u16 srcp,
++                      __u16 destp, int bucket)
++{
++      __ip6_dgram_sock_seq_show(seq, sp, srcp, destp, sk_rmem_alloc_get(sp),
++                                bucket);
++}
+ #define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006)
+--- a/include/net/udp.h
++++ b/include/net/udp.h
+@@ -244,6 +244,11 @@ static inline __be16 udp_flow_src_port(s
+       return htons((((u64) hash * (max - min)) >> 32) + min);
+ }
++static inline int udp_rqueue_get(struct sock *sk)
++{
++      return sk_rmem_alloc_get(sk) - READ_ONCE(udp_sk(sk)->forward_deficit);
++}
++
+ /* net/ipv4/udp.c */
+ void udp_destruct_sock(struct sock *sk);
+ void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -2720,7 +2720,7 @@ static void udp4_format_sock(struct sock
+               " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d",
+               bucket, src, srcp, dest, destp, sp->sk_state,
+               sk_wmem_alloc_get(sp),
+-              sk_rmem_alloc_get(sp),
++              udp_rqueue_get(sp),
+               0, 0L, 0,
+               from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
+               0, sock_i_ino(sp),
+--- a/net/ipv4/udp_diag.c
++++ b/net/ipv4/udp_diag.c
+@@ -163,7 +163,7 @@ static int udp_diag_dump_one(struct sk_b
+ static void udp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
+               void *info)
+ {
+-      r->idiag_rqueue = sk_rmem_alloc_get(sk);
++      r->idiag_rqueue = udp_rqueue_get(sk);
+       r->idiag_wqueue = sk_wmem_alloc_get(sk);
+ }
+--- a/net/ipv6/datagram.c
++++ b/net/ipv6/datagram.c
+@@ -1026,8 +1026,8 @@ exit_f:
+ }
+ EXPORT_SYMBOL_GPL(ip6_datagram_send_ctl);
+-void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
+-                           __u16 srcp, __u16 destp, int bucket)
++void __ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
++                             __u16 srcp, __u16 destp, int rqueue, int bucket)
+ {
+       const struct in6_addr *dest, *src;
+@@ -1043,7 +1043,7 @@ void ip6_dgram_sock_seq_show(struct seq_
+                  dest->s6_addr32[2], dest->s6_addr32[3], destp,
+                  sp->sk_state,
+                  sk_wmem_alloc_get(sp),
+-                 sk_rmem_alloc_get(sp),
++                 rqueue,
+                  0, 0L, 0,
+                  from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
+                  0,
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1503,7 +1503,8 @@ int udp6_seq_show(struct seq_file *seq,
+               struct inet_sock *inet = inet_sk(v);
+               __u16 srcp = ntohs(inet->inet_sport);
+               __u16 destp = ntohs(inet->inet_dport);
+-              ip6_dgram_sock_seq_show(seq, v, srcp, destp, bucket);
++              __ip6_dgram_sock_seq_show(seq, v, srcp, destp,
++                                        udp_rqueue_get(v), bucket);
+       }
+       return 0;
+ }
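
The helper the udp patch above introduces boils down to one subtraction; a
trivial sketch of the arithmetic (plain C, made-up numbers):

#include <stdio.h>

/* What udp_rqueue_get() computes: the raw rmem accounting minus the amount
 * already released in bulk but not yet subtracted (the forward deficit). */
static int rqueue_len(int rmem_alloc, int forward_deficit)
{
        return rmem_alloc - forward_deficit;
}

int main(void)
{
        /* Hypothetical: every queued datagram has been consumed, but their
         * 16 KB of rmem are still waiting for a batched release. */
        printf("reported before the fix: %d\n", 16384);
        printf("reported after the fix:  %d\n", rqueue_len(16384, 16384));
        return 0;
}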