--- /dev/null
+From foo@baz Thu Jun 21 06:54:06 JST 2018
+From: Xiangning Yu <yuxiangning@gmail.com>
+Date: Thu, 7 Jun 2018 13:39:59 +0800
+Subject: bonding: re-evaluate force_primary when the primary slave name changes
+
+From: Xiangning Yu <yuxiangning@gmail.com>
+
+[ Upstream commit eb55bbf865d9979098c6a7a17cbdb41237ece951 ]
+
+There is a timing issue under active-standby mode, when bond_enslave() is
+called, bond->params.primary might not be initialized yet.
+
+Any time the primary slave string changes, bond->force_primary should be
+set to true to make sure the primary becomes the active slave.
+
+Signed-off-by: Xiangning Yu <yuxiangning@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/bonding/bond_options.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/bonding/bond_options.c
++++ b/drivers/net/bonding/bond_options.c
+@@ -1142,6 +1142,7 @@ static int bond_option_primary_set(struc
+ slave->dev->name);
+ rcu_assign_pointer(bond->primary_slave, slave);
+ strcpy(bond->params.primary, slave->dev->name);
++ bond->force_primary = true;
+ bond_select_active_slave(bond);
+ goto out;
+ }
--- /dev/null
+From foo@baz Thu Jun 21 06:54:06 JST 2018
+From: "Bjørn Mork" <bjorn@mork.no>
+Date: Fri, 8 Jun 2018 09:15:24 +0200
+Subject: cdc_ncm: avoid padding beyond end of skb
+
+From: "Bjørn Mork" <bjorn@mork.no>
+
+[ Upstream commit 49c2c3f246e2fc3009039e31a826333dcd0283cd ]
+
+Commit 4a0e3e989d66 ("cdc_ncm: Add support for moving NDP to end
+of NCM frame") added logic to reserve space for the NDP at the
+end of the NTB/skb. This reservation did not take the final
+alignment of the NDP into account, causing us to reserve too
+little space. Additionally the padding prior to NDP addition did
+not ensure there was enough space for the NDP.
+
+The NTB/skb with the NDP appended would then exceed the configured
+max size. This caused the final padding of the NTB to use a
+negative count, padding to almost INT_MAX, and resulting in:
+
+[60103.825970] BUG: unable to handle kernel paging request at ffff9641f2004000
+[60103.825998] IP: __memset+0x24/0x30
+[60103.826001] PGD a6a06067 P4D a6a06067 PUD 4f65a063 PMD 72003063 PTE 0
+[60103.826013] Oops: 0002 [#1] SMP NOPTI
+[60103.826018] Modules linked in: (removed)
+[60103.826158] CPU: 0 PID: 5990 Comm: Chrome_DevTools Tainted: G O 4.14.0-3-amd64 #1 Debian 4.14.17-1
+[60103.826162] Hardware name: LENOVO 20081 BIOS 41CN28WW(V2.04) 05/03/2012
+[60103.826166] task: ffff964193484fc0 task.stack: ffffb2890137c000
+[60103.826171] RIP: 0010:__memset+0x24/0x30
+[60103.826174] RSP: 0000:ffff964316c03b68 EFLAGS: 00010216
+[60103.826178] RAX: 0000000000000000 RBX: 00000000fffffffd RCX: 000000001ffa5000
+[60103.826181] RDX: 0000000000000005 RSI: 0000000000000000 RDI: ffff9641f2003ffc
+[60103.826184] RBP: ffff964192f6c800 R08: 00000000304d434e R09: ffff9641f1d2c004
+[60103.826187] R10: 0000000000000002 R11: 00000000000005ae R12: ffff9642e6957a80
+[60103.826190] R13: ffff964282ff2ee8 R14: 000000000000000d R15: ffff9642e4843900
+[60103.826194] FS: 00007f395aaf6700(0000) GS:ffff964316c00000(0000) knlGS:0000000000000000
+[60103.826197] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[60103.826200] CR2: ffff9641f2004000 CR3: 0000000013b0c000 CR4: 00000000000006f0
+[60103.826204] Call Trace:
+[60103.826212] <IRQ>
+[60103.826225] cdc_ncm_fill_tx_frame+0x5e3/0x740 [cdc_ncm]
+[60103.826236] cdc_ncm_tx_fixup+0x57/0x70 [cdc_ncm]
+[60103.826246] usbnet_start_xmit+0x5d/0x710 [usbnet]
+[60103.826254] ? netif_skb_features+0x119/0x250
+[60103.826259] dev_hard_start_xmit+0xa1/0x200
+[60103.826267] sch_direct_xmit+0xf2/0x1b0
+[60103.826273] __dev_queue_xmit+0x5e3/0x7c0
+[60103.826280] ? ip_finish_output2+0x263/0x3c0
+[60103.826284] ip_finish_output2+0x263/0x3c0
+[60103.826289] ? ip_output+0x6c/0xe0
+[60103.826293] ip_output+0x6c/0xe0
+[60103.826298] ? ip_forward_options+0x1a0/0x1a0
+[60103.826303] tcp_transmit_skb+0x516/0x9b0
+[60103.826309] tcp_write_xmit+0x1aa/0xee0
+[60103.826313] ? sch_direct_xmit+0x71/0x1b0
+[60103.826318] tcp_tasklet_func+0x177/0x180
+[60103.826325] tasklet_action+0x5f/0x110
+[60103.826332] __do_softirq+0xde/0x2b3
+[60103.826337] irq_exit+0xae/0xb0
+[60103.826342] do_IRQ+0x81/0xd0
+[60103.826347] common_interrupt+0x98/0x98
+[60103.826351] </IRQ>
+[60103.826355] RIP: 0033:0x7f397bdf2282
+[60103.826358] RSP: 002b:00007f395aaf57d8 EFLAGS: 00000206 ORIG_RAX: ffffffffffffff6e
+[60103.826362] RAX: 0000000000000000 RBX: 00002f07bc6d0900 RCX: 00007f39752d7fe7
+[60103.826365] RDX: 0000000000000022 RSI: 0000000000000147 RDI: 00002f07baea02c0
+[60103.826368] RBP: 0000000000000001 R08: 0000000000000000 R09: 0000000000000000
+[60103.826371] R10: 00000000ffffffff R11: 0000000000000000 R12: 00002f07baea02c0
+[60103.826373] R13: 00002f07bba227a0 R14: 00002f07bc6d090c R15: 0000000000000000
+[60103.826377] Code: 90 90 90 90 90 90 90 0f 1f 44 00 00 49 89 f9 48 89 d1 83
+e2 07 48 c1 e9 03 40 0f b6 f6 48 b8 01 01 01 01 01 01 01 01 48 0f af c6 <f3> 48
+ab 89 d1 f3 aa 4c 89 c8 c3 90 49 89 f9 40 88 f0 48 89 d1
+[60103.826442] RIP: __memset+0x24/0x30 RSP: ffff964316c03b68
+[60103.826444] CR2: ffff9641f2004000
+
+Commit e1069bbfcf3b ("net: cdc_ncm: Reduce memory use when kernel
+memory low") made this bug much more likely to trigger by reducing
+the NTB size under memory pressure.
+
+Link: https://bugs.debian.org/893393
+Reported-by: Горбешко Богдан <bodqhrohro@gmail.com>
+Reported-and-tested-by: Dennis Wassenberg <dennis.wassenberg@secunet.com>
+Cc: Enrico Mioso <mrkiko.rs@gmail.com>
+Fixes: 4a0e3e989d66 ("cdc_ncm: Add support for moving NDP to end of NCM frame")
+Signed-off-by: Bjørn Mork <bjorn@mork.no>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/cdc_ncm.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/usb/cdc_ncm.c
++++ b/drivers/net/usb/cdc_ncm.c
+@@ -1124,7 +1124,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev
+ * accordingly. Otherwise, we should check here.
+ */
+ if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)
+- delayed_ndp_size = ctx->max_ndp_size;
++ delayed_ndp_size = ALIGN(ctx->max_ndp_size, ctx->tx_ndp_modulus);
+ else
+ delayed_ndp_size = 0;
+
+@@ -1285,7 +1285,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev
+ /* If requested, put NDP at end of frame. */
+ if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
+ nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
+- cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_curr_size);
++ cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_curr_size - ctx->max_ndp_size);
+ nth16->wNdpIndex = cpu_to_le16(skb_out->len);
+ skb_put_data(skb_out, ctx->delayed_ndp16, ctx->max_ndp_size);
+
--- /dev/null
+From foo@baz Thu Jun 21 06:54:06 JST 2018
+From: Dexuan Cui <decui@microsoft.com>
+Date: Wed, 6 Jun 2018 21:32:51 +0000
+Subject: hv_netvsc: Fix a network regression after ifdown/ifup
+
+From: Dexuan Cui <decui@microsoft.com>
+
+[ Upstream commit 52acf73b6e9a6962045feb2ba5a8921da2201915 ]
+
+Recently people reported the NIC stops working after
+"ifdown eth0; ifup eth0". It turns out in this case the TX queues are not
+enabled, after the refactoring of the common detach logic: when the NIC
+has sub-channels, usually we enable all the TX queues after all
+sub-channels are set up: see rndis_set_subchannel() ->
+netif_device_attach(), but in the case of "ifdown eth0; ifup eth0" where
+the number of channels doesn't change, we also must make sure the TX queues
+are enabled. The patch fixes the regression.
+
+Fixes: 7b2ee50c0cd5 ("hv_netvsc: common detach logic")
+Signed-off-by: Dexuan Cui <decui@microsoft.com>
+Cc: Stephen Hemminger <sthemmin@microsoft.com>
+Cc: K. Y. Srinivasan <kys@microsoft.com>
+Cc: Haiyang Zhang <haiyangz@microsoft.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/hyperv/netvsc_drv.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -126,8 +126,10 @@ static int netvsc_open(struct net_device
+ }
+
+ rdev = nvdev->extension;
+- if (!rdev->link_state)
++ if (!rdev->link_state) {
+ netif_carrier_on(net);
++ netif_tx_wake_all_queues(net);
++ }
+
+ if (vf_netdev) {
+ /* Setting synthetic device up transparently sets
--- /dev/null
+From foo@baz Thu Jun 21 06:54:06 JST 2018
+From: Julian Anastasov <ja@ssi.bg>
+Date: Mon, 11 Jun 2018 02:02:54 +0300
+Subject: ipv6: allow PMTU exceptions to local routes
+
+From: Julian Anastasov <ja@ssi.bg>
+
+[ Upstream commit 0975764684487bf3f7a47eef009e750ea41bd514 ]
+
+IPVS setups with local client and remote tunnel server need
+to create exception for the local virtual IP. What we do is to
+change PMTU from 64KB (on "lo") to 1460 in the common case.
+
+Suggested-by: Martin KaFai Lau <kafai@fb.com>
+Fixes: 45e4fd26683c ("ipv6: Only create RTF_CACHE routes after encountering pmtu exception")
+Fixes: 7343ff31ebf0 ("ipv6: Don't create clones of host routes.")
+Signed-off-by: Julian Anastasov <ja@ssi.bg>
+Acked-by: David Ahern <dsahern@gmail.com>
+Acked-by: Martin KaFai Lau <kafai@fb.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/route.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -2149,9 +2149,6 @@ static void __ip6_rt_update_pmtu(struct
+ const struct in6_addr *daddr, *saddr;
+ struct rt6_info *rt6 = (struct rt6_info *)dst;
+
+- if (rt6->rt6i_flags & RTF_LOCAL)
+- return;
+-
+ if (dst_metric_locked(dst, RTAX_MTU))
+ return;
+
--- /dev/null
+From foo@baz Thu Jun 21 06:54:06 JST 2018
+From: Zhouyang Jia <jiazhouyang09@gmail.com>
+Date: Mon, 11 Jun 2018 13:26:35 +0800
+Subject: net: dsa: add error handling for pskb_trim_rcsum
+
+From: Zhouyang Jia <jiazhouyang09@gmail.com>
+
+[ Upstream commit 349b71d6f427ff8211adf50839dbbff3f27c1805 ]
+
+When pskb_trim_rcsum fails, the lack of error-handling code may
+cause unexpected results.
+
+This patch adds error-handling code after calling pskb_trim_rcsum.
+
+Signed-off-by: Zhouyang Jia <jiazhouyang09@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/dsa/tag_trailer.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/dsa/tag_trailer.c
++++ b/net/dsa/tag_trailer.c
+@@ -75,7 +75,8 @@ static struct sk_buff *trailer_rcv(struc
+ if (!skb->dev)
+ return NULL;
+
+- pskb_trim_rcsum(skb, skb->len - 4);
++ if (pskb_trim_rcsum(skb, skb->len - 4))
++ return NULL;
+
+ return skb;
+ }
--- /dev/null
+From foo@baz Thu Jun 21 06:54:06 JST 2018
+From: Willem de Bruijn <willemb@google.com>
+Date: Wed, 6 Jun 2018 11:23:01 -0400
+Subject: net: in virtio_net_hdr only add VLAN_HLEN to csum_start if payload holds vlan
+
+From: Willem de Bruijn <willemb@google.com>
+
+[ Upstream commit fd3a88625844907151737fc3b4201676effa6d27 ]
+
+Tun, tap, virtio, packet and uml vector all use struct virtio_net_hdr
+to communicate packet metadata to userspace.
+
+For skbuffs with vlan, the first two return the packet as it may have
+existed on the wire, inserting the VLAN tag in the user buffer. Then
+virtio_net_hdr.csum_start needs to be adjusted by VLAN_HLEN bytes.
+
+Commit f09e2249c4f5 ("macvtap: restore vlan header on user read")
+added this feature to macvtap. Commit 3ce9b20f1971 ("macvtap: Fix
+csum_start when VLAN tags are present") then fixed up csum_start.
+
+Virtio, packet and uml do not insert the vlan header in the user
+buffer.
+
+When introducing virtio_net_hdr_from_skb to deduplicate filling in
+the virtio_net_hdr, the variant from macvtap which adds VLAN_HLEN was
+applied uniformly, breaking csum offset for packets with vlan on
+virtio and packet.
+
+Make insertion of VLAN_HLEN optional. Convert the callers to pass it
+when needed.
+
+Fixes: e858fae2b0b8f4 ("virtio_net: use common code for virtio_net_hdr and skb GSO conversion")
+Fixes: 1276f24eeef2 ("packet: use common code for virtio_net_hdr and skb GSO conversion")
+Signed-off-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/um/drivers/vector_transports.c | 3 ++-
+ drivers/net/tap.c | 5 ++++-
+ drivers/net/tun.c | 3 ++-
+ drivers/net/virtio_net.c | 3 ++-
+ include/linux/virtio_net.h | 11 ++++-------
+ net/packet/af_packet.c | 4 ++--
+ 6 files changed, 16 insertions(+), 13 deletions(-)
+
+--- a/arch/um/drivers/vector_transports.c
++++ b/arch/um/drivers/vector_transports.c
+@@ -120,7 +120,8 @@ static int raw_form_header(uint8_t *head
+ skb,
+ vheader,
+ virtio_legacy_is_little_endian(),
+- false
++ false,
++ 0
+ );
+
+ return 0;
+--- a/drivers/net/tap.c
++++ b/drivers/net/tap.c
+@@ -774,13 +774,16 @@ static ssize_t tap_put_user(struct tap_q
+ int total;
+
+ if (q->flags & IFF_VNET_HDR) {
++ int vlan_hlen = skb_vlan_tag_present(skb) ? VLAN_HLEN : 0;
+ struct virtio_net_hdr vnet_hdr;
++
+ vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
+ if (iov_iter_count(iter) < vnet_hdr_len)
+ return -EINVAL;
+
+ if (virtio_net_hdr_from_skb(skb, &vnet_hdr,
+- tap_is_little_endian(q), true))
++ tap_is_little_endian(q), true,
++ vlan_hlen))
+ BUG();
+
+ if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -2078,7 +2078,8 @@ static ssize_t tun_put_user(struct tun_s
+ return -EINVAL;
+
+ if (virtio_net_hdr_from_skb(skb, &gso,
+- tun_is_little_endian(tun), true)) {
++ tun_is_little_endian(tun), true,
++ vlan_hlen)) {
+ struct skb_shared_info *sinfo = skb_shinfo(skb);
+ pr_err("unexpected GSO type: "
+ "0x%x, gso_size %d, hdr_len %d\n",
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -1358,7 +1358,8 @@ static int xmit_skb(struct send_queue *s
+ hdr = skb_vnet_hdr(skb);
+
+ if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
+- virtio_is_little_endian(vi->vdev), false))
++ virtio_is_little_endian(vi->vdev), false,
++ 0))
+ BUG();
+
+ if (vi->mergeable_rx_bufs)
+--- a/include/linux/virtio_net.h
++++ b/include/linux/virtio_net.h
+@@ -58,7 +58,8 @@ static inline int virtio_net_hdr_to_skb(
+ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
+ struct virtio_net_hdr *hdr,
+ bool little_endian,
+- bool has_data_valid)
++ bool has_data_valid,
++ int vlan_hlen)
+ {
+ memset(hdr, 0, sizeof(*hdr)); /* no info leak */
+
+@@ -83,12 +84,8 @@ static inline int virtio_net_hdr_from_sk
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+- if (skb_vlan_tag_present(skb))
+- hdr->csum_start = __cpu_to_virtio16(little_endian,
+- skb_checksum_start_offset(skb) + VLAN_HLEN);
+- else
+- hdr->csum_start = __cpu_to_virtio16(little_endian,
+- skb_checksum_start_offset(skb));
++ hdr->csum_start = __cpu_to_virtio16(little_endian,
++ skb_checksum_start_offset(skb) + vlan_hlen);
+ hdr->csum_offset = __cpu_to_virtio16(little_endian,
+ skb->csum_offset);
+ } else if (has_data_valid &&
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2037,7 +2037,7 @@ static int packet_rcv_vnet(struct msghdr
+ return -EINVAL;
+ *len -= sizeof(vnet_hdr);
+
+- if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true))
++ if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0))
+ return -EINVAL;
+
+ return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
+@@ -2304,7 +2304,7 @@ static int tpacket_rcv(struct sk_buff *s
+ if (do_vnet) {
+ if (virtio_net_hdr_from_skb(skb, h.raw + macoff -
+ sizeof(struct virtio_net_hdr),
+- vio_le(), true)) {
++ vio_le(), true, 0)) {
+ spin_lock(&sk->sk_receive_queue.lock);
+ goto drop_n_account;
+ }
--- /dev/null
+From foo@baz Thu Jun 21 06:54:06 JST 2018
+From: Alvaro Gamez Machado <alvaro.gamez@hazent.com>
+Date: Fri, 8 Jun 2018 12:23:39 +0200
+Subject: net: phy: dp83822: use BMCR_ANENABLE instead of BMSR_ANEGCAPABLE for DP83620
+
+From: Alvaro Gamez Machado <alvaro.gamez@hazent.com>
+
+[ Upstream commit b718e8c8f4f5920aaddc2e52d5e32f494c91129c ]
+
+DP83620 register set is compatible with the DP83848, but it also supports
+100base-FX. When the hardware is configured such as that fiber mode is
+enabled, autonegotiation is not possible.
+
+The chip, however, doesn't expose this information via BMSR_ANEGCAPABLE.
+Instead, this bit is always set high, even if the particular hardware
+configuration makes it so that auto negotiation is not possible [1]. Under
+these circumstances, the phy subsystem keeps trying for autonegotiation to
+happen, without success.
+
+Hereby, we inspect BMCR_ANENABLE bit after genphy_config_init, which on
+reset is set to 0 when auto negotiation is disabled, and so we use this
+value instead of BMSR_ANEGCAPABLE.
+
+[1] https://e2e.ti.com/support/interface/ethernet/f/903/p/697165/2571170
+
+Signed-off-by: Alvaro Gamez Machado <alvaro.gamez@hazent.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/phy/dp83848.c | 35 +++++++++++++++++++++++++++++------
+ 1 file changed, 29 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/phy/dp83848.c
++++ b/drivers/net/phy/dp83848.c
+@@ -74,6 +74,25 @@ static int dp83848_config_intr(struct ph
+ return phy_write(phydev, DP83848_MICR, control);
+ }
+
++static int dp83848_config_init(struct phy_device *phydev)
++{
++ int err;
++ int val;
++
++ err = genphy_config_init(phydev);
++ if (err < 0)
++ return err;
++
++ /* DP83620 always reports Auto Negotiation Ability on BMSR. Instead,
++ * we check initial value of BMCR Auto negotiation enable bit
++ */
++ val = phy_read(phydev, MII_BMCR);
++ if (!(val & BMCR_ANENABLE))
++ phydev->autoneg = AUTONEG_DISABLE;
++
++ return 0;
++}
++
+ static struct mdio_device_id __maybe_unused dp83848_tbl[] = {
+ { TI_DP83848C_PHY_ID, 0xfffffff0 },
+ { NS_DP83848C_PHY_ID, 0xfffffff0 },
+@@ -83,7 +102,7 @@ static struct mdio_device_id __maybe_unu
+ };
+ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
+
+-#define DP83848_PHY_DRIVER(_id, _name) \
++#define DP83848_PHY_DRIVER(_id, _name, _config_init) \
+ { \
+ .phy_id = _id, \
+ .phy_id_mask = 0xfffffff0, \
+@@ -92,7 +111,7 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
+ .flags = PHY_HAS_INTERRUPT, \
+ \
+ .soft_reset = genphy_soft_reset, \
+- .config_init = genphy_config_init, \
++ .config_init = _config_init, \
+ .suspend = genphy_suspend, \
+ .resume = genphy_resume, \
+ \
+@@ -102,10 +121,14 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
+ }
+
+ static struct phy_driver dp83848_driver[] = {
+- DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY"),
+- DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY"),
+- DP83848_PHY_DRIVER(TI_DP83620_PHY_ID, "TI DP83620 10/100 Mbps PHY"),
+- DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY"),
++ DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY",
++ genphy_config_init),
++ DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY",
++ genphy_config_init),
++ DP83848_PHY_DRIVER(TI_DP83620_PHY_ID, "TI DP83620 10/100 Mbps PHY",
++ dp83848_config_init),
++ DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY",
++ genphy_config_init),
+ };
+ module_phy_driver(dp83848_driver);
+
--- /dev/null
+From foo@baz Thu Jun 21 06:54:06 JST 2018
+From: Davide Caratti <dcaratti@redhat.com>
+Date: Fri, 8 Jun 2018 05:02:31 +0200
+Subject: net/sched: act_simple: fix parsing of TCA_DEF_DATA
+
+From: Davide Caratti <dcaratti@redhat.com>
+
+[ Upstream commit 8d499533e0bc02d44283dbdab03142b599b8ba16 ]
+
+use nla_strlcpy() to avoid copying data beyond the length of TCA_DEF_DATA
+netlink attribute, in case it is less than SIMP_MAX_DATA and it does not
+end with '\0' character.
+
+v2: fix errors in the commit message, thanks Hangbin Liu
+
+Fixes: fa1b1cff3d06 ("net_cls_act: Make act_simple use of netlink policy.")
+Signed-off-by: Davide Caratti <dcaratti@redhat.com>
+Reviewed-by: Simon Horman <simon.horman@netronome.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/act_simple.c | 15 ++++++---------
+ 1 file changed, 6 insertions(+), 9 deletions(-)
+
+--- a/net/sched/act_simple.c
++++ b/net/sched/act_simple.c
+@@ -53,22 +53,22 @@ static void tcf_simp_release(struct tc_a
+ kfree(d->tcfd_defdata);
+ }
+
+-static int alloc_defdata(struct tcf_defact *d, char *defdata)
++static int alloc_defdata(struct tcf_defact *d, const struct nlattr *defdata)
+ {
+ d->tcfd_defdata = kzalloc(SIMP_MAX_DATA, GFP_KERNEL);
+ if (unlikely(!d->tcfd_defdata))
+ return -ENOMEM;
+- strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
++ nla_strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
+ return 0;
+ }
+
+-static void reset_policy(struct tcf_defact *d, char *defdata,
++static void reset_policy(struct tcf_defact *d, const struct nlattr *defdata,
+ struct tc_defact *p)
+ {
+ spin_lock_bh(&d->tcf_lock);
+ d->tcf_action = p->action;
+ memset(d->tcfd_defdata, 0, SIMP_MAX_DATA);
+- strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
++ nla_strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
+ spin_unlock_bh(&d->tcf_lock);
+ }
+
+@@ -87,7 +87,6 @@ static int tcf_simp_init(struct net *net
+ struct tcf_defact *d;
+ bool exists = false;
+ int ret = 0, err;
+- char *defdata;
+
+ if (nla == NULL)
+ return -EINVAL;
+@@ -110,8 +109,6 @@ static int tcf_simp_init(struct net *net
+ return -EINVAL;
+ }
+
+- defdata = nla_data(tb[TCA_DEF_DATA]);
+-
+ if (!exists) {
+ ret = tcf_idr_create(tn, parm->index, est, a,
+ &act_simp_ops, bind, false);
+@@ -119,7 +116,7 @@ static int tcf_simp_init(struct net *net
+ return ret;
+
+ d = to_defact(*a);
+- ret = alloc_defdata(d, defdata);
++ ret = alloc_defdata(d, tb[TCA_DEF_DATA]);
+ if (ret < 0) {
+ tcf_idr_release(*a, bind);
+ return ret;
+@@ -133,7 +130,7 @@ static int tcf_simp_init(struct net *net
+ if (!ovr)
+ return -EEXIST;
+
+- reset_policy(d, defdata, parm);
++ reset_policy(d, tb[TCA_DEF_DATA], parm);
+ }
+
+ if (ret == ACT_P_CREATED)
net-aquantia-fix-unsigned-numvecs-comparison-with-less-than-zero.patch
+bonding-re-evaluate-force_primary-when-the-primary-slave-name-changes.patch
+cdc_ncm-avoid-padding-beyond-end-of-skb.patch
+ipv6-allow-pmtu-exceptions-to-local-routes.patch
+net-dsa-add-error-handling-for-pskb_trim_rcsum.patch
+net-phy-dp83822-use-bmcr_anenable-instead-of-bmsr_anegcapable-for-dp83620.patch
+net-sched-act_simple-fix-parsing-of-tca_def_data.patch
+tcp-verify-the-checksum-of-the-first-data-segment-in-a-new-connection.patch
+tls-fix-use-after-free-in-tls_push_record.patch
+tls-fix-waitall-behavior-in-tls_sw_recvmsg.patch
+socket-close-race-condition-between-sock_close-and-sockfs_setattr.patch
+udp-fix-rx-queue-len-reported-by-diag-and-proc-interface.patch
+net-in-virtio_net_hdr-only-add-vlan_hlen-to-csum_start-if-payload-holds-vlan.patch
+hv_netvsc-fix-a-network-regression-after-ifdown-ifup.patch
--- /dev/null
+From foo@baz Thu Jun 21 06:54:06 JST 2018
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Thu, 7 Jun 2018 13:39:49 -0700
+Subject: socket: close race condition between sock_close() and sockfs_setattr()
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+[ Upstream commit 6d8c50dcb029872b298eea68cc6209c866fd3e14 ]
+
+fchownat() doesn't even hold refcnt of fd until it figures out
+fd is really needed (otherwise is ignored) and releases it after
+it resolves the path. This means sock_close() could race with
+sockfs_setattr(), which leads to a NULL pointer dereference
+since typically we set sock->sk to NULL in ->release().
+
+As pointed out by Al, this is unique to sockfs. So we can fix this
+in socket layer by acquiring inode_lock in sock_close() and
+checking against NULL in sockfs_setattr().
+
+sock_release() is called in many places, only the sock_close()
+path matters here. And fortunately, this should not affect normal
+sock_close() as it is only called when the last fd refcnt is gone.
+It only affects sock_close() with a parallel sockfs_setattr() in
+progress, which is not common.
+
+Fixes: 86741ec25462 ("net: core: Add a UID field to struct sock.")
+Reported-by: shankarapailoor <shankarapailoor@gmail.com>
+Cc: Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp>
+Cc: Lorenzo Colitti <lorenzo@google.com>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/socket.c | 18 +++++++++++++++---
+ 1 file changed, 15 insertions(+), 3 deletions(-)
+
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -537,7 +537,10 @@ static int sockfs_setattr(struct dentry
+ if (!err && (iattr->ia_valid & ATTR_UID)) {
+ struct socket *sock = SOCKET_I(d_inode(dentry));
+
+- sock->sk->sk_uid = iattr->ia_uid;
++ if (sock->sk)
++ sock->sk->sk_uid = iattr->ia_uid;
++ else
++ err = -ENOENT;
+ }
+
+ return err;
+@@ -586,12 +589,16 @@ EXPORT_SYMBOL(sock_alloc);
+ * an inode not a file.
+ */
+
+-void sock_release(struct socket *sock)
++static void __sock_release(struct socket *sock, struct inode *inode)
+ {
+ if (sock->ops) {
+ struct module *owner = sock->ops->owner;
+
++ if (inode)
++ inode_lock(inode);
+ sock->ops->release(sock);
++ if (inode)
++ inode_unlock(inode);
+ sock->ops = NULL;
+ module_put(owner);
+ }
+@@ -605,6 +612,11 @@ void sock_release(struct socket *sock)
+ }
+ sock->file = NULL;
+ }
++
++void sock_release(struct socket *sock)
++{
++ __sock_release(sock, NULL);
++}
+ EXPORT_SYMBOL(sock_release);
+
+ void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags)
+@@ -1146,7 +1158,7 @@ static int sock_mmap(struct file *file,
+
+ static int sock_close(struct inode *inode, struct file *filp)
+ {
+- sock_release(SOCKET_I(inode));
++ __sock_release(SOCKET_I(inode), inode);
+ return 0;
+ }
+
--- /dev/null
+From foo@baz Thu Jun 21 06:54:06 JST 2018
+From: Frank van der Linden <fllinden@amazon.com>
+Date: Tue, 12 Jun 2018 23:09:37 +0000
+Subject: tcp: verify the checksum of the first data segment in a new connection
+
+From: Frank van der Linden <fllinden@amazon.com>
+
+[ Upstream commit 4fd44a98ffe0d048246efef67ed640fdf2098a62 ]
+
+commit 079096f103fa ("tcp/dccp: install syn_recv requests into ehash
+table") introduced an optimization for the handling of child sockets
+created for a new TCP connection.
+
+But this optimization passes any data associated with the last ACK of the
+connection handshake up the stack without verifying its checksum, because it
+calls tcp_child_process(), which in turn calls tcp_rcv_state_process()
+directly. These lower-level processing functions do not do any checksum
+verification.
+
+Insert a tcp_checksum_complete call in the TCP_NEW_SYN_RECEIVE path to
+fix this.
+
+Fixes: 079096f103fa ("tcp/dccp: install syn_recv requests into ehash table")
+Signed-off-by: Frank van der Linden <fllinden@amazon.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Tested-by: Balbir Singh <bsingharora@gmail.com>
+Reviewed-by: Balbir Singh <bsingharora@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_ipv4.c | 4 ++++
+ net/ipv6/tcp_ipv6.c | 4 ++++
+ 2 files changed, 8 insertions(+)
+
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -1689,6 +1689,10 @@ process:
+ reqsk_put(req);
+ goto discard_it;
+ }
++ if (tcp_checksum_complete(skb)) {
++ reqsk_put(req);
++ goto csum_error;
++ }
+ if (unlikely(sk->sk_state != TCP_LISTEN)) {
+ inet_csk_reqsk_queue_drop_and_put(sk, req);
+ goto lookup;
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1475,6 +1475,10 @@ process:
+ reqsk_put(req);
+ goto discard_it;
+ }
++ if (tcp_checksum_complete(skb)) {
++ reqsk_put(req);
++ goto csum_error;
++ }
+ if (unlikely(sk->sk_state != TCP_LISTEN)) {
+ inet_csk_reqsk_queue_drop_and_put(sk, req);
+ goto lookup;
--- /dev/null
+From foo@baz Thu Jun 21 06:54:06 JST 2018
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Fri, 15 Jun 2018 03:07:45 +0200
+Subject: tls: fix use-after-free in tls_push_record
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+[ Upstream commit a447da7d00410278c90d3576782a43f8b675d7be ]
+
+syzkaller managed to trigger a use-after-free in tls like the
+following:
+
+ BUG: KASAN: use-after-free in tls_push_record.constprop.15+0x6a2/0x810 [tls]
+ Write of size 1 at addr ffff88037aa08000 by task a.out/2317
+
+ CPU: 3 PID: 2317 Comm: a.out Not tainted 4.17.0+ #144
+ Hardware name: LENOVO 20FBCTO1WW/20FBCTO1WW, BIOS N1FET47W (1.21 ) 11/28/2016
+ Call Trace:
+ dump_stack+0x71/0xab
+ print_address_description+0x6a/0x280
+ kasan_report+0x258/0x380
+ ? tls_push_record.constprop.15+0x6a2/0x810 [tls]
+ tls_push_record.constprop.15+0x6a2/0x810 [tls]
+ tls_sw_push_pending_record+0x2e/0x40 [tls]
+ tls_sk_proto_close+0x3fe/0x710 [tls]
+ ? tcp_check_oom+0x4c0/0x4c0
+ ? tls_write_space+0x260/0x260 [tls]
+ ? kmem_cache_free+0x88/0x1f0
+ inet_release+0xd6/0x1b0
+ __sock_release+0xc0/0x240
+ sock_close+0x11/0x20
+ __fput+0x22d/0x660
+ task_work_run+0x114/0x1a0
+ do_exit+0x71a/0x2780
+ ? mm_update_next_owner+0x650/0x650
+ ? handle_mm_fault+0x2f5/0x5f0
+ ? __do_page_fault+0x44f/0xa50
+ ? mm_fault_error+0x2d0/0x2d0
+ do_group_exit+0xde/0x300
+ __x64_sys_exit_group+0x3a/0x50
+ do_syscall_64+0x9a/0x300
+ ? page_fault+0x8/0x30
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+This happened through fault injection where aead_req allocation in
+tls_do_encryption() eventually failed and we returned -ENOMEM from
+the function. Turns out that the use-after-free is triggered from
+tls_sw_sendmsg() in the second tls_push_record(). The error then
+triggers a jump to waiting for memory in sk_stream_wait_memory()
+resp. returning immediately in case of MSG_DONTWAIT. What follows is
+the trim_both_sgl(sk, orig_size), which drops elements from the sg
+list added via tls_sw_sendmsg(). Now the use-after-free gets triggered
+when the socket is being closed, where tls_sk_proto_close() callback
+is invoked. The tls_complete_pending_work() will figure that there's
+a pending closed tls record to be flushed and thus calls into the
+tls_push_pending_closed_record() from there. ctx->push_pending_record()
+is called from the latter, which is the tls_sw_push_pending_record()
+from sw path. This again calls into tls_push_record(). And here the
+tls_fill_prepend() will panic since the buffer address has been freed
+earlier via trim_both_sgl(). One way to fix it is to move the aead
+request allocation out of tls_do_encryption() early into tls_push_record().
+This means we don't prep the tls header and advance state to the
+TLS_PENDING_CLOSED_RECORD before allocation which could potentially
+fail happened. That fixes the issue on my side.
+
+Fixes: 3c4d7559159b ("tls: kernel TLS support")
+Reported-by: syzbot+5c74af81c547738e1684@syzkaller.appspotmail.com
+Reported-by: syzbot+709f2810a6a05f11d4d3@syzkaller.appspotmail.com
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Dave Watson <davejwatson@fb.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tls/tls_sw.c | 26 +++++++++++++-------------
+ 1 file changed, 13 insertions(+), 13 deletions(-)
+
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -191,18 +191,12 @@ static void tls_free_both_sg(struct sock
+ }
+
+ static int tls_do_encryption(struct tls_context *tls_ctx,
+- struct tls_sw_context *ctx, size_t data_len,
+- gfp_t flags)
++ struct tls_sw_context *ctx,
++ struct aead_request *aead_req,
++ size_t data_len)
+ {
+- unsigned int req_size = sizeof(struct aead_request) +
+- crypto_aead_reqsize(ctx->aead_send);
+- struct aead_request *aead_req;
+ int rc;
+
+- aead_req = kzalloc(req_size, flags);
+- if (!aead_req)
+- return -ENOMEM;
+-
+ ctx->sg_encrypted_data[0].offset += tls_ctx->tx.prepend_size;
+ ctx->sg_encrypted_data[0].length -= tls_ctx->tx.prepend_size;
+
+@@ -219,7 +213,6 @@ static int tls_do_encryption(struct tls_
+ ctx->sg_encrypted_data[0].offset -= tls_ctx->tx.prepend_size;
+ ctx->sg_encrypted_data[0].length += tls_ctx->tx.prepend_size;
+
+- kfree(aead_req);
+ return rc;
+ }
+
+@@ -228,8 +221,14 @@ static int tls_push_record(struct sock *
+ {
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
++ struct aead_request *req;
+ int rc;
+
++ req = kzalloc(sizeof(struct aead_request) +
++ crypto_aead_reqsize(ctx->aead_send), sk->sk_allocation);
++ if (!req)
++ return -ENOMEM;
++
+ sg_mark_end(ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem - 1);
+ sg_mark_end(ctx->sg_encrypted_data + ctx->sg_encrypted_num_elem - 1);
+
+@@ -245,15 +244,14 @@ static int tls_push_record(struct sock *
+ tls_ctx->pending_open_record_frags = 0;
+ set_bit(TLS_PENDING_CLOSED_RECORD, &tls_ctx->flags);
+
+- rc = tls_do_encryption(tls_ctx, ctx, ctx->sg_plaintext_size,
+- sk->sk_allocation);
++ rc = tls_do_encryption(tls_ctx, ctx, req, ctx->sg_plaintext_size);
+ if (rc < 0) {
+ /* If we are called from write_space and
+ * we fail, we need to set this SOCK_NOSPACE
+ * to trigger another write_space in the future.
+ */
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+- return rc;
++ goto out_req;
+ }
+
+ free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
+@@ -268,6 +266,8 @@ static int tls_push_record(struct sock *
+ tls_err_abort(sk, EBADMSG);
+
+ tls_advance_record_sn(sk, &tls_ctx->tx);
++out_req:
++ kfree(req);
+ return rc;
+ }
+
--- /dev/null
+From foo@baz Thu Jun 21 06:54:06 JST 2018
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Fri, 15 Jun 2018 03:07:46 +0200
+Subject: tls: fix waitall behavior in tls_sw_recvmsg
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+[ Upstream commit 06030dbaf3b6c5801dcdb7fe4fbab3b91c8da84a ]
+
+Current behavior in tls_sw_recvmsg() is to wait for incoming tls
+messages and copy up to exactly len bytes of data that the user
+provided. This is problematic in the sense that i) if no packet
+is currently queued in strparser we keep waiting until one has been
+processed and pushed into tls receive layer for tls_wait_data() to
+wake up and push the decrypted bits to user space. Given that after
+tls decryption, we're back at streaming data, use sock_rcvlowat()
+hint from tcp socket instead. Retain current behavior with MSG_WAITALL
+flag and otherwise use the hint target for breaking the loop and
+returning to application. This is done if currently no ctx->recv_pkt
+is ready, otherwise continue to process it from our strparser
+backlog.
+
+Fixes: c46234ebb4d1 ("tls: RX path for ktls")
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Dave Watson <davejwatson@fb.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tls/tls_sw.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -755,7 +755,7 @@ int tls_sw_recvmsg(struct sock *sk,
+ struct sk_buff *skb;
+ ssize_t copied = 0;
+ bool cmsg = false;
+- int err = 0;
++ int target, err = 0;
+ long timeo;
+
+ flags |= nonblock;
+@@ -765,6 +765,7 @@ int tls_sw_recvmsg(struct sock *sk,
+
+ lock_sock(sk);
+
++ target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
+ timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+ do {
+ bool zc = false;
+@@ -857,6 +858,9 @@ fallback_to_reg_recv:
+ goto recv_end;
+ }
+ }
++ /* If we have a new message from strparser, continue now. */
++ if (copied >= target && !ctx->recv_pkt)
++ break;
+ } while (len);
+
+ recv_end:
--- /dev/null
+From foo@baz Thu Jun 21 06:54:06 JST 2018
+From: Paolo Abeni <pabeni@redhat.com>
+Date: Fri, 8 Jun 2018 11:35:40 +0200
+Subject: udp: fix rx queue len reported by diag and proc interface
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+[ Upstream commit 6c206b20092a3623184cff9470dba75d21507874 ]
+
+After commit 6b229cf77d68 ("udp: add batching to udp_rmem_release()")
+the sk_rmem_alloc field does not measure exactly anymore the
+receive queue length, because we batch the rmem release. The issue
+is really apparent only after commit 0d4a6608f68c ("udp: do rmem bulk
+free even if the rx sk queue is empty"): the user space can easily
+check for an empty socket with a non-zero queue length reported by the 'ss'
+tool or the procfs interface.
+
+We need to use a custom UDP helper to report the correct queue length,
+taking into account the forward allocation deficit.
+
+Reported-by: trevor.francis@46labs.com
+Fixes: 6b229cf77d68 ("UDP: add batching to udp_rmem_release()")
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/transp_v6.h | 11 +++++++++--
+ include/net/udp.h | 5 +++++
+ net/ipv4/udp.c | 2 +-
+ net/ipv4/udp_diag.c | 2 +-
+ net/ipv6/datagram.c | 6 +++---
+ net/ipv6/udp.c | 3 ++-
+ 6 files changed, 21 insertions(+), 8 deletions(-)
+
+--- a/include/net/transp_v6.h
++++ b/include/net/transp_v6.h
+@@ -45,8 +45,15 @@ int ip6_datagram_send_ctl(struct net *ne
+ struct flowi6 *fl6, struct ipcm6_cookie *ipc6,
+ struct sockcm_cookie *sockc);
+
+-void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
+- __u16 srcp, __u16 destp, int bucket);
++void __ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
++ __u16 srcp, __u16 destp, int rqueue, int bucket);
++static inline void
++ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, __u16 srcp,
++ __u16 destp, int bucket)
++{
++ __ip6_dgram_sock_seq_show(seq, sp, srcp, destp, sk_rmem_alloc_get(sp),
++ bucket);
++}
+
+ #define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006)
+
+--- a/include/net/udp.h
++++ b/include/net/udp.h
+@@ -244,6 +244,11 @@ static inline __be16 udp_flow_src_port(s
+ return htons((((u64) hash * (max - min)) >> 32) + min);
+ }
+
++static inline int udp_rqueue_get(struct sock *sk)
++{
++ return sk_rmem_alloc_get(sk) - READ_ONCE(udp_sk(sk)->forward_deficit);
++}
++
+ /* net/ipv4/udp.c */
+ void udp_destruct_sock(struct sock *sk);
+ void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -2718,7 +2718,7 @@ static void udp4_format_sock(struct sock
+ " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d",
+ bucket, src, srcp, dest, destp, sp->sk_state,
+ sk_wmem_alloc_get(sp),
+- sk_rmem_alloc_get(sp),
++ udp_rqueue_get(sp),
+ 0, 0L, 0,
+ from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
+ 0, sock_i_ino(sp),
+--- a/net/ipv4/udp_diag.c
++++ b/net/ipv4/udp_diag.c
+@@ -163,7 +163,7 @@ static int udp_diag_dump_one(struct sk_b
+ static void udp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
+ void *info)
+ {
+- r->idiag_rqueue = sk_rmem_alloc_get(sk);
++ r->idiag_rqueue = udp_rqueue_get(sk);
+ r->idiag_wqueue = sk_wmem_alloc_get(sk);
+ }
+
+--- a/net/ipv6/datagram.c
++++ b/net/ipv6/datagram.c
+@@ -1019,8 +1019,8 @@ exit_f:
+ }
+ EXPORT_SYMBOL_GPL(ip6_datagram_send_ctl);
+
+-void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
+- __u16 srcp, __u16 destp, int bucket)
++void __ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
++ __u16 srcp, __u16 destp, int rqueue, int bucket)
+ {
+ const struct in6_addr *dest, *src;
+
+@@ -1036,7 +1036,7 @@ void ip6_dgram_sock_seq_show(struct seq_
+ dest->s6_addr32[2], dest->s6_addr32[3], destp,
+ sp->sk_state,
+ sk_wmem_alloc_get(sp),
+- sk_rmem_alloc_get(sp),
++ rqueue,
+ 0, 0L, 0,
+ from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
+ 0,
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1475,7 +1475,8 @@ int udp6_seq_show(struct seq_file *seq,
+ struct inet_sock *inet = inet_sk(v);
+ __u16 srcp = ntohs(inet->inet_sport);
+ __u16 destp = ntohs(inet->inet_dport);
+- ip6_dgram_sock_seq_show(seq, v, srcp, destp, bucket);
++ __ip6_dgram_sock_seq_show(seq, v, srcp, destp,
++ udp_rqueue_get(v), bucket);
+ }
+ return 0;
+ }