--- /dev/null
+From 212ac181c158c09038c474ba68068be49caecebb Mon Sep 17 00:00:00 2001
+From: Zubin Mithra <zsm@chromium.org>
+Date: Thu, 4 Apr 2019 14:33:55 -0700
+Subject: ALSA: seq: Fix OOB-reads from strlcpy
+
+From: Zubin Mithra <zsm@chromium.org>
+
+commit 212ac181c158c09038c474ba68068be49caecebb upstream.
+
+When ioctl calls are made with non-null-terminated userspace strings,
+strlcpy causes an OOB-read from within strlen. Fix by changing to use
+strscpy instead.
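+
+A rough userspace-style sketch of the difference (illustrative only,
+not the kernel implementation): an strlcpy()-style copy always runs
+strlen() over the source, while an strscpy()-style copy never reads
+more than 'size' bytes from it.
+
+    /* strlcpy-like: unbounded read if src lacks a NUL terminator */
+    size_t lcpy_like(char *dst, const char *src, size_t size)
+    {
+        size_t len = strlen(src);       /* may read out of bounds */
+
+        if (size) {
+            size_t n = len < size - 1 ? len : size - 1;
+            memcpy(dst, src, n);
+            dst[n] = '\0';
+        }
+        return len;
+    }
+
+    /* strscpy-like: reads at most 'size' bytes from src */
+    ssize_t scpy_like(char *dst, const char *src, size_t size)
+    {
+        size_t i;
+
+        for (i = 0; i < size; i++) {
+            dst[i] = src[i];
+            if (!src[i])
+                return i;               /* length copied */
+        }
+        if (size)
+            dst[size - 1] = '\0';       /* truncated */
+        return -E2BIG;                  /* from <linux/errno.h> */
+    }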
+
+Signed-off-by: Zubin Mithra <zsm@chromium.org>
+Reviewed-by: Guenter Roeck <groeck@chromium.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/core/seq/seq_clientmgr.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -1249,7 +1249,7 @@ static int snd_seq_ioctl_set_client_info
+
+ /* fill the info fields */
+ if (client_info->name[0])
+- strlcpy(client->name, client_info->name, sizeof(client->name));
++ strscpy(client->name, client_info->name, sizeof(client->name));
+
+ client->filter = client_info->filter;
+ client->event_lost = client_info->event_lost;
+@@ -1527,7 +1527,7 @@ static int snd_seq_ioctl_create_queue(st
+ /* set queue name */
+ if (!info->name[0])
+ snprintf(info->name, sizeof(info->name), "Queue-%d", q->queue);
+- strlcpy(q->name, info->name, sizeof(q->name));
++ strscpy(q->name, info->name, sizeof(q->name));
+ snd_use_lock_free(&q->use_lock);
+
+ return 0;
+@@ -1589,7 +1589,7 @@ static int snd_seq_ioctl_set_queue_info(
+ queuefree(q);
+ return -EPERM;
+ }
+- strlcpy(q->name, info->name, sizeof(q->name));
++ strscpy(q->name, info->name, sizeof(q->name));
+ queuefree(q);
+
+ return 0;
--- /dev/null
+From foo@baz Mon Apr 15 07:47:06 CEST 2019
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Mon, 8 Apr 2019 17:39:54 -0400
+Subject: bnxt_en: Improve RX consumer index validity check.
+
+From: Michael Chan <michael.chan@broadcom.com>
+
+[ Upstream commit a1b0e4e684e9c300b9e759b46cb7a0147e61ddff ]
+
+There is logic to check that the RX/TPA consumer index is the expected
+index, to work around a hardware problem. However, the potentially bad
+consumer index is first used to index into an array to reference an entry.
+This can crash if the bad consumer index is beyond the legal range.
+Improve the logic so that the consumer index is only used to index the
+ring after the validity check, and log an error message when the check
+fails.
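+
+As an illustrative sketch (simplified from the hunks below), the value
+taken from the completion ring is treated as untrusted until it matches
+the expected index, and only then used to index the ring:
+
+    cons = rxcmp->rx_cmp_opaque;
+    if (unlikely(cons != rxr->rx_next_cons)) {
+        netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
+                    cons, rxr->rx_next_cons);
+        bnxt_sched_reset(bp, rxr);
+        return rc;                      /* never touch the ring entry */
+    }
+    rx_buf = &rxr->rx_buf_ring[cons];   /* safe: index was validated */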
+
+Fixes: fa7e28127a5a ("bnxt_en: Add workaround to detect bad opaque in rx completion (part 2)")
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -1076,6 +1076,8 @@ static void bnxt_tpa_start(struct bnxt *
+ tpa_info = &rxr->rx_tpa[agg_id];
+
+ if (unlikely(cons != rxr->rx_next_cons)) {
++ netdev_warn(bp->dev, "TPA cons %x != expected cons %x\n",
++ cons, rxr->rx_next_cons);
+ bnxt_sched_reset(bp, rxr);
+ return;
+ }
+@@ -1528,15 +1530,17 @@ static int bnxt_rx_pkt(struct bnxt *bp,
+ }
+
+ cons = rxcmp->rx_cmp_opaque;
+- rx_buf = &rxr->rx_buf_ring[cons];
+- data = rx_buf->data;
+- data_ptr = rx_buf->data_ptr;
+ if (unlikely(cons != rxr->rx_next_cons)) {
+ int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);
+
++ netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
++ cons, rxr->rx_next_cons);
+ bnxt_sched_reset(bp, rxr);
+ return rc1;
+ }
++ rx_buf = &rxr->rx_buf_ring[cons];
++ data = rx_buf->data;
++ data_ptr = rx_buf->data_ptr;
+ prefetch(data_ptr);
+
+ misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
--- /dev/null
+From foo@baz Mon Apr 15 07:47:06 CEST 2019
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Mon, 8 Apr 2019 17:39:55 -0400
+Subject: bnxt_en: Reset device on RX buffer errors.
+
+From: Michael Chan <michael.chan@broadcom.com>
+
+[ Upstream commit 8e44e96c6c8e8fb80b84a2ca11798a8554f710f2 ]
+
+If the RX completion indicates RX buffer errors, the RX ring will be
+disabled by firmware and no packets will be received on that ring from
+that point on. Recover by resetting the device.
+
+Fixes: c0c050c58d84 ("bnxt_en: New Broadcom ethernet driver.")
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -1557,11 +1557,17 @@ static int bnxt_rx_pkt(struct bnxt *bp,
+
+ rx_buf->data = NULL;
+ if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
++ u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
++
+ bnxt_reuse_rx_data(rxr, cons, data);
+ if (agg_bufs)
+ bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
+
+ rc = -EIO;
++ if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
++ netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
++ bnxt_sched_reset(bp, rxr);
++ }
+ goto next_rx;
+ }
+
--- /dev/null
+From foo@baz Mon Apr 15 07:47:06 CEST 2019
+From: Haiyang Zhang <haiyangz@microsoft.com>
+Date: Thu, 28 Mar 2019 19:40:36 +0000
+Subject: hv_netvsc: Fix unwanted wakeup after tx_disable
+
+From: Haiyang Zhang <haiyangz@microsoft.com>
+
+[ Upstream commit 1b704c4a1ba95574832e730f23817b651db2aa59 ]
+
+After a queue has been stopped, the wakeup mechanism may wake it up
+again when ring buffer usage drops below a threshold. This may cause a
+send-path panic on a NULL pointer when we have stopped all tx queues in
+netvsc_detach and start removing the netvsc device.
+
+This patch fixes it by adding a tx_disable flag to prevent unwanted
+queue wakeups.
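+
+A minimal sketch of the intended ordering (illustrative; ring_has_room()
+stands in for the real high-watermark check):
+
+    /* stop path (netvsc_close / netvsc_detach) */
+    nvdev->tx_disable = true;
+    virt_wmb();                 /* flag visible before queues are stopped */
+    netif_tx_disable(ndev);
+
+    /* completion path */
+    if (netif_tx_queue_stopped(txq) && !nvdev->tx_disable &&
+        ring_has_room(channel))
+        netif_tx_wake_queue(txq);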
+
+Fixes: 7b2ee50c0cd5 ("hv_netvsc: common detach logic")
+Reported-by: Mohammed Gamal <mgamal@redhat.com>
+Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/hyperv/hyperv_net.h | 1 +
+ drivers/net/hyperv/netvsc.c | 6 ++++--
+ drivers/net/hyperv/netvsc_drv.c | 32 ++++++++++++++++++++++++++------
+ 3 files changed, 31 insertions(+), 8 deletions(-)
+
+--- a/drivers/net/hyperv/hyperv_net.h
++++ b/drivers/net/hyperv/hyperv_net.h
+@@ -779,6 +779,7 @@ struct netvsc_device {
+
+ wait_queue_head_t wait_drain;
+ bool destroy;
++ bool tx_disable; /* if true, do not wake up queue again */
+
+ /* Receive buffer allocated by us but manages by NetVSP */
+ void *recv_buf;
+--- a/drivers/net/hyperv/netvsc.c
++++ b/drivers/net/hyperv/netvsc.c
+@@ -107,6 +107,7 @@ static struct netvsc_device *alloc_net_d
+
+ init_waitqueue_head(&net_device->wait_drain);
+ net_device->destroy = false;
++ net_device->tx_disable = false;
+ atomic_set(&net_device->open_cnt, 0);
+ net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
+ net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
+@@ -712,7 +713,7 @@ static void netvsc_send_tx_complete(stru
+ } else {
+ struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
+
+- if (netif_tx_queue_stopped(txq) &&
++ if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
+ (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER ||
+ queue_sends < 1)) {
+ netif_tx_wake_queue(txq);
+@@ -865,7 +866,8 @@ static inline int netvsc_send_pkt(
+ netif_tx_stop_queue(txq);
+ } else if (ret == -EAGAIN) {
+ netif_tx_stop_queue(txq);
+- if (atomic_read(&nvchan->queue_sends) < 1) {
++ if (atomic_read(&nvchan->queue_sends) < 1 &&
++ !net_device->tx_disable) {
+ netif_tx_wake_queue(txq);
+ ret = -ENOSPC;
+ }
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -108,6 +108,15 @@ static void netvsc_set_rx_mode(struct ne
+ rcu_read_unlock();
+ }
+
++static void netvsc_tx_enable(struct netvsc_device *nvscdev,
++ struct net_device *ndev)
++{
++ nvscdev->tx_disable = false;
++ virt_wmb(); /* ensure queue wake up mechanism is on */
++
++ netif_tx_wake_all_queues(ndev);
++}
++
+ static int netvsc_open(struct net_device *net)
+ {
+ struct net_device_context *ndev_ctx = netdev_priv(net);
+@@ -128,7 +137,7 @@ static int netvsc_open(struct net_device
+ rdev = nvdev->extension;
+ if (!rdev->link_state) {
+ netif_carrier_on(net);
+- netif_tx_wake_all_queues(net);
++ netvsc_tx_enable(nvdev, net);
+ }
+
+ if (vf_netdev) {
+@@ -183,6 +192,17 @@ static int netvsc_wait_until_empty(struc
+ }
+ }
+
++static void netvsc_tx_disable(struct netvsc_device *nvscdev,
++ struct net_device *ndev)
++{
++ if (nvscdev) {
++ nvscdev->tx_disable = true;
++ virt_wmb(); /* ensure txq will not wake up after stop */
++ }
++
++ netif_tx_disable(ndev);
++}
++
+ static int netvsc_close(struct net_device *net)
+ {
+ struct net_device_context *net_device_ctx = netdev_priv(net);
+@@ -191,7 +211,7 @@ static int netvsc_close(struct net_devic
+ struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
+ int ret;
+
+- netif_tx_disable(net);
++ netvsc_tx_disable(nvdev, net);
+
+ /* No need to close rndis filter if it is removed already */
+ if (!nvdev)
+@@ -893,7 +913,7 @@ static int netvsc_detach(struct net_devi
+
+ /* If device was up (receiving) then shutdown */
+ if (netif_running(ndev)) {
+- netif_tx_disable(ndev);
++ netvsc_tx_disable(nvdev, ndev);
+
+ ret = rndis_filter_close(nvdev);
+ if (ret) {
+@@ -1720,7 +1740,7 @@ static void netvsc_link_change(struct wo
+ if (rdev->link_state) {
+ rdev->link_state = false;
+ netif_carrier_on(net);
+- netif_tx_wake_all_queues(net);
++ netvsc_tx_enable(net_device, net);
+ } else {
+ notify = true;
+ }
+@@ -1730,7 +1750,7 @@ static void netvsc_link_change(struct wo
+ if (!rdev->link_state) {
+ rdev->link_state = true;
+ netif_carrier_off(net);
+- netif_tx_stop_all_queues(net);
++ netvsc_tx_disable(net_device, net);
+ }
+ kfree(event);
+ break;
+@@ -1739,7 +1759,7 @@ static void netvsc_link_change(struct wo
+ if (!rdev->link_state) {
+ rdev->link_state = true;
+ netif_carrier_off(net);
+- netif_tx_stop_all_queues(net);
++ netvsc_tx_disable(net_device, net);
+ event->event = RNDIS_STATUS_MEDIA_CONNECT;
+ spin_lock_irqsave(&ndev_ctx->lock, flags);
+ list_add(&event->list, &ndev_ctx->reconfig_events);
--- /dev/null
+From foo@baz Mon Apr 15 07:47:06 CEST 2019
+From: Sheena Mira-ato <sheena.mira-ato@alliedtelesis.co.nz>
+Date: Mon, 1 Apr 2019 13:04:42 +1300
+Subject: ip6_tunnel: Match to ARPHRD_TUNNEL6 for dev type
+
+From: Sheena Mira-ato <sheena.mira-ato@alliedtelesis.co.nz>
+
+[ Upstream commit b2e54b09a3d29c4db883b920274ca8dca4d9f04d ]
+
+The device type for ip6 tunnels is set to
+ARPHRD_TUNNEL6. However, the ip4ip6_err function
+is expecting the device type of the tunnel to be
+ARPHRD_TUNNEL. Since the device types do not
+match, the function exits and the ICMP error
+packet is not sent to the originating host. Note
+that the device type for IPv4 tunnels is set to
+ARPHRD_TUNNEL.
+
+The fix is to expect a tunnel device type of
+ARPHRD_TUNNEL6 instead. Now the tunnel device
+type matches and the ICMP error packet is sent
+to the originating host.
+
+Signed-off-by: Sheena Mira-ato <sheena.mira-ato@alliedtelesis.co.nz>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/ip6_tunnel.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -633,7 +633,7 @@ ip4ip6_err(struct sk_buff *skb, struct i
+ IPPROTO_IPIP,
+ RT_TOS(eiph->tos), 0);
+ if (IS_ERR(rt) ||
+- rt->dst.dev->type != ARPHRD_TUNNEL) {
++ rt->dst.dev->type != ARPHRD_TUNNEL6) {
+ if (!IS_ERR(rt))
+ ip_rt_put(rt);
+ goto out;
+@@ -643,7 +643,7 @@ ip4ip6_err(struct sk_buff *skb, struct i
+ ip_rt_put(rt);
+ if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
+ skb2->dev) ||
+- skb_dst(skb2)->dev->type != ARPHRD_TUNNEL)
++ skb_dst(skb2)->dev->type != ARPHRD_TUNNEL6)
+ goto out;
+ }
+
--- /dev/null
+From foo@baz Mon Apr 15 07:47:06 CEST 2019
+From: Junwei Hu <hujunwei4@huawei.com>
+Date: Tue, 2 Apr 2019 19:38:04 +0800
+Subject: ipv6: Fix dangling pointer when ipv6 fragment
+
+From: Junwei Hu <hujunwei4@huawei.com>
+
+[ Upstream commit ef0efcd3bd3fd0589732b67fb586ffd3c8705806 ]
+
+At the beginning of ip6_fragment(), the prevhdr pointer is obtained
+from ip6_find_1stfragopt(). However, all pointers into the skb header
+may change when skb_checksum_help() is called with
+skb->ip_summed == CHECKSUM_PARTIAL. The prevhdr pointer will be
+dangling if it is not reloaded after __skb_linearize() is called from
+skb_checksum_help().
+
+Here, I add a variable, nexthdr_offset, to record the offset, which
+does not change even after __skb_linearize() is called.
+
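+In short, the fix records an offset instead of keeping a raw pointer
+(mirroring the hunks below):
+
+    nexthdr_offset = prevhdr - skb_network_header(skb);  /* save offset */
+    ...
+    /* skb_checksum_help() may linearize and reallocate the skb head */
+    prevhdr = skb_network_header(skb) + nexthdr_offset;   /* reload */
+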
+Fixes: 405c92f7a541 ("ipv6: add defensive check for CHECKSUM_PARTIAL skbs in ip_fragment")
+Signed-off-by: Junwei Hu <hujunwei4@huawei.com>
+Reported-by: Wenhao Zhang <zhangwenhao8@huawei.com>
+Reported-by: syzbot+e8ce541d095e486074fc@syzkaller.appspotmail.com
+Reviewed-by: Zhiqiang Liu <liuzhiqiang26@huawei.com>
+Acked-by: Martin KaFai Lau <kafai@fb.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/ip6_output.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -611,7 +611,7 @@ int ip6_fragment(struct net *net, struct
+ inet6_sk(skb->sk) : NULL;
+ struct ipv6hdr *tmp_hdr;
+ struct frag_hdr *fh;
+- unsigned int mtu, hlen, left, len;
++ unsigned int mtu, hlen, left, len, nexthdr_offset;
+ int hroom, troom;
+ __be32 frag_id;
+ int ptr, offset = 0, err = 0;
+@@ -622,6 +622,7 @@ int ip6_fragment(struct net *net, struct
+ goto fail;
+ hlen = err;
+ nexthdr = *prevhdr;
++ nexthdr_offset = prevhdr - skb_network_header(skb);
+
+ mtu = ip6_skb_dst_mtu(skb);
+
+@@ -656,6 +657,7 @@ int ip6_fragment(struct net *net, struct
+ (err = skb_checksum_help(skb)))
+ goto fail;
+
++ prevhdr = skb_network_header(skb) + nexthdr_offset;
+ hroom = LL_RESERVED_SPACE(rt->dst.dev);
+ if (skb_has_frag_list(skb)) {
+ unsigned int first_len = skb_pagelen(skb);
--- /dev/null
+From foo@baz Mon Apr 15 07:47:06 CEST 2019
+From: Lorenzo Bianconi <lorenzo.bianconi@redhat.com>
+Date: Thu, 4 Apr 2019 16:37:53 +0200
+Subject: ipv6: sit: reset ip header pointer in ipip6_rcv
+
+From: Lorenzo Bianconi <lorenzo.bianconi@redhat.com>
+
+[ Upstream commit bb9bd814ebf04f579be466ba61fc922625508807 ]
+
+ipip6 tunnels run iptunnel_pull_header on received skbs. This can
+lead to the following use-after-free on the iph pointer, since the
+packet will be 'uncloned' by pskb_expand_head if it is a cloned gso
+skb (e.g. if the packet has been sent through a veth device):
+
+[ 706.369655] BUG: KASAN: use-after-free in ipip6_rcv+0x1678/0x16e0 [sit]
+[ 706.449056] Read of size 1 at addr ffffe01b6bd855f5 by task ksoftirqd/1/=
+[ 706.669494] Hardware name: HPE ProLiant m400 Server/ProLiant m400 Server, BIOS U02 08/19/2016
+[ 706.771839] Call trace:
+[ 706.801159] dump_backtrace+0x0/0x2f8
+[ 706.845079] show_stack+0x24/0x30
+[ 706.884833] dump_stack+0xe0/0x11c
+[ 706.925629] print_address_description+0x68/0x260
+[ 706.982070] kasan_report+0x178/0x340
+[ 707.025995] __asan_report_load1_noabort+0x30/0x40
+[ 707.083481] ipip6_rcv+0x1678/0x16e0 [sit]
+[ 707.132623] tunnel64_rcv+0xd4/0x200 [tunnel4]
+[ 707.185940] ip_local_deliver_finish+0x3b8/0x988
+[ 707.241338] ip_local_deliver+0x144/0x470
+[ 707.289436] ip_rcv_finish+0x43c/0x14b0
+[ 707.335447] ip_rcv+0x628/0x1138
+[ 707.374151] __netif_receive_skb_core+0x1670/0x2600
+[ 707.432680] __netif_receive_skb+0x28/0x190
+[ 707.482859] process_backlog+0x1d0/0x610
+[ 707.529913] net_rx_action+0x37c/0xf68
+[ 707.574882] __do_softirq+0x288/0x1018
+[ 707.619852] run_ksoftirqd+0x70/0xa8
+[ 707.662734] smpboot_thread_fn+0x3a4/0x9e8
+[ 707.711875] kthread+0x2c8/0x350
+[ 707.750583] ret_from_fork+0x10/0x18
+
+[ 707.811302] Allocated by task 16982:
+[ 707.854182] kasan_kmalloc.part.1+0x40/0x108
+[ 707.905405] kasan_kmalloc+0xb4/0xc8
+[ 707.948291] kasan_slab_alloc+0x14/0x20
+[ 707.994309] __kmalloc_node_track_caller+0x158/0x5e0
+[ 708.053902] __kmalloc_reserve.isra.8+0x54/0xe0
+[ 708.108280] __alloc_skb+0xd8/0x400
+[ 708.150139] sk_stream_alloc_skb+0xa4/0x638
+[ 708.200346] tcp_sendmsg_locked+0x818/0x2b90
+[ 708.251581] tcp_sendmsg+0x40/0x60
+[ 708.292376] inet_sendmsg+0xf0/0x520
+[ 708.335259] sock_sendmsg+0xac/0xf8
+[ 708.377096] sock_write_iter+0x1c0/0x2c0
+[ 708.424154] new_sync_write+0x358/0x4a8
+[ 708.470162] __vfs_write+0xc4/0xf8
+[ 708.510950] vfs_write+0x12c/0x3d0
+[ 708.551739] ksys_write+0xcc/0x178
+[ 708.592533] __arm64_sys_write+0x70/0xa0
+[ 708.639593] el0_svc_handler+0x13c/0x298
+[ 708.686646] el0_svc+0x8/0xc
+
+[ 708.739019] Freed by task 17:
+[ 708.774597] __kasan_slab_free+0x114/0x228
+[ 708.823736] kasan_slab_free+0x10/0x18
+[ 708.868703] kfree+0x100/0x3d8
+[ 708.905320] skb_free_head+0x7c/0x98
+[ 708.948204] skb_release_data+0x320/0x490
+[ 708.996301] pskb_expand_head+0x60c/0x970
+[ 709.044399] __iptunnel_pull_header+0x3b8/0x5d0
+[ 709.098770] ipip6_rcv+0x41c/0x16e0 [sit]
+[ 709.146873] tunnel64_rcv+0xd4/0x200 [tunnel4]
+[ 709.200195] ip_local_deliver_finish+0x3b8/0x988
+[ 709.255596] ip_local_deliver+0x144/0x470
+[ 709.303692] ip_rcv_finish+0x43c/0x14b0
+[ 709.349705] ip_rcv+0x628/0x1138
+[ 709.388413] __netif_receive_skb_core+0x1670/0x2600
+[ 709.446943] __netif_receive_skb+0x28/0x190
+[ 709.497120] process_backlog+0x1d0/0x610
+[ 709.544169] net_rx_action+0x37c/0xf68
+[ 709.589131] __do_softirq+0x288/0x1018
+
+[ 709.651938] The buggy address belongs to the object at ffffe01b6bd85580
+ which belongs to the cache kmalloc-1024 of size 1024
+[ 709.804356] The buggy address is located 117 bytes inside of
+ 1024-byte region [ffffe01b6bd85580, ffffe01b6bd85980)
+[ 709.946340] The buggy address belongs to the page:
+[ 710.003824] page:ffff7ff806daf600 count:1 mapcount:0 mapping:ffffe01c4001f600 index:0x0
+[ 710.099914] flags: 0xfffff8000000100(slab)
+[ 710.149059] raw: 0fffff8000000100 dead000000000100 dead000000000200 ffffe01c4001f600
+[ 710.242011] raw: 0000000000000000 0000000000380038 00000001ffffffff 0000000000000000
+[ 710.334966] page dumped because: kasan: bad access detected
+
+Fix it by resetting the iph pointer after iptunnel_pull_header.
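+
+In short (as the hunk below does), any pointer taken into the skb head
+before iptunnel_pull_header() has to be recomputed afterwards:
+
+    iph = (const struct iphdr *)skb_mac_header(skb);  /* reload */
+    err = IP_ECN_decapsulate(iph, skb);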
+
+Fixes: a09a4c8dd1ec ("tunnels: Remove encapsulation offloads on decap")
+Tested-by: Jianlin Shi <jishi@redhat.com>
+Signed-off-by: Lorenzo Bianconi <lorenzo.bianconi@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/sit.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -661,6 +661,10 @@ static int ipip6_rcv(struct sk_buff *skb
+ !net_eq(tunnel->net, dev_net(tunnel->dev))))
+ goto out;
+
++ /* skb can be uncloned in iptunnel_pull_header, so
++ * old iph is no longer valid
++ */
++ iph = (const struct iphdr *)skb_mac_header(skb);
+ err = IP_ECN_decapsulate(iph, skb);
+ if (unlikely(err)) {
+ if (log_ecn_error)
--- /dev/null
+From foo@baz Mon Apr 15 07:47:06 CEST 2019
+From: Jiri Slaby <jslaby@suse.cz>
+Date: Fri, 29 Mar 2019 12:19:46 +0100
+Subject: kcm: switch order of device registration to fix a crash
+
+From: Jiri Slaby <jslaby@suse.cz>
+
+[ Upstream commit 3c446e6f96997f2a95bf0037ef463802162d2323 ]
+
+When kcm is loaded while many processes try to create a KCM socket, a
+crash occurs:
+ BUG: unable to handle kernel NULL pointer dereference at 000000000000000e
+ IP: mutex_lock+0x27/0x40 kernel/locking/mutex.c:240
+ PGD 8000000016ef2067 P4D 8000000016ef2067 PUD 3d6e9067 PMD 0
+ Oops: 0002 [#1] SMP KASAN PTI
+ CPU: 0 PID: 7005 Comm: syz-executor.5 Not tainted 4.12.14-396-default #1 SLE15-SP1 (unreleased)
+ RIP: 0010:mutex_lock+0x27/0x40 kernel/locking/mutex.c:240
+ RSP: 0018:ffff88000d487a00 EFLAGS: 00010246
+ RAX: 0000000000000000 RBX: 000000000000000e RCX: 1ffff100082b0719
+ ...
+ CR2: 000000000000000e CR3: 000000004b1bc003 CR4: 0000000000060ef0
+ Call Trace:
+ kcm_create+0x600/0xbf0 [kcm]
+ __sock_create+0x324/0x750 net/socket.c:1272
+ ...
+
+This is due to a race between sock_create and an unfinished
+register_pernet_device. kcm_create tries to do "net_generic(net,
+kcm_net_id)", but kcm_net_id is not initialized yet.
+
+So switch the order of the two to close the race.
+
+This can be reproduced with multiple processes doing socket(PF_KCM, ...)
+and one process doing module removal.
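+
+Ordering sketch (illustrative): the address family may only become
+reachable through socket(2) once its per-net state exists.
+
+    err = register_pernet_device(&kcm_net_ops);  /* sets up kcm_net_id data */
+    if (err)
+        goto net_ops_fail;
+
+    err = sock_register(&kcm_family_ops);        /* kcm_create() now safe */
+    if (err)
+        goto sock_register_fail;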
+
+Fixes: ab7ac4eb9832 ("kcm: Kernel Connection Multiplexor module")
+Reviewed-by: Michal Kubecek <mkubecek@suse.cz>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/kcm/kcmsock.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/net/kcm/kcmsock.c
++++ b/net/kcm/kcmsock.c
+@@ -2059,14 +2059,14 @@ static int __init kcm_init(void)
+ if (err)
+ goto fail;
+
+- err = sock_register(&kcm_family_ops);
+- if (err)
+- goto sock_register_fail;
+-
+ err = register_pernet_device(&kcm_net_ops);
+ if (err)
+ goto net_ops_fail;
+
++ err = sock_register(&kcm_family_ops);
++ if (err)
++ goto sock_register_fail;
++
+ err = kcm_proc_init();
+ if (err)
+ goto proc_init_fail;
+@@ -2074,12 +2074,12 @@ static int __init kcm_init(void)
+ return 0;
+
+ proc_init_fail:
+- unregister_pernet_device(&kcm_net_ops);
+-
+-net_ops_fail:
+ sock_unregister(PF_KCM);
+
+ sock_register_fail:
++ unregister_pernet_device(&kcm_net_ops);
++
++net_ops_fail:
+ proto_unregister(&kcm_proto);
+
+ fail:
+@@ -2095,8 +2095,8 @@ fail:
+ static void __exit kcm_exit(void)
+ {
+ kcm_proc_exit();
+- unregister_pernet_device(&kcm_net_ops);
+ sock_unregister(PF_KCM);
++ unregister_pernet_device(&kcm_net_ops);
+ proto_unregister(&kcm_proto);
+ destroy_workqueue(kcm_wq);
+
--- /dev/null
+From foo@baz Mon Apr 15 07:47:06 CEST 2019
+From: Li RongQing <lirongqing@baidu.com>
+Date: Fri, 29 Mar 2019 09:18:02 +0800
+Subject: net: ethtool: not call vzalloc for zero sized memory request
+
+From: Li RongQing <lirongqing@baidu.com>
+
+[ Upstream commit 3d8830266ffc28c16032b859e38a0252e014b631 ]
+
+NULL or ZERO_SIZE_PTR will be returned for a zero sized memory
+request, and dereferencing them will lead to a segfault.
+
+So it is unnecessary to call vzalloc for a zero sized memory request,
+and we should not call functions which may dereference the
+NULL/zero-size allocation.
+
+This also fixes a possible memory leak: if phy_ethtool_get_stats
+returns an error, memory should be freed before exit.
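+
+A minimal sketch of the guard pattern used below (fill_stats() stands in
+for the respective get-stats callback):
+
+    u64 *data = NULL;
+
+    if (n_stats) {
+        data = vzalloc(n_stats * sizeof(u64));
+        if (!data)
+            return -ENOMEM;
+        fill_stats(dev, data);      /* only called with a real buffer */
+    }
+    /* n_stats == 0: no allocation, no fill, zero-length copy to user */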
+
+Signed-off-by: Li RongQing <lirongqing@baidu.com>
+Reviewed-by: Wang Li <wangli39@baidu.com>
+Reviewed-by: Michal Kubecek <mkubecek@suse.cz>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/ethtool.c | 43 +++++++++++++++++++++++++++----------------
+ 1 file changed, 27 insertions(+), 16 deletions(-)
+
+--- a/net/core/ethtool.c
++++ b/net/core/ethtool.c
+@@ -1815,11 +1815,15 @@ static int ethtool_get_strings(struct ne
+ WARN_ON_ONCE(!ret);
+
+ gstrings.len = ret;
+- data = vzalloc(gstrings.len * ETH_GSTRING_LEN);
+- if (gstrings.len && !data)
+- return -ENOMEM;
++ if (gstrings.len) {
++ data = vzalloc(gstrings.len * ETH_GSTRING_LEN);
++ if (!data)
++ return -ENOMEM;
+
+- __ethtool_get_strings(dev, gstrings.string_set, data);
++ __ethtool_get_strings(dev, gstrings.string_set, data);
++ } else {
++ data = NULL;
++ }
+
+ ret = -EFAULT;
+ if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
+@@ -1915,11 +1919,14 @@ static int ethtool_get_stats(struct net_
+ return -EFAULT;
+
+ stats.n_stats = n_stats;
+- data = vzalloc(n_stats * sizeof(u64));
+- if (n_stats && !data)
+- return -ENOMEM;
+-
+- ops->get_ethtool_stats(dev, &stats, data);
++ if (n_stats) {
++ data = vzalloc(n_stats * sizeof(u64));
++ if (!data)
++ return -ENOMEM;
++ ops->get_ethtool_stats(dev, &stats, data);
++ } else {
++ data = NULL;
++ }
+
+ ret = -EFAULT;
+ if (copy_to_user(useraddr, &stats, sizeof(stats)))
+@@ -1955,13 +1962,17 @@ static int ethtool_get_phy_stats(struct
+ return -EFAULT;
+
+ stats.n_stats = n_stats;
+- data = vzalloc(n_stats * sizeof(u64));
+- if (n_stats && !data)
+- return -ENOMEM;
+-
+- mutex_lock(&phydev->lock);
+- phydev->drv->get_stats(phydev, &stats, data);
+- mutex_unlock(&phydev->lock);
++ if (n_stats) {
++ data = vzalloc(n_stats * sizeof(u64));
++ if (!data)
++ return -ENOMEM;
++
++ mutex_lock(&phydev->lock);
++ phydev->drv->get_stats(phydev, &stats, data);
++ mutex_unlock(&phydev->lock);
++ } else {
++ data = NULL;
++ }
+
+ ret = -EFAULT;
+ if (copy_to_user(useraddr, &stats, sizeof(stats)))
--- /dev/null
+From foo@baz Mon Apr 15 07:47:06 CEST 2019
+From: Steffen Klassert <steffen.klassert@secunet.com>
+Date: Tue, 2 Apr 2019 08:16:03 +0200
+Subject: net-gro: Fix GRO flush when receiving a GSO packet.
+
+From: Steffen Klassert <steffen.klassert@secunet.com>
+
+[ Upstream commit 0ab03f353d3613ea49d1f924faf98559003670a8 ]
+
+Currently we may incorrectly merge a received GSO packet
+or a packet with a frag_list into a packet sitting in the
+gro_hash list. skb_segment() may crash in that case because
+the assumptions on the skb layout are not met.
+The correct behaviour would be to flush the packet in the
+gro_hash list and send the received GSO packet directly
+afterwards. Commit d61d072e87c8e ("net-gro: avoid reorders")
+sets NAPI_GRO_CB(skb)->flush in this case, but this is not
+checked before merging. This patch makes sure to check this
+flag and to not merge in that case.
+
+Fixes: d61d072e87c8e ("net-gro: avoid reorders")
+Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/skbuff.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -3808,7 +3808,7 @@ int skb_gro_receive(struct sk_buff **hea
+ struct sk_buff *lp, *p = *head;
+ unsigned int delta_truesize;
+
+- if (unlikely(p->len + len >= 65536))
++ if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush))
+ return -E2BIG;
+
+ lp = NAPI_GRO_CB(p)->last;
--- /dev/null
+From foo@baz Mon Apr 15 07:47:06 CEST 2019
+From: Artemy Kovalyov <artemyko@mellanox.com>
+Date: Tue, 19 Mar 2019 11:24:38 +0200
+Subject: net/mlx5: Decrease default mr cache size
+
+From: Artemy Kovalyov <artemyko@mellanox.com>
+
+[ Upstream commit e8b26b2135dedc0284490bfeac06dfc4418d0105 ]
+
+Delete initialization of high order entries in mr cache to decrease initial
+memory footprint. When required, the administrator can populate the
+entries with memory keys via the /sys interface.
+
+This approach is very helpful in significantly reducing the per HW function
+memory footprint in virtualization environments such as SRIOV.
+
+Fixes: 9603b61de1ee ("mlx5: Move pci device handling from mlx5_ib to mlx5_core")
+Signed-off-by: Artemy Kovalyov <artemyko@mellanox.com>
+Signed-off-by: Moni Shoua <monis@mellanox.com>
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Reported-by: Shalom Toledo <shalomt@mellanox.com>
+Acked-by: Or Gerlitz <ogerlitz@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/main.c | 20 --------------------
+ 1 file changed, 20 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -155,26 +155,6 @@ static struct mlx5_profile profile[] = {
+ .size = 8,
+ .limit = 4
+ },
+- .mr_cache[16] = {
+- .size = 8,
+- .limit = 4
+- },
+- .mr_cache[17] = {
+- .size = 8,
+- .limit = 4
+- },
+- .mr_cache[18] = {
+- .size = 8,
+- .limit = 4
+- },
+- .mr_cache[19] = {
+- .size = 4,
+- .limit = 2
+- },
+- .mr_cache[20] = {
+- .size = 4,
+- .limit = 2
+- },
+ },
+ };
+
--- /dev/null
+From foo@baz Mon Apr 15 07:47:06 CEST 2019
+From: Yuval Avnery <yuvalav@mellanox.com>
+Date: Mon, 11 Mar 2019 06:18:24 +0200
+Subject: net/mlx5e: Add a lock on tir list
+
+From: Yuval Avnery <yuvalav@mellanox.com>
+
+[ Upstream commit 80a2a9026b24c6bd34b8d58256973e22270bedec ]
+
+Refreshing tirs loops over a global list of tirs while netdevs are
+adding and removing tirs from that list. That is why a lock is
+required.
+
+Fixes: 724b2aa15126 ("net/mlx5e: TIRs management refactoring")
+Signed-off-by: Yuval Avnery <yuvalav@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_common.c | 7 +++++++
+ include/linux/mlx5/driver.h | 2 ++
+ 2 files changed, 9 insertions(+)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+@@ -45,7 +45,9 @@ int mlx5e_create_tir(struct mlx5_core_de
+ if (err)
+ return err;
+
++ mutex_lock(&mdev->mlx5e_res.td.list_lock);
+ list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list);
++ mutex_unlock(&mdev->mlx5e_res.td.list_lock);
+
+ return 0;
+ }
+@@ -53,8 +55,10 @@ int mlx5e_create_tir(struct mlx5_core_de
+ void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
+ struct mlx5e_tir *tir)
+ {
++ mutex_lock(&mdev->mlx5e_res.td.list_lock);
+ mlx5_core_destroy_tir(mdev, tir->tirn);
+ list_del(&tir->list);
++ mutex_unlock(&mdev->mlx5e_res.td.list_lock);
+ }
+
+ static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
+@@ -114,6 +118,7 @@ int mlx5e_create_mdev_resources(struct m
+ }
+
+ INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list);
++ mutex_init(&mdev->mlx5e_res.td.list_lock);
+
+ return 0;
+
+@@ -158,6 +163,7 @@ int mlx5e_refresh_tirs(struct mlx5e_priv
+
+ MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
+
++ mutex_lock(&mdev->mlx5e_res.td.list_lock);
+ list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
+ tirn = tir->tirn;
+ err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
+@@ -169,6 +175,7 @@ out:
+ kvfree(in);
+ if (err)
+ netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err);
++ mutex_unlock(&mdev->mlx5e_res.td.list_lock);
+
+ return err;
+ }
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -743,6 +743,8 @@ struct mlx5_pagefault {
+ };
+
+ struct mlx5_td {
++ /* protects tirs list changes while tirs refresh */
++ struct mutex list_lock;
+ struct list_head tirs_list;
+ u32 tdn;
+ };
--- /dev/null
+From foo@baz Mon Apr 15 07:47:06 CEST 2019
+From: Gavi Teitz <gavi@mellanox.com>
+Date: Mon, 11 Mar 2019 11:56:34 +0200
+Subject: net/mlx5e: Fix error handling when refreshing TIRs
+
+From: Gavi Teitz <gavi@mellanox.com>
+
+[ Upstream commit bc87a0036826a37b43489b029af8143bd07c6cca ]
+
+Previously, a false positive would be caught if the TIRs list is
+empty, since the err value was initialized to -ENOMEM, and was only
+updated if a TIR is refreshed. This is resolved by initializing the
+err value to zero.
+
+Fixes: b676f653896a ("net/mlx5e: Refactor refresh TIRs")
+Signed-off-by: Gavi Teitz <gavi@mellanox.com>
+Reviewed-by: Roi Dayan <roid@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_common.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+@@ -140,15 +140,17 @@ int mlx5e_refresh_tirs(struct mlx5e_priv
+ {
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_tir *tir;
+- int err = -ENOMEM;
++ int err = 0;
+ u32 tirn = 0;
+ int inlen;
+ void *in;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
+ in = kvzalloc(inlen, GFP_KERNEL);
+- if (!in)
++ if (!in) {
++ err = -ENOMEM;
+ goto out;
++ }
+
+ if (enable_uc_lb)
+ MLX5_SET(modify_tir_in, in, ctx.self_lb_block,
--- /dev/null
+From foo@baz Mon Apr 15 07:47:06 CEST 2019
+From: Mao Wenan <maowenan@huawei.com>
+Date: Thu, 28 Mar 2019 17:10:56 +0800
+Subject: net: rds: force to destroy connection if t_sock is NULL in rds_tcp_kill_sock().
+
+From: Mao Wenan <maowenan@huawei.com>
+
+[ Upstream commit cb66ddd156203daefb8d71158036b27b0e2caf63 ]
+
+When a net namespace is being cleaned up, rds_tcp_exit_net() will call
+rds_tcp_kill_sock(). If t_sock is NULL, it will not call
+rds_conn_destroy(), rds_conn_path_destroy() and rds_tcp_conn_free() to free
+the connection, and the worker cp_conn_w is not stopped; afterwards the net is
+freed in net_drop_ns(), while cp_conn_w's rds_connect_worker() will call
+rds_tcp_conn_path_connect() and reference 'net', which has already been freed.
+
+In rds_tcp_conn_path_connect(), rds_tcp_set_callbacks() will set t_sock = sock before
+sock->ops->connect, but if connect() fails, it will call
+rds_tcp_restore_callbacks() and set t_sock = NULL. If connect always
+fails, rds_connect_worker() will keep trying to reconnect, so
+rds_tcp_kill_sock() will never cancel the worker cp_conn_w and free the
+connections.
+
+Therefore, the condition !tc->t_sock is not needed on the
+cleanup_net->rds_tcp_exit_net->rds_tcp_kill_sock path, because tc->t_sock is
+always NULL there, and there is no other path to cancel cp_conn_w and free the
+connection. So this patch removes that condition.
+
+rds_tcp_kill_sock():
+...
+if (net != c_net || !tc->t_sock)
+...
+Acked-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
+
+==================================================================
+BUG: KASAN: use-after-free in inet_create+0xbcc/0xd28
+net/ipv4/af_inet.c:340
+Read of size 4 at addr ffff8003496a4684 by task kworker/u8:4/3721
+
+CPU: 3 PID: 3721 Comm: kworker/u8:4 Not tainted 5.1.0 #11
+Hardware name: linux,dummy-virt (DT)
+Workqueue: krdsd rds_connect_worker
+Call trace:
+ dump_backtrace+0x0/0x3c0 arch/arm64/kernel/time.c:53
+ show_stack+0x28/0x38 arch/arm64/kernel/traps.c:152
+ __dump_stack lib/dump_stack.c:77 [inline]
+ dump_stack+0x120/0x188 lib/dump_stack.c:113
+ print_address_description+0x68/0x278 mm/kasan/report.c:253
+ kasan_report_error mm/kasan/report.c:351 [inline]
+ kasan_report+0x21c/0x348 mm/kasan/report.c:409
+ __asan_report_load4_noabort+0x30/0x40 mm/kasan/report.c:429
+ inet_create+0xbcc/0xd28 net/ipv4/af_inet.c:340
+ __sock_create+0x4f8/0x770 net/socket.c:1276
+ sock_create_kern+0x50/0x68 net/socket.c:1322
+ rds_tcp_conn_path_connect+0x2b4/0x690 net/rds/tcp_connect.c:114
+ rds_connect_worker+0x108/0x1d0 net/rds/threads.c:175
+ process_one_work+0x6e8/0x1700 kernel/workqueue.c:2153
+ worker_thread+0x3b0/0xdd0 kernel/workqueue.c:2296
+ kthread+0x2f0/0x378 kernel/kthread.c:255
+ ret_from_fork+0x10/0x18 arch/arm64/kernel/entry.S:1117
+
+Allocated by task 687:
+ save_stack mm/kasan/kasan.c:448 [inline]
+ set_track mm/kasan/kasan.c:460 [inline]
+ kasan_kmalloc+0xd4/0x180 mm/kasan/kasan.c:553
+ kasan_slab_alloc+0x14/0x20 mm/kasan/kasan.c:490
+ slab_post_alloc_hook mm/slab.h:444 [inline]
+ slab_alloc_node mm/slub.c:2705 [inline]
+ slab_alloc mm/slub.c:2713 [inline]
+ kmem_cache_alloc+0x14c/0x388 mm/slub.c:2718
+ kmem_cache_zalloc include/linux/slab.h:697 [inline]
+ net_alloc net/core/net_namespace.c:384 [inline]
+ copy_net_ns+0xc4/0x2d0 net/core/net_namespace.c:424
+ create_new_namespaces+0x300/0x658 kernel/nsproxy.c:107
+ unshare_nsproxy_namespaces+0xa0/0x198 kernel/nsproxy.c:206
+ ksys_unshare+0x340/0x628 kernel/fork.c:2577
+ __do_sys_unshare kernel/fork.c:2645 [inline]
+ __se_sys_unshare kernel/fork.c:2643 [inline]
+ __arm64_sys_unshare+0x38/0x58 kernel/fork.c:2643
+ __invoke_syscall arch/arm64/kernel/syscall.c:35 [inline]
+ invoke_syscall arch/arm64/kernel/syscall.c:47 [inline]
+ el0_svc_common+0x168/0x390 arch/arm64/kernel/syscall.c:83
+ el0_svc_handler+0x60/0xd0 arch/arm64/kernel/syscall.c:129
+ el0_svc+0x8/0xc arch/arm64/kernel/entry.S:960
+
+Freed by task 264:
+ save_stack mm/kasan/kasan.c:448 [inline]
+ set_track mm/kasan/kasan.c:460 [inline]
+ __kasan_slab_free+0x114/0x220 mm/kasan/kasan.c:521
+ kasan_slab_free+0x10/0x18 mm/kasan/kasan.c:528
+ slab_free_hook mm/slub.c:1370 [inline]
+ slab_free_freelist_hook mm/slub.c:1397 [inline]
+ slab_free mm/slub.c:2952 [inline]
+ kmem_cache_free+0xb8/0x3a8 mm/slub.c:2968
+ net_free net/core/net_namespace.c:400 [inline]
+ net_drop_ns.part.6+0x78/0x90 net/core/net_namespace.c:407
+ net_drop_ns net/core/net_namespace.c:406 [inline]
+ cleanup_net+0x53c/0x6d8 net/core/net_namespace.c:569
+ process_one_work+0x6e8/0x1700 kernel/workqueue.c:2153
+ worker_thread+0x3b0/0xdd0 kernel/workqueue.c:2296
+ kthread+0x2f0/0x378 kernel/kthread.c:255
+ ret_from_fork+0x10/0x18 arch/arm64/kernel/entry.S:1117
+
+The buggy address belongs to the object at ffff8003496a3f80
+ which belongs to the cache net_namespace of size 7872
+The buggy address is located 1796 bytes inside of
+ 7872-byte region [ffff8003496a3f80, ffff8003496a5e40)
+The buggy address belongs to the page:
+page:ffff7e000d25a800 count:1 mapcount:0 mapping:ffff80036ce4b000
+index:0x0 compound_mapcount: 0
+flags: 0xffffe0000008100(slab|head)
+raw: 0ffffe0000008100 dead000000000100 dead000000000200 ffff80036ce4b000
+raw: 0000000000000000 0000000080040004 00000001ffffffff 0000000000000000
+page dumped because: kasan: bad access detected
+
+Memory state around the buggy address:
+ ffff8003496a4580: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+ ffff8003496a4600: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+>ffff8003496a4680: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+ ^
+ ffff8003496a4700: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+ ffff8003496a4780: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+==================================================================
+
+Fixes: 467fa15356ac("RDS-TCP: Support multiple RDS-TCP listen endpoints, one per netns.")
+Reported-by: Hulk Robot <hulkci@huawei.com>
+Signed-off-by: Mao Wenan <maowenan@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/rds/tcp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/rds/tcp.c
++++ b/net/rds/tcp.c
+@@ -530,7 +530,7 @@ static void rds_tcp_kill_sock(struct net
+ list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
+ struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
+
+- if (net != c_net || !tc->t_sock)
++ if (net != c_net)
+ continue;
+ if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) {
+ list_move_tail(&tc->t_tcp_node, &tmp_list);
--- /dev/null
+From foo@baz Mon Apr 15 07:47:06 CEST 2019
+From: Davide Caratti <dcaratti@redhat.com>
+Date: Thu, 4 Apr 2019 12:31:35 +0200
+Subject: net/sched: act_sample: fix divide by zero in the traffic path
+
+From: Davide Caratti <dcaratti@redhat.com>
+
+[ Upstream commit fae2708174ae95d98d19f194e03d6e8f688ae195 ]
+
+The control path of the 'sample' action does not validate the value of 'rate'
+provided by the user, but then uses it as a divisor in the traffic path.
+Validate it in tcf_sample_init(), and return -EINVAL with a proper extack
+message in case that value is zero, to fix a splat with the script below:
+
+ # tc f a dev test0 egress matchall action sample rate 0 group 1 index 2
+ # tc -s a s action sample
+ total acts 1
+
+ action order 0: sample rate 1/0 group 1 pipe
+ index 2 ref 1 bind 1 installed 19 sec used 19 sec
+ Action statistics:
+ Sent 0 bytes 0 pkt (dropped 0, overlimits 0 requeues 0)
+ backlog 0b 0p requeues 0
+ # ping 192.0.2.1 -I test0 -c1 -q
+
+ divide error: 0000 [#1] SMP PTI
+ CPU: 1 PID: 6192 Comm: ping Not tainted 5.1.0-rc2.diag2+ #591
+ Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2011
+ RIP: 0010:tcf_sample_act+0x9e/0x1e0 [act_sample]
+ Code: 6a f1 85 c0 74 0d 80 3d 83 1a 00 00 00 0f 84 9c 00 00 00 4d 85 e4 0f 84 85 00 00 00 e8 9b d7 9c f1 44 8b 8b e0 00 00 00 31 d2 <41> f7 f1 85 d2 75 70 f6 85 83 00 00 00 10 48 8b 45 10 8b 88 08 01
+ RSP: 0018:ffffae320190ba30 EFLAGS: 00010246
+ RAX: 00000000b0677d21 RBX: ffff8af1ed9ec000 RCX: 0000000059a9fe49
+ RDX: 0000000000000000 RSI: 000000000c7e33b7 RDI: ffff8af23daa0af0
+ RBP: ffff8af1ee11b200 R08: 0000000074fcaf7e R09: 0000000000000000
+ R10: 0000000000000050 R11: ffffffffb3088680 R12: ffff8af232307f80
+ R13: 0000000000000003 R14: ffff8af1ed9ec000 R15: 0000000000000000
+ FS: 00007fe9c6d2f740(0000) GS:ffff8af23da80000(0000) knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 00007fff6772f000 CR3: 00000000746a2004 CR4: 00000000001606e0
+ Call Trace:
+ tcf_action_exec+0x7c/0x1c0
+ tcf_classify+0x57/0x160
+ __dev_queue_xmit+0x3dc/0xd10
+ ip_finish_output2+0x257/0x6d0
+ ip_output+0x75/0x280
+ ip_send_skb+0x15/0x40
+ raw_sendmsg+0xae3/0x1410
+ sock_sendmsg+0x36/0x40
+ __sys_sendto+0x10e/0x140
+ __x64_sys_sendto+0x24/0x30
+ do_syscall_64+0x60/0x210
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+ [...]
+ Kernel panic - not syncing: Fatal exception in interrupt
+
+Add a TDC selftest to document that 'rate' is now being validated.
+
+Reported-by: Matteo Croce <mcroce@redhat.com>
+Fixes: 5c5670fae430 ("net/sched: Introduce sample tc action")
+Signed-off-by: Davide Caratti <dcaratti@redhat.com>
+Acked-by: Yotam Gigi <yotam.gi@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/act_sample.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/net/sched/act_sample.c
++++ b/net/sched/act_sample.c
+@@ -45,6 +45,7 @@ static int tcf_sample_init(struct net *n
+ struct tc_sample *parm;
+ struct tcf_sample *s;
+ bool exists = false;
++ u32 rate;
+ int ret;
+
+ if (!nla)
+@@ -73,10 +74,17 @@ static int tcf_sample_init(struct net *n
+ if (!ovr)
+ return -EEXIST;
+ }
+- s = to_sample(*a);
+
++ rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
++ if (!rate) {
++ tcf_idr_release(*a, bind);
++ return -EINVAL;
++ }
++
++ s = to_sample(*a);
+ s->tcf_action = parm->action;
+ s->rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
++ s->rate = rate;
+ s->psample_group_num = nla_get_u32(tb[TCA_SAMPLE_PSAMPLE_GROUP]);
+ psample_group = psample_group_get(net, s->psample_group_num);
+ if (!psample_group) {
--- /dev/null
+From foo@baz Mon Apr 15 07:47:06 CEST 2019
+From: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+Date: Thu, 28 Mar 2019 10:35:06 +0100
+Subject: net/sched: fix ->get helper of the matchall cls
+
+From: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+
+[ Upstream commit 0db6f8befc32c68bb13d7ffbb2e563c79e913e13 ]
+
+It always returned NULL, thus it was never possible to get the filter.
+
+Example:
+$ ip link add foo type dummy
+$ ip link add bar type dummy
+$ tc qdisc add dev foo clsact
+$ tc filter add dev foo protocol all pref 1 ingress handle 1234 \
+ matchall action mirred ingress mirror dev bar
+
+Before the patch:
+$ tc filter get dev foo protocol all pref 1 ingress handle 1234 matchall
+Error: Specified filter handle not found.
+We have an error talking to the kernel
+
+After:
+$ tc filter get dev foo protocol all pref 1 ingress handle 1234 matchall
+filter ingress protocol all pref 1 matchall chain 0 handle 0x4d2
+ not_in_hw
+ action order 1: mirred (Ingress Mirror to device bar) pipe
+ index 1 ref 1 bind 1
+
+CC: Yotam Gigi <yotamg@mellanox.com>
+CC: Jiri Pirko <jiri@mellanox.com>
+Fixes: fd62d9f5c575 ("net/sched: matchall: Fix configuration race")
+Signed-off-by: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/cls_matchall.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/net/sched/cls_matchall.c
++++ b/net/sched/cls_matchall.c
+@@ -125,6 +125,11 @@ static void mall_destroy(struct tcf_prot
+
+ static void *mall_get(struct tcf_proto *tp, u32 handle)
+ {
++ struct cls_mall_head *head = rtnl_dereference(tp->root);
++
++ if (head && head->handle == handle)
++ return head;
++
+ return NULL;
+ }
+
--- /dev/null
+From foo@baz Mon Apr 15 07:47:06 CEST 2019
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 27 Mar 2019 08:21:30 -0700
+Subject: netns: provide pure entropy for net_hash_mix()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 355b98553789b646ed97ad801a619ff898471b92 ]
+
+net_hash_mix() currently uses the kernel address of a struct net,
+and is used in many places that could reveal this
+address to a patient attacker, thus defeating KASLR for
+the typical case (the initial net namespace; &init_net is
+not dynamically allocated).
+
+I believe the original implementation tried to avoid spending
+too many cycles in this function, but security comes first.
+
+Also provide entropy regardless of CONFIG_NET_NS.
+
+Fixes: 0b4419162aa6 ("netns: introduce the net_hash_mix "salt" for hashes")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: Amit Klein <aksecurity@gmail.com>
+Reported-by: Benny Pinkas <benny@pinkas.net>
+Cc: Pavel Emelyanov <xemul@openvz.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/net_namespace.h | 1 +
+ include/net/netns/hash.h | 15 ++-------------
+ net/core/net_namespace.c | 1 +
+ 3 files changed, 4 insertions(+), 13 deletions(-)
+
+--- a/include/net/net_namespace.h
++++ b/include/net/net_namespace.h
+@@ -56,6 +56,7 @@ struct net {
+ */
+ spinlock_t rules_mod_lock;
+
++ u32 hash_mix;
+ atomic64_t cookie_gen;
+
+ struct list_head list; /* list of network namespaces */
+--- a/include/net/netns/hash.h
++++ b/include/net/netns/hash.h
+@@ -2,21 +2,10 @@
+ #ifndef __NET_NS_HASH_H__
+ #define __NET_NS_HASH_H__
+
+-#include <asm/cache.h>
+-
+-struct net;
++#include <net/net_namespace.h>
+
+ static inline u32 net_hash_mix(const struct net *net)
+ {
+-#ifdef CONFIG_NET_NS
+- /*
+- * shift this right to eliminate bits, that are
+- * always zeroed
+- */
+-
+- return (u32)(((unsigned long)net) >> L1_CACHE_SHIFT);
+-#else
+- return 0;
+-#endif
++ return net->hash_mix;
+ }
+ #endif
+--- a/net/core/net_namespace.c
++++ b/net/core/net_namespace.c
+@@ -285,6 +285,7 @@ static __net_init int setup_net(struct n
+
+ atomic_set(&net->count, 1);
+ refcount_set(&net->passive, 1);
++ get_random_bytes(&net->hash_mix, sizeof(u32));
+ net->dev_base_seq = 1;
+ net->user_ns = user_ns;
+ idr_init(&net->netns_ids);
--- /dev/null
+From foo@baz Mon Apr 15 07:47:06 CEST 2019
+From: Jakub Kicinski <jakub.kicinski@netronome.com>
+Date: Wed, 27 Mar 2019 11:38:38 -0700
+Subject: nfp: validate the return code from dev_queue_xmit()
+
+From: Jakub Kicinski <jakub.kicinski@netronome.com>
+
+[ Upstream commit c8ba5b91a04e3e2643e48501c114108802f21cda ]
+
+dev_queue_xmit() may return error codes as well as netdev_tx_t,
+and it always consumes the skb. Make sure we always return a
+correct netdev_tx_t value.
+
+Fixes: eadfa4c3be99 ("nfp: add stats and xmit helpers for representors")
+Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
+Reviewed-by: John Hurley <john.hurley@netronome.com>
+Reviewed-by: Simon Horman <simon.horman@netronome.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/netronome/nfp/nfp_net_repr.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
++++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+@@ -200,7 +200,7 @@ static netdev_tx_t nfp_repr_xmit(struct
+ ret = dev_queue_xmit(skb);
+ nfp_repr_inc_tx_stats(netdev, len, ret);
+
+- return ret;
++ return NETDEV_TX_OK;
+ }
+
+ static int nfp_repr_stop(struct net_device *netdev)
--- /dev/null
+From foo@baz Mon Apr 15 07:47:06 CEST 2019
+From: Andrea Righi <andrea.righi@canonical.com>
+Date: Thu, 28 Mar 2019 07:36:00 +0100
+Subject: openvswitch: fix flow actions reallocation
+
+From: Andrea Righi <andrea.righi@canonical.com>
+
+[ Upstream commit f28cd2af22a0c134e4aa1c64a70f70d815d473fb ]
+
+The flow action buffer can be resized if it's not big enough to contain
+all the requested flow actions. However, this resize doesn't take into
+account the newly requested size; the buffer is only increased by a factor
+of 2x. This might not be enough to contain the new data, causing a
+buffer overflow, for example:
+
+[ 42.044472] =============================================================================
+[ 42.045608] BUG kmalloc-96 (Not tainted): Redzone overwritten
+[ 42.046415] -----------------------------------------------------------------------------
+
+[ 42.047715] Disabling lock debugging due to kernel taint
+[ 42.047716] INFO: 0x8bf2c4a5-0x720c0928. First byte 0x0 instead of 0xcc
+[ 42.048677] INFO: Slab 0xbc6d2040 objects=29 used=18 fp=0xdc07dec4 flags=0x2808101
+[ 42.049743] INFO: Object 0xd53a3464 @offset=2528 fp=0xccdcdebb
+
+[ 42.050747] Redzone 76f1b237: cc cc cc cc cc cc cc cc ........
+[ 42.051839] Object d53a3464: 6b 6b 6b 6b 6b 6b 6b 6b 0c 00 00 00 6c 00 00 00 kkkkkkkk....l...
+[ 42.053015] Object f49a30cc: 6c 00 0c 00 00 00 00 00 00 00 00 03 78 a3 15 f6 l...........x...
+[ 42.054203] Object acfe4220: 20 00 02 00 ff ff ff ff 00 00 00 00 00 00 00 00 ...............
+[ 42.055370] Object 21024e91: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+[ 42.056541] Object 070e04c3: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+[ 42.057797] Object 948a777a: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+[ 42.059061] Redzone 8bf2c4a5: 00 00 00 00 ....
+[ 42.060189] Padding a681b46e: 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZ
+
+Fix by making sure the new buffer is properly resized to contain all the
+requested data.
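+
+An illustrative sizing with made-up numbers: if ksize(*sfa) is 96 bytes,
+next_offset is 80 and the new req_size is 128, doubling only yields 192
+bytes while next_offset + req_size = 208 bytes are needed; taking
+max(next_offset + req_size, ksize(*sfa) * 2) covers both cases.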
+
+BugLink: https://bugs.launchpad.net/bugs/1813244
+Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
+Acked-by: Pravin B Shelar <pshelar@ovn.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/openvswitch/flow_netlink.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/openvswitch/flow_netlink.c
++++ b/net/openvswitch/flow_netlink.c
+@@ -1967,14 +1967,14 @@ static struct nlattr *reserve_sfa_size(s
+
+ struct sw_flow_actions *acts;
+ int new_acts_size;
+- int req_size = NLA_ALIGN(attr_len);
++ size_t req_size = NLA_ALIGN(attr_len);
+ int next_offset = offsetof(struct sw_flow_actions, actions) +
+ (*sfa)->actions_len;
+
+ if (req_size <= (ksize(*sfa) - next_offset))
+ goto out;
+
+- new_acts_size = ksize(*sfa) * 2;
++ new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2);
+
+ if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
+ if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
--- /dev/null
+From foo@baz Mon Apr 15 07:47:06 CEST 2019
+From: "Bjรธrn Mork" <bjorn@mork.no>
+Date: Wed, 27 Mar 2019 15:26:01 +0100
+Subject: qmi_wwan: add Olicard 600
+
+From: "Bjรธrn Mork" <bjorn@mork.no>
+
+[ Upstream commit 6289d0facd9ebce4cc83e5da39e15643ee998dc5 ]
+
+This is a Qualcomm based device with a QMI function on interface 4.
+It is mode switched from 2020:2030 using a standard eject message.
+
+T: Bus=01 Lev=01 Prnt=01 Port=00 Cnt=01 Dev#= 6 Spd=480 MxCh= 0
+D: Ver= 2.00 Cls=00(>ifc ) Sub=00 Prot=00 MxPS=64 #Cfgs= 1
+P: Vendor=2020 ProdID=2031 Rev= 2.32
+S: Manufacturer=Mobile Connect
+S: Product=Mobile Connect
+S: SerialNumber=0123456789ABCDEF
+C:* #Ifs= 6 Cfg#= 1 Atr=80 MxPwr=500mA
+I:* If#= 0 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=ff Driver=(none)
+E: Ad=81(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=01(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:* If#= 1 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=(none)
+E: Ad=83(I) Atr=03(Int.) MxPS= 10 Ivl=32ms
+E: Ad=82(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=02(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:* If#= 2 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=(none)
+E: Ad=85(I) Atr=03(Int.) MxPS= 10 Ivl=32ms
+E: Ad=84(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=03(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:* If#= 3 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=(none)
+E: Ad=87(I) Atr=03(Int.) MxPS= 10 Ivl=32ms
+E: Ad=86(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=04(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:* If#= 4 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=ff Driver=(none)
+E: Ad=89(I) Atr=03(Int.) MxPS= 8 Ivl=32ms
+E: Ad=88(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=05(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:* If#= 5 Alt= 0 #EPs= 2 Cls=08(stor.) Sub=06 Prot=50 Driver=(none)
+E: Ad=8a(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=06(O) Atr=02(Bulk) MxPS= 512 Ivl=125us
+
+Signed-off-by: Bjørn Mork <bjorn@mork.no>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/qmi_wwan.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1188,6 +1188,7 @@ static const struct usb_device_id produc
+ {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
+ {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
+ {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
++ {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
+ {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
+ {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
+ {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
--- /dev/null
+From foo@baz Mon Apr 15 07:47:06 CEST 2019
+From: Xin Long <lucien.xin@gmail.com>
+Date: Sun, 31 Mar 2019 16:58:15 +0800
+Subject: sctp: initialize _pad of sockaddr_in before copying to user memory
+
+From: Xin Long <lucien.xin@gmail.com>
+
+[ Upstream commit 09279e615c81ce55e04835970601ae286e3facbe ]
+
+Syzbot reported a kernel-infoleak:
+
+ BUG: KMSAN: kernel-infoleak in _copy_to_user+0x16b/0x1f0 lib/usercopy.c:32
+ Call Trace:
+ _copy_to_user+0x16b/0x1f0 lib/usercopy.c:32
+ copy_to_user include/linux/uaccess.h:174 [inline]
+ sctp_getsockopt_peer_addrs net/sctp/socket.c:5911 [inline]
+ sctp_getsockopt+0x1668e/0x17f70 net/sctp/socket.c:7562
+ ...
+ Uninit was stored to memory at:
+ sctp_transport_init net/sctp/transport.c:61 [inline]
+ sctp_transport_new+0x16d/0x9a0 net/sctp/transport.c:115
+ sctp_assoc_add_peer+0x532/0x1f70 net/sctp/associola.c:637
+ sctp_process_param net/sctp/sm_make_chunk.c:2548 [inline]
+ sctp_process_init+0x1a1b/0x3ed0 net/sctp/sm_make_chunk.c:2361
+ ...
+ Bytes 8-15 of 16 are uninitialized
+
+It was caused by the fact that the _pad field (bytes 8-15) of a v4 addr (saved in
+struct sockaddr_in) wasn't initialized, but was directly copied to user memory
+in sctp_getsockopt_peer_addrs().
+
+So fix it by calling memset(addr->v4.sin_zero, 0, 8) to initialize _pad of
+sockaddr_in before copying it to user memory in sctp_v4_addr_to_user(), as
+sctp_v6_addr_to_user() does.
+
+Reported-by: syzbot+86b5c7c236a22616a72f@syzkaller.appspotmail.com
+Signed-off-by: Xin Long <lucien.xin@gmail.com>
+Tested-by: Alexander Potapenko <glider@google.com>
+Acked-by: Neil Horman <nhorman@tuxdriver.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sctp/protocol.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/sctp/protocol.c
++++ b/net/sctp/protocol.c
+@@ -605,6 +605,7 @@ out:
+ static int sctp_v4_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr)
+ {
+ /* No address mapping for V4 sockets */
++ memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
+ return sizeof(struct sockaddr_in);
+ }
+
lib-string.c-implement-a-basic-bcmp.patch
stating-ccree-revert-staging-ccree-fix-leak-of-impor.patch
arm64-kaslr-reserve-size-of-arm64_memstart_align-in-.patch
+tty-mark-siemens-r3964-line-discipline-as-broken.patch
+tty-ldisc-add-sysctl-to-prevent-autoloading-of-ldiscs.patch
+ipv6-fix-dangling-pointer-when-ipv6-fragment.patch
+ipv6-sit-reset-ip-header-pointer-in-ipip6_rcv.patch
+kcm-switch-order-of-device-registration-to-fix-a-crash.patch
+net-gro-fix-gro-flush-when-receiving-a-gso-packet.patch
+net-mlx5-decrease-default-mr-cache-size.patch
+net-rds-force-to-destroy-connection-if-t_sock-is-null-in-rds_tcp_kill_sock.patch
+net-sched-fix-get-helper-of-the-matchall-cls.patch
+openvswitch-fix-flow-actions-reallocation.patch
+qmi_wwan-add-olicard-600.patch
+sctp-initialize-_pad-of-sockaddr_in-before-copying-to-user-memory.patch
+tcp-ensure-dctcp-reacts-to-losses.patch
+vrf-check-accept_source_route-on-the-original-netdevice.patch
+net-mlx5e-fix-error-handling-when-refreshing-tirs.patch
+net-mlx5e-add-a-lock-on-tir-list.patch
+nfp-validate-the-return-code-from-dev_queue_xmit.patch
+bnxt_en-improve-rx-consumer-index-validity-check.patch
+bnxt_en-reset-device-on-rx-buffer-errors.patch
+net-sched-act_sample-fix-divide-by-zero-in-the-traffic-path.patch
+netns-provide-pure-entropy-for-net_hash_mix.patch
+net-ethtool-not-call-vzalloc-for-zero-sized-memory-request.patch
+alsa-seq-fix-oob-reads-from-strlcpy.patch
+ip6_tunnel-match-to-arphrd_tunnel6-for-dev-type.patch
+hv_netvsc-fix-unwanted-wakeup-after-tx_disable.patch
--- /dev/null
+From foo@baz Mon Apr 15 07:47:06 CEST 2019
+From: Koen De Schepper <koen.de_schepper@nokia-bell-labs.com>
+Date: Thu, 4 Apr 2019 12:24:02 +0000
+Subject: tcp: Ensure DCTCP reacts to losses
+
+From: Koen De Schepper <koen.de_schepper@nokia-bell-labs.com>
+
+[ Upstream commit aecfde23108b8e637d9f5c5e523b24fb97035dc3 ]
+
+RFC8257 §3.5 explicitly states that "A DCTCP sender MUST react to
+loss episodes in the same way as conventional TCP".
+
+Currently, Linux DCTCP performs no cwnd reduction when losses
+are encountered. Optionally, the dctcp_clamp_alpha_on_loss module
+parameter resets alpha to its maximal value if an RTO happens. This
+behavior is sub-optimal for at least two reasons: i) it ignores losses
+triggering fast retransmissions; and ii) it causes unnecessarily large
+cwnd reductions in the future if the loss was isolated, as it resets
+the historical term of DCTCP's alpha EWMA to its maximal value (i.e.,
+denoting total congestion). The second reason has an especially
+noticeable effect when using DCTCP in high BDP environments, where
+alpha normally stays at low values.
+
+This patch replaces the clamping of alpha by setting ssthresh to
+half of cwnd for both fast retransmissions and RTOs, at most once
+per RTT. Consequently, the dctcp_clamp_alpha_on_loss module parameter
+has been removed.
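
As a quick aside on the arithmetic: the new reaction is the classical
multiplicative decrease, cwnd/2 with a floor of two segments. A standalone
sketch of what dctcp_react_to_loss() computes in the diff below (the
loss_ssthresh() name and the sample values are illustrative, not kernel API):

#include <stdio.h>

/* ssthresh becomes half of the current cwnd, but never less than two
 * segments, mirroring max(tp->snd_cwnd >> 1U, 2U) in the patch. */
static unsigned int loss_ssthresh(unsigned int snd_cwnd)
{
        unsigned int half = snd_cwnd >> 1;

        return half > 2 ? half : 2;
}

int main(void)
{
        const unsigned int cwnds[] = { 1, 2, 3, 10, 117 };

        for (unsigned int i = 0; i < sizeof(cwnds) / sizeof(cwnds[0]); i++)
                printf("cwnd %3u -> ssthresh %3u\n",
                       cwnds[i], loss_ssthresh(cwnds[i]));
        return 0;
}

The "at most once per RTT" part comes from the diff itself: dctcp_state()
reacts only on the transition into TCP_CA_Recovery, while RTOs are handled
separately through the CA_EVENT_LOSS case in dctcp_cwnd_event().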
+
+The table below shows experimental results where we measured the
+drop probability of a PIE AQM (not applying ECN marks) at a
+bottleneck in the presence of a single TCP flow with either the
+alpha-clamping option enabled or the cwnd halving proposed by this
+patch. Results using reno or cubic are given for comparison.
+
+ | Link | RTT | Drop
+ TCP CC | speed | base+AQM | probability
+ ==================|=========|==========|============
+ CUBIC | 40Mbps | 7+20ms | 0.21%
+ RENO | | | 0.19%
+ DCTCP-CLAMP-ALPHA | | | 25.80%
+ DCTCP-HALVE-CWND | | | 0.22%
+ ------------------|---------|----------|------------
+ CUBIC | 100Mbps | 7+20ms | 0.03%
+ RENO | | | 0.02%
+ DCTCP-CLAMP-ALPHA | | | 23.30%
+ DCTCP-HALVE-CWND | | | 0.04%
+ ------------------|---------|----------|------------
+ CUBIC | 800Mbps | 1+1ms | 0.04%
+ RENO | | | 0.05%
+ DCTCP-CLAMP-ALPHA | | | 18.70%
+ DCTCP-HALVE-CWND | | | 0.06%
+
+We see that, without halving its cwnd for all sources of losses,
+DCTCP drives the AQM to large drop probabilities in order to keep
+the queue length under control (i.e., it repeatedly faces RTOs).
+Instead, if DCTCP reacts to all sources of losses, it can then be
+controlled by the AQM using drop levels similar to those of cubic
+or reno.
+
+Signed-off-by: Koen De Schepper <koen.de_schepper@nokia-bell-labs.com>
+Signed-off-by: Olivier Tilmans <olivier.tilmans@nokia-bell-labs.com>
+Cc: Bob Briscoe <research@bobbriscoe.net>
+Cc: Lawrence Brakmo <brakmo@fb.com>
+Cc: Florian Westphal <fw@strlen.de>
+Cc: Daniel Borkmann <borkmann@iogearbox.net>
+Cc: Yuchung Cheng <ycheng@google.com>
+Cc: Neal Cardwell <ncardwell@google.com>
+Cc: Eric Dumazet <edumazet@google.com>
+Cc: Andrew Shewmaker <agshew@gmail.com>
+Cc: Glenn Judd <glenn.judd@morganstanley.com>
+Acked-by: Florian Westphal <fw@strlen.de>
+Acked-by: Neal Cardwell <ncardwell@google.com>
+Acked-by: Daniel Borkmann <daniel@iogearbox.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_dctcp.c | 36 ++++++++++++++++++------------------
+ 1 file changed, 18 insertions(+), 18 deletions(-)
+
+--- a/net/ipv4/tcp_dctcp.c
++++ b/net/ipv4/tcp_dctcp.c
+@@ -66,11 +66,6 @@ static unsigned int dctcp_alpha_on_init
+ module_param(dctcp_alpha_on_init, uint, 0644);
+ MODULE_PARM_DESC(dctcp_alpha_on_init, "parameter for initial alpha value");
+
+-static unsigned int dctcp_clamp_alpha_on_loss __read_mostly;
+-module_param(dctcp_clamp_alpha_on_loss, uint, 0644);
+-MODULE_PARM_DESC(dctcp_clamp_alpha_on_loss,
+- "parameter for clamping alpha on loss");
+-
+ static struct tcp_congestion_ops dctcp_reno;
+
+ static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca)
+@@ -211,21 +206,23 @@ static void dctcp_update_alpha(struct so
+ }
+ }
+
+-static void dctcp_state(struct sock *sk, u8 new_state)
++static void dctcp_react_to_loss(struct sock *sk)
+ {
+- if (dctcp_clamp_alpha_on_loss && new_state == TCP_CA_Loss) {
+- struct dctcp *ca = inet_csk_ca(sk);
++ struct dctcp *ca = inet_csk_ca(sk);
++ struct tcp_sock *tp = tcp_sk(sk);
+
+- /* If this extension is enabled, we clamp dctcp_alpha to
+- * max on packet loss; the motivation is that dctcp_alpha
+- * is an indicator to the extend of congestion and packet
+- * loss is an indicator of extreme congestion; setting
+- * this in practice turned out to be beneficial, and
+- * effectively assumes total congestion which reduces the
+- * window by half.
+- */
+- ca->dctcp_alpha = DCTCP_MAX_ALPHA;
+- }
++ ca->loss_cwnd = tp->snd_cwnd;
++ tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U);
++}
++
++static void dctcp_state(struct sock *sk, u8 new_state)
++{
++ if (new_state == TCP_CA_Recovery &&
++ new_state != inet_csk(sk)->icsk_ca_state)
++ dctcp_react_to_loss(sk);
++ /* We handle RTO in dctcp_cwnd_event to ensure that we perform only
++ * one loss-adjustment per RTT.
++ */
+ }
+
+ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
+@@ -237,6 +234,9 @@ static void dctcp_cwnd_event(struct sock
+ case CA_EVENT_ECN_NO_CE:
+ dctcp_ce_state_1_to_0(sk);
+ break;
++ case CA_EVENT_LOSS:
++ dctcp_react_to_loss(sk);
++ break;
+ default:
+ /* Don't care for the rest. */
+ break;
--- /dev/null
+From 7c0cca7c847e6e019d67b7d793efbbe3b947d004 Mon Sep 17 00:00:00 2001
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Mon, 21 Jan 2019 17:26:42 +0100
+Subject: tty: ldisc: add sysctl to prevent autoloading of ldiscs
+
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+commit 7c0cca7c847e6e019d67b7d793efbbe3b947d004 upstream.
+
+By default, the kernel will automatically load the module of any line
+discipline that is asked for. As this sometimes isn't the safest thing
+to do, provide a sysctl to disable this feature.
+
+By default, we set this to 'y' as that is the historical way that Linux
+has worked, and we do not want to break working systems. But in the
+future, perhaps this can default to 'n' to disable this functionality.
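
As a rough illustration of the path this knob gates (a hedged userspace
sketch, not part of the patch; using /dev/ptmx and the N_SLIP value 1 are
assumptions made purely for demonstration): a process without
CAP_SYS_MODULE asking for a line discipline whose module is not loaded
would get EPERM from TIOCSETD once dev.tty.ldisc_autoload is 0, where
previously the kernel would have silently tried request_module().

#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

int main(void)
{
        /* N_SLIP (value 1) is used purely for illustration; any line
         * discipline whose module is not currently loaded exercises the
         * autoload path. */
        int ldisc = 1;
        int fd = open("/dev/ptmx", O_RDWR | O_NOCTTY);

        if (fd < 0) {
                perror("open /dev/ptmx");
                return 1;
        }

        if (ioctl(fd, TIOCSETD, &ldisc) < 0)
                /* With dev.tty.ldisc_autoload=0 and no CAP_SYS_MODULE, an
                 * unloaded ldisc now yields EPERM instead of being
                 * silently modprobe'd. */
                fprintf(stderr, "TIOCSETD: %s\n", strerror(errno));

        close(fd);
        return 0;
}

Flipping the behaviour at runtime is then a matter of writing 0 or 1 to
/proc/sys/dev/tty/ldisc_autoload, as registered by the sysctl table below.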
+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/tty/Kconfig | 24 ++++++++++++++++++++++++
+ drivers/tty/tty_io.c | 3 +++
+ drivers/tty/tty_ldisc.c | 47 +++++++++++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 74 insertions(+)
+
+--- a/drivers/tty/Kconfig
++++ b/drivers/tty/Kconfig
+@@ -467,4 +467,28 @@ config VCC
+ depends on SUN_LDOMS
+ help
+ Support for Sun logical domain consoles.
++
++config LDISC_AUTOLOAD
++ bool "Automatically load TTY Line Disciplines"
++ default y
++ help
++ Historically the kernel has always automatically loaded any
++ line discipline that is in a kernel module when a user asks
++ for it to be loaded with the TIOCSETD ioctl, or through other
++ means. This is not always the best thing to do on systems
++ where you know you will not be using some of the more
++ "ancient" line disciplines, so prevent the kernel from doing
++ this unless the request is coming from a process with the
++ CAP_SYS_MODULE permissions.
++
++ Say 'Y' here if you trust your userspace users to do the right
++ thing, or if you have only provided the line disciplines that
++ you know you will be using, or if you wish to continue to use
++ the traditional method of on-demand loading of these modules
++ by any user.
++
++ This functionality can be changed at runtime with the
++ dev.tty.ldisc_autoload sysctl, this configuration option will
++ only set the default value of this functionality.
++
+ endif # TTY
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -511,6 +511,8 @@ static const struct file_operations hung
+ static DEFINE_SPINLOCK(redirect_lock);
+ static struct file *redirect;
+
++extern void tty_sysctl_init(void);
++
+ /**
+ * tty_wakeup - request more data
+ * @tty: terminal
+@@ -3332,6 +3334,7 @@ void console_sysfs_notify(void)
+ */
+ int __init tty_init(void)
+ {
++ tty_sysctl_init();
+ cdev_init(&tty_cdev, &tty_fops);
+ if (cdev_add(&tty_cdev, MKDEV(TTYAUX_MAJOR, 0), 1) ||
+ register_chrdev_region(MKDEV(TTYAUX_MAJOR, 0), 1, "/dev/tty") < 0)
+--- a/drivers/tty/tty_ldisc.c
++++ b/drivers/tty/tty_ldisc.c
+@@ -155,6 +155,13 @@ static void put_ldops(struct tty_ldisc_o
+ * takes tty_ldiscs_lock to guard against ldisc races
+ */
+
++#if defined(CONFIG_LDISC_AUTOLOAD)
++ #define INITIAL_AUTOLOAD_STATE 1
++#else
++ #define INITIAL_AUTOLOAD_STATE 0
++#endif
++static int tty_ldisc_autoload = INITIAL_AUTOLOAD_STATE;
++
+ static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
+ {
+ struct tty_ldisc *ld;
+@@ -169,6 +176,8 @@ static struct tty_ldisc *tty_ldisc_get(s
+ */
+ ldops = get_ldops(disc);
+ if (IS_ERR(ldops)) {
++ if (!capable(CAP_SYS_MODULE) && !tty_ldisc_autoload)
++ return ERR_PTR(-EPERM);
+ request_module("tty-ldisc-%d", disc);
+ ldops = get_ldops(disc);
+ if (IS_ERR(ldops))
+@@ -841,3 +850,41 @@ void tty_ldisc_deinit(struct tty_struct
+ tty_ldisc_put(tty->ldisc);
+ tty->ldisc = NULL;
+ }
++
++static int zero;
++static int one = 1;
++static struct ctl_table tty_table[] = {
++ {
++ .procname = "ldisc_autoload",
++ .data = &tty_ldisc_autoload,
++ .maxlen = sizeof(tty_ldisc_autoload),
++ .mode = 0644,
++ .proc_handler = proc_dointvec,
++ .extra1 = &zero,
++ .extra2 = &one,
++ },
++ { }
++};
++
++static struct ctl_table tty_dir_table[] = {
++ {
++ .procname = "tty",
++ .mode = 0555,
++ .child = tty_table,
++ },
++ { }
++};
++
++static struct ctl_table tty_root_table[] = {
++ {
++ .procname = "dev",
++ .mode = 0555,
++ .child = tty_dir_table,
++ },
++ { }
++};
++
++void tty_sysctl_init(void)
++{
++ register_sysctl_table(tty_root_table);
++}
--- /dev/null
+From c7084edc3f6d67750f50d4183134c4fb5712a5c8 Mon Sep 17 00:00:00 2001
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Fri, 5 Apr 2019 15:39:26 +0200
+Subject: tty: mark Siemens R3964 line discipline as BROKEN
+
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+commit c7084edc3f6d67750f50d4183134c4fb5712a5c8 upstream.
+
+The n_r3964 line discipline driver was written in a different time, when
+SMP machines were rare, and users were trusted to do the right thing.
+Since then, the world has moved on but not this code; it has stayed
+rooted in the past with its lovely hand-crafted list structures and
+loads of "interesting" race conditions all over the place.
+
+After attempting to clean up most of the issues, I just gave up and am
+now marking the driver as BROKEN so that hopefully someone who has this
+hardware will show up out of the woodwork (I know you are out there!)
+and will help with debugging a raft of changes that I had lying around
+for the code, but was too afraid to commit as odds are they would break
+things.
+
+Many thanks to Jann and Linus for pointing out the initial problems in
+this codebase, as well as many reviews of my attempts to fix the issues.
+It was a case of whack-a-mole, and as you can see, the mole won.
+
+Reported-by: Jann Horn <jannh@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+
+---
+ drivers/char/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/char/Kconfig
++++ b/drivers/char/Kconfig
+@@ -380,7 +380,7 @@ config XILINX_HWICAP
+
+ config R3964
+ tristate "Siemens R3964 line discipline"
+- depends on TTY
++ depends on TTY && BROKEN
+ ---help---
+ This driver allows synchronous communication with devices using the
+ Siemens R3964 packet protocol. Unless you are dealing with special
--- /dev/null
+From foo@baz Mon Apr 15 07:47:06 CEST 2019
+From: Stephen Suryaputra <ssuryaextr@gmail.com>
+Date: Mon, 1 Apr 2019 09:17:32 -0400
+Subject: vrf: check accept_source_route on the original netdevice
+
+From: Stephen Suryaputra <ssuryaextr@gmail.com>
+
+[ Upstream commit 8c83f2df9c6578ea4c5b940d8238ad8a41b87e9e ]
+
+The configuration check to accept source route IP options should be made
+on the incoming netdevice when skb->dev is an l3mdev master. The route
+lookup for the source route next hop also needs the incoming netdev.
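
To spell out the pitfall with a toy model (purely illustrative, no kernel
APIs; struct toy_netdev and srr_allowed() are invented names): once a packet
is handed to a VRF, skb->dev points at the l3mdev master, so reading a
per-device option such as accept_source_route from skb->dev consults the
wrong device's configuration.

#include <stdio.h>
#include <stdbool.h>

/* Toy model of two devices: the real ingress interface and the VRF
 * (l3mdev) master that skb->dev is rewritten to point at. */
struct toy_netdev {
        const char *name;
        bool accept_source_route;
};

static bool srr_allowed(const struct toy_netdev *dev)
{
        return dev->accept_source_route;
}

int main(void)
{
        struct toy_netdev eth0 = { "eth0", false };   /* real ingress dev */
        struct toy_netdev vrf0 = { "vrf0", true };    /* l3mdev master    */

        /* Before the fix: consulting skb->dev (the master) wrongly allows
         * source-routed packets that eth0's configuration forbids. */
        printf("check on %s: %s\n", vrf0.name,
               srr_allowed(&vrf0) ? "allow" : "drop");

        /* After the fix: the original device is passed down and consulted. */
        printf("check on %s: %s\n", eth0.name,
               srr_allowed(&eth0) ? "allow" : "drop");
        return 0;
}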
+
+v2->v3:
+- Simplify by passing the original netdevice down the stack (per David
+ Ahern).
+
+Signed-off-by: Stephen Suryaputra <ssuryaextr@gmail.com>
+Reviewed-by: David Ahern <dsahern@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/ip.h | 2 +-
+ net/ipv4/ip_input.c | 7 +++----
+ net/ipv4/ip_options.c | 4 ++--
+ 3 files changed, 6 insertions(+), 7 deletions(-)
+
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -603,7 +603,7 @@ int ip_options_get_from_user(struct net
+ unsigned char __user *data, int optlen);
+ void ip_options_undo(struct ip_options *opt);
+ void ip_forward_options(struct sk_buff *skb);
+-int ip_options_rcv_srr(struct sk_buff *skb);
++int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);
+
+ /*
+ * Functions provided by ip_sockglue.c
+--- a/net/ipv4/ip_input.c
++++ b/net/ipv4/ip_input.c
+@@ -259,11 +259,10 @@ int ip_local_deliver(struct sk_buff *skb
+ ip_local_deliver_finish);
+ }
+
+-static inline bool ip_rcv_options(struct sk_buff *skb)
++static inline bool ip_rcv_options(struct sk_buff *skb, struct net_device *dev)
+ {
+ struct ip_options *opt;
+ const struct iphdr *iph;
+- struct net_device *dev = skb->dev;
+
+ /* It looks as overkill, because not all
+ IP options require packet mangling.
+@@ -299,7 +298,7 @@ static inline bool ip_rcv_options(struct
+ }
+ }
+
+- if (ip_options_rcv_srr(skb))
++ if (ip_options_rcv_srr(skb, dev))
+ goto drop;
+ }
+
+@@ -362,7 +361,7 @@ static int ip_rcv_finish(struct net *net
+ }
+ #endif
+
+- if (iph->ihl > 5 && ip_rcv_options(skb))
++ if (iph->ihl > 5 && ip_rcv_options(skb, dev))
+ goto drop;
+
+ rt = skb_rtable(skb);
+--- a/net/ipv4/ip_options.c
++++ b/net/ipv4/ip_options.c
+@@ -612,7 +612,7 @@ void ip_forward_options(struct sk_buff *
+ }
+ }
+
+-int ip_options_rcv_srr(struct sk_buff *skb)
++int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev)
+ {
+ struct ip_options *opt = &(IPCB(skb)->opt);
+ int srrspace, srrptr;
+@@ -647,7 +647,7 @@ int ip_options_rcv_srr(struct sk_buff *s
+
+ orefdst = skb->_skb_refdst;
+ skb_dst_set(skb, NULL);
+- err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, skb->dev);
++ err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, dev);
+ rt2 = skb_rtable(skb);
+ if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) {
+ skb_dst_drop(skb);