--- /dev/null
+From foo@baz Sun 10 Nov 2019 11:41:56 AM CET
+From: Jay Vosburgh <jay.vosburgh@canonical.com>
+Date: Fri, 1 Nov 2019 21:56:42 -0700
+Subject: bonding: fix state transition issue in link monitoring
+
+From: Jay Vosburgh <jay.vosburgh@canonical.com>
+
+[ Upstream commit 1899bb325149e481de31a4f32b59ea6f24e176ea ]
+
+Since de77ecd4ef02 ("bonding: improve link-status update in
+mii-monitoring"), the bonding driver has utilized two separate variables
+to indicate the next link state a particular slave should transition to.
+Each is used to communicate to a different portion of the link state
+change commit logic; one to the bond_miimon_commit function itself, and
+another to the state transition logic.
+
+ Unfortunately, the two variables can become unsynchronized,
+resulting in incorrect link state transitions within bonding. This can
+cause slaves to become stuck in an incorrect link state until a
+subsequent carrier state transition.
+
+ The issue occurs when a special case in bond_slave_netdev_event
+sets slave->link directly to BOND_LINK_FAIL. On the next pass through
+bond_miimon_inspect after the slave goes carrier up, the BOND_LINK_FAIL
+case will set the proposed next state (link_new_state) to BOND_LINK_UP,
+but the new_link to BOND_LINK_DOWN. The setting of the final link state
+from new_link comes after that from link_new_state, and so the slave
+will end up incorrectly in _DOWN state.
+
+ Resolve this by combining the two variables into one.
+
+Reported-by: Aleksei Zakharov <zakharov.a.g@yandex.ru>
+Reported-by: Sha Zhang <zhangsha.zhang@huawei.com>
+Cc: Mahesh Bandewar <maheshb@google.com>
+Fixes: de77ecd4ef02 ("bonding: improve link-status update in mii-monitoring")
+Signed-off-by: Jay Vosburgh <jay.vosburgh@canonical.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/bonding/bond_main.c | 44 ++++++++++++++++++++--------------------
+ include/net/bonding.h | 3 --
+ 2 files changed, 23 insertions(+), 24 deletions(-)
+
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -2086,8 +2086,7 @@ static int bond_miimon_inspect(struct bo
+ ignore_updelay = !rcu_dereference(bond->curr_active_slave);
+
+ bond_for_each_slave_rcu(bond, slave, iter) {
+- slave->new_link = BOND_LINK_NOCHANGE;
+- slave->link_new_state = slave->link;
++ bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
+
+ link_state = bond_check_dev_link(bond, slave->dev, 0);
+
+@@ -2121,7 +2120,7 @@ static int bond_miimon_inspect(struct bo
+ }
+
+ if (slave->delay <= 0) {
+- slave->new_link = BOND_LINK_DOWN;
++ bond_propose_link_state(slave, BOND_LINK_DOWN);
+ commit++;
+ continue;
+ }
+@@ -2158,7 +2157,7 @@ static int bond_miimon_inspect(struct bo
+ slave->delay = 0;
+
+ if (slave->delay <= 0) {
+- slave->new_link = BOND_LINK_UP;
++ bond_propose_link_state(slave, BOND_LINK_UP);
+ commit++;
+ ignore_updelay = false;
+ continue;
+@@ -2196,7 +2195,7 @@ static void bond_miimon_commit(struct bo
+ struct slave *slave, *primary;
+
+ bond_for_each_slave(bond, slave, iter) {
+- switch (slave->new_link) {
++ switch (slave->link_new_state) {
+ case BOND_LINK_NOCHANGE:
+ /* For 802.3ad mode, check current slave speed and
+ * duplex again in case its port was disabled after
+@@ -2268,8 +2267,8 @@ static void bond_miimon_commit(struct bo
+
+ default:
+ slave_err(bond->dev, slave->dev, "invalid new link %d on slave\n",
+- slave->new_link);
+- slave->new_link = BOND_LINK_NOCHANGE;
++ slave->link_new_state);
++ bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
+
+ continue;
+ }
+@@ -2677,13 +2676,13 @@ static void bond_loadbalance_arp_mon(str
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ unsigned long trans_start = dev_trans_start(slave->dev);
+
+- slave->new_link = BOND_LINK_NOCHANGE;
++ bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
+
+ if (slave->link != BOND_LINK_UP) {
+ if (bond_time_in_interval(bond, trans_start, 1) &&
+ bond_time_in_interval(bond, slave->last_rx, 1)) {
+
+- slave->new_link = BOND_LINK_UP;
++ bond_propose_link_state(slave, BOND_LINK_UP);
+ slave_state_changed = 1;
+
+ /* primary_slave has no meaning in round-robin
+@@ -2708,7 +2707,7 @@ static void bond_loadbalance_arp_mon(str
+ if (!bond_time_in_interval(bond, trans_start, 2) ||
+ !bond_time_in_interval(bond, slave->last_rx, 2)) {
+
+- slave->new_link = BOND_LINK_DOWN;
++ bond_propose_link_state(slave, BOND_LINK_DOWN);
+ slave_state_changed = 1;
+
+ if (slave->link_failure_count < UINT_MAX)
+@@ -2739,8 +2738,8 @@ static void bond_loadbalance_arp_mon(str
+ goto re_arm;
+
+ bond_for_each_slave(bond, slave, iter) {
+- if (slave->new_link != BOND_LINK_NOCHANGE)
+- slave->link = slave->new_link;
++ if (slave->link_new_state != BOND_LINK_NOCHANGE)
++ slave->link = slave->link_new_state;
+ }
+
+ if (slave_state_changed) {
+@@ -2763,9 +2762,9 @@ re_arm:
+ }
+
+ /* Called to inspect slaves for active-backup mode ARP monitor link state
+- * changes. Sets new_link in slaves to specify what action should take
+- * place for the slave. Returns 0 if no changes are found, >0 if changes
+- * to link states must be committed.
++ * changes. Sets proposed link state in slaves to specify what action
++ * should take place for the slave. Returns 0 if no changes are found, >0
++ * if changes to link states must be committed.
+ *
+ * Called with rcu_read_lock held.
+ */
+@@ -2777,12 +2776,12 @@ static int bond_ab_arp_inspect(struct bo
+ int commit = 0;
+
+ bond_for_each_slave_rcu(bond, slave, iter) {
+- slave->new_link = BOND_LINK_NOCHANGE;
++ bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
+ last_rx = slave_last_rx(bond, slave);
+
+ if (slave->link != BOND_LINK_UP) {
+ if (bond_time_in_interval(bond, last_rx, 1)) {
+- slave->new_link = BOND_LINK_UP;
++ bond_propose_link_state(slave, BOND_LINK_UP);
+ commit++;
+ }
+ continue;
+@@ -2810,7 +2809,7 @@ static int bond_ab_arp_inspect(struct bo
+ if (!bond_is_active_slave(slave) &&
+ !rcu_access_pointer(bond->current_arp_slave) &&
+ !bond_time_in_interval(bond, last_rx, 3)) {
+- slave->new_link = BOND_LINK_DOWN;
++ bond_propose_link_state(slave, BOND_LINK_DOWN);
+ commit++;
+ }
+
+@@ -2823,7 +2822,7 @@ static int bond_ab_arp_inspect(struct bo
+ if (bond_is_active_slave(slave) &&
+ (!bond_time_in_interval(bond, trans_start, 2) ||
+ !bond_time_in_interval(bond, last_rx, 2))) {
+- slave->new_link = BOND_LINK_DOWN;
++ bond_propose_link_state(slave, BOND_LINK_DOWN);
+ commit++;
+ }
+ }
+@@ -2843,7 +2842,7 @@ static void bond_ab_arp_commit(struct bo
+ struct slave *slave;
+
+ bond_for_each_slave(bond, slave, iter) {
+- switch (slave->new_link) {
++ switch (slave->link_new_state) {
+ case BOND_LINK_NOCHANGE:
+ continue;
+
+@@ -2893,8 +2892,9 @@ static void bond_ab_arp_commit(struct bo
+ continue;
+
+ default:
+- slave_err(bond->dev, slave->dev, "impossible: new_link %d on slave\n",
+- slave->new_link);
++ slave_err(bond->dev, slave->dev,
++ "impossible: link_new_state %d on slave\n",
++ slave->link_new_state);
+ continue;
+ }
+
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -159,7 +159,6 @@ struct slave {
+ unsigned long target_last_arp_rx[BOND_MAX_ARP_TARGETS];
+ s8 link; /* one of BOND_LINK_XXXX */
+ s8 link_new_state; /* one of BOND_LINK_XXXX */
+- s8 new_link;
+ u8 backup:1, /* indicates backup slave. Value corresponds with
+ BOND_STATE_ACTIVE and BOND_STATE_BACKUP */
+ inactive:1, /* indicates inactive slave */
+@@ -549,7 +548,7 @@ static inline void bond_propose_link_sta
+
+ static inline void bond_commit_link_state(struct slave *slave, bool notify)
+ {
+- if (slave->link == slave->link_new_state)
++ if (slave->link_new_state == BOND_LINK_NOCHANGE)
+ return;
+
+ slave->link = slave->link_new_state;
--- /dev/null
+From foo@baz Sun 10 Nov 2019 11:41:56 AM CET
+From: Oliver Neukum <oneukum@suse.com>
+Date: Thu, 7 Nov 2019 09:48:01 +0100
+Subject: CDC-NCM: handle incomplete transfer of MTU
+
+From: Oliver Neukum <oneukum@suse.com>
+
+[ Upstream commit 332f989a3b0041b810836c5c3747e59aad7e9d0b ]
+
+A malicious device may give half an answer when asked
+for its MTU. The driver will proceed after this with
+a garbage MTU. Anything but a complete answer must be treated
+as an error.
+
+V2: used sizeof as requested by Alexander
+
+Reported-and-tested-by: syzbot+0631d878823ce2411636@syzkaller.appspotmail.com
+Signed-off-by: Oliver Neukum <oneukum@suse.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/cdc_ncm.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/usb/cdc_ncm.c
++++ b/drivers/net/usb/cdc_ncm.c
+@@ -578,8 +578,8 @@ static void cdc_ncm_set_dgram_size(struc
+ /* read current mtu value from device */
+ err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE,
+ USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
+- 0, iface_no, &max_datagram_size, 2);
+- if (err < 0) {
++ 0, iface_no, &max_datagram_size, sizeof(max_datagram_size));
++ if (err < sizeof(max_datagram_size)) {
+ dev_dbg(&dev->intf->dev, "GET_MAX_DATAGRAM_SIZE failed\n");
+ goto out;
+ }
+@@ -590,7 +590,7 @@ static void cdc_ncm_set_dgram_size(struc
+ max_datagram_size = cpu_to_le16(ctx->max_datagram_size);
+ err = usbnet_write_cmd(dev, USB_CDC_SET_MAX_DATAGRAM_SIZE,
+ USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE,
+- 0, iface_no, &max_datagram_size, 2);
++ 0, iface_no, &max_datagram_size, sizeof(max_datagram_size));
+ if (err < 0)
+ dev_dbg(&dev->intf->dev, "SET_MAX_DATAGRAM_SIZE failed\n");
+
--- /dev/null
+From foo@baz Sun 10 Nov 2019 11:41:56 AM CET
+From: David Ahern <dsahern@kernel.org>
+Date: Thu, 7 Nov 2019 18:29:52 +0000
+Subject: ipv4: Fix table id reference in fib_sync_down_addr
+
+From: David Ahern <dsahern@kernel.org>
+
+[ Upstream commit e0a312629fefa943534fc46f7bfbe6de3fdaf463 ]
+
+Hendrik reported routes in the main table using source address are not
+removed when the address is removed. The problem is that fib_sync_down_addr
+does not account for devices in the default VRF which are associated
+with the main table. Fix by updating the table id reference.
+
+Fixes: 5a56a0b3a45d ("net: Don't delete routes in different VRFs")
+Reported-by: Hendrik Donner <hd@os-cillation.de>
+Signed-off-by: David Ahern <dsahern@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/fib_semantics.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -1814,8 +1814,8 @@ int fib_sync_down_addr(struct net_device
+ int ret = 0;
+ unsigned int hash = fib_laddr_hashfn(local);
+ struct hlist_head *head = &fib_info_laddrhash[hash];
++ int tb_id = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN;
+ struct net *net = dev_net(dev);
+- int tb_id = l3mdev_fib_table(dev);
+ struct fib_info *fi;
+
+ if (!fib_info_laddrhash || local == 0)
--- /dev/null
+From foo@baz Sun 10 Nov 2019 11:41:56 AM CET
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 7 Nov 2019 09:26:19 -0800
+Subject: ipv6: fixes rt6_probe() and fib6_nh->last_probe init
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 1bef4c223b8588cf50433bdc2c6953d82949b3b3 ]
+
+While looking at a syzbot KCSAN report [1], I found multiple
+issues in this code :
+
+1) fib6_nh->last_probe has an initial value of 0.
+
+ While probably okay on 64bit kernels, this causes an issue
+ on 32bit kernels since the time_after(jiffies, 0 + interval)
+ might be false ~24 days after boot (for HZ=1000)
+
+2) The data-race found by KCSAN
+ I could use READ_ONCE() and WRITE_ONCE(), but we also can
+ take the opportunity of not piling-up too many rt6_probe_deferred()
+ works by using instead cmpxchg() so that only one cpu wins the race.
+
+[1]
+BUG: KCSAN: data-race in find_match / find_match
+
+write to 0xffff8880bb7aabe8 of 8 bytes by interrupt on cpu 1:
+ rt6_probe net/ipv6/route.c:663 [inline]
+ find_match net/ipv6/route.c:757 [inline]
+ find_match+0x5bd/0x790 net/ipv6/route.c:733
+ __find_rr_leaf+0xe3/0x780 net/ipv6/route.c:831
+ find_rr_leaf net/ipv6/route.c:852 [inline]
+ rt6_select net/ipv6/route.c:896 [inline]
+ fib6_table_lookup+0x383/0x650 net/ipv6/route.c:2164
+ ip6_pol_route+0xee/0x5c0 net/ipv6/route.c:2200
+ ip6_pol_route_output+0x48/0x60 net/ipv6/route.c:2452
+ fib6_rule_lookup+0x3d6/0x470 net/ipv6/fib6_rules.c:117
+ ip6_route_output_flags_noref+0x16b/0x230 net/ipv6/route.c:2484
+ ip6_route_output_flags+0x50/0x1a0 net/ipv6/route.c:2497
+ ip6_dst_lookup_tail+0x25d/0xc30 net/ipv6/ip6_output.c:1049
+ ip6_dst_lookup_flow+0x68/0x120 net/ipv6/ip6_output.c:1150
+ inet6_csk_route_socket+0x2f7/0x420 net/ipv6/inet6_connection_sock.c:106
+ inet6_csk_xmit+0x91/0x1f0 net/ipv6/inet6_connection_sock.c:121
+ __tcp_transmit_skb+0xe81/0x1d60 net/ipv4/tcp_output.c:1169
+ tcp_transmit_skb net/ipv4/tcp_output.c:1185 [inline]
+ tcp_xmit_probe_skb+0x19b/0x1d0 net/ipv4/tcp_output.c:3735
+
+read to 0xffff8880bb7aabe8 of 8 bytes by interrupt on cpu 0:
+ rt6_probe net/ipv6/route.c:657 [inline]
+ find_match net/ipv6/route.c:757 [inline]
+ find_match+0x521/0x790 net/ipv6/route.c:733
+ __find_rr_leaf+0xe3/0x780 net/ipv6/route.c:831
+ find_rr_leaf net/ipv6/route.c:852 [inline]
+ rt6_select net/ipv6/route.c:896 [inline]
+ fib6_table_lookup+0x383/0x650 net/ipv6/route.c:2164
+ ip6_pol_route+0xee/0x5c0 net/ipv6/route.c:2200
+ ip6_pol_route_output+0x48/0x60 net/ipv6/route.c:2452
+ fib6_rule_lookup+0x3d6/0x470 net/ipv6/fib6_rules.c:117
+ ip6_route_output_flags_noref+0x16b/0x230 net/ipv6/route.c:2484
+ ip6_route_output_flags+0x50/0x1a0 net/ipv6/route.c:2497
+ ip6_dst_lookup_tail+0x25d/0xc30 net/ipv6/ip6_output.c:1049
+ ip6_dst_lookup_flow+0x68/0x120 net/ipv6/ip6_output.c:1150
+ inet6_csk_route_socket+0x2f7/0x420 net/ipv6/inet6_connection_sock.c:106
+ inet6_csk_xmit+0x91/0x1f0 net/ipv6/inet6_connection_sock.c:121
+ __tcp_transmit_skb+0xe81/0x1d60 net/ipv4/tcp_output.c:1169
+
+Reported by Kernel Concurrency Sanitizer on:
+CPU: 0 PID: 18894 Comm: udevd Not tainted 5.4.0-rc3+ #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+
+Fixes: cc3a86c802f0 ("ipv6: Change rt6_probe to take a fib6_nh")
+Fixes: f547fac624be ("ipv6: rate-limit probes for neighbourless routes")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Reviewed-by: David Ahern <dsahern@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/route.c | 13 ++++++++++---
+ 1 file changed, 10 insertions(+), 3 deletions(-)
+
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -621,6 +621,7 @@ static void rt6_probe(struct fib6_nh *fi
+ {
+ struct __rt6_probe_work *work = NULL;
+ const struct in6_addr *nh_gw;
++ unsigned long last_probe;
+ struct neighbour *neigh;
+ struct net_device *dev;
+ struct inet6_dev *idev;
+@@ -639,6 +640,7 @@ static void rt6_probe(struct fib6_nh *fi
+ nh_gw = &fib6_nh->fib_nh_gw6;
+ dev = fib6_nh->fib_nh_dev;
+ rcu_read_lock_bh();
++ last_probe = READ_ONCE(fib6_nh->last_probe);
+ idev = __in6_dev_get(dev);
+ neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
+ if (neigh) {
+@@ -654,13 +656,15 @@ static void rt6_probe(struct fib6_nh *fi
+ __neigh_set_probe_once(neigh);
+ }
+ write_unlock(&neigh->lock);
+- } else if (time_after(jiffies, fib6_nh->last_probe +
++ } else if (time_after(jiffies, last_probe +
+ idev->cnf.rtr_probe_interval)) {
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ }
+
+- if (work) {
+- fib6_nh->last_probe = jiffies;
++ if (!work || cmpxchg(&fib6_nh->last_probe,
++ last_probe, jiffies) != last_probe) {
++ kfree(work);
++ } else {
+ INIT_WORK(&work->work, rt6_probe_deferred);
+ work->target = *nh_gw;
+ dev_hold(dev);
+@@ -3385,6 +3389,9 @@ int fib6_nh_init(struct net *net, struct
+ int err;
+
+ fib6_nh->fib_nh_family = AF_INET6;
++#ifdef CONFIG_IPV6_ROUTER_PREF
++ fib6_nh->last_probe = jiffies;
++#endif
+
+ err = -ENODEV;
+ if (cfg->fc_ifindex) {
--- /dev/null
+From foo@baz Sun 10 Nov 2019 11:41:56 AM CET
+From: Alexander Sverdlin <alexander.sverdlin@nokia.com>
+Date: Fri, 8 Nov 2019 10:00:44 +0000
+Subject: net: ethernet: octeon_mgmt: Account for second possible VLAN header
+
+From: Alexander Sverdlin <alexander.sverdlin@nokia.com>
+
+[ Upstream commit e4dd5608033efe7b6030cde359bfdbaeb73bc22d ]
+
+Octeon's input ring-buffer entry has 14 bits-wide size field, so to account
+for second possible VLAN header max_mtu must be further reduced.
+
+Fixes: 109cc16526c6d ("ethernet/cavium: use core min/max MTU checking")
+Signed-off-by: Alexander Sverdlin <alexander.sverdlin@nokia.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/cavium/octeon/octeon_mgmt.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
++++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+@@ -1499,7 +1499,7 @@ static int octeon_mgmt_probe(struct plat
+ netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;
+
+ netdev->min_mtu = 64 - OCTEON_MGMT_RX_HEADROOM;
+- netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM;
++ netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM - VLAN_HLEN;
+
+ mac = of_get_mac_address(pdev->dev.of_node);
+
--- /dev/null
+From foo@baz Sun 10 Nov 2019 11:41:56 AM CET
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 7 Nov 2019 20:08:19 -0800
+Subject: net: fix data-race in neigh_event_send()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 1b53d64435d56902fc234ff2507142d971a09687 ]
+
+KCSAN reported the following data-race [1]
+
+The fix will also prevent the compiler from optimizing out
+the condition.
+
+[1]
+
+BUG: KCSAN: data-race in neigh_resolve_output / neigh_resolve_output
+
+write to 0xffff8880a41dba78 of 8 bytes by interrupt on cpu 1:
+ neigh_event_send include/net/neighbour.h:443 [inline]
+ neigh_resolve_output+0x78/0x480 net/core/neighbour.c:1474
+ neigh_output include/net/neighbour.h:511 [inline]
+ ip_finish_output2+0x4af/0xe40 net/ipv4/ip_output.c:228
+ __ip_finish_output net/ipv4/ip_output.c:308 [inline]
+ __ip_finish_output+0x23a/0x490 net/ipv4/ip_output.c:290
+ ip_finish_output+0x41/0x160 net/ipv4/ip_output.c:318
+ NF_HOOK_COND include/linux/netfilter.h:294 [inline]
+ ip_output+0xdf/0x210 net/ipv4/ip_output.c:432
+ dst_output include/net/dst.h:436 [inline]
+ ip_local_out+0x74/0x90 net/ipv4/ip_output.c:125
+ __ip_queue_xmit+0x3a8/0xa40 net/ipv4/ip_output.c:532
+ ip_queue_xmit+0x45/0x60 include/net/ip.h:237
+ __tcp_transmit_skb+0xe81/0x1d60 net/ipv4/tcp_output.c:1169
+ tcp_transmit_skb net/ipv4/tcp_output.c:1185 [inline]
+ __tcp_retransmit_skb+0x4bd/0x15f0 net/ipv4/tcp_output.c:2976
+ tcp_retransmit_skb+0x36/0x1a0 net/ipv4/tcp_output.c:2999
+ tcp_retransmit_timer+0x719/0x16d0 net/ipv4/tcp_timer.c:515
+ tcp_write_timer_handler+0x42d/0x510 net/ipv4/tcp_timer.c:598
+ tcp_write_timer+0xd1/0xf0 net/ipv4/tcp_timer.c:618
+
+read to 0xffff8880a41dba78 of 8 bytes by interrupt on cpu 0:
+ neigh_event_send include/net/neighbour.h:442 [inline]
+ neigh_resolve_output+0x57/0x480 net/core/neighbour.c:1474
+ neigh_output include/net/neighbour.h:511 [inline]
+ ip_finish_output2+0x4af/0xe40 net/ipv4/ip_output.c:228
+ __ip_finish_output net/ipv4/ip_output.c:308 [inline]
+ __ip_finish_output+0x23a/0x490 net/ipv4/ip_output.c:290
+ ip_finish_output+0x41/0x160 net/ipv4/ip_output.c:318
+ NF_HOOK_COND include/linux/netfilter.h:294 [inline]
+ ip_output+0xdf/0x210 net/ipv4/ip_output.c:432
+ dst_output include/net/dst.h:436 [inline]
+ ip_local_out+0x74/0x90 net/ipv4/ip_output.c:125
+ __ip_queue_xmit+0x3a8/0xa40 net/ipv4/ip_output.c:532
+ ip_queue_xmit+0x45/0x60 include/net/ip.h:237
+ __tcp_transmit_skb+0xe81/0x1d60 net/ipv4/tcp_output.c:1169
+ tcp_transmit_skb net/ipv4/tcp_output.c:1185 [inline]
+ __tcp_retransmit_skb+0x4bd/0x15f0 net/ipv4/tcp_output.c:2976
+ tcp_retransmit_skb+0x36/0x1a0 net/ipv4/tcp_output.c:2999
+ tcp_retransmit_timer+0x719/0x16d0 net/ipv4/tcp_timer.c:515
+ tcp_write_timer_handler+0x42d/0x510 net/ipv4/tcp_timer.c:598
+
+Reported by Kernel Concurrency Sanitizer on:
+CPU: 0 PID: 0 Comm: swapper/0 Not tainted 5.4.0-rc3+ #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/neighbour.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/include/net/neighbour.h
++++ b/include/net/neighbour.h
+@@ -439,8 +439,8 @@ static inline int neigh_event_send(struc
+ {
+ unsigned long now = jiffies;
+
+- if (neigh->used != now)
+- neigh->used = now;
++ if (READ_ONCE(neigh->used) != now)
++ WRITE_ONCE(neigh->used, now);
+ if (!(neigh->nud_state&(NUD_CONNECTED|NUD_DELAY|NUD_PROBE)))
+ return __neigh_event_send(neigh, skb);
+ return 0;
--- /dev/null
+From foo@baz Sun 10 Nov 2019 11:41:56 AM CET
+From: Salil Mehta <salil.mehta@huawei.com>
+Date: Thu, 7 Nov 2019 17:09:53 +0000
+Subject: net: hns: Fix the stray netpoll locks causing deadlock in NAPI path
+
+From: Salil Mehta <salil.mehta@huawei.com>
+
+[ Upstream commit bf5a6b4c474c589244dc25ee1af2c3c829228ef8 ]
+
+This patch fixes the problem of the spin locks, originally
+meant for the netpoll path of hns driver, causing deadlock in
+the normal NAPI poll path. The issue happened due to the presence
+of the stray leftover spin lock code related to the netpoll,
+whose support was earlier removed from the HNS[1], got activated
+due to enabling of NET_POLL_CONTROLLER switch.
+
+Earlier background:
+The netpoll handling code originally had this bug(as identified
+by Marc Zyngier[2]) of wrong spin lock API being used which did
+not disable the interrupts and hence could cause locking issues.
+i.e. if the lock were first acquired in context to thread like
+'ip' util and this lock if ever got later acquired again in
+context to the interrupt context like TX/RX (Interrupts could
+always pre-empt the lock holding task and acquire the lock again)
+and hence could cause deadlock.
+
+Proposed Solution:
+1. If the netpoll was enabled in the HNS driver, which is not
+ right now, we could have simply used spin_[un]lock_irqsave()
+2. But as netpoll is disabled, therefore, it is best to get rid
+ of the existing locks and stray code for now. This should
+ solve the problem reported by Marc.
+
+[1] https://git.kernel.org/torvalds/c/4bd2c03be7
+[2] https://patchwork.ozlabs.org/patch/1189139/
+
+Fixes: 4bd2c03be707 ("net: hns: remove ndo_poll_controller")
+Cc: lipeng <lipeng321@huawei.com>
+Cc: Yisen Zhuang <yisen.zhuang@huawei.com>
+Cc: Eric Dumazet <edumazet@google.com>
+Cc: David S. Miller <davem@davemloft.net>
+Reported-by: Marc Zyngier <maz@kernel.org>
+Acked-by: Marc Zyngier <maz@kernel.org>
+Tested-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Salil Mehta <salil.mehta@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/hisilicon/hns/hnae.c | 1 -
+ drivers/net/ethernet/hisilicon/hns/hnae.h | 3 ---
+ drivers/net/ethernet/hisilicon/hns/hns_enet.c | 22 +---------------------
+ 3 files changed, 1 insertion(+), 25 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
++++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
+@@ -199,7 +199,6 @@ hnae_init_ring(struct hnae_queue *q, str
+
+ ring->q = q;
+ ring->flags = flags;
+- spin_lock_init(&ring->lock);
+ ring->coal_param = q->handle->coal_param;
+ assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr);
+
+--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
++++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
+@@ -274,9 +274,6 @@ struct hnae_ring {
+ /* statistic */
+ struct ring_stats stats;
+
+- /* ring lock for poll one */
+- spinlock_t lock;
+-
+ dma_addr_t desc_dma_addr;
+ u32 buf_size; /* size for hnae_desc->addr, preset by AE */
+ u16 desc_num; /* total number of desc */
+--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+@@ -943,15 +943,6 @@ static int is_valid_clean_head(struct hn
+ return u > c ? (h > c && h <= u) : (h > c || h <= u);
+ }
+
+-/* netif_tx_lock will turn down the performance, set only when necessary */
+-#ifdef CONFIG_NET_POLL_CONTROLLER
+-#define NETIF_TX_LOCK(ring) spin_lock(&(ring)->lock)
+-#define NETIF_TX_UNLOCK(ring) spin_unlock(&(ring)->lock)
+-#else
+-#define NETIF_TX_LOCK(ring)
+-#define NETIF_TX_UNLOCK(ring)
+-#endif
+-
+ /* reclaim all desc in one budget
+ * return error or number of desc left
+ */
+@@ -965,21 +956,16 @@ static int hns_nic_tx_poll_one(struct hn
+ int head;
+ int bytes, pkts;
+
+- NETIF_TX_LOCK(ring);
+-
+ head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
+ rmb(); /* make sure head is ready before touch any data */
+
+- if (is_ring_empty(ring) || head == ring->next_to_clean) {
+- NETIF_TX_UNLOCK(ring);
++ if (is_ring_empty(ring) || head == ring->next_to_clean)
+ return 0; /* no data to poll */
+- }
+
+ if (!is_valid_clean_head(ring, head)) {
+ netdev_err(ndev, "wrong head (%d, %d-%d)\n", head,
+ ring->next_to_use, ring->next_to_clean);
+ ring->stats.io_err_cnt++;
+- NETIF_TX_UNLOCK(ring);
+ return -EIO;
+ }
+
+@@ -994,8 +980,6 @@ static int hns_nic_tx_poll_one(struct hn
+ ring->stats.tx_pkts += pkts;
+ ring->stats.tx_bytes += bytes;
+
+- NETIF_TX_UNLOCK(ring);
+-
+ dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
+ netdev_tx_completed_queue(dev_queue, pkts, bytes);
+
+@@ -1055,16 +1039,12 @@ static void hns_nic_tx_clr_all_bufs(stru
+ int head;
+ int bytes, pkts;
+
+- NETIF_TX_LOCK(ring);
+-
+ head = ring->next_to_use; /* ntu :soft setted ring position*/
+ bytes = 0;
+ pkts = 0;
+ while (head != ring->next_to_clean)
+ hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
+
+- NETIF_TX_UNLOCK(ring);
+-
+ dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
+ netdev_tx_reset_queue(dev_queue);
+ }
--- /dev/null
+From foo@baz Sun 10 Nov 2019 11:41:56 AM CET
+From: Claudiu Manoil <claudiu.manoil@nxp.com>
+Date: Tue, 5 Nov 2019 23:50:13 +0200
+Subject: net: mscc: ocelot: don't handle netdev events for other netdevs
+
+From: Claudiu Manoil <claudiu.manoil@nxp.com>
+
+[ Upstream commit 7afb3e575e5aa9f5a200a3eb3f45d8130f6d6601 ]
+
+The check that the event is actually for this device should be moved
+from the "port" handler to the net device handler.
+
+Otherwise the port handler will deny bonding configuration for other
+net devices in the same system (like enetc in the LS1028A) that don't
+have the lag_upper_info->tx_type restriction that ocelot has.
+
+Fixes: dc96ee3730fc ("net: mscc: ocelot: add bonding support")
+Signed-off-by: Claudiu Manoil <claudiu.manoil@nxp.com>
+Signed-off-by: Vladimir Oltean <olteanv@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mscc/ocelot.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/mscc/ocelot.c
++++ b/drivers/net/ethernet/mscc/ocelot.c
+@@ -1499,9 +1499,6 @@ static int ocelot_netdevice_port_event(s
+ struct ocelot_port *ocelot_port = netdev_priv(dev);
+ int err = 0;
+
+- if (!ocelot_netdevice_dev_check(dev))
+- return 0;
+-
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ if (netif_is_bridge_master(info->upper_dev)) {
+@@ -1538,6 +1535,9 @@ static int ocelot_netdevice_event(struct
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ int ret = 0;
+
++ if (!ocelot_netdevice_dev_check(dev))
++ return 0;
++
+ if (event == NETDEV_PRECHANGEUPPER &&
+ netif_is_lag_master(info->upper_dev)) {
+ struct netdev_lag_upper_info *lag_upper_info = info->upper_info;
--- /dev/null
+From foo@baz Sun 10 Nov 2019 11:41:56 AM CET
+From: Claudiu Manoil <claudiu.manoil@nxp.com>
+Date: Tue, 5 Nov 2019 23:50:14 +0200
+Subject: net: mscc: ocelot: fix NULL pointer on LAG slave removal
+
+From: Claudiu Manoil <claudiu.manoil@nxp.com>
+
+[ Upstream commit 3b3eed8eec47259939ee6c3d58aea1c311ddee3b ]
+
+lag_upper_info may be NULL on slave removal.
+
+Fixes: dc96ee3730fc ("net: mscc: ocelot: add bonding support")
+Signed-off-by: Claudiu Manoil <claudiu.manoil@nxp.com>
+Signed-off-by: Vladimir Oltean <olteanv@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mscc/ocelot.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/mscc/ocelot.c
++++ b/drivers/net/ethernet/mscc/ocelot.c
+@@ -1543,7 +1543,8 @@ static int ocelot_netdevice_event(struct
+ struct netdev_lag_upper_info *lag_upper_info = info->upper_info;
+ struct netlink_ext_ack *extack;
+
+- if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
++ if (lag_upper_info &&
++ lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+ extack = netdev_notifier_info_to_extack(&info->info);
+ NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
+
--- /dev/null
+From foo@baz Sun 10 Nov 2019 11:41:56 AM CET
+From: Eric Dumazet <edumazet@google.com>
+Date: Mon, 4 Nov 2019 21:38:43 -0800
+Subject: net: prevent load/store tearing on sk->sk_stamp
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit f75359f3ac855940c5718af10ba089b8977bf339 ]
+
+Add a couple of READ_ONCE() and WRITE_ONCE() to prevent
+load-tearing and store-tearing in sock_read_timestamp()
+and sock_write_timestamp()
+
+This might prevent another KCSAN report.
+
+Fixes: 3a0ed3e96197 ("sock: Make sock->sk_stamp thread-safe")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Deepa Dinamani <deepa.kernel@gmail.com>
+Acked-by: Deepa Dinamani <deepa.kernel@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/sock.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -2331,7 +2331,7 @@ static inline ktime_t sock_read_timestam
+
+ return kt;
+ #else
+- return sk->sk_stamp;
++ return READ_ONCE(sk->sk_stamp);
+ #endif
+ }
+
+@@ -2342,7 +2342,7 @@ static inline void sock_write_timestamp(
+ sk->sk_stamp = kt;
+ write_sequnlock(&sk->sk_stamp_seq);
+ #else
+- sk->sk_stamp = kt;
++ WRITE_ONCE(sk->sk_stamp, kt);
+ #endif
+ }
+
--- /dev/null
+From foo@baz Sun 10 Nov 2019 11:41:56 AM CET
+From: Sean Tranchetti <stranche@codeaurora.org>
+Date: Mon, 4 Nov 2019 17:54:22 -0700
+Subject: net: qualcomm: rmnet: Fix potential UAF when unregistering
+
+From: Sean Tranchetti <stranche@codeaurora.org>
+
+[ Upstream commit e7a86c687e64ab24f88330ad24ecc9442ce40c5a ]
+
+During the exit/unregistration process of the RmNet driver, the function
+rmnet_unregister_real_device() is called to handle freeing the driver's
+internal state and removing the RX handler on the underlying physical
+device. However, the order of operations this function performs is wrong
+and can lead to a use after free of the rmnet_port structure.
+
+Before calling netdev_rx_handler_unregister(), this port structure is
+freed with kfree(). If packets are received on any RmNet devices before
+synchronize_net() completes, they will attempt to use this already-freed
+port structure when processing the packet. As such, before cleaning up any
+other internal state, the RX handler must be unregistered in order to
+guarantee that no further packets will arrive on the device.
+
+Fixes: ceed73a2cf4a ("drivers: net: ethernet: qualcomm: rmnet: Initial implementation")
+Signed-off-by: Sean Tranchetti <stranche@codeaurora.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
++++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+@@ -57,10 +57,10 @@ static int rmnet_unregister_real_device(
+ if (port->nr_rmnet_devs)
+ return -EINVAL;
+
+- kfree(port);
+-
+ netdev_rx_handler_unregister(real_dev);
+
++ kfree(port);
++
+ /* release reference on real_dev */
+ dev_put(real_dev);
+
--- /dev/null
+From foo@baz Sun 10 Nov 2019 11:41:56 AM CET
+From: John Hurley <john.hurley@netronome.com>
+Date: Sat, 2 Nov 2019 14:17:47 +0000
+Subject: net: sched: prevent duplicate flower rules from tcf_proto destroy race
+
+From: John Hurley <john.hurley@netronome.com>
+
+[ Upstream commit 59eb87cb52c9f7164804bc8639c4d03ba9b0c169 ]
+
+When a new filter is added to cls_api, the function
+tcf_chain_tp_insert_unique() looks up the protocol/priority/chain to
+determine if the tcf_proto is duplicated in the chain's hashtable. It then
+creates a new entry or continues with an existing one. In cls_flower, this
+allows the function fl_ht_insert_unique to determine if a filter is a
+duplicate and reject appropriately, meaning that the duplicate will not be
+passed to drivers via the offload hooks. However, when a tcf_proto is
+destroyed it is removed from its chain before a hardware remove hook is
+hit. This can lead to a race whereby the driver has not received the
+remove message but duplicate flows can be accepted. This, in turn, can
+lead to the offload driver receiving incorrect duplicate flows and out of
+order add/delete messages.
+
+Prevent duplicates by utilising an approach suggested by Vlad Buslov. A
+hash table per block stores each unique chain/protocol/prio being
+destroyed. This entry is only removed when the full destroy (and hardware
+offload) has completed. If a new flow is being added with the same
+identifiers as a tc_proto being destroyed, then the add request is replayed
+until the destroy is complete.
+
+Fixes: 8b64678e0af8 ("net: sched: refactor tp insert/delete for concurrent execution")
+Signed-off-by: John Hurley <john.hurley@netronome.com>
+Signed-off-by: Vlad Buslov <vladbu@mellanox.com>
+Reviewed-by: Simon Horman <simon.horman@netronome.com>
+Reported-by: Louis Peens <louis.peens@netronome.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/sch_generic.h | 4 ++
+ net/sched/cls_api.c | 83 +++++++++++++++++++++++++++++++++++++++++++---
+ 2 files changed, 83 insertions(+), 4 deletions(-)
+
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -13,6 +13,7 @@
+ #include <linux/refcount.h>
+ #include <linux/workqueue.h>
+ #include <linux/mutex.h>
++#include <linux/hashtable.h>
+ #include <net/gen_stats.h>
+ #include <net/rtnetlink.h>
+ #include <net/flow_offload.h>
+@@ -359,6 +360,7 @@ struct tcf_proto {
+ bool deleting;
+ refcount_t refcnt;
+ struct rcu_head rcu;
++ struct hlist_node destroy_ht_node;
+ };
+
+ struct qdisc_skb_cb {
+@@ -409,6 +411,8 @@ struct tcf_block {
+ struct list_head filter_chain_list;
+ } chain0;
+ struct rcu_head rcu;
++ DECLARE_HASHTABLE(proto_destroy_ht, 7);
++ struct mutex proto_destroy_lock; /* Lock for proto_destroy hashtable. */
+ };
+
+ #ifdef CONFIG_PROVE_LOCKING
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -21,6 +21,7 @@
+ #include <linux/slab.h>
+ #include <linux/idr.h>
+ #include <linux/rhashtable.h>
++#include <linux/jhash.h>
+ #include <net/net_namespace.h>
+ #include <net/sock.h>
+ #include <net/netlink.h>
+@@ -45,6 +46,62 @@ static LIST_HEAD(tcf_proto_base);
+ /* Protects list of registered TC modules. It is pure SMP lock. */
+ static DEFINE_RWLOCK(cls_mod_lock);
+
++static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
++{
++ return jhash_3words(tp->chain->index, tp->prio,
++ (__force __u32)tp->protocol, 0);
++}
++
++static void tcf_proto_signal_destroying(struct tcf_chain *chain,
++ struct tcf_proto *tp)
++{
++ struct tcf_block *block = chain->block;
++
++ mutex_lock(&block->proto_destroy_lock);
++ hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
++ destroy_obj_hashfn(tp));
++ mutex_unlock(&block->proto_destroy_lock);
++}
++
++static bool tcf_proto_cmp(const struct tcf_proto *tp1,
++ const struct tcf_proto *tp2)
++{
++ return tp1->chain->index == tp2->chain->index &&
++ tp1->prio == tp2->prio &&
++ tp1->protocol == tp2->protocol;
++}
++
++static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
++ struct tcf_proto *tp)
++{
++ u32 hash = destroy_obj_hashfn(tp);
++ struct tcf_proto *iter;
++ bool found = false;
++
++ rcu_read_lock();
++ hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
++ destroy_ht_node, hash) {
++ if (tcf_proto_cmp(tp, iter)) {
++ found = true;
++ break;
++ }
++ }
++ rcu_read_unlock();
++
++ return found;
++}
++
++static void
++tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
++{
++ struct tcf_block *block = chain->block;
++
++ mutex_lock(&block->proto_destroy_lock);
++ if (hash_hashed(&tp->destroy_ht_node))
++ hash_del_rcu(&tp->destroy_ht_node);
++ mutex_unlock(&block->proto_destroy_lock);
++}
++
+ /* Find classifier type by string name */
+
+ static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
+@@ -232,9 +289,11 @@ static void tcf_proto_get(struct tcf_pro
+ static void tcf_chain_put(struct tcf_chain *chain);
+
+ static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
+- struct netlink_ext_ack *extack)
++ bool sig_destroy, struct netlink_ext_ack *extack)
+ {
+ tp->ops->destroy(tp, rtnl_held, extack);
++ if (sig_destroy)
++ tcf_proto_signal_destroyed(tp->chain, tp);
+ tcf_chain_put(tp->chain);
+ module_put(tp->ops->owner);
+ kfree_rcu(tp, rcu);
+@@ -244,7 +303,7 @@ static void tcf_proto_put(struct tcf_pro
+ struct netlink_ext_ack *extack)
+ {
+ if (refcount_dec_and_test(&tp->refcnt))
+- tcf_proto_destroy(tp, rtnl_held, extack);
++ tcf_proto_destroy(tp, rtnl_held, true, extack);
+ }
+
+ static int walker_check_empty(struct tcf_proto *tp, void *fh,
+@@ -368,6 +427,7 @@ static bool tcf_chain_detach(struct tcf_
+ static void tcf_block_destroy(struct tcf_block *block)
+ {
+ mutex_destroy(&block->lock);
++ mutex_destroy(&block->proto_destroy_lock);
+ kfree_rcu(block, rcu);
+ }
+
+@@ -543,6 +603,12 @@ static void tcf_chain_flush(struct tcf_c
+
+ mutex_lock(&chain->filter_chain_lock);
+ tp = tcf_chain_dereference(chain->filter_chain, chain);
++ while (tp) {
++ tp_next = rcu_dereference_protected(tp->next, 1);
++ tcf_proto_signal_destroying(chain, tp);
++ tp = tp_next;
++ }
++ tp = tcf_chain_dereference(chain->filter_chain, chain);
+ RCU_INIT_POINTER(chain->filter_chain, NULL);
+ tcf_chain0_head_change(chain, NULL);
+ chain->flushing = true;
+@@ -1002,6 +1068,7 @@ static struct tcf_block *tcf_block_creat
+ return ERR_PTR(-ENOMEM);
+ }
+ mutex_init(&block->lock);
++ mutex_init(&block->proto_destroy_lock);
+ flow_block_init(&block->flow_block);
+ INIT_LIST_HEAD(&block->chain_list);
+ INIT_LIST_HEAD(&block->owner_list);
+@@ -1754,6 +1821,12 @@ static struct tcf_proto *tcf_chain_tp_in
+
+ mutex_lock(&chain->filter_chain_lock);
+
++ if (tcf_proto_exists_destroying(chain, tp_new)) {
++ mutex_unlock(&chain->filter_chain_lock);
++ tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
++ return ERR_PTR(-EAGAIN);
++ }
++
+ tp = tcf_chain_tp_find(chain, &chain_info,
+ protocol, prio, false);
+ if (!tp)
+@@ -1761,10 +1834,10 @@ static struct tcf_proto *tcf_chain_tp_in
+ mutex_unlock(&chain->filter_chain_lock);
+
+ if (tp) {
+- tcf_proto_destroy(tp_new, rtnl_held, NULL);
++ tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
+ tp_new = tp;
+ } else if (err) {
+- tcf_proto_destroy(tp_new, rtnl_held, NULL);
++ tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
+ tp_new = ERR_PTR(err);
+ }
+
+@@ -1802,6 +1875,7 @@ static void tcf_chain_tp_delete_empty(st
+ return;
+ }
+
++ tcf_proto_signal_destroying(chain, tp);
+ next = tcf_chain_dereference(chain_info.next, chain);
+ if (tp == chain->filter_chain)
+ tcf_chain0_head_change(chain, next);
+@@ -2321,6 +2395,7 @@ static int tc_del_tfilter(struct sk_buff
+ err = -EINVAL;
+ goto errout_locked;
+ } else if (t->tcm_handle == 0) {
++ tcf_proto_signal_destroying(chain, tp);
+ tcf_chain_tp_remove(chain, &chain_info, tp);
+ mutex_unlock(&chain->filter_chain_lock);
+
--- /dev/null
+From foo@baz Sun 10 Nov 2019 11:41:56 AM CET
+From: Ursula Braun <ubraun@linux.ibm.com>
+Date: Wed, 6 Nov 2019 10:49:57 +0100
+Subject: net/smc: fix ethernet interface refcounting
+
+From: Ursula Braun <ubraun@linux.ibm.com>
+
+[ Upstream commit 98f3375505b8d6517bd6710bc6d4f6289eeb30aa ]
+
+If a pnet table entry is to be added mentioning a valid ethernet
+interface, but an invalid infiniband or ISM device, the dev_put()
+operation for the ethernet interface is called twice, resulting
+in a negative refcount for the ethernet interface, which disables
+removal of such a network interface.
+
+This patch removes one of the dev_put() calls.
+
+Fixes: 890a2cb4a966 ("net/smc: rework pnet table")
+Signed-off-by: Ursula Braun <ubraun@linux.ibm.com>
+Signed-off-by: Karsten Graul <kgraul@linux.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/smc/smc_pnet.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/net/smc/smc_pnet.c
++++ b/net/smc/smc_pnet.c
+@@ -376,8 +376,6 @@ static int smc_pnet_fill_entry(struct ne
+ return 0;
+
+ error:
+- if (pnetelem->ndev)
+- dev_put(pnetelem->ndev);
+ return rc;
+ }
+
--- /dev/null
+From foo@baz Sun 10 Nov 2019 11:41:56 AM CET
+From: Jakub Kicinski <jakub.kicinski@netronome.com>
+Date: Tue, 5 Nov 2019 14:24:35 -0800
+Subject: net/tls: add a TX lock
+
+From: Jakub Kicinski <jakub.kicinski@netronome.com>
+
+[ Upstream commit 79ffe6087e9145d2377385cac48d0d6a6b4225a5 ]
+
+TLS TX needs to release and re-acquire the socket lock if send buffer
+fills up.
+
+TLS SW TX path currently depends on only allowing one thread to enter
+the function by the abuse of sk_write_pending. If another writer is
+already waiting for memory no new ones are allowed in.
+
+This has two problems:
+ - writers don't wake other threads up when they leave the kernel;
+ meaning that this scheme works for single extra thread (second
+ application thread or delayed work) because memory becoming
+ available will send a wake up request, but as Mallesham and
+ Pooja report with larger number of threads it leads to threads
+ being put to sleep indefinitely;
+ - the delayed work does not get _scheduled_ but it may _run_ when
+ other writers are present leading to crashes as writers don't
+ expect state to change under their feet (same records get pushed
+ and freed multiple times); it's hard to reliably bail from the
+ work, however, because the mere presence of a writer does not
+ guarantee that the writer will push pending records before exiting.
+
+Ensuring wakeups always happen will make the code basically open
+code a mutex. Just use a mutex.
+
+The TLS HW TX path does not have any locking (not even the
+sk_write_pending hack), yet it uses a per-socket sg_tx_data
+array to push records.
+
+Fixes: a42055e8d2c3 ("net/tls: Add support for async encryption of records for performance")
+Reported-by: Mallesham Jatharakonda <mallesh537@gmail.com>
+Reported-by: Pooja Trivedi <poojatrivedi@gmail.com>
+Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
+Reviewed-by: Simon Horman <simon.horman@netronome.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/tls.h | 5 +++++
+ net/tls/tls_device.c | 6 ++++++
+ net/tls/tls_main.c | 2 ++
+ net/tls/tls_sw.c | 21 +++++++--------------
+ 4 files changed, 20 insertions(+), 14 deletions(-)
+
+--- a/include/net/tls.h
++++ b/include/net/tls.h
+@@ -40,6 +40,7 @@
+ #include <linux/socket.h>
+ #include <linux/tcp.h>
+ #include <linux/skmsg.h>
++#include <linux/mutex.h>
+ #include <linux/netdevice.h>
+
+ #include <net/tcp.h>
+@@ -268,6 +269,10 @@ struct tls_context {
+
+ bool in_tcp_sendpages;
+ bool pending_open_record_frags;
++
++ struct mutex tx_lock; /* protects partially_sent_* fields and
++ * per-type TX fields
++ */
+ unsigned long flags;
+
+ /* cache cold stuff */
+--- a/net/tls/tls_device.c
++++ b/net/tls/tls_device.c
+@@ -482,8 +482,10 @@ last_record:
+ int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
+ {
+ unsigned char record_type = TLS_RECORD_TYPE_DATA;
++ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ int rc;
+
++ mutex_lock(&tls_ctx->tx_lock);
+ lock_sock(sk);
+
+ if (unlikely(msg->msg_controllen)) {
+@@ -497,12 +499,14 @@ int tls_device_sendmsg(struct sock *sk,
+
+ out:
+ release_sock(sk);
++ mutex_unlock(&tls_ctx->tx_lock);
+ return rc;
+ }
+
+ int tls_device_sendpage(struct sock *sk, struct page *page,
+ int offset, size_t size, int flags)
+ {
++ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct iov_iter msg_iter;
+ char *kaddr = kmap(page);
+ struct kvec iov;
+@@ -511,6 +515,7 @@ int tls_device_sendpage(struct sock *sk,
+ if (flags & MSG_SENDPAGE_NOTLAST)
+ flags |= MSG_MORE;
+
++ mutex_lock(&tls_ctx->tx_lock);
+ lock_sock(sk);
+
+ if (flags & MSG_OOB) {
+@@ -527,6 +532,7 @@ int tls_device_sendpage(struct sock *sk,
+
+ out:
+ release_sock(sk);
++ mutex_unlock(&tls_ctx->tx_lock);
+ return rc;
+ }
+
+--- a/net/tls/tls_main.c
++++ b/net/tls/tls_main.c
+@@ -258,6 +258,7 @@ void tls_ctx_free(struct tls_context *ct
+
+ memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send));
+ memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv));
++ mutex_destroy(&ctx->tx_lock);
+ kfree(ctx);
+ }
+
+@@ -615,6 +616,7 @@ static struct tls_context *create_ctx(st
+ ctx->getsockopt = sk->sk_prot->getsockopt;
+ ctx->sk_proto_close = sk->sk_prot->close;
+ ctx->unhash = sk->sk_prot->unhash;
++ mutex_init(&ctx->tx_lock);
+ return ctx;
+ }
+
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -897,15 +897,9 @@ int tls_sw_sendmsg(struct sock *sk, stru
+ if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
+ return -ENOTSUPP;
+
++ mutex_lock(&tls_ctx->tx_lock);
+ lock_sock(sk);
+
+- /* Wait till there is any pending write on socket */
+- if (unlikely(sk->sk_write_pending)) {
+- ret = wait_on_pending_writer(sk, &timeo);
+- if (unlikely(ret))
+- goto send_end;
+- }
+-
+ if (unlikely(msg->msg_controllen)) {
+ ret = tls_proccess_cmsg(sk, msg, &record_type);
+ if (ret) {
+@@ -1091,6 +1085,7 @@ send_end:
+ ret = sk_stream_error(sk, msg->msg_flags, ret);
+
+ release_sock(sk);
++ mutex_unlock(&tls_ctx->tx_lock);
+ return copied ? copied : ret;
+ }
+
+@@ -1114,13 +1109,6 @@ static int tls_sw_do_sendpage(struct soc
+ eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));
+ sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
+
+- /* Wait till there is any pending write on socket */
+- if (unlikely(sk->sk_write_pending)) {
+- ret = wait_on_pending_writer(sk, &timeo);
+- if (unlikely(ret))
+- goto sendpage_end;
+- }
+-
+ /* Call the sk_stream functions to manage the sndbuf mem. */
+ while (size > 0) {
+ size_t copy, required_size;
+@@ -1219,15 +1207,18 @@ sendpage_end:
+ int tls_sw_sendpage(struct sock *sk, struct page *page,
+ int offset, size_t size, int flags)
+ {
++ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ int ret;
+
+ if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
+ MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
+ return -ENOTSUPP;
+
++ mutex_lock(&tls_ctx->tx_lock);
+ lock_sock(sk);
+ ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
+ release_sock(sk);
++ mutex_unlock(&tls_ctx->tx_lock);
+ return ret;
+ }
+
+@@ -2172,9 +2163,11 @@ static void tx_work_handler(struct work_
+
+ if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
+ return;
++ mutex_lock(&tls_ctx->tx_lock);
+ lock_sock(sk);
+ tls_tx_records(sk, -1);
+ release_sock(sk);
++ mutex_unlock(&tls_ctx->tx_lock);
+ }
+
+ void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
--- /dev/null
+From foo@baz Sun 10 Nov 2019 11:41:56 AM CET
+From: Jakub Kicinski <jakub.kicinski@netronome.com>
+Date: Tue, 5 Nov 2019 14:24:34 -0800
+Subject: net/tls: don't pay attention to sk_write_pending when pushing partial records
+
+From: Jakub Kicinski <jakub.kicinski@netronome.com>
+
+[ Upstream commit 02b1fa07bb58f5d1f349b5b09eb936739a7b20fc ]
+
+sk_write_pending being not zero does not guarantee that partial
+record will be pushed. If the thread waiting for memory times out
+the pending record may get stuck.
+
+In case of tls_device there is no path where partial record is
+set and writer present in the first place. Partial record is
+set only in tls_push_sg() and tls_push_sg() will return an
+error immediately. All tls_device callers of tls_push_sg()
+will return (and not wait for memory) if it failed.
+
+Fixes: a42055e8d2c3 ("net/tls: Add support for async encryption of records for performance")
+Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
+Reviewed-by: Simon Horman <simon.horman@netronome.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tls/tls_device.c | 4 +++-
+ net/tls/tls_sw.c | 9 +++------
+ 2 files changed, 6 insertions(+), 7 deletions(-)
+
+--- a/net/tls/tls_device.c
++++ b/net/tls/tls_device.c
+@@ -575,9 +575,11 @@ static int tls_device_push_pending_recor
+
+ void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
+ {
+- if (!sk->sk_write_pending && tls_is_partially_sent_record(ctx)) {
++ if (tls_is_partially_sent_record(ctx)) {
+ gfp_t sk_allocation = sk->sk_allocation;
+
++ WARN_ON_ONCE(sk->sk_write_pending);
++
+ sk->sk_allocation = GFP_ATOMIC;
+ tls_push_partial_record(sk, ctx,
+ MSG_DONTWAIT | MSG_NOSIGNAL |
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -2182,12 +2182,9 @@ void tls_sw_write_space(struct sock *sk,
+ struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
+
+ /* Schedule the transmission if tx list is ready */
+- if (is_tx_ready(tx_ctx) && !sk->sk_write_pending) {
+- /* Schedule the transmission */
+- if (!test_and_set_bit(BIT_TX_SCHEDULED,
+- &tx_ctx->tx_bitmask))
+- schedule_delayed_work(&tx_ctx->tx_work.work, 0);
+- }
++ if (is_tx_ready(tx_ctx) &&
++ !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
++ schedule_delayed_work(&tx_ctx->tx_work.work, 0);
+ }
+
+ void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
--- /dev/null
+From foo@baz Sun 10 Nov 2019 11:41:56 AM CET
+From: Jakub Kicinski <jakub.kicinski@netronome.com>
+Date: Mon, 4 Nov 2019 15:36:57 -0800
+Subject: net/tls: fix sk_msg trim on fallback to copy mode
+
+From: Jakub Kicinski <jakub.kicinski@netronome.com>
+
+[ Upstream commit 683916f6a84023407761d843048f1aea486b2612 ]
+
+sk_msg_trim() tries to only update curr pointer if it falls into
+the trimmed region. The logic, however, does not take into the
+account pointer wrapping that sk_msg_iter_var_prev() does nor
+(as John points out) the fact that msg->sg is a ring buffer.
+
+This means that when the message was trimmed completely, the new
+curr pointer would have the value of MAX_MSG_FRAGS - 1, which is
+neither smaller than any other value, nor would it actually be
+correct.
+
+Special case the trimming to 0 length a little bit and rework
+the comparison between curr and end to take into account wrapping.
+
+This bug caused the TLS code to not copy all of the message, if
+zero copy filled in fewer sg entries than memcopy would need.
+
+Big thanks to Alexander Potapenko for the non-KMSAN reproducer.
+
+v2:
+ - take into account that msg->sg is a ring buffer (John).
+
+Link: https://lore.kernel.org/netdev/20191030160542.30295-1-jakub.kicinski@netronome.com/ (v1)
+
+Fixes: d829e9c4112b ("tls: convert to generic sk_msg interface")
+Reported-by: syzbot+f8495bff23a879a6d0bd@syzkaller.appspotmail.com
+Reported-by: syzbot+6f50c99e8f6194bf363f@syzkaller.appspotmail.com
+Co-developed-by: John Fastabend <john.fastabend@gmail.com>
+Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
+Signed-off-by: John Fastabend <john.fastabend@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/skmsg.h | 9 ++++++---
+ net/core/skmsg.c | 20 +++++++++++++++-----
+ 2 files changed, 21 insertions(+), 8 deletions(-)
+
+--- a/include/linux/skmsg.h
++++ b/include/linux/skmsg.h
+@@ -139,6 +139,11 @@ static inline void sk_msg_apply_bytes(st
+ }
+ }
+
++static inline u32 sk_msg_iter_dist(u32 start, u32 end)
++{
++ return end >= start ? end - start : end + (MAX_MSG_FRAGS - start);
++}
++
+ #define sk_msg_iter_var_prev(var) \
+ do { \
+ if (var == 0) \
+@@ -198,9 +203,7 @@ static inline u32 sk_msg_elem_used(const
+ if (sk_msg_full(msg))
+ return MAX_MSG_FRAGS;
+
+- return msg->sg.end >= msg->sg.start ?
+- msg->sg.end - msg->sg.start :
+- msg->sg.end + (MAX_MSG_FRAGS - msg->sg.start);
++ return sk_msg_iter_dist(msg->sg.start, msg->sg.end);
+ }
+
+ static inline struct scatterlist *sk_msg_elem(struct sk_msg *msg, int which)
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -271,18 +271,28 @@ void sk_msg_trim(struct sock *sk, struct
+
+ msg->sg.data[i].length -= trim;
+ sk_mem_uncharge(sk, trim);
++ /* Adjust copybreak if it falls into the trimmed part of last buf */
++ if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
++ msg->sg.copybreak = msg->sg.data[i].length;
+ out:
+- /* If we trim data before curr pointer update copybreak and current
+- * so that any future copy operations start at new copy location.
++ sk_msg_iter_var_next(i);
++ msg->sg.end = i;
++
++ /* If we trim data a full sg elem before curr pointer update
++ * copybreak and current so that any future copy operations
++ * start at new copy location.
+ * However trimed data that has not yet been used in a copy op
+ * does not require an update.
+ */
+- if (msg->sg.curr >= i) {
++ if (!msg->sg.size) {
++ msg->sg.curr = msg->sg.start;
++ msg->sg.copybreak = 0;
++ } else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
++ sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
++ sk_msg_iter_var_prev(i);
+ msg->sg.curr = i;
+ msg->sg.copybreak = msg->sg.data[i].length;
+ }
+- sk_msg_iter_var_next(i);
+- msg->sg.end = i;
+ }
+ EXPORT_SYMBOL_GPL(sk_msg_trim);
+
--- /dev/null
+From foo@baz Sun 10 Nov 2019 11:41:56 AM CET
+From: Aleksander Morgado <aleksander@aleksander.es>
+Date: Thu, 7 Nov 2019 11:57:01 +0100
+Subject: net: usb: qmi_wwan: add support for DW5821e with eSIM support
+
+From: Aleksander Morgado <aleksander@aleksander.es>
+
+[ Upstream commit e497df686e8fed8c1dd69179010656362858edb3 ]
+
+Exactly same layout as the default DW5821e module, just a different
+vid/pid.
+
+The QMI interface is exposed in USB configuration #1:
+
+P: Vendor=413c ProdID=81e0 Rev=03.18
+S: Manufacturer=Dell Inc.
+S: Product=DW5821e-eSIM Snapdragon X20 LTE
+S: SerialNumber=0123456789ABCDEF
+C: #Ifs= 6 Cfg#= 1 Atr=a0 MxPwr=500mA
+I: If#=0x0 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=ff Driver=qmi_wwan
+I: If#=0x1 Alt= 0 #EPs= 1 Cls=03(HID ) Sub=00 Prot=00 Driver=usbhid
+I: If#=0x2 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+I: If#=0x3 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+I: If#=0x4 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+I: If#=0x5 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=ff Driver=option
+
+Signed-off-by: Aleksander Morgado <aleksander@aleksander.es>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/qmi_wwan.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1361,6 +1361,7 @@ static const struct usb_device_id produc
+ {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
+ {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
+ {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
++ {QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/
+ {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
+ {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
+ {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
--- /dev/null
+From foo@baz Sun 10 Nov 2019 11:41:56 AM CET
+From: Pan Bian <bianpan2016@163.com>
+Date: Tue, 5 Nov 2019 16:34:07 +0800
+Subject: NFC: fdp: fix incorrect free object
+
+From: Pan Bian <bianpan2016@163.com>
+
+[ Upstream commit 517ce4e93368938b204451285e53014549804868 ]
+
+The address of fw_vsc_cfg is on stack. Releasing it with devm_kfree() is
+incorrect, which may result in a system crash or other security impacts.
+The expected object to free is *fw_vsc_cfg.
+
+Signed-off-by: Pan Bian <bianpan2016@163.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/nfc/fdp/i2c.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/nfc/fdp/i2c.c
++++ b/drivers/nfc/fdp/i2c.c
+@@ -259,7 +259,7 @@ static void fdp_nci_i2c_read_device_prop
+ *fw_vsc_cfg, len);
+
+ if (r) {
+- devm_kfree(dev, fw_vsc_cfg);
++ devm_kfree(dev, *fw_vsc_cfg);
+ goto vsc_read_err;
+ }
+ } else {
--- /dev/null
+From foo@baz Sun 10 Nov 2019 11:41:56 AM CET
+From: Pan Bian <bianpan2016@163.com>
+Date: Thu, 7 Nov 2019 14:29:50 +0800
+Subject: nfc: netlink: fix double device reference drop
+
+From: Pan Bian <bianpan2016@163.com>
+
+[ Upstream commit 025ec40b81d785a98f76b8bdb509ac10773b4f12 ]
+
+The function nfc_put_device(dev) is called twice to drop the reference
+to dev when there is no associated local llcp. Remove one of them to fix
+the bug.
+
+Fixes: 52feb444a903 ("NFC: Extend netlink interface for LTO, RW, and MIUX parameters support")
+Fixes: d9b8d8e19b07 ("NFC: llcp: Service Name Lookup netlink interface")
+Signed-off-by: Pan Bian <bianpan2016@163.com>
+Reviewed-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/nfc/netlink.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/net/nfc/netlink.c
++++ b/net/nfc/netlink.c
+@@ -1099,7 +1099,6 @@ static int nfc_genl_llc_set_params(struc
+
+ local = nfc_llcp_find_local(dev);
+ if (!local) {
+- nfc_put_device(dev);
+ rc = -ENODEV;
+ goto exit;
+ }
+@@ -1159,7 +1158,6 @@ static int nfc_genl_llc_sdreq(struct sk_
+
+ local = nfc_llcp_find_local(dev);
+ if (!local) {
+- nfc_put_device(dev);
+ rc = -ENODEV;
+ goto exit;
+ }
--- /dev/null
+From foo@baz Sun 10 Nov 2019 11:41:56 AM CET
+From: Pan Bian <bianpan2016@163.com>
+Date: Thu, 7 Nov 2019 09:33:20 +0800
+Subject: NFC: st21nfca: fix double free
+
+From: Pan Bian <bianpan2016@163.com>
+
+[ Upstream commit 99a8efbb6e30b72ac98cecf81103f847abffb1e5 ]
+
+The variable nfcid_skb is not changed in the callee nfc_hci_get_param()
+if error occurs. Consequently, the freed variable nfcid_skb will be
+freed again, resulting in a double free bug. Set nfcid_skb to NULL after
+releasing it to fix the bug.
+
+Signed-off-by: Pan Bian <bianpan2016@163.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/nfc/st21nfca/core.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/nfc/st21nfca/core.c
++++ b/drivers/nfc/st21nfca/core.c
+@@ -708,6 +708,7 @@ static int st21nfca_hci_complete_target_
+ NFC_PROTO_FELICA_MASK;
+ } else {
+ kfree_skb(nfcid_skb);
++ nfcid_skb = NULL;
+ /* P2P in type A */
+ r = nfc_hci_get_param(hdev, ST21NFCA_RF_READER_F_GATE,
+ ST21NFCA_RF_READER_F_NFCID1,
--- /dev/null
+From foo@baz Sun 10 Nov 2019 11:41:56 AM CET
+From: Manish Chopra <manishc@marvell.com>
+Date: Fri, 8 Nov 2019 02:42:30 -0800
+Subject: qede: fix NULL pointer deref in __qede_remove()
+
+From: Manish Chopra <manishc@marvell.com>
+
+[ Upstream commit deabc87111c690097c03765ea017cd500f7376fc ]
+
+While rebooting the system with SR-IOV vfs enabled leads
+to below crash due to recurrence of __qede_remove() on the VF
+devices (first from .shutdown() flow of the VF itself and
+another from PF's .shutdown() flow executing pci_disable_sriov())
+
+This patch adds a safeguard in __qede_remove() flow to fix this,
+so that driver doesn't attempt to remove "already removed" devices.
+
+[ 194.360134] BUG: unable to handle kernel NULL pointer dereference at 00000000000008dc
+[ 194.360227] IP: [<ffffffffc03553c4>] __qede_remove+0x24/0x130 [qede]
+[ 194.360304] PGD 0
+[ 194.360325] Oops: 0000 [#1] SMP
+[ 194.360360] Modules linked in: tcp_lp fuse tun bridge stp llc devlink bonding ip_set nfnetlink ib_isert iscsi_target_mod ib_srpt target_core_mod ib_srp scsi_transport_srp scsi_tgt ib_ipoib ib_umad rpcrdma sunrpc rdma_ucm ib_uverbs ib_iser rdma_cm iw_cm ib_cm libiscsi scsi_transport_iscsi dell_smbios iTCO_wdt iTCO_vendor_support dell_wmi_descriptor dcdbas vfat fat pcc_cpufreq skx_edac intel_powerclamp coretemp intel_rapl iosf_mbi kvm_intel kvm irqbypass crc32_pclmul ghash_clmulni_intel aesni_intel lrw gf128mul glue_helper ablk_helper cryptd qedr ib_core pcspkr ses enclosure joydev ipmi_ssif sg i2c_i801 lpc_ich mei_me mei wmi ipmi_si ipmi_devintf ipmi_msghandler tpm_crb acpi_pad acpi_power_meter xfs libcrc32c sd_mod crc_t10dif crct10dif_generic crct10dif_pclmul crct10dif_common crc32c_intel mgag200
+[ 194.361044] qede i2c_algo_bit drm_kms_helper qed syscopyarea sysfillrect nvme sysimgblt fb_sys_fops ttm nvme_core mpt3sas crc8 ptp drm pps_core ahci raid_class scsi_transport_sas libahci libata drm_panel_orientation_quirks nfit libnvdimm dm_mirror dm_region_hash dm_log dm_mod [last unloaded: ip_tables]
+[ 194.361297] CPU: 51 PID: 7996 Comm: reboot Kdump: loaded Not tainted 3.10.0-1062.el7.x86_64 #1
+[ 194.361359] Hardware name: Dell Inc. PowerEdge MX840c/0740HW, BIOS 2.4.6 10/15/2019
+[ 194.361412] task: ffff9cea9b360000 ti: ffff9ceabebdc000 task.ti: ffff9ceabebdc000
+[ 194.361463] RIP: 0010:[<ffffffffc03553c4>] [<ffffffffc03553c4>] __qede_remove+0x24/0x130 [qede]
+[ 194.361534] RSP: 0018:ffff9ceabebdfac0 EFLAGS: 00010282
+[ 194.361570] RAX: 0000000000000000 RBX: ffff9cd013846098 RCX: 0000000000000000
+[ 194.361621] RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffff9cd013846098
+[ 194.361668] RBP: ffff9ceabebdfae8 R08: 0000000000000000 R09: 0000000000000000
+[ 194.361715] R10: 00000000bfe14201 R11: ffff9ceabfe141e0 R12: 0000000000000000
+[ 194.361762] R13: ffff9cd013846098 R14: 0000000000000000 R15: ffff9ceab5e48000
+[ 194.361810] FS: 00007f799c02d880(0000) GS:ffff9ceacb0c0000(0000) knlGS:0000000000000000
+[ 194.361865] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 194.361903] CR2: 00000000000008dc CR3: 0000001bdac76000 CR4: 00000000007607e0
+[ 194.361953] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[ 194.362002] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+[ 194.362051] PKRU: 55555554
+[ 194.362073] Call Trace:
+[ 194.362109] [<ffffffffc0355500>] qede_remove+0x10/0x20 [qede]
+[ 194.362180] [<ffffffffb97d0f3e>] pci_device_remove+0x3e/0xc0
+[ 194.362240] [<ffffffffb98b3c52>] __device_release_driver+0x82/0xf0
+[ 194.362285] [<ffffffffb98b3ce3>] device_release_driver+0x23/0x30
+[ 194.362343] [<ffffffffb97c86d4>] pci_stop_bus_device+0x84/0xa0
+[ 194.362388] [<ffffffffb97c87e2>] pci_stop_and_remove_bus_device+0x12/0x20
+[ 194.362450] [<ffffffffb97f153f>] pci_iov_remove_virtfn+0xaf/0x160
+[ 194.362496] [<ffffffffb97f1aec>] sriov_disable+0x3c/0xf0
+[ 194.362534] [<ffffffffb97f1bc3>] pci_disable_sriov+0x23/0x30
+[ 194.362599] [<ffffffffc02f83c3>] qed_sriov_disable+0x5e3/0x650 [qed]
+[ 194.362658] [<ffffffffb9622df6>] ? kfree+0x106/0x140
+[ 194.362709] [<ffffffffc02cc0c0>] ? qed_free_stream_mem+0x70/0x90 [qed]
+[ 194.362754] [<ffffffffb9622df6>] ? kfree+0x106/0x140
+[ 194.362803] [<ffffffffc02cd659>] qed_slowpath_stop+0x1a9/0x1d0 [qed]
+[ 194.362854] [<ffffffffc035544e>] __qede_remove+0xae/0x130 [qede]
+[ 194.362904] [<ffffffffc03554e0>] qede_shutdown+0x10/0x20 [qede]
+[ 194.362956] [<ffffffffb97cf90a>] pci_device_shutdown+0x3a/0x60
+[ 194.363010] [<ffffffffb98b180b>] device_shutdown+0xfb/0x1f0
+[ 194.363066] [<ffffffffb94b66c6>] kernel_restart_prepare+0x36/0x40
+[ 194.363107] [<ffffffffb94b66e2>] kernel_restart+0x12/0x60
+[ 194.363146] [<ffffffffb94b6959>] SYSC_reboot+0x229/0x260
+[ 194.363196] [<ffffffffb95f200d>] ? handle_mm_fault+0x39d/0x9b0
+[ 194.363253] [<ffffffffb942b621>] ? __switch_to+0x151/0x580
+[ 194.363304] [<ffffffffb9b7ec28>] ? __schedule+0x448/0x9c0
+[ 194.363343] [<ffffffffb94b69fe>] SyS_reboot+0xe/0x10
+[ 194.363387] [<ffffffffb9b8bede>] system_call_fastpath+0x25/0x2a
+[ 194.363430] Code: f9 e9 37 ff ff ff 90 0f 1f 44 00 00 55 48 89 e5 41 57 41 56 41 55 4c 8d af 98 00 00 00 41 54 4c 89 ef 41 89 f4 53 e8 4c e4 55 f9 <80> b8 dc 08 00 00 01 48 89 c3 4c 8d b8 c0 08 00 00 4c 8b b0 c0
+[ 194.363712] RIP [<ffffffffc03553c4>] __qede_remove+0x24/0x130 [qede]
+[ 194.363764] RSP <ffff9ceabebdfac0>
+[ 194.363791] CR2: 00000000000008dc
+
+Signed-off-by: Manish Chopra <manishc@marvell.com>
+Signed-off-by: Ariel Elior <aelior@marvell.com>
+Signed-off-by: Sudarsana Kalluru <skalluru@marvell.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/qlogic/qede/qede_main.c | 12 ++++++++++--
+ 1 file changed, 10 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
+@@ -1208,8 +1208,16 @@ enum qede_remove_mode {
+ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
+ {
+ struct net_device *ndev = pci_get_drvdata(pdev);
+- struct qede_dev *edev = netdev_priv(ndev);
+- struct qed_dev *cdev = edev->cdev;
++ struct qede_dev *edev;
++ struct qed_dev *cdev;
++
++ if (!ndev) {
++ dev_info(&pdev->dev, "Device has already been removed\n");
++ return;
++ }
++
++ edev = netdev_priv(ndev);
++ cdev = edev->cdev;
+
+ DP_INFO(edev, "Starting qede_remove\n");
+
--- /dev/null
+From foo@baz Sun 10 Nov 2019 11:41:56 AM CET
+From: Heiner Kallweit <hkallweit1@gmail.com>
+Date: Wed, 6 Nov 2019 21:51:31 +0100
+Subject: r8169: fix page read in r8168g_mdio_read
+
+From: Heiner Kallweit <hkallweit1@gmail.com>
+
+[ Upstream commit 9c6850fea3edefef6e7153b2c466f09155399882 ]
+
+Functions like phy_modify_paged() read the current page, on Realtek
+PHY's this means reading the value of register 0x1f. Add special
+handling for reading this register, similar to what we do already
+in r8168g_mdio_write(). Currently we read a random value that by
+chance seems to be 0 always.
+
+Fixes: a2928d28643e ("r8169: use paged versions of phylib MDIO access functions")
+Signed-off-by: Heiner Kallweit <hkallweit1@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/realtek/r8169_main.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -863,6 +863,9 @@ static void r8168g_mdio_write(struct rtl
+
+ static int r8168g_mdio_read(struct rtl8169_private *tp, int reg)
+ {
++ if (reg == 0x1f)
++ return tp->ocp_base == OCP_STD_PHY_BASE ? 0 : tp->ocp_base >> 4;
++
+ if (tp->ocp_base != OCP_STD_PHY_BASE)
+ reg -= 0x10;
+
--- /dev/null
+From foo@baz Sun 10 Nov 2019 11:41:56 AM CET
+From: Jakub Kicinski <jakub.kicinski@netronome.com>
+Date: Tue, 5 Nov 2019 14:24:36 -0800
+Subject: selftests/tls: add test for concurrent recv and send
+
+From: Jakub Kicinski <jakub.kicinski@netronome.com>
+
+[ Upstream commit 41098af59d8d753aa8d3bb4310cc4ecb61fc82c7 ]
+
+Add a test which spawns 16 threads and performs concurrent
+send and recv calls on the same socket.
+
+Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
+Reviewed-by: Simon Horman <simon.horman@netronome.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/net/tls.c | 108 ++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 108 insertions(+)
+
+--- a/tools/testing/selftests/net/tls.c
++++ b/tools/testing/selftests/net/tls.c
+@@ -898,6 +898,114 @@ TEST_F(tls, nonblocking)
+ }
+ }
+
++static void
++test_mutliproc(struct __test_metadata *_metadata, struct _test_data_tls *self,
++ bool sendpg, unsigned int n_readers, unsigned int n_writers)
++{
++ const unsigned int n_children = n_readers + n_writers;
++ const size_t data = 6 * 1000 * 1000;
++ const size_t file_sz = data / 100;
++ size_t read_bias, write_bias;
++ int i, fd, child_id;
++ char buf[file_sz];
++ pid_t pid;
++
++ /* Only allow multiples for simplicity */
++ ASSERT_EQ(!(n_readers % n_writers) || !(n_writers % n_readers), true);
++ read_bias = n_writers / n_readers ?: 1;
++ write_bias = n_readers / n_writers ?: 1;
++
++ /* prep a file to send */
++ fd = open("/tmp/", O_TMPFILE | O_RDWR, 0600);
++ ASSERT_GE(fd, 0);
++
++ memset(buf, 0xac, file_sz);
++ ASSERT_EQ(write(fd, buf, file_sz), file_sz);
++
++ /* spawn children */
++ for (child_id = 0; child_id < n_children; child_id++) {
++ pid = fork();
++ ASSERT_NE(pid, -1);
++ if (!pid)
++ break;
++ }
++
++ /* parent waits for all children */
++ if (pid) {
++ for (i = 0; i < n_children; i++) {
++ int status;
++
++ wait(&status);
++ EXPECT_EQ(status, 0);
++ }
++
++ return;
++ }
++
++ /* Split threads for reading and writing */
++ if (child_id < n_readers) {
++ size_t left = data * read_bias;
++ char rb[8001];
++
++ while (left) {
++ int res;
++
++ res = recv(self->cfd, rb,
++ left > sizeof(rb) ? sizeof(rb) : left, 0);
++
++ EXPECT_GE(res, 0);
++ left -= res;
++ }
++ } else {
++ size_t left = data * write_bias;
++
++ while (left) {
++ int res;
++
++ ASSERT_EQ(lseek(fd, 0, SEEK_SET), 0);
++ if (sendpg)
++ res = sendfile(self->fd, fd, NULL,
++ left > file_sz ? file_sz : left);
++ else
++ res = send(self->fd, buf,
++ left > file_sz ? file_sz : left, 0);
++
++ EXPECT_GE(res, 0);
++ left -= res;
++ }
++ }
++}
++
++TEST_F(tls, mutliproc_even)
++{
++ test_mutliproc(_metadata, self, false, 6, 6);
++}
++
++TEST_F(tls, mutliproc_readers)
++{
++ test_mutliproc(_metadata, self, false, 4, 12);
++}
++
++TEST_F(tls, mutliproc_writers)
++{
++ test_mutliproc(_metadata, self, false, 10, 2);
++}
++
++TEST_F(tls, mutliproc_sendpage_even)
++{
++ test_mutliproc(_metadata, self, true, 6, 6);
++}
++
++TEST_F(tls, mutliproc_sendpage_readers)
++{
++ test_mutliproc(_metadata, self, true, 4, 12);
++}
++
++TEST_F(tls, mutliproc_sendpage_writers)
++{
++ test_mutliproc(_metadata, self, true, 10, 2);
++}
++
+ TEST_F(tls, control_msg)
+ {
+ if (self->notls)
--- /dev/null
+From foo@baz Sun 10 Nov 2019 11:41:56 AM CET
+From: Stefano Garzarella <sgarzare@redhat.com>
+Date: Fri, 8 Nov 2019 17:08:50 +0100
+Subject: vsock/virtio: fix sock refcnt holding during the shutdown
+
+From: Stefano Garzarella <sgarzare@redhat.com>
+
+[ Upstream commit ad8a7220355d39cddce8eac1cea9677333e8b821 ]
+
+The "42f5cda5eaf4" commit rightly set SOCK_DONE on peer shutdown,
+but there is an issue if we receive the SHUTDOWN(RDWR) while the
+virtio_transport_close_timeout() is scheduled.
+In this case, when the timeout fires, the SOCK_DONE is already
+set and the virtio_transport_close_timeout() will not call
+virtio_transport_reset() and virtio_transport_do_close().
+This causes that both sockets remain open and will never be released,
+preventing the unloading of [virtio|vhost]_transport modules.
+
+This patch fixes this issue, calling virtio_transport_reset() and
+virtio_transport_do_close() when we receive the SHUTDOWN(RDWR)
+and there is nothing left to read.
+
+Fixes: 42f5cda5eaf4 ("vsock/virtio: set SOCK_DONE on peer shutdown")
+Cc: Stephen Barber <smbarber@chromium.org>
+Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/vmw_vsock/virtio_transport_common.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/net/vmw_vsock/virtio_transport_common.c
++++ b/net/vmw_vsock/virtio_transport_common.c
+@@ -870,9 +870,11 @@ virtio_transport_recv_connected(struct s
+ if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_SEND)
+ vsk->peer_shutdown |= SEND_SHUTDOWN;
+ if (vsk->peer_shutdown == SHUTDOWN_MASK &&
+- vsock_stream_has_data(vsk) <= 0) {
+- sock_set_flag(sk, SOCK_DONE);
+- sk->sk_state = TCP_CLOSING;
++ vsock_stream_has_data(vsk) <= 0 &&
++ !sock_flag(sk, SOCK_DONE)) {
++ (void)virtio_transport_reset(vsk, NULL);
++
++ virtio_transport_do_close(vsk, true);
+ }
+ if (le32_to_cpu(pkt->hdr.flags))
+ sk->sk_state_change(sk);