--- /dev/null
+From foo@baz Wed Nov 21 12:16:27 CET 2018
+From: 배석진 <soukjin.bae@samsung.com>
+Date: Fri, 9 Nov 2018 16:53:06 -0800
+Subject: flow_dissector: do not dissect l4 ports for fragments
+
+From: 배석진 <soukjin.bae@samsung.com>
+
+[ Upstream commit 62230715fd2453b3ba948c9d83cfb3ada9169169 ]
+
+Only first fragment has the sport/dport information,
+not the following ones.
+
+If we want consistent hash for all fragments, we need to
+ignore ports even for first fragment.
+
+This bug is visible for IPv6 traffic, if incoming fragments
+do not have a flow label, since skb_get_hash() will give
+different results for first fragment and following ones.
+
+It is also visible if any routing rule wants dissection
+and sport or dport.
+
+See commit 5e5d6fed3741 ("ipv6: route: dissect flow
+in input path if fib rules need it") for details.
+
+[edumazet] rewrote the changelog completely.
+
+Fixes: 06635a35d13d ("flow_dissect: use programable dissector in skb_flow_dissect and friends")
+Signed-off-by: 배석진 <soukjin.bae@samsung.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/flow_dissector.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/core/flow_dissector.c
++++ b/net/core/flow_dissector.c
+@@ -1026,8 +1026,8 @@ ip_proto_again:
+ break;
+ }
+
+- if (dissector_uses_key(flow_dissector,
+- FLOW_DISSECTOR_KEY_PORTS)) {
++ if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS) &&
++ !(key_control->flags & FLOW_DIS_IS_FRAGMENT)) {
+ key_ports = skb_flow_dissector_target(flow_dissector,
+ FLOW_DISSECTOR_KEY_PORTS,
+ target_container);
--- /dev/null
+From foo@baz Wed Nov 21 12:16:27 CET 2018
+From: "Michał Mirosław" <mirq-linux@rere.qmqm.pl>
+Date: Wed, 7 Nov 2018 17:50:52 +0100
+Subject: ibmvnic: fix accelerated VLAN handling
+
+From: "Michał Mirosław" <mirq-linux@rere.qmqm.pl>
+
+[ Upstream commit e84b47941e15e6666afb8ee8b21d1c3fc1a013af ]
+
+Don't request tag insertion when it isn't present in outgoing skb.
+
+Signed-off-by: Michał Mirosław <mirq-linux@rere.qmqm.pl>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/ibm/ibmvnic.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -1545,7 +1545,7 @@ static int ibmvnic_xmit(struct sk_buff *
+ tx_crq.v1.sge_len = cpu_to_be32(skb->len);
+ tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
+
+- if (adapter->vlan_header_insertion) {
++ if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
+ tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
+ tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
+ }
--- /dev/null
+From foo@baz Wed Nov 21 12:16:27 CET 2018
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 8 Nov 2018 17:34:27 -0800
+Subject: inet: frags: better deal with smp races
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 0d5b9311baf27bb545f187f12ecfd558220c607d ]
+
+Multiple cpus might attempt to insert a new fragment in rhashtable,
+if for example RPS is buggy, as reported by 배석진 in
+https://patchwork.ozlabs.org/patch/994601/
+
+We use rhashtable_lookup_get_insert_key() instead of
+rhashtable_insert_fast() to let cpus losing the race
+free their own inet_frag_queue and use the one that
+was inserted by another cpu.
+
+Fixes: 648700f76b03 ("inet: frags: use rhashtables for reassembly units")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: 배석진 <soukjin.bae@samsung.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/inet_fragment.c | 29 +++++++++++++++--------------
+ 1 file changed, 15 insertions(+), 14 deletions(-)
+
+--- a/net/ipv4/inet_fragment.c
++++ b/net/ipv4/inet_fragment.c
+@@ -178,21 +178,22 @@ static struct inet_frag_queue *inet_frag
+ }
+
+ static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
+- void *arg)
++ void *arg,
++ struct inet_frag_queue **prev)
+ {
+ struct inet_frags *f = nf->f;
+ struct inet_frag_queue *q;
+- int err;
+
+ q = inet_frag_alloc(nf, f, arg);
+- if (!q)
++ if (!q) {
++ *prev = ERR_PTR(-ENOMEM);
+ return NULL;
+-
++ }
+ mod_timer(&q->timer, jiffies + nf->timeout);
+
+- err = rhashtable_insert_fast(&nf->rhashtable, &q->node,
+- f->rhash_params);
+- if (err < 0) {
++ *prev = rhashtable_lookup_get_insert_key(&nf->rhashtable, &q->key,
++ &q->node, f->rhash_params);
++ if (*prev) {
+ q->flags |= INET_FRAG_COMPLETE;
+ inet_frag_kill(q);
+ inet_frag_destroy(q);
+@@ -204,22 +205,22 @@ static struct inet_frag_queue *inet_frag
+ /* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */
+ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key)
+ {
+- struct inet_frag_queue *fq;
++ struct inet_frag_queue *fq = NULL, *prev;
+
+ if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh)
+ return NULL;
+
+ rcu_read_lock();
+
+- fq = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
+- if (fq) {
++ prev = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
++ if (!prev)
++ fq = inet_frag_create(nf, key, &prev);
++ if (prev && !IS_ERR(prev)) {
++ fq = prev;
+ if (!refcount_inc_not_zero(&fq->refcnt))
+ fq = NULL;
+- rcu_read_unlock();
+- return fq;
+ }
+ rcu_read_unlock();
+-
+- return inet_frag_create(nf, key);
++ return fq;
+ }
+ EXPORT_SYMBOL(inet_frag_find);
--- /dev/null
+From foo@baz Wed Nov 21 12:16:27 CET 2018
+From: Sabrina Dubroca <sd@queasysnail.net>
+Date: Fri, 16 Nov 2018 16:58:19 +0100
+Subject: ip_tunnel: don't force DF when MTU is locked
+
+From: Sabrina Dubroca <sd@queasysnail.net>
+
+[ Upstream commit 16f7eb2b77b55da816c4e207f3f9440a8cafc00a ]
+
+The various types of tunnels running over IPv4 can ask to set the DF
+bit to do PMTU discovery. However, PMTU discovery is subject to the
+threshold set by the net.ipv4.route.min_pmtu sysctl, and is also
+disabled on routes with "mtu lock". In those cases, we shouldn't set
+the DF bit.
+
+This patch makes setting the DF bit conditional on the route's MTU
+locking state.
+
+This issue seems to be older than git history.
+
+Signed-off-by: Sabrina Dubroca <sd@queasysnail.net>
+Reviewed-by: Stefano Brivio <sbrivio@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/ip_tunnel_core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/ipv4/ip_tunnel_core.c
++++ b/net/ipv4/ip_tunnel_core.c
+@@ -80,7 +80,7 @@ void iptunnel_xmit(struct sock *sk, stru
+
+ iph->version = 4;
+ iph->ihl = sizeof(struct iphdr) >> 2;
+- iph->frag_off = df;
++ iph->frag_off = ip_mtu_locked(&rt->dst) ? 0 : df;
+ iph->protocol = proto;
+ iph->tos = tos;
+ iph->daddr = dst;
--- /dev/null
+From foo@baz Wed Nov 21 12:16:27 CET 2018
+From: Xin Long <lucien.xin@gmail.com>
+Date: Wed, 14 Nov 2018 00:48:28 +0800
+Subject: ipv6: fix a dst leak when removing its exception
+
+From: Xin Long <lucien.xin@gmail.com>
+
+[ Upstream commit 761f60261b4401aa368d71d431b4c218af0efcee ]
+
+There is no need to hold dst before calling rt6_remove_exception_rt().
+The call to dst_hold_safe() in ip6_link_failure() was for ip6_del_rt(),
+which has been removed in Commit 93531c674315 ("net/ipv6: separate
+handling of FIB entries from dst based routes"). Otherwise, it will
+cause a dst leak.
+
+This patch is to simply remove the dst_hold_safe() call before calling
+rt6_remove_exception_rt() and also do the same in ip6_del_cached_rt().
+It's safe, because the removal of the exception that holds its dst's
+refcnt is protected by rt6_exception_lock.
+
+Fixes: 93531c674315 ("net/ipv6: separate handling of FIB entries from dst based routes")
+Fixes: 23fb93a4d3f1 ("net/ipv6: Cleanup exception and cache route handling")
+Reported-by: Li Shuang <shuali@redhat.com>
+Signed-off-by: Xin Long <lucien.xin@gmail.com>
+Reviewed-by: David Ahern <dsahern@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/route.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -2263,8 +2263,7 @@ static void ip6_link_failure(struct sk_b
+ if (rt) {
+ rcu_read_lock();
+ if (rt->rt6i_flags & RTF_CACHE) {
+- if (dst_hold_safe(&rt->dst))
+- rt6_remove_exception_rt(rt);
++ rt6_remove_exception_rt(rt);
+ } else {
+ struct fib6_info *from;
+ struct fib6_node *fn;
+@@ -3266,8 +3265,8 @@ static int ip6_del_cached_rt(struct rt6_
+ if (cfg->fc_flags & RTF_GATEWAY &&
+ !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
+ goto out;
+- if (dst_hold_safe(&rt->dst))
+- rc = rt6_remove_exception_rt(rt);
++
++ rc = rt6_remove_exception_rt(rt);
+ out:
+ return rc;
+ }
--- /dev/null
+From foo@baz Wed Nov 21 12:16:27 CET 2018
+From: David Ahern <dsahern@gmail.com>
+Date: Sun, 18 Nov 2018 10:45:30 -0800
+Subject: ipv6: Fix PMTU updates for UDP/raw sockets in presence of VRF
+
+From: David Ahern <dsahern@gmail.com>
+
+[ Upstream commit 7ddacfa564870cdd97275fd87decb6174abc6380 ]
+
+Preethi reported that PMTU discovery for UDP/raw applications is not
+working in the presence of VRF when the socket is not bound to a device.
+The problem is that ip6_sk_update_pmtu does not consider the L3 domain
+of the skb device if the socket is not bound. Update the function to
+set oif to the L3 master device if relevant.
+
+Fixes: ca254490c8df ("net: Add VRF support to IPv6 stack")
+Reported-by: Preethi Ramachandra <preethir@juniper.net>
+Signed-off-by: David Ahern <dsahern@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/route.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -2391,10 +2391,13 @@ EXPORT_SYMBOL_GPL(ip6_update_pmtu);
+
+ void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
+ {
++ int oif = sk->sk_bound_dev_if;
+ struct dst_entry *dst;
+
+- ip6_update_pmtu(skb, sock_net(sk), mtu,
+- sk->sk_bound_dev_if, sk->sk_mark, sk->sk_uid);
++ if (!oif && skb->dev)
++ oif = l3mdev_master_ifindex(skb->dev);
++
++ ip6_update_pmtu(skb, sock_net(sk), mtu, oif, sk->sk_mark, sk->sk_uid);
+
+ dst = __sk_dst_get(sk);
+ if (!dst || !dst->obsolete ||
--- /dev/null
+From foo@baz Wed Nov 21 12:16:27 CET 2018
+From: Xin Long <lucien.xin@gmail.com>
+Date: Tue, 13 Nov 2018 01:08:25 +0800
+Subject: l2tp: fix a sock refcnt leak in l2tp_tunnel_register
+
+From: Xin Long <lucien.xin@gmail.com>
+
+[ Upstream commit f8504f4ca0a0e9f84546ef86e00b24d2ea9a0bd2 ]
+
+This issue happens when trying to add an existent tunnel. It
+doesn't call sock_put() before returning -EEXIST to release
+the sock refcnt that was held by calling sock_hold() before
+the existence check.
+
+This patch is to fix it by holding the sock after doing the
+existence check.
+
+Fixes: f6cd651b056f ("l2tp: fix race in duplicate tunnel detection")
+Reported-by: Jianlin Shi <jishi@redhat.com>
+Signed-off-by: Xin Long <lucien.xin@gmail.com>
+Reviewed-by: Guillaume Nault <g.nault@alphalink.fr>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/l2tp/l2tp_core.c | 9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -1490,12 +1490,7 @@ int l2tp_tunnel_register(struct l2tp_tun
+ goto err_sock;
+ }
+
+- sk = sock->sk;
+-
+- sock_hold(sk);
+- tunnel->sock = sk;
+ tunnel->l2tp_net = net;
+-
+ pn = l2tp_pernet(net);
+
+ spin_lock_bh(&pn->l2tp_tunnel_list_lock);
+@@ -1510,6 +1505,10 @@ int l2tp_tunnel_register(struct l2tp_tun
+ list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
+ spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
+
++ sk = sock->sk;
++ sock_hold(sk);
++ tunnel->sock = sk;
++
+ if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
+ struct udp_tunnel_sock_cfg udp_cfg = {
+ .sk_user_data = tunnel,
--- /dev/null
+From foo@baz Wed Nov 21 12:16:27 CET 2018
+From: Shalom Toledo <shalomt@mellanox.com>
+Date: Fri, 2 Nov 2018 19:49:15 +0000
+Subject: mlxsw: spectrum: Fix IP2ME CPU policer configuration
+
+From: Shalom Toledo <shalomt@mellanox.com>
+
+[ Upstream commit 96801552f846460fe9ac10f1b189602992f004e1 ]
+
+The CPU policer used to police packets being trapped via a local route
+(IP2ME) was incorrectly configured to police based on bytes per second
+instead of packets per second.
+
+Change the policer to police based on packets per second and avoid
+packet loss under certain circumstances.
+
+Fixes: 9148e7cf73ce ("mlxsw: spectrum: Add policers for trap groups")
+Signed-off-by: Shalom Toledo <shalomt@mellanox.com>
+Signed-off-by: Ido Schimmel <idosch@mellanox.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+@@ -3519,7 +3519,6 @@ static int mlxsw_sp_cpu_policers_set(str
+ burst_size = 7;
+ break;
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
+- is_bytes = true;
+ rate = 4 * 1024;
+ burst_size = 4;
+ break;
--- /dev/null
+From foo@baz Wed Nov 21 12:16:27 CET 2018
+From: Doug Berger <opendmb@gmail.com>
+Date: Thu, 1 Nov 2018 15:55:37 -0700
+Subject: net: bcmgenet: protect stop from timeout
+
+From: Doug Berger <opendmb@gmail.com>
+
+A timing hazard exists when the network interface is stopped that
+allows a watchdog timeout to be processed by a separate core in
+parallel. This creates the potential for the timeout handler to
+wake the queues while the driver is shutting down, or access
+registers after their clocks have been removed.
+
+The more common case is that the watchdog timeout will produce a
+warning message which doesn't lead to a crash. The chances of this
+are greatly increased by the fact that bcmgenet_netif_stop stops
+the transmit queues which can easily precipitate a watchdog time-
+out because of stale trans_start data in the queues.
+
+This commit corrects the behavior by ensuring that the watchdog
+timeout is disabled before entering bcmgenet_netif_stop. There
+are currently only two users of the bcmgenet_netif_stop function:
+close and suspend.
+
+The close case already handles the issue by exiting the RUNNING
+state before invoking the driver close service.
+
+The suspend case now performs the netif_device_detach to exit the
+PRESENT state before the call to bcmgenet_netif_stop rather than
+after it.
+
+These behaviors prevent any future scheduling of the driver timeout
+service during the window. The netif_tx_stop_all_queues function
+in bcmgenet_netif_stop is replaced with netif_tx_disable to ensure
+synchronization with any transmit or timeout threads that may
+already be executing on other cores.
+
+For symmetry, the netif_device_attach call upon resume is moved to
+after the call to bcmgenet_netif_start. Since it wakes the transmit
+queues it is not necessary to invoke netif_tx_start_all_queues from
+bcmgenet_netif_start so it is moved into the driver open service.
+
+[ Upstream commit 09e805d2570a3a94f13dd9c9ad2bcab23da76e09 ]
+
+Fixes: 1c1008c793fa ("net: bcmgenet: add main driver file")
+Signed-off-by: Doug Berger <opendmb@gmail.com>
+Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/broadcom/genet/bcmgenet.c | 13 +++++++------
+ 1 file changed, 7 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+@@ -2855,7 +2855,6 @@ static void bcmgenet_netif_start(struct
+
+ umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
+
+- netif_tx_start_all_queues(dev);
+ bcmgenet_enable_tx_napi(priv);
+
+ /* Monitor link interrupts now */
+@@ -2937,6 +2936,8 @@ static int bcmgenet_open(struct net_devi
+
+ bcmgenet_netif_start(dev);
+
++ netif_tx_start_all_queues(dev);
++
+ return 0;
+
+ err_irq1:
+@@ -2958,7 +2959,7 @@ static void bcmgenet_netif_stop(struct n
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ bcmgenet_disable_tx_napi(priv);
+- netif_tx_stop_all_queues(dev);
++ netif_tx_disable(dev);
+
+ /* Disable MAC receive */
+ umac_enable_set(priv, CMD_RX_EN, false);
+@@ -3620,13 +3621,13 @@ static int bcmgenet_suspend(struct devic
+ if (!netif_running(dev))
+ return 0;
+
++ netif_device_detach(dev);
++
+ bcmgenet_netif_stop(dev);
+
+ if (!device_may_wakeup(d))
+ phy_suspend(dev->phydev);
+
+- netif_device_detach(dev);
+-
+ /* Prepare the device for Wake-on-LAN and switch to the slow clock */
+ if (device_may_wakeup(d) && priv->wolopts) {
+ ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
+@@ -3700,8 +3701,6 @@ static int bcmgenet_resume(struct device
+ /* Always enable ring 16 - descriptor ring */
+ bcmgenet_enable_dma(priv, dma_ctrl);
+
+- netif_device_attach(dev);
+-
+ if (!device_may_wakeup(d))
+ phy_resume(dev->phydev);
+
+@@ -3710,6 +3709,8 @@ static int bcmgenet_resume(struct device
+
+ bcmgenet_netif_start(dev);
+
++ netif_device_attach(dev);
++
+ return 0;
+
+ out_clk_disable:
--- /dev/null
+From foo@baz Wed Nov 21 12:16:27 CET 2018
+From: Tristram Ha <Tristram.Ha@microchip.com>
+Date: Fri, 2 Nov 2018 19:23:41 -0700
+Subject: net: dsa: microchip: initialize mutex before use
+
+From: Tristram Ha <Tristram.Ha@microchip.com>
+
+[ Upstream commit 284fb78ed7572117846f8e1d1d8e3dbfd16880c2 ]
+
+Initialize mutex before use. Avoid kernel complaint when
+CONFIG_DEBUG_LOCK_ALLOC is enabled.
+
+Fixes: b987e98e50ab90e5 ("dsa: add DSA switch driver for Microchip KSZ9477")
+Signed-off-by: Tristram Ha <Tristram.Ha@microchip.com>
+Reviewed-by: Pavel Machek <pavel@ucw.cz>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/dsa/microchip/ksz_common.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/dsa/microchip/ksz_common.c
++++ b/drivers/net/dsa/microchip/ksz_common.c
+@@ -1117,11 +1117,6 @@ static int ksz_switch_init(struct ksz_de
+ {
+ int i;
+
+- mutex_init(&dev->reg_mutex);
+- mutex_init(&dev->stats_mutex);
+- mutex_init(&dev->alu_mutex);
+- mutex_init(&dev->vlan_mutex);
+-
+ dev->ds->ops = &ksz_switch_ops;
+
+ for (i = 0; i < ARRAY_SIZE(ksz_switch_chips); i++) {
+@@ -1206,6 +1201,11 @@ int ksz_switch_register(struct ksz_devic
+ if (dev->pdata)
+ dev->chip_id = dev->pdata->chip_id;
+
++ mutex_init(&dev->reg_mutex);
++ mutex_init(&dev->stats_mutex);
++ mutex_init(&dev->alu_mutex);
++ mutex_init(&dev->vlan_mutex);
++
+ if (ksz_switch_detect(dev))
+ return -EINVAL;
+
--- /dev/null
+From foo@baz Wed Nov 21 12:16:27 CET 2018
+From: Andrew Lunn <andrew@lunn.ch>
+Date: Sun, 11 Nov 2018 00:41:10 +0100
+Subject: net: dsa: mv88e6xxx: Fix clearing of stats counters
+
+From: Andrew Lunn <andrew@lunn.ch>
+
+[ Upstream commit a9049ff9214da68df1179a7d5e36b43479abc9b8 ]
+
+The mv88e6161 would sometime fail to probe with a timeout waiting for
+the switch to complete an operation. This operation is supposed to
+clear the statistics counters. However, due to a read/modify/write,
+without the needed mask, the operation actually carried out was more
+random, with invalid parameters, resulting in the switch not
+responding. We need to preserve the histogram mode bits, so apply a
+mask to keep them.
+
+Reported-by: Chris Healy <Chris.Healy@zii.aero>
+Fixes: 40cff8fca9e3 ("net: dsa: mv88e6xxx: Fix stats histogram mode")
+Signed-off-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/dsa/mv88e6xxx/global1.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/net/dsa/mv88e6xxx/global1.c
++++ b/drivers/net/dsa/mv88e6xxx/global1.c
+@@ -567,6 +567,8 @@ int mv88e6xxx_g1_stats_clear(struct mv88
+ if (err)
+ return err;
+
++ /* Keep the histogram mode bits */
++ val &= MV88E6XXX_G1_STATS_OP_HIST_RX_TX;
+ val |= MV88E6XXX_G1_STATS_OP_BUSY | MV88E6XXX_G1_STATS_OP_FLUSH_ALL;
+
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_STATS_OP, val);
--- /dev/null
+From foo@baz Wed Nov 21 12:16:27 CET 2018
+From: Eric Dumazet <edumazet@google.com>
+Date: Sat, 17 Nov 2018 21:57:02 -0800
+Subject: net-gro: reset skb->pkt_type in napi_reuse_skb()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 33d9a2c72f086cbf1087b2fd2d1a15aa9df14a7f ]
+
+eth_type_trans() assumes initial value for skb->pkt_type
+is PACKET_HOST.
+
+This is indeed the value right after a fresh skb allocation.
+
+However, it is possible that GRO merged a packet with a different
+value (like PACKET_OTHERHOST in case macvlan is used), so
+we need to make sure napi->skb will have pkt_type set back to
+PACKET_HOST.
+
+Otherwise, valid packets might be dropped by the stack because
+their pkt_type is not PACKET_HOST.
+
+napi_reuse_skb() was added in commit 96e93eab2033 ("gro: Add
+internal interfaces for VLAN"), but this bug always has
+been there.
+
+Fixes: 96e93eab2033 ("gro: Add internal interfaces for VLAN")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/dev.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -5630,6 +5630,10 @@ static void napi_reuse_skb(struct napi_s
+ skb->vlan_tci = 0;
+ skb->dev = napi->dev;
+ skb->skb_iif = 0;
++
++ /* eth_type_trans() assumes pkt_type is PACKET_HOST */
++ skb->pkt_type = PACKET_HOST;
++
+ skb->encapsulation = 0;
+ skb_shinfo(skb)->gso_type = 0;
+ skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
--- /dev/null
+From foo@baz Wed Nov 21 12:16:27 CET 2018
+From: Raed Salem <raeds@mellanox.com>
+Date: Thu, 18 Oct 2018 08:55:21 +0300
+Subject: net/mlx5: IPSec, Fix the SA context hash key
+
+From: Raed Salem <raeds@mellanox.com>
+
+[ Upstream commit f2b18732ee9863ac036759baf616ffa03c252ed5 ]
+
+The commit "net/mlx5: Refactor accel IPSec code" introduced a
+bug where asynchronous short time change in hash key value
+by create/release SA context might happen during an asynchronous
+hash resize operation this could cause a subsequent remove SA
+context operation to fail as the key value used during resize is
+not the same key value used when remove SA context operation is
+invoked.
+
+This commit fixes the bug by defining the SA context hash key
+such that it includes only fields that never change during the
+lifetime of the SA context object.
+
+Fixes: d6c4f0298cec ("net/mlx5: Refactor accel IPSec code")
+Signed-off-by: Raed Salem <raeds@mellanox.com>
+Reviewed-by: Aviad Yehezkel <aviadye@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+@@ -83,8 +83,14 @@ struct mlx5_fpga_ipsec_rule {
+ };
+
+ static const struct rhashtable_params rhash_sa = {
+- .key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa),
+- .key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa),
++ /* Keep out "cmd" field from the key as it's
++ * value is not constant during the lifetime
++ * of the key object.
++ */
++ .key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) -
++ FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
++ .key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) +
++ FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
+ .head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash),
+ .automatic_shrinking = true,
+ .min_size = 1,
--- /dev/null
+From foo@baz Wed Nov 21 12:16:27 CET 2018
+From: Yuval Avnery <yuvalav@mellanox.com>
+Date: Tue, 16 Oct 2018 15:20:20 -0500
+Subject: net/mlx5e: Adjust to max number of channels when re-attaching
+
+From: Yuval Avnery <yuvalav@mellanox.com>
+
+[ Upstream commit a1f240f1801721f76bee734c50df2d9529da86e0 ]
+
+When core driver enters detach/attach flow after pci reset,
+Number of logical CPUs may have changed.
+As a result we need to update the cpu affiliated resource tables.
+ 1. indirect rqt list
+ 2. eq table
+
+Reproduction (PowerPC):
+ echo 1000 > /sys/kernel/debug/powerpc/eeh_max_freezes
+ ppc64_cpu --smt=on
+ # Restart driver
+ modprobe -r ... ; modprobe ...
+ # Link up
+ ifconfig ...
+ # Only physical CPUs
+ ppc64_cpu --smt=off
+ # Inject PCI errors so PCI will reset - calling the pci error handler
+ echo 0x8000000000000000 > /sys/kernel/debug/powerpc/<PCI BUS>/err_injct_inboundA
+
+Call trace when trying to add non-existing rqs to an indirect rqt:
+ mlx5e_redirect_rqt+0x84/0x260 [mlx5_core] (unreliable)
+ mlx5e_redirect_rqts+0x188/0x190 [mlx5_core]
+ mlx5e_activate_priv_channels+0x488/0x570 [mlx5_core]
+ mlx5e_open_locked+0xbc/0x140 [mlx5_core]
+ mlx5e_open+0x50/0x130 [mlx5_core]
+ mlx5e_nic_enable+0x174/0x1b0 [mlx5_core]
+ mlx5e_attach_netdev+0x154/0x290 [mlx5_core]
+ mlx5e_attach+0x88/0xd0 [mlx5_core]
+ mlx5_attach_device+0x168/0x1e0 [mlx5_core]
+ mlx5_load_one+0x1140/0x1210 [mlx5_core]
+ mlx5_pci_resume+0x6c/0xf0 [mlx5_core]
+
+Create cq will fail when trying to use non-existing EQ.
+
+Fixes: 89d44f0a6c73 ("net/mlx5_core: Add pci error handlers to mlx5_core driver")
+Signed-off-by: Yuval Avnery <yuvalav@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 27 +++++++++++++++++-----
+ 1 file changed, 22 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -1610,13 +1610,15 @@ static int mlx5e_alloc_cq_common(struct
+ int err;
+ u32 i;
+
++ err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
++ if (err)
++ return err;
++
+ err = mlx5_cqwq_create(mdev, ¶m->wq, param->cqc, &cq->wq,
+ &cq->wq_ctrl);
+ if (err)
+ return err;
+
+- mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
+-
+ mcq->cqe_sz = 64;
+ mcq->set_ci_db = cq->wq_ctrl.db.db;
+ mcq->arm_db = cq->wq_ctrl.db.db + 1;
+@@ -1674,6 +1676,10 @@ static int mlx5e_create_cq(struct mlx5e_
+ int eqn;
+ int err;
+
++ err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
++ if (err)
++ return err;
++
+ inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+ sizeof(u64) * cq->wq_ctrl.buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+@@ -1687,8 +1693,6 @@ static int mlx5e_create_cq(struct mlx5e_
+ mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
+
+- mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
+-
+ MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);
+ MLX5_SET(cqc, cqc, c_eqn, eqn);
+ MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+@@ -1908,6 +1912,10 @@ static int mlx5e_open_channel(struct mlx
+ int err;
+ int eqn;
+
++ err = mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
++ if (err)
++ return err;
++
+ c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
+ if (!c)
+ return -ENOMEM;
+@@ -1924,7 +1932,6 @@ static int mlx5e_open_channel(struct mlx
+ c->xdp = !!params->xdp_prog;
+ c->stats = &priv->channel_stats[ix].ch;
+
+- mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
+ c->irq_desc = irq_to_desc(irq);
+
+ netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
+@@ -4964,11 +4971,21 @@ int mlx5e_attach_netdev(struct mlx5e_pri
+ {
+ struct mlx5_core_dev *mdev = priv->mdev;
+ const struct mlx5e_profile *profile;
++ int max_nch;
+ int err;
+
+ profile = priv->profile;
+ clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
+
++ /* max number of channels may have changed */
++ max_nch = mlx5e_get_max_num_channels(priv->mdev);
++ if (priv->channels.params.num_channels > max_nch) {
++ mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch);
++ priv->channels.params.num_channels = max_nch;
++ mlx5e_build_default_indir_rqt(priv->channels.params.indirection_rqt,
++ MLX5E_INDIR_RQT_SIZE, max_nch);
++ }
++
+ err = profile->init_tx(priv);
+ if (err)
+ goto out;
--- /dev/null
+From foo@baz Wed Nov 21 12:16:27 CET 2018
+From: Or Gerlitz <ogerlitz@mellanox.com>
+Date: Sun, 28 Oct 2018 12:27:29 +0200
+Subject: net/mlx5e: Always use the match level enum when parsing TC rule match
+
+From: Or Gerlitz <ogerlitz@mellanox.com>
+
+[ Upstream commit 83621b7df6a646e550fd3d36db2e301cf9a5096b ]
+
+We get the match level (none, l2, l3, l4) while going over the match
+dissectors of an offloaded tc rule. When doing this, the match level
+enum and not the min inline enum values should be used, fix that.
+
+This worked accidentally b/c both enums have the same numerical values.
+
+Fixes: d708f902989b ('net/mlx5e: Get the required HW match level while parsing TC flow matches')
+Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
+Reviewed-by: Roi Dayan <roid@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -1450,10 +1450,10 @@ static int __parse_cls_flower(struct mlx
+
+ /* the HW doesn't need L3 inline to match on frag=no */
+ if (!(key->flags & FLOW_DIS_IS_FRAGMENT))
+- *match_level = MLX5_INLINE_MODE_L2;
++ *match_level = MLX5_MATCH_L2;
+ /* *** L2 attributes parsing up to here *** */
+ else
+- *match_level = MLX5_INLINE_MODE_IP;
++ *match_level = MLX5_MATCH_L3;
+ }
+ }
+
--- /dev/null
+From foo@baz Wed Nov 21 12:16:27 CET 2018
+From: Or Gerlitz <ogerlitz@mellanox.com>
+Date: Thu, 18 Oct 2018 12:31:27 +0200
+Subject: net/mlx5e: Claim TC hw offloads support only under a proper build config
+
+From: Or Gerlitz <ogerlitz@mellanox.com>
+
+[ Upstream commit 077ecd785d90c6cbba08d719faa4be8561aa0a1e ]
+
+Currently, we are only supporting tc hw offloads when the eswitch
+support is compiled in, but we are not gating the advertisement
+of the NETIF_F_HW_TC feature on this config being set.
+
+Fix it, and while doing that, also avoid dealing with the feature
+on ethtool when the config is not set.
+
+Fixes: e8f887ac6a45 ('net/mlx5e: Introduce tc offload support')
+Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
+Reviewed-by: Roi Dayan <roid@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -3566,6 +3566,7 @@ static int set_feature_cvlan_filter(stru
+ return 0;
+ }
+
++#ifdef CONFIG_MLX5_ESWITCH
+ static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
+ {
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+@@ -3578,6 +3579,7 @@ static int set_feature_tc_num_filters(st
+
+ return 0;
+ }
++#endif
+
+ static int set_feature_rx_all(struct net_device *netdev, bool enable)
+ {
+@@ -3676,7 +3678,9 @@ static int mlx5e_set_features(struct net
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
+ set_feature_cvlan_filter);
++#ifdef CONFIG_MLX5_ESWITCH
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters);
++#endif
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
+@@ -4685,7 +4689,9 @@ static void mlx5e_build_nic_netdev(struc
+ FT_CAP(modify_root) &&
+ FT_CAP(identified_miss_table_mode) &&
+ FT_CAP(flow_table_modify)) {
++#ifdef CONFIG_MLX5_ESWITCH
+ netdev->hw_features |= NETIF_F_HW_TC;
++#endif
+ #ifdef CONFIG_MLX5_EN_ARFS
+ netdev->hw_features |= NETIF_F_NTUPLE;
+ #endif
--- /dev/null
+From foo@baz Wed Nov 21 12:16:27 CET 2018
+From: Or Gerlitz <ogerlitz@mellanox.com>
+Date: Thu, 25 Oct 2018 15:41:58 +0000
+Subject: net/mlx5e: Don't match on vlan non-existence if ethertype is wildcarded
+
+From: Or Gerlitz <ogerlitz@mellanox.com>
+
+[ Upstream commit d3a80bb5a3eac311ddf28387402593977574460d ]
+
+For the "all" ethertype we should not care whether the packet has
+vlans. Besides being wrong, the way we did it caused FW error
+for rules such as:
+
+tc filter add dev eth0 protocol all parent ffff: \
+ prio 1 flower skip_sw action drop
+
+b/c the matching meta-data (outer headers bit in struct mlx5_flow_spec)
+wasn't set. Fix that by matching on vlan non-existence only if we were
+also told to match on the ethertype.
+
+Fixes: cee26487620b ('net/mlx5e: Set vlan masks for all offloaded TC rules')
+Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
+Reported-by: Slava Ovsiienko <viacheslavo@mellanox.com>
+Reviewed-by: Jianbo Liu <jianbol@mellanox.com>
+Reviewed-by: Roi Dayan <roid@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 63 ++++++++++++------------
+ 1 file changed, 32 insertions(+), 31 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -1310,31 +1310,21 @@ static int __parse_cls_flower(struct mlx
+ inner_headers);
+ }
+
+- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+- struct flow_dissector_key_eth_addrs *key =
++ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
++ struct flow_dissector_key_basic *key =
+ skb_flow_dissector_target(f->dissector,
+- FLOW_DISSECTOR_KEY_ETH_ADDRS,
++ FLOW_DISSECTOR_KEY_BASIC,
+ f->key);
+- struct flow_dissector_key_eth_addrs *mask =
++ struct flow_dissector_key_basic *mask =
+ skb_flow_dissector_target(f->dissector,
+- FLOW_DISSECTOR_KEY_ETH_ADDRS,
++ FLOW_DISSECTOR_KEY_BASIC,
+ f->mask);
++ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
++ ntohs(mask->n_proto));
++ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
++ ntohs(key->n_proto));
+
+- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+- dmac_47_16),
+- mask->dst);
+- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+- dmac_47_16),
+- key->dst);
+-
+- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+- smac_47_16),
+- mask->src);
+- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+- smac_47_16),
+- key->src);
+-
+- if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst))
++ if (mask->n_proto)
+ *match_level = MLX5_MATCH_L2;
+ }
+
+@@ -1368,9 +1358,10 @@ static int __parse_cls_flower(struct mlx
+
+ *match_level = MLX5_MATCH_L2;
+ }
+- } else {
++ } else if (*match_level != MLX5_MATCH_NONE) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
++ *match_level = MLX5_MATCH_L2;
+ }
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) {
+@@ -1408,21 +1399,31 @@ static int __parse_cls_flower(struct mlx
+ }
+ }
+
+- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+- struct flow_dissector_key_basic *key =
++ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
++ struct flow_dissector_key_eth_addrs *key =
+ skb_flow_dissector_target(f->dissector,
+- FLOW_DISSECTOR_KEY_BASIC,
++ FLOW_DISSECTOR_KEY_ETH_ADDRS,
+ f->key);
+- struct flow_dissector_key_basic *mask =
++ struct flow_dissector_key_eth_addrs *mask =
+ skb_flow_dissector_target(f->dissector,
+- FLOW_DISSECTOR_KEY_BASIC,
++ FLOW_DISSECTOR_KEY_ETH_ADDRS,
+ f->mask);
+- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
+- ntohs(mask->n_proto));
+- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
+- ntohs(key->n_proto));
+
+- if (mask->n_proto)
++ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
++ dmac_47_16),
++ mask->dst);
++ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
++ dmac_47_16),
++ key->dst);
++
++ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
++ smac_47_16),
++ mask->src);
++ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
++ smac_47_16),
++ key->src);
++
++ if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst))
+ *match_level = MLX5_MATCH_L2;
+ }
+
--- /dev/null
+From foo@baz Wed Nov 21 12:16:27 CET 2018
+From: Valentine Fatiev <valentinef@mellanox.com>
+Date: Wed, 17 Oct 2018 11:45:07 +0300
+Subject: net/mlx5e: Fix selftest for small MTUs
+
+From: Valentine Fatiev <valentinef@mellanox.com>
+
+[ Upstream commit 228c4cd04dfd0667eda182c91504b83c17d97584 ]
+
+Loopback test had fixed packet size, which can be bigger than configured
+MTU. Shorten the loopback packet size to be bigger than minimal MTU
+allowed by the device. The 'text' field was removed from struct 'mlx5ehdr'
+as redundant, to allow sending packets as small as the minimal allowed MTU.
+
+Fixes: d605d66 ("net/mlx5e: Add support for ethtool self diagnostics test")
+Signed-off-by: Valentine Fatiev <valentinef@mellanox.com>
+Reviewed-by: Eran Ben Elisha <eranbe@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c | 26 ++++++------------
+ 1 file changed, 10 insertions(+), 16 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+@@ -98,18 +98,17 @@ static int mlx5e_test_link_speed(struct
+ return 1;
+ }
+
+-#ifdef CONFIG_INET
+-/* loopback test */
+-#define MLX5E_TEST_PKT_SIZE (MLX5E_RX_MAX_HEAD - NET_IP_ALIGN)
+-static const char mlx5e_test_text[ETH_GSTRING_LEN] = "MLX5E SELF TEST";
+-#define MLX5E_TEST_MAGIC 0x5AEED15C001ULL
+-
+ struct mlx5ehdr {
+ __be32 version;
+ __be64 magic;
+- char text[ETH_GSTRING_LEN];
+ };
+
++#ifdef CONFIG_INET
++/* loopback test */
++#define MLX5E_TEST_PKT_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) +\
++ sizeof(struct udphdr) + sizeof(struct mlx5ehdr))
++#define MLX5E_TEST_MAGIC 0x5AEED15C001ULL
++
+ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
+ {
+ struct sk_buff *skb = NULL;
+@@ -117,10 +116,7 @@ static struct sk_buff *mlx5e_test_get_ud
+ struct ethhdr *ethh;
+ struct udphdr *udph;
+ struct iphdr *iph;
+- int datalen, iplen;
+-
+- datalen = MLX5E_TEST_PKT_SIZE -
+- (sizeof(*ethh) + sizeof(*iph) + sizeof(*udph));
++ int iplen;
+
+ skb = netdev_alloc_skb(priv->netdev, MLX5E_TEST_PKT_SIZE);
+ if (!skb) {
+@@ -149,7 +145,7 @@ static struct sk_buff *mlx5e_test_get_ud
+ /* Fill UDP header */
+ udph->source = htons(9);
+ udph->dest = htons(9); /* Discard Protocol */
+- udph->len = htons(datalen + sizeof(struct udphdr));
++ udph->len = htons(sizeof(struct mlx5ehdr) + sizeof(struct udphdr));
+ udph->check = 0;
+
+ /* Fill IP header */
+@@ -157,7 +153,8 @@ static struct sk_buff *mlx5e_test_get_ud
+ iph->ttl = 32;
+ iph->version = 4;
+ iph->protocol = IPPROTO_UDP;
+- iplen = sizeof(struct iphdr) + sizeof(struct udphdr) + datalen;
++ iplen = sizeof(struct iphdr) + sizeof(struct udphdr) +
++ sizeof(struct mlx5ehdr);
+ iph->tot_len = htons(iplen);
+ iph->frag_off = 0;
+ iph->saddr = 0;
+@@ -170,9 +167,6 @@ static struct sk_buff *mlx5e_test_get_ud
+ mlxh = skb_put(skb, sizeof(*mlxh));
+ mlxh->version = 0;
+ mlxh->magic = cpu_to_be64(MLX5E_TEST_MAGIC);
+- strlcpy(mlxh->text, mlx5e_test_text, sizeof(mlxh->text));
+- datalen -= sizeof(*mlxh);
+- skb_put_zero(skb, datalen);
+
+ skb->csum = 0;
+ skb->ip_summed = CHECKSUM_PARTIAL;
--- /dev/null
+From foo@baz Wed Nov 21 12:16:27 CET 2018
+From: Denis Drozdov <denisd@mellanox.com>
+Date: Thu, 27 Sep 2018 14:17:54 +0300
+Subject: net/mlx5e: IPoIB, Reset QP after channels are closed
+
+From: Denis Drozdov <denisd@mellanox.com>
+
+[ Upstream commit acf3766b36d8e59ecbc307894c6d05703ee48014 ]
+
+The mlx5e channels should be closed before mlx5i_uninit_underlay_qp
+puts the QP into RST (reset) state during mlx5i_close. Currently the QP
+state is incorrectly set to RST before the channels get deactivated and closed,
+since mlx5_post_send request expects QP in RTS (Ready To Send) state.
+
+The fix is to keep QP in RTS state until mlx5e channels get closed
+and to reset QP afterwards.
+
+Also this fix is simply correct in order to keep the open/close flow
+symmetric, i.e mlx5i_init_underlay_qp() is called first thing at open,
+the correct thing to do is to call mlx5i_uninit_underlay_qp() last thing
+at close, which is exactly what this patch is doing.
+
+Fixes: dae37456c8ac ("net/mlx5: Support for attaching multiple underlay QPs to root flow table")
+Signed-off-by: Denis Drozdov <denisd@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+@@ -502,9 +502,9 @@ static int mlx5i_close(struct net_device
+
+ netif_carrier_off(epriv->netdev);
+ mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
+- mlx5i_uninit_underlay_qp(epriv);
+ mlx5e_deactivate_priv_channels(epriv);
+ mlx5e_close_channels(&epriv->channels);
++ mlx5i_uninit_underlay_qp(epriv);
+ unlock:
+ mutex_unlock(&epriv->state_lock);
+ return 0;
--- /dev/null
+From foo@baz Wed Nov 21 12:16:27 CET 2018
+From: Shay Agroskin <shayag@mellanox.com>
+Date: Sun, 28 Oct 2018 09:06:11 +0200
+Subject: net/mlx5e: Removed unnecessary warnings in FEC caps query
+
+From: Shay Agroskin <shayag@mellanox.com>
+
+[ Upstream commit 64e283348458e2fd2fe41b60dfb6c30e88ee695f ]
+
+Querying interface FEC caps with 'ethtool [int]' after link reset
+throws a warning regarding link speed.
+This warning is not needed as there is already an indication in
+user space that the link is not up.
+
+Fixes: 0696d60853d5 ("net/mlx5e: Receive buffer configuration")
+Signed-off-by: Shay Agroskin <shayag@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en/port.c | 4 +---
+ drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c | 4 +++-
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+@@ -88,10 +88,8 @@ int mlx5e_port_linkspeed(struct mlx5_cor
+
+ eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
+ *speed = mlx5e_port_ptys2speed(eth_proto_oper);
+- if (!(*speed)) {
+- mlx5_core_warn(mdev, "cannot get port speed\n");
++ if (!(*speed))
+ err = -EINVAL;
+- }
+
+ return err;
+ }
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+@@ -130,8 +130,10 @@ static u32 calculate_xoff(struct mlx5e_p
+ int err;
+
+ err = mlx5e_port_linkspeed(priv->mdev, &speed);
+- if (err)
++ if (err) {
++ mlx5_core_warn(priv->mdev, "cannot get port speed\n");
+ return 0;
++ }
+
+ xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100;
+
--- /dev/null
+From foo@baz Wed Nov 21 12:16:27 CET 2018
+From: Moshe Shemesh <moshe@mellanox.com>
+Date: Thu, 11 Oct 2018 07:31:10 +0300
+Subject: net/mlx5e: RX, verify received packet size in Linear Striding RQ
+
+From: Moshe Shemesh <moshe@mellanox.com>
+
+[ Upstream commit 0073c8f72736b423aade8a817587a5f3e4df4ad8 ]
+
+In case of striding RQ, we use MPWRQ (Multi Packet WQE RQ), which means
+that WQE (RX descriptor) can be used for many packets and so the WQE is
+much bigger than MTU. In virtualization setups where the port mtu can
+be larger than the vf mtu, if received packet is bigger than MTU, it
+won't be dropped by HW on too small receive WQE. If we use linear SKB in
+striding RQ, since each stride has room for mtu size payload and skb
+info, an oversized packet can lead to crash for crossing allocated page
+boundary upon the call to build_skb. So driver needs to check packet
+size and drop it.
+
+Introduce new SW rx counter, rx_oversize_pkts_sw_drop, which counts the
+number of packets dropped by the driver for being too large.
+
+As a new field is added to the RQ struct, re-open the channels whenever
+this field is being used in datapath (i.e., in the case of linear
+Striding RQ).
+
+Fixes: 619a8f2a42f1 ("net/mlx5e: Use linear SKB in Striding RQ")
+Signed-off-by: Moshe Shemesh <moshe@mellanox.com>
+Reviewed-by: Tariq Toukan <tariqt@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en.h | 1 +
+ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 4 +++-
+ drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 6 ++++++
+ drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 3 +++
+ drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 2 ++
+ 5 files changed, 15 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+@@ -566,6 +566,7 @@ struct mlx5e_rq {
+
+ unsigned long state;
+ int ix;
++ unsigned int hw_mtu;
+
+ struct net_dim dim; /* Dynamic Interrupt Moderation */
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -492,6 +492,7 @@ static int mlx5e_alloc_rq(struct mlx5e_c
+ rq->channel = c;
+ rq->ix = c->ix;
+ rq->mdev = mdev;
++ rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ rq->stats = &c->priv->channel_stats[c->ix].rq;
+
+ rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
+@@ -3758,10 +3759,11 @@ int mlx5e_change_mtu(struct net_device *
+ }
+
+ if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
++ bool is_linear = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, &new_channels.params);
+ u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params);
+ u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params);
+
+- reset = reset && (ppw_old != ppw_new);
++ reset = reset && (is_linear || (ppw_old != ppw_new));
+ }
+
+ if (!reset) {
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -1064,6 +1064,12 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct m
+ u32 frag_size;
+ bool consumed;
+
++ /* Check packet size. Note LRO doesn't use linear SKB */
++ if (unlikely(cqe_bcnt > rq->hw_mtu)) {
++ rq->stats->oversize_pkts_sw_drop++;
++ return NULL;
++ }
++
+ va = page_address(di->page) + head_offset;
+ data = va + rx_headroom;
+ frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+@@ -82,6 +82,7 @@ static const struct counter_desc sw_stat
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
++ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
+@@ -158,6 +159,7 @@ void mlx5e_grp_sw_update_stats(struct ml
+ s->rx_wqe_err += rq_stats->wqe_err;
+ s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes;
+ s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
++ s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop;
+ s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
+ s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
+ s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
+@@ -1148,6 +1150,7 @@ static const struct counter_desc rq_stat
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
++ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+@@ -95,6 +95,7 @@ struct mlx5e_sw_stats {
+ u64 rx_wqe_err;
+ u64 rx_mpwqe_filler_cqes;
+ u64 rx_mpwqe_filler_strides;
++ u64 rx_oversize_pkts_sw_drop;
+ u64 rx_buff_alloc_err;
+ u64 rx_cqe_compress_blks;
+ u64 rx_cqe_compress_pkts;
+@@ -190,6 +191,7 @@ struct mlx5e_rq_stats {
+ u64 wqe_err;
+ u64 mpwqe_filler_cqes;
+ u64 mpwqe_filler_strides;
++ u64 oversize_pkts_sw_drop;
+ u64 buff_alloc_err;
+ u64 cqe_compress_blks;
+ u64 cqe_compress_pkts;
--- /dev/null
+From foo@baz Wed Nov 21 12:16:27 CET 2018
+From: Martin Schiller <ms@dev.tdt.de>
+Date: Fri, 16 Nov 2018 08:38:36 +0100
+Subject: net: phy: mdio-gpio: Fix working over slow can_sleep GPIOs
+
+From: Martin Schiller <ms@dev.tdt.de>
+
+[ Upstream commit df5a8ec64eed7fe45b556cfff503acd6429ab817 ]
+
+Up until commit 7e5fbd1e0700 ("net: mdio-gpio: Convert to use gpiod
+functions where possible"), the _cansleep variants of the gpio_ API was
+used. After that commit and the change to gpiod_ API, the _cansleep()
+was dropped. This then results in WARN_ON() when used with GPIO
+devices which do sleep. Add back the _cansleep() to avoid this.
+
+Fixes: 7e5fbd1e0700 ("net: mdio-gpio: Convert to use gpiod functions where possible")
+Signed-off-by: Martin Schiller <ms@dev.tdt.de>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/phy/mdio-gpio.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/phy/mdio-gpio.c
++++ b/drivers/net/phy/mdio-gpio.c
+@@ -63,7 +63,7 @@ static void mdio_dir(struct mdiobb_ctrl
+ * assume the pin serves as pull-up. If direction is
+ * output, the default value is high.
+ */
+- gpiod_set_value(bitbang->mdo, 1);
++ gpiod_set_value_cansleep(bitbang->mdo, 1);
+ return;
+ }
+
+@@ -78,7 +78,7 @@ static int mdio_get(struct mdiobb_ctrl *
+ struct mdio_gpio_info *bitbang =
+ container_of(ctrl, struct mdio_gpio_info, ctrl);
+
+- return gpiod_get_value(bitbang->mdio);
++ return gpiod_get_value_cansleep(bitbang->mdio);
+ }
+
+ static void mdio_set(struct mdiobb_ctrl *ctrl, int what)
+@@ -87,9 +87,9 @@ static void mdio_set(struct mdiobb_ctrl
+ container_of(ctrl, struct mdio_gpio_info, ctrl);
+
+ if (bitbang->mdo)
+- gpiod_set_value(bitbang->mdo, what);
++ gpiod_set_value_cansleep(bitbang->mdo, what);
+ else
+- gpiod_set_value(bitbang->mdio, what);
++ gpiod_set_value_cansleep(bitbang->mdio, what);
+ }
+
+ static void mdc_set(struct mdiobb_ctrl *ctrl, int what)
+@@ -97,7 +97,7 @@ static void mdc_set(struct mdiobb_ctrl *
+ struct mdio_gpio_info *bitbang =
+ container_of(ctrl, struct mdio_gpio_info, ctrl);
+
+- gpiod_set_value(bitbang->mdc, what);
++ gpiod_set_value_cansleep(bitbang->mdc, what);
+ }
+
+ static const struct mdiobb_ops mdio_gpio_ops = {
--- /dev/null
+From foo@baz Wed Nov 21 12:16:27 CET 2018
+From: "Holger Hoffstätte" <holger@applied-asynchrony.com>
+Date: Sun, 4 Nov 2018 19:02:42 +0100
+Subject: net: phy: realtek: fix RTL8201F sysfs name
+
+From: "Holger Hoffstätte" <holger@applied-asynchrony.com>
+
+[ Upstream commit 0432e833191ad4d17b7fc2364941f91dad51db1a ]
+
+Since 4.19 the following error in sysfs has appeared when using the
+r8169 NIC driver:
+
+$cd /sys/module/realtek/drivers
+$ls -l
+ls: cannot access 'mdio_bus:RTL8201F 10/100Mbps Ethernet': No such file or directory
+[..garbled dir entries follow..]
+
+Apparently the forward slash in "10/100Mbps Ethernet" is interpreted
+as directory separator that leads nowhere, and was introduced in commit
+513588dd44b ("net: phy: realtek: add RTL8201F phy-id and functions").
+
+Fix this by removing the offending slash in the driver name.
+
+Other drivers in net/phy seem to have the same problem, but I cannot
+test/verify them.
+
+Fixes: 513588dd44b ("net: phy: realtek: add RTL8201F phy-id and functions")
+Signed-off-by: Holger Hoffstätte <holger@applied-asynchrony.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/phy/realtek.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/phy/realtek.c
++++ b/drivers/net/phy/realtek.c
+@@ -220,7 +220,7 @@ static struct phy_driver realtek_drvs[]
+ .flags = PHY_HAS_INTERRUPT,
+ }, {
+ .phy_id = 0x001cc816,
+- .name = "RTL8201F 10/100Mbps Ethernet",
++ .name = "RTL8201F Fast Ethernet",
+ .phy_id_mask = 0x001fffff,
+ .features = PHY_BASIC_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
--- /dev/null
+From foo@baz Wed Nov 21 12:16:27 CET 2018
+From: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
+Date: Fri, 9 Nov 2018 18:56:27 -0700
+Subject: net: qualcomm: rmnet: Fix incorrect assignment of real_dev
+
+From: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
+
+[ Upstream commit d02854dc1999ed3e7fd79ec700c64ac23ac0c458 ]
+
+A null dereference was observed when a sysctl was being set
+from userspace and rmnet was stuck trying to complete some actions
+in the NETDEV_REGISTER callback. This is because the real_dev is set
+only after the device registration handler completes.
+
+sysctl call stack -
+
+<6> Unable to handle kernel NULL pointer dereference at
+ virtual address 00000108
+<2> pc : rmnet_vnd_get_iflink+0x1c/0x28
+<2> lr : dev_get_iflink+0x2c/0x40
+<2> rmnet_vnd_get_iflink+0x1c/0x28
+<2> inet6_fill_ifinfo+0x15c/0x234
+<2> inet6_ifinfo_notify+0x68/0xd4
+<2> ndisc_ifinfo_sysctl_change+0x1b8/0x234
+<2> proc_sys_call_handler+0xac/0x100
+<2> proc_sys_write+0x3c/0x4c
+<2> __vfs_write+0x54/0x14c
+<2> vfs_write+0xcc/0x188
+<2> SyS_write+0x60/0xc0
+<2> el0_svc_naked+0x34/0x38
+
+device register call stack -
+
+<2> notifier_call_chain+0x84/0xbc
+<2> raw_notifier_call_chain+0x38/0x48
+<2> call_netdevice_notifiers_info+0x40/0x70
+<2> call_netdevice_notifiers+0x38/0x60
+<2> register_netdevice+0x29c/0x3d8
+<2> rmnet_vnd_newlink+0x68/0xe8
+<2> rmnet_newlink+0xa0/0x160
+<2> rtnl_newlink+0x57c/0x6c8
+<2> rtnetlink_rcv_msg+0x1dc/0x328
+<2> netlink_rcv_skb+0xac/0x118
+<2> rtnetlink_rcv+0x24/0x30
+<2> netlink_unicast+0x158/0x1f0
+<2> netlink_sendmsg+0x32c/0x338
+<2> sock_sendmsg+0x44/0x60
+<2> SyS_sendto+0x150/0x1ac
+<2> el0_svc_naked+0x34/0x38
+
+Fixes: b752eff5be24 ("net: qualcomm: rmnet: Implement ndo_get_iflink")
+Signed-off-by: Sean Tranchetti <stranche@codeaurora.org>
+Signed-off-by: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
++++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+@@ -234,7 +234,7 @@ int rmnet_vnd_newlink(u8 id, struct net_
+ struct net_device *real_dev,
+ struct rmnet_endpoint *ep)
+ {
+- struct rmnet_priv *priv;
++ struct rmnet_priv *priv = netdev_priv(rmnet_dev);
+ int rc;
+
+ if (ep->egress_dev)
+@@ -247,6 +247,8 @@ int rmnet_vnd_newlink(u8 id, struct net_
+ rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ rmnet_dev->hw_features |= NETIF_F_SG;
+
++ priv->real_dev = real_dev;
++
+ rc = register_netdevice(rmnet_dev);
+ if (!rc) {
+ ep->egress_dev = rmnet_dev;
+@@ -255,9 +257,7 @@ int rmnet_vnd_newlink(u8 id, struct net_
+
+ rmnet_dev->rtnl_link_ops = &rmnet_link_ops;
+
+- priv = netdev_priv(rmnet_dev);
+ priv->mux_id = id;
+- priv->real_dev = real_dev;
+
+ netdev_dbg(rmnet_dev, "rmnet dev created\n");
+ }
--- /dev/null
+From foo@baz Wed Nov 21 12:16:27 CET 2018
+From: Davide Caratti <dcaratti@redhat.com>
+Date: Wed, 14 Nov 2018 12:17:25 +0100
+Subject: net/sched: act_pedit: fix memory leak when IDR allocation fails
+
+From: Davide Caratti <dcaratti@redhat.com>
+
+[ Upstream commit 19ab69107d3ecfb7cd3e38ad262a881be40c01a3 ]
+
+tcf_idr_check_alloc() can return a negative value, on allocation failures
+(-ENOMEM) or IDR exhaustion (-ENOSPC): don't leak keys_ex in these cases.
+
+Fixes: 0190c1d452a9 ("net: sched: atomically check-allocate action")
+Signed-off-by: Davide Caratti <dcaratti@redhat.com>
+Acked-by: Cong Wang <xiyou.wangcong@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/act_pedit.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/sched/act_pedit.c
++++ b/net/sched/act_pedit.c
+@@ -201,7 +201,8 @@ static int tcf_pedit_init(struct net *ne
+ goto out_release;
+ }
+ } else {
+- return err;
++ ret = err;
++ goto out_free;
+ }
+
+ p = to_pedit(*a);
--- /dev/null
+From foo@baz Wed Nov 21 12:16:27 CET 2018
+From: Jakub Kicinski <jakub.kicinski@netronome.com>
+Date: Fri, 9 Nov 2018 21:06:26 -0800
+Subject: net: sched: cls_flower: validate nested enc_opts_policy to avoid warning
+
+From: Jakub Kicinski <jakub.kicinski@netronome.com>
+
+[ Upstream commit 63c82997f5c0f3e1b914af43d82f712a86bc5f3a ]
+
+TCA_FLOWER_KEY_ENC_OPTS and TCA_FLOWER_KEY_ENC_OPTS_MASK can only
+currently contain further nested attributes, which are parsed by
+hand, so the policy is never actually used resulting in a W=1
+build warning:
+
+net/sched/cls_flower.c:492:1: warning: ‘enc_opts_policy’ defined but not used [-Wunused-const-variable=]
+ enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
+
+Add the validation anyway to avoid potential bugs when other
+attributes are added and to make the attribute structure slightly
+more clear. Validation will also set extack to point to bad
+attribute on error.
+
+Fixes: 0a6e77784f49 ("net/sched: allow flower to match tunnel options")
+Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
+Acked-by: Simon Horman <simon.horman@netronome.com>
+Acked-by: Jiri Pirko <jiri@mellanox.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/cls_flower.c | 14 +++++++++++++-
+ 1 file changed, 13 insertions(+), 1 deletion(-)
+
+--- a/net/sched/cls_flower.c
++++ b/net/sched/cls_flower.c
+@@ -709,11 +709,23 @@ static int fl_set_enc_opt(struct nlattr
+ struct netlink_ext_ack *extack)
+ {
+ const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
+- int option_len, key_depth, msk_depth = 0;
++ int err, option_len, key_depth, msk_depth = 0;
++
++ err = nla_validate_nested(tb[TCA_FLOWER_KEY_ENC_OPTS],
++ TCA_FLOWER_KEY_ENC_OPTS_MAX,
++ enc_opts_policy, extack);
++ if (err)
++ return err;
+
+ nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);
+
+ if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
++ err = nla_validate_nested(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
++ TCA_FLOWER_KEY_ENC_OPTS_MAX,
++ enc_opts_policy, extack);
++ if (err)
++ return err;
++
+ nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
+ msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
+ }
--- /dev/null
+From foo@baz Wed Nov 21 12:16:27 CET 2018
+From: Stefan Wahren <stefan.wahren@i2se.com>
+Date: Thu, 8 Nov 2018 20:38:26 +0100
+Subject: net: smsc95xx: Fix MTU range
+
+From: Stefan Wahren <stefan.wahren@i2se.com>
+
+[ Upstream commit 85b18b0237ce9986a81a1b9534b5e2ee116f5504 ]
+
+The commit f77f0aee4da4 ("net: use core MTU range checking in USB NIC
+drivers") introduce a common MTU handling for usbnet. But it's missing
+the necessary changes for smsc95xx. So set the MTU range accordingly.
+
+This patch has been tested on a Raspberry Pi 3.
+
+Fixes: f77f0aee4da4 ("net: use core MTU range checking in USB NIC drivers")
+Signed-off-by: Stefan Wahren <stefan.wahren@i2se.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/smsc95xx.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/net/usb/smsc95xx.c
++++ b/drivers/net/usb/smsc95xx.c
+@@ -1321,6 +1321,8 @@ static int smsc95xx_bind(struct usbnet *
+ dev->net->ethtool_ops = &smsc95xx_ethtool_ops;
+ dev->net->flags |= IFF_MULTICAST;
+ dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM;
++ dev->net->min_mtu = ETH_MIN_MTU;
++ dev->net->max_mtu = ETH_DATA_LEN;
+ dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+
+ pdata->dev = dev;
--- /dev/null
+From foo@baz Wed Nov 21 12:16:27 CET 2018
+From: Florian Fainelli <f.fainelli@gmail.com>
+Date: Thu, 1 Nov 2018 15:55:38 -0700
+Subject: net: systemport: Protect stop from timeout
+
+From: Florian Fainelli <f.fainelli@gmail.com>
+
+[ Upstream commit 7cb6a2a2c72c1ed8f42fb01f1a661281b568dead ]
+
+A timing hazard exists when the network interface is stopped that
+allows a watchdog timeout to be processed by a separate core in
+parallel. This creates the potential for the timeout handler to
+wake the queues while the driver is shutting down, or access
+registers after their clocks have been removed.
+
+The more common case is that the watchdog timeout will produce a
+warning message which doesn't lead to a crash. The chances of this
+are greatly increased by the fact that bcm_sysport_netif_stop stops
+the transmit queues which can easily precipitate a watchdog time-
+out because of stale trans_start data in the queues.
+
+This commit corrects the behavior by ensuring that the watchdog
+timeout is disabled before entering bcm_sysport_netif_stop. There
+are currently only two users of the bcm_sysport_netif_stop function:
+close and suspend.
+
+The close case already handles the issue by exiting the RUNNING
+state before invoking the driver close service.
+
+The suspend case now performs the netif_device_detach to exit the
+PRESENT state before the call to bcm_sysport_netif_stop rather than
+after it.
+
+These behaviors prevent any future scheduling of the driver timeout
+service during the window. The netif_tx_stop_all_queues function
+in bcm_sysport_netif_stop is replaced with netif_tx_disable to ensure
+synchronization with any transmit or timeout threads that may
+already be executing on other cores.
+
+For symmetry, the netif_device_attach call upon resume is moved to
+after the call to bcm_sysport_netif_start. Since it wakes the transmit
+queues it is not necessary to invoke netif_tx_start_all_queues from
+bcm_sysport_netif_start so it is moved into the driver open service.
+
+Fixes: 40755a0fce17 ("net: systemport: add suspend and resume support")
+Fixes: 80105befdb4b ("net: systemport: add Broadcom SYSTEMPORT Ethernet MAC driver")
+Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/broadcom/bcmsysport.c | 15 +++++++--------
+ 1 file changed, 7 insertions(+), 8 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bcmsysport.c
++++ b/drivers/net/ethernet/broadcom/bcmsysport.c
+@@ -1897,9 +1897,6 @@ static void bcm_sysport_netif_start(stru
+ intrl2_1_mask_clear(priv, 0xffffffff);
+ else
+ intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK);
+-
+- /* Last call before we start the real business */
+- netif_tx_start_all_queues(dev);
+ }
+
+ static void rbuf_init(struct bcm_sysport_priv *priv)
+@@ -2045,6 +2042,8 @@ static int bcm_sysport_open(struct net_d
+
+ bcm_sysport_netif_start(dev);
+
++ netif_tx_start_all_queues(dev);
++
+ return 0;
+
+ out_clear_rx_int:
+@@ -2068,7 +2067,7 @@ static void bcm_sysport_netif_stop(struc
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+ /* stop all software from updating hardware */
+- netif_tx_stop_all_queues(dev);
++ netif_tx_disable(dev);
+ napi_disable(&priv->napi);
+ cancel_work_sync(&priv->dim.dim.work);
+ phy_stop(dev->phydev);
+@@ -2654,12 +2653,12 @@ static int __maybe_unused bcm_sysport_su
+ if (!netif_running(dev))
+ return 0;
+
++ netif_device_detach(dev);
++
+ bcm_sysport_netif_stop(dev);
+
+ phy_suspend(dev->phydev);
+
+- netif_device_detach(dev);
+-
+ /* Disable UniMAC RX */
+ umac_enable_set(priv, CMD_RX_EN, 0);
+
+@@ -2743,8 +2742,6 @@ static int __maybe_unused bcm_sysport_re
+ goto out_free_rx_ring;
+ }
+
+- netif_device_attach(dev);
+-
+ /* RX pipe enable */
+ topctrl_writel(priv, 0, RX_FLUSH_CNTL);
+
+@@ -2789,6 +2786,8 @@ static int __maybe_unused bcm_sysport_re
+
+ bcm_sysport_netif_start(dev);
+
++ netif_device_attach(dev);
++
+ return 0;
+
+ out_free_rx_ring:
--- /dev/null
+From foo@baz Wed Nov 21 12:16:27 CET 2018
+From: Xin Long <lucien.xin@gmail.com>
+Date: Sun, 18 Nov 2018 16:14:47 +0800
+Subject: Revert "sctp: remove sctp_transport_pmtu_check"
+
+From: Xin Long <lucien.xin@gmail.com>
+
+[ Upstream commit 69fec325a64383667b8a35df5d48d6ce52fb2782 ]
+
+This reverts commit 22d7be267eaa8114dcc28d66c1c347f667d7878a.
+
+The dst's mtu in transport can be updated by a non sctp place like
+in xfrm where the MTU information didn't get synced between asoc,
+transport and dst, so it is still needed to do the pmtu check
+in sctp_packet_config.
+
+Acked-by: Neil Horman <nhorman@tuxdriver.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/sctp/sctp.h | 12 ++++++++++++
+ net/sctp/output.c | 3 +++
+ 2 files changed, 15 insertions(+)
+
+--- a/include/net/sctp/sctp.h
++++ b/include/net/sctp/sctp.h
+@@ -608,4 +608,16 @@ static inline __u32 sctp_dst_mtu(const s
+ SCTP_DEFAULT_MINSEGMENT));
+ }
+
++static inline bool sctp_transport_pmtu_check(struct sctp_transport *t)
++{
++ __u32 pmtu = sctp_dst_mtu(t->dst);
++
++ if (t->pathmtu == pmtu)
++ return true;
++
++ t->pathmtu = pmtu;
++
++ return false;
++}
++
+ #endif /* __net_sctp_h__ */
+--- a/net/sctp/output.c
++++ b/net/sctp/output.c
+@@ -118,6 +118,9 @@ void sctp_packet_config(struct sctp_pack
+ sctp_transport_route(tp, NULL, sp);
+ if (asoc->param_flags & SPP_PMTUD_ENABLE)
+ sctp_assoc_sync_pmtu(asoc);
++ } else if (!sctp_transport_pmtu_check(tp)) {
++ if (asoc->param_flags & SPP_PMTUD_ENABLE)
++ sctp_assoc_sync_pmtu(asoc);
+ }
+
+ if (asoc->pmtu_pending) {
--- /dev/null
+From foo@baz Wed Nov 21 12:16:27 CET 2018
+From: David Howells <dhowells@redhat.com>
+Date: Thu, 1 Nov 2018 13:39:53 +0000
+Subject: rxrpc: Fix lockup due to no error backoff after ack transmit error
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit c7e86acfcee30794dc99a0759924bf7b9d43f1ca ]
+
+If the network becomes (partially) unavailable, say by disabling IPv6, the
+background ACK transmission routine can get itself into a tizzy by
+proposing immediate ACK retransmission. Since we're in the call event
+processor, that happens immediately without returning to the workqueue
+manager.
+
+The condition should clear after a while when either the network comes back
+or the call times out.
+
+Fix this by:
+
+ (1) When re-proposing an ACK on failed Tx, don't schedule it immediately.
+ This will allow a certain amount of time to elapse before we try
+ again.
+
+ (2) Enforce a return to the workqueue manager after a certain number of
+ iterations of the call processing loop.
+
+ (3) Add a backoff delay that increases the delay on deferred ACKs by a
+ jiffy per failed transmission to a limit of HZ. The backoff delay is
+ cleared on a successful return from kernel_sendmsg().
+
+ (4) Cancel calls immediately if the opening sendmsg fails. The layer
+ above can arrange retransmission or rotate to another server.
+
+Fixes: 248f219cb8bc ("rxrpc: Rewrite the data and ack handling code")
+Signed-off-by: David Howells <dhowells@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/rxrpc/ar-internal.h | 1 +
+ net/rxrpc/call_event.c | 18 ++++++++++++++----
+ net/rxrpc/output.c | 35 +++++++++++++++++++++++++++++++----
+ 3 files changed, 46 insertions(+), 8 deletions(-)
+
+--- a/net/rxrpc/ar-internal.h
++++ b/net/rxrpc/ar-internal.h
+@@ -611,6 +611,7 @@ struct rxrpc_call {
+ * not hard-ACK'd packet follows this.
+ */
+ rxrpc_seq_t tx_top; /* Highest Tx slot allocated. */
++ u16 tx_backoff; /* Delay to insert due to Tx failure */
+
+ /* TCP-style slow-start congestion control [RFC5681]. Since the SMSS
+ * is fixed, we keep these numbers in terms of segments (ie. DATA
+--- a/net/rxrpc/call_event.c
++++ b/net/rxrpc/call_event.c
+@@ -123,6 +123,7 @@ static void __rxrpc_propose_ACK(struct r
+ else
+ ack_at = expiry;
+
++ ack_at += READ_ONCE(call->tx_backoff);
+ ack_at += now;
+ if (time_before(ack_at, call->ack_at)) {
+ WRITE_ONCE(call->ack_at, ack_at);
+@@ -311,6 +312,7 @@ void rxrpc_process_call(struct work_stru
+ container_of(work, struct rxrpc_call, processor);
+ rxrpc_serial_t *send_ack;
+ unsigned long now, next, t;
++ unsigned int iterations = 0;
+
+ rxrpc_see_call(call);
+
+@@ -319,6 +321,11 @@ void rxrpc_process_call(struct work_stru
+ call->debug_id, rxrpc_call_states[call->state], call->events);
+
+ recheck_state:
++ /* Limit the number of times we do this before returning to the manager */
++ iterations++;
++ if (iterations > 5)
++ goto requeue;
++
+ if (test_and_clear_bit(RXRPC_CALL_EV_ABORT, &call->events)) {
+ rxrpc_send_abort_packet(call);
+ goto recheck_state;
+@@ -447,13 +454,16 @@ recheck_state:
+ rxrpc_reduce_call_timer(call, next, now, rxrpc_timer_restart);
+
+ /* other events may have been raised since we started checking */
+- if (call->events && call->state < RXRPC_CALL_COMPLETE) {
+- __rxrpc_queue_call(call);
+- goto out;
+- }
++ if (call->events && call->state < RXRPC_CALL_COMPLETE)
++ goto requeue;
+
+ out_put:
+ rxrpc_put_call(call, rxrpc_call_put);
+ out:
+ _leave("");
++ return;
++
++requeue:
++ __rxrpc_queue_call(call);
++ goto out;
+ }
+--- a/net/rxrpc/output.c
++++ b/net/rxrpc/output.c
+@@ -35,6 +35,21 @@ struct rxrpc_abort_buffer {
+ static const char rxrpc_keepalive_string[] = "";
+
+ /*
++ * Increase Tx backoff on transmission failure and clear it on success.
++ */
++static void rxrpc_tx_backoff(struct rxrpc_call *call, int ret)
++{
++ if (ret < 0) {
++ u16 tx_backoff = READ_ONCE(call->tx_backoff);
++
++ if (tx_backoff < HZ)
++ WRITE_ONCE(call->tx_backoff, tx_backoff + 1);
++ } else {
++ WRITE_ONCE(call->tx_backoff, 0);
++ }
++}
++
++/*
+ * Arrange for a keepalive ping a certain time after we last transmitted. This
+ * lets the far side know we're still interested in this call and helps keep
+ * the route through any intervening firewall open.
+@@ -210,6 +225,7 @@ int rxrpc_send_ack_packet(struct rxrpc_c
+ else
+ trace_rxrpc_tx_packet(call->debug_id, &pkt->whdr,
+ rxrpc_tx_point_call_ack);
++ rxrpc_tx_backoff(call, ret);
+
+ if (call->state < RXRPC_CALL_COMPLETE) {
+ if (ret < 0) {
+@@ -218,7 +234,7 @@ int rxrpc_send_ack_packet(struct rxrpc_c
+ rxrpc_propose_ACK(call, pkt->ack.reason,
+ ntohs(pkt->ack.maxSkew),
+ ntohl(pkt->ack.serial),
+- true, true,
++ false, true,
+ rxrpc_propose_ack_retry_tx);
+ } else {
+ spin_lock_bh(&call->lock);
+@@ -300,7 +316,7 @@ int rxrpc_send_abort_packet(struct rxrpc
+ else
+ trace_rxrpc_tx_packet(call->debug_id, &pkt.whdr,
+ rxrpc_tx_point_call_abort);
+-
++ rxrpc_tx_backoff(call, ret);
+
+ rxrpc_put_connection(conn);
+ return ret;
+@@ -411,6 +427,7 @@ int rxrpc_send_data_packet(struct rxrpc_
+ else
+ trace_rxrpc_tx_packet(call->debug_id, &whdr,
+ rxrpc_tx_point_call_data_nofrag);
++ rxrpc_tx_backoff(call, ret);
+ if (ret == -EMSGSIZE)
+ goto send_fragmentable;
+
+@@ -445,9 +462,18 @@ done:
+ rxrpc_reduce_call_timer(call, expect_rx_by, nowj,
+ rxrpc_timer_set_for_normal);
+ }
+- }
+
+- rxrpc_set_keepalive(call);
++ rxrpc_set_keepalive(call);
++ } else {
++ /* Cancel the call if the initial transmission fails,
++ * particularly if that's due to network routing issues that
++ * aren't going away anytime soon. The layer above can arrange
++ * the retransmission.
++ */
++ if (!test_and_set_bit(RXRPC_CALL_BEGAN_RX_TIMER, &call->flags))
++ rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
++ RX_USER_ABORT, ret);
++ }
+
+ _leave(" = %d [%u]", ret, call->peer->maxdata);
+ return ret;
+@@ -506,6 +532,7 @@ send_fragmentable:
+ else
+ trace_rxrpc_tx_packet(call->debug_id, &whdr,
+ rxrpc_tx_point_call_data_frag);
++ rxrpc_tx_backoff(call, ret);
+
+ up_write(&conn->params.local->defrag_sem);
+ goto done;
--- /dev/null
+From foo@baz Wed Nov 21 12:16:27 CET 2018
+From: Xin Long <lucien.xin@gmail.com>
+Date: Sat, 3 Nov 2018 14:01:31 +0800
+Subject: sctp: define SCTP_SS_DEFAULT for Stream schedulers
+
+From: Xin Long <lucien.xin@gmail.com>
+
+[ Upstream commit 12480e3b16982c4026de10dd8155823219cd6391 ]
+
+According to rfc8260#section-4.3.2, SCTP_SS_DEFAULT is required to be
+defined as SCTP_SS_FCFS or SCTP_SS_RR.
+
+SCTP_SS_FCFS is used for SCTP_SS_DEFAULT's value in this patch.
+
+Fixes: 5bbbbe32a431 ("sctp: introduce stream scheduler foundations")
+Reported-by: Jianwen Ji <jiji@redhat.com>
+Signed-off-by: Xin Long <lucien.xin@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/uapi/linux/sctp.h | 1 +
+ net/sctp/outqueue.c | 2 +-
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+--- a/include/uapi/linux/sctp.h
++++ b/include/uapi/linux/sctp.h
+@@ -1151,6 +1151,7 @@ struct sctp_add_streams {
+ /* SCTP Stream schedulers */
+ enum sctp_sched_type {
+ SCTP_SS_FCFS,
++ SCTP_SS_DEFAULT = SCTP_SS_FCFS,
+ SCTP_SS_PRIO,
+ SCTP_SS_RR,
+ SCTP_SS_MAX = SCTP_SS_RR
+--- a/net/sctp/outqueue.c
++++ b/net/sctp/outqueue.c
+@@ -212,7 +212,7 @@ void sctp_outq_init(struct sctp_associat
+ INIT_LIST_HEAD(&q->retransmit);
+ INIT_LIST_HEAD(&q->sacked);
+ INIT_LIST_HEAD(&q->abandoned);
+- sctp_sched_set_sched(asoc, SCTP_SS_FCFS);
++ sctp_sched_set_sched(asoc, SCTP_SS_DEFAULT);
+ }
+
+ /* Free the outqueue structure and any related pending chunks.
--- /dev/null
+From foo@baz Wed Nov 21 12:16:27 CET 2018
+From: Xin Long <lucien.xin@gmail.com>
+Date: Sat, 3 Nov 2018 13:59:45 +0800
+Subject: sctp: fix strchange_flags name for Stream Change Event
+
+From: Xin Long <lucien.xin@gmail.com>
+
+[ Upstream commit fd82d61ba142f0b83463e47064bf5460aac57b6e ]
+
+As defined in rfc6525#section-6.1.3, SCTP_STREAM_CHANGE_DENIED
+and SCTP_STREAM_CHANGE_FAILED should be used instead of
+SCTP_ASSOC_CHANGE_DENIED and SCTP_ASSOC_CHANGE_FAILED.
+
+To keep the compatibility, fix it by adding two macros.
+
+Fixes: b444153fb5a6 ("sctp: add support for generating add stream change event notification")
+Reported-by: Jianwen Ji <jiji@redhat.com>
+Signed-off-by: Xin Long <lucien.xin@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/uapi/linux/sctp.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/include/uapi/linux/sctp.h
++++ b/include/uapi/linux/sctp.h
+@@ -568,6 +568,8 @@ struct sctp_assoc_reset_event {
+
+ #define SCTP_ASSOC_CHANGE_DENIED 0x0004
+ #define SCTP_ASSOC_CHANGE_FAILED 0x0008
++#define SCTP_STREAM_CHANGE_DENIED SCTP_ASSOC_CHANGE_DENIED
++#define SCTP_STREAM_CHANGE_FAILED SCTP_ASSOC_CHANGE_FAILED
+ struct sctp_stream_change_event {
+ __u16 strchange_type;
+ __u16 strchange_flags;
--- /dev/null
+From foo@baz Wed Nov 21 12:16:27 CET 2018
+From: Xin Long <lucien.xin@gmail.com>
+Date: Sun, 18 Nov 2018 15:21:53 +0800
+Subject: sctp: not allow to set asoc prsctp_enable by sockopt
+
+From: Xin Long <lucien.xin@gmail.com>
+
+[ Upstream commit cc3ccf26f0649089b3a34a2781977755ea36e72c ]
+
+As rfc7496#section-4.5 says about SCTP_PR_SUPPORTED:
+
+ This socket option allows the enabling or disabling of the
+ negotiation of PR-SCTP support for future associations. For existing
+ associations, it allows one to query whether or not PR-SCTP support
+ was negotiated on a particular association.
+
+It means only sctp sock's prsctp_enable can be set.
+
+Note that for the limitation of SCTP_{CURRENT|ALL}_ASSOC, we will
+add it when introducing SCTP_{FUTURE|CURRENT|ALL}_ASSOC for linux
+sctp in another patchset.
+
+v1->v2:
+ - drop the params.assoc_id check as Neil suggested.
+
+Fixes: 28aa4c26fce2 ("sctp: add SCTP_PR_SUPPORTED on sctp sockopt")
+Reported-by: Ying Xu <yinxu@redhat.com>
+Signed-off-by: Xin Long <lucien.xin@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sctp/socket.c | 26 +++++---------------------
+ 1 file changed, 5 insertions(+), 21 deletions(-)
+
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -3958,32 +3958,16 @@ static int sctp_setsockopt_pr_supported(
+ unsigned int optlen)
+ {
+ struct sctp_assoc_value params;
+- struct sctp_association *asoc;
+- int retval = -EINVAL;
+
+ if (optlen != sizeof(params))
+- goto out;
++ return -EINVAL;
+
+- if (copy_from_user(¶ms, optval, optlen)) {
+- retval = -EFAULT;
+- goto out;
+- }
++ if (copy_from_user(¶ms, optval, optlen))
++ return -EFAULT;
+
+- asoc = sctp_id2assoc(sk, params.assoc_id);
+- if (asoc) {
+- asoc->prsctp_enable = !!params.assoc_value;
+- } else if (!params.assoc_id) {
+- struct sctp_sock *sp = sctp_sk(sk);
++ sctp_sk(sk)->ep->prsctp_enable = !!params.assoc_value;
+
+- sp->ep->prsctp_enable = !!params.assoc_value;
+- } else {
+- goto out;
+- }
+-
+- retval = 0;
+-
+-out:
+- return retval;
++ return 0;
+ }
+
+ static int sctp_setsockopt_default_prinfo(struct sock *sk,
--- /dev/null
+From foo@baz Wed Nov 21 12:16:27 CET 2018
+From: Xin Long <lucien.xin@gmail.com>
+Date: Sun, 18 Nov 2018 21:59:49 +0800
+Subject: sctp: not increase stream's incnt before sending addstrm_in request
+
+From: Xin Long <lucien.xin@gmail.com>
+
+[ Upstream commit e1e46479847e66f78f79d8c24d5169a5954b3fc2 ]
+
+Different from processing the addstrm_out request, the receiver handles
+an addstrm_in request by sending back an addstrm_out request to the
+sender who will increase its stream's in and incnt later.
+
+Now stream->incnt has been increased since it sent out the addstrm_in
+request in sctp_send_add_streams(), with the wrong stream->incnt will
+even cause crash when copying stream info from the old stream's in to
+the new one's in sctp_process_strreset_addstrm_out().
+
+This patch is to fix it by simply removing the stream->incnt change
+from sctp_send_add_streams().
+
+Fixes: 242bd2d519d7 ("sctp: implement sender-side procedures for Add Incoming/Outgoing Streams Request Parameter")
+Reported-by: Jianwen Ji <jiji@redhat.com>
+Signed-off-by: Xin Long <lucien.xin@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sctp/stream.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/net/sctp/stream.c
++++ b/net/sctp/stream.c
+@@ -535,7 +535,6 @@ int sctp_send_add_streams(struct sctp_as
+ goto out;
+ }
+
+- stream->incnt = incnt;
+ stream->outcnt = outcnt;
+
+ asoc->strreset_outstanding = !!out + !!in;
--- /dev/null
+flow_dissector-do-not-dissect-l4-ports-for-fragments.patch
+ibmvnic-fix-accelerated-vlan-handling.patch
+ip_tunnel-don-t-force-df-when-mtu-is-locked.patch
+ipv6-fix-a-dst-leak-when-removing-its-exception.patch
+ipv6-fix-pmtu-updates-for-udp-raw-sockets-in-presence-of-vrf.patch
+net-bcmgenet-protect-stop-from-timeout.patch
+net-gro-reset-skb-pkt_type-in-napi_reuse_skb.patch
+sctp-not-allow-to-set-asoc-prsctp_enable-by-sockopt.patch
+tcp-fix-sof_timestamping_rx_hardware-to-use-the-latest-timestamp-during-tcp-coalescing.patch
+tg3-add-phy-reset-for-5717-5719-5720-in-change-ring-and-flow-control-paths.patch
+tipc-don-t-assume-linear-buffer-when-reading-ancillary-data.patch
+tipc-fix-lockdep-warning-when-reinitilaizing-sockets.patch
+tuntap-fix-multiqueue-rx.patch
+net-systemport-protect-stop-from-timeout.patch
+net-sched-act_pedit-fix-memory-leak-when-idr-allocation-fails.patch
+net-sched-cls_flower-validate-nested-enc_opts_policy-to-avoid-warning.patch
+tipc-fix-link-re-establish-failure.patch
+net-mlx5e-don-t-match-on-vlan-non-existence-if-ethertype-is-wildcarded.patch
+net-mlx5e-claim-tc-hw-offloads-support-only-under-a-proper-build-config.patch
+net-mlx5e-adjust-to-max-number-of-channles-when-re-attaching.patch
+net-mlx5e-rx-verify-received-packet-size-in-linear-striding-rq.patch
+revert-sctp-remove-sctp_transport_pmtu_check.patch
+net-mlx5e-always-use-the-match-level-enum-when-parsing-tc-rule-match.patch
+net-mlx5e-fix-selftest-for-small-mtus.patch
+net-mlx5e-removed-unnecessary-warnings-in-fec-caps-query.patch
+inet-frags-better-deal-with-smp-races.patch
+l2tp-fix-a-sock-refcnt-leak-in-l2tp_tunnel_register.patch
+net-mlx5-ipsec-fix-the-sa-context-hash-key.patch
+net-mlx5e-ipoib-reset-qp-after-channels-are-closed.patch
+net-dsa-mv88e6xxx-fix-clearing-of-stats-counters.patch
+net-phy-realtek-fix-rtl8201f-sysfs-name.patch
+sctp-define-sctp_ss_default-for-stream-schedulers.patch
+net-qualcomm-rmnet-fix-incorrect-assignment-of-real_dev.patch
+net-dsa-microchip-initialize-mutex-before-use.patch
+sctp-fix-strchange_flags-name-for-stream-change-event.patch
+net-phy-mdio-gpio-fix-working-over-slow-can_sleep-gpios.patch
+sctp-not-increase-stream-s-incnt-before-sending-addstrm_in-request.patch
+mlxsw-spectrum-fix-ip2me-cpu-policer-configuration.patch
+net-smsc95xx-fix-mtu-range.patch
+rxrpc-fix-lockup-due-to-no-error-backoff-after-ack-transmit-error.patch
+usbnet-smsc95xx-disable-carrier-check-while-suspending.patch
--- /dev/null
+From foo@baz Wed Nov 21 12:16:27 CET 2018
+From: Stephen Mallon <stephen.mallon@sydney.edu.au>
+Date: Tue, 20 Nov 2018 19:15:02 +1100
+Subject: tcp: Fix SOF_TIMESTAMPING_RX_HARDWARE to use the latest timestamp during TCP coalescing
+
+From: Stephen Mallon <stephen.mallon@sydney.edu.au>
+
+[ Upstream commit cadf9df27e7cf40e390e060a1c71bb86ecde798b ]
+
+During tcp coalescing ensure that the skb hardware timestamp refers to the
+highest sequence number data.
+Previously only the software timestamp was updated during coalescing.
+
+Signed-off-by: Stephen Mallon <stephen.mallon@sydney.edu.au>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_input.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -4371,6 +4371,7 @@ static bool tcp_try_coalesce(struct sock
+ if (TCP_SKB_CB(from)->has_rxtstamp) {
+ TCP_SKB_CB(to)->has_rxtstamp = true;
+ to->tstamp = from->tstamp;
++ skb_hwtstamps(to)->hwtstamp = skb_hwtstamps(from)->hwtstamp;
+ }
+
+ return true;
--- /dev/null
+From foo@baz Wed Nov 21 12:16:27 CET 2018
+From: Siva Reddy Kallam <siva.kallam@broadcom.com>
+Date: Tue, 20 Nov 2018 10:04:04 +0530
+Subject: tg3: Add PHY reset for 5717/5719/5720 in change ring and flow control paths
+
+From: Siva Reddy Kallam <siva.kallam@broadcom.com>
+
+[ Upstream commit 59663e42199c93d1d7314d1446f6782fc4b1eb81 ]
+
+This patch has the fix to avoid PHY lockup with 5717/5719/5720 in change
+ring and flow control paths. This patch solves the RX hang while doing
+continuous ring or flow control parameters with heavy traffic from peer.
+
+Signed-off-by: Siva Reddy Kallam <siva.kallam@broadcom.com>
+Acked-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/broadcom/tg3.c | 18 ++++++++++++++++--
+ 1 file changed, 16 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -12426,6 +12426,7 @@ static int tg3_set_ringparam(struct net_
+ {
+ struct tg3 *tp = netdev_priv(dev);
+ int i, irq_sync = 0, err = 0;
++ bool reset_phy = false;
+
+ if ((ering->rx_pending > tp->rx_std_ring_mask) ||
+ (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
+@@ -12457,7 +12458,13 @@ static int tg3_set_ringparam(struct net_
+
+ if (netif_running(dev)) {
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+- err = tg3_restart_hw(tp, false);
++ /* Reset PHY to avoid PHY lock up */
++ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
++ tg3_asic_rev(tp) == ASIC_REV_5719 ||
++ tg3_asic_rev(tp) == ASIC_REV_5720)
++ reset_phy = true;
++
++ err = tg3_restart_hw(tp, reset_phy);
+ if (!err)
+ tg3_netif_start(tp);
+ }
+@@ -12491,6 +12498,7 @@ static int tg3_set_pauseparam(struct net
+ {
+ struct tg3 *tp = netdev_priv(dev);
+ int err = 0;
++ bool reset_phy = false;
+
+ if (tp->link_config.autoneg == AUTONEG_ENABLE)
+ tg3_warn_mgmt_link_flap(tp);
+@@ -12581,7 +12589,13 @@ static int tg3_set_pauseparam(struct net
+
+ if (netif_running(dev)) {
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+- err = tg3_restart_hw(tp, false);
++ /* Reset PHY to avoid PHY lock up */
++ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
++ tg3_asic_rev(tp) == ASIC_REV_5719 ||
++ tg3_asic_rev(tp) == ASIC_REV_5720)
++ reset_phy = true;
++
++ err = tg3_restart_hw(tp, reset_phy);
+ if (!err)
+ tg3_netif_start(tp);
+ }
--- /dev/null
+From foo@baz Wed Nov 21 12:16:27 CET 2018
+From: Jon Maloy <donmalo99@gmail.com>
+Date: Sat, 17 Nov 2018 12:17:06 -0500
+Subject: tipc: don't assume linear buffer when reading ancillary data
+
+From: Jon Maloy <donmalo99@gmail.com>
+
+[ Upstream commit 1c1274a56999fbdf9cf84e332b28448bb2d55221 ]
+
+The code for reading ancillary data from a received buffer is assuming
+the buffer is linear. To make this assumption true we have to linearize
+the buffer before message data is read.
+
+Signed-off-by: Jon Maloy <jon.maloy@ericsson.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tipc/socket.c | 15 +++++++++++----
+ 1 file changed, 11 insertions(+), 4 deletions(-)
+
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -1548,16 +1548,17 @@ static void tipc_sk_set_orig_addr(struct
+ /**
+ * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
+ * @m: descriptor for message info
+- * @msg: received message header
++ * @skb: received message buffer
+ * @tsk: TIPC port associated with message
+ *
+ * Note: Ancillary data is not captured if not requested by receiver.
+ *
+ * Returns 0 if successful, otherwise errno
+ */
+-static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
++static int tipc_sk_anc_data_recv(struct msghdr *m, struct sk_buff *skb,
+ struct tipc_sock *tsk)
+ {
++ struct tipc_msg *msg;
+ u32 anc_data[3];
+ u32 err;
+ u32 dest_type;
+@@ -1566,6 +1567,7 @@ static int tipc_sk_anc_data_recv(struct
+
+ if (likely(m->msg_controllen == 0))
+ return 0;
++ msg = buf_msg(skb);
+
+ /* Optionally capture errored message object(s) */
+ err = msg ? msg_errcode(msg) : 0;
+@@ -1576,6 +1578,9 @@ static int tipc_sk_anc_data_recv(struct
+ if (res)
+ return res;
+ if (anc_data[1]) {
++ if (skb_linearize(skb))
++ return -ENOMEM;
++ msg = buf_msg(skb);
+ res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
+ msg_data(msg));
+ if (res)
+@@ -1737,9 +1742,10 @@ static int tipc_recvmsg(struct socket *s
+
+ /* Collect msg meta data, including error code and rejected data */
+ tipc_sk_set_orig_addr(m, skb);
+- rc = tipc_sk_anc_data_recv(m, hdr, tsk);
++ rc = tipc_sk_anc_data_recv(m, skb, tsk);
+ if (unlikely(rc))
+ goto exit;
++ hdr = buf_msg(skb);
+
+ /* Capture data if non-error msg, otherwise just set return value */
+ if (likely(!err)) {
+@@ -1849,9 +1855,10 @@ static int tipc_recvstream(struct socket
+ /* Collect msg meta data, incl. error code and rejected data */
+ if (!copied) {
+ tipc_sk_set_orig_addr(m, skb);
+- rc = tipc_sk_anc_data_recv(m, hdr, tsk);
++ rc = tipc_sk_anc_data_recv(m, skb, tsk);
+ if (rc)
+ break;
++ hdr = buf_msg(skb);
+ }
+
+ /* Copy data if msg ok, otherwise return error/partial data */
--- /dev/null
+From foo@baz Wed Nov 21 12:16:27 CET 2018
+From: Jon Maloy <donmalo99@gmail.com>
+Date: Sat, 10 Nov 2018 17:30:24 -0500
+Subject: tipc: fix link re-establish failure
+
+From: Jon Maloy <donmalo99@gmail.com>
+
+[ Upstream commit 7ab412d33b4c7ff3e0148d3db25dd861edd1283d ]
+
+When a link failure is detected locally, the link is reset, the flag
+link->in_session is set to false, and a RESET_MSG with the 'stopping'
+bit set is sent to the peer.
+
+The purpose of this bit is to inform the peer that this endpoint just
+is going down, and that the peer should handle the reception of this
+particular RESET message as a local failure. This forces the peer to
+accept another RESET or ACTIVATE message from this endpoint before it
+can re-establish the link. This again is necessary to ensure that
+link session numbers are properly exchanged before the link comes up
+again.
+
+If a failure is detected locally at the same time at the peer endpoint
+this will do the same, which is also a correct behavior.
+
+However, when receiving such messages, the endpoints will not
+distinguish between 'stopping' RESETs and ordinary ones when it comes
+to updating session numbers. Both endpoints will copy the received
+session number and set their 'in_session' flags to true at the
+reception, while they are still expecting another RESET from the
+peer before they can go ahead and re-establish. This is contradictory,
+since, after applying the validation check referred to below, the
+'in_session' flag will cause rejection of all such messages, and the
+link will never come up again.
+
+We now fix this by not only handling received RESET/STOPPING messages
+as a local failure, but also by omitting to set a new session number
+and the 'in_session' flag in such cases.
+
+Fixes: 7ea817f4e832 ("tipc: check session number before accepting link protocol messages")
+Signed-off-by: Jon Maloy <jon.maloy@ericsson.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tipc/link.c | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+--- a/net/tipc/link.c
++++ b/net/tipc/link.c
+@@ -1594,14 +1594,17 @@ static int tipc_link_proto_rcv(struct ti
+ if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
+ l->priority = peers_prio;
+
+- /* ACTIVATE_MSG serves as PEER_RESET if link is already down */
+- if (msg_peer_stopping(hdr))
++ /* If peer is going down we want full re-establish cycle */
++ if (msg_peer_stopping(hdr)) {
+ rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
+- else if ((mtyp == RESET_MSG) || !link_is_up(l))
++ break;
++ }
++ /* ACTIVATE_MSG serves as PEER_RESET if link is already down */
++ if (mtyp == RESET_MSG || !link_is_up(l))
+ rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
+
+ /* ACTIVATE_MSG takes up link if it was already locally reset */
+- if ((mtyp == ACTIVATE_MSG) && (l->state == LINK_ESTABLISHING))
++ if (mtyp == ACTIVATE_MSG && l->state == LINK_ESTABLISHING)
+ rc = TIPC_LINK_UP_EVT;
+
+ l->peer_session = msg_session(hdr);
--- /dev/null
+From foo@baz Wed Nov 21 12:16:27 CET 2018
+From: Jon Maloy <donmalo99@gmail.com>
+Date: Fri, 16 Nov 2018 16:55:04 -0500
+Subject: tipc: fix lockdep warning when reinitilaizing sockets
+
+From: Jon Maloy <donmalo99@gmail.com>
+
+[ Upstream commit adba75be0d23cca92a028749d92c60c8909bbdb3 ]
+
+We get the following warning:
+
+[ 47.926140] 32-bit node address hash set to 2010a0a
+[ 47.927202]
+[ 47.927433] ================================
+[ 47.928050] WARNING: inconsistent lock state
+[ 47.928661] 4.19.0+ #37 Tainted: G E
+[ 47.929346] --------------------------------
+[ 47.929954] inconsistent {SOFTIRQ-ON-W} -> {IN-SOFTIRQ-W} usage.
+[ 47.930116] swapper/3/0 [HC0[0]:SC1[3]:HE1:SE0] takes:
+[ 47.930116] 00000000af8bc31e (&(&ht->lock)->rlock){+.?.}, at: rhashtable_walk_enter+0x36/0xb0
+[ 47.930116] {SOFTIRQ-ON-W} state was registered at:
+[ 47.930116] _raw_spin_lock+0x29/0x60
+[ 47.930116] rht_deferred_worker+0x556/0x810
+[ 47.930116] process_one_work+0x1f5/0x540
+[ 47.930116] worker_thread+0x64/0x3e0
+[ 47.930116] kthread+0x112/0x150
+[ 47.930116] ret_from_fork+0x3a/0x50
+[ 47.930116] irq event stamp: 14044
+[ 47.930116] hardirqs last enabled at (14044): [<ffffffff9a07fbba>] __local_bh_enable_ip+0x7a/0xf0
+[ 47.938117] hardirqs last disabled at (14043): [<ffffffff9a07fb81>] __local_bh_enable_ip+0x41/0xf0
+[ 47.938117] softirqs last enabled at (14028): [<ffffffff9a0803ee>] irq_enter+0x5e/0x60
+[ 47.938117] softirqs last disabled at (14029): [<ffffffff9a0804a5>] irq_exit+0xb5/0xc0
+[ 47.938117]
+[ 47.938117] other info that might help us debug this:
+[ 47.938117] Possible unsafe locking scenario:
+[ 47.938117]
+[ 47.938117] CPU0
+[ 47.938117] ----
+[ 47.938117] lock(&(&ht->lock)->rlock);
+[ 47.938117] <Interrupt>
+[ 47.938117] lock(&(&ht->lock)->rlock);
+[ 47.938117]
+[ 47.938117] *** DEADLOCK ***
+[ 47.938117]
+[ 47.938117] 2 locks held by swapper/3/0:
+[ 47.938117] #0: 0000000062c64f90 ((&d->timer)){+.-.}, at: call_timer_fn+0x5/0x280
+[ 47.938117] #1: 00000000ee39619c (&(&d->lock)->rlock){+.-.}, at: tipc_disc_timeout+0xc8/0x540 [tipc]
+[ 47.938117]
+[ 47.938117] stack backtrace:
+[ 47.938117] CPU: 3 PID: 0 Comm: swapper/3 Tainted: G E 4.19.0+ #37
+[ 47.938117] Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011
+[ 47.938117] Call Trace:
+[ 47.938117] <IRQ>
+[ 47.938117] dump_stack+0x5e/0x8b
+[ 47.938117] print_usage_bug+0x1ed/0x1ff
+[ 47.938117] mark_lock+0x5b5/0x630
+[ 47.938117] __lock_acquire+0x4c0/0x18f0
+[ 47.938117] ? lock_acquire+0xa6/0x180
+[ 47.938117] lock_acquire+0xa6/0x180
+[ 47.938117] ? rhashtable_walk_enter+0x36/0xb0
+[ 47.938117] _raw_spin_lock+0x29/0x60
+[ 47.938117] ? rhashtable_walk_enter+0x36/0xb0
+[ 47.938117] rhashtable_walk_enter+0x36/0xb0
+[ 47.938117] tipc_sk_reinit+0xb0/0x410 [tipc]
+[ 47.938117] ? mark_held_locks+0x6f/0x90
+[ 47.938117] ? __local_bh_enable_ip+0x7a/0xf0
+[ 47.938117] ? lockdep_hardirqs_on+0x20/0x1a0
+[ 47.938117] tipc_net_finalize+0xbf/0x180 [tipc]
+[ 47.938117] tipc_disc_timeout+0x509/0x540 [tipc]
+[ 47.938117] ? call_timer_fn+0x5/0x280
+[ 47.938117] ? tipc_disc_msg_xmit.isra.19+0xa0/0xa0 [tipc]
+[ 47.938117] ? tipc_disc_msg_xmit.isra.19+0xa0/0xa0 [tipc]
+[ 47.938117] call_timer_fn+0xa1/0x280
+[ 47.938117] ? tipc_disc_msg_xmit.isra.19+0xa0/0xa0 [tipc]
+[ 47.938117] run_timer_softirq+0x1f2/0x4d0
+[ 47.938117] __do_softirq+0xfc/0x413
+[ 47.938117] irq_exit+0xb5/0xc0
+[ 47.938117] smp_apic_timer_interrupt+0xac/0x210
+[ 47.938117] apic_timer_interrupt+0xf/0x20
+[ 47.938117] </IRQ>
+[ 47.938117] RIP: 0010:default_idle+0x1c/0x140
+[ 47.938117] Code: 90 90 90 90 90 90 90 90 90 90 90 90 90 90 0f 1f 44 00 00 41 54 55 53 65 8b 2d d8 2b 74 65 0f 1f 44 00 00 e8 c6 2c 8b ff fb f4 <65> 8b 2d c5 2b 74 65 0f 1f 44 00 00 5b 5d 41 5c c3 65 8b 05 b4 2b
+[ 47.938117] RSP: 0018:ffffaf6ac0207ec8 EFLAGS: 00000206 ORIG_RAX: ffffffffffffff13
+[ 47.938117] RAX: ffff8f5b3735e200 RBX: 0000000000000003 RCX: 0000000000000001
+[ 47.938117] RDX: 0000000000000001 RSI: 0000000000000001 RDI: ffff8f5b3735e200
+[ 47.938117] RBP: 0000000000000003 R08: 0000000000000001 R09: 0000000000000000
+[ 47.938117] R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000000
+[ 47.938117] R13: 0000000000000000 R14: ffff8f5b3735e200 R15: ffff8f5b3735e200
+[ 47.938117] ? default_idle+0x1a/0x140
+[ 47.938117] do_idle+0x1bc/0x280
+[ 47.938117] cpu_startup_entry+0x19/0x20
+[ 47.938117] start_secondary+0x187/0x1c0
+[ 47.938117] secondary_startup_64+0xa4/0xb0
+
+The reason seems to be that tipc_net_finalize()->tipc_sk_reinit() is
+calling the function rhashtable_walk_enter() within a timer interrupt.
+We fix this by executing tipc_net_finalize() in work queue context.
+
+Acked-by: Ying Xue <ying.xue@windriver.com>
+Signed-off-by: Jon Maloy <jon.maloy@ericsson.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tipc/discover.c | 19 ++++++++++---------
+ net/tipc/net.c | 45 +++++++++++++++++++++++++++++++++++++--------
+ net/tipc/net.h | 2 +-
+ 3 files changed, 48 insertions(+), 18 deletions(-)
+
+--- a/net/tipc/discover.c
++++ b/net/tipc/discover.c
+@@ -166,7 +166,8 @@ static bool tipc_disc_addr_trial_msg(str
+
+ /* Apply trial address if we just left trial period */
+ if (!trial && !self) {
+- tipc_net_finalize(net, tn->trial_addr);
++ tipc_sched_net_finalize(net, tn->trial_addr);
++ msg_set_prevnode(buf_msg(d->skb), tn->trial_addr);
+ msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
+ }
+
+@@ -300,14 +301,12 @@ static void tipc_disc_timeout(struct tim
+ goto exit;
+ }
+
+- /* Trial period over ? */
+- if (!time_before(jiffies, tn->addr_trial_end)) {
+- /* Did we just leave it ? */
+- if (!tipc_own_addr(net))
+- tipc_net_finalize(net, tn->trial_addr);
+-
+- msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
+- msg_set_prevnode(buf_msg(d->skb), tipc_own_addr(net));
++ /* Did we just leave trial period ? */
++ if (!time_before(jiffies, tn->addr_trial_end) && !tipc_own_addr(net)) {
++ mod_timer(&d->timer, jiffies + TIPC_DISC_INIT);
++ spin_unlock_bh(&d->lock);
++ tipc_sched_net_finalize(net, tn->trial_addr);
++ return;
+ }
+
+ /* Adjust timeout interval according to discovery phase */
+@@ -319,6 +318,8 @@ static void tipc_disc_timeout(struct tim
+ d->timer_intv = TIPC_DISC_SLOW;
+ else if (!d->num_nodes && d->timer_intv > TIPC_DISC_FAST)
+ d->timer_intv = TIPC_DISC_FAST;
++ msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
++ msg_set_prevnode(buf_msg(d->skb), tn->trial_addr);
+ }
+
+ mod_timer(&d->timer, jiffies + d->timer_intv);
+--- a/net/tipc/net.c
++++ b/net/tipc/net.c
+@@ -104,6 +104,14 @@
+ * - A local spin_lock protecting the queue of subscriber events.
+ */
+
++struct tipc_net_work {
++ struct work_struct work;
++ struct net *net;
++ u32 addr;
++};
++
++static void tipc_net_finalize(struct net *net, u32 addr);
++
+ int tipc_net_init(struct net *net, u8 *node_id, u32 addr)
+ {
+ if (tipc_own_id(net)) {
+@@ -119,17 +127,38 @@ int tipc_net_init(struct net *net, u8 *n
+ return 0;
+ }
+
+-void tipc_net_finalize(struct net *net, u32 addr)
++static void tipc_net_finalize(struct net *net, u32 addr)
+ {
+ struct tipc_net *tn = tipc_net(net);
+
+- if (!cmpxchg(&tn->node_addr, 0, addr)) {
+- tipc_set_node_addr(net, addr);
+- tipc_named_reinit(net);
+- tipc_sk_reinit(net);
+- tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr,
+- TIPC_CLUSTER_SCOPE, 0, addr);
+- }
++ if (cmpxchg(&tn->node_addr, 0, addr))
++ return;
++ tipc_set_node_addr(net, addr);
++ tipc_named_reinit(net);
++ tipc_sk_reinit(net);
++ tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr,
++ TIPC_CLUSTER_SCOPE, 0, addr);
++}
++
++static void tipc_net_finalize_work(struct work_struct *work)
++{
++ struct tipc_net_work *fwork;
++
++ fwork = container_of(work, struct tipc_net_work, work);
++ tipc_net_finalize(fwork->net, fwork->addr);
++ kfree(fwork);
++}
++
++void tipc_sched_net_finalize(struct net *net, u32 addr)
++{
++ struct tipc_net_work *fwork = kzalloc(sizeof(*fwork), GFP_ATOMIC);
++
++ if (!fwork)
++ return;
++ INIT_WORK(&fwork->work, tipc_net_finalize_work);
++ fwork->net = net;
++ fwork->addr = addr;
++ schedule_work(&fwork->work);
+ }
+
+ void tipc_net_stop(struct net *net)
+--- a/net/tipc/net.h
++++ b/net/tipc/net.h
+@@ -42,7 +42,7 @@
+ extern const struct nla_policy tipc_nl_net_policy[];
+
+ int tipc_net_init(struct net *net, u8 *node_id, u32 addr);
+-void tipc_net_finalize(struct net *net, u32 addr);
++void tipc_sched_net_finalize(struct net *net, u32 addr);
+ void tipc_net_stop(struct net *net);
+ int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb);
+ int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info);
--- /dev/null
+From foo@baz Wed Nov 21 12:16:27 CET 2018
+From: Matthew Cover <werekraken@gmail.com>
+Date: Sun, 18 Nov 2018 00:46:00 -0700
+Subject: tuntap: fix multiqueue rx
+
+From: Matthew Cover <werekraken@gmail.com>
+
+[ Upstream commit 8ebebcba559a1bfbaec7bbda64feb9870b9c58da ]
+
+When writing packets to a descriptor associated with a combined queue, the
+packets should end up on that queue.
+
+Before this change all packets written to any descriptor associated with a
+tap interface end up on rx-0, even when the descriptor is associated with a
+different queue.
+
+The rx traffic can be generated by either of the following.
+ 1. a simple tap program which spins up multiple queues and writes packets
+ to each of the file descriptors
+ 2. tx from a qemu vm with a tap multiqueue netdev
+
+The queue for rx traffic can be observed by either of the following (done
+on the hypervisor in the qemu case).
+ 1. a simple netmap program which opens and reads from per-queue
+ descriptors
+ 2. configuring RPS and doing per-cpu captures with rxtxcpu
+
+Alternatively, if you printk() the return value of skb_get_rx_queue() just
+before each instance of netif_receive_skb() in tun.c, you will get 65535
+for every skb.
+
+Calling skb_record_rx_queue() to set the rx queue to the queue_index fixes
+the association between descriptor and rx queue.
+
+Signed-off-by: Matthew Cover <matthew.cover@stackpath.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/tun.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1527,6 +1527,7 @@ static void tun_rx_batched(struct tun_st
+
+ if (!rx_batched || (!more && skb_queue_empty(queue))) {
+ local_bh_disable();
++ skb_record_rx_queue(skb, tfile->queue_index);
+ netif_receive_skb(skb);
+ local_bh_enable();
+ return;
+@@ -1546,8 +1547,11 @@ static void tun_rx_batched(struct tun_st
+ struct sk_buff *nskb;
+
+ local_bh_disable();
+- while ((nskb = __skb_dequeue(&process_queue)))
++ while ((nskb = __skb_dequeue(&process_queue))) {
++ skb_record_rx_queue(nskb, tfile->queue_index);
+ netif_receive_skb(nskb);
++ }
++ skb_record_rx_queue(skb, tfile->queue_index);
+ netif_receive_skb(skb);
+ local_bh_enable();
+ }
--- /dev/null
+From foo@baz Wed Nov 21 12:16:27 CET 2018
+From: Frieder Schrempf <frieder.schrempf@kontron.de>
+Date: Wed, 31 Oct 2018 22:52:19 +0100
+Subject: usbnet: smsc95xx: disable carrier check while suspending
+
+From: Frieder Schrempf <frieder.schrempf@kontron.de>
+
+[ Upstream commit 7b900ead6cc66b2ee873cb042dfba169aa68b56c ]
+
+We need to make sure, that the carrier check polling is disabled
+while suspending. Otherwise we can end up with usbnet_read_cmd()
+being issued when only usbnet_read_cmd_nopm() is allowed. If this
+happens, read operations lock up.
+
+Fixes: d69d169493 ("usbnet: smsc95xx: fix link detection for disabled autonegotiation")
+Signed-off-by: Frieder Schrempf <frieder.schrempf@kontron.de>
+Reviewed-by: Raghuram Chary J <RaghuramChary.Jallipalli@microchip.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/smsc95xx.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/drivers/net/usb/smsc95xx.c
++++ b/drivers/net/usb/smsc95xx.c
+@@ -1600,6 +1600,8 @@ static int smsc95xx_suspend(struct usb_i
+ return ret;
+ }
+
++ cancel_delayed_work_sync(&pdata->carrier_check);
++
+ if (pdata->suspend_flags) {
+ netdev_warn(dev->net, "error during last resume\n");
+ pdata->suspend_flags = 0;
+@@ -1842,6 +1844,11 @@ done:
+ */
+ if (ret && PMSG_IS_AUTO(message))
+ usbnet_resume(intf);
++
++ if (ret)
++ schedule_delayed_work(&pdata->carrier_check,
++ CARRIER_CHECK_DELAY);
++
+ return ret;
+ }
+