--- /dev/null
+From foo@baz Sat 28 Mar 2020 12:53:42 PM CET
+From: Edwin Peer <edwin.peer@broadcom.com>
+Date: Sun, 22 Mar 2020 16:40:02 -0400
+Subject: bnxt_en: fix memory leaks in bnxt_dcbnl_ieee_getets()
+
+From: Edwin Peer <edwin.peer@broadcom.com>
+
+[ Upstream commit 62d4073e86e62e316bea2c53e77db10418fd5dd7 ]
+
+The allocated ieee_ets structure goes out of scope without being freed,
+leaking memory. Appropriate result codes should be returned so that
+callers do not rely on invalid data passed by reference.
+
+Also cache the ETS config retrieved from the device so that it doesn't
+need to be freed. The balance of the code was clearly written with the
+intent of having the results of querying the hardware cached in the
+device structure. The commensurate store was evidently missed though.
+
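+In simplified form, the fix follows a cache-on-success, free-on-error
+pattern (an illustrative sketch only; query_hw() is a stand-in for the
+two HWRM qcfg calls, see the diff below for the real code):
+
+        my_ets = kzalloc(sizeof(*my_ets), GFP_KERNEL);
+        if (!my_ets)
+                return -ENOMEM;
+        rc = query_hw(bp, my_ets);
+        if (rc) {
+                kfree(my_ets);          /* don't leak on failure ...        */
+                return rc;              /* ... and report a real error code */
+        }
+        bp->ieee_ets = my_ets;          /* cache it; freed on teardown      */
+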
+Fixes: 7df4ae9fe855 ("bnxt_en: Implement DCBNL to support host-based DCBX.")
+Signed-off-by: Edwin Peer <edwin.peer@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 15 ++++++++++-----
+ 1 file changed, 10 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+@@ -387,24 +387,26 @@ static int bnxt_dcbnl_ieee_getets(struct
+ {
+ struct bnxt *bp = netdev_priv(dev);
+ struct ieee_ets *my_ets = bp->ieee_ets;
++ int rc;
+
+ ets->ets_cap = bp->max_tc;
+
+ if (!my_ets) {
+- int rc;
+-
+ if (bp->dcbx_cap & DCB_CAP_DCBX_HOST)
+ return 0;
+
+ my_ets = kzalloc(sizeof(*my_ets), GFP_KERNEL);
+ if (!my_ets)
+- return 0;
++ return -ENOMEM;
+ rc = bnxt_hwrm_queue_cos2bw_qcfg(bp, my_ets);
+ if (rc)
+- return 0;
++ goto error;
+ rc = bnxt_hwrm_queue_pri2cos_qcfg(bp, my_ets);
+ if (rc)
+- return 0;
++ goto error;
++
++ /* cache result */
++ bp->ieee_ets = my_ets;
+ }
+
+ ets->cbs = my_ets->cbs;
+@@ -413,6 +415,9 @@ static int bnxt_dcbnl_ieee_getets(struct
+ memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
+ memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
+ return 0;
++error:
++ kfree(my_ets);
++ return rc;
+ }
+
+ static int bnxt_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
--- /dev/null
+From foo@baz Sat 28 Mar 2020 12:53:42 PM CET
+From: Taehee Yoo <ap420073@gmail.com>
+Date: Fri, 13 Mar 2020 06:50:24 +0000
+Subject: hsr: add restart routine into hsr_get_node_list()
+
+From: Taehee Yoo <ap420073@gmail.com>
+
+[ Upstream commit ca19c70f5225771c05bcdcb832b4eb84d7271c5e ]
+
+hsr_get_node_list() sends node addresses to userspace. If there are
+too many nodes, it could fail because the reply exceeds the netlink
+buffer size. In order to avoid this failure, a restart routine is added.
+
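+In simplified form the new flow is (an illustrative sketch; the real
+control flow is in the diff below):
+
+restart:
+        skb_out = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+        ...
+        while (pos) {
+                res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, addr);
+                if (res == -EMSGSIZE) {
+                        /* buffer full: flush this message and start another,
+                         * keeping "pos" so the walk resumes where it stopped
+                         */
+                        genlmsg_end(skb_out, msg_head);
+                        genlmsg_unicast(genl_info_net(info), skb_out,
+                                        info->snd_portid);
+                        goto restart;
+                }
+                pos = hsr_get_next_node(hsr, pos, addr);
+        }
+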
+Fixes: f421436a591d ("net/hsr: Add support for the High-availability Seamless Redundancy protocol (HSRv0)")
+Signed-off-by: Taehee Yoo <ap420073@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/hsr/hsr_netlink.c | 38 ++++++++++++++++++++++++--------------
+ 1 file changed, 24 insertions(+), 14 deletions(-)
+
+--- a/net/hsr/hsr_netlink.c
++++ b/net/hsr/hsr_netlink.c
+@@ -366,16 +366,14 @@ fail:
+ */
+ static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
+ {
+- /* For receiving */
+- struct nlattr *na;
++ unsigned char addr[ETH_ALEN];
+ struct net_device *hsr_dev;
+-
+- /* For sending */
+ struct sk_buff *skb_out;
+- void *msg_head;
+ struct hsr_priv *hsr;
+- void *pos;
+- unsigned char addr[ETH_ALEN];
++ bool restart = false;
++ struct nlattr *na;
++ void *pos = NULL;
++ void *msg_head;
+ int res;
+
+ if (!info)
+@@ -393,8 +391,9 @@ static int hsr_get_node_list(struct sk_b
+ if (!is_hsr_master(hsr_dev))
+ goto rcu_unlock;
+
++restart:
+ /* Send reply */
+- skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
++ skb_out = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+ if (!skb_out) {
+ res = -ENOMEM;
+ goto fail;
+@@ -408,17 +407,28 @@ static int hsr_get_node_list(struct sk_b
+ goto nla_put_failure;
+ }
+
+- res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
+- if (res < 0)
+- goto nla_put_failure;
++ if (!restart) {
++ res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
++ if (res < 0)
++ goto nla_put_failure;
++ }
+
+ hsr = netdev_priv(hsr_dev);
+
+- pos = hsr_get_next_node(hsr, NULL, addr);
++ if (!pos)
++ pos = hsr_get_next_node(hsr, NULL, addr);
+ while (pos) {
+ res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, addr);
+- if (res < 0)
++ if (res < 0) {
++ if (res == -EMSGSIZE) {
++ genlmsg_end(skb_out, msg_head);
++ genlmsg_unicast(genl_info_net(info), skb_out,
++ info->snd_portid);
++ restart = true;
++ goto restart;
++ }
+ goto nla_put_failure;
++ }
+ pos = hsr_get_next_node(hsr, pos, addr);
+ }
+ rcu_read_unlock();
+@@ -435,7 +445,7 @@ invalid:
+ return 0;
+
+ nla_put_failure:
+- kfree_skb(skb_out);
++ nlmsg_free(skb_out);
+ /* Fall through */
+
+ fail:
--- /dev/null
+From foo@baz Sat 28 Mar 2020 12:53:42 PM CET
+From: Taehee Yoo <ap420073@gmail.com>
+Date: Sat, 21 Mar 2020 06:46:50 +0000
+Subject: hsr: fix general protection fault in hsr_addr_is_self()
+
+From: Taehee Yoo <ap420073@gmail.com>
+
+[ Upstream commit 3a303cfdd28d5f930a307c82e8a9d996394d5ebd ]
+
+port->hsr is used in hsr_handle_frame(), which is an rx_handler
+callback.
+The hsr master and slave ports are initialized in hsr_add_port().
+This function initializes several pointers, including port->hsr, only
+after registering the rx_handler.
+So the rx_handler routine could run with an uninitialized pointer.
+In order to fix this, the pointers should be initialized before
+registering the rx_handler.
+
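+The race window, roughly (an illustrative sketch of the old
+hsr_add_port() ordering, not the exact code):
+
+        /* before the fix */
+        netdev_rx_handler_register(dev, hsr_handle_frame, port);
+        /* from this point a frame on "dev" can invoke hsr_handle_frame(),
+         * which dereferences port->hsr ...
+         */
+        port->hsr = hsr;        /* ... but it is only assigned here */
+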
+Test commands:
+ ip netns del left
+ ip netns del right
+ modprobe -rv veth
+ modprobe -rv hsr
+ killall ping
+ modprobe hsr
+ ip netns add left
+ ip netns add right
+ ip link add veth0 type veth peer name veth1
+ ip link add veth2 type veth peer name veth3
+ ip link add veth4 type veth peer name veth5
+ ip link set veth1 netns left
+ ip link set veth3 netns right
+ ip link set veth4 netns left
+ ip link set veth5 netns right
+ ip link set veth0 up
+ ip link set veth2 up
+ ip link set veth0 address fc:00:00:00:00:01
+ ip link set veth2 address fc:00:00:00:00:02
+ ip netns exec left ip link set veth1 up
+ ip netns exec left ip link set veth4 up
+ ip netns exec right ip link set veth3 up
+ ip netns exec right ip link set veth5 up
+ ip link add hsr0 type hsr slave1 veth0 slave2 veth2
+ ip a a 192.168.100.1/24 dev hsr0
+ ip link set hsr0 up
+ ip netns exec left ip link add hsr1 type hsr slave1 veth1 slave2 veth4
+ ip netns exec left ip a a 192.168.100.2/24 dev hsr1
+ ip netns exec left ip link set hsr1 up
+ ip netns exec left ip n a 192.168.100.1 dev hsr1 lladdr \
+ fc:00:00:00:00:01 nud permanent
+ ip netns exec left ip n r 192.168.100.1 dev hsr1 lladdr \
+ fc:00:00:00:00:01 nud permanent
+ for i in {1..100}
+ do
+ ip netns exec left ping 192.168.100.1 &
+ done
+ ip netns exec left hping3 192.168.100.1 -2 --flood &
+ ip netns exec right ip link add hsr2 type hsr slave1 veth3 slave2 veth5
+ ip netns exec right ip a a 192.168.100.3/24 dev hsr2
+ ip netns exec right ip link set hsr2 up
+ ip netns exec right ip n a 192.168.100.1 dev hsr2 lladdr \
+ fc:00:00:00:00:02 nud permanent
+ ip netns exec right ip n r 192.168.100.1 dev hsr2 lladdr \
+ fc:00:00:00:00:02 nud permanent
+ for i in {1..100}
+ do
+ ip netns exec right ping 192.168.100.1 &
+ done
+ ip netns exec right hping3 192.168.100.1 -2 --flood &
+ while :
+ do
+ ip link add hsr0 type hsr slave1 veth0 slave2 veth2
+ ip a a 192.168.100.1/24 dev hsr0
+ ip link set hsr0 up
+ ip link del hsr0
+ done
+
+Splat looks like:
+[ 120.954938][ C0] general protection fault, probably for non-canonical address 0xdffffc0000000006: 0000 [#1]I
+[ 120.957761][ C0] KASAN: null-ptr-deref in range [0x0000000000000030-0x0000000000000037]
+[ 120.959064][ C0] CPU: 0 PID: 1511 Comm: hping3 Not tainted 5.6.0-rc5+ #460
+[ 120.960054][ C0] Hardware name: innotek GmbH VirtualBox/VirtualBox, BIOS VirtualBox 12/01/2006
+[ 120.962261][ C0] RIP: 0010:hsr_addr_is_self+0x65/0x2a0 [hsr]
+[ 120.963149][ C0] Code: 44 24 18 70 73 2f c0 48 c1 eb 03 48 8d 04 13 c7 00 f1 f1 f1 f1 c7 40 04 00 f2 f2 f2 4
+[ 120.966277][ C0] RSP: 0018:ffff8880d9c09af0 EFLAGS: 00010206
+[ 120.967293][ C0] RAX: 0000000000000006 RBX: 1ffff1101b38135f RCX: 0000000000000000
+[ 120.968516][ C0] RDX: dffffc0000000000 RSI: ffff8880d17cb208 RDI: 0000000000000000
+[ 120.969718][ C0] RBP: 0000000000000030 R08: ffffed101b3c0e3c R09: 0000000000000001
+[ 120.972203][ C0] R10: 0000000000000001 R11: ffffed101b3c0e3b R12: 0000000000000000
+[ 120.973379][ C0] R13: ffff8880aaf80100 R14: ffff8880aaf800f2 R15: ffff8880aaf80040
+[ 120.974410][ C0] FS: 00007f58e693f740(0000) GS:ffff8880d9c00000(0000) knlGS:0000000000000000
+[ 120.979794][ C0] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 120.980773][ C0] CR2: 00007ffcb8b38f29 CR3: 00000000afe8e001 CR4: 00000000000606f0
+[ 120.981945][ C0] Call Trace:
+[ 120.982411][ C0] <IRQ>
+[ 120.982848][ C0] ? hsr_add_node+0x8c0/0x8c0 [hsr]
+[ 120.983522][ C0] ? rcu_read_lock_held+0x90/0xa0
+[ 120.984159][ C0] ? rcu_read_lock_sched_held+0xc0/0xc0
+[ 120.984944][ C0] hsr_handle_frame+0x1db/0x4e0 [hsr]
+[ 120.985597][ C0] ? hsr_nl_nodedown+0x2b0/0x2b0 [hsr]
+[ 120.986289][ C0] __netif_receive_skb_core+0x6bf/0x3170
+[ 120.992513][ C0] ? check_chain_key+0x236/0x5d0
+[ 120.993223][ C0] ? do_xdp_generic+0x1460/0x1460
+[ 120.993875][ C0] ? register_lock_class+0x14d0/0x14d0
+[ 120.994609][ C0] ? __netif_receive_skb_one_core+0x8d/0x160
+[ 120.995377][ C0] __netif_receive_skb_one_core+0x8d/0x160
+[ 120.996204][ C0] ? __netif_receive_skb_core+0x3170/0x3170
+[ ... ]
+
+Reported-by: syzbot+fcf5dd39282ceb27108d@syzkaller.appspotmail.com
+Fixes: c5a759117210 ("net/hsr: Use list_head (and rcu) instead of array for slave devices.")
+Signed-off-by: Taehee Yoo <ap420073@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/hsr/hsr_slave.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/net/hsr/hsr_slave.c
++++ b/net/hsr/hsr_slave.c
+@@ -152,16 +152,16 @@ int hsr_add_port(struct hsr_priv *hsr, s
+ if (port == NULL)
+ return -ENOMEM;
+
++ port->hsr = hsr;
++ port->dev = dev;
++ port->type = type;
++
+ if (type != HSR_PT_MASTER) {
+ res = hsr_portdev_setup(dev, port);
+ if (res)
+ goto fail_dev_setup;
+ }
+
+- port->hsr = hsr;
+- port->dev = dev;
+- port->type = type;
+-
+ list_add_tail_rcu(&port->port_list, &hsr->ports);
+ synchronize_rcu();
+
--- /dev/null
+From foo@baz Sat 28 Mar 2020 10:29:55 AM CET
+From: Taehee Yoo <ap420073@gmail.com>
+Date: Fri, 13 Mar 2020 06:50:33 +0000
+Subject: hsr: set .netnsok flag
+
+From: Taehee Yoo <ap420073@gmail.com>
+
+[ Upstream commit 09e91dbea0aa32be02d8877bd50490813de56b9a ]
+
+The hsr module supports the list and status commands
+(HSR_C_GET_NODE_LIST and HSR_C_GET_NODE_STATUS).
+These commands send node information to user space via generic netlink.
+But in a non-init_net namespace these commands are not allowed because
+the .netnsok flag is false, so there is no way to get node information
+in a non-init_net namespace.
+
+Fixes: f421436a591d ("net/hsr: Add support for the High-availability Seamless Redundancy protocol (HSRv0)")
+Signed-off-by: Taehee Yoo <ap420073@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/hsr/hsr_netlink.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/hsr/hsr_netlink.c
++++ b/net/hsr/hsr_netlink.c
+@@ -476,6 +476,7 @@ static struct genl_family hsr_genl_famil
+ .name = "HSR",
+ .version = 1,
+ .maxattr = HSR_A_MAX,
++ .netnsok = true,
+ .module = THIS_MODULE,
+ .ops = hsr_ops,
+ .n_ops = ARRAY_SIZE(hsr_ops),
--- /dev/null
+From foo@baz Sat 28 Mar 2020 10:29:55 AM CET
+From: Taehee Yoo <ap420073@gmail.com>
+Date: Fri, 13 Mar 2020 06:50:14 +0000
+Subject: hsr: use rcu_read_lock() in hsr_get_node_{list/status}()
+
+From: Taehee Yoo <ap420073@gmail.com>
+
+[ Upstream commit 173756b86803655d70af7732079b3aa935e6ab68 ]
+
+hsr_get_node_{list/status}() do not run under rtnl_lock() because
+they are callback functions of generic netlink, yet they use
+__dev_get_by_index(), which requires rtnl_lock().
+So they could operate on unsafe data.
+In order to fix this, rcu_read_lock() and dev_get_by_index_rcu()
+are used instead of __dev_get_by_index().
+
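+The resulting lookup pattern looks roughly like this (illustrative
+sketch; the device pointer is only guaranteed valid inside the RCU
+read-side section, so the unlock must come after its last use):
+
+        rcu_read_lock();
+        hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
+                                       nla_get_u32(info->attrs[HSR_A_IFINDEX]));
+        if (!hsr_dev)
+                goto rcu_unlock;        /* every exit path must unlock */
+        ...
+        rcu_read_unlock();
+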
+Fixes: f421436a591d ("net/hsr: Add support for the High-availability Seamless Redundancy protocol (HSRv0)")
+Signed-off-by: Taehee Yoo <ap420073@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/hsr/hsr_framereg.c | 10 ++--------
+ net/hsr/hsr_netlink.c | 43 +++++++++++++++++++++----------------------
+ 2 files changed, 23 insertions(+), 30 deletions(-)
+
+--- a/net/hsr/hsr_framereg.c
++++ b/net/hsr/hsr_framereg.c
+@@ -468,13 +468,9 @@ int hsr_get_node_data(struct hsr_priv *h
+ struct hsr_port *port;
+ unsigned long tdiff;
+
+-
+- rcu_read_lock();
+ node = find_node_by_AddrA(&hsr->node_db, addr);
+- if (!node) {
+- rcu_read_unlock();
+- return -ENOENT; /* No such entry */
+- }
++ if (!node)
++ return -ENOENT;
+
+ ether_addr_copy(addr_b, node->MacAddressB);
+
+@@ -509,7 +505,5 @@ int hsr_get_node_data(struct hsr_priv *h
+ *addr_b_ifindex = -1;
+ }
+
+- rcu_read_unlock();
+-
+ return 0;
+ }
+--- a/net/hsr/hsr_netlink.c
++++ b/net/hsr/hsr_netlink.c
+@@ -259,17 +259,16 @@ static int hsr_get_node_status(struct sk
+ if (!na)
+ goto invalid;
+
+- hsr_dev = __dev_get_by_index(genl_info_net(info),
+- nla_get_u32(info->attrs[HSR_A_IFINDEX]));
++ rcu_read_lock();
++ hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
++ nla_get_u32(info->attrs[HSR_A_IFINDEX]));
+ if (!hsr_dev)
+- goto invalid;
++ goto rcu_unlock;
+ if (!is_hsr_master(hsr_dev))
+- goto invalid;
+-
++ goto rcu_unlock;
+
+ /* Send reply */
+-
+- skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
++ skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+ if (!skb_out) {
+ res = -ENOMEM;
+ goto fail;
+@@ -321,12 +320,10 @@ static int hsr_get_node_status(struct sk
+ res = nla_put_u16(skb_out, HSR_A_IF1_SEQ, hsr_node_if1_seq);
+ if (res < 0)
+ goto nla_put_failure;
+- rcu_read_lock();
+ port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
+ if (port)
+ res = nla_put_u32(skb_out, HSR_A_IF1_IFINDEX,
+ port->dev->ifindex);
+- rcu_read_unlock();
+ if (res < 0)
+ goto nla_put_failure;
+
+@@ -336,20 +333,22 @@ static int hsr_get_node_status(struct sk
+ res = nla_put_u16(skb_out, HSR_A_IF2_SEQ, hsr_node_if2_seq);
+ if (res < 0)
+ goto nla_put_failure;
+- rcu_read_lock();
+ port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
+ if (port)
+ res = nla_put_u32(skb_out, HSR_A_IF2_IFINDEX,
+ port->dev->ifindex);
+- rcu_read_unlock();
+ if (res < 0)
+ goto nla_put_failure;
+
++ rcu_read_unlock();
++
+ genlmsg_end(skb_out, msg_head);
+ genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);
+
+ return 0;
+
++rcu_unlock:
++ rcu_read_unlock();
+ invalid:
+ netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
+ return 0;
+@@ -359,6 +358,7 @@ nla_put_failure:
+ /* Fall through */
+
+ fail:
++ rcu_read_unlock();
+ return res;
+ }
+
+@@ -385,17 +385,16 @@ static int hsr_get_node_list(struct sk_b
+ if (!na)
+ goto invalid;
+
+- hsr_dev = __dev_get_by_index(genl_info_net(info),
+- nla_get_u32(info->attrs[HSR_A_IFINDEX]));
++ rcu_read_lock();
++ hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
++ nla_get_u32(info->attrs[HSR_A_IFINDEX]));
+ if (!hsr_dev)
+- goto invalid;
++ goto rcu_unlock;
+ if (!is_hsr_master(hsr_dev))
+- goto invalid;
+-
++ goto rcu_unlock;
+
+ /* Send reply */
+-
+- skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
++ skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+ if (!skb_out) {
+ res = -ENOMEM;
+ goto fail;
+@@ -415,14 +414,11 @@ static int hsr_get_node_list(struct sk_b
+
+ hsr = netdev_priv(hsr_dev);
+
+- rcu_read_lock();
+ pos = hsr_get_next_node(hsr, NULL, addr);
+ while (pos) {
+ res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, addr);
+- if (res < 0) {
+- rcu_read_unlock();
++ if (res < 0)
+ goto nla_put_failure;
+- }
+ pos = hsr_get_next_node(hsr, pos, addr);
+ }
+ rcu_read_unlock();
+@@ -432,6 +428,8 @@ static int hsr_get_node_list(struct sk_b
+
+ return 0;
+
++rcu_unlock:
++ rcu_read_unlock();
+ invalid:
+ netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
+ return 0;
+@@ -441,6 +439,7 @@ nla_put_failure:
+ /* Fall through */
+
+ fail:
++ rcu_read_unlock();
+ return res;
+ }
+
--- /dev/null
+From foo@baz Sat 28 Mar 2020 12:53:42 PM CET
+From: Willem de Bruijn <willemb@google.com>
+Date: Sun, 22 Mar 2020 13:51:13 -0400
+Subject: macsec: restrict to ethernet devices
+
+From: Willem de Bruijn <willemb@google.com>
+
+[ Upstream commit b06d072ccc4b1acd0147b17914b7ad1caa1818bb ]
+
+Only attach macsec to ethernet devices.
+
+Syzbot was able to trigger a KMSAN warning in macsec_handle_frame
+by attaching to a phonet device.
+
+Macvlan has a similar check in macvlan_port_create.
+
+v1->v2
+ - fix commit message typo
+
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/macsec.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -19,6 +19,7 @@
+ #include <net/genetlink.h>
+ #include <net/sock.h>
+ #include <net/gro_cells.h>
++#include <linux/if_arp.h>
+
+ #include <uapi/linux/if_macsec.h>
+
+@@ -3219,6 +3220,8 @@ static int macsec_newlink(struct net *ne
+ real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
+ if (!real_dev)
+ return -ENODEV;
++ if (real_dev->type != ARPHRD_ETHER)
++ return -EINVAL;
+
+ dev->priv_flags |= IFF_MACSEC;
+
--- /dev/null
+From foo@baz Sat 28 Mar 2020 12:53:42 PM CET
+From: Florian Fainelli <f.fainelli@gmail.com>
+Date: Sun, 22 Mar 2020 13:58:50 -0700
+Subject: net: dsa: Fix duplicate frames flooded by learning
+
+From: Florian Fainelli <f.fainelli@gmail.com>
+
+[ Upstream commit 0e62f543bed03a64495bd2651d4fe1aa4bcb7fe5 ]
+
+When both the switch and the bridge are learning about new addresses,
+switch ports attached to the bridge would see duplicate ARP frames
+because both entities would attempt to send them.
+
+Fixes: 5037d532b83d ("net: dsa: add Broadcom tag RX/TX handler")
+Reported-by: Maxime Bizon <mbizon@freebox.fr>
+Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
+Reviewed-by: Vivien Didelot <vivien.didelot@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/dsa/tag_brcm.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/net/dsa/tag_brcm.c
++++ b/net/dsa/tag_brcm.c
+@@ -134,6 +134,8 @@ static struct sk_buff *brcm_tag_rcv(stru
+
+ skb->dev = ds->ports[source_port].netdev;
+
++ skb->offload_fwd_mark = 1;
++
+ return skb;
+ }
+
--- /dev/null
+From foo@baz Sat 28 Mar 2020 10:29:54 AM CET
+From: "René van Dorst" <opensource@vdorst.com>
+Date: Thu, 19 Mar 2020 14:47:56 +0100
+Subject: net: dsa: mt7530: Change the LINK bit to reflect the link status
+
+From: "René van Dorst" <opensource@vdorst.com>
+
+[ Upstream commit 22259471b51925353bd7b16f864c79fdd76e425e ]
+
+Andrew reported:
+
+After a number of network port link up/down changes, sometimes the switch
+port gets stuck in a state where it thinks it is still transmitting packets
+but the cpu port is not actually transmitting anymore. In this state you
+will see a message on the console
+"mtk_soc_eth 1e100000.ethernet eth0: transmit timed out" and the Tx counter
+in ifconfig will be incrementing on virtual port, but not incrementing on
+cpu port.
+
+The issue is that MAC TX/RX status has no impact on the link status or
+queue manager of the switch. So the queue manager just queues up packets
+of a disabled port and sends out pause frames when the queue is full.
+
+Change the LINK bit to reflect the link status.
+
+Fixes: b8f126a8d543 ("net-next: dsa: add dsa support for Mediatek MT7530 switch")
+Reported-by: Andrew Smith <andrew.smith@digi.com>
+Signed-off-by: René van Dorst <opensource@vdorst.com>
+Reviewed-by: Vivien Didelot <vivien.didelot@gmail.com>
+Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/dsa/mt7530.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/dsa/mt7530.c
++++ b/drivers/net/dsa/mt7530.c
+@@ -549,7 +549,7 @@ mt7530_mib_reset(struct dsa_switch *ds)
+ static void
+ mt7530_port_set_status(struct mt7530_priv *priv, int port, int enable)
+ {
+- u32 mask = PMCR_TX_EN | PMCR_RX_EN;
++ u32 mask = PMCR_TX_EN | PMCR_RX_EN | PMCR_FORCE_LNK;
+
+ if (enable)
+ mt7530_set(priv, MT7530_PMCR_P(port), mask);
--- /dev/null
+From foo@baz Sat 28 Mar 2020 12:53:42 PM CET
+From: Jisheng Zhang <Jisheng.Zhang@synaptics.com>
+Date: Mon, 16 Mar 2020 22:56:36 +0800
+Subject: net: mvneta: Fix the case where the last poll did not process all rx
+
+From: Jisheng Zhang <Jisheng.Zhang@synaptics.com>
+
+[ Upstream commit 065fd83e1be2e1ba0d446a257fd86a3cc7bddb51 ]
+
+For the case where the last mvneta_poll did not process all
+RX packets, we need to OR in the saved pp->cause_rx_tx or
+port->cause_rx_tx before calculating the rx_queue.
+
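+A hypothetical worked case: if this poll sees no new RX cause bits but
+the previous poll left queue 1 pending, computing rx_queue before the
+merge would wrongly yield 0 (bits 8-15 carry the per-queue RX causes):
+
+        cause_rx_tx = 0x0000;                   /* nothing new this time */
+        cause_rx_tx |= pp->cause_rx_tx;         /* leftover 0x0200       */
+        rx_queue = fls((cause_rx_tx >> 8) & 0xff);      /* 2, i.e. queue 1 */
+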
+Fixes: 2dcf75e2793c ("net: mvneta: Associate RX queues with each CPU")
+Signed-off-by: Jisheng Zhang <Jisheng.Zhang@synaptics.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/marvell/mvneta.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -2759,11 +2759,10 @@ static int mvneta_poll(struct napi_struc
+ /* For the case where the last mvneta_poll did not process all
+ * RX packets
+ */
+- rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
+-
+ cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx :
+ port->cause_rx_tx;
+
++ rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
+ if (rx_queue) {
+ rx_queue = rx_queue - 1;
+ if (pp->bm_priv)
--- /dev/null
+From foo@baz Sat 28 Mar 2020 12:53:42 PM CET
+From: Willem de Bruijn <willemb@google.com>
+Date: Fri, 13 Mar 2020 12:18:09 -0400
+Subject: net/packet: tpacket_rcv: avoid a producer race condition
+
+From: Willem de Bruijn <willemb@google.com>
+
+[ Upstream commit 61fad6816fc10fb8793a925d5c1256d1c3db0cd2 ]
+
+PACKET_RX_RING can cause multiple writers to access the same slot if a
+fast writer wraps the ring while a slow writer is still copying. This
+is particularly likely with few, large, slots (e.g., GSO packets).
+
+Synchronize kernel thread ownership of rx ring slots with a bitmap.
+
+Writers acquire a slot race-free by testing tp_status TP_STATUS_KERNEL
+while holding the sk receive queue lock. They release this lock before
+copying and set tp_status to TP_STATUS_USER to release to userspace
+when done. During copying, another writer may take the lock, also see
+TP_STATUS_KERNEL, and start writing to the same slot.
+
+Introduce a new rx_owner_map bitmap with a bit per slot. To acquire a
+slot, test and set with the lock held. To release race-free, update
+tp_status and owner bit as a transaction, so take the lock again.
+
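+Schematically, for TPACKET_V1/V2 (an illustrative sketch, not the
+exact kernel code):
+
+        /* acquire: under sk_receive_queue.lock */
+        slot_id = po->rx_ring.head;
+        if (test_bit(slot_id, po->rx_ring.rx_owner_map))
+                goto drop;              /* another writer owns this slot */
+        __set_bit(slot_id, po->rx_ring.rx_owner_map);
+
+        /* ... lock dropped while the packet is copied into the slot ... */
+
+        /* release: retake the lock so status and owner bit flip together */
+        spin_lock(&sk->sk_receive_queue.lock);
+        __packet_set_status(po, h.raw, TP_STATUS_USER);
+        __clear_bit(slot_id, po->rx_ring.rx_owner_map);
+        spin_unlock(&sk->sk_receive_queue.lock);
+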
+This is one of a variety of discussed options (see Link below):
+
+* instead of a shadow ring, embed the data in the slot itself, such as
+in tp_padding. But any test for this field may match a value left by
+userspace, causing deadlock.
+
+* avoid the lock on release. This leaves a small race if releasing the
+shadow slot before setting TP_STATUS_USER. The below reproducer showed
+that this race is not academic. If releasing the slot after tp_status,
+the race is more subtle. See the first link for details.
+
+* add a new tp_status TP_KERNEL_OWNED to avoid the transactional store
+of two fields. But, legacy applications may interpret all non-zero
+tp_status as owned by the user. As libpcap does. So this is possible
+only opt-in by newer processes. It can be added as an optional mode.
+
+* embed the struct at the tail of pg_vec to avoid extra allocation.
+The implementation proved no less complex than a separate field.
+
+The additional locking cost on release adds contention, no different
+than scaling on multicore or multiqueue h/w. In practice, below
+reproducer nor small packet tcpdump showed a noticeable change in
+perf report in cycles spent in spinlock. Where contention is
+problematic, packet sockets support mitigation through PACKET_FANOUT.
+And we can consider adding opt-in state TP_KERNEL_OWNED.
+
+Easy to reproduce by running multiple netperf or similar TCP_STREAM
+flows concurrently with `tcpdump -B 129 -n greater 60000`.
+
+Based on an earlier patchset by Jon Rosen. See links below.
+
+I believe this issue goes back to the introduction of tpacket_rcv,
+which predates git history.
+
+Link: https://www.mail-archive.com/netdev@vger.kernel.org/msg237222.html
+Suggested-by: Jon Rosen <jrosen@cisco.com>
+Signed-off-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: Jon Rosen <jrosen@cisco.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/packet/af_packet.c | 21 +++++++++++++++++++++
+ net/packet/internal.h | 5 ++++-
+ 2 files changed, 25 insertions(+), 1 deletion(-)
+
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2204,6 +2204,7 @@ static int tpacket_rcv(struct sk_buff *s
+ struct timespec ts;
+ __u32 ts_status;
+ bool is_drop_n_account = false;
++ unsigned int slot_id = 0;
+ bool do_vnet = false;
+
+ /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
+@@ -2300,6 +2301,13 @@ static int tpacket_rcv(struct sk_buff *s
+ if (!h.raw)
+ goto drop_n_account;
+
++ if (po->tp_version <= TPACKET_V2) {
++ slot_id = po->rx_ring.head;
++ if (test_bit(slot_id, po->rx_ring.rx_owner_map))
++ goto drop_n_account;
++ __set_bit(slot_id, po->rx_ring.rx_owner_map);
++ }
++
+ if (do_vnet &&
+ virtio_net_hdr_from_skb(skb, h.raw + macoff -
+ sizeof(struct virtio_net_hdr),
+@@ -2405,7 +2413,10 @@ static int tpacket_rcv(struct sk_buff *s
+ #endif
+
+ if (po->tp_version <= TPACKET_V2) {
++ spin_lock(&sk->sk_receive_queue.lock);
+ __packet_set_status(po, h.raw, status);
++ __clear_bit(slot_id, po->rx_ring.rx_owner_map);
++ spin_unlock(&sk->sk_receive_queue.lock);
+ sk->sk_data_ready(sk);
+ } else {
+ prb_clear_blk_fill_status(&po->rx_ring);
+@@ -4298,6 +4309,7 @@ static int packet_set_ring(struct sock *
+ {
+ struct pgv *pg_vec = NULL;
+ struct packet_sock *po = pkt_sk(sk);
++ unsigned long *rx_owner_map = NULL;
+ int was_running, order = 0;
+ struct packet_ring_buffer *rb;
+ struct sk_buff_head *rb_queue;
+@@ -4383,6 +4395,12 @@ static int packet_set_ring(struct sock *
+ }
+ break;
+ default:
++ if (!tx_ring) {
++ rx_owner_map = bitmap_alloc(req->tp_frame_nr,
++ GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
++ if (!rx_owner_map)
++ goto out_free_pg_vec;
++ }
+ break;
+ }
+ }
+@@ -4412,6 +4430,8 @@ static int packet_set_ring(struct sock *
+ err = 0;
+ spin_lock_bh(&rb_queue->lock);
+ swap(rb->pg_vec, pg_vec);
++ if (po->tp_version <= TPACKET_V2)
++ swap(rb->rx_owner_map, rx_owner_map);
+ rb->frame_max = (req->tp_frame_nr - 1);
+ rb->head = 0;
+ rb->frame_size = req->tp_frame_size;
+@@ -4443,6 +4463,7 @@ static int packet_set_ring(struct sock *
+ }
+
+ out_free_pg_vec:
++ bitmap_free(rx_owner_map);
+ if (pg_vec)
+ free_pg_vec(pg_vec, order, req->tp_block_nr);
+ out:
+--- a/net/packet/internal.h
++++ b/net/packet/internal.h
+@@ -70,7 +70,10 @@ struct packet_ring_buffer {
+
+ unsigned int __percpu *pending_refcnt;
+
+- struct tpacket_kbdq_core prb_bdqc;
++ union {
++ unsigned long *rx_owner_map;
++ struct tpacket_kbdq_core prb_bdqc;
++ };
+ };
+
+ extern struct mutex fanout_mutex;
--- /dev/null
+From foo@baz Sat 28 Mar 2020 12:53:42 PM CET
+From: Pawel Dembicki <paweldembicki@gmail.com>
+Date: Fri, 20 Mar 2020 21:46:14 +0100
+Subject: net: qmi_wwan: add support for ASKEY WWHC050
+
+From: Pawel Dembicki <paweldembicki@gmail.com>
+
+[ Upstream commit 12a5ba5a1994568d4ceaff9e78c6b0329d953386 ]
+
+ASKEY WWHC050 is an mPCIe LTE modem.
+The OEM configuration states:
+
+T: Bus=01 Lev=01 Prnt=01 Port=00 Cnt=01 Dev#= 2 Spd=480 MxCh= 0
+D: Ver= 2.10 Cls=00(>ifc ) Sub=00 Prot=00 MxPS=64 #Cfgs= 1
+P: Vendor=1690 ProdID=7588 Rev=ff.ff
+S: Manufacturer=Android
+S: Product=Android
+S: SerialNumber=813f0eef6e6e
+C:* #Ifs= 6 Cfg#= 1 Atr=80 MxPwr=500mA
+I:* If#= 0 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=ff Driver=option
+E: Ad=81(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=01(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:* If#= 1 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=42 Prot=01 Driver=(none)
+E: Ad=02(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=82(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:* If#= 2 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E: Ad=84(I) Atr=03(Int.) MxPS= 10 Ivl=32ms
+E: Ad=83(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=03(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:* If#= 3 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E: Ad=86(I) Atr=03(Int.) MxPS= 10 Ivl=32ms
+E: Ad=85(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=04(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:* If#= 4 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=ff Driver=qmi_wwan
+E: Ad=88(I) Atr=03(Int.) MxPS= 8 Ivl=32ms
+E: Ad=87(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=05(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:* If#= 5 Alt= 0 #EPs= 2 Cls=08(stor.) Sub=06 Prot=50 Driver=(none)
+E: Ad=89(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=06(O) Atr=02(Bulk) MxPS= 512 Ivl=125us
+
+Tested on the OpenWrt distribution.
+
+Signed-off-by: Cezary Jackiewicz <cezary@eko.one.pl>
+Signed-off-by: Pawel Dembicki <paweldembicki@gmail.com>
+Acked-by: Bjørn Mork <bjorn@mork.no>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/qmi_wwan.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1139,6 +1139,7 @@ static const struct usb_device_id produc
+ {QMI_FIXED_INTF(0x1435, 0xd181, 4)}, /* Wistron NeWeb D18Q1 */
+ {QMI_FIXED_INTF(0x1435, 0xd181, 5)}, /* Wistron NeWeb D18Q1 */
+ {QMI_QUIRK_SET_DTR(0x1508, 0x1001, 4)}, /* Fibocom NL668 series */
++ {QMI_FIXED_INTF(0x1690, 0x7588, 4)}, /* ASKEY WWHC050 */
+ {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
+ {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */
+ {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */
--- /dev/null
+From foo@baz Sat 28 Mar 2020 12:53:42 PM CET
+From: Emil Renner Berthing <kernel@esmil.dk>
+Date: Sat, 21 Mar 2020 15:36:19 +0100
+Subject: net: stmmac: dwmac-rk: fix error path in rk_gmac_probe
+
+From: Emil Renner Berthing <kernel@esmil.dk>
+
+[ Upstream commit 9de9aa487daff7a5c73434c24269b44ed6a428e6 ]
+
+Make sure we also clean up the devicetree-related configuration
+when clock init fails.
+
+Fixes: fecd4d7eef8b ("net: stmmac: dwmac-rk: Add integrated PHY support")
+Signed-off-by: Emil Renner Berthing <kernel@esmil.dk>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+@@ -1362,7 +1362,7 @@ static int rk_gmac_probe(struct platform
+
+ ret = rk_gmac_clk_init(plat_dat);
+ if (ret)
+- return ret;
++ goto err_remove_config_dt;
+
+ ret = rk_gmac_powerup(plat_dat->bsp_priv);
+ if (ret)
--- /dev/null
+From foo@baz Sat 28 Mar 2020 12:53:42 PM CET
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Fri, 13 Mar 2020 22:29:54 -0700
+Subject: net_sched: cls_route: remove the right filter from hashtable
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+[ Upstream commit ef299cc3fa1a9e1288665a9fdc8bff55629fd359 ]
+
+route4_change() allocates a new filter and copies values from
+the old one. After the new filter is inserted into the hash
+table, the old filter should be removed and freed, as the final
+step of the update.
+
+However, the current code mistakenly removes the new one. This
+looks apparently wrong to me, and it causes double "free" and
+use-after-free too, as reported by syzbot.
+
+Reported-and-tested-by: syzbot+f9b32aaacd60305d9687@syzkaller.appspotmail.com
+Reported-and-tested-by: syzbot+2f8c233f131943d6056d@syzkaller.appspotmail.com
+Reported-and-tested-by: syzbot+9c2df9fd5e9445b74e01@syzkaller.appspotmail.com
+Fixes: 1109c00547fc ("net: sched: RCU cls_route")
+Cc: Jamal Hadi Salim <jhs@mojatatu.com>
+Cc: Jiri Pirko <jiri@resnulli.us>
+Cc: John Fastabend <john.fastabend@gmail.com>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/cls_route.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/sched/cls_route.c
++++ b/net/sched/cls_route.c
+@@ -539,8 +539,8 @@ static int route4_change(struct net *net
+ fp = &b->ht[h];
+ for (pfp = rtnl_dereference(*fp); pfp;
+ fp = &pfp->next, pfp = rtnl_dereference(*fp)) {
+- if (pfp == f) {
+- *fp = f->next;
++ if (pfp == fold) {
++ rcu_assign_pointer(*fp, fold->next);
+ break;
+ }
+ }
--- /dev/null
+From foo@baz Sat 28 Mar 2020 12:53:42 PM CET
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Wed, 11 Mar 2020 22:42:28 -0700
+Subject: net_sched: keep alloc_hash updated after hash allocation
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+[ Upstream commit 0d1c3530e1bd38382edef72591b78e877e0edcd3 ]
+
+In commit 599be01ee567 ("net_sched: fix an OOB access in cls_tcindex")
+I moved cp->hash calculation before the first
+tcindex_alloc_perfect_hash(), but cp->alloc_hash is left untouched.
+This difference could lead to another out-of-bounds access.
+
+cp->alloc_hash should always be the size actually allocated; we should
+update it after this tcindex_alloc_perfect_hash().
+
+Reported-and-tested-by: syzbot+dcc34d54d68ef7d2d53d@syzkaller.appspotmail.com
+Reported-and-tested-by: syzbot+c72da7b9ed57cde6fca2@syzkaller.appspotmail.com
+Fixes: 599be01ee567 ("net_sched: fix an OOB access in cls_tcindex")
+Cc: Jamal Hadi Salim <jhs@mojatatu.com>
+Cc: Jiri Pirko <jiri@resnulli.us>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/cls_tcindex.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/sched/cls_tcindex.c
++++ b/net/sched/cls_tcindex.c
+@@ -375,6 +375,7 @@ tcindex_set_parms(struct net *net, struc
+
+ if (tcindex_alloc_perfect_hash(cp) < 0)
+ goto errout;
++ cp->alloc_hash = cp->hash;
+ for (i = 0; i < min(cp->hash, p->hash); i++)
+ cp->perfect[i].res = p->perfect[i].res;
+ balloc = 1;
--- /dev/null
+From foo@baz Sat 28 Mar 2020 12:53:42 PM CET
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Fri, 20 Mar 2020 16:21:17 +0300
+Subject: NFC: fdp: Fix a signedness bug in fdp_nci_send_patch()
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+[ Upstream commit 0dcdf9f64028ec3b75db6b691560f8286f3898bf ]
+
+The nci_conn_max_data_pkt_payload_size() function sometimes returns
+-EPROTO so "max_size" needs to be signed for the error handling to
+work. We can make "payload_size" an int as well.
+
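+A tiny user-space illustration of the arithmetic (not driver code):
+
+        #include <stdio.h>
+
+        int main(void)
+        {
+                unsigned char max_size = -71;   /* -EPROTO stored in a u8 */
+
+                /* prints 185; "max_size < 0" can never be true, so the
+                 * error would be treated as a valid payload size
+                 */
+                printf("%d\n", max_size);
+                return 0;
+        }
+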
+Fixes: a06347c04c13 ("NFC: Add Intel Fields Peak NFC solution driver")
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/nfc/fdp/fdp.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/drivers/nfc/fdp/fdp.c
++++ b/drivers/nfc/fdp/fdp.c
+@@ -192,7 +192,7 @@ static int fdp_nci_send_patch(struct nci
+ const struct firmware *fw;
+ struct sk_buff *skb;
+ unsigned long len;
+- u8 max_size, payload_size;
++ int max_size, payload_size;
+ int rc = 0;
+
+ if ((type == NCI_PATCH_TYPE_OTP && !info->otp_patch) ||
+@@ -215,8 +215,7 @@ static int fdp_nci_send_patch(struct nci
+
+ while (len) {
+
+- payload_size = min_t(unsigned long, (unsigned long) max_size,
+- len);
++ payload_size = min_t(unsigned long, max_size, len);
+
+ skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + payload_size),
+ GFP_KERNEL);
staging-greybus-loopback_test-fix-potential-path-truncation.patch
staging-greybus-loopback_test-fix-potential-path-truncations.patch
revert-drm-dp_mst-skip-validating-ports-during-destruction-just-ref.patch
+hsr-fix-general-protection-fault-in-hsr_addr_is_self.patch
+macsec-restrict-to-ethernet-devices.patch
+net-dsa-fix-duplicate-frames-flooded-by-learning.patch
+net-mvneta-fix-the-case-where-the-last-poll-did-not-process-all-rx.patch
+net-packet-tpacket_rcv-avoid-a-producer-race-condition.patch
+net-qmi_wwan-add-support-for-askey-wwhc050.patch
+net_sched-cls_route-remove-the-right-filter-from-hashtable.patch
+net_sched-keep-alloc_hash-updated-after-hash-allocation.patch
+net-stmmac-dwmac-rk-fix-error-path-in-rk_gmac_probe.patch
+nfc-fdp-fix-a-signedness-bug-in-fdp_nci_send_patch.patch
+slcan-not-call-free_netdev-before-rtnl_unlock-in-slcan_open.patch
+bnxt_en-fix-memory-leaks-in-bnxt_dcbnl_ieee_getets.patch
+net-dsa-mt7530-change-the-link-bit-to-reflect-the-link-status.patch
+vxlan-check-return-value-of-gro_cells_init.patch
+hsr-use-rcu_read_lock-in-hsr_get_node_-list-status.patch
+hsr-add-restart-routine-into-hsr_get_node_list.patch
+hsr-set-.netnsok-flag.patch
--- /dev/null
+From foo@baz Sat 28 Mar 2020 12:53:42 PM CET
+From: Oliver Hartkopp <socketcan@hartkopp.net>
+Date: Sat, 21 Mar 2020 14:08:29 +0100
+Subject: slcan: not call free_netdev before rtnl_unlock in slcan_open
+
+From: Oliver Hartkopp <socketcan@hartkopp.net>
+
+[ Upstream commit 2091a3d42b4f339eaeed11228e0cbe9d4f92f558 ]
+
+As described in the comment before netdev_run_todo(), we cannot call
+free_netdev() before rtnl_unlock(); fix it by reordering the code.
+
+This patch is a 1:1 copy of upstream slip.c commit f596c87005f7
+("slip: not call free_netdev before rtnl_unlock in slip_open").
+
+Reported-by: yangerkun <yangerkun@huawei.com>
+Signed-off-by: Oliver Hartkopp <socketcan@hartkopp.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/can/slcan.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/net/can/slcan.c
++++ b/drivers/net/can/slcan.c
+@@ -621,7 +621,10 @@ err_free_chan:
+ tty->disc_data = NULL;
+ clear_bit(SLF_INUSE, &sl->flags);
+ slc_free_netdev(sl->dev);
++ /* do not call free_netdev before rtnl_unlock */
++ rtnl_unlock();
+ free_netdev(sl->dev);
++ return err;
+
+ err_exit:
+ rtnl_unlock();
--- /dev/null
+From foo@baz Sat 28 Mar 2020 10:29:54 AM CET
+From: Taehee Yoo <ap420073@gmail.com>
+Date: Wed, 18 Mar 2020 13:28:09 +0000
+Subject: vxlan: check return value of gro_cells_init()
+
+From: Taehee Yoo <ap420073@gmail.com>
+
+[ Upstream commit 384d91c267e621e0926062cfb3f20cb72dc16928 ]
+
+gro_cells_init() returns an error if memory allocation fails.
+But the vxlan module doesn't check the return value of gro_cells_init().
+
+Fixes: 58ce31cca1ff ("vxlan: GRO support at tunnel layer")
+Signed-off-by: Taehee Yoo <ap420073@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/vxlan.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -2454,10 +2454,19 @@ static void vxlan_vs_add_dev(struct vxla
+ /* Setup stats when device is created */
+ static int vxlan_init(struct net_device *dev)
+ {
++ struct vxlan_dev *vxlan = netdev_priv(dev);
++ int err;
++
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ if (!dev->tstats)
+ return -ENOMEM;
+
++ err = gro_cells_init(&vxlan->gro_cells, dev);
++ if (err) {
++ free_percpu(dev->tstats);
++ return err;
++ }
++
+ return 0;
+ }
+
+@@ -2717,8 +2726,6 @@ static void vxlan_setup(struct net_devic
+
+ vxlan->dev = dev;
+
+- gro_cells_init(&vxlan->gro_cells, dev);
+-
+ for (h = 0; h < FDB_HASH_SIZE; ++h)
+ INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
+ }