--- /dev/null
+From foo@baz Sat Dec 19 12:04:16 PM CET 2020
+From: Sergej Bauer <sbauer@blackbox.su>
+Date: Mon, 2 Nov 2020 01:35:55 +0300
+Subject: lan743x: fix for potential NULL pointer dereference with bare card
+
+From: Sergej Bauer <sbauer@blackbox.su>
+
+[ Upstream commit e9e13b6adc338be1eb88db87bcb392696144bd02 ]
+
+This is the 3rd revision of the patch fix for potential null pointer dereference
+with lan743x card.
+
+The simplest way to reproduce: boot with bare lan743x and issue "ethtool ethN"
+command where ethN is the interface with lan743x card. Example:
+
+$ sudo ethtool eth7
+dmesg:
+[ 103.510336] BUG: kernel NULL pointer dereference, address: 0000000000000340
+...
+[ 103.510836] RIP: 0010:phy_ethtool_get_wol+0x5/0x30 [libphy]
+...
+[ 103.511629] Call Trace:
+[ 103.511666] lan743x_ethtool_get_wol+0x21/0x40 [lan743x]
+[ 103.511724] dev_ethtool+0x1507/0x29d0
+[ 103.511769] ? avc_has_extended_perms+0x17f/0x440
+[ 103.511820] ? tomoyo_init_request_info+0x84/0x90
+[ 103.511870] ? tomoyo_path_number_perm+0x68/0x1e0
+[ 103.511919] ? tty_insert_flip_string_fixed_flag+0x82/0xe0
+[ 103.511973] ? inet_ioctl+0x187/0x1d0
+[ 103.512016] dev_ioctl+0xb5/0x560
+[ 103.512055] sock_do_ioctl+0xa0/0x140
+[ 103.512098] sock_ioctl+0x2cb/0x3c0
+[ 103.512139] __x64_sys_ioctl+0x84/0xc0
+[ 103.512183] do_syscall_64+0x33/0x80
+[ 103.512224] entry_SYSCALL_64_after_hwframe+0x44/0xa9
+[ 103.512274] RIP: 0033:0x7f54a9cba427
+...
+
+Previous versions can be found at:
+v1:
+initial version
+ https://lkml.org/lkml/2020/10/28/921
+
+v2:
+do not return from lan743x_ethtool_set_wol if netdev->phydev == NULL, just skip
+the call of phy_ethtool_set_wol() instead.
+ https://lkml.org/lkml/2020/10/31/380
+
+v3:
+in function lan743x_ethtool_set_wol:
+use ternary operator instead of if-else sentence (review by Markus Elfring)
+return -ENETDOWN instead of -EIO (review by Andrew Lunn)
+
+Signed-off-by: Sergej Bauer <sbauer@blackbox.su>
+
+Link: https://lore.kernel.org/r/20201101223556.16116-1-sbauer@blackbox.su
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/microchip/lan743x_ethtool.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
++++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
+@@ -659,7 +659,9 @@ static void lan743x_ethtool_get_wol(stru
+
+ wol->supported = 0;
+ wol->wolopts = 0;
+- phy_ethtool_get_wol(netdev->phydev, wol);
++
++ if (netdev->phydev)
++ phy_ethtool_get_wol(netdev->phydev, wol);
+
+ wol->supported |= WAKE_BCAST | WAKE_UCAST | WAKE_MCAST |
+ WAKE_MAGIC | WAKE_PHY | WAKE_ARP;
+@@ -688,9 +690,8 @@ static int lan743x_ethtool_set_wol(struc
+
+ device_set_wakeup_enable(&adapter->pdev->dev, (bool)wol->wolopts);
+
+- phy_ethtool_set_wol(netdev->phydev, wol);
+-
+- return 0;
++ return netdev->phydev ? phy_ethtool_set_wol(netdev->phydev, wol)
++ : -ENETDOWN;
+ }
+ #endif /* CONFIG_PM */
+
--- /dev/null
+From foo@baz Sat Dec 19 12:04:16 PM CET 2020
+From: Eric Dumazet <edumazet@google.com>
+Date: Fri, 4 Dec 2020 08:24:28 -0800
+Subject: mac80211: mesh: fix mesh_pathtbl_init() error path
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 905b2032fa424f253d9126271439cc1db2b01130 ]
+
+If tbl_mpp can not be allocated, we call mesh_table_free(tbl_path)
+while tbl_path rhashtable has not yet been initialized, which causes
+panics.
+
+Simply factorize the rhashtable_init() call into mesh_table_alloc()
+
+WARNING: CPU: 1 PID: 8474 at kernel/workqueue.c:3040 __flush_work kernel/workqueue.c:3040 [inline]
+WARNING: CPU: 1 PID: 8474 at kernel/workqueue.c:3040 __cancel_work_timer+0x514/0x540 kernel/workqueue.c:3136
+Modules linked in:
+CPU: 1 PID: 8474 Comm: syz-executor663 Not tainted 5.10.0-rc6-syzkaller #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+RIP: 0010:__flush_work kernel/workqueue.c:3040 [inline]
+RIP: 0010:__cancel_work_timer+0x514/0x540 kernel/workqueue.c:3136
+Code: 5d c3 e8 bf ae 29 00 0f 0b e9 f0 fd ff ff e8 b3 ae 29 00 0f 0b 43 80 3c 3e 00 0f 85 31 ff ff ff e9 34 ff ff ff e8 9c ae 29 00 <0f> 0b e9 dc fe ff ff 89 e9 80 e1 07 80 c1 03 38 c1 0f 8c 7d fd ff
+RSP: 0018:ffffc9000165f5a0 EFLAGS: 00010293
+RAX: ffffffff814b7064 RBX: 0000000000000001 RCX: ffff888021c80000
+RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000
+RBP: ffff888024039ca0 R08: dffffc0000000000 R09: fffffbfff1dd3e64
+R10: fffffbfff1dd3e64 R11: 0000000000000000 R12: 1ffff920002cbebd
+R13: ffff888024039c88 R14: 1ffff11004807391 R15: dffffc0000000000
+FS: 0000000001347880(0000) GS:ffff8880b9d00000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 0000000020000140 CR3: 000000002cc0a000 CR4: 00000000001506e0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+Call Trace:
+ rhashtable_free_and_destroy+0x25/0x9c0 lib/rhashtable.c:1137
+ mesh_table_free net/mac80211/mesh_pathtbl.c:69 [inline]
+ mesh_pathtbl_init+0x287/0x2e0 net/mac80211/mesh_pathtbl.c:785
+ ieee80211_mesh_init_sdata+0x2ee/0x530 net/mac80211/mesh.c:1591
+ ieee80211_setup_sdata+0x733/0xc40 net/mac80211/iface.c:1569
+ ieee80211_if_add+0xd5c/0x1cd0 net/mac80211/iface.c:1987
+ ieee80211_add_iface+0x59/0x130 net/mac80211/cfg.c:125
+ rdev_add_virtual_intf net/wireless/rdev-ops.h:45 [inline]
+ nl80211_new_interface+0x563/0xb40 net/wireless/nl80211.c:3855
+ genl_family_rcv_msg_doit net/netlink/genetlink.c:739 [inline]
+ genl_family_rcv_msg net/netlink/genetlink.c:783 [inline]
+ genl_rcv_msg+0xe4e/0x1280 net/netlink/genetlink.c:800
+ netlink_rcv_skb+0x190/0x3a0 net/netlink/af_netlink.c:2494
+ genl_rcv+0x24/0x40 net/netlink/genetlink.c:811
+ netlink_unicast_kernel net/netlink/af_netlink.c:1304 [inline]
+ netlink_unicast+0x780/0x930 net/netlink/af_netlink.c:1330
+ netlink_sendmsg+0x9a8/0xd40 net/netlink/af_netlink.c:1919
+ sock_sendmsg_nosec net/socket.c:651 [inline]
+ sock_sendmsg net/socket.c:671 [inline]
+ ____sys_sendmsg+0x519/0x800 net/socket.c:2353
+ ___sys_sendmsg net/socket.c:2407 [inline]
+ __sys_sendmsg+0x2b1/0x360 net/socket.c:2440
+ do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+Fixes: 60854fd94573 ("mac80211: mesh: convert path table to rhashtable")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Reviewed-by: Johannes Berg <johannes@sipsolutions.net>
+Link: https://lore.kernel.org/r/20201204162428.2583119-1-eric.dumazet@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mac80211/mesh_pathtbl.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/net/mac80211/mesh_pathtbl.c
++++ b/net/mac80211/mesh_pathtbl.c
+@@ -63,6 +63,7 @@ static struct mesh_table *mesh_table_all
+ atomic_set(&newtbl->entries, 0);
+ spin_lock_init(&newtbl->gates_lock);
+ spin_lock_init(&newtbl->walk_lock);
++ rhashtable_init(&newtbl->rhead, &mesh_rht_params);
+
+ return newtbl;
+ }
+@@ -786,9 +787,6 @@ int mesh_pathtbl_init(struct ieee80211_s
+ goto free_path;
+ }
+
+- rhashtable_init(&tbl_path->rhead, &mesh_rht_params);
+- rhashtable_init(&tbl_mpp->rhead, &mesh_rht_params);
+-
+ sdata->u.mesh.mesh_paths = tbl_path;
+ sdata->u.mesh.mpp_paths = tbl_mpp;
+
--- /dev/null
+From foo@baz Sat Dec 19 12:04:16 PM CET 2020
+From: Zhang Changzhong <zhangchangzhong@huawei.com>
+Date: Fri, 4 Dec 2020 16:48:56 +0800
+Subject: net: bridge: vlan: fix error return code in __vlan_add()
+
+From: Zhang Changzhong <zhangchangzhong@huawei.com>
+
+[ Upstream commit ee4f52a8de2c6f78b01f10b4c330867d88c1653a ]
+
+Fix to return a negative error code from the error handling
+case instead of 0, as done elsewhere in this function.
+
+Fixes: f8ed289fab84 ("bridge: vlan: use br_vlan_(get|put)_master to deal with refcounts")
+Reported-by: Hulk Robot <hulkci@huawei.com>
+Signed-off-by: Zhang Changzhong <zhangchangzhong@huawei.com>
+Acked-by: Nikolay Aleksandrov <nikolay@nvidia.com>
+Link: https://lore.kernel.org/r/1607071737-33875-1-git-send-email-zhangchangzhong@huawei.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/bridge/br_vlan.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/net/bridge/br_vlan.c
++++ b/net/bridge/br_vlan.c
+@@ -242,8 +242,10 @@ static int __vlan_add(struct net_bridge_
+ }
+
+ masterv = br_vlan_get_master(br, v->vid);
+- if (!masterv)
++ if (!masterv) {
++ err = -ENOMEM;
+ goto out_filt;
++ }
+ v->brvlan = masterv;
+ v->stats = masterv->stats;
+ } else {
--- /dev/null
+From foo@baz Sat Dec 19 12:04:16 PM CET 2020
+From: Moshe Shemesh <moshe@mellanox.com>
+Date: Wed, 9 Dec 2020 15:03:38 +0200
+Subject: net/mlx4_en: Avoid scheduling restart task if it is already running
+
+From: Moshe Shemesh <moshe@mellanox.com>
+
+[ Upstream commit fed91613c9dd455dd154b22fa8e11b8526466082 ]
+
+Add restarting state flag to avoid scheduling another restart task while
+such task is already running. Change task name from watchdog_task to
+restart_task to better fit the task role.
+
+Fixes: 1e338db56e5a ("mlx4_en: Fix a race at restart task")
+Signed-off-by: Moshe Shemesh <moshe@mellanox.com>
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 20 +++++++++++++-------
+ drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 7 ++++++-
+ 2 files changed, 19 insertions(+), 8 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+@@ -1384,8 +1384,10 @@ static void mlx4_en_tx_timeout(struct ne
+ }
+
+ priv->port_stats.tx_timeout++;
+- en_dbg(DRV, priv, "Scheduling watchdog\n");
+- queue_work(mdev->workqueue, &priv->watchdog_task);
++ if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state)) {
++ en_dbg(DRV, priv, "Scheduling port restart\n");
++ queue_work(mdev->workqueue, &priv->restart_task);
++ }
+ }
+
+
+@@ -1835,6 +1837,7 @@ int mlx4_en_start_port(struct net_device
+ local_bh_enable();
+ }
+
++ clear_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state);
+ netif_tx_start_all_queues(dev);
+ netif_device_attach(dev);
+
+@@ -2005,7 +2008,7 @@ void mlx4_en_stop_port(struct net_device
+ static void mlx4_en_restart(struct work_struct *work)
+ {
+ struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
+- watchdog_task);
++ restart_task);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct net_device *dev = priv->dev;
+
+@@ -2387,7 +2390,7 @@ static int mlx4_en_change_mtu(struct net
+ if (netif_running(dev)) {
+ mutex_lock(&mdev->state_lock);
+ if (!mdev->device_up) {
+- /* NIC is probably restarting - let watchdog task reset
++ /* NIC is probably restarting - let restart task reset
+ * the port */
+ en_dbg(DRV, priv, "Change MTU called with card down!?\n");
+ } else {
+@@ -2396,7 +2399,9 @@ static int mlx4_en_change_mtu(struct net
+ if (err) {
+ en_err(priv, "Failed restarting port:%d\n",
+ priv->port);
+- queue_work(mdev->workqueue, &priv->watchdog_task);
++ if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING,
++ &priv->state))
++ queue_work(mdev->workqueue, &priv->restart_task);
+ }
+ }
+ mutex_unlock(&mdev->state_lock);
+@@ -2882,7 +2887,8 @@ static int mlx4_xdp_set(struct net_devic
+ if (err) {
+ en_err(priv, "Failed starting port %d for XDP change\n",
+ priv->port);
+- queue_work(mdev->workqueue, &priv->watchdog_task);
++ if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state))
++ queue_work(mdev->workqueue, &priv->restart_task);
+ }
+ }
+
+@@ -3280,7 +3286,7 @@ int mlx4_en_init_netdev(struct mlx4_en_d
+ priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
+ spin_lock_init(&priv->stats_lock);
+ INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
+- INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
++ INIT_WORK(&priv->restart_task, mlx4_en_restart);
+ INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
+ INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
+ INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
+--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
++++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+@@ -530,6 +530,10 @@ struct mlx4_en_stats_bitmap {
+ struct mutex mutex; /* for mutual access to stats bitmap */
+ };
+
++enum {
++ MLX4_EN_STATE_FLAG_RESTARTING,
++};
++
+ struct mlx4_en_priv {
+ struct mlx4_en_dev *mdev;
+ struct mlx4_en_port_profile *prof;
+@@ -595,7 +599,7 @@ struct mlx4_en_priv {
+ struct mlx4_en_cq *rx_cq[MAX_RX_RINGS];
+ struct mlx4_qp drop_qp;
+ struct work_struct rx_mode_task;
+- struct work_struct watchdog_task;
++ struct work_struct restart_task;
+ struct work_struct linkstate_task;
+ struct delayed_work stats_task;
+ struct delayed_work service_task;
+@@ -643,6 +647,7 @@ struct mlx4_en_priv {
+ u32 pflags;
+ u8 rss_key[MLX4_EN_RSS_KEY_SIZE];
+ u8 rss_hash_fn;
++ unsigned long state;
+ };
+
+ enum mlx4_en_wol {
--- /dev/null
+From foo@baz Sat Dec 19 11:44:48 AM CET 2020
+From: Moshe Shemesh <moshe@mellanox.com>
+Date: Wed, 9 Dec 2020 15:03:39 +0200
+Subject: net/mlx4_en: Handle TX error CQE
+
+From: Moshe Shemesh <moshe@mellanox.com>
+
+[ Upstream commit ba603d9d7b1215c72513d7c7aa02b6775fd4891b ]
+
+In case error CQE was found while polling TX CQ, the QP is in error
+state and all posted WQEs will generate error CQEs without any data
+transmitted. Fix it by reopening the channels, via same method used for
+TX timeout handling.
+
+In addition add some more info on error CQE and WQE for debug.
+
+Fixes: bd2f631d7c60 ("net/mlx4_en: Notify user when TX ring in error state")
+Signed-off-by: Moshe Shemesh <moshe@mellanox.com>
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 1
+ drivers/net/ethernet/mellanox/mlx4/en_tx.c | 40 ++++++++++++++++++++-----
+ drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 5 +++
+ 3 files changed, 39 insertions(+), 7 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+@@ -1741,6 +1741,7 @@ int mlx4_en_start_port(struct net_device
+ mlx4_en_deactivate_cq(priv, cq);
+ goto tx_err;
+ }
++ clear_bit(MLX4_EN_TX_RING_STATE_RECOVERING, &tx_ring->state);
+ if (t != TX_XDP) {
+ tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
+ tx_ring->recycle_ring = NULL;
+--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+@@ -385,6 +385,35 @@ int mlx4_en_free_tx_buf(struct net_devic
+ return cnt;
+ }
+
++static void mlx4_en_handle_err_cqe(struct mlx4_en_priv *priv, struct mlx4_err_cqe *err_cqe,
++ u16 cqe_index, struct mlx4_en_tx_ring *ring)
++{
++ struct mlx4_en_dev *mdev = priv->mdev;
++ struct mlx4_en_tx_info *tx_info;
++ struct mlx4_en_tx_desc *tx_desc;
++ u16 wqe_index;
++ int desc_size;
++
++ en_err(priv, "CQE error - cqn 0x%x, ci 0x%x, vendor syndrome: 0x%x syndrome: 0x%x\n",
++ ring->sp_cqn, cqe_index, err_cqe->vendor_err_syndrome, err_cqe->syndrome);
++ print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, err_cqe, sizeof(*err_cqe),
++ false);
++
++ wqe_index = be16_to_cpu(err_cqe->wqe_index) & ring->size_mask;
++ tx_info = &ring->tx_info[wqe_index];
++ desc_size = tx_info->nr_txbb << LOG_TXBB_SIZE;
++ en_err(priv, "Related WQE - qpn 0x%x, wqe index 0x%x, wqe size 0x%x\n", ring->qpn,
++ wqe_index, desc_size);
++ tx_desc = ring->buf + (wqe_index << LOG_TXBB_SIZE);
++ print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, tx_desc, desc_size, false);
++
++ if (test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state))
++ return;
++
++ en_err(priv, "Scheduling port restart\n");
++ queue_work(mdev->workqueue, &priv->restart_task);
++}
++
+ bool mlx4_en_process_tx_cq(struct net_device *dev,
+ struct mlx4_en_cq *cq, int napi_budget)
+ {
+@@ -431,13 +460,10 @@ bool mlx4_en_process_tx_cq(struct net_de
+ dma_rmb();
+
+ if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
+- MLX4_CQE_OPCODE_ERROR)) {
+- struct mlx4_err_cqe *cqe_err = (struct mlx4_err_cqe *)cqe;
+-
+- en_err(priv, "CQE error - vendor syndrome: 0x%x syndrome: 0x%x\n",
+- cqe_err->vendor_err_syndrome,
+- cqe_err->syndrome);
+- }
++ MLX4_CQE_OPCODE_ERROR))
++ if (!test_and_set_bit(MLX4_EN_TX_RING_STATE_RECOVERING, &ring->state))
++ mlx4_en_handle_err_cqe(priv, (struct mlx4_err_cqe *)cqe, index,
++ ring);
+
+ /* Skip over last polled CQE */
+ new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
+--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
++++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+@@ -271,6 +271,10 @@ struct mlx4_en_page_cache {
+ } buf[MLX4_EN_CACHE_SIZE];
+ };
+
++enum {
++ MLX4_EN_TX_RING_STATE_RECOVERING,
++};
++
+ struct mlx4_en_priv;
+
+ struct mlx4_en_tx_ring {
+@@ -317,6 +321,7 @@ struct mlx4_en_tx_ring {
+ * Only queue_stopped might be used if BQL is not properly working.
+ */
+ unsigned long queue_stopped;
++ unsigned long state;
+ struct mlx4_hwq_resources sp_wqres;
+ struct mlx4_qp sp_qp;
+ struct mlx4_qp_context sp_context;
--- /dev/null
+From foo@baz Sat Dec 19 11:44:48 AM CET 2020
+From: Fugang Duan <fugang.duan@nxp.com>
+Date: Mon, 7 Dec 2020 18:51:40 +0800
+Subject: net: stmmac: delete the eee_ctrl_timer after napi disabled
+
+From: Fugang Duan <fugang.duan@nxp.com>
+
+[ Upstream commit 5f58591323bf3f342920179f24515935c4b5fd60 ]
+
+There is a chance to re-enable the eee_ctrl_timer and fire the timer
+in the napi callback after deleting the timer in .stmmac_release(), which
+leads to accessing eee registers in the timer function after clocks
+are disabled and then causes a system hang. Found this issue when doing
+suspend/resume and reboot stress tests.
+
+It is safe to delete the timer after napi is disabled and lpi mode is disabled.
+
+Fixes: d765955d2ae0b ("stmmac: add the Energy Efficient Ethernet support")
+Signed-off-by: Fugang Duan <fugang.duan@nxp.com>
+Signed-off-by: Joakim Zhang <qiangqing.zhang@nxp.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 13 ++++++++++---
+ 1 file changed, 10 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -2702,9 +2702,6 @@ static int stmmac_release(struct net_dev
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 chan;
+
+- if (priv->eee_enabled)
+- del_timer_sync(&priv->eee_ctrl_timer);
+-
+ /* Stop and disconnect the PHY */
+ if (dev->phydev) {
+ phy_stop(dev->phydev);
+@@ -2723,6 +2720,11 @@ static int stmmac_release(struct net_dev
+ if (priv->lpi_irq > 0)
+ free_irq(priv->lpi_irq, dev);
+
++ if (priv->eee_enabled) {
++ priv->tx_path_in_lpi_mode = false;
++ del_timer_sync(&priv->eee_ctrl_timer);
++ }
++
+ /* Stop TX/RX DMA and clear the descriptors */
+ stmmac_stop_all_dma(priv);
+
+@@ -4510,6 +4512,11 @@ int stmmac_suspend(struct device *dev)
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+ del_timer_sync(&priv->tx_queue[chan].txtimer);
+
++ if (priv->eee_enabled) {
++ priv->tx_path_in_lpi_mode = false;
++ del_timer_sync(&priv->eee_ctrl_timer);
++ }
++
+ /* Stop TX/RX DMA */
+ stmmac_stop_all_dma(priv);
+
--- /dev/null
+From foo@baz Sat Dec 19 11:44:48 AM CET 2020
+From: Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+Date: Sat, 5 Dec 2020 22:32:07 +0100
+Subject: net: stmmac: dwmac-meson8b: fix mask definition of the m250_sel mux
+
+From: Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+
+[ Upstream commit 82ca4c922b8992013a238d65cf4e60cc33e12f36 ]
+
+The m250_sel mux clock uses bit 4 in the PRG_ETH0 register. Fix this by
+shifting the PRG_ETH0_CLK_M250_SEL_MASK accordingly as the "mask" in
+struct clk_mux expects the mask relative to the "shift" field in the
+same struct.
+
+While here, get rid of the PRG_ETH0_CLK_M250_SEL_SHIFT macro and use
+__ffs() to determine it from the existing PRG_ETH0_CLK_M250_SEL_MASK
+macro.
+
+Fixes: 566e8251625304 ("net: stmmac: add a glue driver for the Amlogic Meson 8b / GXBB DWMAC")
+Signed-off-by: Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+Reviewed-by: Jerome Brunet <jbrunet@baylibre.com>
+Link: https://lore.kernel.org/r/20201205213207.519341-1-martin.blumenstingl@googlemail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+@@ -35,7 +35,6 @@
+ #define PRG_ETH0_EXT_RMII_MODE 4
+
+ /* mux to choose between fclk_div2 (bit unset) and mpll2 (bit set) */
+-#define PRG_ETH0_CLK_M250_SEL_SHIFT 4
+ #define PRG_ETH0_CLK_M250_SEL_MASK GENMASK(4, 4)
+
+ #define PRG_ETH0_TXDLY_SHIFT 5
+@@ -149,8 +148,9 @@ static int meson8b_init_rgmii_tx_clk(str
+ }
+
+ clk_configs->m250_mux.reg = dwmac->regs + PRG_ETH0;
+- clk_configs->m250_mux.shift = PRG_ETH0_CLK_M250_SEL_SHIFT;
+- clk_configs->m250_mux.mask = PRG_ETH0_CLK_M250_SEL_MASK;
++ clk_configs->m250_mux.shift = __ffs(PRG_ETH0_CLK_M250_SEL_MASK);
++ clk_configs->m250_mux.mask = PRG_ETH0_CLK_M250_SEL_MASK >>
++ clk_configs->m250_mux.shift;
+ clk = meson8b_dwmac_register_clk(dwmac, "m250_sel", mux_parent_names,
+ MUX_CLK_NUM_PARENTS, &clk_mux_ops,
+ &clk_configs->m250_mux.hw);
--- /dev/null
+From foo@baz Sat Dec 19 12:04:16 PM CET 2020
+From: Fugang Duan <fugang.duan@nxp.com>
+Date: Mon, 7 Dec 2020 18:51:39 +0800
+Subject: net: stmmac: free tx skb buffer in stmmac_resume()
+
+From: Fugang Duan <fugang.duan@nxp.com>
+
+[ Upstream commit 4ec236c7c51f89abb0224a4da4a6b77f9beb6600 ]
+
+When do suspend/resume test, there have WARN_ON() log dump from
+stmmac_xmit() function, the code logic:
+ entry = tx_q->cur_tx;
+ first_entry = entry;
+ WARN_ON(tx_q->tx_skbuff[first_entry]);
+
+In normal case, tx_q->tx_skbuff[txq->cur_tx] should be NULL because
+the skb should be handled and freed in stmmac_tx_clean().
+
+But stmmac_resume() reset queue parameters like below, skb buffers
+may not be freed.
+ tx_q->cur_tx = 0;
+ tx_q->dirty_tx = 0;
+
+So free tx skb buffer in stmmac_resume() to avoid warning and
+memory leak.
+
+log:
+[ 46.139824] ------------[ cut here ]------------
+[ 46.144453] WARNING: CPU: 0 PID: 0 at drivers/net/ethernet/stmicro/stmmac/stmmac_main.c:3235 stmmac_xmit+0x7a0/0x9d0
+[ 46.154969] Modules linked in: crct10dif_ce vvcam(O) flexcan can_dev
+[ 46.161328] CPU: 0 PID: 0 Comm: swapper/0 Tainted: G O 5.4.24-2.1.0+g2ad925d15481 #1
+[ 46.170369] Hardware name: NXP i.MX8MPlus EVK board (DT)
+[ 46.175677] pstate: 80000005 (Nzcv daif -PAN -UAO)
+[ 46.180465] pc : stmmac_xmit+0x7a0/0x9d0
+[ 46.184387] lr : dev_hard_start_xmit+0x94/0x158
+[ 46.188913] sp : ffff800010003cc0
+[ 46.192224] x29: ffff800010003cc0 x28: ffff000177e2a100
+[ 46.197533] x27: ffff000176ef0840 x26: ffff000176ef0090
+[ 46.202842] x25: 0000000000000000 x24: 0000000000000000
+[ 46.208151] x23: 0000000000000003 x22: ffff8000119ddd30
+[ 46.213460] x21: ffff00017636f000 x20: ffff000176ef0cc0
+[ 46.218769] x19: 0000000000000003 x18: 0000000000000000
+[ 46.224078] x17: 0000000000000000 x16: 0000000000000000
+[ 46.229386] x15: 0000000000000079 x14: 0000000000000000
+[ 46.234695] x13: 0000000000000003 x12: 0000000000000003
+[ 46.240003] x11: 0000000000000010 x10: 0000000000000010
+[ 46.245312] x9 : ffff00017002b140 x8 : 0000000000000000
+[ 46.250621] x7 : ffff00017636f000 x6 : 0000000000000010
+[ 46.255930] x5 : 0000000000000001 x4 : ffff000176ef0000
+[ 46.261238] x3 : 0000000000000003 x2 : 00000000ffffffff
+[ 46.266547] x1 : ffff000177e2a000 x0 : 0000000000000000
+[ 46.271856] Call trace:
+[ 46.274302] stmmac_xmit+0x7a0/0x9d0
+[ 46.277874] dev_hard_start_xmit+0x94/0x158
+[ 46.282056] sch_direct_xmit+0x11c/0x338
+[ 46.285976] __qdisc_run+0x118/0x5f0
+[ 46.289549] net_tx_action+0x110/0x198
+[ 46.293297] __do_softirq+0x120/0x23c
+[ 46.296958] irq_exit+0xb8/0xd8
+[ 46.300098] __handle_domain_irq+0x64/0xb8
+[ 46.304191] gic_handle_irq+0x5c/0x148
+[ 46.307936] el1_irq+0xb8/0x180
+[ 46.311076] cpuidle_enter_state+0x84/0x360
+[ 46.315256] cpuidle_enter+0x34/0x48
+[ 46.318829] call_cpuidle+0x18/0x38
+[ 46.322314] do_idle+0x1e0/0x280
+[ 46.325539] cpu_startup_entry+0x24/0x40
+[ 46.329460] rest_init+0xd4/0xe0
+[ 46.332687] arch_call_rest_init+0xc/0x14
+[ 46.336695] start_kernel+0x420/0x44c
+[ 46.340353] ---[ end trace bc1ee695123cbacd ]---
+
+Fixes: 47dd7a540b8a0 ("net: add support for STMicroelectronics Ethernet controllers.")
+Signed-off-by: Fugang Duan <fugang.duan@nxp.com>
+Signed-off-by: Joakim Zhang <qiangqing.zhang@nxp.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -1429,6 +1429,19 @@ static void dma_free_tx_skbufs(struct st
+ }
+
+ /**
++ * stmmac_free_tx_skbufs - free TX skb buffers
++ * @priv: private structure
++ */
++static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
++{
++ u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
++ u32 queue;
++
++ for (queue = 0; queue < tx_queue_cnt; queue++)
++ dma_free_tx_skbufs(priv, queue);
++}
++
++/**
+ * free_dma_rx_desc_resources - free RX dma desc resources
+ * @priv: private structure
+ */
+@@ -4591,6 +4604,7 @@ int stmmac_resume(struct device *dev)
+
+ stmmac_reset_queues_param(priv);
+
++ stmmac_free_tx_skbufs(priv);
+ stmmac_clear_descriptors(priv);
+
+ stmmac_hw_setup(ndev, false);
x86-apic-vector-fix-ordering-in-vector-assignment.patch
compiler.h-fix-barrier_data-on-clang.patch
pci-qcom-add-missing-reset-for-ipq806x.patch
+mac80211-mesh-fix-mesh_pathtbl_init-error-path.patch
+net-stmmac-free-tx-skb-buffer-in-stmmac_resume.patch
+tcp-select-sane-initial-rcvq_space.space-for-big-mss.patch
+tcp-fix-cwnd-limited-bug-for-tso-deferral-where-we-send-nothing.patch
+net-mlx4_en-avoid-scheduling-restart-task-if-it-is-already-running.patch
+lan743x-fix-for-potential-null-pointer-dereference-with-bare-card.patch
+net-mlx4_en-handle-tx-error-cqe.patch
+net-stmmac-delete-the-eee_ctrl_timer-after-napi-disabled.patch
+net-stmmac-dwmac-meson8b-fix-mask-definition-of-the-m250_sel-mux.patch
+net-bridge-vlan-fix-error-return-code-in-__vlan_add.patch
--- /dev/null
+From foo@baz Sat Dec 19 12:04:16 PM CET 2020
+From: Neal Cardwell <ncardwell@google.com>
+Date: Tue, 8 Dec 2020 22:57:59 -0500
+Subject: tcp: fix cwnd-limited bug for TSO deferral where we send nothing
+
+From: Neal Cardwell <ncardwell@google.com>
+
+[ Upstream commit 299bcb55ecd1412f6df606e9dc0912d55610029e ]
+
+When cwnd is not a multiple of the TSO skb size of N*MSS, we can get
+into persistent scenarios where we have the following sequence:
+
+(1) ACK for full-sized skb of N*MSS arrives
+ -> tcp_write_xmit() transmit full-sized skb with N*MSS
+ -> move pacing release time forward
+ -> exit tcp_write_xmit() because pacing time is in the future
+
+(2) TSQ callback or TCP internal pacing timer fires
+ -> try to transmit next skb, but TSO deferral finds remainder of
+ available cwnd is not big enough to trigger an immediate send
+ now, so we defer sending until the next ACK.
+
+(3) repeat...
+
+So we can get into a case where we never mark ourselves as
+cwnd-limited for many seconds at a time, even with
+bulk/infinite-backlog senders, because:
+
+o In case (1) above, every time in tcp_write_xmit() we have enough
+cwnd to send a full-sized skb, we are not fully using the cwnd
+(because cwnd is not a multiple of the TSO skb size). So every time we
+send data, we are not cwnd limited, and so in the cwnd-limited
+tracking code in tcp_cwnd_validate() we mark ourselves as not
+cwnd-limited.
+
+o In case (2) above, every time in tcp_write_xmit() that we try to
+transmit the "remainder" of the cwnd but defer, we set the local
+variable is_cwnd_limited to true, but we do not send any packets, so
+sent_pkts is zero, so we don't call the cwnd-limited logic to update
+tp->is_cwnd_limited.
+
+Fixes: ca8a22634381 ("tcp: make cwnd-limited checks measurement-based, and gentler")
+Reported-by: Ingemar Johansson <ingemar.s.johansson@ericsson.com>
+Signed-off-by: Neal Cardwell <ncardwell@google.com>
+Signed-off-by: Yuchung Cheng <ycheng@google.com>
+Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Link: https://lore.kernel.org/r/20201209035759.1225145-1-ncardwell.kernel@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_output.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1622,7 +1622,8 @@ static void tcp_cwnd_validate(struct soc
+ * window, and remember whether we were cwnd-limited then.
+ */
+ if (!before(tp->snd_una, tp->max_packets_seq) ||
+- tp->packets_out > tp->max_packets_out) {
++ tp->packets_out > tp->max_packets_out ||
++ is_cwnd_limited) {
+ tp->max_packets_out = tp->packets_out;
+ tp->max_packets_seq = tp->snd_nxt;
+ tp->is_cwnd_limited = is_cwnd_limited;
+@@ -2407,6 +2408,10 @@ repair:
+ else
+ tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED);
+
++ is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd);
++ if (likely(sent_pkts || is_cwnd_limited))
++ tcp_cwnd_validate(sk, is_cwnd_limited);
++
+ if (likely(sent_pkts)) {
+ if (tcp_in_cwnd_reduction(sk))
+ tp->prr_out += sent_pkts;
+@@ -2414,8 +2419,6 @@ repair:
+ /* Send one loss probe per tail loss episode. */
+ if (push_one != 2)
+ tcp_schedule_loss_probe(sk, false);
+- is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd);
+- tcp_cwnd_validate(sk, is_cwnd_limited);
+ return false;
+ }
+ return !tp->packets_out && !tcp_write_queue_empty(sk);
--- /dev/null
+From foo@baz Sat Dec 19 12:04:16 PM CET 2020
+From: Eric Dumazet <edumazet@google.com>
+Date: Tue, 8 Dec 2020 08:21:31 -0800
+Subject: tcp: select sane initial rcvq_space.space for big MSS
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 72d05c00d7ecda85df29abd046da7e41cc071c17 ]
+
+Before commit a337531b942b ("tcp: up initial rmem to 128KB and SYN rwin to around 64KB")
+small tcp_rmem[1] values were overridden by tcp_fixup_rcvbuf() to accommodate various MSS.
+
+This is no longer the case, and Hazem Mohamed Abuelfotoh reported
+that DRS would not work for MTU 9000 endpoints receiving regular (1500 bytes) frames.
+
+Root cause is that tcp_init_buffer_space() uses tp->rcv_wnd for upper limit
+of rcvq_space.space computation, while it can select later a smaller
+value for tp->rcv_ssthresh and tp->window_clamp.
+
+ss -temoi on receiver would show :
+
+skmem:(r0,rb131072,t0,tb46080,f0,w0,o0,bl0,d0) rcv_space:62496 rcv_ssthresh:56596
+
+This means that TCP can not increase its window in tcp_grow_window(),
+and that DRS can never kick.
+
+Fix this by making sure that rcvq_space.space is not bigger than number of bytes
+that can be held in TCP receive queue.
+
+People unable/unwilling to change their kernel can work around this issue by
+selecting a bigger tcp_rmem[1] value as in :
+
+echo "4096 196608 6291456" >/proc/sys/net/ipv4/tcp_rmem
+
+Based on an initial report and patch from Hazem Mohamed Abuelfotoh
+ https://lore.kernel.org/netdev/20201204180622.14285-1-abuehaze@amazon.com/
+
+Fixes: a337531b942b ("tcp: up initial rmem to 128KB and SYN rwin to around 64KB")
+Fixes: 041a14d26715 ("tcp: start receiver buffer autotuning sooner")
+Reported-by: Hazem Mohamed Abuelfotoh <abuehaze@amazon.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_input.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -439,7 +439,6 @@ void tcp_init_buffer_space(struct sock *
+ if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK))
+ tcp_sndbuf_expand(sk);
+
+- tp->rcvq_space.space = min_t(u32, tp->rcv_wnd, TCP_INIT_CWND * tp->advmss);
+ tcp_mstamp_refresh(tp);
+ tp->rcvq_space.time = tp->tcp_mstamp;
+ tp->rcvq_space.seq = tp->copied_seq;
+@@ -463,6 +462,8 @@ void tcp_init_buffer_space(struct sock *
+
+ tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp);
+ tp->snd_cwnd_stamp = tcp_jiffies32;
++ tp->rcvq_space.space = min3(tp->rcv_ssthresh, tp->rcv_wnd,
++ (u32)TCP_INIT_CWND * tp->advmss);
+ }
+
+ /* 4. Recalculate window clamp after socket hit its memory bounds. */