--- /dev/null
+From e479308ac228b7482617d31e4253089ea9abbde2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Mar 2020 13:58:19 +0300
+Subject: blktrace: fix dereference after null check
+
+From: Cengiz Can <cengiz@kernel.wtf>
+
+[ Upstream commit 153031a301bb07194e9c37466cfce8eacb977621 ]
+
+There was a recent change in blktrace.c that added a RCU protection to
+`q->blk_trace` in order to fix a use-after-free issue during access.
+
+However, the change missed an edge case that can lead to dereferencing
+the `bt` pointer even when it's NULL:
+
+Coverity static analyzer marked this as a FORWARD_NULL issue with CID
+1460458.
+
+```
+/kernel/trace/blktrace.c: 1904 in sysfs_blk_trace_attr_store()
+1898 ret = 0;
+1899 if (bt == NULL)
+1900 ret = blk_trace_setup_queue(q, bdev);
+1901
+1902 if (ret == 0) {
+1903 if (attr == &dev_attr_act_mask)
+>>> CID 1460458: Null pointer dereferences (FORWARD_NULL)
+>>> Dereferencing null pointer "bt".
+1904 bt->act_mask = value;
+1905 else if (attr == &dev_attr_pid)
+1906 bt->pid = value;
+1907 else if (attr == &dev_attr_start_lba)
+1908 bt->start_lba = value;
+1909 else if (attr == &dev_attr_end_lba)
+```
+
+Added a reassignment with RCU annotation to fix the issue.
+
+Fixes: c780e86dd48 ("blktrace: Protect q->blk_trace with RCU")
+Cc: stable@vger.kernel.org
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Reviewed-by: Bob Liu <bob.liu@oracle.com>
+Reviewed-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Cengiz Can <cengiz@kernel.wtf>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/blktrace.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
+index a6d3016410eba..840ef7af20e04 100644
+--- a/kernel/trace/blktrace.c
++++ b/kernel/trace/blktrace.c
+@@ -1896,8 +1896,11 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
+ }
+
+ ret = 0;
+- if (bt == NULL)
++ if (bt == NULL) {
+ ret = blk_trace_setup_queue(q, bdev);
++ bt = rcu_dereference_protected(q->blk_trace,
++ lockdep_is_held(&q->blk_trace_mutex));
++ }
+
+ if (ret == 0) {
+ if (attr == &dev_attr_act_mask)
+--
+2.20.1
+
--- /dev/null
+From 11d7d4f82ae94263e7209414ae06bb248cd0f732 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 23 Feb 2020 14:54:58 -0500
+Subject: dm thin metadata: fix lockdep complaint
+
+From: Theodore Ts'o <tytso@mit.edu>
+
+[ Upstream commit 3918e0667bbac99400b44fa5aef3f8be2eeada4a ]
+
+[ 3934.173244] ======================================================
+[ 3934.179572] WARNING: possible circular locking dependency detected
+[ 3934.185884] 5.4.21-xfstests #1 Not tainted
+[ 3934.190151] ------------------------------------------------------
+[ 3934.196673] dmsetup/8897 is trying to acquire lock:
+[ 3934.201688] ffffffffbce82b18 (shrinker_rwsem){++++}, at: unregister_shrinker+0x22/0x80
+[ 3934.210268]
+ but task is already holding lock:
+[ 3934.216489] ffff92a10cc5e1d0 (&pmd->root_lock){++++}, at: dm_pool_metadata_close+0xba/0x120
+[ 3934.225083]
+ which lock already depends on the new lock.
+
+[ 3934.564165] Chain exists of:
+ shrinker_rwsem --> &journal->j_checkpoint_mutex --> &pmd->root_lock
+
+For a more detailed lockdep report, please see:
+
+ https://lore.kernel.org/r/20200220234519.GA620489@mit.edu
+
+We shouldn't need to hold the lock while we are just tearing down and
+freeing the whole metadata pool structure.
+
+Fixes: 44d8ebf436399a4 ("dm thin metadata: use pool locking at end of dm_pool_metadata_close")
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/dm-thin-metadata.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
+index 8bb723f1a569a..4cd8868f80040 100644
+--- a/drivers/md/dm-thin-metadata.c
++++ b/drivers/md/dm-thin-metadata.c
+@@ -960,9 +960,9 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
+ DMWARN("%s: __commit_transaction() failed, error = %d",
+ __func__, r);
+ }
++ pmd_write_unlock(pmd);
+ if (!pmd->fail_io)
+ __destroy_persistent_data_objects(pmd);
+- pmd_write_unlock(pmd);
+
+ kfree(pmd);
+ return 0;
+--
+2.20.1
+
--- /dev/null
+From bd6d7f47dfff2c3d0f51e2b698394b973157efe2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Feb 2020 15:56:32 -0800
+Subject: net: dsa: bcm_sf2: Forcibly configure IMP port for 1Gb/sec
+
+From: Florian Fainelli <f.fainelli@gmail.com>
+
+[ Upstream commit 98c5f7d44fef309e692c24c6d71131ee0f0871fb ]
+
+We are still experiencing some packet loss with the existing advanced
+congestion buffering (ACB) settings with the IMP port configured for
+2Gb/sec, so revert to conservative link speeds that do not produce
+packet loss until this is resolved.
+
+Fixes: 8f1880cbe8d0 ("net: dsa: bcm_sf2: Configure IMP port for 2Gb/sec")
+Fixes: de34d7084edd ("net: dsa: bcm_sf2: Only 7278 supports 2Gb/sec IMP port")
+Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
+Reviewed-by: Vivien Didelot <vivien.didelot@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/bcm_sf2.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
+index d1955543acd1d..b0f5280a83cb6 100644
+--- a/drivers/net/dsa/bcm_sf2.c
++++ b/drivers/net/dsa/bcm_sf2.c
+@@ -69,8 +69,7 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
+ /* Force link status for IMP port */
+ reg = core_readl(priv, offset);
+ reg |= (MII_SW_OR | LINK_STS);
+- if (priv->type == BCM7278_DEVICE_ID)
+- reg |= GMII_SPEED_UP_2G;
++ reg &= ~GMII_SPEED_UP_2G;
+ core_writel(priv, reg, offset);
+
+ /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
+--
+2.20.1
+
--- /dev/null
+From 4d53df40fd5b062b46bbaad3e17be48953ce4bcf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Jan 2020 12:25:10 +0100
+Subject: netfilter: hashlimit: do not use indirect calls during gc
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit 28b3a4270c0fc064557e409111f2a678e64b6fa7 ]
+
+no need, just use a simple boolean to indicate we want to reap all
+entries.
+
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/xt_hashlimit.c | 22 ++++------------------
+ 1 file changed, 4 insertions(+), 18 deletions(-)
+
+diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
+index 1b68a131083c2..7a2c4b8408c49 100644
+--- a/net/netfilter/xt_hashlimit.c
++++ b/net/netfilter/xt_hashlimit.c
+@@ -358,21 +358,7 @@ static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg,
+ return 0;
+ }
+
+-static bool select_all(const struct xt_hashlimit_htable *ht,
+- const struct dsthash_ent *he)
+-{
+- return true;
+-}
+-
+-static bool select_gc(const struct xt_hashlimit_htable *ht,
+- const struct dsthash_ent *he)
+-{
+- return time_after_eq(jiffies, he->expires);
+-}
+-
+-static void htable_selective_cleanup(struct xt_hashlimit_htable *ht,
+- bool (*select)(const struct xt_hashlimit_htable *ht,
+- const struct dsthash_ent *he))
++static void htable_selective_cleanup(struct xt_hashlimit_htable *ht, bool select_all)
+ {
+ unsigned int i;
+
+@@ -382,7 +368,7 @@ static void htable_selective_cleanup(struct xt_hashlimit_htable *ht,
+
+ spin_lock_bh(&ht->lock);
+ hlist_for_each_entry_safe(dh, n, &ht->hash[i], node) {
+- if ((*select)(ht, dh))
++ if (time_after_eq(jiffies, dh->expires) || select_all)
+ dsthash_free(ht, dh);
+ }
+ spin_unlock_bh(&ht->lock);
+@@ -396,7 +382,7 @@ static void htable_gc(struct work_struct *work)
+
+ ht = container_of(work, struct xt_hashlimit_htable, gc_work.work);
+
+- htable_selective_cleanup(ht, select_gc);
++ htable_selective_cleanup(ht, false);
+
+ queue_delayed_work(system_power_efficient_wq,
+ &ht->gc_work, msecs_to_jiffies(ht->cfg.gc_interval));
+@@ -420,7 +406,7 @@ static void htable_destroy(struct xt_hashlimit_htable *hinfo)
+ {
+ cancel_delayed_work_sync(&hinfo->gc_work);
+ htable_remove_proc_entry(hinfo);
+- htable_selective_cleanup(hinfo, select_all);
++ htable_selective_cleanup(hinfo, true);
+ kfree(hinfo->name);
+ vfree(hinfo);
+ }
+--
+2.20.1
+
--- /dev/null
+From 5dece94d7dc399605c04d9541469861594e9e006 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 12 Feb 2020 22:53:52 -0800
+Subject: netfilter: xt_hashlimit: unregister proc file before releasing mutex
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+[ Upstream commit 99b79c3900d4627672c85d9f344b5b0f06bc2a4d ]
+
+Before releasing the global mutex, we only unlink the hashtable
+from the hash list, its proc file is still not unregistered at
+this point. So syzbot could trigger a race condition where a
+parallel htable_create() could register the same file immediately
+after the mutex is released.
+
+Move htable_remove_proc_entry() back to mutex protection to
+fix this. And, fold htable_destroy() into htable_put() to make
+the code slightly easier to understand.
+
+Reported-and-tested-by: syzbot+d195fd3b9a364ddd6731@syzkaller.appspotmail.com
+Fixes: c4a3922d2d20 ("netfilter: xt_hashlimit: reduce hashlimit_mutex scope for htable_put()")
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/xt_hashlimit.c | 16 ++++++----------
+ 1 file changed, 6 insertions(+), 10 deletions(-)
+
+diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
+index 7a2c4b8408c49..8c835ad637290 100644
+--- a/net/netfilter/xt_hashlimit.c
++++ b/net/netfilter/xt_hashlimit.c
+@@ -402,15 +402,6 @@ static void htable_remove_proc_entry(struct xt_hashlimit_htable *hinfo)
+ remove_proc_entry(hinfo->name, parent);
+ }
+
+-static void htable_destroy(struct xt_hashlimit_htable *hinfo)
+-{
+- cancel_delayed_work_sync(&hinfo->gc_work);
+- htable_remove_proc_entry(hinfo);
+- htable_selective_cleanup(hinfo, true);
+- kfree(hinfo->name);
+- vfree(hinfo);
+-}
+-
+ static struct xt_hashlimit_htable *htable_find_get(struct net *net,
+ const char *name,
+ u_int8_t family)
+@@ -432,8 +423,13 @@ static void htable_put(struct xt_hashlimit_htable *hinfo)
+ {
+ if (refcount_dec_and_mutex_lock(&hinfo->use, &hashlimit_mutex)) {
+ hlist_del(&hinfo->node);
++ htable_remove_proc_entry(hinfo);
+ mutex_unlock(&hashlimit_mutex);
+- htable_destroy(hinfo);
++
++ cancel_delayed_work_sync(&hinfo->gc_work);
++ htable_selective_cleanup(hinfo, true);
++ kfree(hinfo->name);
++ vfree(hinfo);
+ }
+ }
+
+--
+2.20.1
+
--- /dev/null
+From 1a8a485273820c525cd7b6212a031b7b10534305 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 27 Feb 2020 14:57:28 +0200
+Subject: RDMA/core: Fix pkey and port assignment in get_new_pps
+
+From: Maor Gottlieb <maorg@mellanox.com>
+
+[ Upstream commit 801b67f3eaafd3f2ec8b65d93142d4ffedba85df ]
+
+When port is part of the modify mask, then we should take it from the
+qp_attr and not from the old pps. Same for PKEY. Otherwise there are
+panics in some configurations:
+
+ RIP: 0010:get_pkey_idx_qp_list+0x50/0x80 [ib_core]
+ Code: c7 18 e8 13 04 30 ef 0f b6 43 06 48 69 c0 b8 00 00 00 48 03 85 a0 04 00 00 48 8b 50 20 48 8d 48 20 48 39 ca 74 1a 0f b7 73 04 <66> 39 72 10 75 08 eb 10 66 39 72 10 74 0a 48 8b 12 48 39 ca 75 f2
+ RSP: 0018:ffffafb3480932f0 EFLAGS: 00010203
+ RAX: ffff98059ababa10 RBX: ffff980d926e8cc0 RCX: ffff98059ababa30
+ RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffff98059ababa28
+ RBP: ffff98059b940000 R08: 00000000000310c0 R09: ffff97fe47c07480
+ R10: 0000000000000036 R11: 0000000000000200 R12: 0000000000000071
+ R13: ffff98059b940000 R14: ffff980d87f948a0 R15: 0000000000000000
+ FS: 00007f88deb31740(0000) GS:ffff98059f600000(0000) knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 0000000000000010 CR3: 0000000853e26001 CR4: 00000000001606e0
+ Call Trace:
+ port_pkey_list_insert+0x3d/0x1b0 [ib_core]
+ ? kmem_cache_alloc_trace+0x215/0x220
+ ib_security_modify_qp+0x226/0x3a0 [ib_core]
+ _ib_modify_qp+0xcf/0x390 [ib_core]
+ ipoib_init_qp+0x7f/0x200 [ib_ipoib]
+ ? rvt_modify_port+0xd0/0xd0 [rdmavt]
+ ? ib_find_pkey+0x99/0xf0 [ib_core]
+ ipoib_ib_dev_open_default+0x1a/0x200 [ib_ipoib]
+ ipoib_ib_dev_open+0x96/0x130 [ib_ipoib]
+ ipoib_open+0x44/0x130 [ib_ipoib]
+ __dev_open+0xd1/0x160
+ __dev_change_flags+0x1ab/0x1f0
+ dev_change_flags+0x23/0x60
+ do_setlink+0x328/0xe30
+ ? __nla_validate_parse+0x54/0x900
+ __rtnl_newlink+0x54e/0x810
+ ? __alloc_pages_nodemask+0x17d/0x320
+ ? page_fault+0x30/0x50
+ ? _cond_resched+0x15/0x30
+ ? kmem_cache_alloc_trace+0x1c8/0x220
+ rtnl_newlink+0x43/0x60
+ rtnetlink_rcv_msg+0x28f/0x350
+ ? kmem_cache_alloc+0x1fb/0x200
+ ? _cond_resched+0x15/0x30
+ ? __kmalloc_node_track_caller+0x24d/0x2d0
+ ? rtnl_calcit.isra.31+0x120/0x120
+ netlink_rcv_skb+0xcb/0x100
+ netlink_unicast+0x1e0/0x340
+ netlink_sendmsg+0x317/0x480
+ ? __check_object_size+0x48/0x1d0
+ sock_sendmsg+0x65/0x80
+ ____sys_sendmsg+0x223/0x260
+ ? copy_msghdr_from_user+0xdc/0x140
+ ___sys_sendmsg+0x7c/0xc0
+ ? skb_dequeue+0x57/0x70
+ ? __inode_wait_for_writeback+0x75/0xe0
+ ? fsnotify_grab_connector+0x45/0x80
+ ? __dentry_kill+0x12c/0x180
+ __sys_sendmsg+0x58/0xa0
+ do_syscall_64+0x5b/0x200
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+ RIP: 0033:0x7f88de467f10
+
+Link: https://lore.kernel.org/r/20200227125728.100551-1-leon@kernel.org
+Cc: <stable@vger.kernel.org>
+Fixes: 1dd017882e01 ("RDMA/core: Fix protection fault in get_pkey_idx_qp_list")
+Signed-off-by: Maor Gottlieb <maorg@mellanox.com>
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Tested-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/core/security.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
+index 2b4d80393bd0d..9e27ca18d3270 100644
+--- a/drivers/infiniband/core/security.c
++++ b/drivers/infiniband/core/security.c
+@@ -340,11 +340,15 @@ static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
+ return NULL;
+
+ if (qp_attr_mask & IB_QP_PORT)
+- new_pps->main.port_num =
+- (qp_pps) ? qp_pps->main.port_num : qp_attr->port_num;
++ new_pps->main.port_num = qp_attr->port_num;
++ else if (qp_pps)
++ new_pps->main.port_num = qp_pps->main.port_num;
++
+ if (qp_attr_mask & IB_QP_PKEY_INDEX)
+- new_pps->main.pkey_index = (qp_pps) ? qp_pps->main.pkey_index :
+- qp_attr->pkey_index;
++ new_pps->main.pkey_index = qp_attr->pkey_index;
++ else if (qp_pps)
++ new_pps->main.pkey_index = qp_pps->main.pkey_index;
++
+ if ((qp_attr_mask & IB_QP_PKEY_INDEX) && (qp_attr_mask & IB_QP_PORT))
+ new_pps->main.state = IB_PORT_PKEY_VALID;
+
+--
+2.20.1
+
--- /dev/null
+From 9b3dc68bc6b1ec0ab003fc6dd23d681100a008f1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 17 Feb 2020 13:43:18 -0700
+Subject: RDMA/core: Fix use of logical OR in get_new_pps
+
+From: Nathan Chancellor <natechancellor@gmail.com>
+
+[ Upstream commit 4ca501d6aaf21de31541deac35128bbea8427aa6 ]
+
+Clang warns:
+
+../drivers/infiniband/core/security.c:351:41: warning: converting the
+enum constant to a boolean [-Wint-in-bool-context]
+ if (!(qp_attr_mask & (IB_QP_PKEY_INDEX || IB_QP_PORT)) && qp_pps) {
+ ^
+1 warning generated.
+
+A bitwise OR should have been used instead.
+
+Fixes: 1dd017882e01 ("RDMA/core: Fix protection fault in get_pkey_idx_qp_list")
+Link: https://lore.kernel.org/r/20200217204318.13609-1-natechancellor@gmail.com
+Link: https://github.com/ClangBuiltLinux/linux/issues/889
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Nathan Chancellor <natechancellor@gmail.com>
+Reviewed-by: Leon Romanovsky <leonro@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/core/security.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
+index 9e27ca18d3270..2d5608315dc80 100644
+--- a/drivers/infiniband/core/security.c
++++ b/drivers/infiniband/core/security.c
+@@ -352,7 +352,7 @@ static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
+ if ((qp_attr_mask & IB_QP_PKEY_INDEX) && (qp_attr_mask & IB_QP_PORT))
+ new_pps->main.state = IB_PORT_PKEY_VALID;
+
+- if (!(qp_attr_mask & (IB_QP_PKEY_INDEX || IB_QP_PORT)) && qp_pps) {
++ if (!(qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) && qp_pps) {
+ new_pps->main.port_num = qp_pps->main.port_num;
+ new_pps->main.pkey_index = qp_pps->main.pkey_index;
+ if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID)
+--
+2.20.1
+
block-bfq-get-a-ref-to-a-group-when-adding-it-to-a-s.patch
block-bfq-get-extra-ref-to-prevent-a-queue-from-bein.patch
block-bfq-do-not-insert-oom-queue-into-position-tree.patch
+dm-thin-metadata-fix-lockdep-complaint.patch
+net-dsa-bcm_sf2-forcibly-configure-imp-port-for-1gb-.patch
+rdma-core-fix-pkey-and-port-assignment-in-get_new_pp.patch
+rdma-core-fix-use-of-logical-or-in-get_new_pps.patch
+blktrace-fix-dereference-after-null-check.patch
+netfilter-hashlimit-do-not-use-indirect-calls-during.patch
+netfilter-xt_hashlimit-unregister-proc-file-before-r.patch