--- /dev/null
+From 5016da09fe9933c70c9923e124d82a2bf4ea80ea Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 6 Feb 2020 20:14:35 +0100
+Subject: amdgpu: Prevent build errors regarding soft/hard-float FP ABI tags
+
+From: Daniel Kolesa <daniel@octaforge.org>
+
+[ Upstream commit 416611d9b6eebaeae58ed26cc7d23131c69126b1 ]
+
+On PowerPC, the compiler will tag object files with whether they
+use hard or soft float FP ABI and whether they use 64 or 128-bit
+long double ABI. On systems with 64-bit long double ABI, a tag
+will get emitted whenever a double is used, as on those systems
+a long double is the same as a double. This will prevent linkage
+as other files are being compiled with hard-float.
+
+On ppc64, this code will never actually get used for the time
+being, as the only currently existing hardware using it is the
+Renoir APU line. Therefore, until this is testable and can be fixed
+properly, at least make sure the build will not fail.
+
+Signed-off-by: Daniel Kolesa <daniel@octaforge.org>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
+index b864869cc7e3e..6fa7422c51da5 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
+@@ -91,6 +91,12 @@ ifdef CONFIG_DRM_AMD_DC_DCN2_1
+ ###############################################################################
+ CLK_MGR_DCN21 = rn_clk_mgr.o rn_clk_mgr_vbios_smu.o
+
++# prevent build errors regarding soft-float vs hard-float FP ABI tags
++# this code is currently unused on ppc64, as it applies to Renoir APUs only
++ifdef CONFIG_PPC64
++CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn21/rn_clk_mgr.o := $(call cc-option,-mno-gnu-attribute)
++endif
++
+ AMD_DAL_CLK_MGR_DCN21 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn21/,$(CLK_MGR_DCN21))
+
+ AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN21)
+--
+2.20.1
+
--- /dev/null
+From adb14f785539f943f46b1185a6abeb0fce576022 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Dec 2019 11:08:47 +0900
+Subject: ARM: dts: sti: fixup sound frame-inversion for stihxxx-b2120.dtsi
+
+From: Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+
+[ Upstream commit f24667779b5348279e5e4328312a141a730a1fc7 ]
+
+frame-inversion is a "flag" property, not a "uint32".
+This patch fixes it up.
+
+Signed-off-by: Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+Reviewed-by: Patrice Chotard <patrice.chotard@st.com>
+Signed-off-by: Patrice Chotard <patrice.chotard@st.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/boot/dts/stihxxx-b2120.dtsi | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/arm/boot/dts/stihxxx-b2120.dtsi b/arch/arm/boot/dts/stihxxx-b2120.dtsi
+index 60e11045ad762..d051f080e52ec 100644
+--- a/arch/arm/boot/dts/stihxxx-b2120.dtsi
++++ b/arch/arm/boot/dts/stihxxx-b2120.dtsi
+@@ -46,7 +46,7 @@
+ /* DAC */
+ format = "i2s";
+ mclk-fs = <256>;
+- frame-inversion = <1>;
++ frame-inversion;
+ cpu {
+ sound-dai = <&sti_uni_player2>;
+ };
+--
+2.20.1
+
--- /dev/null
+From 912cb6f91c90045342a0972d31f1eb8fb72858ef Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 7 Feb 2020 12:57:36 +0100
+Subject: arm/ftrace: Fix BE text poking
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit be993e44badc448add6a18d6f12b20615692c4c3 ]
+
+The __patch_text() function already applies __opcode_to_mem_*(), so
+when __opcode_to_mem_*() is not the identity (BE*), it is applied
+twice, wrecking the instruction.
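+
+For reference, a small standalone C sketch (not kernel code) of why the
+double conversion is fatal on BE8: swapping twice is the identity, so
+the opcode reaches memory in CPU order rather than the instruction
+(memory) order __patch_text() is supposed to produce:
+
+	#include <stdint.h>
+	#include <stdio.h>
+
+	static uint32_t swab32(uint32_t x)
+	{
+		uint32_t b0 = (x >> 24) & 0xff;
+		uint32_t b1 = (x >> 8) & 0xff00;
+		uint32_t b2 = (x << 8) & 0xff0000;
+		uint32_t b3 = (x << 24) & 0xff000000;
+
+		return b0 | b1 | b2 | b3;
+	}
+
+	int main(void)
+	{
+		uint32_t insn = 0xe1a00000;	/* ARM "mov r0, r0" */
+
+		printf("swapped once:  %08x\n", (unsigned int)swab32(insn));
+		printf("swapped twice: %08x\n", (unsigned int)swab32(swab32(insn)));
+		return 0;
+	}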
+
+Fixes: 42e51f187f86 ("arm/ftrace: Use __patch_text()")
+Reported-by: Dmitry Osipenko <digetx@gmail.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Tested-by: Dmitry Osipenko <digetx@gmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/kernel/ftrace.c | 7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
+index bda949fd84e8b..93caf757f1d5d 100644
+--- a/arch/arm/kernel/ftrace.c
++++ b/arch/arm/kernel/ftrace.c
+@@ -81,13 +81,10 @@ static int ftrace_modify_code(unsigned long pc, unsigned long old,
+ {
+ unsigned long replaced;
+
+- if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
++ if (IS_ENABLED(CONFIG_THUMB2_KERNEL))
+ old = __opcode_to_mem_thumb32(old);
+- new = __opcode_to_mem_thumb32(new);
+- } else {
++ else
+ old = __opcode_to_mem_arm(old);
+- new = __opcode_to_mem_arm(new);
+- }
+
+ if (validate) {
+ if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE))
+--
+2.20.1
+
--- /dev/null
+From 429f2080e4ccd161c981ba5e0942237d81797024 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 13 Feb 2020 22:12:05 +0800
+Subject: bcache: ignore pending signals when creating gc and allocator thread
+
+From: Coly Li <colyli@suse.de>
+
+[ Upstream commit 0b96da639a4874311e9b5156405f69ef9fc3bef8 ]
+
+When running a cache set, all the bcache btree nodes of this cache set
+are checked by bch_btree_check(). If the bcache btree is very large,
+iterating all the btree nodes will occupy too much system memory and
+the bcache registering process might be selected and killed by the
+system OOM killer. kthread_run() will fail if the current process has
+a pending signal, therefore the kthread creation in run_cache_set()
+for the gc and allocator kernel threads will very probably fail for a
+very large bcache btree.
+
+Indeed such an OOM is safe and the registering process will exit after
+the registration is done. Therefore this patch flushes pending signals
+during cache set start up, specifically in bch_cache_allocator_start()
+and bch_gc_thread_start(), to make sure run_cache_set() won't fail for
+a large cached data set.
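+
+A minimal kernel-style sketch of the resulting pattern (hypothetical
+worker_fn and cookie): kthread_create() typically fails with
+ERR_PTR(-EINTR) when the spawning task has a fatal signal pending, so
+any pending signal is flushed before the thread is started:
+
+	if (signal_pending(current))
+		flush_signals(current);
+
+	worker = kthread_run(worker_fn, cookie, "example_worker");
+	if (IS_ERR(worker))
+		return PTR_ERR(worker);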
+
+Signed-off-by: Coly Li <colyli@suse.de>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/bcache/alloc.c | 18 ++++++++++++++++--
+ drivers/md/bcache/btree.c | 13 +++++++++++++
+ 2 files changed, 29 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
+index a1df0d95151c6..8bc1faf71ff2f 100644
+--- a/drivers/md/bcache/alloc.c
++++ b/drivers/md/bcache/alloc.c
+@@ -67,6 +67,7 @@
+ #include <linux/blkdev.h>
+ #include <linux/kthread.h>
+ #include <linux/random.h>
++#include <linux/sched/signal.h>
+ #include <trace/events/bcache.h>
+
+ #define MAX_OPEN_BUCKETS 128
+@@ -733,8 +734,21 @@ int bch_open_buckets_alloc(struct cache_set *c)
+
+ int bch_cache_allocator_start(struct cache *ca)
+ {
+- struct task_struct *k = kthread_run(bch_allocator_thread,
+- ca, "bcache_allocator");
++ struct task_struct *k;
++
++ /*
++ * In case previous btree check operation occupies too many
++ * system memory for bcache btree node cache, and the
++ * registering process is selected by OOM killer. Here just
++ * ignore the SIGKILL sent by OOM killer if there is, to
++ * avoid kthread_run() being failed by pending signals. The
++ * bcache registering process will exit after the registration
++ * done.
++ */
++ if (signal_pending(current))
++ flush_signals(current);
++
++ k = kthread_run(bch_allocator_thread, ca, "bcache_allocator");
+ if (IS_ERR(k))
+ return PTR_ERR(k);
+
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index 14d6c33b0957e..78f0711a25849 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -34,6 +34,7 @@
+ #include <linux/random.h>
+ #include <linux/rcupdate.h>
+ #include <linux/sched/clock.h>
++#include <linux/sched/signal.h>
+ #include <linux/rculist.h>
+ #include <linux/delay.h>
+ #include <trace/events/bcache.h>
+@@ -1917,6 +1918,18 @@ static int bch_gc_thread(void *arg)
+
+ int bch_gc_thread_start(struct cache_set *c)
+ {
++ /*
++ * In case previous btree check operation occupies too many
++ * system memory for bcache btree node cache, and the
++ * registering process is selected by OOM killer. Here just
++ * ignore the SIGKILL sent by OOM killer if there is, to
++ * avoid kthread_run() being failed by pending signals. The
++ * bcache registering process will exit after the registration
++ * done.
++ */
++ if (signal_pending(current))
++ flush_signals(current);
++
+ c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
+ return PTR_ERR_OR_ZERO(c->gc_thread);
+ }
+--
+2.20.1
+
--- /dev/null
+From efbde57c29316b35dc38dc8bd8bfb02a4132d92a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 3 Feb 2020 21:28:25 -0500
+Subject: ceph: do not execute direct write in parallel if O_APPEND is
+ specified
+
+From: Xiubo Li <xiubli@redhat.com>
+
+[ Upstream commit 8e4473bb50a1796c9c32b244e5dbc5ee24ead937 ]
+
+In O_APPEND & O_DIRECT mode, the data from different writers can end
+up overlapping each other, since they take the shared lock.
+
+For example, both Writer1 and Writer2 are in O_APPEND and O_DIRECT
+mode:
+
+ Writer1 Writer2
+
+ shared_lock() shared_lock()
+ getattr(CAP_SIZE) getattr(CAP_SIZE)
+ iocb->ki_pos = EOF iocb->ki_pos = EOF
+ write(data1)
+ write(data2)
+ shared_unlock() shared_unlock()
+
+Here data2 will overlap data1, since both start from the same file
+offset, the old EOF.
+
+Switch to exclusive lock instead when O_APPEND is specified.
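+
+A hedged sketch of the flag test this relies on (standalone helper, not
+the fs/ceph code itself): mask both bits and require that IOCB_DIRECT
+is the only one left, i.e. "direct and not append":
+
+	static bool use_direct_lock(int ki_flags)
+	{
+		return (ki_flags & (IOCB_DIRECT | IOCB_APPEND)) == IOCB_DIRECT;
+	}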
+
+Signed-off-by: Xiubo Li <xiubli@redhat.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ceph/file.c | 17 +++++++++++------
+ 1 file changed, 11 insertions(+), 6 deletions(-)
+
+diff --git a/fs/ceph/file.c b/fs/ceph/file.c
+index 11929d2bb594c..cd09e63d682b7 100644
+--- a/fs/ceph/file.c
++++ b/fs/ceph/file.c
+@@ -1418,6 +1418,7 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ struct ceph_cap_flush *prealloc_cf;
+ ssize_t count, written = 0;
+ int err, want, got;
++ bool direct_lock = false;
+ loff_t pos;
+ loff_t limit = max(i_size_read(inode), fsc->max_file_size);
+
+@@ -1428,8 +1429,11 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ if (!prealloc_cf)
+ return -ENOMEM;
+
++ if ((iocb->ki_flags & (IOCB_DIRECT | IOCB_APPEND)) == IOCB_DIRECT)
++ direct_lock = true;
++
+ retry_snap:
+- if (iocb->ki_flags & IOCB_DIRECT)
++ if (direct_lock)
+ ceph_start_io_direct(inode);
+ else
+ ceph_start_io_write(inode);
+@@ -1519,14 +1523,15 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
+
+ /* we might need to revert back to that point */
+ data = *from;
+- if (iocb->ki_flags & IOCB_DIRECT) {
++ if (iocb->ki_flags & IOCB_DIRECT)
+ written = ceph_direct_read_write(iocb, &data, snapc,
+ &prealloc_cf);
+- ceph_end_io_direct(inode);
+- } else {
++ else
+ written = ceph_sync_write(iocb, &data, pos, snapc);
++ if (direct_lock)
++ ceph_end_io_direct(inode);
++ else
+ ceph_end_io_write(inode);
+- }
+ if (written > 0)
+ iov_iter_advance(from, written);
+ ceph_put_snap_context(snapc);
+@@ -1577,7 +1582,7 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
+
+ goto out_unlocked;
+ out:
+- if (iocb->ki_flags & IOCB_DIRECT)
++ if (direct_lock)
+ ceph_end_io_direct(inode);
+ else
+ ceph_end_io_write(inode);
+--
+2.20.1
+
--- /dev/null
+From 43dfa6e65cd9620dc604aec07fd6721b8ab8e2e4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 13 Feb 2020 13:16:16 +0000
+Subject: cfg80211: add missing policy for NL80211_ATTR_STATUS_CODE
+
+From: Sergey Matyukevich <sergey.matyukevich.os@quantenna.com>
+
+[ Upstream commit ea75080110a4c1fa011b0a73cb8f42227143ee3e ]
+
+The nl80211_policy entry is missing for the NL80211_ATTR_STATUS_CODE
+attribute. As a result, for strictly validated commands, the attribute
+is assumed to not be supported.
+
+Signed-off-by: Sergey Matyukevich <sergey.matyukevich.os@quantenna.com>
+Link: https://lore.kernel.org/r/20200213131608.10541-2-sergey.matyukevich.os@quantenna.com
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/wireless/nl80211.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 1e97ac5435b23..118a98de516cd 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -437,6 +437,7 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
+ [NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT] = { .type = NLA_FLAG },
+ [NL80211_ATTR_CONTROL_PORT_OVER_NL80211] = { .type = NLA_FLAG },
+ [NL80211_ATTR_PRIVACY] = { .type = NLA_FLAG },
++ [NL80211_ATTR_STATUS_CODE] = { .type = NLA_U16 },
+ [NL80211_ATTR_CIPHER_SUITE_GROUP] = { .type = NLA_U32 },
+ [NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 },
+ [NL80211_ATTR_PID] = { .type = NLA_U32 },
+--
+2.20.1
+
--- /dev/null
+From c8998a8ffa88c9bbc09f3de958d730acabf5f83c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 3 Feb 2020 10:56:50 +0000
+Subject: cfg80211: check wiphy driver existence for drvinfo report
+
+From: Sergey Matyukevich <sergey.matyukevich.os@quantenna.com>
+
+[ Upstream commit bfb7bac3a8f47100ebe7961bd14e924c96e21ca7 ]
+
+When preparing ethtool drvinfo, check whether the wiphy driver is
+defined before dereferencing it. The driver may not exist, e.g. if the
+wiphy is attached to a virtual platform device.
+
+Signed-off-by: Sergey Matyukevich <sergey.matyukevich.os@quantenna.com>
+Link: https://lore.kernel.org/r/20200203105644.28875-1-sergey.matyukevich.os@quantenna.com
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/wireless/ethtool.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/net/wireless/ethtool.c b/net/wireless/ethtool.c
+index a9c0f368db5d2..24e18405cdb48 100644
+--- a/net/wireless/ethtool.c
++++ b/net/wireless/ethtool.c
+@@ -7,9 +7,13 @@
+ void cfg80211_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+ {
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
++ struct device *pdev = wiphy_dev(wdev->wiphy);
+
+- strlcpy(info->driver, wiphy_dev(wdev->wiphy)->driver->name,
+- sizeof(info->driver));
++ if (pdev->driver)
++ strlcpy(info->driver, pdev->driver->name,
++ sizeof(info->driver));
++ else
++ strlcpy(info->driver, "N/A", sizeof(info->driver));
+
+ strlcpy(info->version, init_utsname()->release, sizeof(info->version));
+
+--
+2.20.1
+
--- /dev/null
+From 5c369ddc1fda920c1df586086530baf1c240b0d7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 12 Feb 2020 15:31:48 -0600
+Subject: cifs: Fix mode output in debugging statements
+
+From: Frank Sorenson <sorenson@redhat.com>
+
+[ Upstream commit f52aa79df43c4509146140de0241bc21a4a3b4c7 ]
+
+A number of the debug statements output file or directory mode
+in hex. Change these to print using octal.
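+
+A quick standalone illustration (plain userspace C, not cifs code) of
+why octal is the right choice here: mode 0755 is unreadable as 0x1ed
+but obvious as %04o:
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		unsigned int mode = 0755;
+
+		printf("hex:   0x%x\n", mode);	/* hex:   0x1ed */
+		printf("octal: %04o\n", mode);	/* octal: 0755  */
+		return 0;
+	}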
+
+Signed-off-by: Frank Sorenson <sorenson@redhat.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/cifs/cifsacl.c | 4 ++--
+ fs/cifs/connect.c | 2 +-
+ fs/cifs/inode.c | 2 +-
+ 3 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
+index fb41e51dd5743..25704beb9d4ca 100644
+--- a/fs/cifs/cifsacl.c
++++ b/fs/cifs/cifsacl.c
+@@ -601,7 +601,7 @@ static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode,
+ ((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
+ *pmode |= (S_IXUGO & (*pbits_to_set));
+
+- cifs_dbg(NOISY, "access flags 0x%x mode now 0x%x\n", flags, *pmode);
++ cifs_dbg(NOISY, "access flags 0x%x mode now %04o\n", flags, *pmode);
+ return;
+ }
+
+@@ -630,7 +630,7 @@ static void mode_to_access_flags(umode_t mode, umode_t bits_to_use,
+ if (mode & S_IXUGO)
+ *pace_flags |= SET_FILE_EXEC_RIGHTS;
+
+- cifs_dbg(NOISY, "mode: 0x%x, access flags now 0x%x\n",
++ cifs_dbg(NOISY, "mode: %04o, access flags now 0x%x\n",
+ mode, *pace_flags);
+ return;
+ }
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 0aa3623ae0e16..641825cfa7670 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -4151,7 +4151,7 @@ int cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
+ cifs_sb->mnt_gid = pvolume_info->linux_gid;
+ cifs_sb->mnt_file_mode = pvolume_info->file_mode;
+ cifs_sb->mnt_dir_mode = pvolume_info->dir_mode;
+- cifs_dbg(FYI, "file mode: 0x%hx dir mode: 0x%hx\n",
++ cifs_dbg(FYI, "file mode: %04ho dir mode: %04ho\n",
+ cifs_sb->mnt_file_mode, cifs_sb->mnt_dir_mode);
+
+ cifs_sb->actimeo = pvolume_info->actimeo;
+diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
+index ca76a9287456f..b3f3675e18788 100644
+--- a/fs/cifs/inode.c
++++ b/fs/cifs/inode.c
+@@ -1649,7 +1649,7 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode)
+ struct TCP_Server_Info *server;
+ char *full_path;
+
+- cifs_dbg(FYI, "In cifs_mkdir, mode = 0x%hx inode = 0x%p\n",
++ cifs_dbg(FYI, "In cifs_mkdir, mode = %04ho inode = 0x%p\n",
+ mode, inode);
+
+ cifs_sb = CIFS_SB(inode->i_sb);
+--
+2.20.1
+
--- /dev/null
+From 9ff7c42d8e6cefa882fec526eb899d9d1278c54d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 5 Feb 2020 14:15:58 -0500
+Subject: dax: pass NOWAIT flag to iomap_apply
+
+From: Jeff Moyer <jmoyer@redhat.com>
+
+[ Upstream commit 96222d53842dfe54869ec4e1b9d4856daf9105a2 ]
+
+fstests generic/471 reports a failure when run with MOUNT_OPTIONS="-o
+dax". The reason is that the initial pwrite to an empty file with the
+RWF_NOWAIT flag set does not return -EAGAIN. It turns out that
+dax_iomap_rw doesn't pass that flag through to iomap_apply.
+
+With this patch applied, generic/471 passes for me.
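+
+For context, a userspace sketch of the case generic/471 exercises (the
+mount point and file name below are made up, and a libc exposing
+RWF_NOWAIT is assumed): the first write to an empty file needs block
+allocation, so an RWF_NOWAIT write should fail with EAGAIN instead of
+blocking:
+
+	#define _GNU_SOURCE
+	#include <errno.h>
+	#include <fcntl.h>
+	#include <stdio.h>
+	#include <string.h>
+	#include <sys/uio.h>
+
+	int main(void)
+	{
+		char buf[4096] = { 0 };
+		struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
+		int fd = open("/mnt/dax/newfile", O_CREAT | O_TRUNC | O_WRONLY, 0644);
+
+		if (fd < 0)
+			return 1;
+		if (pwritev2(fd, &iov, 1, 0, RWF_NOWAIT) < 0)
+			printf("pwritev2: %s (EAGAIN expected)\n", strerror(errno));
+		return 0;
+	}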
+
+Signed-off-by: Jeff Moyer <jmoyer@redhat.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/r/x49r1z86e1d.fsf@segfault.boston.devel.redhat.com
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/dax.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/fs/dax.c b/fs/dax.c
+index 1f1f0201cad18..0b0d8819cb1bb 100644
+--- a/fs/dax.c
++++ b/fs/dax.c
+@@ -1207,6 +1207,9 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
+ lockdep_assert_held(&inode->i_rwsem);
+ }
+
++ if (iocb->ki_flags & IOCB_NOWAIT)
++ flags |= IOMAP_NOWAIT;
++
+ while (iov_iter_count(iter)) {
+ ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
+ iter, dax_iomap_actor);
+--
+2.20.1
+
--- /dev/null
+From 8bce983f2ee00569a091417986580e96b846059d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Jan 2020 10:57:16 -0500
+Subject: drm/amd/display: Add initializations for PLL2 clock source
+
+From: Isabel Zhang <isabel.zhang@amd.com>
+
+[ Upstream commit c134c3cabae46a56ab2e1f5e5fa49405e1758838 ]
+
+[Why]
+Starting from 14nm, the PLL is built into the PHY and the PLL is
+mapped to the PHY on a 1-to-1 basis. In the code, the DP port is
+mapped to a PLL that was not initialized. This causes a DP-to-HDMI
+dongle to not light up the display.
+
+[How]
+Initializations added for PLL2 when creating resources.
+
+Signed-off-by: Isabel Zhang <isabel.zhang@amd.com>
+Reviewed-by: Eric Yang <eric.yang2@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+index 83cda43a1b6b3..77741b18c85b0 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+@@ -57,6 +57,7 @@
+ #include "dcn20/dcn20_dccg.h"
+ #include "dcn21_hubbub.h"
+ #include "dcn10/dcn10_resource.h"
++#include "dce110/dce110_resource.h"
+
+ #include "dcn20/dcn20_dwb.h"
+ #include "dcn20/dcn20_mmhubbub.h"
+@@ -867,6 +868,7 @@ static const struct dc_debug_options debug_defaults_diags = {
+ enum dcn20_clk_src_array_id {
+ DCN20_CLK_SRC_PLL0,
+ DCN20_CLK_SRC_PLL1,
++ DCN20_CLK_SRC_PLL2,
+ DCN20_CLK_SRC_TOTAL_DCN21
+ };
+
+@@ -1730,6 +1732,10 @@ static bool construct(
+ dcn21_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL1,
+ &clk_src_regs[1], false);
++ pool->base.clock_sources[DCN20_CLK_SRC_PLL2] =
++ dcn21_clock_source_create(ctx, ctx->dc_bios,
++ CLOCK_SOURCE_COMBO_PHY_PLL2,
++ &clk_src_regs[2], false);
+
+ pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL_DCN21;
+
+--
+2.20.1
+
--- /dev/null
+From aded50393a2a3755624d24c40b0fd65d93623b01 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 Jan 2020 22:50:13 -0500
+Subject: drm/amd/display: Check engine is not NULL before acquiring
+
+From: Aric Cyr <aric.cyr@amd.com>
+
+[ Upstream commit 2b63d0ec0daf79ba503fa8bfa25e07dc3da274f3 ]
+
+[Why]
+Engine can be NULL in some cases, so we must not acquire it.
+
+[How]
+Check for NULL engine before acquiring.
+
+Signed-off-by: Aric Cyr <aric.cyr@amd.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_aux.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+index 793c0cec407f9..5fcffb29317e3 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+@@ -398,7 +398,7 @@ static bool acquire(
+ {
+ enum gpio_result result;
+
+- if (!is_engine_available(engine))
++ if ((engine == NULL) || !is_engine_available(engine))
+ return false;
+
+ result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE,
+--
+2.20.1
+
--- /dev/null
+From 00a10d95d4d7bae582688d1370c35524c6c68d52 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 Jan 2020 11:55:06 -0500
+Subject: drm/amd/display: Do not set optimized_require to false after plane
+ disable
+
+From: Sung Lee <sung.lee@amd.com>
+
+[ Upstream commit df36f6cf23ada812930afa8ee76681d4ad307c61 ]
+
+[WHY]
+The optimized_require flag is needed to set watermarks and clocks
+lower in certain conditions. This flag is set to true and then set to
+false while programming the front end in dcn20.
+
+[HOW]
+Do not set the flag to false while disabling plane.
+
+Signed-off-by: Sung Lee <sung.lee@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+index ac8c18fadefce..448bc9b39942f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+@@ -493,7 +493,6 @@ static void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
+ dpp->funcs->dpp_dppclk_control(dpp, false, false);
+
+ hubp->power_gated = true;
+- dc->optimized_required = false; /* We're powering off, no need to optimize */
+
+ dc->hwss.plane_atomic_power_down(dc,
+ pipe_ctx->plane_res.dpp,
+--
+2.20.1
+
--- /dev/null
+From 8f82eba8e70c517cd71545296b468ccdc4efb73f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 Jan 2020 16:30:15 -0500
+Subject: drm/amd/display: Limit minimum DPPCLK to 100MHz.
+
+From: Yongqiang Sun <yongqiang.sun@amd.com>
+
+[ Upstream commit 6c81917a0485ee2a1be0dc23321ac10ecfd9578b ]
+
+[Why]
+Underflow is observed when plugging in a 4K@60 monitor alongside a
+1366x768 eDP panel, because DPPCLK is too low.
+
+[How]
+Limit minimum DPPCLK to 100MHz.
+
+Signed-off-by: Yongqiang Sun <yongqiang.sun@amd.com>
+Reviewed-by: Eric Yang <eric.yang2@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+index dbf063856846e..5f683d118d2aa 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+@@ -149,6 +149,12 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
+ rn_vbios_smu_set_min_deep_sleep_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_deep_sleep_khz);
+ }
+
++ // workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow.
++ if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
++ if (new_clocks->dppclk_khz < 100000)
++ new_clocks->dppclk_khz = 100000;
++ }
++
+ if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
+ if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
+ dpp_clock_lowered = true;
+--
+2.20.1
+
--- /dev/null
+From 52449be554ee061e1ebf69d42856d630d73f3e8b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 Jan 2020 11:18:48 -0800
+Subject: drm/msm: Set dma maximum segment size for mdss
+
+From: Sean Paul <seanpaul@chromium.org>
+
+[ Upstream commit db735fc4036bbe1fbe606819b5f0ff26cc76cdff ]
+
+Turning on CONFIG_DMA_API_DEBUG_SG results in the following error:
+
+[ 12.078665] msm ae00000.mdss: DMA-API: mapping sg segment longer than device claims to support [len=3526656] [max=65536]
+[ 12.089870] WARNING: CPU: 6 PID: 334 at /mnt/host/source/src/third_party/kernel/v4.19/kernel/dma/debug.c:1301 debug_dma_map_sg+0x1dc/0x318
+[ 12.102655] Modules linked in: joydev
+[ 12.106442] CPU: 6 PID: 334 Comm: frecon Not tainted 4.19.0 #2
+[ 12.112450] Hardware name: Google Cheza (rev3+) (DT)
+[ 12.117566] pstate: 60400009 (nZCv daif +PAN -UAO)
+[ 12.122506] pc : debug_dma_map_sg+0x1dc/0x318
+[ 12.126995] lr : debug_dma_map_sg+0x1dc/0x318
+[ 12.131487] sp : ffffff800cc3ba80
+[ 12.134913] x29: ffffff800cc3ba80 x28: 0000000000000000
+[ 12.140395] x27: 0000000000000004 x26: 0000000000000004
+[ 12.145868] x25: ffffff8008e55b18 x24: 0000000000000000
+[ 12.151337] x23: 00000000ffffffff x22: ffffff800921c000
+[ 12.156809] x21: ffffffc0fa75b080 x20: ffffffc0f7195090
+[ 12.162280] x19: ffffffc0f1c53280 x18: 0000000000000000
+[ 12.167749] x17: 0000000000000000 x16: 0000000000000000
+[ 12.173218] x15: 0000000000000000 x14: 0720072007200720
+[ 12.178689] x13: 0720072007200720 x12: 0720072007200720
+[ 12.184161] x11: 0720072007200720 x10: 0720072007200720
+[ 12.189641] x9 : ffffffc0f1fc6b60 x8 : 0000000000000000
+[ 12.195110] x7 : ffffff8008132ce0 x6 : 0000000000000000
+[ 12.200585] x5 : 0000000000000000 x4 : ffffff8008134734
+[ 12.206058] x3 : ffffff800cc3b830 x2 : ffffffc0f1fc6240
+[ 12.211532] x1 : 25045a74f48a7400 x0 : 25045a74f48a7400
+[ 12.217006] Call trace:
+[ 12.219535] debug_dma_map_sg+0x1dc/0x318
+[ 12.223671] get_pages+0x19c/0x20c
+[ 12.227177] msm_gem_fault+0x64/0xfc
+[ 12.230874] __do_fault+0x3c/0x140
+[ 12.234383] __handle_mm_fault+0x70c/0xdb8
+[ 12.238603] handle_mm_fault+0xac/0xc4
+[ 12.242473] do_page_fault+0x1bc/0x3d4
+[ 12.246342] do_translation_fault+0x54/0x88
+[ 12.250652] do_mem_abort+0x60/0xf0
+[ 12.254250] el0_da+0x20/0x24
+[ 12.257317] irq event stamp: 67260
+[ 12.260828] hardirqs last enabled at (67259): [<ffffff8008132d0c>] console_unlock+0x214/0x608
+[ 12.269693] hardirqs last disabled at (67260): [<ffffff8008080e0c>] do_debug_exception+0x5c/0x178
+[ 12.278820] softirqs last enabled at (67256): [<ffffff8008081664>] __do_softirq+0x4d4/0x520
+[ 12.287510] softirqs last disabled at (67249): [<ffffff80080be574>] irq_exit+0xa8/0x100
+[ 12.295742] ---[ end trace e63cfc40c313ffab ]---
+
+The root of the problem is that the default segment size for sgt is
+(UINT_MAX & PAGE_MASK), and the default segment size for device dma is
+64K. As such, if you compare the two, you would deduce that the sg segment
+will overflow the device's capacity. In reality, the hardware can
+accommodate the larger sg segments, it's just not initializing its max
+segment properly. This patch initializes the max segment size for the
+mdss device, which gets rid of that pesky warning.
+
+Reported-by: Stephen Boyd <swboyd@chromium.org>
+Tested-by: Stephen Boyd <swboyd@chromium.org>
+Tested-by: Sai Prakash Ranjan <saiprakash.ranjan@codeaurora.org>
+Reviewed-by: Rob Clark <robdclark@gmail.com>
+Signed-off-by: Sean Paul <seanpaul@chromium.org>
+Signed-off-by: Douglas Anderson <dianders@chromium.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200121111813.REPOST.1.I92c66a35fb13f368095b05287bdabdbe88ca6922@changeid
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/msm/msm_drv.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
+index c84f0a8b3f2ce..b73fbb65e14b2 100644
+--- a/drivers/gpu/drm/msm/msm_drv.c
++++ b/drivers/gpu/drm/msm/msm_drv.c
+@@ -441,6 +441,14 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
+ if (ret)
+ goto err_msm_uninit;
+
++ if (!dev->dma_parms) {
++ dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
++ GFP_KERNEL);
++ if (!dev->dma_parms)
++ return -ENOMEM;
++ }
++ dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
++
+ msm_gem_shrinker_init(ddev);
+
+ switch (get_mdp_ver(pdev)) {
+--
+2.20.1
+
--- /dev/null
+From e3f56991e9c1db7d6add04a1d71d9672ff3c214d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 10 Feb 2020 10:59:18 -0800
+Subject: i40e: Fix the conditional for i40e_vc_validate_vqs_bitmaps
+
+From: Brett Creeley <brett.creeley@intel.com>
+
+[ Upstream commit f27f37a04a69890ac85d9155f03ee2d23b678d8f ]
+
+Commit d9d6a9aed3f6 ("i40e: Fix virtchnl_queue_select bitmap
+validation") introduced a necessary change for verifying how queue
+bitmaps from the iavf driver get validated. Unfortunately, the
+conditional was reversed. Fix this.
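+
+A hedged stand-in for the helper (not the driver's actual code) to show
+the convention at play: the validator answers "is this bitmap usable?",
+so the error path has to fire on the negated result:
+
+	static bool validate_vqs_bitmaps(u32 rx, u32 tx, u32 allowed)
+	{
+		if (!rx && !tx)
+			return false;	/* nothing selected */
+		if ((rx | tx) & ~allowed)
+			return false;	/* queue outside the VF's range */
+		return true;
+	}
+
+	/* caller: bail out only when the bitmaps are invalid */
+	if (!validate_vqs_bitmaps(vqs->rx_queues, vqs->tx_queues, allowed))
+		goto error_param;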
+
+Fixes: d9d6a9aed3f6 ("i40e: Fix virtchnl_queue_select bitmap validation")
+Signed-off-by: Brett Creeley <brett.creeley@intel.com>
+Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+index 69523ac85639e..56b9e445732ba 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+@@ -2362,7 +2362,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
+ goto error_param;
+ }
+
+- if (i40e_vc_validate_vqs_bitmaps(vqs)) {
++ if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+@@ -2424,7 +2424,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
+ goto error_param;
+ }
+
+- if (i40e_vc_validate_vqs_bitmaps(vqs)) {
++ if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+--
+2.20.1
+
--- /dev/null
+From 56a831a909eaf58f10eee0bcb2380a458c77ecbe Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 6 Feb 2020 01:20:02 -0800
+Subject: ice: Don't allow same value for Rx tail to be written twice
+
+From: Brett Creeley <brett.creeley@intel.com>
+
+[ Upstream commit 168983a8e19b89efd175661e53faa6246be363a0 ]
+
+Currently we compare the value we are about to write to the Rx tail
+register with the previous value of next_to_use. The problem with this
+is we only write tail on 8 descriptor boundaries, but next_to_use is
+updated whenever we clean Rx descriptors. Fix this by comparing the
+value we are about to write to tail with the previously written tail
+value. This will prevent duplicate Rx tail bumps.
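+
+A tiny standalone example of the masking this uses: tail is only ever
+written on 8-descriptor boundaries, so rounding next_to_use down with
+"& ~0x7" recovers the value that was last written to the register:
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		unsigned int next_to_use = 29;
+		unsigned int prev_tail = next_to_use & ~0x7u;	/* 24 */
+
+		printf("ntu=%u prev_tail=%u\n", next_to_use, prev_tail);
+		return 0;
+	}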
+
+Signed-off-by: Brett Creeley <brett.creeley@intel.com>
+Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_txrx_lib.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+index 35bbc4ff603cd..6da048a6ca7c1 100644
+--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+@@ -10,7 +10,7 @@
+ */
+ void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
+ {
+- u16 prev_ntu = rx_ring->next_to_use;
++ u16 prev_ntu = rx_ring->next_to_use & ~0x7;
+
+ rx_ring->next_to_use = val;
+
+--
+2.20.1
+
--- /dev/null
+From 458f82ef7d3b5c864ca9903cad77b25882a6eef7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 6 Feb 2020 01:20:06 -0800
+Subject: ice: fix and consolidate logging of NVM/firmware version information
+
+From: Bruce Allan <bruce.w.allan@intel.com>
+
+[ Upstream commit fbf1e1f6988e70287b1bfcad4f655ca96b681929 ]
+
+Logging the firmware/NVM information during driver load is redundant since
+that information is also available via ethtool. Move the functionality
+found in ice_nvm_version_str() directly into ice_get_drvinfo(), remove
+the call to the former, and stop logging that info during driver probe.
+This also gets rid of a bug in ice_nvm_version_str() where it returns a
+pointer to a buffer which is freed when that function exits.
+
+Signed-off-by: Bruce Allan <bruce.w.allan@intel.com>
+Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_ethtool.c | 15 +++++++++++++--
+ drivers/net/ethernet/intel/ice/ice_lib.c | 19 -------------------
+ drivers/net/ethernet/intel/ice/ice_lib.h | 2 --
+ drivers/net/ethernet/intel/ice/ice_main.c | 5 -----
+ 4 files changed, 13 insertions(+), 28 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+index 9ebd93e79aeb6..f956f7bb4ef2d 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
++++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+@@ -165,13 +165,24 @@ static void
+ ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
+ {
+ struct ice_netdev_priv *np = netdev_priv(netdev);
++ u8 oem_ver, oem_patch, nvm_ver_hi, nvm_ver_lo;
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
++ struct ice_hw *hw = &pf->hw;
++ u16 oem_build;
+
+ strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, ice_drv_ver, sizeof(drvinfo->version));
+- strlcpy(drvinfo->fw_version, ice_nvm_version_str(&pf->hw),
+- sizeof(drvinfo->fw_version));
++
++ /* Display NVM version (from which the firmware version can be
++ * determined) which contains more pertinent information.
++ */
++ ice_get_nvm_version(hw, &oem_ver, &oem_build, &oem_patch,
++ &nvm_ver_hi, &nvm_ver_lo);
++ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
++ "%x.%02x 0x%x %d.%d.%d", nvm_ver_hi, nvm_ver_lo,
++ hw->nvm.eetrack, oem_ver, oem_build, oem_patch);
++
+ strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
+ sizeof(drvinfo->bus_info));
+ drvinfo->n_priv_flags = ICE_PRIV_FLAG_ARRAY_SIZE;
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
+index e7449248fab4c..e0e3c6400e4b9 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
+@@ -2647,25 +2647,6 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
+ }
+ #endif /* CONFIG_DCB */
+
+-/**
+- * ice_nvm_version_str - format the NVM version strings
+- * @hw: ptr to the hardware info
+- */
+-char *ice_nvm_version_str(struct ice_hw *hw)
+-{
+- u8 oem_ver, oem_patch, ver_hi, ver_lo;
+- static char buf[ICE_NVM_VER_LEN];
+- u16 oem_build;
+-
+- ice_get_nvm_version(hw, &oem_ver, &oem_build, &oem_patch, &ver_hi,
+- &ver_lo);
+-
+- snprintf(buf, sizeof(buf), "%x.%02x 0x%x %d.%d.%d", ver_hi, ver_lo,
+- hw->nvm.eetrack, oem_ver, oem_build, oem_patch);
+-
+- return buf;
+-}
+-
+ /**
+ * ice_update_ring_stats - Update ring statistics
+ * @ring: ring to update
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
+index 6e31e30aba394..0d2b1119c0e38 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lib.h
++++ b/drivers/net/ethernet/intel/ice/ice_lib.h
+@@ -97,8 +97,6 @@ void ice_vsi_cfg_frame_size(struct ice_vsi *vsi);
+
+ u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran);
+
+-char *ice_nvm_version_str(struct ice_hw *hw);
+-
+ enum ice_status
+ ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 69bff085acf75..b4cbeb4f3177f 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -3241,11 +3241,6 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
+ goto err_exit_unroll;
+ }
+
+- dev_info(dev, "firmware %d.%d.%d api %d.%d.%d nvm %s build 0x%08x\n",
+- hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch,
+- hw->api_maj_ver, hw->api_min_ver, hw->api_patch,
+- ice_nvm_version_str(hw), hw->fw_build);
+-
+ ice_request_fw(pf);
+
+ /* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be
+--
+2.20.1
+
--- /dev/null
+From e98ebc7380c28a0cca2fbb3f2ebb08eabcc88fa2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 6 Feb 2020 01:20:00 -0800
+Subject: ice: Fix switch between FW and SW LLDP
+
+From: Dave Ertman <david.m.ertman@intel.com>
+
+[ Upstream commit 53977ee47410885e7d4eee87d2c811a48a275150 ]
+
+When switching between FW and SW LLDP mode, the
+number of configured TLV apps in the driver's
+DCB configuration is getting out of sync with
+what lldpad thinks is configured. This is causing
+a problem when shutting down lldpad. The cleanup
+is trying to delete TLV apps that are not defined
+in the kernel.
+
+Since the driver is keeping an accurate account
+of the apps defined, use the driver's number of
+apps to determine if there is an app to delete.
+If the number of apps is <= 1, then do not
+attempt to delete.
+
+Signed-off-by: Dave Ertman <david.m.ertman@intel.com>
+Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_dcb_nl.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
+index d870c1aedc170..926c9772f0860 100644
+--- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
++++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
+@@ -713,13 +713,13 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app)
+ return -EINVAL;
+
+ mutex_lock(&pf->tc_mutex);
+- ret = dcb_ieee_delapp(netdev, app);
+- if (ret)
+- goto delapp_out;
+-
+ old_cfg = &pf->hw.port_info->local_dcbx_cfg;
+
+- if (old_cfg->numapps == 1)
++ if (old_cfg->numapps <= 1)
++ goto delapp_out;
++
++ ret = dcb_ieee_delapp(netdev, app);
++ if (ret)
+ goto delapp_out;
+
+ new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+--
+2.20.1
+
--- /dev/null
+From 934a4aa3694f46c7b2d8554f12b4b6c49b905e66 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 6 Feb 2020 01:20:07 -0800
+Subject: ice: update Unit Load Status bitmask to check after reset
+
+From: Bruce Allan <bruce.w.allan@intel.com>
+
+[ Upstream commit cf8fc2a0863f9ff27ebd2efcdb1f7d378b9fb8a6 ]
+
+After a reset, the Unit Load Status bits in the GLNVM_ULD register that
+are checked for completion should read 0x7FF before continuing. Update
+the mask that is checked accordingly (minus the three reserved bits,
+which are always set).
+
+Signed-off-by: Bruce Allan <bruce.w.allan@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_common.c | 17 ++++++++++++-----
+ drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 6 ++++++
+ 2 files changed, 18 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
+index fb1d930470c71..cb437a448305e 100644
+--- a/drivers/net/ethernet/intel/ice/ice_common.c
++++ b/drivers/net/ethernet/intel/ice/ice_common.c
+@@ -937,7 +937,7 @@ void ice_deinit_hw(struct ice_hw *hw)
+ */
+ enum ice_status ice_check_reset(struct ice_hw *hw)
+ {
+- u32 cnt, reg = 0, grst_delay;
++ u32 cnt, reg = 0, grst_delay, uld_mask;
+
+ /* Poll for Device Active state in case a recent CORER, GLOBR,
+ * or EMPR has occurred. The grst delay value is in 100ms units.
+@@ -959,13 +959,20 @@ enum ice_status ice_check_reset(struct ice_hw *hw)
+ return ICE_ERR_RESET_FAILED;
+ }
+
+-#define ICE_RESET_DONE_MASK (GLNVM_ULD_CORER_DONE_M | \
+- GLNVM_ULD_GLOBR_DONE_M)
++#define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\
++ GLNVM_ULD_PCIER_DONE_1_M |\
++ GLNVM_ULD_CORER_DONE_M |\
++ GLNVM_ULD_GLOBR_DONE_M |\
++ GLNVM_ULD_POR_DONE_M |\
++ GLNVM_ULD_POR_DONE_1_M |\
++ GLNVM_ULD_PCIER_DONE_2_M)
++
++ uld_mask = ICE_RESET_DONE_MASK;
+
+ /* Device is Active; check Global Reset processes are done */
+ for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
+- reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK;
+- if (reg == ICE_RESET_DONE_MASK) {
++ reg = rd32(hw, GLNVM_ULD) & uld_mask;
++ if (reg == uld_mask) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Global reset processes done. %d\n", cnt);
+ break;
+diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+index e8f32350fed29..6f4a70fa39037 100644
+--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
++++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+@@ -276,8 +276,14 @@
+ #define GLNVM_GENS_SR_SIZE_S 5
+ #define GLNVM_GENS_SR_SIZE_M ICE_M(0x7, 5)
+ #define GLNVM_ULD 0x000B6008
++#define GLNVM_ULD_PCIER_DONE_M BIT(0)
++#define GLNVM_ULD_PCIER_DONE_1_M BIT(1)
+ #define GLNVM_ULD_CORER_DONE_M BIT(3)
+ #define GLNVM_ULD_GLOBR_DONE_M BIT(4)
++#define GLNVM_ULD_POR_DONE_M BIT(5)
++#define GLNVM_ULD_POR_DONE_1_M BIT(8)
++#define GLNVM_ULD_PCIER_DONE_2_M BIT(9)
++#define GLNVM_ULD_PE_DONE_M BIT(10)
+ #define GLPCI_CNF2 0x000BE004
+ #define GLPCI_CNF2_CACHELINE_SIZE_M BIT(1)
+ #define PF_FUNC_RID 0x0009E880
+--
+2.20.1
+
--- /dev/null
+From 2a4d6bebc326a06df2af9bc78f17e392da8a70b8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 6 Feb 2020 01:20:12 -0800
+Subject: ice: Use correct netif error function
+
+From: Ben Shelton <benjamin.h.shelton@intel.com>
+
+[ Upstream commit 1d8bd9927234081db15a1d42a7f99505244e3703 ]
+
+Use the correct netif_msg_[tx,rx]_error() function to determine whether to
+print the MDD event type.
+
+Signed-off-by: Ben Shelton <benjamin.h.shelton@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index c9b35b202639d..7f71f06fa819c 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -1235,7 +1235,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
+ u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
+ GL_MDET_TX_TCLAN_QNUM_S);
+
+- if (netif_msg_rx_err(pf))
++ if (netif_msg_tx_err(pf))
+ dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
+ event, queue, pf_num, vf_num);
+ wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
+--
+2.20.1
+
--- /dev/null
+From 312902cdb4e400c7a2b3bd4d14b9fec04d3373f3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 6 Feb 2020 01:20:09 -0800
+Subject: ice: Use ice_pf_to_dev
+
+From: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
+
+[ Upstream commit 9a946843ba5c173e259fef7a035feac994a65b59 ]
+
+Use ice_pf_to_dev(pf) instead of &pf->pdev->dev.
+Use ice_pf_to_dev(vsi->back) instead of &vsi->back->pdev->dev.
+When a pointer to the pf instance is available, use ice_pf_to_dev()
+instead of ice_hw_to_dev().
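+
+The accessor is presumably just a thin wrapper over the pointer chase
+it replaces, along the lines of this sketch (the real definition lives
+in ice.h):
+
+	static inline struct device *ice_pf_to_dev(struct ice_pf *pf)
+	{
+		return &pf->pdev->dev;
+	}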
+
+Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
+Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_base.c | 12 ++++++------
+ drivers/net/ethernet/intel/ice/ice_dcb_nl.c | 2 +-
+ drivers/net/ethernet/intel/ice/ice_ethtool.c | 2 +-
+ drivers/net/ethernet/intel/ice/ice_lib.c | 14 +++++++-------
+ drivers/net/ethernet/intel/ice/ice_main.c | 16 ++++++++--------
+ drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 8 ++++----
+ 6 files changed, 27 insertions(+), 27 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
+index 77d6a0291e975..6939c14858b20 100644
+--- a/drivers/net/ethernet/intel/ice/ice_base.c
++++ b/drivers/net/ethernet/intel/ice/ice_base.c
+@@ -320,7 +320,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
+ if (err)
+ return err;
+
+- dev_info(&vsi->back->pdev->dev, "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n",
++ dev_info(ice_pf_to_dev(vsi->back), "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n",
+ ring->q_index);
+ } else {
+ if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
+@@ -399,7 +399,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
+ /* Absolute queue number out of 2K needs to be passed */
+ err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
+ if (err) {
+- dev_err(&vsi->back->pdev->dev,
++ dev_err(ice_pf_to_dev(vsi->back),
+ "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
+ pf_q, err);
+ return -EIO;
+@@ -422,7 +422,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
+ ice_alloc_rx_bufs_slow_zc(ring, ICE_DESC_UNUSED(ring)) :
+ ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));
+ if (err)
+- dev_info(&vsi->back->pdev->dev,
++ dev_info(ice_pf_to_dev(vsi->back),
+ "Failed allocate some buffers on %sRx ring %d (pf_q %d)\n",
+ ring->xsk_umem ? "UMEM enabled " : "",
+ ring->q_index, pf_q);
+@@ -817,13 +817,13 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+ * queues at the hardware level anyway.
+ */
+ if (status == ICE_ERR_RESET_ONGOING) {
+- dev_dbg(&vsi->back->pdev->dev,
++ dev_dbg(ice_pf_to_dev(vsi->back),
+ "Reset in progress. LAN Tx queues already disabled\n");
+ } else if (status == ICE_ERR_DOES_NOT_EXIST) {
+- dev_dbg(&vsi->back->pdev->dev,
++ dev_dbg(ice_pf_to_dev(vsi->back),
+ "LAN Tx queues do not exist, nothing to disable\n");
+ } else if (status) {
+- dev_err(&vsi->back->pdev->dev,
++ dev_err(ice_pf_to_dev(vsi->back),
+ "Failed to disable LAN Tx queues, error: %d\n", status);
+ return -ENODEV;
+ }
+diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
+index 926c9772f0860..265cf69b321bf 100644
+--- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
++++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
+@@ -882,7 +882,7 @@ ice_dcbnl_vsi_del_app(struct ice_vsi *vsi,
+ sapp.protocol = app->prot_id;
+ sapp.priority = app->priority;
+ err = ice_dcbnl_delapp(vsi->netdev, &sapp);
+- dev_dbg(&vsi->back->pdev->dev,
++ dev_dbg(ice_pf_to_dev(vsi->back),
+ "Deleting app for VSI idx=%d err=%d sel=%d proto=0x%x, prio=%d\n",
+ vsi->idx, err, app->selector, app->prot_id, app->priority);
+ }
+diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+index f956f7bb4ef2d..9bd166e3dff3d 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
++++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+@@ -1054,7 +1054,7 @@ ice_set_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
+ fec = ICE_FEC_NONE;
+ break;
+ default:
+- dev_warn(&vsi->back->pdev->dev, "Unsupported FEC mode: %d\n",
++ dev_warn(ice_pf_to_dev(vsi->back), "Unsupported FEC mode: %d\n",
+ fecparam->fec);
+ return -EINVAL;
+ }
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
+index e0e3c6400e4b9..b43bb51f6067a 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
+@@ -116,7 +116,7 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
+ vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
+ break;
+ default:
+- dev_dbg(&vsi->back->pdev->dev,
++ dev_dbg(ice_pf_to_dev(vsi->back),
+ "Not setting number of Tx/Rx descriptors for VSI type %d\n",
+ vsi->type);
+ break;
+@@ -697,7 +697,7 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
+ vsi->num_txq = tx_count;
+
+ if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
+- dev_dbg(&vsi->back->pdev->dev, "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
++ dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
+ /* since there is a chance that num_rxq could have been changed
+ * in the above for loop, make num_txq equal to num_rxq.
+ */
+@@ -1306,7 +1306,7 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
+
+ err = ice_setup_rx_ctx(vsi->rx_rings[i]);
+ if (err) {
+- dev_err(&vsi->back->pdev->dev,
++ dev_err(ice_pf_to_dev(vsi->back),
+ "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
+ i, err);
+ return err;
+@@ -1476,7 +1476,7 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
+
+ status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (status) {
+- dev_err(&vsi->back->pdev->dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
++ dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %d aq_err %d\n",
+ status, hw->adminq.sq_last_status);
+ ret = -EIO;
+ goto out;
+@@ -1522,7 +1522,7 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
+
+ status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (status) {
+- dev_err(&vsi->back->pdev->dev, "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n",
++ dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n",
+ ena, status, hw->adminq.sq_last_status);
+ ret = -EIO;
+ goto out;
+@@ -1696,7 +1696,7 @@ ice_vsi_set_q_vectors_reg_idx(struct ice_vsi *vsi)
+ struct ice_q_vector *q_vector = vsi->q_vectors[i];
+
+ if (!q_vector) {
+- dev_err(&vsi->back->pdev->dev,
++ dev_err(ice_pf_to_dev(vsi->back),
+ "Failed to set reg_idx on q_vector %d VSI %d\n",
+ i, vsi->vsi_num);
+ goto clear_reg_idx;
+@@ -2718,6 +2718,6 @@ ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set)
+ status = ice_remove_mac(&vsi->back->hw, &tmp_add_list);
+
+ cfg_mac_fltr_exit:
+- ice_free_fltr_list(&vsi->back->pdev->dev, &tmp_add_list);
++ ice_free_fltr_list(ice_pf_to_dev(vsi->back), &tmp_add_list);
+ return status;
+ }
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index b4cbeb4f3177f..c9b35b202639d 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -269,7 +269,7 @@ static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc)
+ */
+ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
+ {
+- struct device *dev = &vsi->back->pdev->dev;
++ struct device *dev = ice_pf_to_dev(vsi->back);
+ struct net_device *netdev = vsi->netdev;
+ bool promisc_forced_on = false;
+ struct ice_pf *pf = vsi->back;
+@@ -1364,7 +1364,7 @@ static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
+ if (vsi->type != ICE_VSI_PF)
+ return 0;
+
+- dev = &vsi->back->pdev->dev;
++ dev = ice_pf_to_dev(vsi->back);
+
+ pi = vsi->port_info;
+
+@@ -1682,7 +1682,7 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
+ */
+ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
+ {
+- struct device *dev = &vsi->back->pdev->dev;
++ struct device *dev = ice_pf_to_dev(vsi->back);
+ int i;
+
+ for (i = 0; i < vsi->num_xdp_txq; i++) {
+@@ -3858,14 +3858,14 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
+
+ /* Don't set any netdev advanced features with device in Safe Mode */
+ if (ice_is_safe_mode(vsi->back)) {
+- dev_err(&vsi->back->pdev->dev,
++ dev_err(ice_pf_to_dev(vsi->back),
+ "Device is in Safe Mode - not enabling advanced netdev features\n");
+ return ret;
+ }
+
+ /* Do not change setting during reset */
+ if (ice_is_reset_in_progress(pf->state)) {
+- dev_err(&vsi->back->pdev->dev,
++ dev_err(ice_pf_to_dev(vsi->back),
+ "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
+ return -EBUSY;
+ }
+@@ -4408,7 +4408,7 @@ int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
+ int i, err = 0;
+
+ if (!vsi->num_txq) {
+- dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n",
++ dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
+ vsi->vsi_num);
+ return -EINVAL;
+ }
+@@ -4439,7 +4439,7 @@ int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
+ int i, err = 0;
+
+ if (!vsi->num_rxq) {
+- dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n",
++ dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
+ vsi->vsi_num);
+ return -EINVAL;
+ }
+@@ -4968,7 +4968,7 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
+
+ status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (status) {
+- dev_err(&vsi->back->pdev->dev, "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n",
++ dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n",
+ bmode, status, hw->adminq.sq_last_status);
+ ret = -EIO;
+ goto out;
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+index edb374296d1f3..e2114f24a19e9 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+@@ -508,7 +508,7 @@ static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable)
+
+ status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (status) {
+- dev_info(&vsi->back->pdev->dev, "update VSI for port VLAN failed, err %d aq_err %d\n",
++ dev_info(ice_pf_to_dev(vsi->back), "update VSI for port VLAN failed, err %d aq_err %d\n",
+ status, hw->adminq.sq_last_status);
+ ret = -EIO;
+ goto out;
+@@ -2019,7 +2019,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
+ continue;
+
+ if (ice_vsi_ctrl_rx_ring(vsi, true, vf_q_id)) {
+- dev_err(&vsi->back->pdev->dev,
++ dev_err(ice_pf_to_dev(vsi->back),
+ "Failed to enable Rx ring %d on VSI %d\n",
+ vf_q_id, vsi->vsi_num);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+@@ -2122,7 +2122,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
+
+ if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
+ ring, &txq_meta)) {
+- dev_err(&vsi->back->pdev->dev,
++ dev_err(ice_pf_to_dev(vsi->back),
+ "Failed to stop Tx ring %d on VSI %d\n",
+ vf_q_id, vsi->vsi_num);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+@@ -2149,7 +2149,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
+ continue;
+
+ if (ice_vsi_ctrl_rx_ring(vsi, false, vf_q_id)) {
+- dev_err(&vsi->back->pdev->dev,
++ dev_err(ice_pf_to_dev(vsi->back),
+ "Failed to stop Rx ring %d on VSI %d\n",
+ vf_q_id, vsi->vsi_num);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+--
+2.20.1
+
--- /dev/null
+From f7b4a056d6242f884ab290f1943838ce3bcf232e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 Feb 2020 06:30:06 -0700
+Subject: io-wq: don't call kXalloc_node() with non-online node
+
+From: Jens Axboe <axboe@kernel.dk>
+
+[ Upstream commit 7563439adfae153b20331f1567c8b5d0e5cbd8a7 ]
+
+Glauber reports a crash on init on a box he has:
+
+ RIP: 0010:__alloc_pages_nodemask+0x132/0x340
+ Code: 18 01 75 04 41 80 ce 80 89 e8 48 8b 54 24 08 8b 74 24 1c c1 e8 0c 48 8b 3c 24 83 e0 01 88 44 24 20 48 85 d2 0f 85 74 01 00 00 <3b> 77 08 0f 82 6b 01 00 00 48 89 7c 24 10 89 ea 48 8b 07 b9 00 02
+ RSP: 0018:ffffb8be4d0b7c28 EFLAGS: 00010246
+ RAX: 0000000000000000 RBX: 0000000000000000 RCX: 000000000000e8e8
+ RDX: 0000000000000000 RSI: 0000000000000002 RDI: 0000000000002080
+ RBP: 0000000000012cc0 R08: 0000000000000000 R09: 0000000000000002
+ R10: 0000000000000dc0 R11: ffff995c60400100 R12: 0000000000000000
+ R13: 0000000000012cc0 R14: 0000000000000001 R15: ffff995c60db00f0
+ FS: 00007f4d115ca900(0000) GS:ffff995c60d80000(0000) knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 0000000000002088 CR3: 00000017cca66002 CR4: 00000000007606e0
+ DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+ DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+ PKRU: 55555554
+ Call Trace:
+ alloc_slab_page+0x46/0x320
+ new_slab+0x9d/0x4e0
+ ___slab_alloc+0x507/0x6a0
+ ? io_wq_create+0xb4/0x2a0
+ __slab_alloc+0x1c/0x30
+ kmem_cache_alloc_node_trace+0xa6/0x260
+ io_wq_create+0xb4/0x2a0
+ io_uring_setup+0x97f/0xaa0
+ ? io_remove_personalities+0x30/0x30
+ ? io_poll_trigger_evfd+0x30/0x30
+ do_syscall_64+0x5b/0x1c0
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+ RIP: 0033:0x7f4d116cb1ed
+
+which is due to the 'wqe' and 'worker' allocations being node-affine.
+But it isn't valid to request a node-affine allocation if the node isn't
+online.
+
+Set up structures even for offline nodes, as usual, but skip them when
+creating worker threads so resources aren't wasted. If the node isn't
+online, just allocate the memory with NUMA_NO_NODE.
+
+Reported-by: Glauber Costa <glauber@scylladb.com>
+Tested-by: Glauber Costa <glauber@scylladb.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/io-wq.c | 22 ++++++++++++++++++----
+ 1 file changed, 18 insertions(+), 4 deletions(-)
+
+diff --git a/fs/io-wq.c b/fs/io-wq.c
+index 0dc4bb6de6566..25ffb6685baea 100644
+--- a/fs/io-wq.c
++++ b/fs/io-wq.c
+@@ -666,11 +666,16 @@ static int io_wq_manager(void *data)
+ /* create fixed workers */
+ refcount_set(&wq->refs, workers_to_create);
+ for_each_node(node) {
++ if (!node_online(node))
++ continue;
+ if (!create_io_worker(wq, wq->wqes[node], IO_WQ_ACCT_BOUND))
+ goto err;
+ workers_to_create--;
+ }
+
++ while (workers_to_create--)
++ refcount_dec(&wq->refs);
++
+ complete(&wq->done);
+
+ while (!kthread_should_stop()) {
+@@ -678,6 +683,9 @@ static int io_wq_manager(void *data)
+ struct io_wqe *wqe = wq->wqes[node];
+ bool fork_worker[2] = { false, false };
+
++ if (!node_online(node))
++ continue;
++
+ spin_lock_irq(&wqe->lock);
+ if (io_wqe_need_worker(wqe, IO_WQ_ACCT_BOUND))
+ fork_worker[IO_WQ_ACCT_BOUND] = true;
+@@ -793,7 +801,9 @@ static bool io_wq_for_each_worker(struct io_wqe *wqe,
+
+ list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
+ if (io_worker_get(worker)) {
+- ret = func(worker, data);
++ /* no task if node is/was offline */
++ if (worker->task)
++ ret = func(worker, data);
+ io_worker_release(worker);
+ if (ret)
+ break;
+@@ -1006,6 +1016,8 @@ void io_wq_flush(struct io_wq *wq)
+ for_each_node(node) {
+ struct io_wqe *wqe = wq->wqes[node];
+
++ if (!node_online(node))
++ continue;
+ init_completion(&data.done);
+ INIT_IO_WORK(&data.work, io_wq_flush_func);
+ data.work.flags |= IO_WQ_WORK_INTERNAL;
+@@ -1038,12 +1050,15 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
+
+ for_each_node(node) {
+ struct io_wqe *wqe;
++ int alloc_node = node;
+
+- wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, node);
++ if (!node_online(alloc_node))
++ alloc_node = NUMA_NO_NODE;
++ wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node);
+ if (!wqe)
+ goto err;
+ wq->wqes[node] = wqe;
+- wqe->node = node;
++ wqe->node = alloc_node;
+ wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
+ atomic_set(&wqe->acct[IO_WQ_ACCT_BOUND].nr_running, 0);
+ if (wq->user) {
+@@ -1051,7 +1066,6 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
+ task_rlimit(current, RLIMIT_NPROC);
+ }
+ atomic_set(&wqe->acct[IO_WQ_ACCT_UNBOUND].nr_running, 0);
+- wqe->node = node;
+ wqe->wq = wq;
+ spin_lock_init(&wqe->lock);
+ INIT_WQ_LIST(&wqe->work_list);
+--
+2.20.1
+
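For readers unfamiliar with the pattern, here is a minimal user-space sketch of the fallback applied in the patch above. node_online() and alloc_on_node() are stubbed stand-ins for the kernel's node_online() and kzalloc_node(), and NO_NODE stands in for NUMA_NO_NODE; it is illustrative only, not the io-wq code itself.

/*
 * Per-node structures are still allocated for offline nodes, but from
 * "any node" instead of the offline node itself.
 */
#include <stdio.h>
#include <stdlib.h>

#define MAX_NODES 4
#define NO_NODE   (-1)

struct wqe { int node; };

/* stub: pretend only node 0 is online */
static int node_online(int node) { return node == 0; }

/* stub: a real implementation would bind the allocation to 'node' */
static void *alloc_on_node(size_t size, int node)
{
	(void)node;
	return calloc(1, size);
}

int main(void)
{
	struct wqe *wqes[MAX_NODES];

	for (int node = 0; node < MAX_NODES; node++) {
		int alloc_node = node;

		if (!node_online(alloc_node))
			alloc_node = NO_NODE;	/* fall back, don't skip the slot */

		wqes[node] = alloc_on_node(sizeof(struct wqe), alloc_node);
		if (!wqes[node])
			return 1;
		wqes[node]->node = alloc_node;
		printf("slot %d allocated for node %d\n", node, alloc_node);
	}

	for (int node = 0; node < MAX_NODES; node++)
		free(wqes[node]);
	return 0;
}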
--- /dev/null
+From 3cfcd863400c265b54c0bf74a04ecc17a638202c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 7 Feb 2020 13:18:28 +0100
+Subject: io_uring: flush overflowed CQ events in the io_uring_poll()
+
+From: Stefano Garzarella <sgarzare@redhat.com>
+
+[ Upstream commit 63e5d81f72af1bf370bf8a6745b0a8d71a7bb37d ]
+
+In io_uring_poll() we must flush overflowed CQ events before checking
+whether CQ events are available, to avoid missing events.
+
+Call io_cqring_events(), which checks for and flushes any overflow and
+returns the number of CQ events available.
+
+Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/io_uring.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 678c62782ba3b..de4bd647cd1df 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -4970,7 +4970,7 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
+ if (READ_ONCE(ctx->rings->sq.tail) - ctx->cached_sq_head !=
+ ctx->rings->sq_ring_entries)
+ mask |= EPOLLOUT | EPOLLWRNORM;
+- if (READ_ONCE(ctx->rings->cq.head) != ctx->cached_cq_tail)
++ if (io_cqring_events(ctx, false))
+ mask |= EPOLLIN | EPOLLRDNORM;
+
+ return mask;
+--
+2.20.1
+
--- /dev/null
+From 5c46b80630bd07771d7dab4c4c48ab1d37854271 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Dec 2019 10:42:19 -0600
+Subject: ipmi:ssif: Handle a possible NULL pointer reference
+
+From: Corey Minyard <cminyard@mvista.com>
+
+[ Upstream commit 6b8526d3abc02c08a2f888e8c20b7ac9e5776dfe ]
+
+In error cases a NULL can be passed to memcpy. The length will always
+be zero, so it doesn't really matter, but go ahead and check for NULL
+anyway, to be more precise and avoid static analysis errors.
+
+Reported-by: kbuild test robot <lkp@intel.com>
+Signed-off-by: Corey Minyard <cminyard@mvista.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/char/ipmi/ipmi_ssif.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
+index 22c6a2e612360..8ac390c2b5147 100644
+--- a/drivers/char/ipmi/ipmi_ssif.c
++++ b/drivers/char/ipmi/ipmi_ssif.c
+@@ -775,10 +775,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ msg = ssif_info->curr_msg;
+ if (msg) {
++ if (data) {
++ if (len > IPMI_MAX_MSG_LENGTH)
++ len = IPMI_MAX_MSG_LENGTH;
++ memcpy(msg->rsp, data, len);
++ } else {
++ len = 0;
++ }
+ msg->rsp_size = len;
+- if (msg->rsp_size > IPMI_MAX_MSG_LENGTH)
+- msg->rsp_size = IPMI_MAX_MSG_LENGTH;
+- memcpy(msg->rsp, data, msg->rsp_size);
+ ssif_info->curr_msg = NULL;
+ }
+
+--
+2.20.1
+
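As a stand-alone illustration of the defensive-copy pattern in the patch above, the sketch below clamps the length and treats a NULL source as "no data". copy_response() and MAX_RSP are illustrative names, not the driver's API; MAX_RSP merely plays the role of IPMI_MAX_MSG_LENGTH.

#include <stdio.h>
#include <string.h>

#define MAX_RSP 272	/* illustrative cap; the driver clamps to IPMI_MAX_MSG_LENGTH */

static size_t copy_response(unsigned char *rsp, const unsigned char *data,
			    size_t len)
{
	if (data) {
		if (len > MAX_RSP)
			len = MAX_RSP;
		memcpy(rsp, data, len);
	} else {
		len = 0;	/* error path: nothing to copy */
	}
	return len;
}

int main(void)
{
	unsigned char rsp[MAX_RSP];
	const unsigned char ok[] = { 0x1c, 0x01, 0x00 };

	printf("copied %zu bytes\n", copy_response(rsp, ok, sizeof(ok)));
	printf("copied %zu bytes\n", copy_response(rsp, NULL, 3));
	return 0;
}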
--- /dev/null
+From 5d178a83389d9c763707bcec451fd8bcd00c467d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 31 Jan 2020 13:12:58 +0200
+Subject: mac80211: consider more elements in parsing CRC
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+[ Upstream commit a04564c99bb4a92f805a58e56b2d22cc4978f152 ]
+
+We only use the parsing CRC for checking if a beacon changed,
+and elements with an ID > 63 cannot be represented in the
+filter. Thus, like we did before with WMM and Cisco vendor
+elements, just statically add these forgotten items to the
+CRC:
+ - WLAN_EID_VHT_OPERATION
+ - WLAN_EID_OPMODE_NOTIF
+
+In most cases, when the VHT/HE operation changes the HT operation
+likely changes as well, so the change was picked up that way, but we
+did notice that pure operating mode notification changes were
+ignored.
+
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
+Link: https://lore.kernel.org/r/20200131111300.891737-22-luca@coelho.fi
+[restrict to VHT for the mac80211 branch]
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mac80211/util.c | 18 ++++++++++++------
+ 1 file changed, 12 insertions(+), 6 deletions(-)
+
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index 32a7a53833c01..739e90555d8b9 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -1063,16 +1063,22 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
+ elem_parse_failed = true;
+ break;
+ case WLAN_EID_VHT_OPERATION:
+- if (elen >= sizeof(struct ieee80211_vht_operation))
++ if (elen >= sizeof(struct ieee80211_vht_operation)) {
+ elems->vht_operation = (void *)pos;
+- else
+- elem_parse_failed = true;
++ if (calc_crc)
++ crc = crc32_be(crc, pos - 2, elen + 2);
++ break;
++ }
++ elem_parse_failed = true;
+ break;
+ case WLAN_EID_OPMODE_NOTIF:
+- if (elen > 0)
++ if (elen > 0) {
+ elems->opmode_notif = pos;
+- else
+- elem_parse_failed = true;
++ if (calc_crc)
++ crc = crc32_be(crc, pos - 2, elen + 2);
++ break;
++ }
++ elem_parse_failed = true;
+ break;
+ case WLAN_EID_MESH_ID:
+ elems->mesh_id = pos;
+--
+2.20.1
+
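The change above folds each element into the CRC starting at pos - 2, i.e. including the element ID and length bytes. The sketch below shows the idea with a plain bit-by-bit MSB-first CRC-32 (polynomial 0x04C11DB7) standing in for the kernel's crc32_be(); it is illustrative, not the exact kernel routine, and the element uses WLAN_EID_OPMODE_NOTIF (199) as an example.

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

static uint32_t crc32_msb(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= (uint32_t)*p++ << 24;
		for (int bit = 0; bit < 8; bit++)
			crc = (crc & 0x80000000u) ? (crc << 1) ^ 0x04C11DB7u
						  : crc << 1;
	}
	return crc;
}

int main(void)
{
	/* operating mode notification element: ID, length, one byte of body */
	uint8_t elem[]    = { 199, 1, 0x00 };
	uint8_t changed[] = { 199, 1, 0x01 };	/* operating mode changed */

	/* hashing ID + length + body makes any body change visible in the CRC */
	printf("crc old: %08x\n", crc32_msb(~0u, elem, sizeof(elem)));
	printf("crc new: %08x\n", crc32_msb(~0u, changed, sizeof(changed)));
	return 0;
}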
--- /dev/null
+From cf17e99062a81e5081c0e5e09f92cce5618318a2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 10 Feb 2020 15:07:28 +0200
+Subject: mac80211: fix wrong 160/80+80 MHz setting
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Shay Bar <shay.bar@celeno.com>
+
+[ Upstream commit 33181ea7f5a62a17fbe55f0f73428ecb5e686be8 ]
+
+Before this patch, STAs would set a new width of 160/80+80 MHz based on the AP's capability only.
+This is wrong because the STA itself may not support > 80 MHz BW.
+The fix is to verify that the STA has 160/80+80 MHz capability before increasing its width to > 80 MHz.
+
+The "support_80_80" and "support_160" setting is based on:
+"Table 9-272 — Setting of the Supported Channel Width Set subfield and Extended NSS BW
+Support subfield at a STA transmitting the VHT Capabilities Information field"
+From "Draft P802.11REVmd_D3.0.pdf"
+
+Signed-off-by: Aviad Brikman <aviad.brikman@celeno.com>
+Signed-off-by: Shay Bar <shay.bar@celeno.com>
+Link: https://lore.kernel.org/r/20200210130728.23674-1-shay.bar@celeno.com
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mac80211/util.c | 16 ++++++++++++++--
+ 1 file changed, 14 insertions(+), 2 deletions(-)
+
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index 739e90555d8b9..decd46b383938 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -2993,10 +2993,22 @@ bool ieee80211_chandef_vht_oper(struct ieee80211_hw *hw,
+ int cf0, cf1;
+ int ccfs0, ccfs1, ccfs2;
+ int ccf0, ccf1;
++ u32 vht_cap;
++ bool support_80_80 = false;
++ bool support_160 = false;
+
+ if (!oper || !htop)
+ return false;
+
++ vht_cap = hw->wiphy->bands[chandef->chan->band]->vht_cap.cap;
++ support_160 = (vht_cap & (IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK |
++ IEEE80211_VHT_CAP_EXT_NSS_BW_MASK));
++ support_80_80 = ((vht_cap &
++ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ) ||
++ (vht_cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ &&
++ vht_cap & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK) ||
++ ((vht_cap & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK) >>
++ IEEE80211_VHT_CAP_EXT_NSS_BW_SHIFT > 1));
+ ccfs0 = oper->center_freq_seg0_idx;
+ ccfs1 = oper->center_freq_seg1_idx;
+ ccfs2 = (le16_to_cpu(htop->operation_mode) &
+@@ -3024,10 +3036,10 @@ bool ieee80211_chandef_vht_oper(struct ieee80211_hw *hw,
+ unsigned int diff;
+
+ diff = abs(ccf1 - ccf0);
+- if (diff == 8) {
++ if ((diff == 8) && support_160) {
+ new.width = NL80211_CHAN_WIDTH_160;
+ new.center_freq1 = cf1;
+- } else if (diff > 8) {
++ } else if ((diff > 8) && support_80_80) {
+ new.width = NL80211_CHAN_WIDTH_80P80;
+ new.center_freq2 = cf1;
+ }
+--
+2.20.1
+
--- /dev/null
+From f9156de370af8830e2b8c968b402a037c22d85ff Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 Feb 2020 15:17:42 +0000
+Subject: net: ena: add missing ethtool TX timestamping indication
+
+From: Arthur Kiyanovski <akiyano@amazon.com>
+
+[ Upstream commit cf6d17fde93bdda23c9b02dd5906a12bf8c55209 ]
+
+The current implementation of the driver calls skb_tx_timestamp() to add a
+software tx timestamp to the skb; however, the software-transmit capability
+is not reported in ethtool -T.
+
+This commit updates the ethtool structure to report the software-transmit
+capability in ethtool -T using the standard ethtool_op_get_ts_info().
+This function reports all software timestamping capabilities (tx and rx),
+as well as setting phc_index = -1. phc_index is the index of the PTP
+hardware clock device that will be used for hardware timestamps. Since we
+don't have such a device in ENA, using the default -1 value is the correct
+setting.
+
+Fixes: 1738cd3ed342 ("net: ena: Add a driver for Amazon Elastic Network Adapters (ENA)")
+Signed-off-by: Ezequiel Lara Gomez <ezegomez@amazon.com>
+Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/amazon/ena/ena_ethtool.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+index fc96c66b44cb5..8b56383b64aea 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
++++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+@@ -812,6 +812,7 @@ static const struct ethtool_ops ena_ethtool_ops = {
+ .set_channels = ena_set_channels,
+ .get_tunable = ena_get_tunable,
+ .set_tunable = ena_set_tunable,
++ .get_ts_info = ethtool_op_get_ts_info,
+ };
+
+ void ena_set_ethtool_ops(struct net_device *netdev)
+--
+2.20.1
+
--- /dev/null
+From 215d3d1cce40ce8a6c664b68eb79ac443c0bee9a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 Feb 2020 15:17:51 +0000
+Subject: net: ena: ena-com.c: prevent NULL pointer dereference
+
+From: Arthur Kiyanovski <akiyano@amazon.com>
+
+[ Upstream commit c207979f5ae10ed70aff1bb13f39f0736973de99 ]
+
+comp_ctx can be NULL in a very rare case when an admin command is executed
+during the execution of ena_remove().
+
+The bug scenario is as follows:
+
+* ena_destroy_device() sets the comp_ctx to be NULL
+* An admin command is executed before executing unregister_netdev(),
+ this can still happen because our device can still receive callbacks
+ from the netdev infrastructure such as ethtool commands.
+* When attempting to access the comp_ctx, the bug occurs since it's set
+ to NULL
+
+Fix:
+Added a check that comp_ctx is not NULL
+
+Fixes: 1738cd3ed342 ("net: ena: Add a driver for Amazon Elastic Network Adapters (ENA)")
+Signed-off-by: Sameeh Jubran <sameehj@amazon.com>
+Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/amazon/ena/ena_com.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
+index 74743fd8a1e0a..304531332e70a 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_com.c
++++ b/drivers/net/ethernet/amazon/ena/ena_com.c
+@@ -200,6 +200,11 @@ static void comp_ctxt_release(struct ena_com_admin_queue *queue,
+ static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
+ u16 command_id, bool capture)
+ {
++ if (unlikely(!queue->comp_ctx)) {
++ pr_err("Completion context is NULL\n");
++ return NULL;
++ }
++
+ if (unlikely(command_id >= queue->q_depth)) {
+ pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
+ command_id, queue->q_depth);
+--
+2.20.1
+
--- /dev/null
+From 5955909dd9fac4e201158d0432c8ff2db23dac76 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 Feb 2020 15:17:50 +0000
+Subject: net: ena: ethtool: use correct value for crc32 hash
+
+From: Sameeh Jubran <sameehj@amazon.com>
+
+[ Upstream commit 886d2089276e40d460731765083a741c5c762461 ]
+
+Up until kernel 4.11 there was no enum defined for the crc32 hash in
+ethtool, so the xor enum was used to support crc32.
+
+Fixes: 1738cd3ed342 ("net: ena: Add a driver for Amazon Elastic Network Adapters (ENA)")
+Signed-off-by: Sameeh Jubran <sameehj@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/amazon/ena/ena_ethtool.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+index 610a7c63e1742..4ad69066e7846 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
++++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+@@ -693,7 +693,7 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ func = ETH_RSS_HASH_TOP;
+ break;
+ case ENA_ADMIN_CRC32:
+- func = ETH_RSS_HASH_XOR;
++ func = ETH_RSS_HASH_CRC32;
+ break;
+ default:
+ netif_err(adapter, drv, netdev,
+@@ -739,7 +739,7 @@ static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
+ case ETH_RSS_HASH_TOP:
+ func = ENA_ADMIN_TOEPLITZ;
+ break;
+- case ETH_RSS_HASH_XOR:
++ case ETH_RSS_HASH_CRC32:
+ func = ENA_ADMIN_CRC32;
+ break;
+ default:
+--
+2.20.1
+
--- /dev/null
+From 75d1d1e21acae5434e4ce07b69fa68e5d5adb55f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 Feb 2020 15:17:48 +0000
+Subject: net: ena: fix corruption of dev_idx_to_host_tbl
+
+From: Arthur Kiyanovski <akiyano@amazon.com>
+
+[ Upstream commit e3f89f91e98ce07dc0f121a3b70d21aca749ba39 ]
+
+The function ena_com_ind_tbl_convert_from_device() has an overflow
+bug as explained below. Either way, this function is not needed at
+all since we don't retrieve the indirection table from the device
+at any point which means that this conversion is not needed.
+
+The bug:
+The for loop iterates over all io_sq_queues. When only the actual number
+of used queues is passed, io_sq_queues[i].idx equals 0 for the unused
+entries since they are uninitialized, which results in the following code
+being executed until the end of the loop:
+
+dev_idx_to_host_tbl[0] = i;
+
+This results in dev_idx_to_host_tbl[0] being equal to
+ENA_TOTAL_NUM_QUEUES - 1.
+
+Fixes: 1738cd3ed342 ("net: ena: Add a driver for Amazon Elastic Network Adapters (ENA)")
+Signed-off-by: Sameeh Jubran <sameehj@amazon.com>
+Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/amazon/ena/ena_com.c | 28 -----------------------
+ 1 file changed, 28 deletions(-)
+
+diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
+index 8ab192cb26b74..74743fd8a1e0a 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_com.c
++++ b/drivers/net/ethernet/amazon/ena/ena_com.c
+@@ -1281,30 +1281,6 @@ static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
+ return 0;
+ }
+
+-static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
+-{
+- u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
+- struct ena_rss *rss = &ena_dev->rss;
+- u8 idx;
+- u16 i;
+-
+- for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
+- dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;
+-
+- for (i = 0; i < 1 << rss->tbl_log_size; i++) {
+- if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES)
+- return -EINVAL;
+- idx = (u8)rss->rss_ind_tbl[i].cq_idx;
+-
+- if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)
+- return -EINVAL;
+-
+- rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
+- }
+-
+- return 0;
+-}
+-
+ static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
+ u16 intr_delay_resolution)
+ {
+@@ -2638,10 +2614,6 @@ int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
+ if (!ind_tbl)
+ return 0;
+
+- rc = ena_com_ind_tbl_convert_from_device(ena_dev);
+- if (unlikely(rc))
+- return rc;
+-
+ for (i = 0; i < (1 << rss->tbl_log_size); i++)
+ ind_tbl[i] = rss->host_rss_ind_tbl[i];
+
+--
+2.20.1
+
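A runnable illustration of the overflow described in the patch above: if only the first few io_sq_queues entries carry a meaningful idx and the rest are zero-initialized, the reverse-mapping loop keeps overwriting entry 0, leaving it equal to the last loop index. The struct and sizes below are simplified stand-ins, not the real ENA definitions.

#include <stdio.h>

#define TOTAL_QUEUES 8

struct io_sq { unsigned short idx; };

int main(void)
{
	struct io_sq io_sq_queues[TOTAL_QUEUES] = {
		{ .idx = 0 }, { .idx = 1 }, { .idx = 2 },	/* 3 queues in use */
		/* remaining entries stay zero-initialized */
	};
	unsigned short dev_idx_to_host_tbl[TOTAL_QUEUES] = { 0 };

	for (int i = 0; i < TOTAL_QUEUES; i++)
		dev_idx_to_host_tbl[io_sq_queues[i].idx] = i;

	/* prints 7 (TOTAL_QUEUES - 1) instead of the expected 0 */
	printf("dev_idx_to_host_tbl[0] = %u\n", dev_idx_to_host_tbl[0]);
	return 0;
}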
--- /dev/null
+From aeef5952dd262b50741cb42b66e13b344109c03e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 Feb 2020 15:17:43 +0000
+Subject: net: ena: fix incorrect default RSS key
+
+From: Arthur Kiyanovski <akiyano@amazon.com>
+
+[ Upstream commit 0d1c3de7b8c78a5e44b74b62ede4a63629f5d811 ]
+
+Bug description:
+When running "ethtool -x <if_name>" the key shows up as all zeros.
+
+When we use "ethtool -X <if_name> hfunc toeplitz hkey <some:random:key>" to
+set the key and then try to retrieve it using "ethtool -x <if_name>" then
+we return the correct key because we return the one we saved.
+
+Bug cause:
+We don't fetch the key from the device but instead return the key
+that we have saved internally which is by default set to zero upon
+allocation.
+
+Fix:
+This commit fixes the issue by initializing the key to a random value
+using netdev_rss_key_fill().
+
+Fixes: 1738cd3ed342 ("net: ena: Add a driver for Amazon Elastic Network Adapters (ENA)")
+Signed-off-by: Sameeh Jubran <sameehj@amazon.com>
+Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/amazon/ena/ena_com.c | 15 +++++++++++++++
+ drivers/net/ethernet/amazon/ena/ena_com.h | 1 +
+ 2 files changed, 16 insertions(+)
+
+diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
+index e54c44fdcaa73..d6b894b06fa30 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_com.c
++++ b/drivers/net/ethernet/amazon/ena/ena_com.c
+@@ -1041,6 +1041,19 @@ static int ena_com_get_feature(struct ena_com_dev *ena_dev,
+ feature_ver);
+ }
+
++static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
++{
++ struct ena_admin_feature_rss_flow_hash_control *hash_key =
++ (ena_dev->rss).hash_key;
++
++ netdev_rss_key_fill(&hash_key->key, sizeof(hash_key->key));
++ /* The key is stored in the device in u32 array
++ * as well as the API requires the key to be passed in this
++ * format. Thus the size of our array should be divided by 4
++ */
++ hash_key->keys_num = sizeof(hash_key->key) / sizeof(u32);
++}
++
+ static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
+ {
+ struct ena_rss *rss = &ena_dev->rss;
+@@ -2631,6 +2644,8 @@ int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
+ if (unlikely(rc))
+ goto err_hash_key;
+
++ ena_com_hash_key_fill_default_key(ena_dev);
++
+ rc = ena_com_hash_ctrl_init(ena_dev);
+ if (unlikely(rc))
+ goto err_hash_ctrl;
+diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
+index 0ce37d54ed108..9b5bd28ed0ac6 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_com.h
++++ b/drivers/net/ethernet/amazon/ena/ena_com.h
+@@ -44,6 +44,7 @@
+ #include <linux/spinlock.h>
+ #include <linux/types.h>
+ #include <linux/wait.h>
++#include <linux/netdevice.h>
+
+ #include "ena_common_defs.h"
+ #include "ena_admin_defs.h"
+--
+2.20.1
+
--- /dev/null
+From 15c9597dadae42946820382f983c0486813aeb58 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 Feb 2020 15:17:47 +0000
+Subject: net: ena: fix incorrectly saving queue numbers when setting RSS
+ indirection table
+
+From: Arthur Kiyanovski <akiyano@amazon.com>
+
+[ Upstream commit 92569fd27f5cb0ccbdf7c7d70044b690e89a0277 ]
+
+The indirection table has the indices of the Rx queues. When we store it
+during set indirection operation, we convert the indices to our internal
+representation of the indices.
+
+Our internal representation of the indices is: even indices for Tx and
+odd indices for Rx, where every Tx/Rx pair sits in consecutive order
+starting from 0. For example, if the driver has 3 queues (3 for Tx and 3
+for Rx) then the indices are as follows:
+0 1 2 3 4 5
+Tx Rx Tx Rx Tx Rx
+
+The BUG:
+The issue is that when we satisfy a get request for the indirection
+table, we don't convert the indices back to the original representation.
+
+The FIX:
+Simply apply the inverse function for the indices of the indirection
+table after we set it.
+
+Fixes: 1738cd3ed342 ("net: ena: Add a driver for Amazon Elastic Network Adapters (ENA)")
+Signed-off-by: Sameeh Jubran <sameehj@amazon.com>
+Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/amazon/ena/ena_ethtool.c | 24 ++++++++++++++++++-
+ drivers/net/ethernet/amazon/ena/ena_netdev.h | 2 ++
+ 2 files changed, 25 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+index 8be9df885bf4f..610a7c63e1742 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
++++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+@@ -636,6 +636,28 @@ static u32 ena_get_rxfh_key_size(struct net_device *netdev)
+ return ENA_HASH_KEY_SIZE;
+ }
+
++static int ena_indirection_table_get(struct ena_adapter *adapter, u32 *indir)
++{
++ struct ena_com_dev *ena_dev = adapter->ena_dev;
++ int i, rc;
++
++ if (!indir)
++ return 0;
++
++ rc = ena_com_indirect_table_get(ena_dev, indir);
++ if (rc)
++ return rc;
++
++ /* Our internal representation of the indices is: even indices
++ * for Tx and uneven indices for Rx. We need to convert the Rx
++ * indices to be consecutive
++ */
++ for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++)
++ indir[i] = ENA_IO_RXQ_IDX_TO_COMBINED_IDX(indir[i]);
++
++ return rc;
++}
++
+ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
+ {
+@@ -644,7 +666,7 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 func;
+ int rc;
+
+- rc = ena_com_indirect_table_get(adapter->ena_dev, indir);
++ rc = ena_indirection_table_get(adapter, indir);
+ if (rc)
+ return rc;
+
+diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
+index bffd778f2ce34..2fe5eeea6b695 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
+@@ -129,6 +129,8 @@
+
+ #define ENA_IO_TXQ_IDX(q) (2 * (q))
+ #define ENA_IO_RXQ_IDX(q) (2 * (q) + 1)
++#define ENA_IO_TXQ_IDX_TO_COMBINED_IDX(q) ((q) / 2)
++#define ENA_IO_RXQ_IDX_TO_COMBINED_IDX(q) (((q) - 1) / 2)
+
+ #define ENA_MGMNT_IRQ_IDX 0
+ #define ENA_IO_IRQ_FIRST_IDX 1
+--
+2.20.1
+
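A small user-space sketch of the index scheme used in the patch above: Tx rings sit at even internal indices, Rx rings at odd ones, and the *_TO_COMBINED_IDX macros invert that mapping so ethtool sees plain 0..N-1 queue numbers again. The macro bodies mirror the ones added to ena_netdev.h; the surrounding program is only for demonstration.

#include <stdio.h>

#define IO_TXQ_IDX(q)			(2 * (q))
#define IO_RXQ_IDX(q)			(2 * (q) + 1)
#define IO_TXQ_IDX_TO_COMBINED_IDX(q)	((q) / 2)
#define IO_RXQ_IDX_TO_COMBINED_IDX(q)	(((q) - 1) / 2)

int main(void)
{
	for (int q = 0; q < 3; q++) {
		int tx = IO_TXQ_IDX(q), rx = IO_RXQ_IDX(q);

		printf("queue %d -> internal Tx %d, Rx %d -> back to %d/%d\n",
		       q, tx, rx,
		       IO_TXQ_IDX_TO_COMBINED_IDX(tx),
		       IO_RXQ_IDX_TO_COMBINED_IDX(rx));
	}
	return 0;
}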
--- /dev/null
+From 3ea649786d30ad6a6ea9f667cd33f2c5ccc49c08 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 Feb 2020 15:17:40 +0000
+Subject: net: ena: fix potential crash when rxfh key is NULL
+
+From: Arthur Kiyanovski <akiyano@amazon.com>
+
+[ Upstream commit 91a65b7d3ed8450f31ab717a65dcb5f9ceb5ab02 ]
+
+When ethtool -X is called without an hkey, ena_com_fill_hash_function()
+is called with key=NULL, which is passed to memcpy causing a crash.
+
+This commit fixes this issue by checking key is not NULL.
+
+Fixes: 1738cd3ed342 ("net: ena: Add a driver for Amazon Elastic Network Adapters (ENA)")
+Signed-off-by: Sameeh Jubran <sameehj@amazon.com>
+Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/amazon/ena/ena_com.c | 17 +++++++++--------
+ 1 file changed, 9 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
+index ea62604fdf8ca..e54c44fdcaa73 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_com.c
++++ b/drivers/net/ethernet/amazon/ena/ena_com.c
+@@ -2297,15 +2297,16 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
+
+ switch (func) {
+ case ENA_ADMIN_TOEPLITZ:
+- if (key_len > sizeof(hash_key->key)) {
+- pr_err("key len (%hu) is bigger than the max supported (%zu)\n",
+- key_len, sizeof(hash_key->key));
+- return -EINVAL;
++ if (key) {
++ if (key_len != sizeof(hash_key->key)) {
++ pr_err("key len (%hu) doesn't equal the supported size (%zu)\n",
++ key_len, sizeof(hash_key->key));
++ return -EINVAL;
++ }
++ memcpy(hash_key->key, key, key_len);
++ rss->hash_init_val = init_val;
++ hash_key->keys_num = key_len >> 2;
+ }
+-
+- memcpy(hash_key->key, key, key_len);
+- rss->hash_init_val = init_val;
+- hash_key->keys_num = key_len >> 2;
+ break;
+ case ENA_ADMIN_CRC32:
+ rss->hash_init_val = init_val;
+--
+2.20.1
+
--- /dev/null
+From a32289d652caef82aa0c5960e3699a0783aa9cc6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 Feb 2020 15:17:41 +0000
+Subject: net: ena: fix uses of round_jiffies()
+
+From: Arthur Kiyanovski <akiyano@amazon.com>
+
+[ Upstream commit 2a6e5fa2f4c25b66c763428a3e65363214946931 ]
+
+From the documentation of round_jiffies():
+"Rounds a time delta in the future (in jiffies) up or down to
+(approximately) full seconds. This is useful for timers for which
+the exact time they fire does not matter too much, as long as
+they fire approximately every X seconds.
+By rounding these timers to whole seconds, all such timers will fire
+at the same time, rather than at various times spread out. The goal
+of this is to have the CPU wake up less, which saves power."
+
+There are 2 parts to this patch:
+================================
+Part 1:
+-------
+In our case we need timer_service to be called approximately every
+X=1 seconds, and the exact time does not matter, so using round_jiffies()
+is the right way to go.
+
+Therefore we add round_jiffies() to the mod_timer() in ena_timer_service().
+
+Part 2:
+-------
+round_jiffies() is used in check_for_missing_keep_alive() when
+getting the jiffies of the expiration of the keep_alive timeout. Here it
+is actually a mistake to use round_jiffies() because we want the exact
+time when keep_alive should expire and not an approximate rounded time,
+which can cause early, false positive, timeouts.
+
+Therefore we remove round_jiffies() in the calculation of
+keep_alive_expired() in check_for_missing_keep_alive().
+
+Fixes: 82ef30f13be0 ("net: ena: add hardware hints capability to the driver")
+Fixes: 1738cd3ed342 ("net: ena: Add a driver for Amazon Elastic Network Adapters (ENA)")
+Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/amazon/ena/ena_netdev.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+index 948583fdcc286..1c1a41bd11daa 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+@@ -3049,8 +3049,8 @@ static void check_for_missing_keep_alive(struct ena_adapter *adapter)
+ if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
+ return;
+
+- keep_alive_expired = round_jiffies(adapter->last_keep_alive_jiffies +
+- adapter->keep_alive_timeout);
++ keep_alive_expired = adapter->last_keep_alive_jiffies +
++ adapter->keep_alive_timeout;
+ if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Keep alive watchdog timeout.\n");
+@@ -3152,7 +3152,7 @@ static void ena_timer_service(struct timer_list *t)
+ }
+
+ /* Reset the timer */
+- mod_timer(&adapter->timer_service, jiffies + HZ);
++ mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
+ }
+
+ static int ena_calc_max_io_queue_num(struct pci_dev *pdev,
+--
+2.20.1
+
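A simplified model of why rounding the keep-alive deadline is wrong, as argued in the patch above: rounding a tick-based deadline to a whole second can move it earlier, so the watchdog may fire before the real timeout. round_to_second() below ignores the per-CPU skew the kernel's round_jiffies() applies, and HZ is only an example tick rate; this is a sketch of the idea, not the kernel implementation.

#include <stdio.h>

#define HZ 250UL

/* round to the nearest whole second, in the spirit of round_jiffies() */
static unsigned long round_to_second(unsigned long ticks)
{
	return ((ticks + HZ / 2) / HZ) * HZ;
}

int main(void)
{
	unsigned long now = 12345;		/* arbitrary current tick count */
	unsigned long timeout = 6 * HZ;		/* 6 second keep-alive */
	unsigned long exact = now + timeout;
	unsigned long rounded = round_to_second(exact);

	printf("exact deadline:   %lu\n", exact);
	printf("rounded deadline: %lu (%ld ticks %s)\n", rounded,
	       (long)(rounded - exact),
	       rounded < exact ? "early" : "late or equal");
	return 0;
}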
--- /dev/null
+From 65b69cb7fe832941871a29ba8944a273019b5668 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 Feb 2020 15:17:44 +0000
+Subject: net: ena: rss: do not allocate key when not supported
+
+From: Sameeh Jubran <sameehj@amazon.com>
+
+[ Upstream commit 6a4f7dc82d1e3abd3feb0c60b5041056fcd9880c ]
+
+Currently we allocate the key whether the device supports setting the
+key or not. This commit adds a check to the allocation function and
+handles the error accordingly.
+
+Fixes: 1738cd3ed342 ("net: ena: Add a driver for Amazon Elastic Network Adapters (ENA)")
+Signed-off-by: Sameeh Jubran <sameehj@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/amazon/ena/ena_com.c | 24 ++++++++++++++++++++---
+ 1 file changed, 21 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
+index d6b894b06fa30..6f758ece86f60 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_com.c
++++ b/drivers/net/ethernet/amazon/ena/ena_com.c
+@@ -1057,6 +1057,20 @@ static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
+ static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
+ {
+ struct ena_rss *rss = &ena_dev->rss;
++ struct ena_admin_feature_rss_flow_hash_control *hash_key;
++ struct ena_admin_get_feat_resp get_resp;
++ int rc;
++
++ hash_key = (ena_dev->rss).hash_key;
++
++ rc = ena_com_get_feature_ex(ena_dev, &get_resp,
++ ENA_ADMIN_RSS_HASH_FUNCTION,
++ ena_dev->rss.hash_key_dma_addr,
++ sizeof(ena_dev->rss.hash_key), 0);
++ if (unlikely(rc)) {
++ hash_key = NULL;
++ return -EOPNOTSUPP;
++ }
+
+ rss->hash_key =
+ dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
+@@ -2640,11 +2654,15 @@ int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
+ if (unlikely(rc))
+ goto err_indr_tbl;
+
++ /* The following function might return unsupported in case the
++ * device doesn't support setting the key / hash function. We can safely
++ * ignore this error and have indirection table support only.
++ */
+ rc = ena_com_hash_key_allocate(ena_dev);
+- if (unlikely(rc))
++ if (unlikely(rc) && rc != -EOPNOTSUPP)
+ goto err_hash_key;
+-
+- ena_com_hash_key_fill_default_key(ena_dev);
++ else if (rc != -EOPNOTSUPP)
++ ena_com_hash_key_fill_default_key(ena_dev);
+
+ rc = ena_com_hash_ctrl_init(ena_dev);
+ if (unlikely(rc))
+--
+2.20.1
+
--- /dev/null
+From 6f17bf45ed840f530cc629c4046b23ede9400e29 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 Feb 2020 15:17:45 +0000
+Subject: net: ena: rss: fix failure to get indirection table
+
+From: Sameeh Jubran <sameehj@amazon.com>
+
+[ Upstream commit 0c8923c0a64fb5d14bebb9a9065d2dc25ac5e600 ]
+
+On old hardware, getting / setting the hash function is not supported while
+getting / setting the indirection table is.
+
+This commit enables us to still show the indirection table on older
+hardware by setting the hash function and key to NULL.
+
+Fixes: 1738cd3ed342 ("net: ena: Add a driver for Amazon Elastic Network Adapters (ENA)")
+Signed-off-by: Sameeh Jubran <sameehj@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/amazon/ena/ena_ethtool.c | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+index 8b56383b64aea..8be9df885bf4f 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
++++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+@@ -648,7 +648,21 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ if (rc)
+ return rc;
+
++ /* We call this function in order to check if the device
++ * supports getting/setting the hash function.
++ */
+ rc = ena_com_get_hash_function(adapter->ena_dev, &ena_func, key);
++
++ if (rc) {
++ if (rc == -EOPNOTSUPP) {
++ key = NULL;
++ hfunc = NULL;
++ rc = 0;
++ }
++
++ return rc;
++ }
++
+ if (rc)
+ return rc;
+
+--
+2.20.1
+
--- /dev/null
+From dda33b9b6283fa7b8e36db76011016e82f824fd9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 Feb 2020 15:17:46 +0000
+Subject: net: ena: rss: store hash function as values and not bits
+
+From: Arthur Kiyanovski <akiyano@amazon.com>
+
+[ Upstream commit 4844470d472d660c26149ad764da2406adb13423 ]
+
+The device receives, stores and retrieves the hash function value as bits
+and not as their enum value.
+
+The bug:
+* In ena_com_set_hash_function() we set
+ cmd.u.flow_hash_func.selected_func to the bit value of rss->hash_func.
+ (1 << rss->hash_func)
+* In ena_com_get_hash_function() we retrieve the hash function and store
+  its bit value in rss->hash_func. (Now the bit value of the hash function
+  is stored in rss->hash_func instead of its enum value.)
+
+The fix:
+This commit fixes the issue by converting the retrieved hash function
+values from the device to the matching enum value of the set bit using
+ffs(). ffs() finds the first set bit's index in a word. Since the function
+returns 1 for the LSB's index, we need to subtract 1 from the returned
+value (note that BIT(0) is 1).
+
+Fixes: 1738cd3ed342 ("net: ena: Add a driver for Amazon Elastic Network Adapters (ENA)")
+Signed-off-by: Sameeh Jubran <sameehj@amazon.com>
+Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/amazon/ena/ena_com.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
+index 6f758ece86f60..8ab192cb26b74 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_com.c
++++ b/drivers/net/ethernet/amazon/ena/ena_com.c
+@@ -2370,7 +2370,11 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
+ if (unlikely(rc))
+ return rc;
+
+- rss->hash_func = get_resp.u.flow_hash_func.selected_func;
++ /* ffs() returns 1 in case the lsb is set */
++ rss->hash_func = ffs(get_resp.u.flow_hash_func.selected_func);
++ if (rss->hash_func)
++ rss->hash_func--;
++
+ if (func)
+ *func = rss->hash_func;
+
+--
+2.20.1
+
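A tiny demo of the ffs()-based conversion described in the patch above: the device reports the selected hash function as a bit (BIT(enum_value)), and ffs() - 1 maps it back to the enum value. The enum values here are illustrative and only mirror the idea, not the exact ENA admin definitions.

#include <stdio.h>
#include <strings.h>	/* ffs() */

enum hash_func { HASH_TOEPLITZ = 0, HASH_CRC32 = 1 };

int main(void)
{
	unsigned int reported = 1u << HASH_CRC32;	/* device reports bit 1 */
	int func = ffs(reported);

	if (func)
		func--;		/* ffs() is 1-based: BIT(0) yields 1 */

	printf("reported bits 0x%x -> enum %d (%s)\n", reported, func,
	       func == HASH_CRC32 ? "crc32" : "toeplitz");
	return 0;
}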
--- /dev/null
+From 25e0e23cf140eb6f4e750329add79861c1bd098c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 Feb 2020 09:53:41 +0800
+Subject: net: hns3: add management table after IMP reset
+
+From: Yufeng Mo <moyufeng@huawei.com>
+
+[ Upstream commit d0db7ed397517c8b2be24a0d1abfa15df776908e ]
+
+In the current process, the management table is missing after the
+IMP reset. This patch adds the management table to the reset process.
+
+Fixes: f5aac71c0327 ("net: hns3: add manager table initialization for hardware")
+Signed-off-by: Yufeng Mo <moyufeng@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index 13dbd249f35fa..bfdb08572f0cc 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -9821,6 +9821,13 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
+ return ret;
+ }
+
++ ret = init_mgr_tbl(hdev);
++ if (ret) {
++ dev_err(&pdev->dev,
++ "failed to reinit manager table, ret = %d\n", ret);
++ return ret;
++ }
++
+ ret = hclge_init_fd_config(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
+--
+2.20.1
+
--- /dev/null
+From 2e0772ad6d5f651381fa7249d077b0afb1e319ae Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 Feb 2020 09:53:43 +0800
+Subject: net: hns3: fix a copying IPv6 address error in
+ hclge_fd_get_flow_tuples()
+
+From: Guangbin Huang <huangguangbin2@huawei.com>
+
+[ Upstream commit 47327c9315b2f3ae4ab659457977a26669631f20 ]
+
+The IPv6 address defined in struct in6_addr is specified as
+big endian, but no endianness is specified for this field in struct
+hclge_fd_rule_tuples, which is little endian. This causes a problem
+when memcpy() is used directly to copy the IPv6 address between these
+two structures.
+
+This patch fixes the problem by using be32_to_cpu() to convert the
+endianness of the IPv6 address from struct in6_addr before copying.
+
+Fixes: d93ed94fbeaf ("net: hns3: add aRFS support for PF")
+Signed-off-by: Guangbin Huang <huangguangbin2@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 15 +++++++++------
+ 1 file changed, 9 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index bfdb08572f0cc..5d74f5a60102a 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -6106,6 +6106,9 @@ static int hclge_get_all_rules(struct hnae3_handle *handle,
+ static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
+ struct hclge_fd_rule_tuples *tuples)
+ {
++#define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
++#define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
++
+ tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
+ tuples->ip_proto = fkeys->basic.ip_proto;
+ tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
+@@ -6114,12 +6117,12 @@ static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
+ tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
+ tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
+ } else {
+- memcpy(tuples->src_ip,
+- fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
+- sizeof(tuples->src_ip));
+- memcpy(tuples->dst_ip,
+- fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
+- sizeof(tuples->dst_ip));
++ int i;
++
++ for (i = 0; i < IPV6_SIZE; i++) {
++ tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
++ tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
++ }
+ }
+ }
+
+--
+2.20.1
+
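A user-space illustration of the per-word conversion applied in the patch above: the 128-bit IPv6 address is big endian in memory, so each 32-bit word is converted with ntohl() (the user-space counterpart of be32_to_cpu()) before being stored in a host-endian u32 array. The address string and IPV6_WORDS constant are only for the example.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

#define IPV6_WORDS 4

int main(void)
{
	struct in6_addr addr;
	uint32_t be_words[IPV6_WORDS];
	uint32_t host_words[IPV6_WORDS];

	if (inet_pton(AF_INET6, "2001:db8::1", &addr) != 1)
		return 1;

	/* copy out the raw big-endian words, then convert each one */
	memcpy(be_words, &addr, sizeof(be_words));
	for (int i = 0; i < IPV6_WORDS; i++)
		host_words[i] = ntohl(be_words[i]);

	for (int i = 0; i < IPV6_WORDS; i++)
		printf("word %d: as read without conversion 0x%08x -> after ntohl() 0x%08x\n",
		       i, be_words[i], host_words[i]);
	return 0;
}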
--- /dev/null
+From 98f223c69dd29f1ff1695f841205a32959dbeabc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 Feb 2020 09:53:42 +0800
+Subject: net: hns3: fix VF bandwidth does not take effect in some case
+
+From: Yonglong Liu <liuyonglong@huawei.com>
+
+[ Upstream commit 19eb1123b4e9337fe20b1763fec528f837ec6568 ]
+
+When enabling 4 TCs after setting the bandwidth of a VF, the VF's
+bandwidth reverts to the default value, because the qset resources
+change in this case.
+
+This patch fixes it by giving each VF a fixed set of qset resources
+based on the HNAE3_MAX_TC macro.
+
+Fixes: ee9e44248f52 ("net: hns3: add support for configuring bandwidth of VF on the host")
+Signed-off-by: Yonglong Liu <liuyonglong@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+index 180224eab1ca4..28db13253a5e7 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+@@ -566,7 +566,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
+ */
+ kinfo->num_tc = vport->vport_id ? 1 :
+ min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
+- vport->qs_offset = (vport->vport_id ? hdev->tm_info.num_tc : 0) +
++ vport->qs_offset = (vport->vport_id ? HNAE3_MAX_TC : 0) +
+ (vport->vport_id ? (vport->vport_id - 1) : 0);
+
+ max_rss_size = min_t(u16, hdev->rss_size_max,
+--
+2.20.1
+
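The arithmetic behind the fix above, as a stand-alone sketch: if a VF's qset offset is derived from the current number of TCs, the offset moves when the TC count changes, so previously configured bandwidth no longer applies to the right qsets; using a fixed maximum keeps the range stable. MAX_TC below is an assumed placeholder for HNAE3_MAX_TC, and the helper names are illustrative only.

#include <stdio.h>

#define MAX_TC 8	/* assumed stand-in for HNAE3_MAX_TC */

static int qs_offset_dynamic(int num_tc, int vport_id)
{
	return (vport_id ? num_tc : 0) + (vport_id ? vport_id - 1 : 0);
}

static int qs_offset_fixed(int vport_id)
{
	return (vport_id ? MAX_TC : 0) + (vport_id ? vport_id - 1 : 0);
}

int main(void)
{
	int vf = 1;	/* first VF (vport_id 0 is the PF itself) */

	printf("VF%d offset with 1 TC: dynamic=%d fixed=%d\n",
	       vf, qs_offset_dynamic(1, vf), qs_offset_fixed(vf));
	printf("VF%d offset with 4 TC: dynamic=%d fixed=%d\n",
	       vf, qs_offset_dynamic(4, vf), qs_offset_fixed(vf));
	return 0;
}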
--- /dev/null
+From 7a63801aa04d3a79918a51ff139e89a4cb717544 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 5 Feb 2020 09:01:53 -0500
+Subject: NFSv4: Fix races between open and dentry revalidation
+
+From: Trond Myklebust <trondmy@gmail.com>
+
+[ Upstream commit cf5b4059ba7197d6cef9c0e024979d178ed8c8ec ]
+
+We want to make sure that we revalidate the dentry if and only if
+we've done an OPEN by filename.
+In order to avoid races with remote changes to the directory on the
+server, we want to save the verifier before calling OPEN. The exception
+is if the server returned a delegation with our OPEN, as we then
+know that the filename can't have changed on the server.
+
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Reviewed-by: Benjamin Coddington <bcodding@gmail.com>
+Tested-by: Benjamin Coddington <bcodding@gmail.com>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/nfs4file.c | 1 -
+ fs/nfs/nfs4proc.c | 18 ++++++++++++++++--
+ 2 files changed, 16 insertions(+), 3 deletions(-)
+
+diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
+index 620de905cba97..3f892035c1413 100644
+--- a/fs/nfs/nfs4file.c
++++ b/fs/nfs/nfs4file.c
+@@ -86,7 +86,6 @@ nfs4_file_open(struct inode *inode, struct file *filp)
+ if (inode != d_inode(dentry))
+ goto out_drop;
+
+- nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+ nfs_file_set_open_context(filp, ctx);
+ nfs_fscache_open_file(inode, filp);
+ err = 0;
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 6ddb4f517d373..13c2de527718a 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -2962,10 +2962,13 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
+ struct dentry *dentry;
+ struct nfs4_state *state;
+ fmode_t acc_mode = _nfs4_ctx_to_accessmode(ctx);
++ struct inode *dir = d_inode(opendata->dir);
++ unsigned long dir_verifier;
+ unsigned int seq;
+ int ret;
+
+ seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
++ dir_verifier = nfs_save_change_attribute(dir);
+
+ ret = _nfs4_proc_open(opendata, ctx);
+ if (ret != 0)
+@@ -2993,8 +2996,19 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
+ dput(ctx->dentry);
+ ctx->dentry = dentry = alias;
+ }
+- nfs_set_verifier(dentry,
+- nfs_save_change_attribute(d_inode(opendata->dir)));
++ }
++
++ switch(opendata->o_arg.claim) {
++ default:
++ break;
++ case NFS4_OPEN_CLAIM_NULL:
++ case NFS4_OPEN_CLAIM_DELEGATE_CUR:
++ case NFS4_OPEN_CLAIM_DELEGATE_PREV:
++ if (!opendata->rpc_done)
++ break;
++ if (opendata->o_res.delegation_type != 0)
++ dir_verifier = nfs_save_change_attribute(dir);
++ nfs_set_verifier(dentry, dir_verifier);
+ }
+
+ /* Parse layoutget results before we check for access */
+--
+2.20.1
+
--- /dev/null
+From 8014e43629730220bd2db0240d81603ee2892a39 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 13 Feb 2020 01:41:05 +0900
+Subject: nvme/pci: move cqe check after device shutdown
+
+From: Keith Busch <kbusch@kernel.org>
+
+[ Upstream commit fa46c6fb5d61b1f17b06d7c6ef75478b576304c7 ]
+
+Many users have reported nvme triggered irq_startup() warnings during
+shutdown. The driver uses the nvme queue's irq to synchronize scanning
+for completions, and enabling an interrupt affined to only offline CPUs
+triggers the alarming warning.
+
+Move the final CQE check to after disabling the device and all
+registered interrupts have been torn down so that we do not have any
+IRQ to synchronize.
+
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=206509
+Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/pci.c | 23 ++++++++++++++++++-----
+ 1 file changed, 18 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index da392b50f73e7..9c80f9f081496 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -1401,6 +1401,23 @@ static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
+ nvme_poll_irqdisable(nvmeq, -1);
+ }
+
++/*
++ * Called only on a device that has been disabled and after all other threads
++ * that can check this device's completion queues have synced. This is the
++ * last chance for the driver to see a natural completion before
++ * nvme_cancel_request() terminates all incomplete requests.
++ */
++static void nvme_reap_pending_cqes(struct nvme_dev *dev)
++{
++ u16 start, end;
++ int i;
++
++ for (i = dev->ctrl.queue_count - 1; i > 0; i--) {
++ nvme_process_cq(&dev->queues[i], &start, &end, -1);
++ nvme_complete_cqes(&dev->queues[i], start, end);
++ }
++}
++
+ static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
+ int entry_size)
+ {
+@@ -2235,11 +2252,6 @@ static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode)
+ if (timeout == 0)
+ return false;
+
+- /* handle any remaining CQEs */
+- if (opcode == nvme_admin_delete_cq &&
+- !test_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags))
+- nvme_poll_irqdisable(nvmeq, -1);
+-
+ sent--;
+ if (nr_queues)
+ goto retry;
+@@ -2428,6 +2440,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
+ nvme_suspend_io_queues(dev);
+ nvme_suspend_queue(&dev->queues[0]);
+ nvme_pci_disable(dev);
++ nvme_reap_pending_cqes(dev);
+
+ blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
+ blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl);
+--
+2.20.1
+
--- /dev/null
+From e543a1918134903fbdc96e4cb31519ac4053ec92 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 10 Feb 2020 16:01:45 -0800
+Subject: nvme: prevent warning triggered by nvme_stop_keep_alive
+
+From: Nigel Kirkland <nigel.kirkland@broadcom.com>
+
+[ Upstream commit 97b2512ad000a409b4073dd1a71e4157d76675cb ]
+
+Delayed keep-alive work is queued on the system workqueue and may be cancelled
+via nvme_stop_keep_alive() from nvme_reset_wq, nvme_fc_wq or nvme_wq.
+
+Check_flush_dependency detects mismatched attributes between the work-queue
+context used to cancel the keep alive work and system-wq. Specifically
+system-wq does not have the WQ_MEM_RECLAIM flag, whereas the contexts used
+to cancel keep alive work have WQ_MEM_RECLAIM flag.
+
+Example warning:
+
+ workqueue: WQ_MEM_RECLAIM nvme-reset-wq:nvme_fc_reset_ctrl_work [nvme_fc]
+ is flushing !WQ_MEM_RECLAIM events:nvme_keep_alive_work [nvme_core]
+
+To avoid the flags mismatch, delayed keep alive work is queued on nvme_wq.
+
+However this creates a secondary concern where work and a request to cancel
+that work may be in the same work queue - namely err_work in the rdma and
+tcp transports, which will want to flush/cancel the keep alive work which
+will now be on nvme_wq.
+
+After reviewing the transports, it looks like err_work can be moved to
+nvme_reset_wq. In fact that aligns them better with transition into
+RESETTING and performing related reset work in nvme_reset_wq.
+
+Change nvme-rdma and nvme-tcp to perform err_work in nvme_reset_wq.
+
+Signed-off-by: Nigel Kirkland <nigel.kirkland@broadcom.com>
+Signed-off-by: James Smart <jsmart2021@gmail.com>
+Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/core.c | 10 +++++-----
+ drivers/nvme/host/rdma.c | 2 +-
+ drivers/nvme/host/tcp.c | 2 +-
+ 3 files changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 641c07347e8d8..ada59df642d29 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -66,8 +66,8 @@ MODULE_PARM_DESC(streams, "turn on support for Streams write directives");
+ * nvme_reset_wq - hosts nvme reset works
+ * nvme_delete_wq - hosts nvme delete works
+ *
+- * nvme_wq will host works such are scan, aen handling, fw activation,
+- * keep-alive error recovery, periodic reconnects etc. nvme_reset_wq
++ * nvme_wq will host works such as scan, aen handling, fw activation,
++ * keep-alive, periodic reconnects etc. nvme_reset_wq
+ * runs reset works which also flush works hosted on nvme_wq for
+ * serialization purposes. nvme_delete_wq host controller deletion
+ * works which flush reset works for serialization.
+@@ -976,7 +976,7 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
+ startka = true;
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+ if (startka)
+- schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
++ queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
+ }
+
+ static int nvme_keep_alive(struct nvme_ctrl *ctrl)
+@@ -1006,7 +1006,7 @@ static void nvme_keep_alive_work(struct work_struct *work)
+ dev_dbg(ctrl->device,
+ "reschedule traffic based keep-alive timer\n");
+ ctrl->comp_seen = false;
+- schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
++ queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
+ return;
+ }
+
+@@ -1023,7 +1023,7 @@ static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
+ if (unlikely(ctrl->kato == 0))
+ return;
+
+- schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
++ queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
+ }
+
+ void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
+diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
+index 2a47c6c5007e1..3e85c5cacefd2 100644
+--- a/drivers/nvme/host/rdma.c
++++ b/drivers/nvme/host/rdma.c
+@@ -1088,7 +1088,7 @@ static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
+ return;
+
+- queue_work(nvme_wq, &ctrl->err_work);
++ queue_work(nvme_reset_wq, &ctrl->err_work);
+ }
+
+ static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc,
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index f8fa5c5b79f17..49d4373b84eb3 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -422,7 +422,7 @@ static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
+ if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
+ return;
+
+- queue_work(nvme_wq, &to_tcp_ctrl(ctrl)->err_work);
++ queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
+ }
+
+ static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
+--
+2.20.1
+
--- /dev/null
+From 3c3ea53507780a8759a194676119bb2eb73ccd36 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 10 Feb 2020 10:37:18 -0800
+Subject: nvme/tcp: fix bug on double requeue when send fails
+
+From: Anton Eidelman <anton@lightbitslabs.com>
+
+[ Upstream commit 2d570a7c0251c594489a2c16b82b14ae30345c03 ]
+
+When nvme_tcp_io_work() fails to send to the socket due to a
+connection close/reset, error_recovery work is triggered
+from nvme_tcp_state_change() socket callback.
+This cancels all the active requests in the tagset,
+which requeues them.
+
+The failed request, however, was ended and thus requeued
+individually as well unless send returned -EPIPE.
+Another return code to be treated the same way is -ECONNRESET.
+
+Double requeue caused BUG_ON(blk_queued_rq(rq))
+in blk_mq_requeue_request() from either the individual requeue
+of the failed request or the bulk requeue from
+blk_mq_tagset_busy_iter(, nvme_cancel_request, );
+
+Signed-off-by: Anton Eidelman <anton@lightbitslabs.com>
+Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/tcp.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index 6d43b23a0fc8b..f8fa5c5b79f17 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -1054,7 +1054,12 @@ static void nvme_tcp_io_work(struct work_struct *w)
+ } else if (unlikely(result < 0)) {
+ dev_err(queue->ctrl->ctrl.device,
+ "failed to send request %d\n", result);
+- if (result != -EPIPE)
++
++ /*
++ * Fail the request unless peer closed the connection,
++ * in which case error recovery flow will complete all.
++ */
++ if ((result != -EPIPE) && (result != -ECONNRESET))
+ nvme_tcp_fail_request(queue->request);
+ nvme_tcp_done_send_req(queue);
+ return;
+--
+2.20.1
+
--- /dev/null
+From ab689b9ec2a7f12c0ec8b964f0529689a42bafbb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 Feb 2020 00:50:17 +0800
+Subject: perf/smmuv3: Use platform_get_irq_optional() for wired interrupt
+
+From: John Garry <john.garry@huawei.com>
+
+[ Upstream commit 0ca2c0319a7bce0e152b51b866979d62dc261e48 ]
+
+Even though an SMMUv3 PMCG implementation may use an MSI as its
+interrupt source, the kernel would still complain that it cannot find
+the wired (GSIV) interrupt in this case:
+
+root@(none)$ dmesg | grep arm-smmu-v3-pmcg | grep "not found"
+[ 59.237219] arm-smmu-v3-pmcg arm-smmu-v3-pmcg.8.auto: IRQ index 0 not found
+[ 59.322841] arm-smmu-v3-pmcg arm-smmu-v3-pmcg.9.auto: IRQ index 0 not found
+[ 59.422155] arm-smmu-v3-pmcg arm-smmu-v3-pmcg.10.auto: IRQ index 0 not found
+[ 59.539014] arm-smmu-v3-pmcg arm-smmu-v3-pmcg.11.auto: IRQ index 0 not found
+[ 59.640329] arm-smmu-v3-pmcg arm-smmu-v3-pmcg.12.auto: IRQ index 0 not found
+[ 59.743112] arm-smmu-v3-pmcg arm-smmu-v3-pmcg.13.auto: IRQ index 0 not found
+[ 59.880577] arm-smmu-v3-pmcg arm-smmu-v3-pmcg.14.auto: IRQ index 0 not found
+[ 60.017528] arm-smmu-v3-pmcg arm-smmu-v3-pmcg.15.auto: IRQ index 0 not found
+
+Use platform_get_irq_optional() to silence the warning.
+
+If neither interrupt source is found, the driver still warns that
+IRQ setup failed and the probe fails.
+
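+As a rough illustration of the API difference (not the PMCG driver
+itself), a minimal hypothetical platform driver probe might look like
+this; the driver name and messages are made up:
+
+  // SPDX-License-Identifier: GPL-2.0
+  /* Hypothetical minimal driver illustrating the optional-IRQ pattern. */
+  #include <linux/module.h>
+  #include <linux/platform_device.h>
+
+  static int demo_probe(struct platform_device *pdev)
+  {
+          int irq;
+
+          /*
+           * Unlike platform_get_irq(), platform_get_irq_optional() does
+           * not log "IRQ index 0 not found" when no wired interrupt is
+           * described; the device may still be usable via MSI.
+           */
+          irq = platform_get_irq_optional(pdev, 0);
+          if (irq > 0)
+                  dev_info(&pdev->dev, "using wired IRQ %d\n", irq);
+          else
+                  dev_info(&pdev->dev, "no wired IRQ, expecting MSI\n");
+
+          return 0;
+  }
+
+  static struct platform_driver demo_driver = {
+          .probe  = demo_probe,
+          .driver = { .name = "irq-optional-demo" },
+  };
+  module_platform_driver(demo_driver);
+
+  MODULE_LICENSE("GPL");
+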
+Reviewed-by: Robin Murphy <robin.murphy@arm.com>
+Signed-off-by: John Garry <john.garry@huawei.com>
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/perf/arm_smmuv3_pmu.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
+index d704eccc548f6..f01a57e5a5f35 100644
+--- a/drivers/perf/arm_smmuv3_pmu.c
++++ b/drivers/perf/arm_smmuv3_pmu.c
+@@ -771,7 +771,7 @@ static int smmu_pmu_probe(struct platform_device *pdev)
+ smmu_pmu->reloc_base = smmu_pmu->reg_base;
+ }
+
+- irq = platform_get_irq(pdev, 0);
++ irq = platform_get_irq_optional(pdev, 0);
+ if (irq > 0)
+ smmu_pmu->irq = irq;
+
+--
+2.20.1
+
--- /dev/null
+From 06acb472effb32b90a3dba3e788effac89e2eb1e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 Jan 2020 10:31:18 -0800
+Subject: perf/x86/cstate: Add Tremont support
+
+From: Kan Liang <kan.liang@linux.intel.com>
+
+[ Upstream commit ecf71fbccb9ac5cb964eb7de59bb9da3755b7885 ]
+
+Tremont is Intel's successor to Goldmont Plus. From the perspective of
+the Intel cstate residency counters, nothing has changed compared with
+Goldmont Plus and Goldmont.
+
+Share glm_cstates with Goldmont Plus and Goldmont.
+Update the comments for Tremont.
+
+Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Reviewed-by: Andi Kleen <ak@linux.intel.com>
+Link: https://lkml.kernel.org/r/1580236279-35492-2-git-send-email-kan.liang@linux.intel.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/events/intel/cstate.c | 22 +++++++++++++---------
+ 1 file changed, 13 insertions(+), 9 deletions(-)
+
+diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
+index e1daf4151e116..4814c964692cb 100644
+--- a/arch/x86/events/intel/cstate.c
++++ b/arch/x86/events/intel/cstate.c
+@@ -40,17 +40,18 @@
+ * Model specific counters:
+ * MSR_CORE_C1_RES: CORE C1 Residency Counter
+ * perf code: 0x00
+- * Available model: SLM,AMT,GLM,CNL
++ * Available model: SLM,AMT,GLM,CNL,TNT
+ * Scope: Core (each processor core has a MSR)
+ * MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
+ * perf code: 0x01
+ * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,GLM,
+- * CNL,KBL,CML
++ * CNL,KBL,CML,TNT
+ * Scope: Core
+ * MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
+ * perf code: 0x02
+ * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
+- * SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL
++ * SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL,
++ * TNT
+ * Scope: Core
+ * MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
+ * perf code: 0x03
+@@ -60,17 +61,18 @@
+ * MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter.
+ * perf code: 0x00
+ * Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL,
+- * KBL,CML,ICL,TGL
++ * KBL,CML,ICL,TGL,TNT
+ * Scope: Package (physical package)
+ * MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter.
+ * perf code: 0x01
+ * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL,
+- * GLM,CNL,KBL,CML,ICL,TGL
++ * GLM,CNL,KBL,CML,ICL,TGL,TNT
+ * Scope: Package (physical package)
+ * MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter.
+ * perf code: 0x02
+- * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW
+- * SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL
++ * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
++ * SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL,
++ * TNT
+ * Scope: Package (physical package)
+ * MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter.
+ * perf code: 0x03
+@@ -87,7 +89,8 @@
+ * Scope: Package (physical package)
+ * MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
+ * perf code: 0x06
+- * Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL
++ * Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL,
++ * TNT
+ * Scope: Package (physical package)
+ *
+ */
+@@ -640,8 +643,9 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
+
+ X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT, glm_cstates),
+ X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_D, glm_cstates),
+-
+ X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_PLUS, glm_cstates),
++ X86_CSTATES_MODEL(INTEL_FAM6_ATOM_TREMONT_D, glm_cstates),
++ X86_CSTATES_MODEL(INTEL_FAM6_ATOM_TREMONT, glm_cstates),
+
+ X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE_L, icl_cstates),
+ X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE, icl_cstates),
+--
+2.20.1
+
--- /dev/null
+From 7cf261248684e578818a8d28e57cc11578c35923 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 Jan 2020 10:31:17 -0800
+Subject: perf/x86/intel: Add Elkhart Lake support
+
+From: Kan Liang <kan.liang@linux.intel.com>
+
+[ Upstream commit eda23b387f6c4bb2971ac7e874a09913f533b22c ]
+
+Elkhart Lake also uses the Tremont CPU. From the perspective of the
+Intel PMU, nothing has changed compared with Jacobsville.
+Share the perf code with Jacobsville.
+
+Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Reviewed-by: Andi Kleen <ak@linux.intel.com>
+Link: https://lkml.kernel.org/r/1580236279-35492-1-git-send-email-kan.liang@linux.intel.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/events/intel/core.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index 3be51aa06e67e..dff6623804c28 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -4765,6 +4765,7 @@ __init int intel_pmu_init(void)
+ break;
+
+ case INTEL_FAM6_ATOM_TREMONT_D:
++ case INTEL_FAM6_ATOM_TREMONT:
+ x86_pmu.late_ack = true;
+ memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
+ sizeof(hw_cache_event_ids));
+--
+2.20.1
+
--- /dev/null
+From 910721ab49e9b14335af4a349597b56ad78478a0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 Jan 2020 10:31:19 -0800
+Subject: perf/x86/msr: Add Tremont support
+
+From: Kan Liang <kan.liang@linux.intel.com>
+
+[ Upstream commit 0aa0e0d6b34b89649e6b5882a7e025a0eb9bd832 ]
+
+Tremont is Intel's successor to Goldmont Plus. The SMI_COUNT MSR is
+also supported.
+
+Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Reviewed-by: Andi Kleen <ak@linux.intel.com>
+Link: https://lkml.kernel.org/r/1580236279-35492-3-git-send-email-kan.liang@linux.intel.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/events/msr.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
+index 6f86650b3f77d..a949f6f55991d 100644
+--- a/arch/x86/events/msr.c
++++ b/arch/x86/events/msr.c
+@@ -75,8 +75,9 @@ static bool test_intel(int idx, void *data)
+
+ case INTEL_FAM6_ATOM_GOLDMONT:
+ case INTEL_FAM6_ATOM_GOLDMONT_D:
+-
+ case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
++ case INTEL_FAM6_ATOM_TREMONT_D:
++ case INTEL_FAM6_ATOM_TREMONT:
+
+ case INTEL_FAM6_XEON_PHI_KNL:
+ case INTEL_FAM6_XEON_PHI_KNM:
+--
+2.20.1
+
--- /dev/null
+From 4837693f310ec52b789d70c6f6d325d640e1a564 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 8 Feb 2020 15:50:36 +0100
+Subject: qmi_wwan: re-add DW5821e pre-production variant
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Bjørn Mork <bjorn@mork.no>
+
+[ Upstream commit 88bf54603f6f2c137dfee1abf6436ceac3528d2d ]
+
+Commit f25e1392fdb5 removed support for the pre-production variant of
+the Dell DW5821e to avoid probing another USB interface unnecessarily.
+However, pre-production samples are found in the wild, and this lack of
+support is causing problems for users of such samples. It is therefore
+necessary to support both variants.
+
+Matching on both interfaces 0 and 1 is not expected to cause any problem
+with either variant, as only the QMI function will be probed successfully
+on either. Interface 1 will be rejected based on the HID class for the
+production variant:
+
+T: Bus=01 Lev=03 Prnt=04 Port=00 Cnt=01 Dev#= 16 Spd=480 MxCh= 0
+D: Ver= 2.10 Cls=ef(misc ) Sub=02 Prot=01 MxPS=64 #Cfgs= 2
+P: Vendor=413c ProdID=81d7 Rev=03.18
+S: Manufacturer=DELL
+S: Product=DW5821e Snapdragon X20 LTE
+S: SerialNumber=0123456789ABCDEF
+C: #Ifs= 6 Cfg#= 1 Atr=a0 MxPwr=500mA
+I: If#= 0 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=ff Driver=qmi_wwan
+I: If#= 1 Alt= 0 #EPs= 1 Cls=03(HID ) Sub=00 Prot=00 Driver=usbhid
+I: If#= 2 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+I: If#= 3 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+I: If#= 4 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+I: If#= 5 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=ff Driver=option
+
+And interface 0 will be rejected based on too few endpoints for the
+pre-production variant:
+
+T: Bus=01 Lev=02 Prnt=02 Port=03 Cnt=03 Dev#= 7 Spd=480 MxCh= 0
+D: Ver= 2.10 Cls=ef(misc ) Sub=02 Prot=01 MxPS=64 #Cfgs= 2
+P: Vendor=413c ProdID=81d7 Rev= 3.18
+S: Manufacturer=DELL
+S: Product=DW5821e Snapdragon X20 LTE
+S: SerialNumber=0123456789ABCDEF
+C: #Ifs= 5 Cfg#= 1 Atr=a0 MxPwr=500mA
+I: If#= 0 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=ff Driver=
+I: If#= 1 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=ff Driver=qmi_wwan
+I: If#= 2 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+I: If#= 3 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+I: If#= 4 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+
+Fixes: f25e1392fdb5 ("qmi_wwan: fix interface number for DW5821e production firmware")
+Link: https://whrl.pl/Rf0vNk
+Reported-by: Lars Melin <larsm17@gmail.com>
+Cc: Aleksander Morgado <aleksander@aleksander.es>
+Signed-off-by: Bjørn Mork <bjorn@mork.no>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/usb/qmi_wwan.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 9485c8d1de8a3..839cef720cf64 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1363,6 +1363,7 @@ static const struct usb_device_id products[] = {
+ {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
+ {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
+ {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
++ {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */
+ {QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/
+ {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
+ {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
+--
+2.20.1
+
--- /dev/null
+From 6f350ee495a0afe5edaa7c0fb3023838888f69a4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 8 Feb 2020 16:55:04 +0100
+Subject: qmi_wwan: unconditionally reject 2 ep interfaces
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Bjørn Mork <bjorn@mork.no>
+
+[ Upstream commit 00516d13d4cfa56ce39da144db2dbf08b09b9357 ]
+
+We have been using the fact that the QMI and DIAG functions
+usually are the only ones with class/subclass/protocol being
+ff/ff/ff on Quectel modems. This has allowed us to match the
+QMI function without knowing the exact interface number,
+which can vary depending on firmware configuration.
+
+The ability to silently reject the DIAG function, which is
+usually handled by the option driver, is important for this
+method to work. The rejection relies on the fact that the DIAG
+function has exactly two bulk endpoints, while a QMI control
+interface has either one or three endpoints. This rule is
+universal, so the quirk condition can be removed.
+
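+A small stand-alone C sketch of the resulting decision rule (the struct
+and values are simplified stand-ins, not the real usb_interface_descriptor
+handling in qmi_wwan):
+
+  #include <stdio.h>
+
+  struct ifdesc {
+          unsigned char bInterfaceClass, bInterfaceSubClass, bInterfaceProtocol;
+          unsigned char bNumEndpoints;
+  };
+
+  static int looks_like_qmi(const struct ifdesc *d)
+  {
+          /* Only vendor-specific ff/ff/ff functions are considered at all. */
+          if (d->bInterfaceClass != 0xff || d->bInterfaceSubClass != 0xff ||
+              d->bInterfaceProtocol != 0xff)
+                  return 0;
+          /* A DIAG function has exactly two bulk endpoints; a QMI control
+           * interface has one or three.  Reject two unconditionally. */
+          return d->bNumEndpoints != 2;
+  }
+
+  int main(void)
+  {
+          struct ifdesc diag = { 0xff, 0xff, 0xff, 2 };
+          struct ifdesc qmi  = { 0xff, 0xff, 0xff, 3 };
+
+          printf("diag -> %s\n", looks_like_qmi(&diag) ? "bind" : "reject");
+          printf("qmi  -> %s\n", looks_like_qmi(&qmi)  ? "bind" : "reject");
+          return 0;
+  }
+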
+The fixed layouts known from the Gobi1k and Gobi2k modems
+have been gradually replaced by more dynamic layouts, and
+many vendors now use configurable layouts without changing
+device IDs. Renaming the class/subclass/protocol matching
+macro makes it more obvious that the match is no longer
+Quectel specific.
+
+Cc: Kristian Evensen <kristian.evensen@gmail.com>
+Cc: Aleksander Morgado <aleksander@aleksander.es>
+Signed-off-by: Bjørn Mork <bjorn@mork.no>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/usb/qmi_wwan.c | 42 ++++++++++++++------------------------
+ 1 file changed, 15 insertions(+), 27 deletions(-)
+
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 839cef720cf64..3b7a3b8a5e067 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -61,7 +61,6 @@ enum qmi_wwan_flags {
+
+ enum qmi_wwan_quirks {
+ QMI_WWAN_QUIRK_DTR = 1 << 0, /* needs "set DTR" request */
+- QMI_WWAN_QUIRK_QUECTEL_DYNCFG = 1 << 1, /* check num. endpoints */
+ };
+
+ struct qmimux_hdr {
+@@ -916,16 +915,6 @@ static const struct driver_info qmi_wwan_info_quirk_dtr = {
+ .data = QMI_WWAN_QUIRK_DTR,
+ };
+
+-static const struct driver_info qmi_wwan_info_quirk_quectel_dyncfg = {
+- .description = "WWAN/QMI device",
+- .flags = FLAG_WWAN | FLAG_SEND_ZLP,
+- .bind = qmi_wwan_bind,
+- .unbind = qmi_wwan_unbind,
+- .manage_power = qmi_wwan_manage_power,
+- .rx_fixup = qmi_wwan_rx_fixup,
+- .data = QMI_WWAN_QUIRK_DTR | QMI_WWAN_QUIRK_QUECTEL_DYNCFG,
+-};
+-
+ #define HUAWEI_VENDOR_ID 0x12D1
+
+ /* map QMI/wwan function by a fixed interface number */
+@@ -946,14 +935,18 @@ static const struct driver_info qmi_wwan_info_quirk_quectel_dyncfg = {
+ #define QMI_GOBI_DEVICE(vend, prod) \
+ QMI_FIXED_INTF(vend, prod, 0)
+
+-/* Quectel does not use fixed interface numbers on at least some of their
+- * devices. We need to check the number of endpoints to ensure that we bind to
+- * the correct interface.
++/* Many devices have QMI and DIAG functions which are distinguishable
++ * from other vendor specific functions by class, subclass and
++ * protocol all being 0xff. The DIAG function has exactly 2 endpoints
++ * and is silently rejected when probed.
++ *
++ * This makes it possible to match dynamically numbered QMI functions
++ * as seen on e.g. many Quectel modems.
+ */
+-#define QMI_QUIRK_QUECTEL_DYNCFG(vend, prod) \
++#define QMI_MATCH_FF_FF_FF(vend, prod) \
+ USB_DEVICE_AND_INTERFACE_INFO(vend, prod, USB_CLASS_VENDOR_SPEC, \
+ USB_SUBCLASS_VENDOR_SPEC, 0xff), \
+- .driver_info = (unsigned long)&qmi_wwan_info_quirk_quectel_dyncfg
++ .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr
+
+ static const struct usb_device_id products[] = {
+ /* 1. CDC ECM like devices match on the control interface */
+@@ -1059,10 +1052,10 @@ static const struct usb_device_id products[] = {
+ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
+ .driver_info = (unsigned long)&qmi_wwan_info,
+ },
+- {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
+- {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */
+- {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */
+- {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0800)}, /* Quectel RM500Q-GL */
++ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
++ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */
++ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */
++ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0800)}, /* Quectel RM500Q-GL */
+
+ /* 3. Combined interface devices matching on interface number */
+ {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
+@@ -1455,7 +1448,6 @@ static int qmi_wwan_probe(struct usb_interface *intf,
+ {
+ struct usb_device_id *id = (struct usb_device_id *)prod;
+ struct usb_interface_descriptor *desc = &intf->cur_altsetting->desc;
+- const struct driver_info *info;
+
+ /* Workaround to enable dynamic IDs. This disables usbnet
+ * blacklisting functionality. Which, if required, can be
+@@ -1491,12 +1483,8 @@ static int qmi_wwan_probe(struct usb_interface *intf,
+ * different. Ignore the current interface if the number of endpoints
+ * equals the number for the diag interface (two).
+ */
+- info = (void *)id->driver_info;
+-
+- if (info->data & QMI_WWAN_QUIRK_QUECTEL_DYNCFG) {
+- if (desc->bNumEndpoints == 2)
+- return -ENODEV;
+- }
++ if (desc->bNumEndpoints == 2)
++ return -ENODEV;
+
+ return usbnet_probe(intf, id);
+ }
+--
+2.20.1
+
--- /dev/null
+From 314e40444c7a6b64c3bcb30091f5c152284de788 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 7 Feb 2020 19:44:29 +0530
+Subject: RDMA/siw: Remove unwanted WARN_ON in siw_cm_llp_data_ready()
+
+From: Krishnamraju Eraparaju <krishna2@chelsio.com>
+
+[ Upstream commit 663218a3e715fd9339d143a3e10088316b180f4f ]
+
+Warnings like the one below can fill up dmesg while RDMA
+connections are being disconnected.
+Hence, remove the unwanted WARN_ON.
+
+ WARNING: CPU: 6 PID: 0 at drivers/infiniband/sw/siw/siw_cm.c:1229 siw_cm_llp_data_ready+0xc1/0xd0 [siw]
+ RIP: 0010:siw_cm_llp_data_ready+0xc1/0xd0 [siw]
+ Call Trace:
+ <IRQ>
+ tcp_data_queue+0x226/0xb40
+ tcp_rcv_established+0x220/0x620
+ tcp_v4_do_rcv+0x12a/0x1e0
+ tcp_v4_rcv+0xb05/0xc00
+ ip_local_deliver_finish+0x69/0x210
+ ip_local_deliver+0x6b/0xe0
+ ip_rcv+0x273/0x362
+ __netif_receive_skb_core+0xb35/0xc30
+ netif_receive_skb_internal+0x3d/0xb0
+ napi_gro_frags+0x13b/0x200
+ t4_ethrx_handler+0x433/0x7d0 [cxgb4]
+ process_responses+0x318/0x580 [cxgb4]
+ napi_rx_handler+0x14/0x100 [cxgb4]
+ net_rx_action+0x149/0x3b0
+ __do_softirq+0xe3/0x30a
+ irq_exit+0x100/0x110
+ do_IRQ+0x7f/0xe0
+ common_interrupt+0xf/0xf
+ </IRQ>
+
+Link: https://lore.kernel.org/r/20200207141429.27927-1-krishna2@chelsio.com
+Signed-off-by: Krishnamraju Eraparaju <krishna2@chelsio.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/sw/siw/siw_cm.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
+index 3bccfef40e7e1..ac86363ce1a24 100644
+--- a/drivers/infiniband/sw/siw/siw_cm.c
++++ b/drivers/infiniband/sw/siw/siw_cm.c
+@@ -1225,10 +1225,9 @@ static void siw_cm_llp_data_ready(struct sock *sk)
+ read_lock(&sk->sk_callback_lock);
+
+ cep = sk_to_cep(sk);
+- if (!cep) {
+- WARN_ON(1);
++ if (!cep)
+ goto out;
+- }
++
+ siw_dbg_cep(cep, "state: %d\n", cep->state);
+
+ switch (cep->state) {
+--
+2.20.1
+
--- /dev/null
+From f64b0ef30210ed8e1892e00ee838382fc71bbdc3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Dec 2019 16:02:54 +0100
+Subject: s390/zcrypt: fix card and queue total counter wrap
+
+From: Harald Freudenberger <freude@linux.ibm.com>
+
+[ Upstream commit fcd98d4002539f1e381916fc1b6648938c1eac76 ]
+
+The internal statistics counters for the total number of requests
+processed per card and per queue used plain integers, so they wrapped
+after a rather large number of crypto requests had been processed. This
+patch introduces 64-bit counters, which should last much longer but may
+still wrap eventually. The request_count sysfs attributes for card and
+queue, which displayed the value with %d, now use %llu.
+
+This is not a security-relevant fix. The integer overflow that occurred
+is not in any way exploitable as a security breach.
+
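+A small stand-alone C illustration of the wrap and of the clamping done
+for the legacy 32-bit ioctl interface (plain userspace C, not the kernel
+code):
+
+  #include <stdio.h>
+  #include <stdint.h>
+  #include <limits.h>
+
+  int main(void)
+  {
+          uint32_t cnt32 = UINT32_MAX;
+          uint64_t cnt64 = UINT32_MAX;
+
+          cnt32 += 1;     /* wraps back to 0 */
+          cnt64 += 1;     /* keeps counting */
+
+          printf("32-bit counter after one more request: %u\n",
+                 (unsigned int)cnt32);
+          printf("64-bit counter after one more request: %llu\n",
+                 (unsigned long long)cnt64);
+
+          /* The old ioctl still reports 32-bit values, so large counts are
+           * clamped, mirroring what zcrypt_perdev_reqcnt() does. */
+          printf("value reported via the legacy ioctl: %u\n",
+                 cnt64 < UINT_MAX ? (unsigned int)cnt64 : UINT_MAX);
+          return 0;
+  }
+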
+Signed-off-by: Harald Freudenberger <freude@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/s390/crypto/ap_bus.h | 4 ++--
+ drivers/s390/crypto/ap_card.c | 8 ++++----
+ drivers/s390/crypto/ap_queue.c | 6 +++---
+ drivers/s390/crypto/zcrypt_api.c | 16 +++++++++-------
+ 4 files changed, 18 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
+index bb35ba4a8d243..4348fdff1c61e 100644
+--- a/drivers/s390/crypto/ap_bus.h
++++ b/drivers/s390/crypto/ap_bus.h
+@@ -162,7 +162,7 @@ struct ap_card {
+ unsigned int functions; /* AP device function bitfield. */
+ int queue_depth; /* AP queue depth.*/
+ int id; /* AP card number. */
+- atomic_t total_request_count; /* # requests ever for this AP device.*/
++ atomic64_t total_request_count; /* # requests ever for this AP device.*/
+ };
+
+ #define to_ap_card(x) container_of((x), struct ap_card, ap_dev.device)
+@@ -179,7 +179,7 @@ struct ap_queue {
+ enum ap_state state; /* State of the AP device. */
+ int pendingq_count; /* # requests on pendingq list. */
+ int requestq_count; /* # requests on requestq list. */
+- int total_request_count; /* # requests ever for this AP device.*/
++ u64 total_request_count; /* # requests ever for this AP device.*/
+ int request_timeout; /* Request timeout in jiffies. */
+ struct timer_list timeout; /* Timer for request timeouts. */
+ struct list_head pendingq; /* List of message sent to AP queue. */
+diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c
+index 63b4cc6cd7e59..e85bfca1ed163 100644
+--- a/drivers/s390/crypto/ap_card.c
++++ b/drivers/s390/crypto/ap_card.c
+@@ -63,13 +63,13 @@ static ssize_t request_count_show(struct device *dev,
+ char *buf)
+ {
+ struct ap_card *ac = to_ap_card(dev);
+- unsigned int req_cnt;
++ u64 req_cnt;
+
+ req_cnt = 0;
+ spin_lock_bh(&ap_list_lock);
+- req_cnt = atomic_read(&ac->total_request_count);
++ req_cnt = atomic64_read(&ac->total_request_count);
+ spin_unlock_bh(&ap_list_lock);
+- return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
++ return snprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
+ }
+
+ static ssize_t request_count_store(struct device *dev,
+@@ -83,7 +83,7 @@ static ssize_t request_count_store(struct device *dev,
+ for_each_ap_queue(aq, ac)
+ aq->total_request_count = 0;
+ spin_unlock_bh(&ap_list_lock);
+- atomic_set(&ac->total_request_count, 0);
++ atomic64_set(&ac->total_request_count, 0);
+
+ return count;
+ }
+diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
+index 37c3bdc3642dc..a317ab4849320 100644
+--- a/drivers/s390/crypto/ap_queue.c
++++ b/drivers/s390/crypto/ap_queue.c
+@@ -479,12 +479,12 @@ static ssize_t request_count_show(struct device *dev,
+ char *buf)
+ {
+ struct ap_queue *aq = to_ap_queue(dev);
+- unsigned int req_cnt;
++ u64 req_cnt;
+
+ spin_lock_bh(&aq->lock);
+ req_cnt = aq->total_request_count;
+ spin_unlock_bh(&aq->lock);
+- return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
++ return snprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
+ }
+
+ static ssize_t request_count_store(struct device *dev,
+@@ -676,7 +676,7 @@ void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
+ list_add_tail(&ap_msg->list, &aq->requestq);
+ aq->requestq_count++;
+ aq->total_request_count++;
+- atomic_inc(&aq->card->total_request_count);
++ atomic64_inc(&aq->card->total_request_count);
+ /* Send/receive as many request from the queue as possible. */
+ ap_wait(ap_sm_event_loop(aq, AP_EVENT_POLL));
+ spin_unlock_bh(&aq->lock);
+diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
+index 9157e728a362d..7fa0262e91af0 100644
+--- a/drivers/s390/crypto/zcrypt_api.c
++++ b/drivers/s390/crypto/zcrypt_api.c
+@@ -605,8 +605,8 @@ static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
+ weight += atomic_read(&zc->load);
+ pref_weight += atomic_read(&pref_zc->load);
+ if (weight == pref_weight)
+- return atomic_read(&zc->card->total_request_count) >
+- atomic_read(&pref_zc->card->total_request_count);
++ return atomic64_read(&zc->card->total_request_count) >
++ atomic64_read(&pref_zc->card->total_request_count);
+ return weight > pref_weight;
+ }
+
+@@ -1216,11 +1216,12 @@ static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters)
+ spin_unlock(&zcrypt_list_lock);
+ }
+
+-static void zcrypt_perdev_reqcnt(int reqcnt[], size_t max_adapters)
++static void zcrypt_perdev_reqcnt(u32 reqcnt[], size_t max_adapters)
+ {
+ struct zcrypt_card *zc;
+ struct zcrypt_queue *zq;
+ int card;
++ u64 cnt;
+
+ memset(reqcnt, 0, sizeof(int) * max_adapters);
+ spin_lock(&zcrypt_list_lock);
+@@ -1232,8 +1233,9 @@ static void zcrypt_perdev_reqcnt(int reqcnt[], size_t max_adapters)
+ || card >= max_adapters)
+ continue;
+ spin_lock(&zq->queue->lock);
+- reqcnt[card] = zq->queue->total_request_count;
++ cnt = zq->queue->total_request_count;
+ spin_unlock(&zq->queue->lock);
++ reqcnt[card] = (cnt < UINT_MAX) ? (u32) cnt : UINT_MAX;
+ }
+ }
+ local_bh_enable();
+@@ -1411,9 +1413,9 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ return 0;
+ }
+ case ZCRYPT_PERDEV_REQCNT: {
+- int *reqcnt;
++ u32 *reqcnt;
+
+- reqcnt = kcalloc(AP_DEVICES, sizeof(int), GFP_KERNEL);
++ reqcnt = kcalloc(AP_DEVICES, sizeof(u32), GFP_KERNEL);
+ if (!reqcnt)
+ return -ENOMEM;
+ zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
+@@ -1470,7 +1472,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ }
+ case Z90STAT_PERDEV_REQCNT: {
+ /* the old ioctl supports only 64 adapters */
+- int reqcnt[MAX_ZDEV_CARDIDS];
++ u32 reqcnt[MAX_ZDEV_CARDIDS];
+
+ zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS);
+ if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt)))
+--
+2.20.1
+
--- /dev/null
+From df80d507f9408c86cd9a0c53e3d685564bd50bb1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 11 Jan 2020 04:53:38 -0500
+Subject: sched/core: Don't skip remote tick for idle CPUs
+
+From: Scott Wood <swood@redhat.com>
+
+[ Upstream commit 488603b815a7514c7009e6fc339d74ed4a30f343 ]
+
+This will be used in the next patch to get a loadavg update from
+nohz cpus. The delta check is skipped because idle_sched_class
+doesn't update se.exec_start.
+
+Signed-off-by: Scott Wood <swood@redhat.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Link: https://lkml.kernel.org/r/1578736419-14628-2-git-send-email-swood@redhat.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/sched/core.c | 18 ++++++++++--------
+ 1 file changed, 10 insertions(+), 8 deletions(-)
+
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index b2564d62a0f74..3cb879f4eb9c6 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -3669,22 +3669,24 @@ static void sched_tick_remote(struct work_struct *work)
+ * statistics and checks timeslices in a time-independent way, regardless
+ * of when exactly it is running.
+ */
+- if (idle_cpu(cpu) || !tick_nohz_tick_stopped_cpu(cpu))
++ if (!tick_nohz_tick_stopped_cpu(cpu))
+ goto out_requeue;
+
+ rq_lock_irq(rq, &rf);
+ curr = rq->curr;
+- if (is_idle_task(curr) || cpu_is_offline(cpu))
++ if (cpu_is_offline(cpu))
+ goto out_unlock;
+
+ update_rq_clock(rq);
+- delta = rq_clock_task(rq) - curr->se.exec_start;
+
+- /*
+- * Make sure the next tick runs within a reasonable
+- * amount of time.
+- */
+- WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
++ if (!is_idle_task(curr)) {
++ /*
++ * Make sure the next tick runs within a reasonable
++ * amount of time.
++ */
++ delta = rq_clock_task(rq) - curr->se.exec_start;
++ WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
++ }
+ curr->sched_class->task_tick(rq, curr, 0);
+
+ out_unlock:
+--
+2.20.1
+
--- /dev/null
+From 2a84b34503ad7c1ef36e77890b5d6766aa776590 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Jan 2020 15:13:56 +0100
+Subject: sched/fair: Prevent unlimited runtime on throttled group
+
+From: Vincent Guittot <vincent.guittot@linaro.org>
+
+[ Upstream commit 2a4b03ffc69f2dedc6388e9a6438b5f4c133a40d ]
+
+When a running task is moved onto a throttled task group and there is no
+other task enqueued on the CPU, the task can keep running at 100% CPU
+regardless of the bandwidth allocated to the group, even though its
+cfs_rq is throttled. Furthermore, the group entity of the cfs_rq and its
+parents are not enqueued but only set as curr on their respective cfs_rqs.
+
+We have the following sequence:
+
+sched_move_task
+ -dequeue_task: dequeue task and group_entities.
+ -put_prev_task: put task and group entities.
+ -sched_change_group: move task to new group.
+ -enqueue_task: enqueue only task but not group entities because cfs_rq is
+ throttled.
+ -set_next_task : set task and group_entities as current sched_entity of
+ their cfs_rq.
+
+Another impact is that the runnable_load_avg of the root cfs_rq stays
+zero because the group entities are not enqueued. This situation persists
+until an "external" event triggers a reschedule. Trigger one immediately
+instead.
+
+Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Ben Segall <bsegall@google.com>
+Link: https://lkml.kernel.org/r/1579011236-31256-1-git-send-email-vincent.guittot@linaro.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/sched/core.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 65ed821335dd5..9e7768dbd92d2 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -7068,8 +7068,15 @@ void sched_move_task(struct task_struct *tsk)
+
+ if (queued)
+ enqueue_task(rq, tsk, queue_flags);
+- if (running)
++ if (running) {
+ set_next_task(rq, tsk);
++ /*
++ * After changing group, the running task may have joined a
++ * throttled one but it's still the running task. Trigger a
++ * resched to make sure that task can still run.
++ */
++ resched_curr(rq);
++ }
+
+ task_rq_unlock(rq, tsk, &rf);
+ }
+--
+2.20.1
+
net-add-strict-checks-in-netdev_name_node_alt_destroy.patch
net-macb-properly-handle-phylink-on-at91rm9200.patch
net-rtnetlink-fix-bugs-in-rtnl_alt_ifname.patch
+ipmi-ssif-handle-a-possible-null-pointer-reference.patch
+drm-msm-set-dma-maximum-segment-size-for-mdss.patch
+sched-core-don-t-skip-remote-tick-for-idle-cpus.patch
+timers-nohz-update-nohz-load-in-remote-tick.patch
+sched-fair-prevent-unlimited-runtime-on-throttled-gr.patch
+dax-pass-nowait-flag-to-iomap_apply.patch
+mac80211-consider-more-elements-in-parsing-crc.patch
+cfg80211-check-wiphy-driver-existence-for-drvinfo-re.patch
+io_uring-flush-overflowed-cq-events-in-the-io_uring_.patch
+s390-zcrypt-fix-card-and-queue-total-counter-wrap.patch
+qmi_wwan-re-add-dw5821e-pre-production-variant.patch
+qmi_wwan-unconditionally-reject-2-ep-interfaces.patch
+nfsv4-fix-races-between-open-and-dentry-revalidation.patch
+perf-smmuv3-use-platform_get_irq_optional-for-wired-.patch
+arm-ftrace-fix-be-text-poking.patch
+perf-x86-intel-add-elkhart-lake-support.patch
+perf-x86-cstate-add-tremont-support.patch
+perf-x86-msr-add-tremont-support.patch
+ceph-do-not-execute-direct-write-in-parallel-if-o_ap.patch
+arm-dts-sti-fixup-sound-frame-inversion-for-stihxxx-.patch
+drm-amd-display-do-not-set-optimized_require-to-fals.patch
+rdma-siw-remove-unwanted-warn_on-in-siw_cm_llp_data_.patch
+drm-amd-display-check-engine-is-not-null-before-acqu.patch
+drm-amd-display-limit-minimum-dppclk-to-100mhz.patch
+drm-amd-display-add-initialitions-for-pll2-clock-sou.patch
+amdgpu-prevent-build-errors-regarding-soft-hard-floa.patch
+soc-tegra-fuse-fix-build-with-tegra194-configuration.patch
+i40e-fix-the-conditional-for-i40e_vc_validate_vqs_bi.patch
+net-ena-fix-potential-crash-when-rxfh-key-is-null.patch
+net-ena-fix-uses-of-round_jiffies.patch
+net-ena-add-missing-ethtool-tx-timestamping-indicati.patch
+net-ena-fix-incorrect-default-rss-key.patch
+net-ena-rss-do-not-allocate-key-when-not-supported.patch
+net-ena-rss-fix-failure-to-get-indirection-table.patch
+net-ena-rss-store-hash-function-as-values-and-not-bi.patch
+net-ena-fix-incorrectly-saving-queue-numbers-when-se.patch
+net-ena-fix-corruption-of-dev_idx_to_host_tbl.patch
+net-ena-ethtool-use-correct-value-for-crc32-hash.patch
+net-ena-ena-com.c-prevent-null-pointer-dereference.patch
+ice-fix-switch-between-fw-and-sw-lldp.patch
+ice-don-t-allow-same-value-for-rx-tail-to-be-written.patch
+ice-fix-and-consolidate-logging-of-nvm-firmware-vers.patch
+ice-update-unit-load-status-bitmask-to-check-after-r.patch
+ice-use-ice_pf_to_dev.patch
+ice-use-correct-netif-error-function.patch
+io-wq-don-t-call-kxalloc_node-with-non-online-node.patch
+cifs-fix-mode-output-in-debugging-statements.patch
+bcache-ignore-pending-signals-when-creating-gc-and-a.patch
+cfg80211-add-missing-policy-for-nl80211_attr_status_.patch
+mac80211-fix-wrong-160-80-80-mhz-setting.patch
+net-hns3-add-management-table-after-imp-reset.patch
+net-hns3-fix-vf-bandwidth-does-not-take-effect-in-so.patch
+net-hns3-fix-a-copying-ipv6-address-error-in-hclge_f.patch
+nvme-tcp-fix-bug-on-double-requeue-when-send-fails.patch
+nvme-prevent-warning-triggered-by-nvme_stop_keep_ali.patch
+nvme-pci-move-cqe-check-after-device-shutdown.patch
--- /dev/null
+From f26998fe02afcaa89195bd730a3f6e49eb970699 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 3 Feb 2020 15:31:14 +0100
+Subject: soc/tegra: fuse: Fix build with Tegra194 configuration
+
+From: Thierry Reding <treding@nvidia.com>
+
+[ Upstream commit 6f4ecbe284df5f22e386a640d9a4b32cede62030 ]
+
+If only Tegra194 support is enabled, the tegra30_fuse_read() and
+tegra30_fuse_init() functions are not declared, causing a build failure.
+Add Tegra194 to the preprocessor guard to make sure these functions are
+available for Tegra194-only builds as well.
+
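+A stand-alone sketch of the guard pattern and its failure mode; the
+CONFIG_SOC_A/CONFIG_SOC_B macros below are placeholders, not the real
+Kconfig symbols:
+
+  #include <stdio.h>
+
+  #define CONFIG_SOC_A 1
+  /* #define CONFIG_SOC_B 1 */
+
+  #if defined(CONFIG_SOC_A) || defined(CONFIG_SOC_B)
+  static int soc_read_early(void)
+  {
+          return 42;
+  }
+  #endif
+
+  int main(void)
+  {
+          /*
+           * If neither CONFIG_SOC_A nor CONFIG_SOC_B were defined, this
+           * call would fail to compile, which is the Tegra194-only failure
+           * mode the patch fixes by adding one more || term to the guard.
+           */
+          printf("%d\n", soc_read_early());
+          return 0;
+  }
+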
+Link: https://lore.kernel.org/r/20200203143114.3967295-1-thierry.reding@gmail.com
+Reported-by: kbuild test robot <lkp@intel.com>
+Signed-off-by: Thierry Reding <treding@nvidia.com>
+Signed-off-by: Olof Johansson <olof@lixom.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/soc/tegra/fuse/fuse-tegra30.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/soc/tegra/fuse/fuse-tegra30.c b/drivers/soc/tegra/fuse/fuse-tegra30.c
+index b8daaf5b7291b..efd158b4607cb 100644
+--- a/drivers/soc/tegra/fuse/fuse-tegra30.c
++++ b/drivers/soc/tegra/fuse/fuse-tegra30.c
+@@ -36,7 +36,8 @@
+ defined(CONFIG_ARCH_TEGRA_124_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_132_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_210_SOC) || \
+- defined(CONFIG_ARCH_TEGRA_186_SOC)
++ defined(CONFIG_ARCH_TEGRA_186_SOC) || \
++ defined(CONFIG_ARCH_TEGRA_194_SOC)
+ static u32 tegra30_fuse_read_early(struct tegra_fuse *fuse, unsigned int offset)
+ {
+ if (WARN_ON(!fuse->base))
+--
+2.20.1
+
--- /dev/null
+From e32d2984f1a071bf4d2ca9fcec2c644fcfa75924 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 11 Jan 2020 04:53:39 -0500
+Subject: timers/nohz: Update NOHZ load in remote tick
+
+From: Peter Zijlstra (Intel) <peterz@infradead.org>
+
+[ Upstream commit ebc0f83c78a2d26384401ecf2d2fa48063c0ee27 ]
+
+The way loadavg is tracked during nohz only pays attention to the load
+upon entering nohz. This can be particularly noticeable if full nohz is
+entered while non-idle, and then the cpu goes idle and stays that way for
+a long time.
+
+Use the remote tick to ensure that full nohz cpus report their deltas
+within a reasonable time.
+
+[ swood: Added changelog and removed recheck of stopped tick. ]
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Scott Wood <swood@redhat.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Link: https://lkml.kernel.org/r/1578736419-14628-3-git-send-email-swood@redhat.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/sched/nohz.h | 2 ++
+ kernel/sched/core.c | 4 +++-
+ kernel/sched/loadavg.c | 33 +++++++++++++++++++++++----------
+ 3 files changed, 28 insertions(+), 11 deletions(-)
+
+diff --git a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h
+index 1abe91ff6e4a2..6d67e9a5af6bb 100644
+--- a/include/linux/sched/nohz.h
++++ b/include/linux/sched/nohz.h
+@@ -15,9 +15,11 @@ static inline void nohz_balance_enter_idle(int cpu) { }
+
+ #ifdef CONFIG_NO_HZ_COMMON
+ void calc_load_nohz_start(void);
++void calc_load_nohz_remote(struct rq *rq);
+ void calc_load_nohz_stop(void);
+ #else
+ static inline void calc_load_nohz_start(void) { }
++static inline void calc_load_nohz_remote(struct rq *rq) { }
+ static inline void calc_load_nohz_stop(void) { }
+ #endif /* CONFIG_NO_HZ_COMMON */
+
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 3cb879f4eb9c6..65ed821335dd5 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -3677,6 +3677,7 @@ static void sched_tick_remote(struct work_struct *work)
+ if (cpu_is_offline(cpu))
+ goto out_unlock;
+
++ curr = rq->curr;
+ update_rq_clock(rq);
+
+ if (!is_idle_task(curr)) {
+@@ -3689,10 +3690,11 @@ static void sched_tick_remote(struct work_struct *work)
+ }
+ curr->sched_class->task_tick(rq, curr, 0);
+
++ calc_load_nohz_remote(rq);
+ out_unlock:
+ rq_unlock_irq(rq, &rf);
+-
+ out_requeue:
++
+ /*
+ * Run the remote tick once per second (1Hz). This arbitrary
+ * frequency is large enough to avoid overload but short enough
+diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c
+index 28a516575c181..de22da666ac73 100644
+--- a/kernel/sched/loadavg.c
++++ b/kernel/sched/loadavg.c
+@@ -231,16 +231,11 @@ static inline int calc_load_read_idx(void)
+ return calc_load_idx & 1;
+ }
+
+-void calc_load_nohz_start(void)
++static void calc_load_nohz_fold(struct rq *rq)
+ {
+- struct rq *this_rq = this_rq();
+ long delta;
+
+- /*
+- * We're going into NO_HZ mode, if there's any pending delta, fold it
+- * into the pending NO_HZ delta.
+- */
+- delta = calc_load_fold_active(this_rq, 0);
++ delta = calc_load_fold_active(rq, 0);
+ if (delta) {
+ int idx = calc_load_write_idx();
+
+@@ -248,6 +243,24 @@ void calc_load_nohz_start(void)
+ }
+ }
+
++void calc_load_nohz_start(void)
++{
++ /*
++ * We're going into NO_HZ mode, if there's any pending delta, fold it
++ * into the pending NO_HZ delta.
++ */
++ calc_load_nohz_fold(this_rq());
++}
++
++/*
++ * Keep track of the load for NOHZ_FULL, must be called between
++ * calc_load_nohz_{start,stop}().
++ */
++void calc_load_nohz_remote(struct rq *rq)
++{
++ calc_load_nohz_fold(rq);
++}
++
+ void calc_load_nohz_stop(void)
+ {
+ struct rq *this_rq = this_rq();
+@@ -268,7 +281,7 @@ void calc_load_nohz_stop(void)
+ this_rq->calc_load_update += LOAD_FREQ;
+ }
+
+-static long calc_load_nohz_fold(void)
++static long calc_load_nohz_read(void)
+ {
+ int idx = calc_load_read_idx();
+ long delta = 0;
+@@ -323,7 +336,7 @@ static void calc_global_nohz(void)
+ }
+ #else /* !CONFIG_NO_HZ_COMMON */
+
+-static inline long calc_load_nohz_fold(void) { return 0; }
++static inline long calc_load_nohz_read(void) { return 0; }
+ static inline void calc_global_nohz(void) { }
+
+ #endif /* CONFIG_NO_HZ_COMMON */
+@@ -346,7 +359,7 @@ void calc_global_load(unsigned long ticks)
+ /*
+ * Fold the 'old' NO_HZ-delta to include all NO_HZ CPUs.
+ */
+- delta = calc_load_nohz_fold();
++ delta = calc_load_nohz_read();
+ if (delta)
+ atomic_long_add(delta, &calc_load_tasks);
+
+--
+2.20.1
+