--- /dev/null
+From 23160ab015bc17c7b8a1d636f934745203f8e677 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Dec 2019 16:23:25 -0500
+Subject: EDAC: skx_common: downgrade message importance on missing PCI device
+
+From: Aristeu Rozanski <aris@redhat.com>
+
+[ Upstream commit 854bb48018d5da261d438b2232fa683bdb553979 ]
+
+Both the skx_edac and i10nm_edac drivers are loaded based on a matching
+CPU being available, which leads to the modules being loaded
+automatically in virtual machines as well. There they will fail due to
+the missing PCI devices. In both drivers, the first function to make use
+of the PCI devices, skx_get_hi_lo(), will simply print
+
+ EDAC skx: Can't get tolm/tohm
+
+for each CPU core, which is noisy. Downgrade it to a debug message.
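+
+For reference, the autoload is driven by CPU-model matching rather than
+PCI probing. A minimal sketch of that pattern (the table is abbreviated
+and skx_setup() is a hypothetical stand-in; see skx_base.c and
+i10nm_base.c for the real tables and init paths):
+
+  static const struct x86_cpu_id skx_cpuids[] = {
+          { X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_X, 0, 0 },
+          { }
+  };
+  /* udev autoloads the module whenever the CPU model matches */
+  MODULE_DEVICE_TABLE(x86cpu, skx_cpuids);
+
+  static int __init skx_init(void)
+  {
+          /* a guest exposes the same CPU model, so this matches too */
+          if (!x86_match_cpu(skx_cpuids))
+                  return -ENODEV;
+          /* ...but the uncore PCI devices are absent in a VM, so the
+           * first PCI lookup, skx_get_hi_lo(), fails with -ENODEV */
+          return skx_setup();
+  }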
+
+Signed-off-by: Aristeu Rozanski <aris@redhat.com>
+Signed-off-by: Tony Luck <tony.luck@intel.com>
+Link: https://lore.kernel.org/r/20191204212325.c4k47p5hrnn3vpb5@redhat.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/edac/skx_common.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
+index d8ff63d91b860..a04349c6d17ef 100644
+--- a/drivers/edac/skx_common.c
++++ b/drivers/edac/skx_common.c
+@@ -235,7 +235,7 @@ int skx_get_hi_lo(unsigned int did, int off[], u64 *tolm, u64 *tohm)
+
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL, did, NULL);
+ if (!pdev) {
+- skx_printk(KERN_ERR, "Can't get tolm/tohm\n");
++ edac_dbg(2, "Can't get tolm/tohm\n");
+ return -ENODEV;
+ }
+
+--
+2.20.1
+
--- /dev/null
+From 1f7abd4a57f440eacab5f3508021171cf87b5c6e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 28 Feb 2020 15:20:18 -0700
+Subject: io_uring: grab ->fs as part of async offload
+
+From: Jens Axboe <axboe@kernel.dk>
+
+[ Upstream commits 9392a27d88b9 and ff002b30181d ]
+
+Ensure that the async work grabs ->fs from the queueing task if the
+punted command needs to do lookups.
+
+We don't have these two commits in 5.4-stable:
+
+ff002b30181d30cdfbca316dadd099c3ca0d739c
+9392a27d88b9707145d713654eb26f0c29789e50
+
+because they don't apply with the rework that was done in how io_uring
+handles offload. Since there's no io-wq in 5.4, it doesn't make sense to
+do two patches. I'm attaching my port of the two for 5.4-stable; it's
+been tested. Please queue it up for the next 5.4-stable, thanks!
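+
+In outline, the lifecycle this port gives the borrowed fs_struct (a
+sketch of the hunks below, not additional code):
+
+  /* submission: take a reference on the submitter's ->fs, unless the
+   * task is mid-exec and the fs_struct is about to be replaced */
+  spin_lock(&current->fs->lock);
+  if (!current->fs->in_exec) {
+          req->fs = current->fs;
+          req->fs->users++;
+  }
+  spin_unlock(&current->fs->lock);
+
+  /* async worker: adopt it so cwd/root-relative lookups resolve */
+  task_lock(current);
+  current->fs = req->fs;
+  task_unlock(current);
+
+  /* completion: drop the reference; only the last user frees, and the
+   * free happens outside the lock, which lives inside the fs_struct */
+  struct fs_struct *fs = req->fs;
+  spin_lock(&req->fs->lock);
+  if (--fs->users)
+          fs = NULL;
+  spin_unlock(&req->fs->lock);
+  if (fs)
+          free_fs_struct(fs);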
+
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/io_uring.c | 46 ++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 46 insertions(+)
+
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index ed9a551882cf3..f34a8f7eee5d1 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -71,6 +71,7 @@
+ #include <linux/sizes.h>
+ #include <linux/hugetlb.h>
+ #include <linux/highmem.h>
++#include <linux/fs_struct.h>
+
+ #include <uapi/linux/io_uring.h>
+
+@@ -334,6 +335,8 @@ struct io_kiocb {
+ u32 result;
+ u32 sequence;
+
++ struct fs_struct *fs;
++
+ struct work_struct work;
+ };
+
+@@ -651,6 +654,7 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
+ /* one is dropped after submission, the other at completion */
+ refcount_set(&req->refs, 2);
+ req->result = 0;
++ req->fs = NULL;
+ return req;
+ out:
+ percpu_ref_put(&ctx->refs);
+@@ -1663,6 +1667,16 @@ static int io_send_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+ ret = -EINTR;
+ }
+
++ if (req->fs) {
++ struct fs_struct *fs = req->fs;
++
++ spin_lock(&req->fs->lock);
++ if (--fs->users)
++ fs = NULL;
++ spin_unlock(&req->fs->lock);
++ if (fs)
++ free_fs_struct(fs);
++ }
+ io_cqring_add_event(req->ctx, sqe->user_data, ret);
+ io_put_req(req);
+ return 0;
+@@ -2159,6 +2173,7 @@ static inline bool io_sqe_needs_user(const struct io_uring_sqe *sqe)
+ static void io_sq_wq_submit_work(struct work_struct *work)
+ {
+ struct io_kiocb *req = container_of(work, struct io_kiocb, work);
++ struct fs_struct *old_fs_struct = current->fs;
+ struct io_ring_ctx *ctx = req->ctx;
+ struct mm_struct *cur_mm = NULL;
+ struct async_list *async_list;
+@@ -2178,6 +2193,15 @@ restart:
+ /* Ensure we clear previously set non-block flag */
+ req->rw.ki_flags &= ~IOCB_NOWAIT;
+
++ if (req->fs != current->fs && current->fs != old_fs_struct) {
++ task_lock(current);
++ if (req->fs)
++ current->fs = req->fs;
++ else
++ current->fs = old_fs_struct;
++ task_unlock(current);
++ }
++
+ ret = 0;
+ if (io_sqe_needs_user(sqe) && !cur_mm) {
+ if (!mmget_not_zero(ctx->sqo_mm)) {
+@@ -2276,6 +2300,11 @@ out:
+ mmput(cur_mm);
+ }
+ revert_creds(old_cred);
++ if (old_fs_struct) {
++ task_lock(current);
++ current->fs = old_fs_struct;
++ task_unlock(current);
++ }
+ }
+
+ /*
+@@ -2503,6 +2532,23 @@ err:
+
+ req->user_data = s->sqe->user_data;
+
++#if defined(CONFIG_NET)
++ switch (READ_ONCE(s->sqe->opcode)) {
++ case IORING_OP_SENDMSG:
++ case IORING_OP_RECVMSG:
++ spin_lock(&current->fs->lock);
++ if (!current->fs->in_exec) {
++ req->fs = current->fs;
++ req->fs->users++;
++ }
++ spin_unlock(&current->fs->lock);
++ if (!req->fs) {
++ ret = -EAGAIN;
++ goto err_req;
++ }
++ }
++#endif
++
+ /*
+ * If we already have a head request, queue this one for async
+ * submittal once the head completes. If we don't have a head but
+--
+2.20.1
+