--- /dev/null
+From ebf339eb9bc20920db50a3c76caba03c7018294f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 10 Aug 2020 11:42:27 -0400
+Subject: btrfs: fix potential deadlock in the search ioctl
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+[ Upstream commit a48b73eca4ceb9b8a4b97f290a065335dbcd8a04 ]
+
+With the conversion of the tree locks to rwsem I got the following
+lockdep splat:
+
+ ======================================================
+ WARNING: possible circular locking dependency detected
+ 5.8.0-rc7-00165-g04ec4da5f45f-dirty #922 Not tainted
+ ------------------------------------------------------
+ compsize/11122 is trying to acquire lock:
+ ffff889fabca8768 (&mm->mmap_lock#2){++++}-{3:3}, at: __might_fault+0x3e/0x90
+
+ but task is already holding lock:
+ ffff889fe720fe40 (btrfs-fs-00){++++}-{3:3}, at: __btrfs_tree_read_lock+0x39/0x180
+
+ which lock already depends on the new lock.
+
+ the existing dependency chain (in reverse order) is:
+
+ -> #2 (btrfs-fs-00){++++}-{3:3}:
+ down_write_nested+0x3b/0x70
+ __btrfs_tree_lock+0x24/0x120
+ btrfs_search_slot+0x756/0x990
+ btrfs_lookup_inode+0x3a/0xb4
+ __btrfs_update_delayed_inode+0x93/0x270
+ btrfs_async_run_delayed_root+0x168/0x230
+ btrfs_work_helper+0xd4/0x570
+ process_one_work+0x2ad/0x5f0
+ worker_thread+0x3a/0x3d0
+ kthread+0x133/0x150
+ ret_from_fork+0x1f/0x30
+
+ -> #1 (&delayed_node->mutex){+.+.}-{3:3}:
+ __mutex_lock+0x9f/0x930
+ btrfs_delayed_update_inode+0x50/0x440
+ btrfs_update_inode+0x8a/0xf0
+ btrfs_dirty_inode+0x5b/0xd0
+ touch_atime+0xa1/0xd0
+ btrfs_file_mmap+0x3f/0x60
+ mmap_region+0x3a4/0x640
+ do_mmap+0x376/0x580
+ vm_mmap_pgoff+0xd5/0x120
+ ksys_mmap_pgoff+0x193/0x230
+ do_syscall_64+0x50/0x90
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+ -> #0 (&mm->mmap_lock#2){++++}-{3:3}:
+ __lock_acquire+0x1272/0x2310
+ lock_acquire+0x9e/0x360
+ __might_fault+0x68/0x90
+ _copy_to_user+0x1e/0x80
+ copy_to_sk.isra.32+0x121/0x300
+ search_ioctl+0x106/0x200
+ btrfs_ioctl_tree_search_v2+0x7b/0xf0
+ btrfs_ioctl+0x106f/0x30a0
+ ksys_ioctl+0x83/0xc0
+ __x64_sys_ioctl+0x16/0x20
+ do_syscall_64+0x50/0x90
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+ other info that might help us debug this:
+
+ Chain exists of:
+ &mm->mmap_lock#2 --> &delayed_node->mutex --> btrfs-fs-00
+
+ Possible unsafe locking scenario:
+
+ CPU0 CPU1
+ ---- ----
+ lock(btrfs-fs-00);
+ lock(&delayed_node->mutex);
+ lock(btrfs-fs-00);
+ lock(&mm->mmap_lock#2);
+
+ *** DEADLOCK ***
+
+ 1 lock held by compsize/11122:
+ #0: ffff889fe720fe40 (btrfs-fs-00){++++}-{3:3}, at: __btrfs_tree_read_lock+0x39/0x180
+
+ stack backtrace:
+ CPU: 17 PID: 11122 Comm: compsize Kdump: loaded Not tainted 5.8.0-rc7-00165-g04ec4da5f45f-dirty #922
+ Hardware name: Quanta Tioga Pass Single Side 01-0030993006/Tioga Pass Single Side, BIOS F08_3A18 12/20/2018
+ Call Trace:
+ dump_stack+0x78/0xa0
+ check_noncircular+0x165/0x180
+ __lock_acquire+0x1272/0x2310
+ lock_acquire+0x9e/0x360
+ ? __might_fault+0x3e/0x90
+ ? find_held_lock+0x72/0x90
+ __might_fault+0x68/0x90
+ ? __might_fault+0x3e/0x90
+ _copy_to_user+0x1e/0x80
+ copy_to_sk.isra.32+0x121/0x300
+ ? btrfs_search_forward+0x2a6/0x360
+ search_ioctl+0x106/0x200
+ btrfs_ioctl_tree_search_v2+0x7b/0xf0
+ btrfs_ioctl+0x106f/0x30a0
+ ? __do_sys_newfstat+0x5a/0x70
+ ? ksys_ioctl+0x83/0xc0
+ ksys_ioctl+0x83/0xc0
+ __x64_sys_ioctl+0x16/0x20
+ do_syscall_64+0x50/0x90
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+The problem is we're doing a copy_to_user() while holding tree locks,
+which can deadlock if we have to do a page fault for the copy_to_user().
+This exists even without my locking changes, so it needs to be fixed.
+Rework the search ioctl to pre-fault the destination pages and then use
+copy_to_user_nofault for the copying.
+
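+An illustrative sketch of the resulting pattern, condensed from the hunks
+below:
+
+    while (1) {
+        /* Fault the user pages in while no tree locks are held. */
+        ret = fault_in_pages_writeable(ubuf, *buf_size - sk_offset);
+        if (ret)
+            break;
+
+        ret = btrfs_search_forward(root, &key, path, sk->min_transid);
+        ...
+        /*
+         * copy_to_sk() now only uses probe_user_write(), which cannot
+         * fault; if it would have faulted it returns 0 so that we loop
+         * back and fault the pages in above.
+         */
+    }
+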
+CC: stable@vger.kernel.org # 4.4+
+Reviewed-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/extent_io.c | 8 ++++----
+ fs/btrfs/extent_io.h | 6 +++---
+ fs/btrfs/ioctl.c | 27 ++++++++++++++++++++-------
+ 3 files changed, 27 insertions(+), 14 deletions(-)
+
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index fbcd18d96c524..82d597b16152c 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -5377,9 +5377,9 @@ void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
+ }
+ }
+
+-int read_extent_buffer_to_user(const struct extent_buffer *eb,
+- void __user *dstv,
+- unsigned long start, unsigned long len)
++int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
++ void __user *dstv,
++ unsigned long start, unsigned long len)
+ {
+ size_t cur;
+ size_t offset;
+@@ -5400,7 +5400,7 @@ int read_extent_buffer_to_user(const struct extent_buffer *eb,
+
+ cur = min(len, (PAGE_SIZE - offset));
+ kaddr = page_address(page);
+- if (copy_to_user(dst, kaddr + offset, cur)) {
++ if (probe_user_write(dst, kaddr + offset, cur)) {
+ ret = -EFAULT;
+ break;
+ }
+diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
+index a3598b24441e1..d5089cadd7c49 100644
+--- a/fs/btrfs/extent_io.h
++++ b/fs/btrfs/extent_io.h
+@@ -448,9 +448,9 @@ int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
+ void read_extent_buffer(const struct extent_buffer *eb, void *dst,
+ unsigned long start,
+ unsigned long len);
+-int read_extent_buffer_to_user(const struct extent_buffer *eb,
+- void __user *dst, unsigned long start,
+- unsigned long len);
++int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
++ void __user *dst, unsigned long start,
++ unsigned long len);
+ void write_extent_buffer_fsid(struct extent_buffer *eb, const void *src);
+ void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb,
+ const void *src);
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index a5ae02bf3652b..85990755edd90 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -2079,9 +2079,14 @@ static noinline int copy_to_sk(struct btrfs_path *path,
+ sh.len = item_len;
+ sh.transid = found_transid;
+
+- /* copy search result header */
+- if (copy_to_user(ubuf + *sk_offset, &sh, sizeof(sh))) {
+- ret = -EFAULT;
++ /*
++ * Copy search result header. If we fault then loop again so we
++ * can fault in the pages and -EFAULT there if there's a
++ * problem. Otherwise we'll fault and then copy the buffer in
++ * properly this next time through
++ */
++ if (probe_user_write(ubuf + *sk_offset, &sh, sizeof(sh))) {
++ ret = 0;
+ goto out;
+ }
+
+@@ -2089,10 +2094,14 @@ static noinline int copy_to_sk(struct btrfs_path *path,
+
+ if (item_len) {
+ char __user *up = ubuf + *sk_offset;
+- /* copy the item */
+- if (read_extent_buffer_to_user(leaf, up,
+- item_off, item_len)) {
+- ret = -EFAULT;
++ /*
++ * Copy the item, same behavior as above, but reset the
++ * * sk_offset so we copy the full thing again.
++ */
++ if (read_extent_buffer_to_user_nofault(leaf, up,
++ item_off, item_len)) {
++ ret = 0;
++ *sk_offset -= sizeof(sh);
+ goto out;
+ }
+
+@@ -2180,6 +2189,10 @@ static noinline int search_ioctl(struct inode *inode,
+ key.offset = sk->min_offset;
+
+ while (1) {
++ ret = fault_in_pages_writeable(ubuf, *buf_size - sk_offset);
++ if (ret)
++ break;
++
+ ret = btrfs_search_forward(root, &key, path, sk->min_transid);
+ if (ret != 0) {
+ if (ret > 0)
+--
+2.25.1
+
--- /dev/null
+From 462df8eb3c9312fd14971b928189cc5de4cd6442 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 Aug 2018 18:26:54 +0300
+Subject: btrfs: Remove extraneous extent_buffer_get from tree_mod_log_rewind
+
+From: Nikolay Borisov <nborisov@suse.com>
+
+[ Upstream commit 24cee18a1c1d7c731ea5987e0c99daea22ae7f4a ]
+
+When a rewound buffer is created it already has a ref count of 1 and the
+dummy flag set. Then another ref is taken, bumping the count to 2.
+Finally, when this buffer is released from btrfs_release_path, the extra
+reference is decremented by the special handling code in
+free_extent_buffer.
+
+However, this special code is in fact redundant since a ref count of 1 is
+still correct, as the buffer is only accessed via the btrfs_path struct.
+This paves the way for removing the special handling in
+free_extent_buffer.
+
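+An illustrative sketch of the reference flow after this change:
+
+    eb_rewin = btrfs_clone_extent_buffer(eb);    /* refs == 1, dummy flag set */
+    ...
+    /*
+     * No extent_buffer_get(eb_rewin) here: the single reference is
+     * handed over to the btrfs_path, so the matching special-case
+     * decrement in free_extent_buffer() becomes unnecessary.
+     */
+    btrfs_tree_read_lock(eb_rewin);
+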
+Signed-off-by: Nikolay Borisov <nborisov@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/ctree.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index 6c13d7d83f5ca..12b1a1c80c1b3 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -1347,7 +1347,6 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
+ btrfs_tree_read_unlock_blocking(eb);
+ free_extent_buffer(eb);
+
+- extent_buffer_get(eb_rewin);
+ btrfs_tree_read_lock(eb_rewin);
+ __tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
+ WARN_ON(btrfs_header_nritems(eb_rewin) >
+--
+2.25.1
+
--- /dev/null
+From 222c87b7b31ca71e2c789301fe585d976812a5d5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 Aug 2018 18:26:53 +0300
+Subject: btrfs: Remove redundant extent_buffer_get in get_old_root
+
+From: Nikolay Borisov <nborisov@suse.com>
+
+[ Upstream commit 6c122e2a0c515cfb3f3a9cefb5dad4cb62109c78 ]
+
+get_old_root is used only by btrfs_search_old_slot to initialise the
+path structure. The old root is always a cloned buffer (either via alloc
+dummy or via btrfs_clone_extent_buffer) and its reference count is 2: 1
+from allocation, 1 from the extent_buffer_get call in get_old_root.
+
+This latter explicit ref count acquire operation is in fact unnecessary
+since the semantic is such that the newly allocated buffer is handed
+over to the btrfs_path for lifetime management. Considering this, just
+remove the extra extent_buffer_get in get_old_root.
+
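+Schematically (an illustrative sketch):
+
+    eb = btrfs_clone_extent_buffer(eb_root);    /* refs == 1 */
+    free_extent_buffer(eb_root);
+    /* The single reference is handed over to the btrfs_path for
+     * lifetime management; no extra extent_buffer_get() is needed. */
+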
+Signed-off-by: Nikolay Borisov <nborisov@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/ctree.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index c9943d70e2cb2..6c13d7d83f5ca 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -1421,7 +1421,6 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
+
+ if (!eb)
+ return NULL;
+- extent_buffer_get(eb);
+ btrfs_tree_read_lock(eb);
+ if (old_root) {
+ btrfs_set_header_bytenr(eb, eb->start);
+--
+2.25.1
+
--- /dev/null
+From 04f85da2f18c5ce8532e25f955a3bbcf006b9540 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 10 Aug 2020 11:42:31 -0400
+Subject: btrfs: set the lockdep class for log tree extent buffers
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+[ Upstream commit d3beaa253fd6fa40b8b18a216398e6e5376a9d21 ]
+
+These are special extent buffers that get rewound in order to look up
+the state of the tree at a specific point in time. As such they do not
+go through the normal initialization paths that set their lockdep class,
+so handle them appropriately when they are created and before they are
+locked.
+
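+Both call sites now follow the same pattern (condensed from the diff):
+
+    btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb), eb,
+                                   btrfs_header_level(eb));
+    btrfs_tree_read_lock(eb);    /* lockdep class is set before locking */
+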
+CC: stable@vger.kernel.org # 4.4+
+Reviewed-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/ctree.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index 12b1a1c80c1b3..8007b6aacec60 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -1347,6 +1347,8 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
+ btrfs_tree_read_unlock_blocking(eb);
+ free_extent_buffer(eb);
+
++ btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb_rewin),
++ eb_rewin, btrfs_header_level(eb_rewin));
+ btrfs_tree_read_lock(eb_rewin);
+ __tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
+ WARN_ON(btrfs_header_nritems(eb_rewin) >
+@@ -1420,7 +1422,6 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
+
+ if (!eb)
+ return NULL;
+- btrfs_tree_read_lock(eb);
+ if (old_root) {
+ btrfs_set_header_bytenr(eb, eb->start);
+ btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
+@@ -1428,6 +1429,9 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
+ btrfs_set_header_level(eb, old_root->level);
+ btrfs_set_header_generation(eb, old_generation);
+ }
++ btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb), eb,
++ btrfs_header_level(eb));
++ btrfs_tree_read_lock(eb);
+ if (tm)
+ __tree_mod_log_rewind(fs_info, eb, time_seq, tm);
+ else
+--
+2.25.1
+
--- /dev/null
+From 1b2e8d9f3e87ba6a7e7c8823001ff503770e0754 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 9 Oct 2019 11:07:18 +0200
+Subject: net: usb: qmi_wwan: add Telit 0x1050 composition
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Daniele Palmas <dnlplm@gmail.com>
+
+[ Upstream commit e0ae2c578d3909e60e9448207f5d83f785f1129f ]
+
+This patch adds support for the Telit FN980 0x1050 composition
+
+0x1050: tty, adb, rmnet, tty, tty, tty, tty
+
+Signed-off-by: Daniele Palmas <dnlplm@gmail.com>
+Acked-by: Bjørn Mork <bjorn@mork.no>
+Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/usb/qmi_wwan.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index ea3c891186147..41fbb8669845e 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1262,6 +1262,7 @@ static const struct usb_device_id products[] = {
+ {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
++ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */
+ {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
+ {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
+ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
+--
+2.25.1
+
ext2-don-t-update-mtime-on-cow-faults.patch
xfs-don-t-update-mtime-on-cow-faults.patch
btrfs-drop-path-before-adding-new-uuid-tree-entry.patch
+vfio-type1-support-faulting-pfnmap-vmas.patch
+vfio-pci-fault-mmaps-to-enable-vma-tracking.patch
+vfio-pci-invalidate-mmaps-and-block-mmio-access-on-d.patch
+btrfs-remove-redundant-extent_buffer_get-in-get_old_.patch
+btrfs-remove-extraneous-extent_buffer_get-from-tree_.patch
+btrfs-set-the-lockdep-class-for-log-tree-extent-buff.patch
+uaccess-add-non-pagefault-user-space-read-functions.patch
+uaccess-add-non-pagefault-user-space-write-function.patch
+btrfs-fix-potential-deadlock-in-the-search-ioctl.patch
+net-usb-qmi_wwan-add-telit-0x1050-composition.patch
--- /dev/null
+From 500b654b03234c9c40076797287c0ef8e9022be0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 May 2019 14:38:18 +0900
+Subject: uaccess: Add non-pagefault user-space read functions
+
+From: Masami Hiramatsu <mhiramat@kernel.org>
+
+[ Upstream commit 3d7081822f7f9eab867d9bcc8fd635208ec438e0 ]
+
+Add probe_user_read(), strncpy_from_unsafe_user() and
+strnlen_unsafe_user(), which allow the caller to access user-space
+in IRQ context.
+
+The current probe_kernel_read() and strncpy_from_unsafe() are
+not usable for user-space memory, because they set
+KERNEL_DS while accessing the data. On some architectures,
+user address space and kernel address space can co-exist,
+but on others they can not. In that case, setting KERNEL_DS
+means the given address is treated as a kernel address.
+Also, strnlen_user() is only available from user context,
+since it can sleep if page faults are enabled.
+
+To access user-space memory without page faults, we need
+these new functions, which set USER_DS while accessing
+the data.
+
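+For example, a handler running with page faults disabled can now do
+(an illustrative sketch; user_ptr is a placeholder __user pointer):
+
+    u64 val;
+
+    if (probe_user_read(&val, user_ptr, sizeof(val)))
+        return -EFAULT;    /* fault was handled, no sleeping page fault */
+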
+Link: http://lkml.kernel.org/r/155789869802.26965.4940338412595759063.stgit@devnote2
+
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/uaccess.h | 14 +++++
+ mm/maccess.c | 122 ++++++++++++++++++++++++++++++++++++++--
+ 2 files changed, 130 insertions(+), 6 deletions(-)
+
+diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
+index d55b68b113de1..db88f36540e9e 100644
+--- a/include/linux/uaccess.h
++++ b/include/linux/uaccess.h
+@@ -242,6 +242,17 @@ static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
+ extern long probe_kernel_read(void *dst, const void *src, size_t size);
+ extern long __probe_kernel_read(void *dst, const void *src, size_t size);
+
++/*
++ * probe_user_read(): safely attempt to read from a location in user space
++ * @dst: pointer to the buffer that shall take the data
++ * @src: address to read from
++ * @size: size of the data chunk
++ *
++ * Safely read from address @src to the buffer at @dst. If a kernel fault
++ * happens, handle that and return -EFAULT.
++ */
++extern long probe_user_read(void *dst, const void __user *src, size_t size);
++
+ /*
+ * probe_kernel_write(): safely attempt to write to a location
+ * @dst: address to write to
+@@ -255,6 +266,9 @@ extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
+ extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
+
+ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
++extern long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr,
++ long count);
++extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count);
+
+ /**
+ * probe_kernel_address(): safely attempt to read from a location
+diff --git a/mm/maccess.c b/mm/maccess.c
+index ec00be51a24fd..80d70cb5cc0bd 100644
+--- a/mm/maccess.c
++++ b/mm/maccess.c
+@@ -5,8 +5,20 @@
+ #include <linux/mm.h>
+ #include <linux/uaccess.h>
+
++static __always_inline long
++probe_read_common(void *dst, const void __user *src, size_t size)
++{
++ long ret;
++
++ pagefault_disable();
++ ret = __copy_from_user_inatomic(dst, src, size);
++ pagefault_enable();
++
++ return ret ? -EFAULT : 0;
++}
++
+ /**
+- * probe_kernel_read(): safely attempt to read from a location
++ * probe_kernel_read(): safely attempt to read from a kernel-space location
+ * @dst: pointer to the buffer that shall take the data
+ * @src: address to read from
+ * @size: size of the data chunk
+@@ -29,16 +41,40 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
+ mm_segment_t old_fs = get_fs();
+
+ set_fs(KERNEL_DS);
+- pagefault_disable();
+- ret = __copy_from_user_inatomic(dst,
+- (__force const void __user *)src, size);
+- pagefault_enable();
++ ret = probe_read_common(dst, (__force const void __user *)src, size);
+ set_fs(old_fs);
+
+- return ret ? -EFAULT : 0;
++ return ret;
+ }
+ EXPORT_SYMBOL_GPL(probe_kernel_read);
+
++/**
++ * probe_user_read(): safely attempt to read from a user-space location
++ * @dst: pointer to the buffer that shall take the data
++ * @src: address to read from. This must be a user address.
++ * @size: size of the data chunk
++ *
++ * Safely read from user address @src to the buffer at @dst. If a kernel fault
++ * happens, handle that and return -EFAULT.
++ */
++
++long __weak probe_user_read(void *dst, const void __user *src, size_t size)
++ __attribute__((alias("__probe_user_read")));
++
++long __probe_user_read(void *dst, const void __user *src, size_t size)
++{
++ long ret = -EFAULT;
++ mm_segment_t old_fs = get_fs();
++
++ set_fs(USER_DS);
++ if (access_ok(VERIFY_READ, src, size))
++ ret = probe_read_common(dst, src, size);
++ set_fs(old_fs);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(probe_user_read);
++
+ /**
+ * probe_kernel_write(): safely attempt to write to a location
+ * @dst: address to write to
+@@ -66,6 +102,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
+ }
+ EXPORT_SYMBOL_GPL(probe_kernel_write);
+
++
+ /**
+ * strncpy_from_unsafe: - Copy a NUL terminated string from unsafe address.
+ * @dst: Destination address, in kernel space. This buffer must be at
+@@ -105,3 +142,76 @@ long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count)
+
+ return ret ? -EFAULT : src - unsafe_addr;
+ }
++
++/**
++ * strncpy_from_unsafe_user: - Copy a NUL terminated string from unsafe user
++ * address.
++ * @dst: Destination address, in kernel space. This buffer must be at
++ * least @count bytes long.
++ * @unsafe_addr: Unsafe user address.
++ * @count: Maximum number of bytes to copy, including the trailing NUL.
++ *
++ * Copies a NUL-terminated string from unsafe user address to kernel buffer.
++ *
++ * On success, returns the length of the string INCLUDING the trailing NUL.
++ *
++ * If access fails, returns -EFAULT (some data may have been copied
++ * and the trailing NUL added).
++ *
++ * If @count is smaller than the length of the string, copies @count-1 bytes,
++ * sets the last byte of @dst buffer to NUL and returns @count.
++ */
++long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr,
++ long count)
++{
++ mm_segment_t old_fs = get_fs();
++ long ret;
++
++ if (unlikely(count <= 0))
++ return 0;
++
++ set_fs(USER_DS);
++ pagefault_disable();
++ ret = strncpy_from_user(dst, unsafe_addr, count);
++ pagefault_enable();
++ set_fs(old_fs);
++
++ if (ret >= count) {
++ ret = count;
++ dst[ret - 1] = '\0';
++ } else if (ret > 0) {
++ ret++;
++ }
++
++ return ret;
++}
++
++/**
++ * strnlen_unsafe_user: - Get the size of a user string INCLUDING final NUL.
++ * @unsafe_addr: The string to measure.
++ * @count: Maximum count (including NUL)
++ *
++ * Get the size of a NUL-terminated string in user space without pagefault.
++ *
++ * Returns the size of the string INCLUDING the terminating NUL.
++ *
++ * If the string is too long, returns a number larger than @count. User
++ * has to check the return value against "> count".
++ * On exception (or invalid count), returns 0.
++ *
++ * Unlike strnlen_user, this can be used from IRQ handler etc. because
++ * it disables pagefaults.
++ */
++long strnlen_unsafe_user(const void __user *unsafe_addr, long count)
++{
++ mm_segment_t old_fs = get_fs();
++ int ret;
++
++ set_fs(USER_DS);
++ pagefault_disable();
++ ret = strnlen_user(unsafe_addr, count);
++ pagefault_enable();
++ set_fs(old_fs);
++
++ return ret;
++}
+--
+2.25.1
+
--- /dev/null
+From 391083d448c97dd234938f3fff8ef8421e3d36e9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 2 Nov 2019 00:17:56 +0100
+Subject: uaccess: Add non-pagefault user-space write function
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+[ Upstream commit 1d1585ca0f48fe7ed95c3571f3e4a82b2b5045dc ]
+
+Commit 3d7081822f7f ("uaccess: Add non-pagefault user-space read functions")
+missed adding a probe write function; therefore, factor out a
+probe_write_common() helper with most of the logic of probe_kernel_write()
+except setting KERNEL_DS, and add a new probe_user_write() helper so it can
+be used from the BPF side.
+
+Again, on some architectures the user address space and kernel address space
+can co-exist and overlap, so in such a case setting KERNEL_DS would mean
+that the given address is treated as being in kernel address space.
+
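+Usage mirrors probe_user_read() (an illustrative sketch; dst is a
+placeholder __user pointer):
+
+    if (probe_user_write(dst, &val, sizeof(val)))
+        return -EFAULT;    /* no page fault is taken; safe in IRQ context */
+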
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Andrii Nakryiko <andriin@fb.com>
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Link: https://lore.kernel.org/bpf/9df2542e68141bfa3addde631441ee45503856a8.1572649915.git.daniel@iogearbox.net
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/uaccess.h | 12 +++++++++++
+ mm/maccess.c | 45 +++++++++++++++++++++++++++++++++++++----
+ 2 files changed, 53 insertions(+), 4 deletions(-)
+
+diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
+index db88f36540e9e..db9b0dd0a7a3b 100644
+--- a/include/linux/uaccess.h
++++ b/include/linux/uaccess.h
+@@ -265,6 +265,18 @@ extern long probe_user_read(void *dst, const void __user *src, size_t size);
+ extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
+ extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
+
++/*
++ * probe_user_write(): safely attempt to write to a location in user space
++ * @dst: address to write to
++ * @src: pointer to the data that shall be written
++ * @size: size of the data chunk
++ *
++ * Safely write to address @dst from the buffer at @src. If a kernel fault
++ * happens, handle that and return -EFAULT.
++ */
++extern long notrace probe_user_write(void __user *dst, const void *src, size_t size);
++extern long notrace __probe_user_write(void __user *dst, const void *src, size_t size);
++
+ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
+ extern long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr,
+ long count);
+diff --git a/mm/maccess.c b/mm/maccess.c
+index 80d70cb5cc0bd..6e41ba452e5e9 100644
+--- a/mm/maccess.c
++++ b/mm/maccess.c
+@@ -17,6 +17,18 @@ probe_read_common(void *dst, const void __user *src, size_t size)
+ return ret ? -EFAULT : 0;
+ }
+
++static __always_inline long
++probe_write_common(void __user *dst, const void *src, size_t size)
++{
++ long ret;
++
++ pagefault_disable();
++ ret = __copy_to_user_inatomic(dst, src, size);
++ pagefault_enable();
++
++ return ret ? -EFAULT : 0;
++}
++
+ /**
+ * probe_kernel_read(): safely attempt to read from a kernel-space location
+ * @dst: pointer to the buffer that shall take the data
+@@ -84,6 +96,7 @@ EXPORT_SYMBOL_GPL(probe_user_read);
+ * Safely write to address @dst from the buffer at @src. If a kernel fault
+ * happens, handle that and return -EFAULT.
+ */
++
+ long __weak probe_kernel_write(void *dst, const void *src, size_t size)
+ __attribute__((alias("__probe_kernel_write")));
+
+@@ -93,15 +106,39 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
+ mm_segment_t old_fs = get_fs();
+
+ set_fs(KERNEL_DS);
+- pagefault_disable();
+- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
+- pagefault_enable();
++ ret = probe_write_common((__force void __user *)dst, src, size);
+ set_fs(old_fs);
+
+- return ret ? -EFAULT : 0;
++ return ret;
+ }
+ EXPORT_SYMBOL_GPL(probe_kernel_write);
+
++/**
++ * probe_user_write(): safely attempt to write to a user-space location
++ * @dst: address to write to
++ * @src: pointer to the data that shall be written
++ * @size: size of the data chunk
++ *
++ * Safely write to address @dst from the buffer at @src. If a kernel fault
++ * happens, handle that and return -EFAULT.
++ */
++
++long __weak probe_user_write(void __user *dst, const void *src, size_t size)
++ __attribute__((alias("__probe_user_write")));
++
++long __probe_user_write(void __user *dst, const void *src, size_t size)
++{
++ long ret = -EFAULT;
++ mm_segment_t old_fs = get_fs();
++
++ set_fs(USER_DS);
++ if (access_ok(VERIFY_WRITE, dst, size))
++ ret = probe_write_common(dst, src, size);
++ set_fs(old_fs);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(probe_user_write);
+
+ /**
+ * strncpy_from_unsafe: - Copy a NUL terminated string from unsafe address.
+--
+2.25.1
+
--- /dev/null
+From d75f62a94201a4f01489e3f2f5bc4c26fa2a5baf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Sep 2020 15:47:21 +0530
+Subject: vfio-pci: Fault mmaps to enable vma tracking
+
+From: Alex Williamson <alex.williamson@redhat.com>
+
+commit 11c4cd07ba111a09f49625f9e4c851d83daf0a22 upstream.
+
+Rather than calling remap_pfn_range() when a region is mmap'd, set up
+a vm_ops handler to support dynamic faulting of the range on access.
+This allows us to manage a list of vmas actively mapping the area that
+we can later use to invalidate those mappings. The open callback
+invalidates the vma range so that all tracking is inserted in the
+fault handler and removed in the close handler.
+
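+The mmap path reduces to (condensed from the hunks below):
+
+    vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+    vma->vm_ops = &vfio_pci_mmap_ops;    /* no remap_pfn_range() here */
+    return 0;    /* the range is faulted in by vfio_pci_mmap_fault() */
+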
+Reviewed-by: Peter Xu <peterx@redhat.com>
+Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
+[Ajay: Regenerated the patch for v4.19]
+Signed-off-by: Ajay Kaher <akaher@vmware.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/vfio/pci/vfio_pci.c | 76 ++++++++++++++++++++++++++++-
+ drivers/vfio/pci/vfio_pci_private.h | 7 +++
+ 2 files changed, 81 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
+index 66783a37f450c..3cd596023c2bb 100644
+--- a/drivers/vfio/pci/vfio_pci.c
++++ b/drivers/vfio/pci/vfio_pci.c
+@@ -1121,6 +1121,70 @@ static ssize_t vfio_pci_write(void *device_data, const char __user *buf,
+ return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true);
+ }
+
++static int vfio_pci_add_vma(struct vfio_pci_device *vdev,
++ struct vm_area_struct *vma)
++{
++ struct vfio_pci_mmap_vma *mmap_vma;
++
++ mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL);
++ if (!mmap_vma)
++ return -ENOMEM;
++
++ mmap_vma->vma = vma;
++
++ mutex_lock(&vdev->vma_lock);
++ list_add(&mmap_vma->vma_next, &vdev->vma_list);
++ mutex_unlock(&vdev->vma_lock);
++
++ return 0;
++}
++
++/*
++ * Zap mmaps on open so that we can fault them in on access and therefore
++ * our vma_list only tracks mappings accessed since last zap.
++ */
++static void vfio_pci_mmap_open(struct vm_area_struct *vma)
++{
++ zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
++}
++
++static void vfio_pci_mmap_close(struct vm_area_struct *vma)
++{
++ struct vfio_pci_device *vdev = vma->vm_private_data;
++ struct vfio_pci_mmap_vma *mmap_vma;
++
++ mutex_lock(&vdev->vma_lock);
++ list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) {
++ if (mmap_vma->vma == vma) {
++ list_del(&mmap_vma->vma_next);
++ kfree(mmap_vma);
++ break;
++ }
++ }
++ mutex_unlock(&vdev->vma_lock);
++}
++
++static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
++{
++ struct vm_area_struct *vma = vmf->vma;
++ struct vfio_pci_device *vdev = vma->vm_private_data;
++
++ if (vfio_pci_add_vma(vdev, vma))
++ return VM_FAULT_OOM;
++
++ if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
++ vma->vm_end - vma->vm_start, vma->vm_page_prot))
++ return VM_FAULT_SIGBUS;
++
++ return VM_FAULT_NOPAGE;
++}
++
++static const struct vm_operations_struct vfio_pci_mmap_ops = {
++ .open = vfio_pci_mmap_open,
++ .close = vfio_pci_mmap_close,
++ .fault = vfio_pci_mmap_fault,
++};
++
+ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
+ {
+ struct vfio_pci_device *vdev = device_data;
+@@ -1170,8 +1234,14 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;
+
+- return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+- req_len, vma->vm_page_prot);
++ /*
+	 * See remap_pfn_range(), called from vfio_pci_mmap_fault() but we
+	 * can't
++ * change vm_flags within the fault handler. Set them now.
++ */
++ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
++ vma->vm_ops = &vfio_pci_mmap_ops;
++
++ return 0;
+ }
+
+ static void vfio_pci_request(void *device_data, unsigned int count)
+@@ -1243,6 +1313,8 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ spin_lock_init(&vdev->irqlock);
+ mutex_init(&vdev->ioeventfds_lock);
+ INIT_LIST_HEAD(&vdev->ioeventfds_list);
++ mutex_init(&vdev->vma_lock);
++ INIT_LIST_HEAD(&vdev->vma_list);
+
+ ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
+ if (ret) {
+diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
+index cde3b5d3441ad..9743c934199d6 100644
+--- a/drivers/vfio/pci/vfio_pci_private.h
++++ b/drivers/vfio/pci/vfio_pci_private.h
+@@ -76,6 +76,11 @@ struct vfio_pci_dummy_resource {
+ struct list_head res_next;
+ };
+
++struct vfio_pci_mmap_vma {
++ struct vm_area_struct *vma;
++ struct list_head vma_next;
++};
++
+ struct vfio_pci_device {
+ struct pci_dev *pdev;
+ void __iomem *barmap[PCI_STD_RESOURCE_END + 1];
+@@ -111,6 +116,8 @@ struct vfio_pci_device {
+ struct list_head dummy_resources_list;
+ struct mutex ioeventfds_lock;
+ struct list_head ioeventfds_list;
++ struct mutex vma_lock;
++ struct list_head vma_list;
+ };
+
+ #define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX)
+--
+2.25.1
+
--- /dev/null
+From 4bdee1bc7df536d0d59d3a90672f02a2a6a5ede7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Sep 2020 15:47:22 +0530
+Subject: vfio-pci: Invalidate mmaps and block MMIO access on disabled memory
+
+From: Alex Williamson <alex.williamson@redhat.com>
+
+commit abafbc551fddede3e0a08dee1dcde08fc0eb8476 upstream.
+
+Accessing the disabled memory space of a PCI device would typically
+result in a master abort response on conventional PCI, or an
+unsupported request on PCI express. The user would generally see
+these as a -1 response for the read return data and the write would be
+silently discarded, possibly with an uncorrected, non-fatal AER error
+triggered on the host. Some systems however take it upon themselves
+to bring down the entire system when they see something that might
+indicate a loss of data, such as this discarded write to a disabled
+memory space.
+
+To avoid this, we want to try to block the user from accessing memory
+spaces while they're disabled. We start with a semaphore around the
+memory enable bit, where writers modify the memory enable state and
+must be serialized, while readers make use of the memory region and
+can access in parallel. Writers include both direct manipulation via
+the command register, as well as any reset path where the internal
+mechanics of the reset may both explicitly and implicitly disable
+memory access, and manipulation of the MSI-X configuration, where the
+MSI-X vector table resides in MMIO space of the device. Readers
+include the read and write file ops to access the vfio device fd
+offsets as well as memory mapped access. In the latter case, we make
+use of our new vma list support to zap, or invalidate, those memory
+mappings in order to force them to be faulted back in on access.
+
+Our semaphore usage will stall user access to MMIO spaces across
+internal operations like reset, but the user might experience new
+behavior when trying to access the MMIO space while disabled via the
+PCI command register. Access via read or write while disabled will
+return -EIO and access via memory maps will result in a SIGBUS. This
+is expected to be compatible with known use cases and potentially
+provides better error handling capabilities than present in the
+hardware, while avoiding the more readily accessible and severe
+platform error responses that might otherwise occur.
+
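+In code terms, writers and readers pair up as follows (condensed from the
+hunks below):
+
+    /* Writer, e.g. a reset path that may disable memory: */
+    vfio_pci_zap_and_down_write_memory_lock(vdev);
+    ret = pci_try_reset_function(vdev->pdev);
+    up_write(&vdev->memory_lock);
+
+    /* Reader, e.g. the BAR read/write path: */
+    down_read(&vdev->memory_lock);
+    if (!__vfio_pci_memory_enabled(vdev)) {
+        up_read(&vdev->memory_lock);
+        return -EIO;    /* mmap access instead gets SIGBUS via the fault handler */
+    }
+    ...
+    up_read(&vdev->memory_lock);
+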
+Fixes: CVE-2020-12888
+Reviewed-by: Peter Xu <peterx@redhat.com>
+Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
+[Ajay: Regenerated the patch for v4.19]
+Signed-off-by: Ajay Kaher <akaher@vmware.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/vfio/pci/vfio_pci.c | 291 ++++++++++++++++++++++++----
+ drivers/vfio/pci/vfio_pci_config.c | 36 +++-
+ drivers/vfio/pci/vfio_pci_intrs.c | 14 ++
+ drivers/vfio/pci/vfio_pci_private.h | 9 +
+ drivers/vfio/pci/vfio_pci_rdwr.c | 24 ++-
+ 5 files changed, 331 insertions(+), 43 deletions(-)
+
+diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
+index 3cd596023c2bb..9f72a6ee13b53 100644
+--- a/drivers/vfio/pci/vfio_pci.c
++++ b/drivers/vfio/pci/vfio_pci.c
+@@ -29,6 +29,7 @@
+ #include <linux/vfio.h>
+ #include <linux/vgaarb.h>
+ #include <linux/nospec.h>
++#include <linux/sched/mm.h>
+
+ #include "vfio_pci_private.h"
+
+@@ -181,6 +182,7 @@ no_mmap:
+
+ static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev);
+ static void vfio_pci_disable(struct vfio_pci_device *vdev);
++static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data);
+
+ /*
+ * INTx masking requires the ability to disable INTx signaling via PCI_COMMAND
+@@ -623,6 +625,12 @@ int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
+ return 0;
+ }
+
++struct vfio_devices {
++ struct vfio_device **devices;
++ int cur_index;
++ int max_index;
++};
++
+ static long vfio_pci_ioctl(void *device_data,
+ unsigned int cmd, unsigned long arg)
+ {
+@@ -696,7 +704,7 @@ static long vfio_pci_ioctl(void *device_data,
+ {
+ void __iomem *io;
+ size_t size;
+- u16 orig_cmd;
++ u16 cmd;
+
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.flags = 0;
+@@ -716,10 +724,7 @@ static long vfio_pci_ioctl(void *device_data,
+ * Is it really there? Enable memory decode for
+ * implicit access in pci_map_rom().
+ */
+- pci_read_config_word(pdev, PCI_COMMAND, &orig_cmd);
+- pci_write_config_word(pdev, PCI_COMMAND,
+- orig_cmd | PCI_COMMAND_MEMORY);
+-
++ cmd = vfio_pci_memory_lock_and_enable(vdev);
+ io = pci_map_rom(pdev, &size);
+ if (io) {
+ info.flags = VFIO_REGION_INFO_FLAG_READ;
+@@ -727,8 +732,8 @@ static long vfio_pci_ioctl(void *device_data,
+ } else {
+ info.size = 0;
+ }
++ vfio_pci_memory_unlock_and_restore(vdev, cmd);
+
+- pci_write_config_word(pdev, PCI_COMMAND, orig_cmd);
+ break;
+ }
+ case VFIO_PCI_VGA_REGION_INDEX:
+@@ -865,8 +870,16 @@ static long vfio_pci_ioctl(void *device_data,
+ return ret;
+
+ } else if (cmd == VFIO_DEVICE_RESET) {
+- return vdev->reset_works ?
+- pci_try_reset_function(vdev->pdev) : -EINVAL;
++ int ret;
++
++ if (!vdev->reset_works)
++ return -EINVAL;
++
++ vfio_pci_zap_and_down_write_memory_lock(vdev);
++ ret = pci_try_reset_function(vdev->pdev);
++ up_write(&vdev->memory_lock);
++
++ return ret;
+
+ } else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) {
+ struct vfio_pci_hot_reset_info hdr;
+@@ -946,8 +959,9 @@ reset_info_exit:
+ int32_t *group_fds;
+ struct vfio_pci_group_entry *groups;
+ struct vfio_pci_group_info info;
++ struct vfio_devices devs = { .cur_index = 0 };
+ bool slot = false;
+- int i, count = 0, ret = 0;
++ int i, group_idx, mem_idx = 0, count = 0, ret = 0;
+
+ minsz = offsetofend(struct vfio_pci_hot_reset, count);
+
+@@ -999,9 +1013,9 @@ reset_info_exit:
+ * user interface and store the group and iommu ID. This
+ * ensures the group is held across the reset.
+ */
+- for (i = 0; i < hdr.count; i++) {
++ for (group_idx = 0; group_idx < hdr.count; group_idx++) {
+ struct vfio_group *group;
+- struct fd f = fdget(group_fds[i]);
++ struct fd f = fdget(group_fds[group_idx]);
+ if (!f.file) {
+ ret = -EBADF;
+ break;
+@@ -1014,8 +1028,9 @@ reset_info_exit:
+ break;
+ }
+
+- groups[i].group = group;
+- groups[i].id = vfio_external_user_iommu_id(group);
++ groups[group_idx].group = group;
++ groups[group_idx].id =
++ vfio_external_user_iommu_id(group);
+ }
+
+ kfree(group_fds);
+@@ -1034,13 +1049,63 @@ reset_info_exit:
+ ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
+ vfio_pci_validate_devs,
+ &info, slot);
+- if (!ret)
+- /* User has access, do the reset */
+- ret = pci_reset_bus(vdev->pdev);
++ if (ret)
++ goto hot_reset_release;
++
++ devs.max_index = count;
++ devs.devices = kcalloc(count, sizeof(struct vfio_device *),
++ GFP_KERNEL);
++ if (!devs.devices) {
++ ret = -ENOMEM;
++ goto hot_reset_release;
++ }
++
++ /*
++ * We need to get memory_lock for each device, but devices
++ * can share mmap_sem, therefore we need to zap and hold
++ * the vma_lock for each device, and only then get each
++ * memory_lock.
++ */
++ ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
++ vfio_pci_try_zap_and_vma_lock_cb,
++ &devs, slot);
++ if (ret)
++ goto hot_reset_release;
++
++ for (; mem_idx < devs.cur_index; mem_idx++) {
++ struct vfio_pci_device *tmp;
++
++ tmp = vfio_device_data(devs.devices[mem_idx]);
++
++ ret = down_write_trylock(&tmp->memory_lock);
++ if (!ret) {
++ ret = -EBUSY;
++ goto hot_reset_release;
++ }
++ mutex_unlock(&tmp->vma_lock);
++ }
++
++ /* User has access, do the reset */
++ ret = pci_reset_bus(vdev->pdev);
+
+ hot_reset_release:
+- for (i--; i >= 0; i--)
+- vfio_group_put_external_user(groups[i].group);
++ for (i = 0; i < devs.cur_index; i++) {
++ struct vfio_device *device;
++ struct vfio_pci_device *tmp;
++
++ device = devs.devices[i];
++ tmp = vfio_device_data(device);
++
++ if (i < mem_idx)
++ up_write(&tmp->memory_lock);
++ else
++ mutex_unlock(&tmp->vma_lock);
++ vfio_device_put(device);
++ }
++ kfree(devs.devices);
++
++ for (group_idx--; group_idx >= 0; group_idx--)
++ vfio_group_put_external_user(groups[group_idx].group);
+
+ kfree(groups);
+ return ret;
+@@ -1121,8 +1186,126 @@ static ssize_t vfio_pci_write(void *device_data, const char __user *buf,
+ return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true);
+ }
+
+-static int vfio_pci_add_vma(struct vfio_pci_device *vdev,
+- struct vm_area_struct *vma)
++/* Return 1 on zap and vma_lock acquired, 0 on contention (only with @try) */
++static int vfio_pci_zap_and_vma_lock(struct vfio_pci_device *vdev, bool try)
++{
++ struct vfio_pci_mmap_vma *mmap_vma, *tmp;
++
++ /*
++ * Lock ordering:
++ * vma_lock is nested under mmap_sem for vm_ops callback paths.
++ * The memory_lock semaphore is used by both code paths calling
++ * into this function to zap vmas and the vm_ops.fault callback
++ * to protect the memory enable state of the device.
++ *
++ * When zapping vmas we need to maintain the mmap_sem => vma_lock
++ * ordering, which requires using vma_lock to walk vma_list to
++ * acquire an mm, then dropping vma_lock to get the mmap_sem and
++ * reacquiring vma_lock. This logic is derived from similar
++ * requirements in uverbs_user_mmap_disassociate().
++ *
++ * mmap_sem must always be the top-level lock when it is taken.
++ * Therefore we can only hold the memory_lock write lock when
++ * vma_list is empty, as we'd need to take mmap_sem to clear
++ * entries. vma_list can only be guaranteed empty when holding
++ * vma_lock, thus memory_lock is nested under vma_lock.
++ *
++ * This enables the vm_ops.fault callback to acquire vma_lock,
++ * followed by memory_lock read lock, while already holding
++ * mmap_sem without risk of deadlock.
++ */
++ while (1) {
++ struct mm_struct *mm = NULL;
++
++ if (try) {
++ if (!mutex_trylock(&vdev->vma_lock))
++ return 0;
++ } else {
++ mutex_lock(&vdev->vma_lock);
++ }
++ while (!list_empty(&vdev->vma_list)) {
++ mmap_vma = list_first_entry(&vdev->vma_list,
++ struct vfio_pci_mmap_vma,
++ vma_next);
++ mm = mmap_vma->vma->vm_mm;
++ if (mmget_not_zero(mm))
++ break;
++
++ list_del(&mmap_vma->vma_next);
++ kfree(mmap_vma);
++ mm = NULL;
++ }
++ if (!mm)
++ return 1;
++ mutex_unlock(&vdev->vma_lock);
++
++ if (try) {
++ if (!down_read_trylock(&mm->mmap_sem)) {
++ mmput(mm);
++ return 0;
++ }
++ } else {
++ down_read(&mm->mmap_sem);
++ }
++ if (mmget_still_valid(mm)) {
++ if (try) {
++ if (!mutex_trylock(&vdev->vma_lock)) {
++ up_read(&mm->mmap_sem);
++ mmput(mm);
++ return 0;
++ }
++ } else {
++ mutex_lock(&vdev->vma_lock);
++ }
++ list_for_each_entry_safe(mmap_vma, tmp,
++ &vdev->vma_list, vma_next) {
++ struct vm_area_struct *vma = mmap_vma->vma;
++
++ if (vma->vm_mm != mm)
++ continue;
++
++ list_del(&mmap_vma->vma_next);
++ kfree(mmap_vma);
++
++ zap_vma_ptes(vma, vma->vm_start,
++ vma->vm_end - vma->vm_start);
++ }
++ mutex_unlock(&vdev->vma_lock);
++ }
++ up_read(&mm->mmap_sem);
++ mmput(mm);
++ }
++}
++
++void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device *vdev)
++{
++ vfio_pci_zap_and_vma_lock(vdev, false);
++ down_write(&vdev->memory_lock);
++ mutex_unlock(&vdev->vma_lock);
++}
++
++u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev)
++{
++ u16 cmd;
++
++ down_write(&vdev->memory_lock);
++ pci_read_config_word(vdev->pdev, PCI_COMMAND, &cmd);
++ if (!(cmd & PCI_COMMAND_MEMORY))
++ pci_write_config_word(vdev->pdev, PCI_COMMAND,
++ cmd | PCI_COMMAND_MEMORY);
++
++ return cmd;
++}
++
++void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev, u16 cmd)
++{
++ pci_write_config_word(vdev->pdev, PCI_COMMAND, cmd);
++ up_write(&vdev->memory_lock);
++}
++
++/* Caller holds vma_lock */
++static int __vfio_pci_add_vma(struct vfio_pci_device *vdev,
++ struct vm_area_struct *vma)
+ {
+ struct vfio_pci_mmap_vma *mmap_vma;
+
+@@ -1131,10 +1314,7 @@ static int vfio_pci_add_vma(struct vfio_pci_device *vdev,
+ return -ENOMEM;
+
+ mmap_vma->vma = vma;
+-
+- mutex_lock(&vdev->vma_lock);
+ list_add(&mmap_vma->vma_next, &vdev->vma_list);
+- mutex_unlock(&vdev->vma_lock);
+
+ return 0;
+ }
+@@ -1168,15 +1348,32 @@ static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
+ {
+ struct vm_area_struct *vma = vmf->vma;
+ struct vfio_pci_device *vdev = vma->vm_private_data;
++ vm_fault_t ret = VM_FAULT_NOPAGE;
++
++ mutex_lock(&vdev->vma_lock);
++ down_read(&vdev->memory_lock);
++
++ if (!__vfio_pci_memory_enabled(vdev)) {
++ ret = VM_FAULT_SIGBUS;
++ mutex_unlock(&vdev->vma_lock);
++ goto up_out;
++ }
++
++ if (__vfio_pci_add_vma(vdev, vma)) {
++ ret = VM_FAULT_OOM;
++ mutex_unlock(&vdev->vma_lock);
++ goto up_out;
++ }
+
+- if (vfio_pci_add_vma(vdev, vma))
+- return VM_FAULT_OOM;
++ mutex_unlock(&vdev->vma_lock);
+
+ if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot))
+- return VM_FAULT_SIGBUS;
++ ret = VM_FAULT_SIGBUS;
+
+- return VM_FAULT_NOPAGE;
++up_out:
++ up_read(&vdev->memory_lock);
++ return ret;
+ }
+
+ static const struct vm_operations_struct vfio_pci_mmap_ops = {
+@@ -1315,6 +1512,7 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ INIT_LIST_HEAD(&vdev->ioeventfds_list);
+ mutex_init(&vdev->vma_lock);
+ INIT_LIST_HEAD(&vdev->vma_list);
++ init_rwsem(&vdev->memory_lock);
+
+ ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
+ if (ret) {
+@@ -1410,12 +1608,6 @@ static struct pci_driver vfio_pci_driver = {
+ .err_handler = &vfio_err_handlers,
+ };
+
+-struct vfio_devices {
+- struct vfio_device **devices;
+- int cur_index;
+- int max_index;
+-};
+-
+ static int vfio_pci_get_devs(struct pci_dev *pdev, void *data)
+ {
+ struct vfio_devices *devs = data;
+@@ -1437,6 +1629,39 @@ static int vfio_pci_get_devs(struct pci_dev *pdev, void *data)
+ return 0;
+ }
+
++static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data)
++{
++ struct vfio_devices *devs = data;
++ struct vfio_device *device;
++ struct vfio_pci_device *vdev;
++
++ if (devs->cur_index == devs->max_index)
++ return -ENOSPC;
++
++ device = vfio_device_get_from_dev(&pdev->dev);
++ if (!device)
++ return -EINVAL;
++
++ if (pci_dev_driver(pdev) != &vfio_pci_driver) {
++ vfio_device_put(device);
++ return -EBUSY;
++ }
++
++ vdev = vfio_device_data(device);
++
++ /*
++ * Locking multiple devices is prone to deadlock, runaway and
++ * unwind if we hit contention.
++ */
++ if (!vfio_pci_zap_and_vma_lock(vdev, true)) {
++ vfio_device_put(device);
++ return -EBUSY;
++ }
++
++ devs->devices[devs->cur_index++] = device;
++ return 0;
++}
++
+ /*
+ * Attempt to do a bus/slot reset if there are devices affected by a reset for
+ * this device that are needs_reset and all of the affected devices are unused
+diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
+index 36bc8f104e42e..4fe71fbce1942 100644
+--- a/drivers/vfio/pci/vfio_pci_config.c
++++ b/drivers/vfio/pci/vfio_pci_config.c
+@@ -398,6 +398,14 @@ static inline void p_setd(struct perm_bits *p, int off, u32 virt, u32 write)
+ *(__le32 *)(&p->write[off]) = cpu_to_le32(write);
+ }
+
++/* Caller should hold memory_lock semaphore */
++bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev)
++{
++ u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]);
++
++ return cmd & PCI_COMMAND_MEMORY;
++}
++
+ /*
+ * Restore the *real* BARs after we detect a FLR or backdoor reset.
+ * (backdoor = some device specific technique that we didn't catch)
+@@ -558,13 +566,18 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
+
+ new_cmd = le32_to_cpu(val);
+
++ phys_io = !!(phys_cmd & PCI_COMMAND_IO);
++ virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO);
++ new_io = !!(new_cmd & PCI_COMMAND_IO);
++
+ phys_mem = !!(phys_cmd & PCI_COMMAND_MEMORY);
+ virt_mem = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_MEMORY);
+ new_mem = !!(new_cmd & PCI_COMMAND_MEMORY);
+
+- phys_io = !!(phys_cmd & PCI_COMMAND_IO);
+- virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO);
+- new_io = !!(new_cmd & PCI_COMMAND_IO);
++ if (!new_mem)
++ vfio_pci_zap_and_down_write_memory_lock(vdev);
++ else
++ down_write(&vdev->memory_lock);
+
+ /*
+ * If the user is writing mem/io enable (new_mem/io) and we
+@@ -581,8 +594,11 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
+ }
+
+ count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
+- if (count < 0)
++ if (count < 0) {
++ if (offset == PCI_COMMAND)
++ up_write(&vdev->memory_lock);
+ return count;
++ }
+
+ /*
+ * Save current memory/io enable bits in vconfig to allow for
+@@ -593,6 +609,8 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
+
+ *virt_cmd &= cpu_to_le16(~mask);
+ *virt_cmd |= cpu_to_le16(new_cmd & mask);
++
++ up_write(&vdev->memory_lock);
+ }
+
+ /* Emulate INTx disable */
+@@ -830,8 +848,11 @@ static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos,
+ pos - offset + PCI_EXP_DEVCAP,
+ &cap);
+
+- if (!ret && (cap & PCI_EXP_DEVCAP_FLR))
++ if (!ret && (cap & PCI_EXP_DEVCAP_FLR)) {
++ vfio_pci_zap_and_down_write_memory_lock(vdev);
+ pci_try_reset_function(vdev->pdev);
++ up_write(&vdev->memory_lock);
++ }
+ }
+
+ /*
+@@ -909,8 +930,11 @@ static int vfio_af_config_write(struct vfio_pci_device *vdev, int pos,
+ pos - offset + PCI_AF_CAP,
+ &cap);
+
+- if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP))
++ if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP)) {
++ vfio_pci_zap_and_down_write_memory_lock(vdev);
+ pci_try_reset_function(vdev->pdev);
++ up_write(&vdev->memory_lock);
++ }
+ }
+
+ return count;
+diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
+index 94594dc63c417..bdfdd506bc588 100644
+--- a/drivers/vfio/pci/vfio_pci_intrs.c
++++ b/drivers/vfio/pci/vfio_pci_intrs.c
+@@ -252,6 +252,7 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
+ struct pci_dev *pdev = vdev->pdev;
+ unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI;
+ int ret;
++ u16 cmd;
+
+ if (!is_irq_none(vdev))
+ return -EINVAL;
+@@ -261,13 +262,16 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
+ return -ENOMEM;
+
+ /* return the number of supported vectors if we can't get all: */
++ cmd = vfio_pci_memory_lock_and_enable(vdev);
+ ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag);
+ if (ret < nvec) {
+ if (ret > 0)
+ pci_free_irq_vectors(pdev);
++ vfio_pci_memory_unlock_and_restore(vdev, cmd);
+ kfree(vdev->ctx);
+ return ret;
+ }
++ vfio_pci_memory_unlock_and_restore(vdev, cmd);
+
+ vdev->num_ctx = nvec;
+ vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
+@@ -290,6 +294,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
+ struct pci_dev *pdev = vdev->pdev;
+ struct eventfd_ctx *trigger;
+ int irq, ret;
++ u16 cmd;
+
+ if (vector < 0 || vector >= vdev->num_ctx)
+ return -EINVAL;
+@@ -298,7 +303,11 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
+
+ if (vdev->ctx[vector].trigger) {
+ irq_bypass_unregister_producer(&vdev->ctx[vector].producer);
++
++ cmd = vfio_pci_memory_lock_and_enable(vdev);
+ free_irq(irq, vdev->ctx[vector].trigger);
++ vfio_pci_memory_unlock_and_restore(vdev, cmd);
++
+ kfree(vdev->ctx[vector].name);
+ eventfd_ctx_put(vdev->ctx[vector].trigger);
+ vdev->ctx[vector].trigger = NULL;
+@@ -326,6 +335,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
+ * such a reset it would be unsuccessful. To avoid this, restore the
+ * cached value of the message prior to enabling.
+ */
++ cmd = vfio_pci_memory_lock_and_enable(vdev);
+ if (msix) {
+ struct msi_msg msg;
+
+@@ -335,6 +345,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
+
+ ret = request_irq(irq, vfio_msihandler, 0,
+ vdev->ctx[vector].name, trigger);
++ vfio_pci_memory_unlock_and_restore(vdev, cmd);
+ if (ret) {
+ kfree(vdev->ctx[vector].name);
+ eventfd_ctx_put(trigger);
+@@ -379,6 +390,7 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
+ {
+ struct pci_dev *pdev = vdev->pdev;
+ int i;
++ u16 cmd;
+
+ for (i = 0; i < vdev->num_ctx; i++) {
+ vfio_virqfd_disable(&vdev->ctx[i].unmask);
+@@ -387,7 +399,9 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
+
+ vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);
+
++ cmd = vfio_pci_memory_lock_and_enable(vdev);
+ pci_free_irq_vectors(pdev);
++ vfio_pci_memory_unlock_and_restore(vdev, cmd);
+
+ /*
+ * Both disable paths above use pci_intx_for_msi() to clear DisINTx
+diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
+index 9743c934199d6..17d2bae5b013c 100644
+--- a/drivers/vfio/pci/vfio_pci_private.h
++++ b/drivers/vfio/pci/vfio_pci_private.h
+@@ -118,6 +118,7 @@ struct vfio_pci_device {
+ struct list_head ioeventfds_list;
+ struct mutex vma_lock;
+ struct list_head vma_list;
++ struct rw_semaphore memory_lock;
+ };
+
+ #define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX)
+@@ -156,6 +157,14 @@ extern int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
+ unsigned int type, unsigned int subtype,
+ const struct vfio_pci_regops *ops,
+ size_t size, u32 flags, void *data);
++
++extern bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev);
++extern void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device
++ *vdev);
++extern u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev);
++extern void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev,
++ u16 cmd);
++
+ #ifdef CONFIG_VFIO_PCI_IGD
+ extern int vfio_pci_igd_init(struct vfio_pci_device *vdev);
+ #else
+diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
+index a6029d0a55244..3d0ec2bbe131f 100644
+--- a/drivers/vfio/pci/vfio_pci_rdwr.c
++++ b/drivers/vfio/pci/vfio_pci_rdwr.c
+@@ -165,6 +165,7 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
+ size_t x_start = 0, x_end = 0;
+ resource_size_t end;
+ void __iomem *io;
++ struct resource *res = &vdev->pdev->resource[bar];
+ ssize_t done;
+
+ if (pci_resource_start(pdev, bar))
+@@ -180,6 +181,14 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
+
+ count = min(count, (size_t)(end - pos));
+
++ if (res->flags & IORESOURCE_MEM) {
++ down_read(&vdev->memory_lock);
++ if (!__vfio_pci_memory_enabled(vdev)) {
++ up_read(&vdev->memory_lock);
++ return -EIO;
++ }
++ }
++
+ if (bar == PCI_ROM_RESOURCE) {
+ /*
+ * The ROM can fill less space than the BAR, so we start the
+@@ -187,13 +196,17 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
+ * filling large ROM BARs much faster.
+ */
+ io = pci_map_rom(pdev, &x_start);
+- if (!io)
+- return -ENOMEM;
++ if (!io) {
++ done = -ENOMEM;
++ goto out;
++ }
+ x_end = end;
+ } else {
+ int ret = vfio_pci_setup_barmap(vdev, bar);
+- if (ret)
+- return ret;
++ if (ret) {
++ done = ret;
++ goto out;
++ }
+
+ io = vdev->barmap[bar];
+ }
+@@ -210,6 +223,9 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
+
+ if (bar == PCI_ROM_RESOURCE)
+ pci_unmap_rom(pdev, io);
++out:
++ if (res->flags & IORESOURCE_MEM)
++ up_read(&vdev->memory_lock);
+
+ return done;
+ }
+--
+2.25.1
+
--- /dev/null
+From 2a67200ad9e71668f3264ec25850e63b4b1d919e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Sep 2020 15:47:20 +0530
+Subject: vfio/type1: Support faulting PFNMAP vmas
+
+From: Alex Williamson <alex.williamson@redhat.com>
+
+commit 41311242221e3482b20bfed10fa4d9db98d87016 upstream.
+
+With conversion to follow_pfn(), DMA mapping a PFNMAP range depends on
+the range being faulted into the vma. Add support to manually provide
+that, in the same way as done on KVM with hva_to_pfn_remapped().
+
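+The lookup becomes a fault-and-retry loop (condensed from the hunk below):
+
+    ret = follow_pfn(vma, vaddr, pfn);
+    if (ret) {
+        /* Fault the PFNMAP page in, then retry follow_pfn(). */
+        ret = fixup_user_fault(NULL, mm, vaddr,
+                               FAULT_FLAG_REMOTE |
+                               (write_fault ? FAULT_FLAG_WRITE : 0),
+                               &unlocked);
+        if (unlocked)
+            return -EAGAIN;    /* caller re-walks the vma and retries */
+        ...
+    }
+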
+Reviewed-by: Peter Xu <peterx@redhat.com>
+Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
+[Ajay: Regenerated the patch for v4.19]
+Signed-off-by: Ajay Kaher <akaher@vmware.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/vfio/vfio_iommu_type1.c | 36 ++++++++++++++++++++++++++++++---
+ 1 file changed, 33 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
+index 52083b710b87e..05d8553635ee7 100644
+--- a/drivers/vfio/vfio_iommu_type1.c
++++ b/drivers/vfio/vfio_iommu_type1.c
+@@ -343,6 +343,32 @@ static int put_pfn(unsigned long pfn, int prot)
+ return 0;
+ }
+
++static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
++ unsigned long vaddr, unsigned long *pfn,
++ bool write_fault)
++{
++ int ret;
++
++ ret = follow_pfn(vma, vaddr, pfn);
++ if (ret) {
++ bool unlocked = false;
++
++ ret = fixup_user_fault(NULL, mm, vaddr,
++ FAULT_FLAG_REMOTE |
++ (write_fault ? FAULT_FLAG_WRITE : 0),
++ &unlocked);
++ if (unlocked)
++ return -EAGAIN;
++
++ if (ret)
++ return ret;
++
++ ret = follow_pfn(vma, vaddr, pfn);
++ }
++
++ return ret;
++}
++
+ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
+ int prot, unsigned long *pfn)
+ {
+@@ -382,12 +408,16 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
+
+ down_read(&mm->mmap_sem);
+
++retry:
+ vma = find_vma_intersection(mm, vaddr, vaddr + 1);
+
+ if (vma && vma->vm_flags & VM_PFNMAP) {
+- if (!follow_pfn(vma, vaddr, pfn) &&
+- is_invalid_reserved_pfn(*pfn))
+- ret = 0;
++ ret = follow_fault_pfn(vma, mm, vaddr, pfn, prot & IOMMU_WRITE);
++ if (ret == -EAGAIN)
++ goto retry;
++
++ if (!ret && !is_invalid_reserved_pfn(*pfn))
++ ret = -EFAULT;
+ }
+
+ up_read(&mm->mmap_sem);
+--
+2.25.1
+