--- /dev/null
+From foo@baz Thu Dec 13 21:38:45 CET 2018
+From: Lorenzo Stoakes <lstoakes@gmail.com>
+Date: Thu, 13 Oct 2016 01:20:11 +0100
+Subject: mm: remove write/force parameters from __get_user_pages_locked()
+
+From: Lorenzo Stoakes <lstoakes@gmail.com>
+
+commit 859110d7497cdd0e6b21010d6f777049d676382c upstream.
+
+This removes the redundant 'write' and 'force' parameters from
+__get_user_pages_locked() to make the use of FOLL_FORCE explicit in
+callers as use of this flag can result in surprising behaviour (and
+hence bugs) within the mm subsystem.
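+
+The conversion in the exported wrappers is purely mechanical: 'write'
+maps to FOLL_WRITE and 'force' to FOLL_FORCE.  For illustration, this is
+the flag construction that get_user_pages_locked() now performs before
+calling the helper (taken from the hunk below):
+
+	unsigned int flags = FOLL_TOUCH;
+
+	if (write)
+		flags |= FOLL_WRITE;
+	if (force)
+		flags |= FOLL_FORCE;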
+
+Signed-off-by: Lorenzo Stoakes <lstoakes@gmail.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[bwh: Backported to 4.4:
+ - Drop change in get_user_pages_remote()
+ - Adjust context]
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/gup.c | 37 ++++++++++++++++++++++++++-----------
+ 1 file changed, 26 insertions(+), 11 deletions(-)
+
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -627,7 +627,6 @@ static __always_inline long __get_user_p
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long nr_pages,
+- int write, int force,
+ struct page **pages,
+ struct vm_area_struct **vmas,
+ int *locked, bool notify_drop,
+@@ -645,10 +644,6 @@ static __always_inline long __get_user_p
+
+ if (pages)
+ flags |= FOLL_GET;
+- if (write)
+- flags |= FOLL_WRITE;
+- if (force)
+- flags |= FOLL_FORCE;
+
+ pages_done = 0;
+ lock_dropped = false;
+@@ -745,8 +740,15 @@ long get_user_pages_locked(struct task_s
+ int write, int force, struct page **pages,
+ int *locked)
+ {
+- return __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
+- pages, NULL, locked, true, FOLL_TOUCH);
++ unsigned int flags = FOLL_TOUCH;
++
++ if (write)
++ flags |= FOLL_WRITE;
++ if (force)
++ flags |= FOLL_FORCE;
++
++ return __get_user_pages_locked(tsk, mm, start, nr_pages,
++ pages, NULL, locked, true, flags);
+ }
+ EXPORT_SYMBOL(get_user_pages_locked);
+
+@@ -767,9 +769,15 @@ __always_inline long __get_user_pages_un
+ {
+ long ret;
+ int locked = 1;
++
++ if (write)
++ gup_flags |= FOLL_WRITE;
++ if (force)
++ gup_flags |= FOLL_FORCE;
++
+ down_read(&mm->mmap_sem);
+- ret = __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
+- pages, NULL, &locked, false, gup_flags);
++ ret = __get_user_pages_locked(tsk, mm, start, nr_pages, pages, NULL,
++ &locked, false, gup_flags);
+ if (locked)
+ up_read(&mm->mmap_sem);
+ return ret;
+@@ -861,8 +869,15 @@ long get_user_pages(struct task_struct *
+ unsigned long start, unsigned long nr_pages, int write,
+ int force, struct page **pages, struct vm_area_struct **vmas)
+ {
+- return __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
+- pages, vmas, NULL, false, FOLL_TOUCH);
++ unsigned int flags = FOLL_TOUCH;
++
++ if (write)
++ flags |= FOLL_WRITE;
++ if (force)
++ flags |= FOLL_FORCE;
++
++ return __get_user_pages_locked(tsk, mm, start, nr_pages,
++ pages, vmas, NULL, false, flags);
+ }
+ EXPORT_SYMBOL(get_user_pages);
+
--- /dev/null
+From foo@baz Thu Dec 13 21:38:45 CET 2018
+From: Lorenzo Stoakes <lstoakes@gmail.com>
+Date: Thu, 13 Oct 2016 01:20:12 +0100
+Subject: mm: remove write/force parameters from __get_user_pages_unlocked()
+
+From: Lorenzo Stoakes <lstoakes@gmail.com>
+
+commit d4944b0ecec0af882483fe44b66729316e575208 upstream.
+
+This removes the redundant 'write' and 'force' parameters from
+__get_user_pages_unlocked() to make the use of FOLL_FORCE explicit in
+callers as use of this flag can result in surprising behaviour (and
+hence bugs) within the mm subsystem.
+
+Signed-off-by: Lorenzo Stoakes <lstoakes@gmail.com>
+Acked-by: Paolo Bonzini <pbonzini@redhat.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[bwh: Backported to 4.4:
+ - Defer changes in process_vm_rw_single_vec() and async_pf_execute() since
+ they use get_user_pages_unlocked() here
+ - Adjust context]
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/mm.h | 3 +--
+ mm/gup.c | 19 ++++++++++---------
+ mm/nommu.c | 14 ++++++++++----
+ virt/kvm/kvm_main.c | 11 ++++++++---
+ 4 files changed, 29 insertions(+), 18 deletions(-)
+
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1207,8 +1207,7 @@ long get_user_pages_locked(struct task_s
+ int *locked);
+ long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+- int write, int force, struct page **pages,
+- unsigned int gup_flags);
++ struct page **pages, unsigned int gup_flags);
+ long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+ int write, int force, struct page **pages);
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -764,17 +764,11 @@ EXPORT_SYMBOL(get_user_pages_locked);
+ */
+ __always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+- int write, int force, struct page **pages,
+- unsigned int gup_flags)
++ struct page **pages, unsigned int gup_flags)
+ {
+ long ret;
+ int locked = 1;
+
+- if (write)
+- gup_flags |= FOLL_WRITE;
+- if (force)
+- gup_flags |= FOLL_FORCE;
+-
+ down_read(&mm->mmap_sem);
+ ret = __get_user_pages_locked(tsk, mm, start, nr_pages, pages, NULL,
+ &locked, false, gup_flags);
+@@ -805,8 +799,15 @@ long get_user_pages_unlocked(struct task
+ unsigned long start, unsigned long nr_pages,
+ int write, int force, struct page **pages)
+ {
+- return __get_user_pages_unlocked(tsk, mm, start, nr_pages, write,
+- force, pages, FOLL_TOUCH);
++ unsigned int flags = FOLL_TOUCH;
++
++ if (write)
++ flags |= FOLL_WRITE;
++ if (force)
++ flags |= FOLL_FORCE;
++
++ return __get_user_pages_unlocked(tsk, mm, start, nr_pages,
++ pages, flags);
+ }
+ EXPORT_SYMBOL(get_user_pages_unlocked);
+
+--- a/mm/nommu.c
++++ b/mm/nommu.c
+@@ -211,8 +211,7 @@ EXPORT_SYMBOL(get_user_pages_locked);
+
+ long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+- int write, int force, struct page **pages,
+- unsigned int gup_flags)
++ struct page **pages, unsigned int gup_flags)
+ {
+ long ret;
+ down_read(&mm->mmap_sem);
+@@ -227,8 +226,15 @@ long get_user_pages_unlocked(struct task
+ unsigned long start, unsigned long nr_pages,
+ int write, int force, struct page **pages)
+ {
+- return __get_user_pages_unlocked(tsk, mm, start, nr_pages, write,
+- force, pages, 0);
++ unsigned int flags = 0;
++
++ if (write)
++ flags |= FOLL_WRITE;
++ if (force)
++ flags |= FOLL_FORCE;
++
++ return __get_user_pages_unlocked(tsk, mm, start, nr_pages,
++ pages, flags);
+ }
+ EXPORT_SYMBOL(get_user_pages_unlocked);
+
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -1352,10 +1352,15 @@ static int hva_to_pfn_slow(unsigned long
+ npages = get_user_page_nowait(current, current->mm,
+ addr, write_fault, page);
+ up_read(&current->mm->mmap_sem);
+- } else
++ } else {
++ unsigned int flags = FOLL_TOUCH | FOLL_HWPOISON;
++
++ if (write_fault)
++ flags |= FOLL_WRITE;
++
+ npages = __get_user_pages_unlocked(current, current->mm, addr, 1,
+- write_fault, 0, page,
+- FOLL_TOUCH|FOLL_HWPOISON);
++ page, flags);
++ }
+ if (npages != 1)
+ return npages;
+
--- /dev/null
+From foo@baz Thu Dec 13 21:38:45 CET 2018
+From: Lorenzo Stoakes <lstoakes@gmail.com>
+Date: Thu, 13 Oct 2016 01:20:18 +0100
+Subject: mm: replace __access_remote_vm() write parameter with gup_flags
+
+From: Lorenzo Stoakes <lstoakes@gmail.com>
+
+commit 442486ec1096781c50227b73f721a63974b0fdda upstream.
+
+This removes the 'write' argument from __access_remote_vm() and replaces
+it with 'gup_flags' as use of this function previously silently implied
+FOLL_FORCE, whereas after this patch callers explicitly pass this flag.
+
+We make this explicit as use of FOLL_FORCE can result in surprising
+behaviour (and hence bugs) within the mm subsystem.
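+
+For example, access_process_vm() (see the hunk below) now builds the
+flags itself rather than passing a bare 'write' integer down:
+
+	unsigned int flags = FOLL_FORCE;
+
+	if (write)
+		flags |= FOLL_WRITE;
+
+	ret = __access_remote_vm(tsk, mm, addr, buf, len, flags);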
+
+Signed-off-by: Lorenzo Stoakes <lstoakes@gmail.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[bwh: Backported to 4.4: adjust context]
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/memory.c | 23 +++++++++++++++--------
+ mm/nommu.c | 9 ++++++---
+ 2 files changed, 21 insertions(+), 11 deletions(-)
+
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3711,14 +3711,11 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
+ * given task for page fault accounting.
+ */
+ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+- unsigned long addr, void *buf, int len, int write)
++ unsigned long addr, void *buf, int len, unsigned int gup_flags)
+ {
+ struct vm_area_struct *vma;
+ void *old_buf = buf;
+- unsigned int flags = FOLL_FORCE;
+-
+- if (write)
+- flags |= FOLL_WRITE;
++ int write = gup_flags & FOLL_WRITE;
+
+ down_read(&mm->mmap_sem);
+ /* ignore errors, just check how much was successfully transferred */
+@@ -3728,7 +3725,7 @@ static int __access_remote_vm(struct tas
+ struct page *page = NULL;
+
+ ret = get_user_pages(tsk, mm, addr, 1,
+- flags, &page, &vma);
++ gup_flags, &page, &vma);
+ if (ret <= 0) {
+ #ifndef CONFIG_HAVE_IOREMAP_PROT
+ break;
+@@ -3787,7 +3784,12 @@ static int __access_remote_vm(struct tas
+ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+ void *buf, int len, int write)
+ {
+- return __access_remote_vm(NULL, mm, addr, buf, len, write);
++ unsigned int flags = FOLL_FORCE;
++
++ if (write)
++ flags |= FOLL_WRITE;
++
++ return __access_remote_vm(NULL, mm, addr, buf, len, flags);
+ }
+
+ /*
+@@ -3800,12 +3802,17 @@ int access_process_vm(struct task_struct
+ {
+ struct mm_struct *mm;
+ int ret;
++ unsigned int flags = FOLL_FORCE;
+
+ mm = get_task_mm(tsk);
+ if (!mm)
+ return 0;
+
+- ret = __access_remote_vm(tsk, mm, addr, buf, len, write);
++ if (write)
++ flags |= FOLL_WRITE;
++
++ ret = __access_remote_vm(tsk, mm, addr, buf, len, flags);
++
+ mmput(mm);
+
+ return ret;
+--- a/mm/nommu.c
++++ b/mm/nommu.c
+@@ -1929,9 +1929,10 @@ void filemap_map_pages(struct vm_area_st
+ EXPORT_SYMBOL(filemap_map_pages);
+
+ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+- unsigned long addr, void *buf, int len, int write)
++ unsigned long addr, void *buf, int len, unsigned int gup_flags)
+ {
+ struct vm_area_struct *vma;
++ int write = gup_flags & FOLL_WRITE;
+
+ down_read(&mm->mmap_sem);
+
+@@ -1973,7 +1974,8 @@ static int __access_remote_vm(struct tas
+ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+ void *buf, int len, int write)
+ {
+- return __access_remote_vm(NULL, mm, addr, buf, len, write);
++ return __access_remote_vm(NULL, mm, addr, buf, len,
++ write ? FOLL_WRITE : 0);
+ }
+
+ /*
+@@ -1991,7 +1993,8 @@ int access_process_vm(struct task_struct
+ if (!mm)
+ return 0;
+
+- len = __access_remote_vm(tsk, mm, addr, buf, len, write);
++ len = __access_remote_vm(tsk, mm, addr, buf, len,
++ write ? FOLL_WRITE : 0);
+
+ mmput(mm);
+ return len;
--- /dev/null
+From foo@baz Thu Dec 13 21:38:45 CET 2018
+From: Lorenzo Stoakes <lstoakes@gmail.com>
+Date: Thu, 13 Oct 2016 01:20:19 +0100
+Subject: mm: replace access_remote_vm() write parameter with gup_flags
+
+From: Lorenzo Stoakes <lstoakes@gmail.com>
+
+commit 6347e8d5bcce33fc36e651901efefbe2c93a43ef upstream.
+
+This removes the 'write' argument from access_remote_vm() and replaces
+it with 'gup_flags' as use of this function previously silently implied
+FOLL_FORCE, whereas after this patch callers explicitly pass this flag.
+
+We make this explicit as use of FOLL_FORCE can result in surprising
+behaviour (and hence bugs) within the mm subsystem.
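+
+Note that the conversion is not a bare 'write ? FOLL_WRITE : 0': the old
+interface always implied FOLL_FORCE, so callers that want to preserve
+the existing behaviour pass it explicitly even for reads, e.g. in
+fs/proc (see below):
+
+	rv = access_remote_vm(mm, arg_end - 1, &c, 1, FOLL_FORCE);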
+
+Signed-off-by: Lorenzo Stoakes <lstoakes@gmail.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/proc/base.c | 19 +++++++++++++------
+ include/linux/mm.h | 2 +-
+ mm/memory.c | 11 +++--------
+ mm/nommu.c | 7 +++----
+ 4 files changed, 20 insertions(+), 19 deletions(-)
+
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -254,7 +254,7 @@ static ssize_t proc_pid_cmdline_read(str
+ * Inherently racy -- command line shares address space
+ * with code and data.
+ */
+- rv = access_remote_vm(mm, arg_end - 1, &c, 1, 0);
++ rv = access_remote_vm(mm, arg_end - 1, &c, 1, FOLL_FORCE);
+ if (rv <= 0)
+ goto out_free_page;
+
+@@ -272,7 +272,8 @@ static ssize_t proc_pid_cmdline_read(str
+ int nr_read;
+
+ _count = min3(count, len, PAGE_SIZE);
+- nr_read = access_remote_vm(mm, p, page, _count, 0);
++ nr_read = access_remote_vm(mm, p, page, _count,
++ FOLL_FORCE);
+ if (nr_read < 0)
+ rv = nr_read;
+ if (nr_read <= 0)
+@@ -307,7 +308,8 @@ static ssize_t proc_pid_cmdline_read(str
+ bool final;
+
+ _count = min3(count, len, PAGE_SIZE);
+- nr_read = access_remote_vm(mm, p, page, _count, 0);
++ nr_read = access_remote_vm(mm, p, page, _count,
++ FOLL_FORCE);
+ if (nr_read < 0)
+ rv = nr_read;
+ if (nr_read <= 0)
+@@ -356,7 +358,8 @@ skip_argv:
+ bool final;
+
+ _count = min3(count, len, PAGE_SIZE);
+- nr_read = access_remote_vm(mm, p, page, _count, 0);
++ nr_read = access_remote_vm(mm, p, page, _count,
++ FOLL_FORCE);
+ if (nr_read < 0)
+ rv = nr_read;
+ if (nr_read <= 0)
+@@ -868,6 +871,7 @@ static ssize_t mem_rw(struct file *file,
+ unsigned long addr = *ppos;
+ ssize_t copied;
+ char *page;
++ unsigned int flags = FOLL_FORCE;
+
+ if (!mm)
+ return 0;
+@@ -880,6 +884,9 @@ static ssize_t mem_rw(struct file *file,
+ if (!atomic_inc_not_zero(&mm->mm_users))
+ goto free;
+
++ if (write)
++ flags |= FOLL_WRITE;
++
+ while (count > 0) {
+ int this_len = min_t(int, count, PAGE_SIZE);
+
+@@ -888,7 +895,7 @@ static ssize_t mem_rw(struct file *file,
+ break;
+ }
+
+- this_len = access_remote_vm(mm, addr, page, this_len, write);
++ this_len = access_remote_vm(mm, addr, page, this_len, flags);
+ if (!this_len) {
+ if (!copied)
+ copied = -EIO;
+@@ -1001,7 +1008,7 @@ static ssize_t environ_read(struct file
+ this_len = min(max_len, this_len);
+
+ retval = access_remote_vm(mm, (env_start + src),
+- page, this_len, 0);
++ page, this_len, FOLL_FORCE);
+
+ if (retval <= 0) {
+ ret = retval;
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1191,7 +1191,7 @@ static inline int fixup_user_fault(struc
+
+ extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
+ extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+- void *buf, int len, int write);
++ void *buf, int len, unsigned int gup_flags);
+
+ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3777,19 +3777,14 @@ static int __access_remote_vm(struct tas
+ * @addr: start address to access
+ * @buf: source or destination buffer
+ * @len: number of bytes to transfer
+- * @write: whether the access is a write
++ * @gup_flags: flags modifying lookup behaviour
+ *
+ * The caller must hold a reference on @mm.
+ */
+ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+- void *buf, int len, int write)
++ void *buf, int len, unsigned int gup_flags)
+ {
+- unsigned int flags = FOLL_FORCE;
+-
+- if (write)
+- flags |= FOLL_WRITE;
+-
+- return __access_remote_vm(NULL, mm, addr, buf, len, flags);
++ return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags);
+ }
+
+ /*
+--- a/mm/nommu.c
++++ b/mm/nommu.c
+@@ -1967,15 +1967,14 @@ static int __access_remote_vm(struct tas
+ * @addr: start address to access
+ * @buf: source or destination buffer
+ * @len: number of bytes to transfer
+- * @write: whether the access is a write
++ * @gup_flags: flags modifying lookup behaviour
+ *
+ * The caller must hold a reference on @mm.
+ */
+ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+- void *buf, int len, int write)
++ void *buf, int len, unsigned int gup_flags)
+ {
+- return __access_remote_vm(NULL, mm, addr, buf, len,
+- write ? FOLL_WRITE : 0);
++ return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags);
+ }
+
+ /*
--- /dev/null
+From foo@baz Thu Dec 13 21:38:45 CET 2018
+From: Lorenzo Stoakes <lstoakes@gmail.com>
+Date: Thu, 13 Oct 2016 01:20:16 +0100
+Subject: mm: replace get_user_pages() write/force parameters with gup_flags
+
+From: Lorenzo Stoakes <lstoakes@gmail.com>
+
+commit 768ae309a96103ed02eb1e111e838c87854d8b51 upstream.
+
+This removes the 'write' and 'force' from get_user_pages() and replaces
+them with 'gup_flags' to make the use of FOLL_FORCE explicit in callers
+as use of this flag can result in surprising behaviour (and hence bugs)
+within the mm subsystem.
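+
+The conversion at the call sites is mechanical: 'write' maps to
+FOLL_WRITE and 'force' to FOLL_FORCE.  A typical example (taken from the
+radeon hunk below):
+
+	r = get_user_pages(current, current->mm, userptr, num_pages,
+			   write ? FOLL_WRITE : 0, pages, NULL);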
+
+Signed-off-by: Lorenzo Stoakes <lstoakes@gmail.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Acked-by: Jesper Nilsson <jesper.nilsson@axis.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[bwh: Backported to 4.4:
+ - Drop changes in rapidio, vchiq, goldfish
+ - Keep the "write" variable in amdgpu_ttm_tt_pin_userptr() as it's still
+ needed
+ - Also update calls from various other places that now use
+ get_user_pages_remote() upstream, which were updated there by commit
+ 9beae1ea8930 "mm: replace get_user_pages_remote() write/force ..."
+ - Also update calls from hfi1 and ipath
+ - Adjust context]
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/cris/arch-v32/drivers/cryptocop.c | 4 +---
+ arch/ia64/kernel/err_inject.c | 2 +-
+ arch/x86/mm/mpx.c | 3 +--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 6 +++++-
+ drivers/gpu/drm/i915/i915_gem_userptr.c | 6 +++++-
+ drivers/gpu/drm/radeon/radeon_ttm.c | 2 +-
+ drivers/gpu/drm/via/via_dmablit.c | 4 ++--
+ drivers/infiniband/core/umem.c | 6 +++++-
+ drivers/infiniband/core/umem_odp.c | 7 +++++--
+ drivers/infiniband/hw/mthca/mthca_memfree.c | 4 ++--
+ drivers/infiniband/hw/qib/qib_user_pages.c | 3 ++-
+ drivers/infiniband/hw/usnic/usnic_uiom.c | 5 ++++-
+ drivers/media/v4l2-core/videobuf-dma-sg.c | 7 +++++--
+ drivers/misc/mic/scif/scif_rma.c | 3 +--
+ drivers/misc/sgi-gru/grufault.c | 2 +-
+ drivers/staging/rdma/hfi1/user_pages.c | 2 +-
+ drivers/staging/rdma/ipath/ipath_user_pages.c | 2 +-
+ drivers/virt/fsl_hypervisor.c | 4 ++--
+ fs/exec.c | 9 +++++++--
+ include/linux/mm.h | 2 +-
+ kernel/events/uprobes.c | 4 ++--
+ mm/gup.c | 15 +++++----------
+ mm/memory.c | 6 +++++-
+ mm/mempolicy.c | 2 +-
+ mm/nommu.c | 18 ++++--------------
+ security/tomoyo/domain.c | 3 ++-
+ 26 files changed, 72 insertions(+), 59 deletions(-)
+
+--- a/arch/cris/arch-v32/drivers/cryptocop.c
++++ b/arch/cris/arch-v32/drivers/cryptocop.c
+@@ -2724,7 +2724,6 @@ static int cryptocop_ioctl_process(struc
+ (unsigned long int)(oper.indata + prev_ix),
+ noinpages,
+ 0, /* read access only for in data */
+- 0, /* no force */
+ inpages,
+ NULL);
+
+@@ -2740,8 +2739,7 @@ static int cryptocop_ioctl_process(struc
+ current->mm,
+ (unsigned long int)oper.cipher_outdata,
+ nooutpages,
+- 1, /* write access for out data */
+- 0, /* no force */
++ FOLL_WRITE, /* write access for out data */
+ outpages,
+ NULL);
+ up_read(&current->mm->mmap_sem);
+--- a/arch/ia64/kernel/err_inject.c
++++ b/arch/ia64/kernel/err_inject.c
+@@ -143,7 +143,7 @@ store_virtual_to_phys(struct device *dev
+ int ret;
+
+ ret = get_user_pages(current, current->mm, virt_addr,
+- 1, VM_READ, 0, NULL, NULL);
++ 1, FOLL_WRITE, NULL, NULL);
+ if (ret<=0) {
+ #ifdef ERR_INJ_DEBUG
+ printk("Virtual address %lx is not existing.\n",virt_addr);
+--- a/arch/x86/mm/mpx.c
++++ b/arch/x86/mm/mpx.c
+@@ -536,10 +536,9 @@ static int mpx_resolve_fault(long __user
+ {
+ long gup_ret;
+ int nr_pages = 1;
+- int force = 0;
+
+ gup_ret = get_user_pages(current, current->mm, (unsigned long)addr,
+- nr_pages, write, force, NULL, NULL);
++ nr_pages, write ? FOLL_WRITE : 0, NULL, NULL);
+ /*
+ * get_user_pages() returns number of pages gotten.
+ * 0 means we failed to fault in and get anything,
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -496,9 +496,13 @@ static int amdgpu_ttm_tt_pin_userptr(str
+ int r;
+
+ int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
++ unsigned int flags = 0;
+ enum dma_data_direction direction = write ?
+ DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+
++ if (write)
++ flags |= FOLL_WRITE;
++
+ if (current->mm != gtt->usermm)
+ return -EPERM;
+
+@@ -519,7 +523,7 @@ static int amdgpu_ttm_tt_pin_userptr(str
+ struct page **pages = ttm->pages + pinned;
+
+ r = get_user_pages(current, current->mm, userptr, num_pages,
+- write, 0, pages, NULL);
++ flags, pages, NULL);
+ if (r < 0)
+ goto release_pages;
+
+--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
++++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
+@@ -581,13 +581,17 @@ __i915_gem_userptr_get_pages_worker(stru
+ pvec = drm_malloc_ab(npages, sizeof(struct page *));
+ if (pvec != NULL) {
+ struct mm_struct *mm = obj->userptr.mm->mm;
++ unsigned int flags = 0;
++
++ if (!obj->userptr.read_only)
++ flags |= FOLL_WRITE;
+
+ down_read(&mm->mmap_sem);
+ while (pinned < npages) {
+ ret = get_user_pages(work->task, mm,
+ obj->userptr.ptr + pinned * PAGE_SIZE,
+ npages - pinned,
+- !obj->userptr.read_only, 0,
++ flags,
+ pvec + pinned, NULL);
+ if (ret < 0)
+ break;
+--- a/drivers/gpu/drm/radeon/radeon_ttm.c
++++ b/drivers/gpu/drm/radeon/radeon_ttm.c
+@@ -557,7 +557,7 @@ static int radeon_ttm_tt_pin_userptr(str
+ struct page **pages = ttm->pages + pinned;
+
+ r = get_user_pages(current, current->mm, userptr, num_pages,
+- write, 0, pages, NULL);
++ write ? FOLL_WRITE : 0, pages, NULL);
+ if (r < 0)
+ goto release_pages;
+
+--- a/drivers/gpu/drm/via/via_dmablit.c
++++ b/drivers/gpu/drm/via/via_dmablit.c
+@@ -242,8 +242,8 @@ via_lock_all_dma_pages(drm_via_sg_info_t
+ ret = get_user_pages(current, current->mm,
+ (unsigned long)xfer->mem_addr,
+ vsg->num_pages,
+- (vsg->direction == DMA_FROM_DEVICE),
+- 0, vsg->pages, NULL);
++ (vsg->direction == DMA_FROM_DEVICE) ? FOLL_WRITE : 0,
++ vsg->pages, NULL);
+
+ up_read(&current->mm->mmap_sem);
+ if (ret != vsg->num_pages) {
+--- a/drivers/infiniband/core/umem.c
++++ b/drivers/infiniband/core/umem.c
+@@ -95,6 +95,7 @@ struct ib_umem *ib_umem_get(struct ib_uc
+ DEFINE_DMA_ATTRS(attrs);
+ struct scatterlist *sg, *sg_list_start;
+ int need_release = 0;
++ unsigned int gup_flags = FOLL_WRITE;
+
+ if (dmasync)
+ dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
+@@ -177,6 +178,9 @@ struct ib_umem *ib_umem_get(struct ib_uc
+ if (ret)
+ goto out;
+
++ if (!umem->writable)
++ gup_flags |= FOLL_FORCE;
++
+ need_release = 1;
+ sg_list_start = umem->sg_head.sgl;
+
+@@ -184,7 +188,7 @@ struct ib_umem *ib_umem_get(struct ib_uc
+ ret = get_user_pages(current, current->mm, cur_base,
+ min_t(unsigned long, npages,
+ PAGE_SIZE / sizeof (struct page *)),
+- 1, !umem->writable, page_list, vma_list);
++ gup_flags, page_list, vma_list);
+
+ if (ret < 0)
+ goto out;
+--- a/drivers/infiniband/core/umem_odp.c
++++ b/drivers/infiniband/core/umem_odp.c
+@@ -527,6 +527,7 @@ int ib_umem_odp_map_dma_pages(struct ib_
+ u64 off;
+ int j, k, ret = 0, start_idx, npages = 0;
+ u64 base_virt_addr;
++ unsigned int flags = 0;
+
+ if (access_mask == 0)
+ return -EINVAL;
+@@ -556,6 +557,9 @@ int ib_umem_odp_map_dma_pages(struct ib_
+ goto out_put_task;
+ }
+
++ if (access_mask & ODP_WRITE_ALLOWED_BIT)
++ flags |= FOLL_WRITE;
++
+ start_idx = (user_virt - ib_umem_start(umem)) >> PAGE_SHIFT;
+ k = start_idx;
+
+@@ -574,8 +578,7 @@ int ib_umem_odp_map_dma_pages(struct ib_
+ */
+ npages = get_user_pages(owning_process, owning_mm, user_virt,
+ gup_num_pages,
+- access_mask & ODP_WRITE_ALLOWED_BIT, 0,
+- local_page_list, NULL);
++ flags, local_page_list, NULL);
+ up_read(&owning_mm->mmap_sem);
+
+ if (npages < 0)
+--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
++++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
+@@ -472,8 +472,8 @@ int mthca_map_user_db(struct mthca_dev *
+ goto out;
+ }
+
+- ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK, 1, 1, 0,
+- pages, NULL);
++ ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK, 1,
++ FOLL_WRITE, pages, NULL);
+ if (ret < 0)
+ goto out;
+
+--- a/drivers/infiniband/hw/qib/qib_user_pages.c
++++ b/drivers/infiniband/hw/qib/qib_user_pages.c
+@@ -68,7 +68,8 @@ static int __qib_get_user_pages(unsigned
+ for (got = 0; got < num_pages; got += ret) {
+ ret = get_user_pages(current, current->mm,
+ start_page + got * PAGE_SIZE,
+- num_pages - got, 1, 1,
++ num_pages - got,
++ FOLL_WRITE | FOLL_FORCE,
+ p + got, NULL);
+ if (ret < 0)
+ goto bail_release;
+--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
++++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
+@@ -113,6 +113,7 @@ static int usnic_uiom_get_pages(unsigned
+ int flags;
+ dma_addr_t pa;
+ DEFINE_DMA_ATTRS(attrs);
++ unsigned int gup_flags;
+
+ if (dmasync)
+ dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
+@@ -140,6 +141,8 @@ static int usnic_uiom_get_pages(unsigned
+
+ flags = IOMMU_READ | IOMMU_CACHE;
+ flags |= (writable) ? IOMMU_WRITE : 0;
++ gup_flags = FOLL_WRITE;
++ gup_flags |= (writable) ? 0 : FOLL_FORCE;
+ cur_base = addr & PAGE_MASK;
+ ret = 0;
+
+@@ -147,7 +150,7 @@ static int usnic_uiom_get_pages(unsigned
+ ret = get_user_pages(current, current->mm, cur_base,
+ min_t(unsigned long, npages,
+ PAGE_SIZE / sizeof(struct page *)),
+- 1, !writable, page_list, NULL);
++ gup_flags, page_list, NULL);
+
+ if (ret < 0)
+ goto out;
+--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
++++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
+@@ -156,6 +156,7 @@ static int videobuf_dma_init_user_locked
+ {
+ unsigned long first, last;
+ int err, rw = 0;
++ unsigned int flags = FOLL_FORCE;
+
+ dma->direction = direction;
+ switch (dma->direction) {
+@@ -178,13 +179,15 @@ static int videobuf_dma_init_user_locked
+ if (NULL == dma->pages)
+ return -ENOMEM;
+
++ if (rw == READ)
++ flags |= FOLL_WRITE;
++
+ dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
+ data, size, dma->nr_pages);
+
+ err = get_user_pages(current, current->mm,
+ data & PAGE_MASK, dma->nr_pages,
+- rw == READ, 1, /* force */
+- dma->pages, NULL);
++ flags, dma->pages, NULL);
+
+ if (err != dma->nr_pages) {
+ dma->nr_pages = (err >= 0) ? err : 0;
+--- a/drivers/misc/mic/scif/scif_rma.c
++++ b/drivers/misc/mic/scif/scif_rma.c
+@@ -1398,8 +1398,7 @@ retry:
+ mm,
+ (u64)addr,
+ nr_pages,
+- !!(prot & SCIF_PROT_WRITE),
+- 0,
++ (prot & SCIF_PROT_WRITE) ? FOLL_WRITE : 0,
+ pinned_pages->pages,
+ NULL);
+ up_write(&mm->mmap_sem);
+--- a/drivers/misc/sgi-gru/grufault.c
++++ b/drivers/misc/sgi-gru/grufault.c
+@@ -199,7 +199,7 @@ static int non_atomic_pte_lookup(struct
+ *pageshift = PAGE_SHIFT;
+ #endif
+ if (get_user_pages
+- (current, current->mm, vaddr, 1, write, 0, &page, NULL) <= 0)
++ (current, current->mm, vaddr, 1, write ? FOLL_WRITE : 0, &page, NULL) <= 0)
+ return -EFAULT;
+ *paddr = page_to_phys(page);
+ put_page(page);
+--- a/drivers/staging/rdma/hfi1/user_pages.c
++++ b/drivers/staging/rdma/hfi1/user_pages.c
+@@ -85,7 +85,7 @@ static int __hfi1_get_user_pages(unsigne
+ for (got = 0; got < num_pages; got += ret) {
+ ret = get_user_pages(current, current->mm,
+ start_page + got * PAGE_SIZE,
+- num_pages - got, 1, 1,
++ num_pages - got, FOLL_WRITE | FOLL_FORCE,
+ p + got, NULL);
+ if (ret < 0)
+ goto bail_release;
+--- a/drivers/staging/rdma/ipath/ipath_user_pages.c
++++ b/drivers/staging/rdma/ipath/ipath_user_pages.c
+@@ -72,7 +72,7 @@ static int __ipath_get_user_pages(unsign
+ for (got = 0; got < num_pages; got += ret) {
+ ret = get_user_pages(current, current->mm,
+ start_page + got * PAGE_SIZE,
+- num_pages - got, 1, 1,
++ num_pages - got, FOLL_WRITE | FOLL_FORCE,
+ p + got, NULL);
+ if (ret < 0)
+ goto bail_release;
+--- a/drivers/virt/fsl_hypervisor.c
++++ b/drivers/virt/fsl_hypervisor.c
+@@ -246,8 +246,8 @@ static long ioctl_memcpy(struct fsl_hv_i
+ down_read(&current->mm->mmap_sem);
+ num_pinned = get_user_pages(current, current->mm,
+ param.local_vaddr - lb_offset, num_pages,
+- (param.source == -1) ? READ : WRITE,
+- 0, pages, NULL);
++ (param.source == -1) ? 0 : FOLL_WRITE,
++ pages, NULL);
+ up_read(&current->mm->mmap_sem);
+
+ if (num_pinned != num_pages) {
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -191,6 +191,7 @@ static struct page *get_arg_page(struct
+ {
+ struct page *page;
+ int ret;
++ unsigned int gup_flags = FOLL_FORCE;
+
+ #ifdef CONFIG_STACK_GROWSUP
+ if (write) {
+@@ -199,8 +200,12 @@ static struct page *get_arg_page(struct
+ return NULL;
+ }
+ #endif
+- ret = get_user_pages(current, bprm->mm, pos,
+- 1, write, 1, &page, NULL);
++
++ if (write)
++ gup_flags |= FOLL_WRITE;
++
++ ret = get_user_pages(current, bprm->mm, pos, 1, gup_flags,
++ &page, NULL);
+ if (ret <= 0)
+ return NULL;
+
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1199,7 +1199,7 @@ long __get_user_pages(struct task_struct
+ struct vm_area_struct **vmas, int *nonblocking);
+ long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+- int write, int force, struct page **pages,
++ unsigned int gup_flags, struct page **pages,
+ struct vm_area_struct **vmas);
+ long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+--- a/kernel/events/uprobes.c
++++ b/kernel/events/uprobes.c
+@@ -299,7 +299,7 @@ int uprobe_write_opcode(struct mm_struct
+
+ retry:
+ /* Read the page with vaddr into memory */
+- ret = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &old_page, &vma);
++ ret = get_user_pages(NULL, mm, vaddr, 1, FOLL_FORCE, &old_page, &vma);
+ if (ret <= 0)
+ return ret;
+
+@@ -1700,7 +1700,7 @@ static int is_trap_at_addr(struct mm_str
+ if (likely(result == 0))
+ goto out;
+
+- result = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &page, NULL);
++ result = get_user_pages(NULL, mm, vaddr, 1, FOLL_FORCE, &page, NULL);
+ if (result < 0)
+ return result;
+
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -854,18 +854,13 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
+ * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
+ */
+ long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+- unsigned long start, unsigned long nr_pages, int write,
+- int force, struct page **pages, struct vm_area_struct **vmas)
++ unsigned long start, unsigned long nr_pages,
++ unsigned int gup_flags, struct page **pages,
++ struct vm_area_struct **vmas)
+ {
+- unsigned int flags = FOLL_TOUCH;
+-
+- if (write)
+- flags |= FOLL_WRITE;
+- if (force)
+- flags |= FOLL_FORCE;
+-
+ return __get_user_pages_locked(tsk, mm, start, nr_pages,
+- pages, vmas, NULL, false, flags);
++ pages, vmas, NULL, false,
++ gup_flags | FOLL_TOUCH);
+ }
+ EXPORT_SYMBOL(get_user_pages);
+
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3715,6 +3715,10 @@ static int __access_remote_vm(struct tas
+ {
+ struct vm_area_struct *vma;
+ void *old_buf = buf;
++ unsigned int flags = FOLL_FORCE;
++
++ if (write)
++ flags |= FOLL_WRITE;
+
+ down_read(&mm->mmap_sem);
+ /* ignore errors, just check how much was successfully transferred */
+@@ -3724,7 +3728,7 @@ static int __access_remote_vm(struct tas
+ struct page *page = NULL;
+
+ ret = get_user_pages(tsk, mm, addr, 1,
+- write, 1, &page, &vma);
++ flags, &page, &vma);
+ if (ret <= 0) {
+ #ifndef CONFIG_HAVE_IOREMAP_PROT
+ break;
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -818,7 +818,7 @@ static int lookup_node(struct mm_struct
+ struct page *p;
+ int err;
+
+- err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
++ err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, &p, NULL);
+ if (err >= 0) {
+ err = page_to_nid(p);
+ put_page(p);
+--- a/mm/nommu.c
++++ b/mm/nommu.c
+@@ -184,18 +184,11 @@ finish_or_fault:
+ */
+ long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+- int write, int force, struct page **pages,
++ unsigned int gup_flags, struct page **pages,
+ struct vm_area_struct **vmas)
+ {
+- int flags = 0;
+-
+- if (write)
+- flags |= FOLL_WRITE;
+- if (force)
+- flags |= FOLL_FORCE;
+-
+- return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
+- NULL);
++ return __get_user_pages(tsk, mm, start, nr_pages,
++ gup_flags, pages, vmas, NULL);
+ }
+ EXPORT_SYMBOL(get_user_pages);
+
+@@ -204,10 +197,7 @@ long get_user_pages_locked(struct task_s
+ unsigned int gup_flags, struct page **pages,
+ int *locked)
+ {
+- int write = gup_flags & FOLL_WRITE;
+- int force = gup_flags & FOLL_FORCE;
+-
+- return get_user_pages(tsk, mm, start, nr_pages, write, force,
++ return get_user_pages(tsk, mm, start, nr_pages, gup_flags,
+ pages, NULL);
+ }
+ EXPORT_SYMBOL(get_user_pages_locked);
+--- a/security/tomoyo/domain.c
++++ b/security/tomoyo/domain.c
+@@ -874,7 +874,8 @@ bool tomoyo_dump_page(struct linux_binpr
+ }
+ /* Same with get_arg_page(bprm, pos, 0) in fs/exec.c */
+ #ifdef CONFIG_MMU
+- if (get_user_pages(current, bprm->mm, pos, 1, 0, 1, &page, NULL) <= 0)
++ if (get_user_pages(current, bprm->mm, pos, 1,
++ FOLL_FORCE, &page, NULL) <= 0)
+ return false;
+ #else
+ page = bprm->page[pos / PAGE_SIZE];
--- /dev/null
+From foo@baz Thu Dec 13 21:38:45 CET 2018
+From: Lorenzo Stoakes <lstoakes@gmail.com>
+Date: Thu, 13 Oct 2016 01:20:14 +0100
+Subject: mm: replace get_user_pages_locked() write/force parameters with gup_flags
+
+From: Lorenzo Stoakes <lstoakes@gmail.com>
+
+commit 3b913179c3fa89dd0e304193fa0c746fc0481447 upstream.
+
+This removes the 'write' and 'force' use from get_user_pages_locked()
+and replaces them with 'gup_flags' to make the use of FOLL_FORCE
+explicit in callers as use of this flag can result in surprising
+behaviour (and hence bugs) within the mm subsystem.
+
+Signed-off-by: Lorenzo Stoakes <lstoakes@gmail.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[bwh: Backported to 4.4: adjust context]
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/mm.h | 3 +--
+ mm/frame_vector.c | 8 +++++++-
+ mm/gup.c | 12 +++---------
+ mm/nommu.c | 5 ++++-
+ 4 files changed, 15 insertions(+), 13 deletions(-)
+
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1203,8 +1203,7 @@ long get_user_pages(struct task_struct *
+ struct vm_area_struct **vmas);
+ long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+- int write, int force, struct page **pages,
+- int *locked);
++ unsigned int gup_flags, struct page **pages, int *locked);
+ long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+ struct page **pages, unsigned int gup_flags);
+--- a/mm/frame_vector.c
++++ b/mm/frame_vector.c
+@@ -41,10 +41,16 @@ int get_vaddr_frames(unsigned long start
+ int ret = 0;
+ int err;
+ int locked;
++ unsigned int gup_flags = 0;
+
+ if (nr_frames == 0)
+ return 0;
+
++ if (write)
++ gup_flags |= FOLL_WRITE;
++ if (force)
++ gup_flags |= FOLL_FORCE;
++
+ if (WARN_ON_ONCE(nr_frames > vec->nr_allocated))
+ nr_frames = vec->nr_allocated;
+
+@@ -59,7 +65,7 @@ int get_vaddr_frames(unsigned long start
+ vec->got_ref = true;
+ vec->is_pfns = false;
+ ret = get_user_pages_locked(current, mm, start, nr_frames,
+- write, force, (struct page **)(vec->ptrs), &locked);
++ gup_flags, (struct page **)(vec->ptrs), &locked);
+ goto out;
+ }
+
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -737,18 +737,12 @@ static __always_inline long __get_user_p
+ */
+ long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+- int write, int force, struct page **pages,
++ unsigned int gup_flags, struct page **pages,
+ int *locked)
+ {
+- unsigned int flags = FOLL_TOUCH;
+-
+- if (write)
+- flags |= FOLL_WRITE;
+- if (force)
+- flags |= FOLL_FORCE;
+-
+ return __get_user_pages_locked(tsk, mm, start, nr_pages,
+- pages, NULL, locked, true, flags);
++ pages, NULL, locked, true,
++ gup_flags | FOLL_TOUCH);
+ }
+ EXPORT_SYMBOL(get_user_pages_locked);
+
+--- a/mm/nommu.c
++++ b/mm/nommu.c
+@@ -201,9 +201,12 @@ EXPORT_SYMBOL(get_user_pages);
+
+ long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+- int write, int force, struct page **pages,
++ unsigned int gup_flags, struct page **pages,
+ int *locked)
+ {
++ int write = gup_flags & FOLL_WRITE;
++ int force = gup_flags & FOLL_FORCE;
++
+ return get_user_pages(tsk, mm, start, nr_pages, write, force,
+ pages, NULL);
+ }
--- /dev/null
+From foo@baz Thu Dec 13 21:38:45 CET 2018
+From: Lorenzo Stoakes <lstoakes@gmail.com>
+Date: Thu, 13 Oct 2016 01:20:13 +0100
+Subject: mm: replace get_user_pages_unlocked() write/force parameters with gup_flags
+
+From: Lorenzo Stoakes <lstoakes@gmail.com>
+
+commit c164154f66f0c9b02673f07aa4f044f1d9c70274 upstream.
+
+This removes the 'write' and 'force' use from get_user_pages_unlocked()
+and replaces them with 'gup_flags' to make the use of FOLL_FORCE
+explicit in callers as use of this flag can result in surprising
+behaviour (and hence bugs) within the mm subsystem.
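+
+Note that the page array argument now comes before the new 'gup_flags'
+argument, so a typical fast-GUP fallback (see arch/x86/mm/gup.c below)
+becomes:
+
+	ret = get_user_pages_unlocked(current, mm, start,
+				      (end - start) >> PAGE_SHIFT,
+				      pages, write ? FOLL_WRITE : 0);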
+
+Signed-off-by: Lorenzo Stoakes <lstoakes@gmail.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[bwh: Backported to 4.4:
+ - Also update calls from process_vm_rw_single_vec() and async_pf_execute()
+ - Adjust context]
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/mips/mm/gup.c | 2 +-
+ arch/s390/mm/gup.c | 2 +-
+ arch/sh/mm/gup.c | 3 ++-
+ arch/sparc/mm/gup.c | 3 ++-
+ arch/x86/mm/gup.c | 2 +-
+ drivers/media/pci/ivtv/ivtv-udma.c | 3 ++-
+ drivers/media/pci/ivtv/ivtv-yuv.c | 8 ++++----
+ drivers/scsi/st.c | 5 ++---
+ drivers/video/fbdev/pvr2fb.c | 2 +-
+ include/linux/mm.h | 2 +-
+ mm/gup.c | 14 ++++----------
+ mm/nommu.c | 11 ++---------
+ mm/process_vm_access.c | 6 +++++-
+ mm/util.c | 2 +-
+ net/ceph/pagevec.c | 2 +-
+ virt/kvm/async_pf.c | 2 +-
+ 16 files changed, 31 insertions(+), 38 deletions(-)
+
+--- a/arch/mips/mm/gup.c
++++ b/arch/mips/mm/gup.c
+@@ -303,7 +303,7 @@ slow_irqon:
+
+ ret = get_user_pages_unlocked(current, mm, start,
+ (end - start) >> PAGE_SHIFT,
+- write, 0, pages);
++ pages, write ? FOLL_WRITE : 0);
+
+ /* Have to be a bit careful with return values */
+ if (nr > 0) {
+--- a/arch/s390/mm/gup.c
++++ b/arch/s390/mm/gup.c
+@@ -242,7 +242,7 @@ int get_user_pages_fast(unsigned long st
+ start += nr << PAGE_SHIFT;
+ pages += nr;
+ ret = get_user_pages_unlocked(current, mm, start,
+- nr_pages - nr, write, 0, pages);
++ nr_pages - nr, pages, write ? FOLL_WRITE : 0);
+ /* Have to be a bit careful with return values */
+ if (nr > 0)
+ ret = (ret < 0) ? nr : ret + nr;
+--- a/arch/sh/mm/gup.c
++++ b/arch/sh/mm/gup.c
+@@ -258,7 +258,8 @@ slow_irqon:
+ pages += nr;
+
+ ret = get_user_pages_unlocked(current, mm, start,
+- (end - start) >> PAGE_SHIFT, write, 0, pages);
++ (end - start) >> PAGE_SHIFT, pages,
++ write ? FOLL_WRITE : 0);
+
+ /* Have to be a bit careful with return values */
+ if (nr > 0) {
+--- a/arch/sparc/mm/gup.c
++++ b/arch/sparc/mm/gup.c
+@@ -250,7 +250,8 @@ slow:
+ pages += nr;
+
+ ret = get_user_pages_unlocked(current, mm, start,
+- (end - start) >> PAGE_SHIFT, write, 0, pages);
++ (end - start) >> PAGE_SHIFT, pages,
++ write ? FOLL_WRITE : 0);
+
+ /* Have to be a bit careful with return values */
+ if (nr > 0) {
+--- a/arch/x86/mm/gup.c
++++ b/arch/x86/mm/gup.c
+@@ -388,7 +388,7 @@ slow_irqon:
+
+ ret = get_user_pages_unlocked(current, mm, start,
+ (end - start) >> PAGE_SHIFT,
+- write, 0, pages);
++ pages, write ? FOLL_WRITE : 0);
+
+ /* Have to be a bit careful with return values */
+ if (nr > 0) {
+--- a/drivers/media/pci/ivtv/ivtv-udma.c
++++ b/drivers/media/pci/ivtv/ivtv-udma.c
+@@ -125,7 +125,8 @@ int ivtv_udma_setup(struct ivtv *itv, un
+
+ /* Get user pages for DMA Xfer */
+ err = get_user_pages_unlocked(current, current->mm,
+- user_dma.uaddr, user_dma.page_count, 0, 1, dma->map);
++ user_dma.uaddr, user_dma.page_count, dma->map,
++ FOLL_FORCE);
+
+ if (user_dma.page_count != err) {
+ IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",
+--- a/drivers/media/pci/ivtv/ivtv-yuv.c
++++ b/drivers/media/pci/ivtv/ivtv-yuv.c
+@@ -76,13 +76,13 @@ static int ivtv_yuv_prep_user_dma(struct
+
+ /* Get user pages for DMA Xfer */
+ y_pages = get_user_pages_unlocked(current, current->mm,
+- y_dma.uaddr, y_dma.page_count, 0, 1,
+- &dma->map[0]);
++ y_dma.uaddr, y_dma.page_count,
++ &dma->map[0], FOLL_FORCE);
+ uv_pages = 0; /* silence gcc. value is set and consumed only if: */
+ if (y_pages == y_dma.page_count) {
+ uv_pages = get_user_pages_unlocked(current, current->mm,
+- uv_dma.uaddr, uv_dma.page_count, 0, 1,
+- &dma->map[y_pages]);
++ uv_dma.uaddr, uv_dma.page_count,
++ &dma->map[y_pages], FOLL_FORCE);
+ }
+
+ if (y_pages != y_dma.page_count || uv_pages != uv_dma.page_count) {
+--- a/drivers/scsi/st.c
++++ b/drivers/scsi/st.c
+@@ -4821,9 +4821,8 @@ static int sgl_map_user_pages(struct st_
+ current->mm,
+ uaddr,
+ nr_pages,
+- rw == READ,
+- 0, /* don't force */
+- pages);
++ pages,
++ rw == READ ? FOLL_WRITE : 0); /* don't force */
+
+ /* Errors and no page mapped should return here */
+ if (res < nr_pages)
+--- a/drivers/video/fbdev/pvr2fb.c
++++ b/drivers/video/fbdev/pvr2fb.c
+@@ -687,7 +687,7 @@ static ssize_t pvr2fb_write(struct fb_in
+ return -ENOMEM;
+
+ ret = get_user_pages_unlocked(current, current->mm, (unsigned long)buf,
+- nr_pages, WRITE, 0, pages);
++ nr_pages, pages, FOLL_WRITE);
+
+ if (ret < nr_pages) {
+ nr_pages = ret;
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1210,7 +1210,7 @@ long __get_user_pages_unlocked(struct ta
+ struct page **pages, unsigned int gup_flags);
+ long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+- int write, int force, struct page **pages);
++ struct page **pages, unsigned int gup_flags);
+ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ struct page **pages);
+
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -797,17 +797,10 @@ EXPORT_SYMBOL(__get_user_pages_unlocked)
+ */
+ long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+- int write, int force, struct page **pages)
++ struct page **pages, unsigned int gup_flags)
+ {
+- unsigned int flags = FOLL_TOUCH;
+-
+- if (write)
+- flags |= FOLL_WRITE;
+- if (force)
+- flags |= FOLL_FORCE;
+-
+ return __get_user_pages_unlocked(tsk, mm, start, nr_pages,
+- pages, flags);
++ pages, gup_flags | FOLL_TOUCH);
+ }
+ EXPORT_SYMBOL(get_user_pages_unlocked);
+
+@@ -1427,7 +1420,8 @@ int get_user_pages_fast(unsigned long st
+ pages += nr;
+
+ ret = get_user_pages_unlocked(current, mm, start,
+- nr_pages - nr, write, 0, pages);
++ nr_pages - nr, pages,
++ write ? FOLL_WRITE : 0);
+
+ /* Have to be a bit careful with return values */
+ if (nr > 0) {
+--- a/mm/nommu.c
++++ b/mm/nommu.c
+@@ -224,17 +224,10 @@ EXPORT_SYMBOL(__get_user_pages_unlocked)
+
+ long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+- int write, int force, struct page **pages)
++ struct page **pages, unsigned int gup_flags)
+ {
+- unsigned int flags = 0;
+-
+- if (write)
+- flags |= FOLL_WRITE;
+- if (force)
+- flags |= FOLL_FORCE;
+-
+ return __get_user_pages_unlocked(tsk, mm, start, nr_pages,
+- pages, flags);
++ pages, gup_flags);
+ }
+ EXPORT_SYMBOL(get_user_pages_unlocked);
+
+--- a/mm/process_vm_access.c
++++ b/mm/process_vm_access.c
+@@ -88,19 +88,23 @@ static int process_vm_rw_single_vec(unsi
+ ssize_t rc = 0;
+ unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
+ / sizeof(struct pages *);
++ unsigned int flags = 0;
+
+ /* Work out address and page range required */
+ if (len == 0)
+ return 0;
+ nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
+
++ if (vm_write)
++ flags |= FOLL_WRITE;
++
+ while (!rc && nr_pages && iov_iter_count(iter)) {
+ int pages = min(nr_pages, max_pages_per_loop);
+ size_t bytes;
+
+ /* Get the pages we're interested in */
+ pages = get_user_pages_unlocked(task, mm, pa, pages,
+- vm_write, 0, process_pages);
++ process_pages, flags);
+ if (pages <= 0)
+ return -EFAULT;
+
+--- a/mm/util.c
++++ b/mm/util.c
+@@ -278,7 +278,7 @@ int __weak get_user_pages_fast(unsigned
+ {
+ struct mm_struct *mm = current->mm;
+ return get_user_pages_unlocked(current, mm, start, nr_pages,
+- write, 0, pages);
++ pages, write ? FOLL_WRITE : 0);
+ }
+ EXPORT_SYMBOL_GPL(get_user_pages_fast);
+
+--- a/net/ceph/pagevec.c
++++ b/net/ceph/pagevec.c
+@@ -26,7 +26,7 @@ struct page **ceph_get_direct_page_vecto
+ while (got < num_pages) {
+ rc = get_user_pages_unlocked(current, current->mm,
+ (unsigned long)data + ((unsigned long)got * PAGE_SIZE),
+- num_pages - got, write_page, 0, pages + got);
++ num_pages - got, pages + got, write_page ? FOLL_WRITE : 0);
+ if (rc < 0)
+ break;
+ BUG_ON(rc == 0);
+--- a/virt/kvm/async_pf.c
++++ b/virt/kvm/async_pf.c
+@@ -80,7 +80,7 @@ static void async_pf_execute(struct work
+
+ might_sleep();
+
+- get_user_pages_unlocked(NULL, mm, addr, 1, 1, 0, NULL);
++ get_user_pages_unlocked(NULL, mm, addr, 1, NULL, FOLL_WRITE);
+ kvm_async_page_present_sync(vcpu, apf);
+
+ spin_lock(&vcpu->async_pf.lock);
--- /dev/null
+From foo@baz Thu Dec 13 21:38:45 CET 2018
+From: Lorenzo Stoakes <lstoakes@gmail.com>
+Date: Thu, 13 Oct 2016 01:20:15 +0100
+Subject: mm: replace get_vaddr_frames() write/force parameters with gup_flags
+
+From: Lorenzo Stoakes <lstoakes@gmail.com>
+
+commit 7f23b3504a0df63b724180262c5f3f117f21bcae upstream.
+
+This removes the 'write' and 'force' from get_vaddr_frames() and
+replaces them with 'gup_flags' to make the use of FOLL_FORCE explicit in
+callers as use of this flag can result in surprising behaviour (and
+hence bugs) within the mm subsystem.
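+
+For example, the exynos G2D call (see below) changes from
+
+	get_vaddr_frames(start, npages, true, true, g2d_userptr->vec);
+
+to
+
+	get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
+			 g2d_userptr->vec);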
+
+Signed-off-by: Lorenzo Stoakes <lstoakes@gmail.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/exynos/exynos_drm_g2d.c | 3 ++-
+ drivers/media/platform/omap/omap_vout.c | 2 +-
+ drivers/media/v4l2-core/videobuf2-memops.c | 6 +++++-
+ include/linux/mm.h | 2 +-
+ mm/frame_vector.c | 13 ++-----------
+ 5 files changed, 11 insertions(+), 15 deletions(-)
+
+--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+@@ -471,7 +471,8 @@ static dma_addr_t *g2d_userptr_get_dma_a
+ goto err_free;
+ }
+
+- ret = get_vaddr_frames(start, npages, true, true, g2d_userptr->vec);
++ ret = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
++ g2d_userptr->vec);
+ if (ret != npages) {
+ DRM_ERROR("failed to get user pages from userptr.\n");
+ if (ret < 0)
+--- a/drivers/media/platform/omap/omap_vout.c
++++ b/drivers/media/platform/omap/omap_vout.c
+@@ -214,7 +214,7 @@ static int omap_vout_get_userptr(struct
+ if (!vec)
+ return -ENOMEM;
+
+- ret = get_vaddr_frames(virtp, 1, true, false, vec);
++ ret = get_vaddr_frames(virtp, 1, FOLL_WRITE, vec);
+ if (ret != 1) {
+ frame_vector_destroy(vec);
+ return -EINVAL;
+--- a/drivers/media/v4l2-core/videobuf2-memops.c
++++ b/drivers/media/v4l2-core/videobuf2-memops.c
+@@ -42,6 +42,10 @@ struct frame_vector *vb2_create_framevec
+ unsigned long first, last;
+ unsigned long nr;
+ struct frame_vector *vec;
++ unsigned int flags = FOLL_FORCE;
++
++ if (write)
++ flags |= FOLL_WRITE;
+
+ first = start >> PAGE_SHIFT;
+ last = (start + length - 1) >> PAGE_SHIFT;
+@@ -49,7 +53,7 @@ struct frame_vector *vb2_create_framevec
+ vec = frame_vector_create(nr);
+ if (!vec)
+ return ERR_PTR(-ENOMEM);
+- ret = get_vaddr_frames(start & PAGE_MASK, nr, write, true, vec);
++ ret = get_vaddr_frames(start & PAGE_MASK, nr, flags, vec);
+ if (ret < 0)
+ goto out_destroy;
+ /* We accept only complete set of PFNs */
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1227,7 +1227,7 @@ struct frame_vector {
+ struct frame_vector *frame_vector_create(unsigned int nr_frames);
+ void frame_vector_destroy(struct frame_vector *vec);
+ int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
+- bool write, bool force, struct frame_vector *vec);
++ unsigned int gup_flags, struct frame_vector *vec);
+ void put_vaddr_frames(struct frame_vector *vec);
+ int frame_vector_to_pages(struct frame_vector *vec);
+ void frame_vector_to_pfns(struct frame_vector *vec);
+--- a/mm/frame_vector.c
++++ b/mm/frame_vector.c
+@@ -11,10 +11,7 @@
+ * get_vaddr_frames() - map virtual addresses to pfns
+ * @start: starting user address
+ * @nr_frames: number of pages / pfns from start to map
+- * @write: whether pages will be written to by the caller
+- * @force: whether to force write access even if user mapping is
+- * readonly. See description of the same argument of
+- get_user_pages().
++ * @gup_flags: flags modifying lookup behaviour
+ * @vec: structure which receives pages / pfns of the addresses mapped.
+ * It should have space for at least nr_frames entries.
+ *
+@@ -34,23 +31,17 @@
+ * This function takes care of grabbing mmap_sem as necessary.
+ */
+ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
+- bool write, bool force, struct frame_vector *vec)
++ unsigned int gup_flags, struct frame_vector *vec)
+ {
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ int ret = 0;
+ int err;
+ int locked;
+- unsigned int gup_flags = 0;
+
+ if (nr_frames == 0)
+ return 0;
+
+- if (write)
+- gup_flags |= FOLL_WRITE;
+- if (force)
+- gup_flags |= FOLL_FORCE;
+-
+ if (WARN_ON_ONCE(nr_frames > vec->nr_allocated))
+ nr_frames = vec->nr_allocated;
+
--- /dev/null
+From foo@baz Thu Dec 13 21:38:45 CET 2018
+From: Willy Tarreau <w@1wt.eu>
+Date: Fri, 11 May 2018 08:11:44 +0200
+Subject: proc: do not access cmdline nor environ from file-backed areas
+
+From: Willy Tarreau <w@1wt.eu>
+
+commit 7f7ccc2ccc2e70c6054685f5e3522efa81556830 upstream.
+
+proc_pid_cmdline_read() and environ_read() directly access the target
+process' VM to retrieve the command line and environment. If this
+process remaps these areas onto a file via mmap(), the requesting
+process may experience various issues such as extra delays if the
+underlying device is slow to respond.
+
+Let's simply refuse to access file-backed areas in these functions.
+For this we add a new FOLL_ANON gup flag that is passed to all calls
+to access_remote_vm(). The code already takes care of such failures
+(including unmapped areas). Accesses via /proc/pid/mem were not
+changed though.
+
+This was assigned CVE-2018-1120.
+
+Note for stable backports: the patch may apply to kernels prior to 4.11
+but silently miss one location; it must be checked that no call to
+access_remote_vm() keeps zero as the last argument.
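+
+That is, every access_remote_vm() call converted here must end up
+passing FOLL_ANON as the flags argument rather than 0, e.g. (from the
+fs/proc hunk below):
+
+	nr_read = access_remote_vm(mm, p, page, _count, FOLL_ANON);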
+
+Reported-by: Qualys Security Advisory <qsa@qualys.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Signed-off-by: Willy Tarreau <w@1wt.eu>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[bwh: Backported to 4.4:
+ - Update the extra call to access_remote_vm() from proc_pid_cmdline_read()
+ - Adjust context]
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/proc/base.c | 10 +++++-----
+ include/linux/mm.h | 1 +
+ mm/gup.c | 3 +++
+ 3 files changed, 9 insertions(+), 5 deletions(-)
+
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -254,7 +254,7 @@ static ssize_t proc_pid_cmdline_read(str
+ * Inherently racy -- command line shares address space
+ * with code and data.
+ */
+- rv = access_remote_vm(mm, arg_end - 1, &c, 1, 0);
++ rv = access_remote_vm(mm, arg_end - 1, &c, 1, FOLL_ANON);
+ if (rv <= 0)
+ goto out_free_page;
+
+@@ -272,7 +272,7 @@ static ssize_t proc_pid_cmdline_read(str
+ int nr_read;
+
+ _count = min3(count, len, PAGE_SIZE);
+- nr_read = access_remote_vm(mm, p, page, _count, 0);
++ nr_read = access_remote_vm(mm, p, page, _count, FOLL_ANON);
+ if (nr_read < 0)
+ rv = nr_read;
+ if (nr_read <= 0)
+@@ -307,7 +307,7 @@ static ssize_t proc_pid_cmdline_read(str
+ bool final;
+
+ _count = min3(count, len, PAGE_SIZE);
+- nr_read = access_remote_vm(mm, p, page, _count, 0);
++ nr_read = access_remote_vm(mm, p, page, _count, FOLL_ANON);
+ if (nr_read < 0)
+ rv = nr_read;
+ if (nr_read <= 0)
+@@ -356,7 +356,7 @@ skip_argv:
+ bool final;
+
+ _count = min3(count, len, PAGE_SIZE);
+- nr_read = access_remote_vm(mm, p, page, _count, 0);
++ nr_read = access_remote_vm(mm, p, page, _count, FOLL_ANON);
+ if (nr_read < 0)
+ rv = nr_read;
+ if (nr_read <= 0)
+@@ -1006,7 +1006,7 @@ static ssize_t environ_read(struct file
+ max_len = min_t(size_t, PAGE_SIZE, count);
+ this_len = min(max_len, this_len);
+
+- retval = access_remote_vm(mm, (env_start + src), page, this_len, 0);
++ retval = access_remote_vm(mm, (env_start + src), page, this_len, FOLL_ANON);
+
+ if (retval <= 0) {
+ ret = retval;
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -2120,6 +2120,7 @@ static inline struct page *follow_page(s
+ #define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */
+ #define FOLL_MLOCK 0x1000 /* lock present pages */
+ #define FOLL_COW 0x4000 /* internal GUP flag */
++#define FOLL_ANON 0x8000 /* don't do file mappings */
+
+ typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
+ void *data);
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -368,6 +368,9 @@ static int check_vma_flags(struct vm_are
+ if (vm_flags & (VM_IO | VM_PFNMAP))
+ return -EFAULT;
+
++ if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
++ return -EFAULT;
++
+ if (gup_flags & FOLL_WRITE) {
+ if (!(vm_flags & VM_WRITE)) {
+ if (!(gup_flags & FOLL_FORCE))
--- /dev/null
+From foo@baz Thu Dec 13 21:38:45 CET 2018
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Mon, 24 Oct 2016 19:00:44 -0700
+Subject: proc: don't use FOLL_FORCE for reading cmdline and environment
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit 272ddc8b37354c3fe111ab26d25e792629148eee upstream.
+
+Now that Lorenzo cleaned things up and made the FOLL_FORCE users
+explicit, it becomes obvious how some of them don't really need
+FOLL_FORCE at all.
+
+So remove FOLL_FORCE from the proc code that reads the command line and
+arguments from user space.
+
+The mem_rw() function actually does want FOLL_FORCE, because gdb (and
+possibly many other debuggers) use it as a much more convenient version
+of PTRACE_PEEKDATA, but we should consider making the FOLL_FORCE part
+conditional on the caller actually being a ptracer.  This patch does not
+do that; it just adds a comment to that effect and moves the gup_flags
+settings next to each other.
+
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/proc/base.c | 18 ++++++++----------
+ 1 file changed, 8 insertions(+), 10 deletions(-)
+
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -254,7 +254,7 @@ static ssize_t proc_pid_cmdline_read(str
+ * Inherently racy -- command line shares address space
+ * with code and data.
+ */
+- rv = access_remote_vm(mm, arg_end - 1, &c, 1, FOLL_FORCE);
++ rv = access_remote_vm(mm, arg_end - 1, &c, 1, 0);
+ if (rv <= 0)
+ goto out_free_page;
+
+@@ -272,8 +272,7 @@ static ssize_t proc_pid_cmdline_read(str
+ int nr_read;
+
+ _count = min3(count, len, PAGE_SIZE);
+- nr_read = access_remote_vm(mm, p, page, _count,
+- FOLL_FORCE);
++ nr_read = access_remote_vm(mm, p, page, _count, 0);
+ if (nr_read < 0)
+ rv = nr_read;
+ if (nr_read <= 0)
+@@ -308,8 +307,7 @@ static ssize_t proc_pid_cmdline_read(str
+ bool final;
+
+ _count = min3(count, len, PAGE_SIZE);
+- nr_read = access_remote_vm(mm, p, page, _count,
+- FOLL_FORCE);
++ nr_read = access_remote_vm(mm, p, page, _count, 0);
+ if (nr_read < 0)
+ rv = nr_read;
+ if (nr_read <= 0)
+@@ -358,8 +356,7 @@ skip_argv:
+ bool final;
+
+ _count = min3(count, len, PAGE_SIZE);
+- nr_read = access_remote_vm(mm, p, page, _count,
+- FOLL_FORCE);
++ nr_read = access_remote_vm(mm, p, page, _count, 0);
+ if (nr_read < 0)
+ rv = nr_read;
+ if (nr_read <= 0)
+@@ -871,7 +868,7 @@ static ssize_t mem_rw(struct file *file,
+ unsigned long addr = *ppos;
+ ssize_t copied;
+ char *page;
+- unsigned int flags = FOLL_FORCE;
++ unsigned int flags;
+
+ if (!mm)
+ return 0;
+@@ -884,6 +881,8 @@ static ssize_t mem_rw(struct file *file,
+ if (!atomic_inc_not_zero(&mm->mm_users))
+ goto free;
+
++ /* Maybe we should limit FOLL_FORCE to actual ptrace users? */
++ flags = FOLL_FORCE;
+ if (write)
+ flags |= FOLL_WRITE;
+
+@@ -1007,8 +1006,7 @@ static ssize_t environ_read(struct file
+ max_len = min_t(size_t, PAGE_SIZE, count);
+ this_len = min(max_len, this_len);
+
+- retval = access_remote_vm(mm, (env_start + src),
+- page, this_len, FOLL_FORCE);
++ retval = access_remote_vm(mm, (env_start + src), page, this_len, 0);
+
+ if (retval <= 0) {
+ ret = retval;
hugetlbfs-fix-bug-in-pgoff-overflow-checking.patch
swiotlb-clean-up-reporting.patch
sr-pass-down-correctly-sized-scsi-sense-buffer.patch
+mm-remove-write-force-parameters-from-__get_user_pages_locked.patch
+mm-remove-write-force-parameters-from-__get_user_pages_unlocked.patch
+mm-replace-get_user_pages_unlocked-write-force-parameters-with-gup_flags.patch
+mm-replace-get_user_pages_locked-write-force-parameters-with-gup_flags.patch
+mm-replace-get_vaddr_frames-write-force-parameters-with-gup_flags.patch
+mm-replace-get_user_pages-write-force-parameters-with-gup_flags.patch
+mm-replace-__access_remote_vm-write-parameter-with-gup_flags.patch
+mm-replace-access_remote_vm-write-parameter-with-gup_flags.patch
+proc-don-t-use-foll_force-for-reading-cmdline-and-environment.patch
+proc-do-not-access-cmdline-nor-environ-from-file-backed-areas.patch