--- /dev/null
+From 303681d5d538d81b5e23754515202b5b9febd2e9 Mon Sep 17 00:00:00 2001
+From: Keno Fischer <keno@juliacomputing.com>
+Date: Tue, 24 Jan 2017 15:17:48 -0800
+Subject: mm/huge_memory.c: respect FOLL_FORCE/FOLL_COW for thp
+
+From: Keno Fischer <keno@juliacomputing.com>
+
+commit 8310d48b125d19fcd9521d83b8293e63eb1646aa upstream.
+
+In commit 19be0eaffa3a ("mm: remove gup_flags FOLL_WRITE games from
+__get_user_pages()"), the mm code was changed from unsetting FOLL_WRITE
+after a COW was resolved to setting the (newly introduced) FOLL_COW
+instead. Simultaneously, the check in gup.c was updated to still allow
+writes with FOLL_FORCE set if FOLL_COW had also been set.
+
+However, a similar check in huge_memory.c was forgotten. As a result,
+remote memory writes to read-only regions of memory backed by transparent
+huge pages cause an infinite loop in the kernel (handle_mm_fault sets
+FOLL_COW and returns 0 causing a retry, but follow_trans_huge_pmd bails
+out immediately because `(flags & FOLL_WRITE) && !pmd_write(*pmd)` is
+true).
+
+While in this state the process is still SIGKILLable, but little else
+works (e.g. no ptrace attach, no other signals). This is easily
+reproduced with the following code (assuming transparent huge pages are
+set to "always"):
+
+ #include <assert.h>
+ #include <fcntl.h>
+ #include <stdint.h>
+ #include <stdio.h>
+ #include <string.h>
+ #include <sys/mman.h>
+ #include <sys/stat.h>
+ #include <sys/types.h>
+ #include <sys/wait.h>
+ #include <unistd.h>
+
+ #define TEST_SIZE 5 * 1024 * 1024
+
+ int main(void) {
+ int status;
+ pid_t child;
+ int fd = open("/proc/self/mem", O_RDWR);
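+    /*
+     * /proc/self is resolved at open() time, so this fd still refers to
+     * the parent's memory when it is used from the child below.
+     */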
+ void *addr = mmap(NULL, TEST_SIZE, PROT_READ,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
+ assert(addr != MAP_FAILED);
+ pid_t parent_pid = getpid();
+ if ((child = fork()) == 0) {
+ void *addr2 = mmap(NULL, TEST_SIZE, PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
+ assert(addr2 != MAP_FAILED);
+ memset(addr2, 'a', TEST_SIZE);
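+      /*
+       * Writes via /proc/<pid>/mem use FOLL_FORCE, so this is a forced
+       * write into the parent's read-only, THP-backed mapping. On an
+       * affected kernel the pwrite() never completes: the caller loops
+       * in the kernel and responds only to SIGKILL.
+       */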
+ pwrite(fd, addr2, TEST_SIZE, (uintptr_t)addr);
+ return 0;
+ }
+ assert(child == waitpid(child, &status, 0));
+ assert(WIFEXITED(status) && WEXITSTATUS(status) == 0);
+ return 0;
+ }
+
+Fix this by updating follow_trans_huge_pmd in huge_memory.c analogously
+to the update in gup.c in the original commit. The same pattern exists
+in follow_devmap_pmd. However, we should not be able to reach that
+check with FOLL_COW set, so add WARN_ONCE to make sure we notice if we
+ever do.
+
+[akpm@linux-foundation.org: coding-style fixes]
+Link: http://lkml.kernel.org/r/20170106015025.GA38411@juliacomputing.com
+Signed-off-by: Keno Fischer <keno@juliacomputing.com>
+Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Greg Thelen <gthelen@google.com>
+Cc: Nicholas Piggin <npiggin@gmail.com>
+Cc: Willy Tarreau <w@1wt.eu>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Hugh Dickins <hughd@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[bwh: Backported to 3.16:
+ - Drop change to follow_devmap_pmd()
+ - pmd_dirty() is not available; check the page flags as in older
+ backports of can_follow_write_pte()
+ - Adjust context]
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
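+For reference (not part of the change itself), the pte-level helper that
+the original commit added to gup.c looks roughly like this; the pmd-level
+helper introduced below mirrors it, except that the 3.16 backport tests
+PageAnon() on the page instead of the dirty bit, as noted in the backport
+comments above:
+
+	/*
+	 * FOLL_FORCE can write to even unwritable pte's, but only
+	 * after we've gone through a COW cycle and they are dirty.
+	 */
+	static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
+	{
+		return pte_write(pte) ||
+			((flags & FOLL_FORCE) && (flags & FOLL_COW) &&
+			 pte_dirty(pte));
+	}
+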
+ mm/huge_memory.c | 19 ++++++++++++++++---
+ 1 file changed, 16 insertions(+), 3 deletions(-)
+
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1269,6 +1269,18 @@ out_unlock:
+ return ret;
+ }
+
++/*
++ * FOLL_FORCE can write to even unwritable pmd's, but only
++ * after we've gone through a COW cycle and they are dirty.
++ */
++static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
++ unsigned int flags)
++{
++ return pmd_write(pmd) ||
++ ((flags & FOLL_FORCE) && (flags & FOLL_COW) &&
++ page && PageAnon(page));
++}
++
+ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
+ unsigned long addr,
+ pmd_t *pmd,
+@@ -1279,9 +1291,6 @@ struct page *follow_trans_huge_pmd(struc
+
+ assert_spin_locked(pmd_lockptr(mm, pmd));
+
+- if (flags & FOLL_WRITE && !pmd_write(*pmd))
+- goto out;
+-
+ /* Avoid dumping huge zero page */
+ if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
+ return ERR_PTR(-EFAULT);
+@@ -1292,6 +1301,10 @@ struct page *follow_trans_huge_pmd(struc
+
+ page = pmd_page(*pmd);
+ VM_BUG_ON_PAGE(!PageHead(page), page);
++
++ if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, page, flags))
++ goto out;
++
+ if (flags & FOLL_TOUCH) {
+ pmd_t _pmd;
+ /*
--- /dev/null
+From cf01fb9985e8deb25ccf0ea54d916b8871ae0e62 Mon Sep 17 00:00:00 2001
+From: Chris Salls <salls@cs.ucsb.edu>
+Date: Fri, 7 Apr 2017 23:48:11 -0700
+Subject: mm/mempolicy.c: fix error handling in set_mempolicy and mbind.
+
+From: Chris Salls <salls@cs.ucsb.edu>
+
+commit cf01fb9985e8deb25ccf0ea54d916b8871ae0e62 upstream.
+
+In the case that compat_get_bitmap() fails, we do not want to copy the
+bitmap to the user, as it would contain uninitialized kernel stack data
+and leak sensitive data.
+
+Signed-off-by: Chris Salls <salls@cs.ucsb.edu>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
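+As an illustrative sketch only (not part of the fix): a 32-bit binary
+running on a 64-bit kernel that calls set_mempolicy() with a nodemask
+enters the compat entry point and exercises the
+compat_get_bitmap()/copy_to_user() sequence changed below. The MPOL_BIND
+value is taken from the kernel UAPI headers.
+
+	#include <stdio.h>
+	#include <sys/syscall.h>
+	#include <unistd.h>
+
+	#define MPOL_BIND 2	/* from uapi/linux/mempolicy.h */
+
+	int main(void)
+	{
+		unsigned long nodemask = 1;	/* bind to node 0 */
+
+		/* maxnode is the number of bits in the mask, plus one. */
+		long ret = syscall(SYS_set_mempolicy, MPOL_BIND, &nodemask,
+				   8 * sizeof(nodemask) + 1);
+
+		if (ret)
+			perror("set_mempolicy");
+		return ret ? 1 : 0;
+	}
+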
+ mm/mempolicy.c | 20 ++++++++------------
+ 1 file changed, 8 insertions(+), 12 deletions(-)
+
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -1492,7 +1492,6 @@ COMPAT_SYSCALL_DEFINE5(get_mempolicy, in
+ COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
+ compat_ulong_t, maxnode)
+ {
+- long err = 0;
+ unsigned long __user *nm = NULL;
+ unsigned long nr_bits, alloc_size;
+ DECLARE_BITMAP(bm, MAX_NUMNODES);
+@@ -1501,14 +1500,13 @@ COMPAT_SYSCALL_DEFINE3(set_mempolicy, in
+ alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
+
+ if (nmask) {
+- err = compat_get_bitmap(bm, nmask, nr_bits);
++ if (compat_get_bitmap(bm, nmask, nr_bits))
++ return -EFAULT;
+ nm = compat_alloc_user_space(alloc_size);
+- err |= copy_to_user(nm, bm, alloc_size);
++ if (copy_to_user(nm, bm, alloc_size))
++ return -EFAULT;
+ }
+
+- if (err)
+- return -EFAULT;
+-
+ return sys_set_mempolicy(mode, nm, nr_bits+1);
+ }
+
+@@ -1516,7 +1514,6 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulo
+ compat_ulong_t, mode, compat_ulong_t __user *, nmask,
+ compat_ulong_t, maxnode, compat_ulong_t, flags)
+ {
+- long err = 0;
+ unsigned long __user *nm = NULL;
+ unsigned long nr_bits, alloc_size;
+ nodemask_t bm;
+@@ -1525,14 +1522,13 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulo
+ alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
+
+ if (nmask) {
+- err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
++ if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
++ return -EFAULT;
+ nm = compat_alloc_user_space(alloc_size);
+- err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
++ if (copy_to_user(nm, nodes_addr(bm), alloc_size))
++ return -EFAULT;
+ }
+
+- if (err)
+- return -EFAULT;
+-
+ return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
+ }
+