--- /dev/null
+From 1bd74b762969b377c999854355574b46d0dd514b Mon Sep 17 00:00:00 2001
+From: Kees Cook <keescook@chromium.org>
+Date: Tue, 2 Aug 2016 14:04:51 -0700
+Subject: binfmt_elf: fix calculations for bss padding
+
+commit 0036d1f7eb95bcc52977f15507f00dd07018e7e2 upstream.
+
+A double-bug exists in the bss calculation code, where an overflow can
+happen in the "last_bss - elf_bss" calculation, but vm_brk internally
+aligns the argument, underflowing it, wrapping back around safe. We
+shouldn't depend on these bugs staying in sync, so this cleans up the
+bss padding handling to avoid the overflow.
+
+This moves the bss padzero() before the last_bss > elf_bss case, since
+the zero-filling of the ELF_PAGE should have nothing to do with the
+relationship of last_bss and elf_bss: any trailing portion should be
+zeroed, and a zero size is already handled by padzero().
+
+Then it handles the math on elf_bss vs last_bss correctly. These need
+to both be ELF_PAGE aligned to get the comparison correct, since that's
+the expected granularity of the mappings. Since elf_bss already had
+alignment-based padding happen in padzero(), the "start" of the new
+vm_brk() should be moved forward as done in the original code. However,
+since the "end" of the vm_brk() area will already become PAGE_ALIGNed in
+vm_brk() then last_bss should get aligned here to avoid hiding it as a
+side-effect.
+
+Additionally makes a cosmetic change to the initial last_bss calculation
+so it's easier to read in comparison to the load_addr calculation above
+it (i.e. the only difference is p_filesz vs p_memsz).
+
+Link: http://lkml.kernel.org/r/1468014494-25291-2-git-send-email-keescook@chromium.org
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Reported-by: Hector Marco-Gisbert <hecmargi@upv.es>
+Cc: Ismael Ripoll Ripoll <iripoll@upv.es>
+Cc: Alexander Viro <viro@zeniv.linux.org.uk>
+Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Chen Gang <gang.chen.5i5j@gmail.com>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Konstantin Khlebnikov <koct9i@gmail.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/binfmt_elf.c | 34 ++++++++++++++++++----------------
+ 1 file changed, 18 insertions(+), 16 deletions(-)
+
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index 70ea4b9c6dd9..2963a23f7a80 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -604,28 +604,30 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
+ * Do the same thing for the memory mapping - between
+ * elf_bss and last_bss is the bss section.
+ */
+- k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
++ k = load_addr + eppnt->p_vaddr + eppnt->p_memsz;
+ if (k > last_bss)
+ last_bss = k;
+ }
+ }
+
++ /*
++ * Now fill out the bss section: first pad the last page from
++ * the file up to the page boundary, and zero it from elf_bss
++ * up to the end of the page.
++ */
++ if (padzero(elf_bss)) {
++ error = -EFAULT;
++ goto out;
++ }
++ /*
++ * Next, align both the file and mem bss up to the page size,
++ * since this is where elf_bss was just zeroed up to, and where
++ * last_bss will end after the vm_brk() below.
++ */
++ elf_bss = ELF_PAGEALIGN(elf_bss);
++ last_bss = ELF_PAGEALIGN(last_bss);
++ /* Finally, if there is still more bss to allocate, do it. */
+ if (last_bss > elf_bss) {
+- /*
+- * Now fill out the bss section. First pad the last page up
+- * to the page boundary, and then perform a mmap to make sure
+- * that there are zero-mapped pages up to and including the
+- * last bss page.
+- */
+- if (padzero(elf_bss)) {
+- error = -EFAULT;
+- goto out;
+- }
+-
+- /* What we have mapped so far */
+- elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
+-
+- /* Map the last of the bss segment */
+ error = vm_brk(elf_bss, last_bss - elf_bss);
+ if (BAD_ADDR(error))
+ goto out;
+--
+2.17.1
+
--- /dev/null
+From 636eaa83db780954174776685437709a918556ee Mon Sep 17 00:00:00 2001
+From: Oscar Salvador <osalvador@suse.de>
+Date: Fri, 13 Jul 2018 16:59:13 -0700
+Subject: fs, elf: make sure to page align bss in load_elf_library
+
+commit 24962af7e1041b7e50c1bc71d8d10dc678c556b5 upstream.
+
+The current code does not make sure to page align bss before calling
+vm_brk(), and this can lead to a VM_BUG_ON() in __mm_populate() due to
+the requested length not being correctly aligned.
+
+Let us make sure to align it properly.
+
+Kees: only applicable to CONFIG_USELIB kernels: 32-bit and configured
+for libc5.
+
+Link: http://lkml.kernel.org/r/20180705145539.9627-1-osalvador@techadventures.net
+Signed-off-by: Oscar Salvador <osalvador@suse.de>
+Reported-by: syzbot+5dcb560fe12aa5091c06@syzkaller.appspotmail.com
+Tested-by: Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp>
+Acked-by: Kees Cook <keescook@chromium.org>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Nicolas Pitre <nicolas.pitre@linaro.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/binfmt_elf.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index 2963a23f7a80..f010d6c8dd14 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -1214,9 +1214,8 @@ static int load_elf_library(struct file *file)
+ goto out_free_ph;
+ }
+
+- len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr +
+- ELF_MIN_ALIGN - 1);
+- bss = eppnt->p_memsz + eppnt->p_vaddr;
++ len = ELF_PAGEALIGN(eppnt->p_filesz + eppnt->p_vaddr);
++ bss = ELF_PAGEALIGN(eppnt->p_memsz + eppnt->p_vaddr);
+ if (bss > len) {
+ error = vm_brk(len, bss - len);
+ if (BAD_ADDR(error))
+--
+2.17.1
+
--- /dev/null
+From 43d1ac299450738391ab5d950bcb186bbd72b0d6 Mon Sep 17 00:00:00 2001
+From: Michal Hocko <mhocko@suse.com>
+Date: Fri, 13 Jul 2018 16:59:20 -0700
+Subject: mm: do not bug_on on incorrect length in __mm_populate()
+
+commit bb177a732c4369bb58a1fe1df8f552b6f0f7db5f upstream.
+
+syzbot has noticed that a specially crafted library can easily hit
+VM_BUG_ON in __mm_populate
+
+ kernel BUG at mm/gup.c:1242!
+ invalid opcode: 0000 [#1] SMP
+ CPU: 2 PID: 9667 Comm: a.out Not tainted 4.18.0-rc3 #644
+ Hardware name: VMware, Inc. VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 05/19/2017
+ RIP: 0010:__mm_populate+0x1e2/0x1f0
+ Code: 55 d0 65 48 33 14 25 28 00 00 00 89 d8 75 21 48 83 c4 20 5b 41 5c 41 5d 41 5e 41 5f 5d c3 e8 75 18 f1 ff 0f 0b e8 6e 18 f1 ff <0f> 0b 31 db eb c9 e8 93 06 e0 ff 0f 1f 00 55 48 89 e5 53 48 89 fb
+ Call Trace:
+ vm_brk_flags+0xc3/0x100
+ vm_brk+0x1f/0x30
+ load_elf_library+0x281/0x2e0
+ __ia32_sys_uselib+0x170/0x1e0
+ do_fast_syscall_32+0xca/0x420
+ entry_SYSENTER_compat+0x70/0x7f
+
+The reason is that the length of the new brk is not page aligned when we
+try to populate it. There is no reason to bug on that though.
+do_brk_flags already aligns the length properly so the mapping is
+expanded as it should. All we need is to tell mm_populate about it.
+Besides that there is absolutely no reason to bug_on in the first
+place. The worst thing that could happen is that the last page wouldn't
+get populated and that is far from putting system into an inconsistent
+state.
+
+Fix the issue by moving the length sanitization code from do_brk_flags
+up to vm_brk_flags. The only other caller of do_brk_flags is brk
+syscall entry and it makes sure to provide the proper length so there
+is no need for sanitation and so we can use do_brk_flags without it.
+
+Also remove the bogus BUG_ONs.
+
+[osalvador@techadventures.net: fix up vm_brk_flags s@request@len@]
+Link: http://lkml.kernel.org/r/20180706090217.GI32658@dhcp22.suse.cz
+Signed-off-by: Michal Hocko <mhocko@suse.com>
+Reported-by: syzbot <syzbot+5dcb560fe12aa5091c06@syzkaller.appspotmail.com>
+Tested-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Reviewed-by: Oscar Salvador <osalvador@suse.de>
+Cc: Zi Yan <zi.yan@cs.rutgers.edu>
+Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Cc: Michael S. Tsirkin <mst@redhat.com>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: "Huang, Ying" <ying.huang@intel.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[bwh: Backported to 4.4:
+ - There is no do_brk_flags() function; update do_brk()
+ - do_brk(), vm_brk() return the address on success
+ - Adjust context]
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/gup.c | 2 --
+ mm/mmap.c | 19 ++++++++++---------
+ 2 files changed, 10 insertions(+), 11 deletions(-)
+
+diff --git a/mm/gup.c b/mm/gup.c
+index b599526db9f7..018144c4b9ec 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -940,8 +940,6 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
+ int locked = 0;
+ long ret = 0;
+
+- VM_BUG_ON(start & ~PAGE_MASK);
+- VM_BUG_ON(len != PAGE_ALIGN(len));
+ end = start + len;
+
+ for (nstart = start; nstart < end; nstart = nend) {
+diff --git a/mm/mmap.c b/mm/mmap.c
+index dd9205542a86..3074dbcd9621 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -2808,21 +2808,15 @@ static inline void verify_mm_writelocked(struct mm_struct *mm)
+ * anonymous maps. eventually we may be able to do some
+ * brk-specific accounting here.
+ */
+-static unsigned long do_brk(unsigned long addr, unsigned long request)
++static unsigned long do_brk(unsigned long addr, unsigned long len)
+ {
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma, *prev;
+- unsigned long flags, len;
++ unsigned long flags;
+ struct rb_node **rb_link, *rb_parent;
+ pgoff_t pgoff = addr >> PAGE_SHIFT;
+ int error;
+
+- len = PAGE_ALIGN(request);
+- if (len < request)
+- return -ENOMEM;
+- if (!len)
+- return addr;
+-
+ flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
+
+ error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
+@@ -2890,12 +2884,19 @@ out:
+ return addr;
+ }
+
+-unsigned long vm_brk(unsigned long addr, unsigned long len)
++unsigned long vm_brk(unsigned long addr, unsigned long request)
+ {
+ struct mm_struct *mm = current->mm;
++ unsigned long len;
+ unsigned long ret;
+ bool populate;
+
++ len = PAGE_ALIGN(request);
++ if (len < request)
++ return -ENOMEM;
++ if (!len)
++ return addr;
++
+ down_write(&mm->mmap_sem);
+ ret = do_brk(addr, len);
+ populate = ((mm->def_flags & VM_LOCKED) != 0);
+--
+2.17.1
+
--- /dev/null
+From d6cb38c993f948d0301ca39081bcf437fa200d4b Mon Sep 17 00:00:00 2001
+From: Michal Hocko <mhocko@suse.com>
+Date: Mon, 23 May 2016 16:25:39 -0700
+Subject: mm, elf: handle vm_brk error
+
+commit ecc2bc8ac03884266cf73f8a2a42b911465b2fbc upstream.
+
+load_elf_library doesn't handle vm_brk failure although nothing really
+indicates it cannot do that because the function is allowed to fail due
+to vm_mmap failures already. This might be not a problem now but later
+patch will make vm_brk killable (resp. mmap_sem for write waiting will
+become killable) and so the failure will be more probable.
+
+Signed-off-by: Michal Hocko <mhocko@suse.com>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: Alexander Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/binfmt_elf.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index 62bc72001fce..70ea4b9c6dd9 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -1215,8 +1215,11 @@ static int load_elf_library(struct file *file)
+ len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr +
+ ELF_MIN_ALIGN - 1);
+ bss = eppnt->p_memsz + eppnt->p_vaddr;
+- if (bss > len)
+- vm_brk(len, bss - len);
++ if (bss > len) {
++ error = vm_brk(len, bss - len);
++ if (BAD_ADDR(error))
++ goto out_free_ph;
++ }
+ error = 0;
+
+ out_free_ph:
+--
+2.17.1
+
--- /dev/null
+From 2148901795dcf9d50d8c0a359324ee6f5ea6e8cc Mon Sep 17 00:00:00 2001
+From: Kees Cook <keescook@chromium.org>
+Date: Tue, 2 Aug 2016 14:04:54 -0700
+Subject: mm: refuse wrapped vm_brk requests
+
+commit ba093a6d9397da8eafcfbaa7d95bd34255da39a0 upstream.
+
+The vm_brk() alignment calculations should refuse to overflow. The ELF
+loader was depending on this, but it has been fixed now. No other unsafe
+callers have been found.
+
+Link: http://lkml.kernel.org/r/1468014494-25291-3-git-send-email-keescook@chromium.org
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Reported-by: Hector Marco-Gisbert <hecmargi@upv.es>
+Cc: Ismael Ripoll Ripoll <iripoll@upv.es>
+Cc: Alexander Viro <viro@zeniv.linux.org.uk>
+Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Chen Gang <gang.chen.5i5j@gmail.com>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Konstantin Khlebnikov <koct9i@gmail.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[bwh: Backported to 4.4: adjust context]
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/mmap.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 39f5fbd07486..dd9205542a86 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -2808,16 +2808,18 @@ static inline void verify_mm_writelocked(struct mm_struct *mm)
+ * anonymous maps. eventually we may be able to do some
+ * brk-specific accounting here.
+ */
+-static unsigned long do_brk(unsigned long addr, unsigned long len)
++static unsigned long do_brk(unsigned long addr, unsigned long request)
+ {
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma, *prev;
+- unsigned long flags;
++ unsigned long flags, len;
+ struct rb_node **rb_link, *rb_parent;
+ pgoff_t pgoff = addr >> PAGE_SHIFT;
+ int error;
+
+- len = PAGE_ALIGN(len);
++ len = PAGE_ALIGN(request);
++ if (len < request)
++ return -ENOMEM;
+ if (!len)
+ return addr;
+
+--
+2.17.1
+
fuse-fix-use-after-free-in-fuse_dev_do_write.patch
fuse-fix-blocked_waitq-wakeup.patch
fuse-set-fr_sent-while-locked.patch
+mm-elf-handle-vm_brk-error.patch
+binfmt_elf-fix-calculations-for-bss-padding.patch
+mm-refuse-wrapped-vm_brk-requests.patch
+fs-elf-make-sure-to-page-align-bss-in-load_elf_libra.patch
+mm-do-not-bug_on-on-incorrect-length-in-__mm_populat.patch