git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.1-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 20 May 2019 11:28:16 +0000 (13:28 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 20 May 2019 11:28:16 +0000 (13:28 +0200)
added patches:
ext4-fix-block-validity-checks-for-journal-inodes-using-indirect-blocks.patch
ext4-unsigned-int-compared-against-zero.patch
s390-mm-convert-to-the-generic-get_user_pages_fast-code.patch
s390-mm-make-the-pxd_offset-functions-more-robust.patch

queue-5.1/ext4-fix-block-validity-checks-for-journal-inodes-using-indirect-blocks.patch [new file with mode: 0644]
queue-5.1/ext4-unsigned-int-compared-against-zero.patch [new file with mode: 0644]
queue-5.1/s390-mm-convert-to-the-generic-get_user_pages_fast-code.patch [new file with mode: 0644]
queue-5.1/s390-mm-make-the-pxd_offset-functions-more-robust.patch [new file with mode: 0644]
queue-5.1/series

diff --git a/queue-5.1/ext4-fix-block-validity-checks-for-journal-inodes-using-indirect-blocks.patch b/queue-5.1/ext4-fix-block-validity-checks-for-journal-inodes-using-indirect-blocks.patch
new file mode 100644 (file)
index 0000000..dd17d55
--- /dev/null
@@ -0,0 +1,42 @@
+From 170417c8c7bb2cbbdd949bf5c443c0c8f24a203b Mon Sep 17 00:00:00 2001
+From: Theodore Ts'o <tytso@mit.edu>
+Date: Wed, 15 May 2019 00:51:19 -0400
+Subject: ext4: fix block validity checks for journal inodes using indirect blocks
+
+From: Theodore Ts'o <tytso@mit.edu>
+
+commit 170417c8c7bb2cbbdd949bf5c443c0c8f24a203b upstream.
+
+Commit 345c0dbf3a30 ("ext4: protect journal inode's blocks using
+block_validity") failed to add an exception for the journal inode in
+ext4_check_blockref(), which is the function used by ext4_get_branch()
+for indirect blocks.  This caused attempts to read from the ext3-style
+journals to fail with:
+
+[  848.968550] EXT4-fs error (device sdb7): ext4_get_branch:171: inode #8: block 30343695: comm jbd2/sdb7-8: invalid block
+
+Fix this by adding the missing exception check.
+
+Fixes: 345c0dbf3a30 ("ext4: protect journal inode's blocks using block_validity")
+Reported-by: Arthur Marsh <arthur.marsh@internode.on.net>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/block_validity.c |    5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/fs/ext4/block_validity.c
++++ b/fs/ext4/block_validity.c
+@@ -276,6 +276,11 @@ int ext4_check_blockref(const char *func
+       __le32 *bref = p;
+       unsigned int blk;
++      if (ext4_has_feature_journal(inode->i_sb) &&
++          (inode->i_ino ==
++           le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
++              return 0;
++
+       while (bref < p+max) {
+               blk = le32_to_cpu(*bref++);
+               if (blk &&
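
For reference, the check added above exempts exactly one inode, the filesystem's own journal, from block-validity enforcement in ext4_check_blockref(). Restated as a stand-alone predicate (the helper name below is hypothetical, it is not part of fs/ext4), the condition reads:

    /* Hypothetical helper, equivalent to the condition added in the hunk
     * above: true if @inode is the superblock's journal inode, whose blocks
     * intentionally live inside the block_validity protected range. */
    static bool inode_is_journal_inode(struct inode *inode)
    {
            struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

            return ext4_has_feature_journal(inode->i_sb) &&
                   inode->i_ino == le32_to_cpu(sbi->s_es->s_journal_inum);
    }
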
diff --git a/queue-5.1/ext4-unsigned-int-compared-against-zero.patch b/queue-5.1/ext4-unsigned-int-compared-against-zero.patch
new file mode 100644 (file)
index 0000000..85dc8e9
--- /dev/null
@@ -0,0 +1,35 @@
+From fbbbbd2f28aec991f3fbc248df211550fbdfd58c Mon Sep 17 00:00:00 2001
+From: Colin Ian King <colin.king@canonical.com>
+Date: Fri, 10 May 2019 22:06:38 -0400
+Subject: ext4: unsigned int compared against zero
+
+From: Colin Ian King <colin.king@canonical.com>
+
+commit fbbbbd2f28aec991f3fbc248df211550fbdfd58c upstream.
+
+There are two cases where the u32 variables n and err are being checked
+for less-than-zero error values; these checks are always false because
+the variables are unsigned. Fix this by making the variables ints.
+
+Addresses-Coverity: ("Unsigned compared against 0")
+Fixes: 345c0dbf3a30 ("ext4: protect journal inode's blocks using block_validity")
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/block_validity.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/ext4/block_validity.c
++++ b/fs/ext4/block_validity.c
+@@ -142,7 +142,8 @@ static int ext4_protect_reserved_inode(s
+       struct inode *inode;
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       struct ext4_map_blocks map;
+-      u32 i = 0, err = 0, num, n;
++      u32 i = 0, num;
++      int err = 0, n;
+       if ((ino < EXT4_ROOT_INO) ||
+           (ino > le32_to_cpu(sbi->s_es->s_inodes_count)))
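
The bug class fixed here is easy to demonstrate outside the kernel; a minimal user-space illustration (plain C, not ext4 code) of why an unsigned variable can never test as less than zero:

    #include <stdio.h>

    int main(void)
    {
            unsigned int err = (unsigned int)-5;    /* an error code stored in a u32 */

            /* Always false: err is unsigned, so it can never be < 0.
             * -5 wraps to 4294967291 and the error is silently missed. */
            if (err < 0)
                    printf("error detected\n");
            else
                    printf("error missed: err=%u\n", err);

            return 0;
    }

Building this with -Wextra (or -Wtype-limits) makes gcc flag the comparison as always false, which is the same finding the Coverity tag above refers to.
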
diff --git a/queue-5.1/s390-mm-convert-to-the-generic-get_user_pages_fast-code.patch b/queue-5.1/s390-mm-convert-to-the-generic-get_user_pages_fast-code.patch
new file mode 100644 (file)
index 0000000..eba8e25
--- /dev/null
@@ -0,0 +1,359 @@
+From 1a42010cdc26bb7e5912984f3c91b8c6d55f089a Mon Sep 17 00:00:00 2001
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Date: Tue, 23 Apr 2019 10:53:21 +0200
+Subject: s390/mm: convert to the generic get_user_pages_fast code
+
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+
+commit 1a42010cdc26bb7e5912984f3c91b8c6d55f089a upstream.
+
+Define gup_fast_permitted() to check against the asce_limit of the
+mm attached to the current task, then replace the s390-specific gup
+code with the generic implementation in mm/gup.c.
+
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/Kconfig               |    1 
+ arch/s390/include/asm/pgtable.h |   12 +
+ arch/s390/mm/Makefile           |    2 
+ arch/s390/mm/gup.c              |  291 ----------------------------------------
+ 4 files changed, 14 insertions(+), 292 deletions(-)
+
+--- a/arch/s390/Kconfig
++++ b/arch/s390/Kconfig
+@@ -149,6 +149,7 @@ config S390
+       select HAVE_FUNCTION_TRACER
+       select HAVE_FUTEX_CMPXCHG if FUTEX
+       select HAVE_GCC_PLUGINS
++      select HAVE_GENERIC_GUP
+       select HAVE_KERNEL_BZIP2
+       select HAVE_KERNEL_GZIP
+       select HAVE_KERNEL_LZ4
+--- a/arch/s390/include/asm/pgtable.h
++++ b/arch/s390/include/asm/pgtable.h
+@@ -1265,6 +1265,18 @@ static inline pte_t *pte_offset(pmd_t *p
+ #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
+ #define pte_unmap(pte) do { } while (0)
++static inline bool gup_fast_permitted(unsigned long start, int nr_pages)
++{
++      unsigned long len, end;
++
++      len = (unsigned long) nr_pages << PAGE_SHIFT;
++      end = start + len;
++      if (end < start)
++              return false;
++      return end <= current->mm->context.asce_limit;
++}
++#define gup_fast_permitted gup_fast_permitted
++
+ #define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
+ #define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
+ #define pte_page(x) pfn_to_page(pte_pfn(x))
+--- a/arch/s390/mm/Makefile
++++ b/arch/s390/mm/Makefile
+@@ -4,7 +4,7 @@
+ #
+ obj-y         := init.o fault.o extmem.o mmap.o vmem.o maccess.o
+-obj-y         += page-states.o gup.o pageattr.o pgtable.o pgalloc.o
++obj-y         += page-states.o pageattr.o pgtable.o pgalloc.o
+ obj-$(CONFIG_CMM)             += cmm.o
+ obj-$(CONFIG_HUGETLB_PAGE)    += hugetlbpage.o
+--- a/arch/s390/mm/gup.c
++++ /dev/null
+@@ -1,291 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- *  Lockless get_user_pages_fast for s390
+- *
+- *  Copyright IBM Corp. 2010
+- *  Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+- */
+-#include <linux/sched.h>
+-#include <linux/mm.h>
+-#include <linux/hugetlb.h>
+-#include <linux/vmstat.h>
+-#include <linux/pagemap.h>
+-#include <linux/rwsem.h>
+-#include <asm/pgtable.h>
+-
+-/*
+- * The performance critical leaf functions are made noinline otherwise gcc
+- * inlines everything into a single function which results in too much
+- * register pressure.
+- */
+-static inline int gup_pte_range(pmd_t pmd, unsigned long addr,
+-              unsigned long end, int write, struct page **pages, int *nr)
+-{
+-      struct page *head, *page;
+-      unsigned long mask;
+-      pte_t *ptep, pte;
+-
+-      mask = (write ? _PAGE_PROTECT : 0) | _PAGE_INVALID | _PAGE_SPECIAL;
+-
+-      ptep = pte_offset_map(&pmd, addr);
+-      do {
+-              pte = *ptep;
+-              barrier();
+-              /* Similar to the PMD case, NUMA hinting must take slow path */
+-              if (pte_protnone(pte))
+-                      return 0;
+-              if ((pte_val(pte) & mask) != 0)
+-                      return 0;
+-              VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+-              page = pte_page(pte);
+-              head = compound_head(page);
+-              if (!page_cache_get_speculative(head))
+-                      return 0;
+-              if (unlikely(pte_val(pte) != pte_val(*ptep))) {
+-                      put_page(head);
+-                      return 0;
+-              }
+-              VM_BUG_ON_PAGE(compound_head(page) != head, page);
+-              pages[*nr] = page;
+-              (*nr)++;
+-
+-      } while (ptep++, addr += PAGE_SIZE, addr != end);
+-
+-      return 1;
+-}
+-
+-static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
+-              unsigned long end, int write, struct page **pages, int *nr)
+-{
+-      struct page *head, *page;
+-      unsigned long mask;
+-      int refs;
+-
+-      mask = (write ? _SEGMENT_ENTRY_PROTECT : 0) | _SEGMENT_ENTRY_INVALID;
+-      if ((pmd_val(pmd) & mask) != 0)
+-              return 0;
+-      VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT));
+-
+-      refs = 0;
+-      head = pmd_page(pmd);
+-      page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
+-      do {
+-              VM_BUG_ON(compound_head(page) != head);
+-              pages[*nr] = page;
+-              (*nr)++;
+-              page++;
+-              refs++;
+-      } while (addr += PAGE_SIZE, addr != end);
+-
+-      if (!page_cache_add_speculative(head, refs)) {
+-              *nr -= refs;
+-              return 0;
+-      }
+-
+-      if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) {
+-              *nr -= refs;
+-              while (refs--)
+-                      put_page(head);
+-              return 0;
+-      }
+-
+-      return 1;
+-}
+-
+-
+-static inline int gup_pmd_range(pud_t pud, unsigned long addr,
+-              unsigned long end, int write, struct page **pages, int *nr)
+-{
+-      unsigned long next;
+-      pmd_t *pmdp, pmd;
+-
+-      pmdp = pmd_offset(&pud, addr);
+-      do {
+-              pmd = *pmdp;
+-              barrier();
+-              next = pmd_addr_end(addr, end);
+-              if (pmd_none(pmd))
+-                      return 0;
+-              if (unlikely(pmd_large(pmd))) {
+-                      /*
+-                       * NUMA hinting faults need to be handled in the GUP
+-                       * slowpath for accounting purposes and so that they
+-                       * can be serialised against THP migration.
+-                       */
+-                      if (pmd_protnone(pmd))
+-                              return 0;
+-                      if (!gup_huge_pmd(pmdp, pmd, addr, next,
+-                                        write, pages, nr))
+-                              return 0;
+-              } else if (!gup_pte_range(pmd, addr, next,
+-                                        write, pages, nr))
+-                      return 0;
+-      } while (pmdp++, addr = next, addr != end);
+-
+-      return 1;
+-}
+-
+-static int gup_huge_pud(pud_t *pudp, pud_t pud, unsigned long addr,
+-              unsigned long end, int write, struct page **pages, int *nr)
+-{
+-      struct page *head, *page;
+-      unsigned long mask;
+-      int refs;
+-
+-      mask = (write ? _REGION_ENTRY_PROTECT : 0) | _REGION_ENTRY_INVALID;
+-      if ((pud_val(pud) & mask) != 0)
+-              return 0;
+-      VM_BUG_ON(!pfn_valid(pud_pfn(pud)));
+-
+-      refs = 0;
+-      head = pud_page(pud);
+-      page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
+-      do {
+-              VM_BUG_ON_PAGE(compound_head(page) != head, page);
+-              pages[*nr] = page;
+-              (*nr)++;
+-              page++;
+-              refs++;
+-      } while (addr += PAGE_SIZE, addr != end);
+-
+-      if (!page_cache_add_speculative(head, refs)) {
+-              *nr -= refs;
+-              return 0;
+-      }
+-
+-      if (unlikely(pud_val(pud) != pud_val(*pudp))) {
+-              *nr -= refs;
+-              while (refs--)
+-                      put_page(head);
+-              return 0;
+-      }
+-
+-      return 1;
+-}
+-
+-static inline int gup_pud_range(p4d_t p4d, unsigned long addr,
+-              unsigned long end, int write, struct page **pages, int *nr)
+-{
+-      unsigned long next;
+-      pud_t *pudp, pud;
+-
+-      pudp = pud_offset(&p4d, addr);
+-      do {
+-              pud = *pudp;
+-              barrier();
+-              next = pud_addr_end(addr, end);
+-              if (pud_none(pud))
+-                      return 0;
+-              if (unlikely(pud_large(pud))) {
+-                      if (!gup_huge_pud(pudp, pud, addr, next, write, pages,
+-                                        nr))
+-                              return 0;
+-              } else if (!gup_pmd_range(pud, addr, next, write, pages,
+-                                        nr))
+-                      return 0;
+-      } while (pudp++, addr = next, addr != end);
+-
+-      return 1;
+-}
+-
+-static inline int gup_p4d_range(pgd_t pgd, unsigned long addr,
+-              unsigned long end, int write, struct page **pages, int *nr)
+-{
+-      unsigned long next;
+-      p4d_t *p4dp, p4d;
+-
+-      p4dp = p4d_offset(&pgd, addr);
+-      do {
+-              p4d = *p4dp;
+-              barrier();
+-              next = p4d_addr_end(addr, end);
+-              if (p4d_none(p4d))
+-                      return 0;
+-              if (!gup_pud_range(p4d, addr, next, write, pages, nr))
+-                      return 0;
+-      } while (p4dp++, addr = next, addr != end);
+-
+-      return 1;
+-}
+-
+-/*
+- * Like get_user_pages_fast() except its IRQ-safe in that it won't fall
+- * back to the regular GUP.
+- * Note a difference with get_user_pages_fast: this always returns the
+- * number of pages pinned, 0 if no pages were pinned.
+- */
+-int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
+-                        struct page **pages)
+-{
+-      struct mm_struct *mm = current->mm;
+-      unsigned long addr, len, end;
+-      unsigned long next, flags;
+-      pgd_t *pgdp, pgd;
+-      int nr = 0;
+-
+-      start &= PAGE_MASK;
+-      addr = start;
+-      len = (unsigned long) nr_pages << PAGE_SHIFT;
+-      end = start + len;
+-      if ((end <= start) || (end > mm->context.asce_limit))
+-              return 0;
+-      /*
+-       * local_irq_save() doesn't prevent pagetable teardown, but does
+-       * prevent the pagetables from being freed on s390.
+-       *
+-       * So long as we atomically load page table pointers versus teardown,
+-       * we can follow the address down to the the page and take a ref on it.
+-       */
+-      local_irq_save(flags);
+-      pgdp = pgd_offset(mm, addr);
+-      do {
+-              pgd = *pgdp;
+-              barrier();
+-              next = pgd_addr_end(addr, end);
+-              if (pgd_none(pgd))
+-                      break;
+-              if (!gup_p4d_range(pgd, addr, next, write, pages, &nr))
+-                      break;
+-      } while (pgdp++, addr = next, addr != end);
+-      local_irq_restore(flags);
+-
+-      return nr;
+-}
+-
+-/**
+- * get_user_pages_fast() - pin user pages in memory
+- * @start:    starting user address
+- * @nr_pages: number of pages from start to pin
+- * @write:    whether pages will be written to
+- * @pages:    array that receives pointers to the pages pinned.
+- *            Should be at least nr_pages long.
+- *
+- * Attempt to pin user pages in memory without taking mm->mmap_sem.
+- * If not successful, it will fall back to taking the lock and
+- * calling get_user_pages().
+- *
+- * Returns number of pages pinned. This may be fewer than the number
+- * requested. If nr_pages is 0 or negative, returns 0. If no pages
+- * were pinned, returns -errno.
+- */
+-int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+-                      struct page **pages)
+-{
+-      int nr, ret;
+-
+-      might_sleep();
+-      start &= PAGE_MASK;
+-      nr = __get_user_pages_fast(start, nr_pages, write, pages);
+-      if (nr == nr_pages)
+-              return nr;
+-
+-      /* Try to get the remaining pages with get_user_pages */
+-      start += nr << PAGE_SHIFT;
+-      pages += nr;
+-      ret = get_user_pages_unlocked(start, nr_pages - nr, pages,
+-                                    write ? FOLL_WRITE : 0);
+-      /* Have to be a bit careful with return values */
+-      if (nr > 0)
+-              ret = (ret < 0) ? nr : ret + nr;
+-      return ret;
+-}
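
With HAVE_GENERIC_GUP selected, the only architecture-specific piece left is the gup_fast_permitted() hook added to pgtable.h above; the shared fast path in mm/gup.c supplies a default and consults the hook before attempting the lockless walk. The following is a simplified sketch of that contract, not the verbatim 5.1 mm/gup.c code:

    /*
     * Generic side, simplified: if an architecture does not provide the
     * hook, every range is permitted and the lockless walk is attempted.
     */
    #ifndef gup_fast_permitted
    static inline bool gup_fast_permitted(unsigned long start, int nr_pages)
    {
            return true;
    }
    #endif

    /*
     * Call-site shape in the common fast path (heavily abbreviated):
     *
     *      if (gup_fast_permitted(start, nr_pages)) {
     *              local_irq_save(flags);
     *              gup_pgd_range(...);             // walk pgd/p4d/pud/pmd/pte, bump nr
     *              local_irq_restore(flags);
     *      }
     *
     * so on s390 any range beyond current->mm->context.asce_limit simply
     * falls back to the regular get_user_pages() slow path.
     */
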
diff --git a/queue-5.1/s390-mm-make-the-pxd_offset-functions-more-robust.patch b/queue-5.1/s390-mm-make-the-pxd_offset-functions-more-robust.patch
new file mode 100644 (file)
index 0000000..807fb17
--- /dev/null
@@ -0,0 +1,250 @@
+From d1874a0c2805fcfa9162c972d6b7541e57adb542 Mon Sep 17 00:00:00 2001
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Date: Tue, 23 Apr 2019 10:51:12 +0200
+Subject: s390/mm: make the pxd_offset functions more robust
+
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+
+commit d1874a0c2805fcfa9162c972d6b7541e57adb542 upstream.
+
+Change the way how pgd_offset, p4d_offset, pud_offset and pmd_offset
+walk the page tables. pgd_offset now always calculates the index for
+the top-level page table and adds it to the pgd; this is either a
+segment table offset for a 2-level setup, a region-3 offset for 3 levels,
+a region-2 offset for 4 levels, or a region-1 offset for a 5-level setup.
+The other three functions p4d_offset, pud_offset and pmd_offset will
+only add the respective offset if they dereference the passed pointer.
+
+With the new way of walking the page tables a sequence like this from
+mm/gup.c now works:
+
+     pgdp = pgd_offset(current->mm, addr);
+     pgd = READ_ONCE(*pgdp);
+     p4dp = p4d_offset(&pgd, addr);
+     p4d = READ_ONCE(*p4dp);
+     pudp = pud_offset(&p4d, addr);
+     pud = READ_ONCE(*pudp);
+     pmdp = pmd_offset(&pud, addr);
+     pmd = READ_ONCE(*pmdp);
+
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/include/asm/pgtable.h |   67 +++++++++++++++++++++++++---------------
+ arch/s390/mm/gup.c              |   33 +++++++------------
+ 2 files changed, 55 insertions(+), 45 deletions(-)
+
+--- a/arch/s390/include/asm/pgtable.h
++++ b/arch/s390/include/asm/pgtable.h
+@@ -1204,42 +1204,67 @@ static inline pte_t mk_pte(struct page *
+ #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+ #define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
+-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
+-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+-#define pgd_offset_raw(pgd, addr) ((pgd) + pgd_index(addr))
+-
+ #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
+ #define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
+ #define p4d_deref(pud) (p4d_val(pud) & _REGION_ENTRY_ORIGIN)
+ #define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)
+-static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
++/*
++ * The pgd_offset function *always* adds the index for the top-level
++ * region/segment table. This is done to get a sequence like the
++ * following to work:
++ *    pgdp = pgd_offset(current->mm, addr);
++ *    pgd = READ_ONCE(*pgdp);
++ *    p4dp = p4d_offset(&pgd, addr);
++ *    ...
++ * The subsequent p4d_offset, pud_offset and pmd_offset functions
++ * only add an index if they dereferenced the pointer.
++ */
++static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
+ {
+-      p4d_t *p4d = (p4d_t *) pgd;
++      unsigned long rste;
++      unsigned int shift;
+-      if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
+-              p4d = (p4d_t *) pgd_deref(*pgd);
+-      return p4d + p4d_index(address);
++      /* Get the first entry of the top level table */
++      rste = pgd_val(*pgd);
++      /* Pick up the shift from the table type of the first entry */
++      shift = ((rste & _REGION_ENTRY_TYPE_MASK) >> 2) * 11 + 20;
++      return pgd + ((address >> shift) & (PTRS_PER_PGD - 1));
+ }
+-static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
++#define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)
++#define pgd_offset_k(address) pgd_offset(&init_mm, address)
++
++static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
+ {
+-      pud_t *pud = (pud_t *) p4d;
++      if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
++              return (p4d_t *) pgd_deref(*pgd) + p4d_index(address);
++      return (p4d_t *) pgd;
++}
+-      if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
+-              pud = (pud_t *) p4d_deref(*p4d);
+-      return pud + pud_index(address);
++static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
++{
++      if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
++              return (pud_t *) p4d_deref(*p4d) + pud_index(address);
++      return (pud_t *) p4d;
+ }
+ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
+ {
+-      pmd_t *pmd = (pmd_t *) pud;
++      if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
++              return (pmd_t *) pud_deref(*pud) + pmd_index(address);
++      return (pmd_t *) pud;
++}
+-      if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
+-              pmd = (pmd_t *) pud_deref(*pud);
+-      return pmd + pmd_index(address);
++static inline pte_t *pte_offset(pmd_t *pmd, unsigned long address)
++{
++      return (pte_t *) pmd_deref(*pmd) + pte_index(address);
+ }
++#define pte_offset_kernel(pmd, address) pte_offset(pmd, address)
++#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
++#define pte_unmap(pte) do { } while (0)
++
+ #define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
+ #define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
+ #define pte_page(x) pfn_to_page(pte_pfn(x))
+@@ -1249,12 +1274,6 @@ static inline pmd_t *pmd_offset(pud_t *p
+ #define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))
+ #define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd))
+-/* Find an entry in the lowest level page table.. */
+-#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
+-#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
+-#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
+-#define pte_unmap(pte) do { } while (0)
+-
+ static inline pmd_t pmd_wrprotect(pmd_t pmd)
+ {
+       pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
+--- a/arch/s390/mm/gup.c
++++ b/arch/s390/mm/gup.c
+@@ -18,7 +18,7 @@
+  * inlines everything into a single function which results in too much
+  * register pressure.
+  */
+-static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
++static inline int gup_pte_range(pmd_t pmd, unsigned long addr,
+               unsigned long end, int write, struct page **pages, int *nr)
+ {
+       struct page *head, *page;
+@@ -27,7 +27,7 @@ static inline int gup_pte_range(pmd_t *p
+       mask = (write ? _PAGE_PROTECT : 0) | _PAGE_INVALID | _PAGE_SPECIAL;
+-      ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr);
++      ptep = pte_offset_map(&pmd, addr);
+       do {
+               pte = *ptep;
+               barrier();
+@@ -93,16 +93,13 @@ static inline int gup_huge_pmd(pmd_t *pm
+ }
+-static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
++static inline int gup_pmd_range(pud_t pud, unsigned long addr,
+               unsigned long end, int write, struct page **pages, int *nr)
+ {
+       unsigned long next;
+       pmd_t *pmdp, pmd;
+-      pmdp = (pmd_t *) pudp;
+-      if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
+-              pmdp = (pmd_t *) pud_deref(pud);
+-      pmdp += pmd_index(addr);
++      pmdp = pmd_offset(&pud, addr);
+       do {
+               pmd = *pmdp;
+               barrier();
+@@ -120,7 +117,7 @@ static inline int gup_pmd_range(pud_t *p
+                       if (!gup_huge_pmd(pmdp, pmd, addr, next,
+                                         write, pages, nr))
+                               return 0;
+-              } else if (!gup_pte_range(pmdp, pmd, addr, next,
++              } else if (!gup_pte_range(pmd, addr, next,
+                                         write, pages, nr))
+                       return 0;
+       } while (pmdp++, addr = next, addr != end);
+@@ -166,16 +163,13 @@ static int gup_huge_pud(pud_t *pudp, pud
+       return 1;
+ }
+-static inline int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr,
++static inline int gup_pud_range(p4d_t p4d, unsigned long addr,
+               unsigned long end, int write, struct page **pages, int *nr)
+ {
+       unsigned long next;
+       pud_t *pudp, pud;
+-      pudp = (pud_t *) p4dp;
+-      if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
+-              pudp = (pud_t *) p4d_deref(p4d);
+-      pudp += pud_index(addr);
++      pudp = pud_offset(&p4d, addr);
+       do {
+               pud = *pudp;
+               barrier();
+@@ -186,7 +180,7 @@ static inline int gup_pud_range(p4d_t *p
+                       if (!gup_huge_pud(pudp, pud, addr, next, write, pages,
+                                         nr))
+                               return 0;
+-              } else if (!gup_pmd_range(pudp, pud, addr, next, write, pages,
++              } else if (!gup_pmd_range(pud, addr, next, write, pages,
+                                         nr))
+                       return 0;
+       } while (pudp++, addr = next, addr != end);
+@@ -194,23 +188,20 @@ static inline int gup_pud_range(p4d_t *p
+       return 1;
+ }
+-static inline int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
++static inline int gup_p4d_range(pgd_t pgd, unsigned long addr,
+               unsigned long end, int write, struct page **pages, int *nr)
+ {
+       unsigned long next;
+       p4d_t *p4dp, p4d;
+-      p4dp = (p4d_t *) pgdp;
+-      if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
+-              p4dp = (p4d_t *) pgd_deref(pgd);
+-      p4dp += p4d_index(addr);
++      p4dp = p4d_offset(&pgd, addr);
+       do {
+               p4d = *p4dp;
+               barrier();
+               next = p4d_addr_end(addr, end);
+               if (p4d_none(p4d))
+                       return 0;
+-              if (!gup_pud_range(p4dp, p4d, addr, next, write, pages, nr))
++              if (!gup_pud_range(p4d, addr, next, write, pages, nr))
+                       return 0;
+       } while (p4dp++, addr = next, addr != end);
+@@ -253,7 +244,7 @@ int __get_user_pages_fast(unsigned long
+               next = pgd_addr_end(addr, end);
+               if (pgd_none(pgd))
+                       break;
+-              if (!gup_p4d_range(pgdp, pgd, addr, next, write, pages, &nr))
++              if (!gup_p4d_range(pgd, addr, next, write, pages, &nr))
+                       break;
+       } while (pgdp++, addr = next, addr != end);
+       local_irq_restore(flags);
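
The rewritten pgd_offset_raw() above derives the index shift from the table-type bits of the first top-level entry: each region/segment table level on s390 indexes 11 bits of the virtual address, starting at bit 20 for a segment table. A small stand-alone check of that arithmetic (the 0x00/0x04/0x08/0x0c type encodings for segment and region-3/2/1 tables are assumed here, they are not spelled out in this diff):

    #include <stdio.h>

    #define REGION_ENTRY_TYPE_MASK 0x0cUL   /* assumed s390 table-type field */

    int main(void)
    {
            /* segment, region-3, region-2, region-1 table-type encodings */
            unsigned long types[] = { 0x00, 0x04, 0x08, 0x0c };
            const char *names[]  = { "segment (2-level)", "region-3 (3-level)",
                                     "region-2 (4-level)", "region-1 (5-level)" };

            for (int i = 0; i < 4; i++) {
                    /* Same arithmetic as pgd_offset_raw() in the hunk above:
                     * 11 index bits per level, starting at bit 20. */
                    unsigned int shift =
                            ((types[i] & REGION_ENTRY_TYPE_MASK) >> 2) * 11 + 20;
                    printf("%-20s -> shift %u\n", names[i], shift);
            }
            return 0;
    }

This prints shifts of 20, 31, 42 and 53, matching the index positions of the four possible top-level tables, which is why a single expression can replace the old per-level pointer juggling.
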
diff --git a/queue-5.1/series b/queue-5.1/series
index 0897fda8c5d4f6a570fb2e804d87d666681e8af0..642e48f92229ddd04e72b8f8261bd9ffdce1ef2e 100644 (file)
@@ -122,3 +122,7 @@ xen-pvh-set-xen_domain_type-to-hvm-in-xen_pvh_init.patch
 xen-pvh-correctly-setup-the-pv-efi-interface-for-dom0.patch
 powerpc-32s-fix-flush_hash_pages-on-smp.patch
 libnvdimm-namespace-fix-label-tracking-error.patch
+s390-mm-make-the-pxd_offset-functions-more-robust.patch
+s390-mm-convert-to-the-generic-get_user_pages_fast-code.patch
+ext4-unsigned-int-compared-against-zero.patch
+ext4-fix-block-validity-checks-for-journal-inodes-using-indirect-blocks.patch