git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.18-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 31 Aug 2017 05:57:16 +0000 (07:57 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 31 Aug 2017 05:57:16 +0000 (07:57 +0200)
added patches:
arm64-mm-abort-uaccess-retries-upon-fatal-signal.patch
lib-bitmap-add-alignment-offset-for-bitmap_find_next_zero_area.patch
mm-cma-align-to-physical-address-not-cma-region-position.patch
mm-cma-constify-and-use-correct-signness-in-mm-cma.c.patch
mm-cma-fix-cma-aligned-offset-calculation.patch
mm-cma-fix-incorrect-type-conversion-for-size-during-dma-allocation.patch
mm-cma-fix-totalcma_pages-to-include-dt-defined-cma-regions.patch
mm-cma-make-kmemleak-ignore-cma-regions.patch
mm-cma-split-cma-reserved-in-dmesg-log.patch
x86-io-add-memory-clobber-to-insb-insw-insl-outsb-outsw-outsl.patch

queue-3.18/arm64-mm-abort-uaccess-retries-upon-fatal-signal.patch [new file with mode: 0644]
queue-3.18/lib-bitmap-add-alignment-offset-for-bitmap_find_next_zero_area.patch [new file with mode: 0644]
queue-3.18/mm-cma-align-to-physical-address-not-cma-region-position.patch [new file with mode: 0644]
queue-3.18/mm-cma-constify-and-use-correct-signness-in-mm-cma.c.patch [new file with mode: 0644]
queue-3.18/mm-cma-fix-cma-aligned-offset-calculation.patch [new file with mode: 0644]
queue-3.18/mm-cma-fix-incorrect-type-conversion-for-size-during-dma-allocation.patch [new file with mode: 0644]
queue-3.18/mm-cma-fix-totalcma_pages-to-include-dt-defined-cma-regions.patch [new file with mode: 0644]
queue-3.18/mm-cma-make-kmemleak-ignore-cma-regions.patch [new file with mode: 0644]
queue-3.18/mm-cma-split-cma-reserved-in-dmesg-log.patch [new file with mode: 0644]
queue-3.18/series
queue-3.18/x86-io-add-memory-clobber-to-insb-insw-insl-outsb-outsw-outsl.patch [new file with mode: 0644]

diff --git a/queue-3.18/arm64-mm-abort-uaccess-retries-upon-fatal-signal.patch b/queue-3.18/arm64-mm-abort-uaccess-retries-upon-fatal-signal.patch
new file mode 100644 (file)
index 0000000..14171f1
--- /dev/null
@@ -0,0 +1,54 @@
+From 289d07a2dc6c6b6f3e4b8a62669320d99dbe6c3d Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Tue, 11 Jul 2017 15:19:22 +0100
+Subject: arm64: mm: abort uaccess retries upon fatal signal
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit 289d07a2dc6c6b6f3e4b8a62669320d99dbe6c3d upstream.
+
+When there's a fatal signal pending, arm64's do_page_fault()
+implementation returns 0. The intent is that we'll return to the
+faulting userspace instruction, delivering the signal on the way.
+
+However, if we take a fatal signal while fixing up a uaccess, this
+results in a return to the faulting kernel instruction, which will be
+instantly retried, resulting in the same fault being taken forever. As
+the task never reaches userspace, the signal is not delivered, and the
+task is left unkillable. While the task is stuck in this state, it can
+inhibit the forward progress of the system.
+
+To avoid this, we must ensure that when a fatal signal is pending, we
+apply any necessary fixup for a faulting kernel instruction. Thus we
+will return to an error path, and it is up to that code to make forward
+progress towards delivering the fatal signal.
+
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Laura Abbott <labbott@redhat.com>
+Reviewed-by: Steve Capper <steve.capper@arm.com>
+Tested-by: Steve Capper <steve.capper@arm.com>
+Reviewed-by: James Morse <james.morse@arm.com>
+Tested-by: James Morse <james.morse@arm.com>
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/mm/fault.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -253,8 +253,11 @@ retry:
+        * signal first. We do not need to release the mmap_sem because it
+        * would already be released in __lock_page_or_retry in mm/filemap.c.
+        */
+-      if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
++      if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
++              if (!user_mode(regs))
++                      goto no_context;
+               return 0;
++      }
+       /*
+        * Major/minor page fault accounting is only done on the initial
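The control flow above is easier to see in a toy userspace model (purely
illustrative; none of these names are the kernel's).  Returning 0 for a
kernel-mode fault re-executes the faulting instruction, so with a fatal
signal pending the old code loops forever, while the fixed code escapes
through the exception-fixup path:

  #include <stdbool.h>
  #include <stdio.h>

  static bool fatal_pending = true;   /* fatal_signal_pending(current) */
  static bool from_user = false;      /* user_mode(regs) */

  /* Returns 0 to re-execute the faulting instruction, -1 to take the
   * exception-fixup (error) path instead. */
  static int fault_result(bool fixed)
  {
      bool retry = true;              /* mm returned VM_FAULT_RETRY */

      if (retry && fatal_pending) {
          if (fixed && !from_user)
              return -1;              /* new: propagate the error */
          return 0;                   /* old: kernel re-faults forever */
      }
      return 0;
  }

  int main(void)
  {
      printf("old behaviour: %d (endless re-fault)\n", fault_result(false));
      printf("fixed:         %d (fixup path taken)\n", fault_result(true));
      return 0;
  }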
diff --git a/queue-3.18/lib-bitmap-add-alignment-offset-for-bitmap_find_next_zero_area.patch b/queue-3.18/lib-bitmap-add-alignment-offset-for-bitmap_find_next_zero_area.patch
new file mode 100644 (file)
index 0000000..06dda1f
--- /dev/null
@@ -0,0 +1,138 @@
+From 5e19b013f55a884c59a14391b22138899d1cc4cc Mon Sep 17 00:00:00 2001
+From: Michal Nazarewicz <mina86@mina86.com>
+Date: Fri, 12 Dec 2014 16:54:45 -0800
+Subject: lib: bitmap: add alignment offset for bitmap_find_next_zero_area()
+
+From: Michal Nazarewicz <mina86@mina86.com>
+
+commit 5e19b013f55a884c59a14391b22138899d1cc4cc upstream.
+
+Add a bitmap_find_next_zero_area_off() function which works like
+bitmap_find_next_zero_area() except that it allows an offset to be
+specified when alignment is checked.  This lets the caller request a bit
+such that its number plus the offset is aligned according to the mask.
+
+[gregory.0xf0@gmail.com: Retrieved from https://patchwork.linuxtv.org/patch/6254/ and updated documentation]
+Signed-off-by: Michal Nazarewicz <mina86@mina86.com>
+Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Signed-off-by: Gregory Fong <gregory.0xf0@gmail.com>
+Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Cc: Kukjin Kim <kgene.kim@samsung.com>
+Cc: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Cc: Laura Abbott <lauraa@codeaurora.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/bitmap.h |   36 +++++++++++++++++++++++++++++++-----
+ lib/bitmap.c           |   24 +++++++++++++-----------
+ 2 files changed, 44 insertions(+), 16 deletions(-)
+
+--- a/include/linux/bitmap.h
++++ b/include/linux/bitmap.h
+@@ -45,6 +45,7 @@
+  * bitmap_set(dst, pos, nbits)                        Set specified bit area
+  * bitmap_clear(dst, pos, nbits)              Clear specified bit area
+  * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
++ * bitmap_find_next_zero_area_off(buf, len, pos, n, mask)     as above
+  * bitmap_shift_right(dst, src, n, nbits)     *dst = *src >> n
+  * bitmap_shift_left(dst, src, n, nbits)      *dst = *src << n
+  * bitmap_remap(dst, src, old, new, nbits)    *dst = map(old, new)(src)
+@@ -114,11 +115,36 @@ extern int __bitmap_weight(const unsigne
+ extern void bitmap_set(unsigned long *map, unsigned int start, int len);
+ extern void bitmap_clear(unsigned long *map, unsigned int start, int len);
+-extern unsigned long bitmap_find_next_zero_area(unsigned long *map,
+-                                       unsigned long size,
+-                                       unsigned long start,
+-                                       unsigned int nr,
+-                                       unsigned long align_mask);
++
++extern unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
++                                                  unsigned long size,
++                                                  unsigned long start,
++                                                  unsigned int nr,
++                                                  unsigned long align_mask,
++                                                  unsigned long align_offset);
++
++/**
++ * bitmap_find_next_zero_area - find a contiguous aligned zero area
++ * @map: The address to base the search on
++ * @size: The bitmap size in bits
++ * @start: The bitnumber to start searching at
++ * @nr: The number of zeroed bits we're looking for
++ * @align_mask: Alignment mask for zero area
++ *
++ * The @align_mask should be one less than a power of 2; the effect is that
++ * the bit offset of all zero areas this function finds is multiples of that
++ * power of 2. A @align_mask of 0 means no alignment is required.
++ */
++static inline unsigned long
++bitmap_find_next_zero_area(unsigned long *map,
++                         unsigned long size,
++                         unsigned long start,
++                         unsigned int nr,
++                         unsigned long align_mask)
++{
++      return bitmap_find_next_zero_area_off(map, size, start, nr,
++                                            align_mask, 0);
++}
+ extern int bitmap_scnprintf(char *buf, unsigned int len,
+                       const unsigned long *src, int nbits);
+--- a/lib/bitmap.c
++++ b/lib/bitmap.c
+@@ -326,30 +326,32 @@ void bitmap_clear(unsigned long *map, un
+ }
+ EXPORT_SYMBOL(bitmap_clear);
+-/*
+- * bitmap_find_next_zero_area - find a contiguous aligned zero area
++/**
++ * bitmap_find_next_zero_area_off - find a contiguous aligned zero area
+  * @map: The address to base the search on
+  * @size: The bitmap size in bits
+  * @start: The bitnumber to start searching at
+  * @nr: The number of zeroed bits we're looking for
+  * @align_mask: Alignment mask for zero area
++ * @align_offset: Alignment offset for zero area.
+  *
+  * The @align_mask should be one less than a power of 2; the effect is that
+- * the bit offset of all zero areas this function finds is multiples of that
+- * power of 2. A @align_mask of 0 means no alignment is required.
++ * the bit offset of all zero areas this function finds plus @align_offset
++ * is multiple of that power of 2.
+  */
+-unsigned long bitmap_find_next_zero_area(unsigned long *map,
+-                                       unsigned long size,
+-                                       unsigned long start,
+-                                       unsigned int nr,
+-                                       unsigned long align_mask)
++unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
++                                           unsigned long size,
++                                           unsigned long start,
++                                           unsigned int nr,
++                                           unsigned long align_mask,
++                                           unsigned long align_offset)
+ {
+       unsigned long index, end, i;
+ again:
+       index = find_next_zero_bit(map, size, start);
+       /* Align allocation */
+-      index = __ALIGN_MASK(index, align_mask);
++      index = __ALIGN_MASK(index + align_offset, align_mask) - align_offset;
+       end = index + nr;
+       if (end > size)
+@@ -361,7 +363,7 @@ again:
+       }
+       return index;
+ }
+-EXPORT_SYMBOL(bitmap_find_next_zero_area);
++EXPORT_SYMBOL(bitmap_find_next_zero_area_off);
+ /*
+  * Bitmap printing & parsing functions: first version by Nadia Yvette Chambers,
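A minimal userspace sketch of the alignment step (the __ALIGN_MASK
definition mirrors the kernel's; the values are invented): without an
offset the chosen bit must itself sit on a multiple of 16, while with
align_offset = 5 it is bit + 5 that must.

  #include <stdio.h>

  #define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))

  int main(void)
  {
      unsigned long align_mask = 0xfUL;    /* align to multiples of 16 */
      unsigned long align_offset = 5;
      unsigned long index = 1;             /* first zero bit found */

      /* Plain variant: 1 rounds up to 16. */
      printf("no offset:   %lu\n", __ALIGN_MASK(index, align_mask));

      /* Offset variant: 1 rounds up to 11, since 11 + 5 == 16. */
      printf("with offset: %lu\n",
             __ALIGN_MASK(index + align_offset, align_mask) - align_offset);
      return 0;
  }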
diff --git a/queue-3.18/mm-cma-align-to-physical-address-not-cma-region-position.patch b/queue-3.18/mm-cma-align-to-physical-address-not-cma-region-position.patch
new file mode 100644 (file)
index 0000000..9a29103
--- /dev/null
@@ -0,0 +1,86 @@
+From b5be83e308f70e16c63c4e520ea7bb03ef57c46f Mon Sep 17 00:00:00 2001
+From: Gregory Fong <gregory.0xf0@gmail.com>
+Date: Fri, 12 Dec 2014 16:54:48 -0800
+Subject: mm: cma: align to physical address, not CMA region position
+
+From: Gregory Fong <gregory.0xf0@gmail.com>
+
+commit b5be83e308f70e16c63c4e520ea7bb03ef57c46f upstream.
+
+The alignment in cma_alloc() was done w.r.t. the bitmap.  This is a
+problem when, for example:
+
+- a device requires 16M (order 12) alignment
+- the CMA region is not 16 M aligned
+
+In such a case, the CMA region might start at, say, 0x2f800000, but any
+allocation you make from there will be aligned relative to that base.
+Requesting an allocation of 32 M with 16 M alignment will then result
+in an allocation from 0x2f800000 to 0x31800000, which doesn't work very
+well if your strange device requires 16 M alignment.
+
+Change to use bitmap_find_next_zero_area_off() to account for the
+difference in alignment at reserve-time and alloc-time.
+
+Signed-off-by: Gregory Fong <gregory.0xf0@gmail.com>
+Acked-by: Michal Nazarewicz <mina86@mina86.com>
+Cc: Marek Szyprowski <m.szyprowski@samsung.com>
+Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Cc: Kukjin Kim <kgene.kim@samsung.com>
+Cc: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Cc: Laura Abbott <lauraa@codeaurora.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/cma.c |   19 ++++++++++++++++---
+ 1 file changed, 16 insertions(+), 3 deletions(-)
+
+--- a/mm/cma.c
++++ b/mm/cma.c
+@@ -63,6 +63,17 @@ static unsigned long cma_bitmap_aligned_
+       return (1UL << (align_order - cma->order_per_bit)) - 1;
+ }
++static unsigned long cma_bitmap_aligned_offset(struct cma *cma, int align_order)
++{
++      unsigned int alignment;
++
++      if (align_order <= cma->order_per_bit)
++              return 0;
++      alignment = 1UL << (align_order - cma->order_per_bit);
++      return ALIGN(cma->base_pfn, alignment) -
++              (cma->base_pfn >> cma->order_per_bit);
++}
++
+ static unsigned long cma_bitmap_maxno(struct cma *cma)
+ {
+       return cma->count >> cma->order_per_bit;
+@@ -341,7 +352,7 @@ err:
+  */
+ struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
+ {
+-      unsigned long mask, pfn, start = 0;
++      unsigned long mask, offset, pfn, start = 0;
+       unsigned long bitmap_maxno, bitmap_no, bitmap_count;
+       struct page *page = NULL;
+       int ret;
+@@ -356,13 +367,15 @@ struct page *cma_alloc(struct cma *cma,
+               return NULL;
+       mask = cma_bitmap_aligned_mask(cma, align);
++      offset = cma_bitmap_aligned_offset(cma, align);
+       bitmap_maxno = cma_bitmap_maxno(cma);
+       bitmap_count = cma_bitmap_pages_to_bits(cma, count);
+       for (;;) {
+               mutex_lock(&cma->lock);
+-              bitmap_no = bitmap_find_next_zero_area(cma->bitmap,
+-                              bitmap_maxno, start, bitmap_count, mask);
++              bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
++                              bitmap_maxno, start, bitmap_count, mask,
++                              offset);
+               if (bitmap_no >= bitmap_maxno) {
+                       mutex_unlock(&cma->lock);
+                       break;
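For the example above, the offset that cma_alloc() now feeds into
bitmap_find_next_zero_area_off() can be computed directly.  A sketch,
assuming 4 KiB pages and order_per_bit == 0:

  #include <stdio.h>

  int main(void)
  {
      unsigned long base  = 0x2f800000;     /* CMA region base address */
      unsigned long align = 16UL << 20;     /* 16 MiB device alignment */
      unsigned long first = (base + align - 1) & ~(align - 1);

      /* Prints 0x30000000 and 0x800: bit 0x800 of the bitmap is the
       * first page at which a 16 MiB-aligned allocation can start. */
      printf("first aligned address: %#lx\n", first);
      printf("bitmap offset (pages): %#lx\n", (first - base) >> 12);
      return 0;
  }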
diff --git a/queue-3.18/mm-cma-constify-and-use-correct-signness-in-mm-cma.c.patch b/queue-3.18/mm-cma-constify-and-use-correct-signness-in-mm-cma.c.patch
new file mode 100644 (file)
index 0000000..b25c3a8
--- /dev/null
@@ -0,0 +1,132 @@
+From ac173824959adeb489f9fcf88858774c4535a241 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sasha.levin@oracle.com>
+Date: Tue, 14 Apr 2015 15:47:04 -0700
+Subject: mm: cma: constify and use correct signness in mm/cma.c
+
+From: Sasha Levin <sasha.levin@oracle.com>
+
+commit ac173824959adeb489f9fcf88858774c4535a241 upstream.
+
+Constify function parameters and use correct signness where needed.
+
+Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
+Cc: Michal Nazarewicz <mina86@mina86.com>
+Cc: Marek Szyprowski <m.szyprowski@samsung.com>
+Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Cc: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
+Acked-by: Gregory Fong <gregory.0xf0@gmail.com>
+Cc: Pintu Kumar <pintu.k@samsung.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/cma.h |   12 ++++++------
+ mm/cma.c            |   24 ++++++++++++++----------
+ 2 files changed, 20 insertions(+), 16 deletions(-)
+
+--- a/include/linux/cma.h
++++ b/include/linux/cma.h
+@@ -16,16 +16,16 @@
+ struct cma;
+ extern unsigned long totalcma_pages;
+-extern phys_addr_t cma_get_base(struct cma *cma);
+-extern unsigned long cma_get_size(struct cma *cma);
++extern phys_addr_t cma_get_base(const struct cma *cma);
++extern unsigned long cma_get_size(const struct cma *cma);
+ extern int __init cma_declare_contiguous(phys_addr_t base,
+                       phys_addr_t size, phys_addr_t limit,
+                       phys_addr_t alignment, unsigned int order_per_bit,
+                       bool fixed, struct cma **res_cma);
+-extern int cma_init_reserved_mem(phys_addr_t base,
+-                                      phys_addr_t size, int order_per_bit,
++extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
++                                      unsigned int order_per_bit,
+                                       struct cma **res_cma);
+-extern struct page *cma_alloc(struct cma *cma, int count, unsigned int align);
+-extern bool cma_release(struct cma *cma, struct page *pages, int count);
++extern struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align);
++extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
+ #endif
+--- a/mm/cma.c
++++ b/mm/cma.c
+@@ -47,17 +47,18 @@ static struct cma cma_areas[MAX_CMA_AREA
+ static unsigned cma_area_count;
+ static DEFINE_MUTEX(cma_mutex);
+-phys_addr_t cma_get_base(struct cma *cma)
++phys_addr_t cma_get_base(const struct cma *cma)
+ {
+       return PFN_PHYS(cma->base_pfn);
+ }
+-unsigned long cma_get_size(struct cma *cma)
++unsigned long cma_get_size(const struct cma *cma)
+ {
+       return cma->count << PAGE_SHIFT;
+ }
+-static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order)
++static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
++                                           int align_order)
+ {
+       if (align_order <= cma->order_per_bit)
+               return 0;
+@@ -68,7 +69,8 @@ static unsigned long cma_bitmap_aligned_
+  * Find a PFN aligned to the specified order and return an offset represented in
+  * order_per_bits.
+  */
+-static unsigned long cma_bitmap_aligned_offset(struct cma *cma, int align_order)
++static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
++                                             int align_order)
+ {
+       if (align_order <= cma->order_per_bit)
+               return 0;
+@@ -82,13 +84,14 @@ static unsigned long cma_bitmap_maxno(st
+       return cma->count >> cma->order_per_bit;
+ }
+-static unsigned long cma_bitmap_pages_to_bits(struct cma *cma,
+-                                              unsigned long pages)
++static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
++                                            unsigned long pages)
+ {
+       return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
+ }
+-static void cma_clear_bitmap(struct cma *cma, unsigned long pfn, int count)
++static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
++                           unsigned int count)
+ {
+       unsigned long bitmap_no, bitmap_count;
+@@ -167,7 +170,8 @@ core_initcall(cma_init_reserved_areas);
+  * This function creates custom contiguous area from already reserved memory.
+  */
+ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
+-                               int order_per_bit, struct cma **res_cma)
++                               unsigned int order_per_bit,
++                               struct cma **res_cma)
+ {
+       struct cma *cma;
+       phys_addr_t alignment;
+@@ -359,7 +363,7 @@ err:
+  * This function allocates part of contiguous memory on specific
+  * contiguous memory area.
+  */
+-struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
++struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align)
+ {
+       unsigned long mask, offset, pfn, start = 0;
+       unsigned long bitmap_maxno, bitmap_no, bitmap_count;
+@@ -430,7 +434,7 @@ struct page *cma_alloc(struct cma *cma,
+  * It returns false when provided pages do not belong to contiguous area and
+  * true otherwise.
+  */
+-bool cma_release(struct cma *cma, struct page *pages, int count)
++bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
+ {
+       unsigned long pfn;
diff --git a/queue-3.18/mm-cma-fix-cma-aligned-offset-calculation.patch b/queue-3.18/mm-cma-fix-cma-aligned-offset-calculation.patch
new file mode 100644 (file)
index 0000000..f679b0d
--- /dev/null
@@ -0,0 +1,64 @@
+From 850fc430f47aad52092deaaeb32b99f97f0e6aca Mon Sep 17 00:00:00 2001
+From: Danesh Petigara <dpetigara@broadcom.com>
+Date: Thu, 12 Mar 2015 16:25:57 -0700
+Subject: mm: cma: fix CMA aligned offset calculation
+
+From: Danesh Petigara <dpetigara@broadcom.com>
+
+commit 850fc430f47aad52092deaaeb32b99f97f0e6aca upstream.
+
+The CMA aligned offset calculation is incorrect for non-zero order_per_bit
+values.
+
+For example, if cma->order_per_bit = 1, cma->base_pfn = 0x2f800 (a region
+based at 0x2f800000 with 4 KiB pages) and align_order = 12, the function
+returns a value of 0x17c00 instead of the correct 0x400.
+
+This patch fixes the CMA aligned offset calculation.
+
+The previous calculation was wrong and would return too-large values for
+the offset, so that when cma_alloc looks for free pages in the bitmap with
+the requested alignment > order_per_bit, it starts too far into the bitmap
+and so CMA allocations will fail despite there actually being plenty of
+free pages remaining.  It will also probably have the wrong alignment.
+With this change, we will get the correct offset into the bitmap.
+
+One affected user is powerpc KVM, which has kvm_cma->order_per_bit set to
+KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, or 18 - 12 = 6.
+
+[gregory.0xf0@gmail.com: changelog additions]
+Signed-off-by: Danesh Petigara <dpetigara@broadcom.com>
+Reviewed-by: Gregory Fong <gregory.0xf0@gmail.com>
+Acked-by: Michal Nazarewicz <mina86@mina86.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/cma.c |   12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+--- a/mm/cma.c
++++ b/mm/cma.c
+@@ -64,15 +64,17 @@ static unsigned long cma_bitmap_aligned_
+       return (1UL << (align_order - cma->order_per_bit)) - 1;
+ }
++/*
++ * Find a PFN aligned to the specified order and return an offset represented in
++ * order_per_bits.
++ */
+ static unsigned long cma_bitmap_aligned_offset(struct cma *cma, int align_order)
+ {
+-      unsigned int alignment;
+-
+       if (align_order <= cma->order_per_bit)
+               return 0;
+-      alignment = 1UL << (align_order - cma->order_per_bit);
+-      return ALIGN(cma->base_pfn, alignment) -
+-              (cma->base_pfn >> cma->order_per_bit);
++
++      return (ALIGN(cma->base_pfn, (1UL << align_order))
++              - cma->base_pfn) >> cma->order_per_bit;
+ }
+ static unsigned long cma_bitmap_maxno(struct cma *cma)
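The changelog's numbers can be checked with a few lines of userspace
arithmetic, assuming 4 KiB pages so that a region based at 0x2f800000
has base_pfn = 0x2f800:

  #include <stdio.h>

  #define ALIGN(x, a) (((x) + ((a) - 1)) & ~((unsigned long)(a) - 1))

  int main(void)
  {
      unsigned long base_pfn = 0x2f800;     /* 0x2f800000 >> 12 */
      unsigned int order_per_bit = 1, align_order = 12;
      unsigned long alignment = 1UL << (align_order - order_per_bit);

      unsigned long old_off = ALIGN(base_pfn, alignment)
                              - (base_pfn >> order_per_bit);
      unsigned long new_off = (ALIGN(base_pfn, 1UL << align_order)
                              - base_pfn) >> order_per_bit;

      printf("old formula: %#lx\n", old_off);   /* 0x17c00: far too big */
      printf("new formula: %#lx\n", new_off);   /* 0x400: correct       */
      return 0;
  }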
diff --git a/queue-3.18/mm-cma-fix-incorrect-type-conversion-for-size-during-dma-allocation.patch b/queue-3.18/mm-cma-fix-incorrect-type-conversion-for-size-during-dma-allocation.patch
new file mode 100644 (file)
index 0000000..41c0cc1
--- /dev/null
@@ -0,0 +1,96 @@
+From 67a2e213e7e937c41c52ab5bc46bf3f4de469f6e Mon Sep 17 00:00:00 2001
+From: Rohit Vaswani <rvaswani@codeaurora.org>
+Date: Thu, 22 Oct 2015 13:32:11 -0700
+Subject: mm: cma: fix incorrect type conversion for size during dma allocation
+
+From: Rohit Vaswani <rvaswani@codeaurora.org>
+
+commit 67a2e213e7e937c41c52ab5bc46bf3f4de469f6e upstream.
+
+This was found during a userspace fuzzing test, when a driver (such as
+ion) made a large-size DMA CMA allocation on behalf of userspace:
+
+  show_stack+0x10/0x1c
+  dump_stack+0x74/0xc8
+  kasan_report_error+0x2b0/0x408
+  kasan_report+0x34/0x40
+  __asan_storeN+0x15c/0x168
+  memset+0x20/0x44
+  __dma_alloc_coherent+0x114/0x18c
+
+Signed-off-by: Rohit Vaswani <rvaswani@codeaurora.org>
+Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Marek Szyprowski <m.szyprowski@samsung.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Miles Chen <miles.chen@mediatek.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/base/dma-contiguous.c  |    2 +-
+ include/linux/cma.h            |    2 +-
+ include/linux/dma-contiguous.h |    4 ++--
+ mm/cma.c                       |    4 ++--
+ 4 files changed, 6 insertions(+), 6 deletions(-)
+
+--- a/drivers/base/dma-contiguous.c
++++ b/drivers/base/dma-contiguous.c
+@@ -187,7 +187,7 @@ int __init dma_contiguous_reserve_area(p
+  * global one. Requires architecture specific dev_get_cma_area() helper
+  * function.
+  */
+-struct page *dma_alloc_from_contiguous(struct device *dev, int count,
++struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
+                                      unsigned int align)
+ {
+       if (align > CONFIG_CMA_ALIGNMENT)
+--- a/include/linux/cma.h
++++ b/include/linux/cma.h
+@@ -26,6 +26,6 @@ extern int __init cma_declare_contiguous
+ extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
+                                       unsigned int order_per_bit,
+                                       struct cma **res_cma);
+-extern struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align);
++extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align);
+ extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
+ #endif
+--- a/include/linux/dma-contiguous.h
++++ b/include/linux/dma-contiguous.h
+@@ -111,7 +111,7 @@ static inline int dma_declare_contiguous
+       return ret;
+ }
+-struct page *dma_alloc_from_contiguous(struct device *dev, int count,
++struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
+                                      unsigned int order);
+ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
+                                int count);
+@@ -144,7 +144,7 @@ int dma_declare_contiguous(struct device
+ }
+ static inline
+-struct page *dma_alloc_from_contiguous(struct device *dev, int count,
++struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
+                                      unsigned int order)
+ {
+       return NULL;
+--- a/mm/cma.c
++++ b/mm/cma.c
+@@ -363,7 +363,7 @@ err:
+  * This function allocates part of contiguous memory on specific
+  * contiguous memory area.
+  */
+-struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align)
++struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
+ {
+       unsigned long mask, offset, pfn, start = 0;
+       unsigned long bitmap_maxno, bitmap_no, bitmap_count;
+@@ -373,7 +373,7 @@ struct page *cma_alloc(struct cma *cma,
+       if (!cma || !cma->count)
+               return NULL;
+-      pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
++      pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
+                count, align);
+       if (!count)
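The hazard of the old int parameter is easy to reproduce in userspace (a
sketch with invented names and an invented request size; assumes 64-bit
size_t): a huge page count squeezed through int silently loses its high
bits, so the allocation no longer matches the size that is later memset.

  #include <stdio.h>

  /* Stand-ins for the old (int) and new (size_t) prototypes. */
  static size_t pages_via_int(int count)       { return (size_t)count; }
  static size_t pages_via_size_t(size_t count) { return count; }

  int main(void)
  {
      size_t size  = 1UL << 44;     /* absurd request, as fuzzing makes */
      size_t count = size >> 12;    /* 0x100000000 pages */

      /* Through int the high bits are dropped (here the count becomes
       * 0), so a later memset of the full size runs past the end. */
      printf("via int:    %zu pages\n", pages_via_int((int)count));
      printf("via size_t: %zu pages\n", pages_via_size_t(count));
      return 0;
  }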
diff --git a/queue-3.18/mm-cma-fix-totalcma_pages-to-include-dt-defined-cma-regions.patch b/queue-3.18/mm-cma-fix-totalcma_pages-to-include-dt-defined-cma-regions.patch
new file mode 100644 (file)
index 0000000..5c68619
--- /dev/null
@@ -0,0 +1,47 @@
+From 94737a85f332aee75255960eaa16e89ddfa4c75a Mon Sep 17 00:00:00 2001
+From: "George G. Davis" <ggdavisiv@gmail.com>
+Date: Wed, 11 Feb 2015 15:26:27 -0800
+Subject: mm: cma: fix totalcma_pages to include DT defined CMA regions
+
+From: George G. Davis <ggdavisiv@gmail.com>
+
+commit 94737a85f332aee75255960eaa16e89ddfa4c75a upstream.
+
+The totalcma_pages variable is not updated to account for CMA regions
+defined via device tree reserved-memory sub-nodes.  Fix this omission by
+moving the calculation of totalcma_pages into cma_init_reserved_mem()
+instead of cma_declare_contiguous() such that it will include reserved
+memory used by all CMA regions.
+
+Signed-off-by: George G. Davis <george_davis@mentor.com>
+Cc: Marek Szyprowski <m.szyprowski@samsung.com>
+Acked-by: Michal Nazarewicz <mina86@mina86.com>
+Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+Cc: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/cma.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/cma.c
++++ b/mm/cma.c
+@@ -200,6 +200,7 @@ int __init cma_init_reserved_mem(phys_ad
+       cma->order_per_bit = order_per_bit;
+       *res_cma = cma;
+       cma_area_count++;
++      totalcma_pages += (size / PAGE_SIZE);
+       return 0;
+ }
+@@ -338,7 +339,6 @@ int __init cma_declare_contiguous(phys_a
+       if (ret)
+               goto err;
+-      totalcma_pages += (size / PAGE_SIZE);
+       pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
+               &base);
+       return 0;
diff --git a/queue-3.18/mm-cma-make-kmemleak-ignore-cma-regions.patch b/queue-3.18/mm-cma-make-kmemleak-ignore-cma-regions.patch
new file mode 100644 (file)
index 0000000..bb4ffc3
--- /dev/null
@@ -0,0 +1,63 @@
+From 620951e2745750de1482128615adc15b74ee37ed Mon Sep 17 00:00:00 2001
+From: Thierry Reding <treding@nvidia.com>
+Date: Fri, 12 Dec 2014 16:58:31 -0800
+Subject: mm/cma: make kmemleak ignore CMA regions
+
+From: Thierry Reding <treding@nvidia.com>
+
+commit 620951e2745750de1482128615adc15b74ee37ed upstream.
+
+kmemleak will add allocations as objects to a pool.  The memory allocated
+for each object in this pool is periodically searched for pointers to
+other allocated objects.  This only works for memory that is mapped into
+the kernel's virtual address space, which happens not to be the case for
+most CMA regions.
+
+Furthermore, CMA regions are typically used to store data transferred to
+or from a device and therefore don't contain pointers to other objects.
+
+Without this, the kernel crashes on the first execution of
+scan_gray_list() because it tries to access highmem.  Perhaps a more
+appropriate fix would be to reject any object that can't map to a kernel
+virtual address?
+
+[akpm@linux-foundation.org: add comment]
+[akpm@linux-foundation.org: fix comment, per Catalin]
+[sfr@canb.auug.org.au: include linux/io.h for phys_to_virt()]
+Signed-off-by: Thierry Reding <treding@nvidia.com>
+Cc: Michal Nazarewicz <mina86@mina86.com>
+Cc: Marek Szyprowski <m.szyprowski@samsung.com>
+Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/cma.c |    6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/mm/cma.c
++++ b/mm/cma.c
+@@ -33,6 +33,7 @@
+ #include <linux/log2.h>
+ #include <linux/cma.h>
+ #include <linux/highmem.h>
++#include <linux/io.h>
+ struct cma {
+       unsigned long   base_pfn;
+@@ -325,6 +326,11 @@ int __init cma_declare_contiguous(phys_a
+                       }
+               }
++              /*
++               * kmemleak scans/reads tracked objects for pointers to other
++               * objects but this address isn't mapped and accessible
++               */
++              kmemleak_ignore(phys_to_virt(addr));
+               base = addr;
+       }
diff --git a/queue-3.18/mm-cma-split-cma-reserved-in-dmesg-log.patch b/queue-3.18/mm-cma-split-cma-reserved-in-dmesg-log.patch
new file mode 100644 (file)
index 0000000..f4dbac0
--- /dev/null
@@ -0,0 +1,94 @@
+From e48322abb061d75096fe52d71886b237e7ae7bfb Mon Sep 17 00:00:00 2001
+From: Pintu Kumar <pintu.k@samsung.com>
+Date: Thu, 18 Dec 2014 16:17:15 -0800
+Subject: mm: cma: split cma-reserved in dmesg log
+
+From: Pintu Kumar <pintu.k@samsung.com>
+
+commit e48322abb061d75096fe52d71886b237e7ae7bfb upstream.
+
+When the system boots up, the dmesg log shows the memory statistics
+along with the total reserved memory, e.g.:
+
+  Memory: 458840k/458840k available, 65448k reserved, 0K highmem
+
+When CMA is enabled, the total reserved memory remains the same, but the
+CMA memory is not counted as reserved; in /proc/meminfo it instead shows
+up as part of free memory.  This is confusing.  This patch corrects the
+problem by properly subtracting the CMA reserved memory from the total
+reserved memory in the dmesg log.
+
+Below is the dmesg snapshot from an ARM-based device with 512 MB RAM and
+a single 12 MB CMA region.
+
+Before this change:
+  Memory: 458840k/458840k available, 65448k reserved, 0K highmem
+
+After this change:
+  Memory: 458840k/458840k available, 53160k reserved, 12288k cma-reserved, 0K highmem
+
+Signed-off-by: Pintu Kumar <pintu.k@samsung.com>
+Signed-off-by: Vishnu Pratap Singh <vishnu.ps@samsung.com>
+Acked-by: Michal Nazarewicz <mina86@mina86.com>
+Cc: Rafael Aquini <aquini@redhat.com>
+Cc: Jerome Marchand <jmarchan@redhat.com>
+Cc: Marek Szyprowski <m.szyprowski@samsung.com>
+Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/cma.h |    1 +
+ mm/cma.c            |    1 +
+ mm/page_alloc.c     |    6 ++++--
+ 3 files changed, 6 insertions(+), 2 deletions(-)
+
+--- a/include/linux/cma.h
++++ b/include/linux/cma.h
+@@ -15,6 +15,7 @@
+ struct cma;
++extern unsigned long totalcma_pages;
+ extern phys_addr_t cma_get_base(struct cma *cma);
+ extern unsigned long cma_get_size(struct cma *cma);
+--- a/mm/cma.c
++++ b/mm/cma.c
+@@ -338,6 +338,7 @@ int __init cma_declare_contiguous(phys_a
+       if (ret)
+               goto err;
++      totalcma_pages += (size / PAGE_SIZE);
+       pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
+               &base);
+       return 0;
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -110,6 +110,7 @@ static DEFINE_SPINLOCK(managed_page_coun
+ unsigned long totalram_pages __read_mostly;
+ unsigned long totalreserve_pages __read_mostly;
++unsigned long totalcma_pages __read_mostly;
+ /*
+  * When calculating the number of globally allowed dirty pages, there
+  * is a certain number of per-zone reserves that should not be
+@@ -5522,7 +5523,7 @@ void __init mem_init_print_info(const ch
+       printk("Memory: %luK/%luK available "
+              "(%luK kernel code, %luK rwdata, %luK rodata, "
+-             "%luK init, %luK bss, %luK reserved"
++             "%luK init, %luK bss, %luK reserved, %luK cma-reserved"
+ #ifdef        CONFIG_HIGHMEM
+              ", %luK highmem"
+ #endif
+@@ -5530,7 +5531,8 @@ void __init mem_init_print_info(const ch
+              nr_free_pages() << (PAGE_SHIFT-10), physpages << (PAGE_SHIFT-10),
+              codesize >> 10, datasize >> 10, rosize >> 10,
+              (init_data_size + init_code_size) >> 10, bss_size >> 10,
+-             (physpages - totalram_pages) << (PAGE_SHIFT-10),
++             (physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT-10),
++             totalcma_pages << (PAGE_SHIFT-10),
+ #ifdef        CONFIG_HIGHMEM
+              totalhigh_pages << (PAGE_SHIFT-10),
+ #endif
diff --git a/queue-3.18/series b/queue-3.18/series
index aef7592910d293a5c95341fcc317f6970d62ca97..9b048b333177c88873be7000fbfe1afc24a26f8a 100644 (file)
@@ -4,3 +4,13 @@ gcov-add-support-for-gcc-5.1.patch
 gcov-add-support-for-gcc-version-6.patch
 gcov-support-gcc-7.1.patch
 p54-memset-0-whole-array.patch
+arm64-mm-abort-uaccess-retries-upon-fatal-signal.patch
+lib-bitmap-add-alignment-offset-for-bitmap_find_next_zero_area.patch
+mm-cma-align-to-physical-address-not-cma-region-position.patch
+mm-cma-make-kmemleak-ignore-cma-regions.patch
+mm-cma-split-cma-reserved-in-dmesg-log.patch
+mm-cma-fix-totalcma_pages-to-include-dt-defined-cma-regions.patch
+mm-cma-fix-cma-aligned-offset-calculation.patch
+mm-cma-constify-and-use-correct-signness-in-mm-cma.c.patch
+mm-cma-fix-incorrect-type-conversion-for-size-during-dma-allocation.patch
+x86-io-add-memory-clobber-to-insb-insw-insl-outsb-outsw-outsl.patch
diff --git a/queue-3.18/x86-io-add-memory-clobber-to-insb-insw-insl-outsb-outsw-outsl.patch b/queue-3.18/x86-io-add-memory-clobber-to-insb-insw-insl-outsb-outsw-outsl.patch
new file mode 100644 (file)
index 0000000..f25b429
--- /dev/null
@@ -0,0 +1,66 @@
+From 7206f9bf108eb9513d170c73f151367a1bdf3dbf Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Wed, 19 Jul 2017 14:53:02 +0200
+Subject: x86/io: Add "memory" clobber to insb/insw/insl/outsb/outsw/outsl
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+commit 7206f9bf108eb9513d170c73f151367a1bdf3dbf upstream.
+
+The x86 version of insb/insw/insl uses inline assembly that does
+not have the target buffer listed as an output. This can confuse
+the compiler, leading it to think that the buffer is uninitialized
+when it is subsequently accessed:
+
+  drivers/net/wireless/wl3501_cs.c: In function ‘wl3501_mgmt_scan_confirm’:
+  drivers/net/wireless/wl3501_cs.c:665:9: error: ‘sig.status’ is used uninitialized in this function [-Werror=uninitialized]
+  drivers/net/wireless/wl3501_cs.c:668:12: error: ‘sig.cap_info’ may be used uninitialized in this function [-Werror=maybe-uninitialized]
+  drivers/net/sb1000.c: In function 'sb1000_rx':
+  drivers/net/sb1000.c:775:9: error: 'st[0]' is used uninitialized in this function [-Werror=uninitialized]
+  drivers/net/sb1000.c:776:10: error: 'st[1]' may be used uninitialized in this function [-Werror=maybe-uninitialized]
+  drivers/net/sb1000.c:784:11: error: 'st[1]' may be used uninitialized in this function [-Werror=maybe-uninitialized]
+
+I tried to mark the exact input buffer as an output here, but couldn't
+figure it out. As suggested by Linus, marking all memory as clobbered
+however is good enough too. For the outs operations, I also add the
+memory clobber, to force the input to be written to local variables.
+This is probably already guaranteed by the "asm volatile", but it can't
+hurt to do this for symmetry.
+
+Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Link: http://lkml.kernel.org/r/20170719125310.2487451-5-arnd@arndb.de
+Link: https://lkml.org/lkml/2017/7/12/605
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/io.h |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/include/asm/io.h
++++ b/arch/x86/include/asm/io.h
+@@ -297,13 +297,13 @@ static inline unsigned type in##bwl##_p(
+ static inline void outs##bwl(int port, const void *addr, unsigned long count) \
+ {                                                                     \
+       asm volatile("rep; outs" #bwl                                   \
+-                   : "+S"(addr), "+c"(count) : "d"(port));            \
++                   : "+S"(addr), "+c"(count) : "d"(port) : "memory"); \
+ }                                                                     \
+                                                                       \
+ static inline void ins##bwl(int port, void *addr, unsigned long count)        \
+ {                                                                     \
+       asm volatile("rep; ins" #bwl                                    \
+-                   : "+D"(addr), "+c"(count) : "d"(port));            \
++                   : "+D"(addr), "+c"(count) : "d"(port) : "memory"); \
+ }
+ BUILDIO(b, b, char)
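The pattern is easy to demonstrate standalone (a compile-only sketch for
x86; my_insw is an invented stand-in for the kernel's ins##bwl expansion,
and actually executing the instruction would need I/O port privileges):

  /* With the "memory" clobber the compiler knows *addr was written. */
  static inline void my_insw(int port, void *addr, unsigned long count)
  {
      asm volatile("rep; insw"
                   : "+D"(addr), "+c"(count)
                   : "d"(port)
                   : "memory");
  }

  unsigned short first_word(int port)
  {
      unsigned short buf[8];

      my_insw(port, buf, 8);
      /* Without the clobber, the compiler may assume buf was never
       * written -- warning that it is used uninitialized, or even
       * dropping the load below. */
      return buf[0];
  }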