git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.4-stable patches
author: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 30 Jul 2014 00:59:17 +0000 (17:59 -0700)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 30 Jul 2014 00:59:17 +0000 (17:59 -0700)
added patches:
introduce-size_max.patch
mm-kmemleak-avoid-false-negatives-on-vmalloc-ed-objects.patch

queue-3.4/introduce-size_max.patch [new file with mode: 0644]
queue-3.4/mm-kmemleak-avoid-false-negatives-on-vmalloc-ed-objects.patch [new file with mode: 0644]
queue-3.4/series

diff --git a/queue-3.4/introduce-size_max.patch b/queue-3.4/introduce-size_max.patch
new file mode 100644 (file)
index 0000000..76ab61c
--- /dev/null
@@ -0,0 +1,84 @@
+From a3860c1c5dd1137db23d7786d284939c5761d517 Mon Sep 17 00:00:00 2001
+From: Xi Wang <xi.wang@gmail.com>
+Date: Thu, 31 May 2012 16:26:04 -0700
+Subject: introduce SIZE_MAX
+
+From: Xi Wang <xi.wang@gmail.com>
+
+commit a3860c1c5dd1137db23d7786d284939c5761d517 upstream.
+
+ULONG_MAX is often used to check for integer overflow when calculating
+allocation size.  While ULONG_MAX happens to work on most systems, there
+is no guarantee that `size_t' must be the same size as `long'.
+
+This patch introduces SIZE_MAX, the maximum value of `size_t', to improve
+portability and readability for allocation size validation.
+
+Signed-off-by: Xi Wang <xi.wang@gmail.com>
+Acked-by: Alex Elder <elder@dreamhost.com>
+Cc: David Airlie <airlied@linux.ie>
+Cc: Pekka Enberg <penberg@kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Qiang Huang <h.huangqiang@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ceph/snap.c             |    2 +-
+ include/drm/drm_mem_util.h |    4 ++--
+ include/linux/kernel.h     |    1 +
+ include/linux/slab.h       |    2 +-
+ 4 files changed, 5 insertions(+), 4 deletions(-)
+
+--- a/fs/ceph/snap.c
++++ b/fs/ceph/snap.c
+@@ -331,7 +331,7 @@ static int build_snap_context(struct cep
+       /* alloc new snap context */
+       err = -ENOMEM;
+-      if (num > (ULONG_MAX - sizeof(*snapc)) / sizeof(u64))
++      if (num > (SIZE_MAX - sizeof(*snapc)) / sizeof(u64))
+               goto fail;
+       snapc = kzalloc(sizeof(*snapc) + num*sizeof(u64), GFP_NOFS);
+       if (!snapc)
+--- a/include/drm/drm_mem_util.h
++++ b/include/drm/drm_mem_util.h
+@@ -31,7 +31,7 @@
+ static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
+ {
+-      if (size != 0 && nmemb > ULONG_MAX / size)
++      if (size != 0 && nmemb > SIZE_MAX / size)
+               return NULL;
+       if (size * nmemb <= PAGE_SIZE)
+@@ -44,7 +44,7 @@ static __inline__ void *drm_calloc_large
+ /* Modeled after cairo's malloc_ab, it's like calloc but without the zeroing. */
+ static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size)
+ {
+-      if (size != 0 && nmemb > ULONG_MAX / size)
++      if (size != 0 && nmemb > SIZE_MAX / size)
+               return NULL;
+       if (size * nmemb <= PAGE_SIZE)
+--- a/include/linux/kernel.h
++++ b/include/linux/kernel.h
+@@ -35,6 +35,7 @@
+ #define LLONG_MAX     ((long long)(~0ULL>>1))
+ #define LLONG_MIN     (-LLONG_MAX - 1)
+ #define ULLONG_MAX    (~0ULL)
++#define SIZE_MAX      (~(size_t)0)
+ #define STACK_MAGIC   0xdeadbeef
+--- a/include/linux/slab.h
++++ b/include/linux/slab.h
+@@ -242,7 +242,7 @@ size_t ksize(const void *);
+  */
+ static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
+ {
+-      if (size != 0 && n > ULONG_MAX / size)
++      if (size != 0 && n > SIZE_MAX / size)
+               return NULL;
+       return __kmalloc(n * size, flags);
+ }
diff --git a/queue-3.4/mm-kmemleak-avoid-false-negatives-on-vmalloc-ed-objects.patch b/queue-3.4/mm-kmemleak-avoid-false-negatives-on-vmalloc-ed-objects.patch
new file mode 100644 (file)
index 0000000..b4f120d
--- /dev/null
@@ -0,0 +1,77 @@
+From 7f88f88f83ed609650a01b18572e605ea50cd163 Mon Sep 17 00:00:00 2001
+From: Catalin Marinas <catalin.marinas@arm.com>
+Date: Tue, 12 Nov 2013 15:07:45 -0800
+Subject: mm: kmemleak: avoid false negatives on vmalloc'ed objects
+
+From: Catalin Marinas <catalin.marinas@arm.com>
+
+commit 7f88f88f83ed609650a01b18572e605ea50cd163 upstream.
+
+Commit 248ac0e1943a ("mm/vmalloc: remove guard page from between vmap
+blocks") had the side effect of making vmap_area.va_end member point to
+the next vmap_area.va_start.  This was creating an artificial reference
+to vmalloc'ed objects and kmemleak was rarely reporting vmalloc() leaks.
+
+This patch marks the vmap_area containing pointers explicitly and
+reduces the min ref_count to 2 as vm_struct still contains a reference
+to the vmalloc'ed object.  The kmemleak add_scan_area() function has
+been improved to allow a SIZE_MAX argument covering the rest of the
+object (for simpler calling sites).
+
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[hq: Backported to 3.4: Adjust context]
+Signed-off-by: Qiang Huang <h.huangqiang@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ mm/kmemleak.c |    4 +++-
+ mm/vmalloc.c  |   14 ++++++++++----
+ 2 files changed, 13 insertions(+), 5 deletions(-)
+
+--- a/mm/kmemleak.c
++++ b/mm/kmemleak.c
+@@ -750,7 +750,9 @@ static void add_scan_area(unsigned long
+       }
+       spin_lock_irqsave(&object->lock, flags);
+-      if (ptr + size > object->pointer + object->size) {
++      if (size == SIZE_MAX) {
++              size = object->pointer + object->size - ptr;
++      } else if (ptr + size > object->pointer + object->size) {
+               kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
+               dump_object_info(object);
+               kmem_cache_free(scan_area_cache, area);
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -349,6 +349,12 @@ static struct vmap_area *alloc_vmap_area
+       if (unlikely(!va))
+               return ERR_PTR(-ENOMEM);
++      /*
++       * Only scan the relevant parts containing pointers to other objects
++       * to avoid false negatives.
++       */
++      kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);
++
+ retry:
+       spin_lock(&vmap_area_lock);
+       /*
+@@ -1669,11 +1675,11 @@ void *__vmalloc_node_range(unsigned long
+       insert_vmalloc_vmlist(area);
+       /*
+-       * A ref_count = 3 is needed because the vm_struct and vmap_area
+-       * structures allocated in the __get_vm_area_node() function contain
+-       * references to the virtual address of the vmalloc'ed block.
++       * A ref_count = 2 is needed because vm_struct allocated in
++       * __get_vm_area_node() contains a reference to the virtual address of
++       * the vmalloc'ed block.
+        */
+-      kmemleak_alloc(addr, real_size, 3, gfp_mask);
++      kmemleak_alloc(addr, real_size, 2, gfp_mask);
+       return addr;
index 5d5fe16d80525b5c733378d64438d0f191006829..d5a8493399b6f18979e9cca2b35b37f24780d277 100644 (file)
@@ -6,3 +6,5 @@ x86_32-entry-store-badsys-error-code-in-eax.patch
 mm-hugetlb-fix-copy_hugetlb_page_range.patch
 fix-gcc-4.9.0-miscompilation-of-load_balance-in-scheduler.patch
 s390-ptrace-fix-psw-mask-check.patch
+introduce-size_max.patch
+mm-kmemleak-avoid-false-negatives-on-vmalloc-ed-objects.patch