From: Greg Kroah-Hartman
Date: Wed, 11 Jul 2012 15:45:28 +0000 (-0700)
Subject: 3.4-stable patches
X-Git-Tag: v3.0.37~22
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=ddd187c31f6b113d5bc490f3863d12a6e05d3245;p=thirdparty%2Fkernel%2Fstable-queue.git

3.4-stable patches

added patches:
        mm-memblock-cleanup-on-duplicate-va-pa-conversion.patch
        mm-memblock-fix-memory-leak-on-extending-regions.patch
        mm-memblock-fix-overlapping-allocation-when-doubling-reserved-array.patch
---

diff --git a/queue-3.4/mm-memblock-cleanup-on-duplicate-va-pa-conversion.patch b/queue-3.4/mm-memblock-cleanup-on-duplicate-va-pa-conversion.patch
new file mode 100644
index 00000000000..01b814a6426
--- /dev/null
+++ b/queue-3.4/mm-memblock-cleanup-on-duplicate-va-pa-conversion.patch
@@ -0,0 +1,51 @@
+From 4e2f07750d9a94e8f23e86408df5ab95be88bf11 Mon Sep 17 00:00:00 2001
+From: Gavin Shan
+Date: Tue, 29 May 2012 15:06:50 -0700
+Subject: mm/memblock: cleanup on duplicate VA/PA conversion
+
+From: Gavin Shan
+
+commit 4e2f07750d9a94e8f23e86408df5ab95be88bf11 upstream.
+
+The overall memblock has been organized into memory regions and
+reserved regions.  Initially, the memory regions and reserved regions
+are stored in predetermined arrays of "struct memblock_region".  The
+arrays can be enlarged when new regions are added but no space is left
+in them; in that case a double-sized array is created.  However, the
+original implementation converted the VA (Virtual Address) of the
+newly allocated array to a PA (Physical Address) and then translated
+it back, even when the array came from the slab allocator and its VA
+was already known.  That round trip is unnecessary.
+
+The patch removes the duplicate VA/PA conversion.
+
+Signed-off-by: Gavin Shan
+Cc: Johannes Weiner
+Signed-off-by: Andrew Morton
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ mm/memblock.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/mm/memblock.c
++++ b/mm/memblock.c
+@@ -212,14 +212,15 @@ static int __init_memblock memblock_doub
+ 	if (use_slab) {
+ 		new_array = kmalloc(new_size, GFP_KERNEL);
+ 		addr = new_array ? __pa(new_array) : 0;
+-	} else
++	} else {
+ 		addr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE, new_size, sizeof(phys_addr_t));
++		new_array = addr ? __va(addr) : 0;
++	}
+ 	if (!addr) {
+ 		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
+ 		       memblock_type_name(type), type->max, type->max * 2);
+ 		return -1;
+ 	}
+-	new_array = __va(addr);
+
+ 	memblock_dbg("memblock: %s array is doubled to %ld at [%#010llx-%#010llx]",
+ 		 memblock_type_name(type), type->max * 2, (u64)addr, (u64)addr + new_size - 1);
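The cleanup is easier to see outside a diff.  Below is a minimal user-space
sketch of the resulting allocation path, under mocked primitives: malloc()
stands in for kmalloc(), an identity mapping stands in for __pa()/__va(),
and find_in_range(), bootmem_pool and alloc_array() are illustrative names
rather than kernel APIs.  The point is that each branch now produces a
matched (physical address, virtual address) pair directly, so the old
trailing __va() translation disappears.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef uint64_t phys_addr_t;

/* Mock address translation: an identity map stands in for the kernel's
 * linear mapping, so these __pa()/__va() are illustrative only. */
static phys_addr_t __pa(void *va) { return (phys_addr_t)(uintptr_t)va; }
static void *__va(phys_addr_t pa) { return (void *)(uintptr_t)pa; }

/* Stand-in for memblock_find_in_range(): carve from a static pool. */
static char bootmem_pool[4096];
static size_t bootmem_used;

static phys_addr_t find_in_range(size_t size)
{
        if (bootmem_used + size > sizeof(bootmem_pool))
                return 0;
        bootmem_used += size;
        return __pa(bootmem_pool + bootmem_used - size);
}

/* The shape of the patched code: each branch yields a matching
 * (addr, new_array) pair, so no trailing __va(addr) is needed. */
static void *alloc_array(size_t new_size, int use_slab, phys_addr_t *addr)
{
        void *new_array;

        if (use_slab) {
                new_array = malloc(new_size);   /* kmalloc() in the kernel */
                *addr = new_array ? __pa(new_array) : 0;
        } else {
                *addr = find_in_range(new_size);
                new_array = *addr ? __va(*addr) : NULL;
        }
        return new_array;
}

int main(void)
{
        phys_addr_t addr;
        void *arr;

        arr = alloc_array(128, 1, &addr);       /* slab path */
        printf("slab:     va=%p pa=%#llx\n", arr, (unsigned long long)addr);
        free(arr);

        arr = alloc_array(128, 0, &addr);       /* memblock path */
        printf("memblock: va=%p pa=%#llx\n", arr, (unsigned long long)addr);
        return 0;
}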
diff --git a/queue-3.4/mm-memblock-fix-memory-leak-on-extending-regions.patch b/queue-3.4/mm-memblock-fix-memory-leak-on-extending-regions.patch
new file mode 100644
index 00000000000..6a9e962f383
--- /dev/null
+++ b/queue-3.4/mm-memblock-fix-memory-leak-on-extending-regions.patch
@@ -0,0 +1,103 @@
+From 181eb39425f2b9275afcb015eaa547d11f71a02f Mon Sep 17 00:00:00 2001
+From: Gavin Shan
+Date: Tue, 29 May 2012 15:06:50 -0700
+Subject: mm/memblock: fix memory leak on extending regions
+
+From: Gavin Shan
+
+commit 181eb39425f2b9275afcb015eaa547d11f71a02f upstream.
+
+The overall memblock has been organized into memory regions and
+reserved regions.  Initially, the memory regions and reserved regions
+are stored in predetermined arrays of "struct memblock_region".  The
+arrays can be enlarged when new regions are added but no free space is
+left in them.  The policy is then to create a double-sized array with
+either the slab allocator or the memblock allocator.  Unfortunately,
+the old array, which might itself have been allocated through the slab
+allocator, was never freed, causing a memory leak.
+
+The patch introduces two variables to track whether the memory and
+reserved region arrays came from slab or from memblock.  An old array
+is now deallocated with kfree() when it was allocated by the slab
+allocator.  This fixes the memory leak.
+
+Signed-off-by: Gavin Shan
+Cc: Johannes Weiner
+Signed-off-by: Andrew Morton
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ mm/memblock.c | 37 ++++++++++++++++++++++++-------------
+ 1 file changed, 24 insertions(+), 13 deletions(-)
+
+--- a/mm/memblock.c
++++ b/mm/memblock.c
+@@ -37,6 +37,8 @@ struct memblock memblock __initdata_memb
+
+ int memblock_debug __initdata_memblock;
+ static int memblock_can_resize __initdata_memblock;
++static int memblock_memory_in_slab __initdata_memblock = 0;
++static int memblock_reserved_in_slab __initdata_memblock = 0;
+
+ /* inline so we don't get a warning when pr_debug is compiled out */
+ static inline const char *memblock_type_name(struct memblock_type *type)
+@@ -187,6 +189,7 @@ static int __init_memblock memblock_doub
+ 	struct memblock_region *new_array, *old_array;
+ 	phys_addr_t old_size, new_size, addr;
+ 	int use_slab = slab_is_available();
++	int *in_slab;
+
+ 	/* We don't allow resizing until we know about the reserved regions
+ 	 * of memory that aren't suitable for allocation
+@@ -198,6 +201,12 @@ static int __init_memblock memblock_doub
+ 	old_size = type->max * sizeof(struct memblock_region);
+ 	new_size = old_size << 1;
+
++	/* Retrieve the slab flag */
++	if (type == &memblock.memory)
++		in_slab = &memblock_memory_in_slab;
++	else
++		in_slab = &memblock_reserved_in_slab;
++
+ 	/* Try to find some space for it.
+ 	 *
+ 	 * WARNING: We assume that either slab_is_available() and we use it or
+@@ -235,22 +244,24 @@ static int __init_memblock memblock_doub
+ 	type->regions = new_array;
+ 	type->max <<= 1;
+
+-	/* If we use SLAB that's it, we are done */
+-	if (use_slab)
+-		return 0;
+-
+-	/* Add the new reserved region now. Should not fail ! */
+-	BUG_ON(memblock_reserve(addr, new_size));
+-
+-	/* If the array wasn't our static init one, then free it. We only do
+-	 * that before SLAB is available as later on, we don't know whether
+-	 * to use kfree or free_bootmem_pages(). Shouldn't be a big deal
+-	 * anyways
++	/* Free old array. We needn't free it if the array is the
++	 * static one
+ 	 */
+-	if (old_array != memblock_memory_init_regions &&
+-	    old_array != memblock_reserved_init_regions)
++	if (*in_slab)
++		kfree(old_array);
++	else if (old_array != memblock_memory_init_regions &&
++		 old_array != memblock_reserved_init_regions)
+ 		memblock_free(__pa(old_array), old_size);
+
++	/* Reserve the new array if that comes from the memblock.
++	 * Otherwise, we needn't do it
++	 */
++	if (!use_slab)
++		BUG_ON(memblock_reserve(addr, new_size));
++
++	/* Update slab flag */
++	*in_slab = use_slab;
++
+ 	return 0;
+ }
+
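As a rough model of the bookkeeping this patch adds, the self-contained
sketch below grows an array that starts out in static storage, with
malloc()/free() standing in for the slab allocator; double_array(),
init_regions and in_slab are illustrative stand-ins, not the kernel's
symbols.  The release path is the crux: the old backing store is freed only
when the tracked flag says it was heap-allocated, never when it is the
static seed array.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define INIT_REGIONS 4

struct region { long base, size; };

static struct region init_regions[INIT_REGIONS];   /* static seed array */

static struct region *regions = init_regions;
static size_t max_regions = INIT_REGIONS;
static int in_slab;                 /* does 'regions' live on the heap? */

static int double_array(void)
{
        size_t old_size = max_regions * sizeof(*regions);
        struct region *new_array = malloc(old_size * 2);

        if (!new_array)
                return -1;
        memcpy(new_array, regions, old_size);

        /* Free the old array, but only if we own it (not the static one). */
        if (in_slab)
                free(regions);

        regions = new_array;
        max_regions *= 2;
        in_slab = 1;            /* from now on, backing is heap-allocated */
        return 0;
}

int main(void)
{
        double_array();         /* static -> heap: nothing to free */
        double_array();         /* heap -> heap: old array freed, no leak */
        printf("max_regions=%zu in_slab=%d\n", max_regions, in_slab);
        free(regions);
        return 0;
}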
diff --git a/queue-3.4/mm-memblock-fix-overlapping-allocation-when-doubling-reserved-array.patch b/queue-3.4/mm-memblock-fix-overlapping-allocation-when-doubling-reserved-array.patch
new file mode 100644
index 00000000000..8055e06f4d7
--- /dev/null
+++ b/queue-3.4/mm-memblock-fix-overlapping-allocation-when-doubling-reserved-array.patch
@@ -0,0 +1,102 @@
+From 48c3b583bbddad2220ca4c22319ca5d1f78b2090 Mon Sep 17 00:00:00 2001
+From: Greg Pearson
+Date: Wed, 20 Jun 2012 12:53:05 -0700
+Subject: mm/memblock: fix overlapping allocation when doubling reserved array
+
+From: Greg Pearson
+
+commit 48c3b583bbddad2220ca4c22319ca5d1f78b2090 upstream.
+
+__alloc_memory_core_early() asks memblock for a range of memory then
+tries to reserve it.  If the reserved region array lacks space for the
+new range, memblock_double_array() is called to allocate more space
+for the array.  If memblock is used to allocate memory for the new
+array it can end up using a range that overlaps with the range
+originally allocated in __alloc_memory_core_early(), leading to
+possible data corruption.
+
+With this patch memblock_double_array() now calls
+memblock_find_in_range() with a narrowed candidate range (in cases
+where the reserved.regions array is being doubled) so any memory
+allocated will not overlap with the original range that was being
+reserved.  The range is narrowed by passing in the starting address
+and size of the previously allocated range.  Then the range above the
+ending address is searched and if a candidate is not found, the range
+below the starting address is searched.
+
+Signed-off-by: Greg Pearson
+Signed-off-by: Yinghai Lu
+Acked-by: Tejun Heo
+Cc: Benjamin Herrenschmidt
+Signed-off-by: Andrew Morton
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ mm/memblock.c | 36 ++++++++++++++++++++++++++++++++----
+ 1 file changed, 32 insertions(+), 4 deletions(-)
+
+--- a/mm/memblock.c
++++ b/mm/memblock.c
+@@ -184,7 +184,24 @@ static void __init_memblock memblock_rem
+ 	}
+ }
+
+-static int __init_memblock memblock_double_array(struct memblock_type *type)
++/**
++ * memblock_double_array - double the size of the memblock regions array
++ * @type: memblock type of the regions array being doubled
++ * @new_area_start: starting address of memory range to avoid overlap with
++ * @new_area_size: size of memory range to avoid overlap with
++ *
++ * Double the size of the @type regions array. If memblock is being used to
++ * allocate memory for a new reserved regions array and there is a previously
++ * allocated memory range [@new_area_start,@new_area_start+@new_area_size]
++ * waiting to be reserved, ensure the memory used by the new array does
++ * not overlap.
++ *
++ * RETURNS:
++ * 0 on success, -1 on failure.
++ */
++static int __init_memblock memblock_double_array(struct memblock_type *type,
++						phys_addr_t new_area_start,
++						phys_addr_t new_area_size)
+ {
+ 	struct memblock_region *new_array, *old_array;
+ 	phys_addr_t old_size, new_size, addr;
+@@ -222,7 +239,18 @@ static int __init_memblock memblock_doub
+ 		new_array = kmalloc(new_size, GFP_KERNEL);
+ 		addr = new_array ? __pa(new_array) : 0;
+ 	} else {
+-		addr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE, new_size, sizeof(phys_addr_t));
++		/* only exclude range when trying to double reserved.regions */
++		if (type != &memblock.reserved)
++			new_area_start = new_area_size = 0;
++
++		addr = memblock_find_in_range(new_area_start + new_area_size,
++						memblock.current_limit,
++						new_size, sizeof(phys_addr_t));
++		if (!addr && new_area_size)
++			addr = memblock_find_in_range(0,
++				min(new_area_start, memblock.current_limit),
++				new_size, sizeof(phys_addr_t));
++
+ 		new_array = addr ? __va(addr) : 0;
+ 	}
+ 	if (!addr) {
+@@ -399,7 +427,7 @@ repeat:
+ 	 */
+ 	if (!insert) {
+ 		while (type->cnt + nr_new > type->max)
+-			if (memblock_double_array(type) < 0)
++			if (memblock_double_array(type, obase, size) < 0)
+ 				return -ENOMEM;
+ 		insert = true;
+ 		goto repeat;
+@@ -450,7 +478,7 @@ static int __init_memblock memblock_isol
+
+ 	/* we'll create at most two more regions */
+ 	while (type->cnt + 2 > type->max)
+-		if (memblock_double_array(type) < 0)
++		if (memblock_double_array(type, base, size) < 0)
+ 			return -ENOMEM;
+
+ 	for (i = 0; i < type->cnt; i++) {
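The narrowed search can be modeled in isolation.  In the sketch below,
memblock_find_in_range() is reduced to a trivial first-fit scan over a mock
free map, and find_avoiding() reproduces the patch's call pattern under
that assumption: first search above the range that must not be overlapped,
then fall back to the area below it.  All names here are illustrative, not
kernel functions.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint64_t phys_addr_t;

#define MEM_SIZE 256
static unsigned char free_map[MEM_SIZE];   /* 1 = free "byte" of memory */

/* Trivial first-fit stand-in for memblock_find_in_range(). */
static phys_addr_t find_in_range(phys_addr_t start, phys_addr_t end,
                                 size_t size)
{
        for (phys_addr_t a = start; a + size <= end && a + size <= MEM_SIZE; a++) {
                size_t i;

                for (i = 0; i < size && free_map[a + i]; i++)
                        ;
                if (i == size)
                        return a;
        }
        return 0;       /* 0 doubles as "nothing found", as in memblock */
}

/* The patch's search order: above the range to avoid, then below it. */
static phys_addr_t find_avoiding(phys_addr_t avoid_start, size_t avoid_size,
                                 phys_addr_t limit, size_t size)
{
        phys_addr_t addr = find_in_range(avoid_start + avoid_size, limit, size);

        if (!addr && avoid_size)
                addr = find_in_range(0, avoid_start < limit ? avoid_start : limit,
                                     size);
        return addr;
}

int main(void)
{
        memset(free_map, 1, sizeof(free_map));
        memset(free_map + 200, 0, 56);  /* only [0,200) is free */

        /* Pretend [100,132) was just handed out and awaits reservation;
         * a naive scan from 0 could land inside it. */
        phys_addr_t addr = find_avoiding(100, 32, MEM_SIZE, 64);

        printf("picked [%llu,%llu), no overlap with [100,132)\n",
               (unsigned long long)addr, (unsigned long long)(addr + 64));
        return 0;
}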
diff --git a/queue-3.4/series b/queue-3.4/series
index 3d012c190cb..65daab669cf 100644
--- a/queue-3.4/series
+++ b/queue-3.4/series
@@ -134,3 +134,6 @@ dm-persistent-data-fix-allocation-failure-in-space-map-checker-init.patch
 ecryptfs-gracefully-refuse-miscdev-file-ops-on-inherited-passed-files.patch
 ecryptfs-fix-lockdep-warning-in-miscdev-operations.patch
 ecryptfs-properly-check-for-o_rdonly-flag-before-doing-privileged-open.patch
+mm-memblock-cleanup-on-duplicate-va-pa-conversion.patch
+mm-memblock-fix-memory-leak-on-extending-regions.patch
+mm-memblock-fix-overlapping-allocation-when-doubling-reserved-array.patch