From: Greg Kroah-Hartman Date: Mon, 29 Oct 2012 17:00:02 +0000 (-0700) Subject: 3.0-stable patches X-Git-Tag: v3.0.50~11 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=9bd90384c46f6e4eca7a24482e41a6fbc152d72e;p=thirdparty%2Fkernel%2Fstable-queue.git 3.0-stable patches added patches: x86-mm-find_early_table_space-based-on-ranges-that-are-actually-being-mapped.patch x86-mm-undo-incorrect-revert-in-arch-x86-mm-init.c.patch --- diff --git a/queue-3.0/series b/queue-3.0/series index 2b8b43d68a3..623a1a78560 100644 --- a/queue-3.0/series +++ b/queue-3.0/series @@ -28,3 +28,5 @@ arm-at91-i2c-change-id-to-let-i2c-gpio-work.patch mac80211-check-if-key-has-tkip-type-before-updating-iv.patch bcma-fix-unregistration-of-cores.patch cpufreq-powernow-k8-remove-usage-of-smp_processor_id-in-preemptible-code.patch +x86-mm-find_early_table_space-based-on-ranges-that-are-actually-being-mapped.patch +x86-mm-undo-incorrect-revert-in-arch-x86-mm-init.c.patch diff --git a/queue-3.0/x86-mm-find_early_table_space-based-on-ranges-that-are-actually-being-mapped.patch b/queue-3.0/x86-mm-find_early_table_space-based-on-ranges-that-are-actually-being-mapped.patch new file mode 100644 index 00000000000..d63920fa3a0 --- /dev/null +++ b/queue-3.0/x86-mm-find_early_table_space-based-on-ranges-that-are-actually-being-mapped.patch @@ -0,0 +1,142 @@ +From 844ab6f993b1d32eb40512503d35ff6ad0c57030 Mon Sep 17 00:00:00 2001 +From: Jacob Shin +Date: Wed, 24 Oct 2012 14:24:44 -0500 +Subject: x86, mm: Find_early_table_space based on ranges that are actually being mapped + +From: Jacob Shin + +commit 844ab6f993b1d32eb40512503d35ff6ad0c57030 upstream. + +Current logic finds enough space for direct mapping page tables from 0 +to end. Instead, we only need to find enough space to cover mr[0].start +to mr[nr_range].end -- the range that is actually being mapped by +init_memory_mapping() + +This is needed after 1bbbbe779aabe1f0768c2bf8f8c0a5583679b54a, to address +the panic reported here: + + https://lkml.org/lkml/2012/10/20/160 + https://lkml.org/lkml/2012/10/21/157 + +Signed-off-by: Jacob Shin +Link: http://lkml.kernel.org/r/20121024195311.GB11779@jshin-Toonie +Tested-by: Tom Rini +Signed-off-by: H. Peter Anvin +Signed-off-by: Greg Kroah-Hartman + +--- + arch/x86/mm/init.c | 73 +++++++++++++++++++++++++++++++---------------------- + 1 file changed, 43 insertions(+), 30 deletions(-) + +--- a/arch/x86/mm/init.c ++++ b/arch/x86/mm/init.c +@@ -28,36 +28,54 @@ int direct_gbpages + #endif + ; + +-static void __init find_early_table_space(unsigned long end, int use_pse, +- int use_gbpages) ++struct map_range { ++ unsigned long start; ++ unsigned long end; ++ unsigned page_size_mask; ++}; ++ ++/* ++ * First calculate space needed for kernel direct mapping page tables to cover ++ * mr[0].start to mr[nr_range - 1].end, while accounting for possible 2M and 1GB ++ * pages. Then find enough contiguous space for those page tables. 
++ */ ++static void __init find_early_table_space(struct map_range *mr, int nr_range) + { +- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end; ++ int i; ++ unsigned long puds = 0, pmds = 0, ptes = 0, tables; ++ unsigned long start = 0, good_end; + phys_addr_t base; + +- puds = (end + PUD_SIZE - 1) >> PUD_SHIFT; +- tables = roundup(puds * sizeof(pud_t), PAGE_SIZE); ++ for (i = 0; i < nr_range; i++) { ++ unsigned long range, extra; + +- if (use_gbpages) { +- unsigned long extra; ++ range = mr[i].end - mr[i].start; ++ puds += (range + PUD_SIZE - 1) >> PUD_SHIFT; + +- extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT); +- pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT; +- } else +- pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT; +- +- tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE); ++ if (mr[i].page_size_mask & (1 << PG_LEVEL_1G)) { ++ extra = range - ((range >> PUD_SHIFT) << PUD_SHIFT); ++ pmds += (extra + PMD_SIZE - 1) >> PMD_SHIFT; ++ } else { ++ pmds += (range + PMD_SIZE - 1) >> PMD_SHIFT; ++ } + +- if (use_pse) { +- unsigned long extra; +- +- extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT); ++ if (mr[i].page_size_mask & (1 << PG_LEVEL_2M)) { ++ extra = range - ((range >> PMD_SHIFT) << PMD_SHIFT); + #ifdef CONFIG_X86_32 +- extra += PMD_SIZE; ++ extra += PMD_SIZE; + #endif +- ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT; +- } else +- ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT; ++ /* The first 2/4M doesn't use large pages. */ ++ if (mr[i].start < PMD_SIZE) ++ extra += range; ++ ++ ptes += (extra + PAGE_SIZE - 1) >> PAGE_SHIFT; ++ } else { ++ ptes += (range + PAGE_SIZE - 1) >> PAGE_SHIFT; ++ } ++ } + ++ tables = roundup(puds * sizeof(pud_t), PAGE_SIZE); ++ tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE); + tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE); + + #ifdef CONFIG_X86_32 +@@ -74,8 +92,9 @@ static void __init find_early_table_spac + pgt_buf_end = pgt_buf_start; + pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT); + +- printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n", +- end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT); ++ printk(KERN_DEBUG "kernel direct mapping tables up to %#lx @ [mem %#010lx-%#010lx]\n", ++ mr[nr_range - 1].end - 1, pgt_buf_start << PAGE_SHIFT, ++ (pgt_buf_top << PAGE_SHIFT) - 1); + } + + void __init native_pagetable_reserve(u64 start, u64 end) +@@ -83,12 +102,6 @@ void __init native_pagetable_reserve(u64 + memblock_x86_reserve_range(start, end, "PGTABLE"); + } + +-struct map_range { +- unsigned long start; +- unsigned long end; +- unsigned page_size_mask; +-}; +- + #ifdef CONFIG_X86_32 + #define NR_RANGE_MR 3 + #else /* CONFIG_X86_64 */ +@@ -260,7 +273,7 @@ unsigned long __init_refok init_memory_m + * nodes are discovered. + */ + if (!after_bootmem) +- find_early_table_space(end, use_pse, use_gbpages); ++ find_early_table_space(mr, nr_range); + + for (i = 0; i < nr_range; i++) + ret = kernel_physical_mapping_init(mr[i].start, mr[i].end, diff --git a/queue-3.0/x86-mm-undo-incorrect-revert-in-arch-x86-mm-init.c.patch b/queue-3.0/x86-mm-undo-incorrect-revert-in-arch-x86-mm-init.c.patch new file mode 100644 index 00000000000..c53fa25c7c0 --- /dev/null +++ b/queue-3.0/x86-mm-undo-incorrect-revert-in-arch-x86-mm-init.c.patch @@ -0,0 +1,42 @@ +From f82f64dd9f485e13f29f369772d4a0e868e5633a Mon Sep 17 00:00:00 2001 +From: Yinghai Lu +Date: Thu, 25 Oct 2012 15:45:26 -0700 +Subject: x86, mm: Undo incorrect revert in arch/x86/mm/init.c + +From: Yinghai Lu + +commit f82f64dd9f485e13f29f369772d4a0e868e5633a upstream. 
+
+Commit
+
+    844ab6f9 x86, mm: Find_early_table_space based on ranges that are actually being mapped
+
+wrongly added back some lines that had been removed in commit
+
+    7b16bbf97 Revert "x86/mm: Fix the size calculation of mapping tables"
+
+Remove them again.
+
+Signed-off-by: Yinghai Lu
+Link: http://lkml.kernel.org/r/CAE9FiQW_vuaYQbmagVnxT2DGsYc=9tNeAbdBq53sYkitPOwxSQ@mail.gmail.com
+Acked-by: Jacob Shin
+Signed-off-by: H. Peter Anvin
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/x86/mm/init.c |    4 ----
+ 1 file changed, 4 deletions(-)
+
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -64,10 +64,6 @@ static void __init find_early_table_spac
+ #ifdef CONFIG_X86_32
+ 			extra += PMD_SIZE;
+ #endif
+-			/* The first 2/4M doesn't use large pages. */
+-			if (mr[i].start < PMD_SIZE)
+-				extra += range;
+-
+ 			ptes += (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ 		} else {
+ 			ptes += (range + PAGE_SIZE - 1) >> PAGE_SHIFT;
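
For illustration only, and not part of either queue patch: the core idea of the find_early_table_space() rework above is to size the kernel direct-mapping page tables from the ranges that are actually being mapped, summing the PUD/PMD/PTE entries each range needs, instead of sizing for everything from 0 to end. The standalone C program below is a sketch of that arithmetic under simplifying assumptions: x86-64 shift values, 8-byte table entries, explicit use_1g/use_2m flags standing in for page_size_mask, and the 32-bit and low-2M/4M special cases left out. The names roundup_page() and table_space() are made up for this sketch; it is not the kernel function itself.

/*
 * Standalone sketch, not kernel code: estimate the page-table space a set
 * of map_range entries needs, mirroring the per-range sizing idea of the
 * reworked find_early_table_space().  Constants assume x86-64 with 4K
 * pages and 8-byte table entries; kernel special cases are omitted.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PMD_SHIFT	21
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PUD_SHIFT	30
#define PUD_SIZE	(1UL << PUD_SHIFT)

struct map_range {
	unsigned long start;
	unsigned long end;
	int use_1g;		/* range may be mapped with 1G pages */
	int use_2m;		/* range may be mapped with 2M pages */
};

/* Round a byte count up to a whole number of pages. */
static unsigned long roundup_page(unsigned long bytes)
{
	return (bytes + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
}

/* Sum the PUD/PMD/PTE entries needed per range, then convert to bytes. */
static unsigned long table_space(const struct map_range *mr, int nr_range)
{
	unsigned long puds = 0, pmds = 0, ptes = 0, tables;
	int i;

	for (i = 0; i < nr_range; i++) {
		unsigned long range = mr[i].end - mr[i].start;
		unsigned long extra;

		puds += (range + PUD_SIZE - 1) >> PUD_SHIFT;

		if (mr[i].use_1g) {
			/* only the tail not covered by whole 1G units needs PMDs */
			extra = range - ((range >> PUD_SHIFT) << PUD_SHIFT);
			pmds += (extra + PMD_SIZE - 1) >> PMD_SHIFT;
		} else {
			pmds += (range + PMD_SIZE - 1) >> PMD_SHIFT;
		}

		if (mr[i].use_2m) {
			/* only the tail not covered by whole 2M units needs PTEs */
			extra = range - ((range >> PMD_SHIFT) << PMD_SHIFT);
			ptes += (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
		} else {
			ptes += (range + PAGE_SIZE - 1) >> PAGE_SHIFT;
		}
	}

	tables  = roundup_page(puds * 8);	/* assumed sizeof(pud_t) == 8 */
	tables += roundup_page(pmds * 8);	/* assumed sizeof(pmd_t) == 8 */
	tables += roundup_page(ptes * 8);	/* assumed sizeof(pte_t) == 8 */
	return tables;
}

int main(void)
{
	/* 0-2M mapped with 4K pages, 2M-4G mapped with 2M pages. */
	struct map_range mr[] = {
		{ 0x0,      0x200000,      0, 0 },
		{ 0x200000, 0x100000000UL, 0, 1 },
	};

	printf("estimated page-table space: %lu KiB\n",
	       table_space(mr, 2) >> 10);
	return 0;
}

Built with any C compiler, the sketch reports roughly 24 KiB of table space for a 4 GB mapping split into a 4K-mapped first 2 MB plus a 2M-mapped remainder, i.e. the sum of the per-level page roundups rather than a single estimate taken from the end address alone.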