3.0-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 29 Oct 2012 17:00:02 +0000 (10:00 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 29 Oct 2012 17:00:02 +0000 (10:00 -0700)
added patches:
x86-mm-find_early_table_space-based-on-ranges-that-are-actually-being-mapped.patch
x86-mm-undo-incorrect-revert-in-arch-x86-mm-init.c.patch

queue-3.0/series
queue-3.0/x86-mm-find_early_table_space-based-on-ranges-that-are-actually-being-mapped.patch [new file with mode: 0644]
queue-3.0/x86-mm-undo-incorrect-revert-in-arch-x86-mm-init.c.patch [new file with mode: 0644]

diff --git a/queue-3.0/series b/queue-3.0/series
index 2b8b43d68a361a3e8c058b930134561ae47d70c3..623a1a785606464e730892a51afad8a8fd95ff71 100644
@@ -28,3 +28,5 @@ arm-at91-i2c-change-id-to-let-i2c-gpio-work.patch
 mac80211-check-if-key-has-tkip-type-before-updating-iv.patch
 bcma-fix-unregistration-of-cores.patch
 cpufreq-powernow-k8-remove-usage-of-smp_processor_id-in-preemptible-code.patch
+x86-mm-find_early_table_space-based-on-ranges-that-are-actually-being-mapped.patch
+x86-mm-undo-incorrect-revert-in-arch-x86-mm-init.c.patch
diff --git a/queue-3.0/x86-mm-find_early_table_space-based-on-ranges-that-are-actually-being-mapped.patch b/queue-3.0/x86-mm-find_early_table_space-based-on-ranges-that-are-actually-being-mapped.patch
new file mode 100644
index 0000000..d63920f
--- /dev/null
@@ -0,0 +1,142 @@
+From 844ab6f993b1d32eb40512503d35ff6ad0c57030 Mon Sep 17 00:00:00 2001
+From: Jacob Shin <jacob.shin@amd.com>
+Date: Wed, 24 Oct 2012 14:24:44 -0500
+Subject: x86, mm: Find_early_table_space based on ranges that are actually being mapped
+
+From: Jacob Shin <jacob.shin@amd.com>
+
+commit 844ab6f993b1d32eb40512503d35ff6ad0c57030 upstream.
+
+Current logic finds enough space for direct mapping page tables from 0
+to end. Instead, we only need to find enough space to cover mr[0].start
+to mr[nr_range - 1].end -- the range that is actually being mapped by
+init_memory_mapping().
+
+This is needed after 1bbbbe779aabe1f0768c2bf8f8c0a5583679b54a, to address
+the panic reported here:
+
+  https://lkml.org/lkml/2012/10/20/160
+  https://lkml.org/lkml/2012/10/21/157
+
+Signed-off-by: Jacob Shin <jacob.shin@amd.com>
+Link: http://lkml.kernel.org/r/20121024195311.GB11779@jshin-Toonie
+Tested-by: Tom Rini <trini@ti.com>
+Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/mm/init.c |   73 +++++++++++++++++++++++++++++++----------------------
+ 1 file changed, 43 insertions(+), 30 deletions(-)
+
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -28,36 +28,54 @@ int direct_gbpages
+ #endif
+ ;
+-static void __init find_early_table_space(unsigned long end, int use_pse,
+-                                        int use_gbpages)
++struct map_range {
++      unsigned long start;
++      unsigned long end;
++      unsigned page_size_mask;
++};
++
++/*
++ * First calculate space needed for kernel direct mapping page tables to cover
++ * mr[0].start to mr[nr_range - 1].end, while accounting for possible 2M and 1GB
++ * pages. Then find enough contiguous space for those page tables.
++ */
++static void __init find_early_table_space(struct map_range *mr, int nr_range)
+ {
+-      unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
++      int i;
++      unsigned long puds = 0, pmds = 0, ptes = 0, tables;
++      unsigned long start = 0, good_end;
+       phys_addr_t base;
+-      puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
+-      tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
++      for (i = 0; i < nr_range; i++) {
++              unsigned long range, extra;
+-      if (use_gbpages) {
+-              unsigned long extra;
++              range = mr[i].end - mr[i].start;
++              puds += (range + PUD_SIZE - 1) >> PUD_SHIFT;
+-              extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
+-              pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
+-      } else
+-              pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
+-
+-      tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
++              if (mr[i].page_size_mask & (1 << PG_LEVEL_1G)) {
++                      extra = range - ((range >> PUD_SHIFT) << PUD_SHIFT);
++                      pmds += (extra + PMD_SIZE - 1) >> PMD_SHIFT;
++              } else {
++                      pmds += (range + PMD_SIZE - 1) >> PMD_SHIFT;
++              }
+-      if (use_pse) {
+-              unsigned long extra;
+-
+-              extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
++              if (mr[i].page_size_mask & (1 << PG_LEVEL_2M)) {
++                      extra = range - ((range >> PMD_SHIFT) << PMD_SHIFT);
+ #ifdef CONFIG_X86_32
+-              extra += PMD_SIZE;
++                      extra += PMD_SIZE;
+ #endif
+-              ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
+-      } else
+-              ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
++                      /* The first 2/4M doesn't use large pages. */
++                      if (mr[i].start < PMD_SIZE)
++                              extra += range;
++
++                      ptes += (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
++              } else {
++                      ptes += (range + PAGE_SIZE - 1) >> PAGE_SHIFT;
++              }
++      }
++      tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
++      tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
+       tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
+ #ifdef CONFIG_X86_32
+@@ -74,8 +92,9 @@ static void __init find_early_table_spac
+       pgt_buf_end = pgt_buf_start;
+       pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
+-      printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
+-              end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT);
++      printk(KERN_DEBUG "kernel direct mapping tables up to %#lx @ [mem %#010lx-%#010lx]\n",
++              mr[nr_range - 1].end - 1, pgt_buf_start << PAGE_SHIFT,
++              (pgt_buf_top << PAGE_SHIFT) - 1);
+ }
+ void __init native_pagetable_reserve(u64 start, u64 end)
+@@ -83,12 +102,6 @@ void __init native_pagetable_reserve(u64
+       memblock_x86_reserve_range(start, end, "PGTABLE");
+ }
+-struct map_range {
+-      unsigned long start;
+-      unsigned long end;
+-      unsigned page_size_mask;
+-};
+-
+ #ifdef CONFIG_X86_32
+ #define NR_RANGE_MR 3
+ #else /* CONFIG_X86_64 */
+@@ -260,7 +273,7 @@ unsigned long __init_refok init_memory_m
+        * nodes are discovered.
+        */
+       if (!after_bootmem)
+-              find_early_table_space(end, use_pse, use_gbpages);
++              find_early_table_space(mr, nr_range);
+       for (i = 0; i < nr_range; i++)
+               ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
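
What follows is a standalone, illustrative sketch (not kernel code) of the
per-range estimate that find_early_table_space() performs after the patch
above. The shift values assume x86_64 with 4 KiB pages, the PG_LEVEL_* masks
and the 8-byte entry sizes are simplifying stand-ins, and the 32-bit
PMD_SIZE slack and the first-2/4M adjustment are omitted; the point is only
to show the counts being accumulated per map_range rather than for a single
0..end span.

/*
 * Illustrative sketch only -- not the kernel implementation.
 * Assumes x86_64 shifts (4 KiB pages, 2 MiB PMDs, 1 GiB PUDs) and
 * 8-byte page-table entries; the PG_LEVEL_* flags are stand-ins.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PMD_SHIFT  21
#define PUD_SHIFT  30
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PMD_SIZE   (1UL << PMD_SHIFT)
#define PUD_SIZE   (1UL << PUD_SHIFT)

#define PG_LEVEL_2M (1u << 1)   /* range is mapped with 2 MiB pages */
#define PG_LEVEL_1G (1u << 2)   /* range is mapped with 1 GiB pages */

struct map_range {
        unsigned long start;
        unsigned long end;
        unsigned page_size_mask;
};

static unsigned long roundup_page(unsigned long bytes)
{
        return (bytes + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
}

/* Bytes of page tables needed to direct-map mr[0..nr_range-1]. */
static unsigned long table_space(const struct map_range *mr, int nr_range)
{
        unsigned long puds = 0, pmds = 0, ptes = 0;
        int i;

        for (i = 0; i < nr_range; i++) {
                unsigned long range = mr[i].end - mr[i].start, extra;

                /* One PUD entry per 1 GiB covered by this range. */
                puds += (range + PUD_SIZE - 1) >> PUD_SHIFT;

                /* PMDs only cover the part not mapped by 1 GiB pages. */
                extra = (mr[i].page_size_mask & PG_LEVEL_1G) ?
                        range % PUD_SIZE : range;
                pmds += (extra + PMD_SIZE - 1) >> PMD_SHIFT;

                /* PTEs only cover the part not mapped by 2 MiB pages. */
                extra = (mr[i].page_size_mask & PG_LEVEL_2M) ?
                        range % PMD_SIZE : range;
                ptes += (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
        }

        return roundup_page(puds * 8) +
               roundup_page(pmds * 8) +
               roundup_page(ptes * 8);
}

int main(void)
{
        /* Two hypothetical ranges: 0-2 GiB and 4-6 GiB, both 2 MiB-mapped. */
        struct map_range mr[] = {
                { 0x000000000UL, 0x080000000UL, PG_LEVEL_2M },
                { 0x100000000UL, 0x180000000UL, PG_LEVEL_2M },
        };

        printf("page tables needed: %lu KiB\n", table_space(mr, 2) >> 10);
        return 0;
}

The kernel's version also accounts for the 32-bit PMD_SIZE slack and places
the buffer via memblock; the change carried by the patch is simply that the
estimate is summed over the mr[] ranges actually passed to
init_memory_mapping() instead of being derived from a single 0..end span.
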
diff --git a/queue-3.0/x86-mm-undo-incorrect-revert-in-arch-x86-mm-init.c.patch b/queue-3.0/x86-mm-undo-incorrect-revert-in-arch-x86-mm-init.c.patch
new file mode 100644
index 0000000..c53fa25
--- /dev/null
@@ -0,0 +1,42 @@
+From f82f64dd9f485e13f29f369772d4a0e868e5633a Mon Sep 17 00:00:00 2001
+From: Yinghai Lu <yinghai@kernel.org>
+Date: Thu, 25 Oct 2012 15:45:26 -0700
+Subject: x86, mm: Undo incorrect revert in arch/x86/mm/init.c
+
+From: Yinghai Lu <yinghai@kernel.org>
+
+commit f82f64dd9f485e13f29f369772d4a0e868e5633a upstream.
+
+Commit
+
+    844ab6f9 x86, mm: Find_early_table_space based on ranges that are actually being mapped
+
+wrongly added back some lines that had been removed in commit
+
+    7b16bbf97 Revert "x86/mm: Fix the size calculation of mapping tables"
+
+Remove them again.
+
+Signed-off-by: Yinghai Lu <yinghai@kernel.org>
+Link: http://lkml.kernel.org/r/CAE9FiQW_vuaYQbmagVnxT2DGsYc=9tNeAbdBq53sYkitPOwxSQ@mail.gmail.com
+Acked-by: Jacob Shin <jacob.shin@amd.com>
+Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/mm/init.c |    4 ----
+ 1 file changed, 4 deletions(-)
+
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -64,10 +64,6 @@ static void __init find_early_table_spac
+ #ifdef CONFIG_X86_32
+                       extra += PMD_SIZE;
+ #endif
+-                      /* The first 2/4M doesn't use large pages. */
+-                      if (mr[i].start < PMD_SIZE)
+-                              extra += range;
+-
+                       ptes += (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
+               } else {
+                       ptes += (range + PAGE_SIZE - 1) >> PAGE_SHIFT;