git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.0-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Tue, 24 Jul 2012 22:44:43 +0000 (15:44 -0700)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Tue, 24 Jul 2012 22:44:43 +0000 (15:44 -0700)
added patches:
mm-memory-hotplug-check-if-pages-are-correctly-reserved-on-a-per-section-basis.patch
mm-vmstat.c-cache-align-vm_stat.patch

queue-3.0/mm-memory-hotplug-check-if-pages-are-correctly-reserved-on-a-per-section-basis.patch [new file with mode: 0644]
queue-3.0/mm-vmstat.c-cache-align-vm_stat.patch [new file with mode: 0644]
queue-3.0/series

diff --git a/queue-3.0/mm-memory-hotplug-check-if-pages-are-correctly-reserved-on-a-per-section-basis.patch b/queue-3.0/mm-memory-hotplug-check-if-pages-are-correctly-reserved-on-a-per-section-basis.patch
new file mode 100644 (file)
index 0000000..7dbb08e
--- /dev/null
@@ -0,0 +1,125 @@
+From 2bbcb8788311a40714b585fc11b51da6ffa2ab92 Mon Sep 17 00:00:00 2001
+From: Mel Gorman <mgorman@suse.de>
+Date: Mon, 17 Oct 2011 16:38:20 +0200
+Subject: mm: memory hotplug: Check if pages are correctly reserved on a per-section basis
+
+From: Mel Gorman <mgorman@suse.de>
+
+commit 2bbcb8788311a40714b585fc11b51da6ffa2ab92 upstream.
+
+Stable note: Fixes https://bugzilla.novell.com/show_bug.cgi?id=721039 .
+        Without the patch, memory hot-add can fail for kernel configurations
+        that do not set CONFIG_SPARSEMEM_VMEMMAP.
+
+(Resending as I am not seeing it in -next so maybe it got lost)
+
+mm: memory hotplug: Check if pages are correctly reserved on a per-section basis
+
+It is expected that memory being brought online is PageReserved
+similar to what happens when the page allocator is being brought up.
+Memory is onlined in "memory blocks" which consist of one or more
+sections. Unfortunately, the code that verifies PageReserved is
+currently assuming that the memmap backing all these pages is virtually
+contiguous which is only the case when CONFIG_SPARSEMEM_VMEMMAP is set.
+As a result, memory hot-add is failing on those configurations with
+the message:
+
+kernel: section number XXX page number 256 not reserved, was it already online?
+
+This patch updates the PageReserved check to lookup struct page once
+per section to guarantee the correct struct page is being checked.
+
+[Check pages within sections properly: rientjes@google.com]
+[original patch by: nfont@linux.vnet.ibm.com]
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
+Tested-by: Nathan Fontenot <nfont@linux.vnet.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/base/memory.c |   58 ++++++++++++++++++++++++++++++++++----------------
+ 1 file changed, 40 insertions(+), 18 deletions(-)
+
+--- a/drivers/base/memory.c
++++ b/drivers/base/memory.c
+@@ -224,13 +224,48 @@ int memory_isolate_notify(unsigned long
+ }
+
+ /*
++ * The probe routines leave the pages reserved, just as the bootmem code does.
++ * Make sure they're still that way.
++ */
++static bool pages_correctly_reserved(unsigned long start_pfn,
++                                      unsigned long nr_pages)
++{
++      int i, j;
++      struct page *page;
++      unsigned long pfn = start_pfn;
++
++      /*
++       * memmap between sections is not contiguous except with
++       * SPARSEMEM_VMEMMAP. We lookup the page once per section
++       * and assume memmap is contiguous within each section
++       */
++      for (i = 0; i < sections_per_block; i++, pfn += PAGES_PER_SECTION) {
++              if (WARN_ON_ONCE(!pfn_valid(pfn)))
++                      return false;
++              page = pfn_to_page(pfn);
++
++              for (j = 0; j < PAGES_PER_SECTION; j++) {
++                      if (PageReserved(page + j))
++                              continue;
++
++                      printk(KERN_WARNING "section number %ld page number %d "
++                              "not reserved, was it already online?\n",
++                              pfn_to_section_nr(pfn), j);
++
++                      return false;
++              }
++      }
++
++      return true;
++}
++
++/*
+  * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
+  * OK to have direct references to sparsemem variables in here.
+  */
+ static int
+ memory_block_action(unsigned long phys_index, unsigned long action)
+ {
+-      int i;
+       unsigned long start_pfn, start_paddr;
+       unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
+       struct page *first_page;
+@@ -238,26 +273,13 @@ memory_block_action(unsigned long phys_i
+
+       first_page = pfn_to_page(phys_index << PFN_SECTION_SHIFT);
+
+-      /*
+-       * The probe routines leave the pages reserved, just
+-       * as the bootmem code does.  Make sure they're still
+-       * that way.
+-       */
+-      if (action == MEM_ONLINE) {
+-              for (i = 0; i < nr_pages; i++) {
+-                      if (PageReserved(first_page+i))
+-                              continue;
+-
+-                      printk(KERN_WARNING "section number %ld page number %d "
+-                              "not reserved, was it already online?\n",
+-                              phys_index, i);
+-                      return -EBUSY;
+-              }
+-      }
+-
+       switch (action) {
+               case MEM_ONLINE:
+                       start_pfn = page_to_pfn(first_page);
++
++                      if (!pages_correctly_reserved(start_pfn, nr_pages))
++                              return -EBUSY;
++
+                       ret = online_pages(start_pfn, nr_pages);
+                       break;
+               case MEM_OFFLINE:
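
The hunk above works because, without CONFIG_SPARSEMEM_VMEMMAP, the struct page array (the memmap) is allocated separately for every memory section, so a pointer obtained for the first section cannot simply be advanced across the whole memory block. The sketch below mirrors that walking pattern in plain user-space C; fake_page, section_memmap and the small constants are hypothetical stand-ins for struct page, the per-section memmap, PAGES_PER_SECTION and sections_per_block, not kernel APIs.

/* Minimal sketch of a per-section reservation check, assuming each
 * section owns its own separately allocated memmap (the non-VMEMMAP
 * SPARSEMEM layout): look the map up once per section and only assume
 * contiguity inside that section.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define SECTIONS_PER_BLOCK 4	/* hypothetical block size */
#define PAGES_PER_SECTION  8	/* hypothetical, tiny for illustration */

struct fake_page { bool reserved; };

/* One allocation per section: deliberately NOT one contiguous array. */
static struct fake_page *section_memmap[SECTIONS_PER_BLOCK];

static bool block_correctly_reserved(void)
{
	for (int i = 0; i < SECTIONS_PER_BLOCK; i++) {
		/* Per-section lookup, as in pages_correctly_reserved(). */
		struct fake_page *page = section_memmap[i];

		for (int j = 0; j < PAGES_PER_SECTION; j++) {
			if (page[j].reserved)
				continue;
			fprintf(stderr, "section %d page %d not reserved\n", i, j);
			return false;
		}
	}
	return true;
}

int main(void)
{
	for (int i = 0; i < SECTIONS_PER_BLOCK; i++) {
		section_memmap[i] = calloc(PAGES_PER_SECTION, sizeof(struct fake_page));
		for (int j = 0; j < PAGES_PER_SECTION; j++)
			section_memmap[i][j].reserved = true;
	}
	printf("block correctly reserved: %s\n",
	       block_correctly_reserved() ? "yes" : "no");
	return 0;
}

The old code's first_page + i arithmetic is exactly the shortcut this avoids: it is only valid when the whole memmap happens to be virtually contiguous, which SPARSEMEM_VMEMMAP guarantees and plain SPARSEMEM does not.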
diff --git a/queue-3.0/mm-vmstat.c-cache-align-vm_stat.patch b/queue-3.0/mm-vmstat.c-cache-align-vm_stat.patch
new file mode 100644 (file)
index 0000000..72a9e05
--- /dev/null
@@ -0,0 +1,42 @@
+From a1cb2c60ddc98ff4e5246f410558805401ceee67 Mon Sep 17 00:00:00 2001
+From: Dimitri Sivanich <sivanich@sgi.com>
+Date: Mon, 31 Oct 2011 17:09:46 -0700
+Subject: mm/vmstat.c: cache align vm_stat
+
+From: Dimitri Sivanich <sivanich@sgi.com>
+
+commit a1cb2c60ddc98ff4e5246f410558805401ceee67 upstream.
+
+Stable note: Not tracked on Bugzilla. This patch is known to make a big
+        difference to tmpfs performance on larger machines.
+
+This was found to adversely affect tmpfs I/O performance.
+
+Tests run on a 640 cpu UV system.
+
+With 120 threads doing parallel writes, each to different tmpfs mounts:
+No patch:              ~300 MB/sec
+With vm_stat alignment:        ~430 MB/sec
+
+Signed-off-by: Dimitri Sivanich <sivanich@sgi.com>
+Acked-by: Christoph Lameter <cl@gentwo.org>
+Acked-by: Mel Gorman <mel@csn.ul.ie>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+
+---
+ mm/vmstat.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/vmstat.c
++++ b/mm/vmstat.c
+@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
+  *
+  * vm_stat contains the global counters
+  */
+-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
++atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
+ EXPORT_SYMBOL(vm_stat);
+
+ #ifdef CONFIG_SMP
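
The single attribute added above matters because vm_stat is written constantly from every CPU; if the array starts in the middle of a cache line it can share that line with an unrelated hot variable, and the two sets of writers then bounce the line between CPUs (false sharing). A rough user-space analogue follows; the 64-byte figure, the counters name and the stat_add() helper are assumptions for illustration, standing in for SMP_CACHE_BYTES, vm_stat and the kernel's counter updates rather than any real kernel interface.

/* Sketch: start a hot counter array on its own cache line so updates
 * to it never contend with neighbouring globals. The kernel's
 * __cacheline_aligned_in_smp has a comparable effect (alignment, plus
 * placement in a dedicated section) when CONFIG_SMP is set.
 */
#include <stdatomic.h>
#include <stdio.h>

#define CACHE_LINE_SIZE 64	/* assumed line size */
#define NR_STAT_ITEMS   32	/* stand-in for NR_VM_ZONE_STAT_ITEMS */

static atomic_long counters[NR_STAT_ITEMS]
	__attribute__((aligned(CACHE_LINE_SIZE)));

void stat_add(int item, long delta)
{
	/* Relaxed ordering: these are statistics, not synchronization. */
	atomic_fetch_add_explicit(&counters[item], delta, memory_order_relaxed);
}

int main(void)
{
	stat_add(0, 1);
	printf("counter 0 = %ld\n",
	       atomic_load_explicit(&counters[0], memory_order_relaxed));
	return 0;
}

The ~300 MB/sec to ~430 MB/sec tmpfs numbers quoted in the changelog are attributed to removing that cross-line contention; the counters themselves are unchanged.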
diff --git a/queue-3.0/series b/queue-3.0/series
index a143a4ae63461d8672490fceb4b3126afe81f624..42d24d646bb664985ec9f4976f3be3eda35ae5fc 100644 (file)
--- a/queue-3.0/series
@@ -4,3 +4,5 @@ mm-fix-lost-kswapd-wakeup-in-kswapd_stop.patch
 mips-properly-align-the-.data..init_task-section.patch
 ubifs-fix-a-bug-in-empty-space-fix-up.patch
 dm-raid1-fix-crash-with-mirror-recovery-and-discard.patch
+mm-vmstat.c-cache-align-vm_stat.patch
+mm-memory-hotplug-check-if-pages-are-correctly-reserved-on-a-per-section-basis.patch