3.8-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 18 Mar 2013 19:35:04 +0000 (12:35 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 18 Mar 2013 19:35:04 +0000 (12:35 -0700)
added patches:
arm-davinci-edma-fix-dmaengine-induced-null-pointer-dereference-on-da830.patch
atmel_lcdfb-fix-16-bpp-modes-on-older-socs.patch
btrfs-use-rcu_barrier-to-wait-for-bdev-puts-at-unmount.patch
kbuild-fix-make-headers_check-with-make-3.80.patch
mtd-nand-reintroduce-nand_no_readrdy-as-nand_need_readrdy.patch
net-mlx4_en-disable-rfs-when-running-in-sriov-mode.patch
net-mlx4_en-initialize-rfs-filters-lock-and-list-in-init_netdev.patch
perf-x86-fix-link-failure-for-non-intel-configs.patch
perf-x86-fix-wrmsr_on_cpu-warning-on-suspend-resume.patch
powerpc-fix-cputable-entry-for-970mp-rev-1.0.patch
powerpc-fix-stab-initialization.patch
powerpc-make-vsid_bits-dependency-explicit.patch
powerpc-rename-user_esid_bits-to-esid_bits.patch
powerpc-update-kernel-vsid-range.patch
s390-critical-section-cleanup-vs.-machine-checks.patch
s390-mm-fix-flush_tlb_kernel_range.patch
selinux-use-gfp_atomic-under-spin_lock.patch

18 files changed:
queue-3.8/arm-davinci-edma-fix-dmaengine-induced-null-pointer-dereference-on-da830.patch [new file with mode: 0644]
queue-3.8/atmel_lcdfb-fix-16-bpp-modes-on-older-socs.patch [new file with mode: 0644]
queue-3.8/btrfs-use-rcu_barrier-to-wait-for-bdev-puts-at-unmount.patch [new file with mode: 0644]
queue-3.8/kbuild-fix-make-headers_check-with-make-3.80.patch [new file with mode: 0644]
queue-3.8/mtd-nand-reintroduce-nand_no_readrdy-as-nand_need_readrdy.patch [new file with mode: 0644]
queue-3.8/net-mlx4_en-disable-rfs-when-running-in-sriov-mode.patch [new file with mode: 0644]
queue-3.8/net-mlx4_en-initialize-rfs-filters-lock-and-list-in-init_netdev.patch [new file with mode: 0644]
queue-3.8/perf-x86-fix-link-failure-for-non-intel-configs.patch [new file with mode: 0644]
queue-3.8/perf-x86-fix-wrmsr_on_cpu-warning-on-suspend-resume.patch [new file with mode: 0644]
queue-3.8/powerpc-fix-cputable-entry-for-970mp-rev-1.0.patch [new file with mode: 0644]
queue-3.8/powerpc-fix-stab-initialization.patch [new file with mode: 0644]
queue-3.8/powerpc-make-vsid_bits-dependency-explicit.patch [new file with mode: 0644]
queue-3.8/powerpc-rename-user_esid_bits-to-esid_bits.patch [new file with mode: 0644]
queue-3.8/powerpc-update-kernel-vsid-range.patch [new file with mode: 0644]
queue-3.8/s390-critical-section-cleanup-vs.-machine-checks.patch [new file with mode: 0644]
queue-3.8/s390-mm-fix-flush_tlb_kernel_range.patch [new file with mode: 0644]
queue-3.8/selinux-use-gfp_atomic-under-spin_lock.patch [new file with mode: 0644]
queue-3.8/series

diff --git a/queue-3.8/arm-davinci-edma-fix-dmaengine-induced-null-pointer-dereference-on-da830.patch b/queue-3.8/arm-davinci-edma-fix-dmaengine-induced-null-pointer-dereference-on-da830.patch
new file mode 100644 (file)
index 0000000..5a145ca
--- /dev/null
@@ -0,0 +1,40 @@
+From 069552777a121eb39da29de4bc0383483dbe1f7e Mon Sep 17 00:00:00 2001
+From: Matt Porter <mporter@ti.com>
+Date: Tue, 5 Mar 2013 10:58:22 -0500
+Subject: ARM: davinci: edma: fix dmaengine induced null pointer dereference on da830
+
+From: Matt Porter <mporter@ti.com>
+
+commit 069552777a121eb39da29de4bc0383483dbe1f7e upstream.
+
+This adds additional error checking to the private edma api implementation
+to catch the case where edma_alloc_slot() is passed an invalid controller
+parameter. The edma dmaengine wrapper driver relies on this condition
+being handled in order to avoid setting up a second edma dmaengine
+instance on DA830.
+
+Verified using a DA850 with the second EDMA controller platform instance
+removed to simulate a DA830, which only has a single EDMA controller.
+
+Reported-by: Tomas Novotny <tomas@novotny.cz>
+Signed-off-by: Matt Porter <mporter@ti.com>
+Tested-by: Tomas Novotny <tomas@novotny.cz>
+Signed-off-by: Sekhar Nori <nsekhar@ti.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/mach-davinci/dma.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/arm/mach-davinci/dma.c
++++ b/arch/arm/mach-davinci/dma.c
+@@ -743,6 +743,9 @@ EXPORT_SYMBOL(edma_free_channel);
+  */
+ int edma_alloc_slot(unsigned ctlr, int slot)
+ {
++      if (!edma_cc[ctlr])
++              return -EINVAL;
++
+       if (slot >= 0)
+               slot = EDMA_CHAN_SLOT(slot);
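As an aside (not part of the patch), a minimal caller-side sketch of what the new check buys; the function name is hypothetical and EDMA_SLOT_ANY is assumed from <mach/edma.h>:

    /* Hypothetical DA830 caller: controller 1 does not exist there, so
     * edma_alloc_slot() now fails cleanly instead of dereferencing the
     * NULL edma_cc[1] entry.
     */
    static int my_alloc_second_ctlr_slot(void)
    {
            int slot = edma_alloc_slot(1, EDMA_SLOT_ANY);

            if (slot < 0)
                    return slot;    /* -EINVAL rather than an oops */
            /* ... program the PaRAM slot, then edma_free_slot(slot) ... */
            return 0;
    }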
diff --git a/queue-3.8/atmel_lcdfb-fix-16-bpp-modes-on-older-socs.patch b/queue-3.8/atmel_lcdfb-fix-16-bpp-modes-on-older-socs.patch
new file mode 100644 (file)
index 0000000..0d070b5
--- /dev/null
@@ -0,0 +1,89 @@
+From a79eac7165ed62114e6ca197195aa5060a54f137 Mon Sep 17 00:00:00 2001
+From: Johan Hovold <jhovold@gmail.com>
+Date: Tue, 5 Feb 2013 14:35:11 +0100
+Subject: atmel_lcdfb: fix 16-bpp modes on older SOCs
+
+From: Johan Hovold <jhovold@gmail.com>
+
+commit a79eac7165ed62114e6ca197195aa5060a54f137 upstream.
+
+Fix a regression introduced by commit 787f9fd23283 ("atmel_lcdfb: support
+16bit BGR:565 mode, remove unsupported 15bit modes"), which broke 16-bpp
+modes on older SOCs that use IBGR:555 (the msb is an intensity bit) rather
+than BGR:565.
+
+Use the SOC type to determine the pixel layout.
+
+Tested on at91sam9263 and at91sam9g45.
+
+Acked-by: Peter Korsgaard <jacmet@sunsite.dk>
+Signed-off-by: Johan Hovold <jhovold@gmail.com>
+Signed-off-by: Nicolas Ferre <nicolas.ferre@atmel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/video/atmel_lcdfb.c |   22 +++++++++++++++-------
+ include/video/atmel_lcdc.h  |    1 +
+ 2 files changed, 16 insertions(+), 7 deletions(-)
+
+--- a/drivers/video/atmel_lcdfb.c
++++ b/drivers/video/atmel_lcdfb.c
+@@ -422,17 +422,22 @@ static int atmel_lcdfb_check_var(struct
+                       = var->bits_per_pixel;
+               break;
+       case 16:
++              /* Older SOCs use IBGR:555 rather than BGR:565. */
++              if (sinfo->have_intensity_bit)
++                      var->green.length = 5;
++              else
++                      var->green.length = 6;
++
+               if (sinfo->lcd_wiring_mode == ATMEL_LCDC_WIRING_RGB) {
+-                      /* RGB:565 mode */
+-                      var->red.offset = 11;
++                      /* RGB:5X5 mode */
++                      var->red.offset = var->green.length + 5;
+                       var->blue.offset = 0;
+               } else {
+-                      /* BGR:565 mode */
++                      /* BGR:5X5 mode */
+                       var->red.offset = 0;
+-                      var->blue.offset = 11;
++                      var->blue.offset = var->green.length + 5;
+               }
+               var->green.offset = 5;
+-              var->green.length = 6;
+               var->red.length = var->blue.length = 5;
+               break;
+       case 32:
+@@ -679,8 +684,7 @@ static int atmel_lcdfb_setcolreg(unsigne
+       case FB_VISUAL_PSEUDOCOLOR:
+               if (regno < 256) {
+-                      if (cpu_is_at91sam9261() || cpu_is_at91sam9263()
+-                          || cpu_is_at91sam9rl()) {
++                      if (sinfo->have_intensity_bit) {
+                               /* old style I+BGR:555 */
+                               val  = ((red   >> 11) & 0x001f);
+                               val |= ((green >>  6) & 0x03e0);
+@@ -870,6 +874,10 @@ static int __init atmel_lcdfb_probe(stru
+       }
+       sinfo->info = info;
+       sinfo->pdev = pdev;
++      if (cpu_is_at91sam9261() || cpu_is_at91sam9263() ||
++                                                      cpu_is_at91sam9rl()) {
++              sinfo->have_intensity_bit = true;
++      }
+       strcpy(info->fix.id, sinfo->pdev->name);
+       info->flags = ATMEL_LCDFB_FBINFO_DEFAULT;
+--- a/include/video/atmel_lcdc.h
++++ b/include/video/atmel_lcdc.h
+@@ -62,6 +62,7 @@ struct atmel_lcdfb_info {
+       void (*atmel_lcdfb_power_control)(int on);
+       struct fb_monspecs      *default_monspecs;
+       u32                     pseudo_palette[16];
++      bool                    have_intensity_bit;
+ };
+ #define ATMEL_LCDC_DMABADDR1  0x00
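For reference, the four 16-bpp layouts check_var() can now produce, derived from the offsets and lengths in the hunk above (bit 15 on the left; I is the intensity bit on older SoCs):

    older SoC, BGR wiring (IBGR:555):  I BBBBB GGGGG RRRRR
    older SoC, RGB wiring (IRGB:555):  I RRRRR GGGGG BBBBB
    newer SoC, BGR wiring (BGR:565):   BBBBB GGGGGG RRRRR
    newer SoC, RGB wiring (RGB:565):   RRRRR GGGGGG BBBBB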
diff --git a/queue-3.8/btrfs-use-rcu_barrier-to-wait-for-bdev-puts-at-unmount.patch b/queue-3.8/btrfs-use-rcu_barrier-to-wait-for-bdev-puts-at-unmount.patch
new file mode 100644 (file)
index 0000000..59f6e31
--- /dev/null
@@ -0,0 +1,58 @@
+From bc178622d40d87e75abc131007342429c9b03351 Mon Sep 17 00:00:00 2001
+From: Eric Sandeen <sandeen@redhat.com>
+Date: Sat, 9 Mar 2013 15:18:39 +0000
+Subject: btrfs: use rcu_barrier() to wait for bdev puts at unmount
+
+From: Eric Sandeen <sandeen@redhat.com>
+
+commit bc178622d40d87e75abc131007342429c9b03351 upstream.
+
+Doing this would reliably fail with -EBUSY for me:
+
+# mount /dev/sdb2 /mnt/scratch; umount /mnt/scratch; mkfs.btrfs -f /dev/sdb2
+...
+unable to open /dev/sdb2: Device or resource busy
+
+because mkfs.btrfs tries to open the device O_EXCL, and somebody still has it.
+
+Using systemtap to track bdev gets & puts shows a kworker thread doing a
+blkdev put after mkfs attempts a get; this is left over from the unmount
+path:
+
+btrfs_close_devices
+       __btrfs_close_devices
+               call_rcu(&device->rcu, free_device);
+                       free_device
+                               INIT_WORK(&device->rcu_work, __free_device);
+                               schedule_work(&device->rcu_work);
+
+so unmount might complete before __free_device fires & does its blkdev_put.
+
+Adding an rcu_barrier() to btrfs_close_devices() causes unmount to wait
+until all blkdev_put()s are done, and the device is truly free once
+unmount completes.
+
+Signed-off-by: Eric Sandeen <sandeen@redhat.com>
+Signed-off-by: Josef Bacik <jbacik@fusionio.com>
+Signed-off-by: Chris Mason <chris.mason@fusionio.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/volumes.c |    6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -681,6 +681,12 @@ int btrfs_close_devices(struct btrfs_fs_
+               __btrfs_close_devices(fs_devices);
+               free_fs_devices(fs_devices);
+       }
++      /*
++       * Wait for rcu kworkers under __btrfs_close_devices
++       * to finish all blkdev_puts so device is really
++       * free when umount is done.
++       */
++      rcu_barrier();
+       return ret;
+ }
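The guarantee being leaned on, as a short sketch (illustrative only):

    /* call_rcu() merely queues the callback; it may run well after the
     * caller has returned:
     */
    call_rcu(&device->rcu, free_device);

    /* rcu_barrier() blocks until every callback queued by prior
     * call_rcu() calls has completed, so unmount can no longer return
     * with free_device() still pending.
     */
    rcu_barrier();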
diff --git a/queue-3.8/kbuild-fix-make-headers_check-with-make-3.80.patch b/queue-3.8/kbuild-fix-make-headers_check-with-make-3.80.patch
new file mode 100644 (file)
index 0000000..4477b2c
--- /dev/null
@@ -0,0 +1,56 @@
+From c4619bc6fa5149a6ab39be845a39142b6a996ea5 Mon Sep 17 00:00:00 2001
+From: Sam Ravnborg <sam@ravnborg.org>
+Date: Mon, 4 Mar 2013 21:36:24 +0100
+Subject: kbuild: fix make headers_check with make 3.80
+
+From: Sam Ravnborg <sam@ravnborg.org>
+
+commit c4619bc6fa5149a6ab39be845a39142b6a996ea5 upstream.
+
+Commit 10b63956 ("UAPI: Plumb the UAPI Kbuilds into the user header
+installation and checking") introduced a dependency on make 3.81
+due to its use of $(or ...), which make 3.80 lacks.
+
+We do not want to lift the requirement to gmake 3.81 just yet...
+Included is a straightforward conversion to $(if ...): since $(or a,b)
+expands to the first non-empty argument, it can be open-coded as
+$(if a,a,b).
+
+Bisected-and-tested-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Cc: David Howells <dhowells@redhat.com>
+Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
+Signed-off-by: Michal Marek <mmarek@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ scripts/Makefile.headersinst |   11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+--- a/scripts/Makefile.headersinst
++++ b/scripts/Makefile.headersinst
+@@ -8,7 +8,7 @@
+ # ==========================================================================
+ # called may set destination dir (when installing to asm/)
+-_dst := $(or $(destination-y),$(dst),$(obj))
++_dst := $(if $(destination-y),$(destination-y),$(if $(dst),$(dst),$(obj)))
+ # generated header directory
+ gen := $(if $(gen),$(gen),$(subst include/,include/generated/,$(obj)))
+@@ -48,13 +48,14 @@ all-files     := $(header-y) $(genhdr-y)
+ output-files  := $(addprefix $(installdir)/, $(all-files))
+ input-files   := $(foreach hdr, $(header-y), \
+-                 $(or \
++                 $(if $(wildcard $(srcdir)/$(hdr)), \
+                       $(wildcard $(srcdir)/$(hdr)), \
+-                      $(wildcard $(oldsrcdir)/$(hdr)), \
+-                      $(error Missing UAPI file $(srcdir)/$(hdr)) \
++                      $(if $(wildcard $(oldsrcdir)/$(hdr)), \
++                              $(wildcard $(oldsrcdir)/$(hdr)), \
++                              $(error Missing UAPI file $(srcdir)/$(hdr))) \
+                  )) \
+                $(foreach hdr, $(genhdr-y), \
+-                 $(or \
++                 $(if $(wildcard $(gendir)/$(hdr)), \
+                       $(wildcard $(gendir)/$(hdr)), \
+                       $(error Missing generated UAPI file $(gendir)/$(hdr)) \
+                  ))
diff --git a/queue-3.8/mtd-nand-reintroduce-nand_no_readrdy-as-nand_need_readrdy.patch b/queue-3.8/mtd-nand-reintroduce-nand_no_readrdy-as-nand_need_readrdy.patch
new file mode 100644 (file)
index 0000000..fbdf655
--- /dev/null
@@ -0,0 +1,178 @@
+From 5bc7c33ca93a285dcfe7b7fd64970f6314440ad1 Mon Sep 17 00:00:00 2001
+From: Brian Norris <computersforpeace@gmail.com>
+Date: Wed, 13 Mar 2013 09:51:31 -0700
+Subject: mtd: nand: reintroduce NAND_NO_READRDY as NAND_NEED_READRDY
+
+From: Brian Norris <computersforpeace@gmail.com>
+
+commit 5bc7c33ca93a285dcfe7b7fd64970f6314440ad1 upstream.
+
+This partially reverts commit 1696e6bc2ae83734e64e206ac99766ea19e9a14e
+("mtd: nand: kill NAND_NO_READRDY").
+
+In that patch I overlooked a few things.
+
+The original documentation for NAND_NO_READRDY included "True for all
+large page devices, as they do not support autoincrement." I was
+conflating "not support autoincrement" with the NAND_NO_AUTOINCR option,
+which was in fact doing nothing. So, when I dropped NAND_NO_AUTOINCR, I
+concluded that I then could harmlessly drop NAND_NO_READRDY. But of
+course the fact that NAND_NO_AUTOINCR was doing nothing didn't mean
+NAND_NO_READRDY was doing nothing...
+
+So, NAND_NO_READRDY is re-introduced as NAND_NEED_READRDY and applied
+only to those few remaining small-page NAND which needed it in the first
+place.
+
+Reported-by: Alexander Shiyan <shc_work@mail.ru>
+Tested-by: Alexander Shiyan <shc_work@mail.ru>
+Signed-off-by: Brian Norris <computersforpeace@gmail.com>
+Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/nand/nand_base.c |   16 ++++++++
+ drivers/mtd/nand/nand_ids.c  |   78 ++++++++++++++++++++++---------------------
+ include/linux/mtd/nand.h     |    7 +++
+ 3 files changed, 63 insertions(+), 38 deletions(-)
+
+--- a/drivers/mtd/nand/nand_base.c
++++ b/drivers/mtd/nand/nand_base.c
+@@ -1527,6 +1527,14 @@ static int nand_do_read_ops(struct mtd_i
+                                       oobreadlen -= toread;
+                               }
+                       }
++
++                      if (chip->options & NAND_NEED_READRDY) {
++                              /* Apply delay or wait for ready/busy pin */
++                              if (!chip->dev_ready)
++                                      udelay(chip->chip_delay);
++                              else
++                                      nand_wait_ready(mtd);
++                      }
+               } else {
+                       memcpy(buf, chip->buffers->databuf + col, bytes);
+                       buf += bytes;
+@@ -1791,6 +1799,14 @@ static int nand_do_read_oob(struct mtd_i
+               len = min(len, readlen);
+               buf = nand_transfer_oob(chip, buf, ops, len);
++              if (chip->options & NAND_NEED_READRDY) {
++                      /* Apply delay or wait for ready/busy pin */
++                      if (!chip->dev_ready)
++                              udelay(chip->chip_delay);
++                      else
++                              nand_wait_ready(mtd);
++              }
++
+               readlen -= len;
+               if (!readlen)
+                       break;
+--- a/drivers/mtd/nand/nand_ids.c
++++ b/drivers/mtd/nand/nand_ids.c
+@@ -22,49 +22,51 @@
+ *     512     512 Byte page size
+ */
+ struct nand_flash_dev nand_flash_ids[] = {
++#define SP_OPTIONS NAND_NEED_READRDY
++#define SP_OPTIONS16 (SP_OPTIONS | NAND_BUSWIDTH_16)
+ #ifdef CONFIG_MTD_NAND_MUSEUM_IDS
+-      {"NAND 1MiB 5V 8-bit",          0x6e, 256, 1, 0x1000, 0},
+-      {"NAND 2MiB 5V 8-bit",          0x64, 256, 2, 0x1000, 0},
+-      {"NAND 4MiB 5V 8-bit",          0x6b, 512, 4, 0x2000, 0},
+-      {"NAND 1MiB 3,3V 8-bit",        0xe8, 256, 1, 0x1000, 0},
+-      {"NAND 1MiB 3,3V 8-bit",        0xec, 256, 1, 0x1000, 0},
+-      {"NAND 2MiB 3,3V 8-bit",        0xea, 256, 2, 0x1000, 0},
+-      {"NAND 4MiB 3,3V 8-bit",        0xd5, 512, 4, 0x2000, 0},
+-      {"NAND 4MiB 3,3V 8-bit",        0xe3, 512, 4, 0x2000, 0},
+-      {"NAND 4MiB 3,3V 8-bit",        0xe5, 512, 4, 0x2000, 0},
+-      {"NAND 8MiB 3,3V 8-bit",        0xd6, 512, 8, 0x2000, 0},
+-
+-      {"NAND 8MiB 1,8V 8-bit",        0x39, 512, 8, 0x2000, 0},
+-      {"NAND 8MiB 3,3V 8-bit",        0xe6, 512, 8, 0x2000, 0},
+-      {"NAND 8MiB 1,8V 16-bit",       0x49, 512, 8, 0x2000, NAND_BUSWIDTH_16},
+-      {"NAND 8MiB 3,3V 16-bit",       0x59, 512, 8, 0x2000, NAND_BUSWIDTH_16},
++      {"NAND 1MiB 5V 8-bit",          0x6e, 256, 1, 0x1000, SP_OPTIONS},
++      {"NAND 2MiB 5V 8-bit",          0x64, 256, 2, 0x1000, SP_OPTIONS},
++      {"NAND 4MiB 5V 8-bit",          0x6b, 512, 4, 0x2000, SP_OPTIONS},
++      {"NAND 1MiB 3,3V 8-bit",        0xe8, 256, 1, 0x1000, SP_OPTIONS},
++      {"NAND 1MiB 3,3V 8-bit",        0xec, 256, 1, 0x1000, SP_OPTIONS},
++      {"NAND 2MiB 3,3V 8-bit",        0xea, 256, 2, 0x1000, SP_OPTIONS},
++      {"NAND 4MiB 3,3V 8-bit",        0xd5, 512, 4, 0x2000, SP_OPTIONS},
++      {"NAND 4MiB 3,3V 8-bit",        0xe3, 512, 4, 0x2000, SP_OPTIONS},
++      {"NAND 4MiB 3,3V 8-bit",        0xe5, 512, 4, 0x2000, SP_OPTIONS},
++      {"NAND 8MiB 3,3V 8-bit",        0xd6, 512, 8, 0x2000, SP_OPTIONS},
++
++      {"NAND 8MiB 1,8V 8-bit",        0x39, 512, 8, 0x2000, SP_OPTIONS},
++      {"NAND 8MiB 3,3V 8-bit",        0xe6, 512, 8, 0x2000, SP_OPTIONS},
++      {"NAND 8MiB 1,8V 16-bit",       0x49, 512, 8, 0x2000, SP_OPTIONS16},
++      {"NAND 8MiB 3,3V 16-bit",       0x59, 512, 8, 0x2000, SP_OPTIONS16},
+ #endif
+-      {"NAND 16MiB 1,8V 8-bit",       0x33, 512, 16, 0x4000, 0},
+-      {"NAND 16MiB 3,3V 8-bit",       0x73, 512, 16, 0x4000, 0},
+-      {"NAND 16MiB 1,8V 16-bit",      0x43, 512, 16, 0x4000, NAND_BUSWIDTH_16},
+-      {"NAND 16MiB 3,3V 16-bit",      0x53, 512, 16, 0x4000, NAND_BUSWIDTH_16},
+-
+-      {"NAND 32MiB 1,8V 8-bit",       0x35, 512, 32, 0x4000, 0},
+-      {"NAND 32MiB 3,3V 8-bit",       0x75, 512, 32, 0x4000, 0},
+-      {"NAND 32MiB 1,8V 16-bit",      0x45, 512, 32, 0x4000, NAND_BUSWIDTH_16},
+-      {"NAND 32MiB 3,3V 16-bit",      0x55, 512, 32, 0x4000, NAND_BUSWIDTH_16},
+-
+-      {"NAND 64MiB 1,8V 8-bit",       0x36, 512, 64, 0x4000, 0},
+-      {"NAND 64MiB 3,3V 8-bit",       0x76, 512, 64, 0x4000, 0},
+-      {"NAND 64MiB 1,8V 16-bit",      0x46, 512, 64, 0x4000, NAND_BUSWIDTH_16},
+-      {"NAND 64MiB 3,3V 16-bit",      0x56, 512, 64, 0x4000, NAND_BUSWIDTH_16},
+-
+-      {"NAND 128MiB 1,8V 8-bit",      0x78, 512, 128, 0x4000, 0},
+-      {"NAND 128MiB 1,8V 8-bit",      0x39, 512, 128, 0x4000, 0},
+-      {"NAND 128MiB 3,3V 8-bit",      0x79, 512, 128, 0x4000, 0},
+-      {"NAND 128MiB 1,8V 16-bit",     0x72, 512, 128, 0x4000, NAND_BUSWIDTH_16},
+-      {"NAND 128MiB 1,8V 16-bit",     0x49, 512, 128, 0x4000, NAND_BUSWIDTH_16},
+-      {"NAND 128MiB 3,3V 16-bit",     0x74, 512, 128, 0x4000, NAND_BUSWIDTH_16},
+-      {"NAND 128MiB 3,3V 16-bit",     0x59, 512, 128, 0x4000, NAND_BUSWIDTH_16},
++      {"NAND 16MiB 1,8V 8-bit",       0x33, 512, 16, 0x4000, SP_OPTIONS},
++      {"NAND 16MiB 3,3V 8-bit",       0x73, 512, 16, 0x4000, SP_OPTIONS},
++      {"NAND 16MiB 1,8V 16-bit",      0x43, 512, 16, 0x4000, SP_OPTIONS16},
++      {"NAND 16MiB 3,3V 16-bit",      0x53, 512, 16, 0x4000, SP_OPTIONS16},
++
++      {"NAND 32MiB 1,8V 8-bit",       0x35, 512, 32, 0x4000, SP_OPTIONS},
++      {"NAND 32MiB 3,3V 8-bit",       0x75, 512, 32, 0x4000, SP_OPTIONS},
++      {"NAND 32MiB 1,8V 16-bit",      0x45, 512, 32, 0x4000, SP_OPTIONS16},
++      {"NAND 32MiB 3,3V 16-bit",      0x55, 512, 32, 0x4000, SP_OPTIONS16},
++
++      {"NAND 64MiB 1,8V 8-bit",       0x36, 512, 64, 0x4000, SP_OPTIONS},
++      {"NAND 64MiB 3,3V 8-bit",       0x76, 512, 64, 0x4000, SP_OPTIONS},
++      {"NAND 64MiB 1,8V 16-bit",      0x46, 512, 64, 0x4000, SP_OPTIONS16},
++      {"NAND 64MiB 3,3V 16-bit",      0x56, 512, 64, 0x4000, SP_OPTIONS16},
++
++      {"NAND 128MiB 1,8V 8-bit",      0x78, 512, 128, 0x4000, SP_OPTIONS},
++      {"NAND 128MiB 1,8V 8-bit",      0x39, 512, 128, 0x4000, SP_OPTIONS},
++      {"NAND 128MiB 3,3V 8-bit",      0x79, 512, 128, 0x4000, SP_OPTIONS},
++      {"NAND 128MiB 1,8V 16-bit",     0x72, 512, 128, 0x4000, SP_OPTIONS16},
++      {"NAND 128MiB 1,8V 16-bit",     0x49, 512, 128, 0x4000, SP_OPTIONS16},
++      {"NAND 128MiB 3,3V 16-bit",     0x74, 512, 128, 0x4000, SP_OPTIONS16},
++      {"NAND 128MiB 3,3V 16-bit",     0x59, 512, 128, 0x4000, SP_OPTIONS16},
+-      {"NAND 256MiB 3,3V 8-bit",      0x71, 512, 256, 0x4000, 0},
++      {"NAND 256MiB 3,3V 8-bit",      0x71, 512, 256, 0x4000, SP_OPTIONS},
+       /*
+        * These are the new chips with large page size. The pagesize and the
+--- a/include/linux/mtd/nand.h
++++ b/include/linux/mtd/nand.h
+@@ -187,6 +187,13 @@ typedef enum {
+  * This happens with the Renesas AG-AND chips, possibly others.
+  */
+ #define BBT_AUTO_REFRESH      0x00000080
++/*
++ * Chip requires ready check on read (for auto-incremented sequential read).
++ * True only for small page devices; large page devices do not support
++ * autoincrement.
++ */
++#define NAND_NEED_READRDY     0x00000100
++
+ /* Chip does not allow subpage writes */
+ #define NAND_NO_SUBPAGE_WRITE 0x00000200
diff --git a/queue-3.8/net-mlx4_en-disable-rfs-when-running-in-sriov-mode.patch b/queue-3.8/net-mlx4_en-disable-rfs-when-running-in-sriov-mode.patch
new file mode 100644 (file)
index 0000000..2f6009a
--- /dev/null
@@ -0,0 +1,47 @@
+From a229e488ac3f904d06c20d8d3f47831db3c7a15a Mon Sep 17 00:00:00 2001
+From: Amir Vadai <amirv@mellanox.com>
+Date: Thu, 7 Mar 2013 03:46:57 +0000
+Subject: net/mlx4_en: Disable RFS when running in SRIOV mode
+
+From: Amir Vadai <amirv@mellanox.com>
+
+commit a229e488ac3f904d06c20d8d3f47831db3c7a15a upstream.
+
+Commit 37706996 ("mlx4_en: fix allocation of CPU affinity reverse-map") fixed
+a bug when mlx4_dev->caps.comp_pool is larger than the number of device rx
+rings, but introduced a regression.
+
+When mlx4_core activates its "legacy mode" (e.g. when running in SRIOV mode)
+w.r.t. EQ/IRQ usage, comp_pool becomes zero and we crash on a divide by
+zero in alloc_cpu_rmap().
+
+Fix that by enabling RFS only when running in non-legacy mode.
+
+Reported-by: Yan Burman <yanb@mellanox.com>
+Cc: Kleber Sacilotto de Souza <klebers@linux.vnet.ibm.com>
+Signed-off-by: Amir Vadai <amirv@mellanox.com>
+Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/ethernet/mellanox/mlx4/en_netdev.c |    8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+@@ -1434,9 +1434,11 @@ int mlx4_en_alloc_resources(struct mlx4_
+       }
+ #ifdef CONFIG_RFS_ACCEL
+-      priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool);
+-      if (!priv->dev->rx_cpu_rmap)
+-              goto err;
++      if (priv->mdev->dev->caps.comp_pool) {
++              priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool);
++              if (!priv->dev->rx_cpu_rmap)
++                      goto err;
++      }
+ #endif
+       return 0;
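The crash mechanism, sketched from memory of lib/cpu_rmap.c (an assumption, not a quote of that file):

    /* alloc_cpu_rmap(size) seeds its nearness table with a modulo of the
     * table size, roughly:
     *
     *      for_each_possible_cpu(cpu)
     *              rmap->near[cpu].index = cpu % size;
     *
     * so size == 0 (comp_pool in legacy/SRIOV mode) is an integer divide
     * by zero. Guarding alloc_irq_cpu_rmap() on comp_pool, as the hunk
     * above does, simply never reaches it.
     */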
diff --git a/queue-3.8/net-mlx4_en-initialize-rfs-filters-lock-and-list-in-init_netdev.patch b/queue-3.8/net-mlx4_en-initialize-rfs-filters-lock-and-list-in-init_netdev.patch
new file mode 100644 (file)
index 0000000..31ea743
--- /dev/null
@@ -0,0 +1,46 @@
+From 78fb2de711ec28997bf38bcf3e48e108e907be77 Mon Sep 17 00:00:00 2001
+From: Amir Vadai <amirv@mellanox.com>
+Date: Thu, 24 Jan 2013 01:54:19 +0000
+Subject: net/mlx4_en: Initialize RFS filters lock and list in init_netdev
+
+From: Amir Vadai <amirv@mellanox.com>
+
+commit 78fb2de711ec28997bf38bcf3e48e108e907be77 upstream.
+
+filters_lock might have been in use while it was being re-initialized.
+Move the filters_lock and filters_list initialization to init_netdev instead
+of alloc_resources, which is called every time the device is configured.
+
+Signed-off-by: Amir Vadai <amirv@mellanox.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/ethernet/mellanox/mlx4/en_netdev.c |    8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+@@ -1437,9 +1437,6 @@ int mlx4_en_alloc_resources(struct mlx4_
+       priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool);
+       if (!priv->dev->rx_cpu_rmap)
+               goto err;
+-
+-      INIT_LIST_HEAD(&priv->filters);
+-      spin_lock_init(&priv->filters_lock);
+ #endif
+       return 0;
+@@ -1634,6 +1631,11 @@ int mlx4_en_init_netdev(struct mlx4_en_d
+       if (err)
+               goto out;
++#ifdef CONFIG_RFS_ACCEL
++      INIT_LIST_HEAD(&priv->filters);
++      spin_lock_init(&priv->filters_lock);
++#endif
++
+       /* Allocate page for receive rings */
+       err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
+                               MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
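A sketch of the window being closed (hypothetical interleaving; alloc_resources runs on every reconfiguration, init_netdev only once):

    /* CPU0: reconfigure                    CPU1: RFS filter work
     * mlx4_en_alloc_resources()
     *   spin_lock_init(&filters_lock)
     *                                      spin_lock_bh(&filters_lock)
     * (reconfigure again)
     *   spin_lock_init(&filters_lock)      <-- re-init of a held lock
     */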
diff --git a/queue-3.8/perf-x86-fix-link-failure-for-non-intel-configs.patch b/queue-3.8/perf-x86-fix-link-failure-for-non-intel-configs.patch
new file mode 100644 (file)
index 0000000..5fb9d69
--- /dev/null
@@ -0,0 +1,48 @@
+From 6c4d3bc99b3341067775efd4d9d13cc8e655fd7c Mon Sep 17 00:00:00 2001
+From: David Rientjes <rientjes@google.com>
+Date: Sun, 17 Mar 2013 15:49:10 -0700
+Subject: perf,x86: fix link failure for non-Intel configs
+
+From: David Rientjes <rientjes@google.com>
+
+commit 6c4d3bc99b3341067775efd4d9d13cc8e655fd7c upstream.
+
+Commit 1d9d8639c063 ("perf,x86: fix kernel crash with PEBS/BTS after
+suspend/resume") introduces a link failure since
+perf_restore_debug_store() is only defined for CONFIG_CPU_SUP_INTEL:
+
+       arch/x86/power/built-in.o: In function `restore_processor_state':
+       (.text+0x45c): undefined reference to `perf_restore_debug_store'
+
+Fix it by defining the dummy function appropriately.
+
+Signed-off-by: David Rientjes <rientjes@google.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/perf_event.h |    6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -753,7 +753,6 @@ extern void perf_event_enable(struct per
+ extern void perf_event_disable(struct perf_event *event);
+ extern int __perf_event_disable(void *info);
+ extern void perf_event_task_tick(void);
+-extern void perf_restore_debug_store(void);
+ #else
+ static inline void
+ perf_event_task_sched_in(struct task_struct *prev,
+@@ -793,6 +792,11 @@ static inline void perf_event_enable(str
+ static inline void perf_event_disable(struct perf_event *event)               { }
+ static inline int __perf_event_disable(void *info)                    { return -1; }
+ static inline void perf_event_task_tick(void)                         { }
++#endif
++
++#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
++extern void perf_restore_debug_store(void);
++#else
+ static inline void perf_restore_debug_store(void)                     { }
+ #endif
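This is the standard kernel stub pattern; a generic sketch with placeholder names (CONFIG_FOO and foo_fixup are hypothetical):

    #ifdef CONFIG_FOO
    extern void foo_fixup(void);            /* real version in foo.c */
    #else
    static inline void foo_fixup(void) { }  /* no-op, still links */
    #endif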
diff --git a/queue-3.8/perf-x86-fix-wrmsr_on_cpu-warning-on-suspend-resume.patch b/queue-3.8/perf-x86-fix-wrmsr_on_cpu-warning-on-suspend-resume.patch
new file mode 100644 (file)
index 0000000..0df0375
--- /dev/null
@@ -0,0 +1,46 @@
+From 2a6e06b2aed6995af401dcd4feb5e79a0c7ea554 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Sun, 17 Mar 2013 15:44:43 -0700
+Subject: perf,x86: fix wrmsr_on_cpu() warning on suspend/resume
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit 2a6e06b2aed6995af401dcd4feb5e79a0c7ea554 upstream.
+
+Commit 1d9d8639c063 ("perf,x86: fix kernel crash with PEBS/BTS after
+suspend/resume") fixed a crash when doing PEBS performance profiling
+after resuming, but in using init_debug_store_on_cpu() to restore the
+DS_AREA MSR it also resulted in a new WARN_ON() triggering.
+
+init_debug_store_on_cpu() uses "wrmsr_on_cpu()", which in turn uses CPU
+cross-calls to do the MSR update.  Which is not really valid at the
+early resume stage, and the warning is quite reasonable.  Now, it all
+happens to _work_, for the simple reason that smp_call_function_single()
+ends up just doing the call directly on the CPU when the CPU number
+matches, but we really should just do the wrmsr() directly instead.
+
+This duplicates the wrmsr() logic, but hopefully we can just remove the
+wrmsr_on_cpu() version eventually.
+
+Reported-and-tested-by: Parag Warudkar <parag.lkml@gmail.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/perf_event_intel_ds.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
++++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
+@@ -732,8 +732,10 @@ void intel_ds_init(void)
+ void perf_restore_debug_store(void)
+ {
++      struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
++
+       if (!x86_pmu.bts && !x86_pmu.pebs)
+               return;
+-      init_debug_store_on_cpu(smp_processor_id());
++      wrmsrl(MSR_IA32_DS_AREA, (unsigned long)ds);
+ }
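The two forms side by side (assuming the standard x86 MSR helpers):

    /* Cross-call form: goes through smp_call_function_single(), which
     * warns when used this early in resume:
     */
    wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, lo, hi);

    /* Direct form used by the fix: a plain WRMSR on the local CPU, no
     * IPI machinery involved:
     */
    wrmsrl(MSR_IA32_DS_AREA, (unsigned long)ds);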
diff --git a/queue-3.8/powerpc-fix-cputable-entry-for-970mp-rev-1.0.patch b/queue-3.8/powerpc-fix-cputable-entry-for-970mp-rev-1.0.patch
new file mode 100644 (file)
index 0000000..3ea2f54
--- /dev/null
@@ -0,0 +1,33 @@
+From d63ac5f6cf31c8a83170a9509b350c1489a7262b Mon Sep 17 00:00:00 2001
+From: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Date: Wed, 13 Mar 2013 09:55:02 +1100
+Subject: powerpc: Fix cputable entry for 970MP rev 1.0
+
+From: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+
+commit d63ac5f6cf31c8a83170a9509b350c1489a7262b upstream.
+
+Commit 44ae3ab3358e962039c36ad4ae461ae9fb29596c forgot to update
+the entry for the 970MP rev 1.0 processor when moving some CPU
+feature bits to the MMU feature bit mask. This breaks booting
+on some rare G5 models using that chip revision.
+
+Reported-by: Phileas Fogg <phileas-fogg@mail.ru>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/cputable.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/powerpc/kernel/cputable.c
++++ b/arch/powerpc/kernel/cputable.c
+@@ -275,7 +275,7 @@ static struct cpu_spec __initdata cpu_sp
+               .cpu_features           = CPU_FTRS_PPC970,
+               .cpu_user_features      = COMMON_USER_POWER4 |
+                       PPC_FEATURE_HAS_ALTIVEC_COMP,
+-              .mmu_features           = MMU_FTR_HPTE_TABLE,
++              .mmu_features           = MMU_FTRS_PPC970,
+               .icache_bsize           = 128,
+               .dcache_bsize           = 128,
+               .num_pmcs               = 8,
diff --git a/queue-3.8/powerpc-fix-stab-initialization.patch b/queue-3.8/powerpc-fix-stab-initialization.patch
new file mode 100644 (file)
index 0000000..8ca4b60
--- /dev/null
@@ -0,0 +1,33 @@
+From 13938117a57f88a22f0df9722a5db7271fda85cd Mon Sep 17 00:00:00 2001
+From: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Date: Wed, 13 Mar 2013 09:49:06 +1100
+Subject: powerpc: Fix STAB initialization
+
+From: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+
+commit 13938117a57f88a22f0df9722a5db7271fda85cd upstream.
+
+Commit f5339277eb8d3aed37f12a27988366f68ab68930 accidentally removed
+more than just iSeries bits and took out the call to stab_initialize()
+thus breaking support for POWER3 processors.
+
+Put it back. (Yes, nobody noticed until now ...)
+
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/mm/hash_utils_64.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/powerpc/mm/hash_utils_64.c
++++ b/arch/powerpc/mm/hash_utils_64.c
+@@ -758,6 +758,8 @@ void __init early_init_mmu(void)
+       /* Initialize stab / SLB management */
+       if (mmu_has_feature(MMU_FTR_SLB))
+               slb_initialize();
++      else
++              stab_initialize(get_paca()->stab_real);
+ }
+ #ifdef CONFIG_SMP
diff --git a/queue-3.8/powerpc-make-vsid_bits-dependency-explicit.patch b/queue-3.8/powerpc-make-vsid_bits-dependency-explicit.patch
new file mode 100644 (file)
index 0000000..b2418eb
--- /dev/null
@@ -0,0 +1,51 @@
+From e39d1a471484662620651cd9520250d33843f235 Mon Sep 17 00:00:00 2001
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+Date: Wed, 13 Mar 2013 03:34:53 +0000
+Subject: powerpc: Make VSID_BITS* dependency explicit
+
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+
+commit e39d1a471484662620651cd9520250d33843f235 upstream.
+
+VSID_BITS and VSID_BITS_1T depend on the context bits and user esid
+bits. Make the dependency explicit.
+
+Acked-by: Paul Mackerras <paulus@samba.org>
+Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/mmu-hash64.h |   11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+--- a/arch/powerpc/include/asm/mmu-hash64.h
++++ b/arch/powerpc/include/asm/mmu-hash64.h
+@@ -381,21 +381,22 @@ extern void slb_set_size(u16 size);
+  * hash collisions.
+  */
++#define CONTEXT_BITS          19
++#define USER_ESID_BITS                18
++#define USER_ESID_BITS_1T     6
++
+ /*
+  * This should be computed such that protovosid * vsid_mulitplier
+  * doesn't overflow 64 bits. It should also be co-prime to vsid_modulus
+  */
+ #define VSID_MULTIPLIER_256M  ASM_CONST(12538073)     /* 24-bit prime */
+-#define VSID_BITS_256M                38
++#define VSID_BITS_256M                (CONTEXT_BITS + USER_ESID_BITS + 1)
+ #define VSID_MODULUS_256M     ((1UL<<VSID_BITS_256M)-1)
+ #define VSID_MULTIPLIER_1T    ASM_CONST(12538073)     /* 24-bit prime */
+-#define VSID_BITS_1T          26
++#define VSID_BITS_1T          (CONTEXT_BITS + USER_ESID_BITS_1T + 1)
+ #define VSID_MODULUS_1T               ((1UL<<VSID_BITS_1T)-1)
+-#define CONTEXT_BITS          19
+-#define USER_ESID_BITS                18
+-#define USER_ESID_BITS_1T     6
+ #define USER_VSID_RANGE       (1UL << (USER_ESID_BITS + SID_SHIFT))
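A quick arithmetic check that the symbolic forms reproduce the old literals:

    /* VSID_BITS_256M = CONTEXT_BITS + USER_ESID_BITS + 1
     *                = 19 + 18 + 1 = 38   (old hard-coded value)
     * VSID_BITS_1T   = CONTEXT_BITS + USER_ESID_BITS_1T + 1
     *                = 19 + 6 + 1  = 26   (likewise)
     */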
diff --git a/queue-3.8/powerpc-rename-user_esid_bits-to-esid_bits.patch b/queue-3.8/powerpc-rename-user_esid_bits-to-esid_bits.patch
new file mode 100644 (file)
index 0000000..da82194
--- /dev/null
@@ -0,0 +1,129 @@
+From af81d7878c641629f2693ae3fdaf74b4af14dfca Mon Sep 17 00:00:00 2001
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+Date: Wed, 13 Mar 2013 03:34:55 +0000
+Subject: powerpc: Rename USER_ESID_BITS* to ESID_BITS*
+
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+
+commit af81d7878c641629f2693ae3fdaf74b4af14dfca upstream.
+
+Now we use ESID_BITS of the kernel address to build the proto-VSID, so
+rename USER_ESID_BITS* to ESID_BITS*.
+
+Acked-by: Paul Mackerras <paulus@samba.org>
+Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/mmu-hash64.h |   16 ++++++++--------
+ arch/powerpc/kernel/exceptions-64s.S  |    2 +-
+ arch/powerpc/kvm/book3s_64_mmu_host.c |    4 ++--
+ arch/powerpc/mm/pgtable_64.c          |    2 +-
+ arch/powerpc/mm/slb_low.S             |    4 ++--
+ 5 files changed, 14 insertions(+), 14 deletions(-)
+
+--- a/arch/powerpc/include/asm/mmu-hash64.h
++++ b/arch/powerpc/include/asm/mmu-hash64.h
+@@ -378,12 +378,12 @@ extern void slb_set_size(u16 size);
+  */
+ #define CONTEXT_BITS          19
+-#define USER_ESID_BITS                18
+-#define USER_ESID_BITS_1T     6
++#define ESID_BITS             18
++#define ESID_BITS_1T          6
+ /*
+  * 256MB segment
+- * The proto-VSID space has 2^(CONTEX_BITS + USER_ESID_BITS) - 1 segments
++ * The proto-VSID space has 2^(CONTEX_BITS + ESID_BITS) - 1 segments
+  * available for user + kernel mapping. The top 4 contexts are used for
+  * kernel mapping. Each segment contains 2^28 bytes. Each
+  * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
+@@ -396,15 +396,15 @@ extern void slb_set_size(u16 size);
+  * doesn't overflow 64 bits. It should also be co-prime to vsid_modulus
+  */
+ #define VSID_MULTIPLIER_256M  ASM_CONST(12538073)     /* 24-bit prime */
+-#define VSID_BITS_256M                (CONTEXT_BITS + USER_ESID_BITS)
++#define VSID_BITS_256M                (CONTEXT_BITS + ESID_BITS)
+ #define VSID_MODULUS_256M     ((1UL<<VSID_BITS_256M)-1)
+ #define VSID_MULTIPLIER_1T    ASM_CONST(12538073)     /* 24-bit prime */
+-#define VSID_BITS_1T          (CONTEXT_BITS + USER_ESID_BITS_1T)
++#define VSID_BITS_1T          (CONTEXT_BITS + ESID_BITS_1T)
+ #define VSID_MODULUS_1T               ((1UL<<VSID_BITS_1T)-1)
+-#define USER_VSID_RANGE       (1UL << (USER_ESID_BITS + SID_SHIFT))
++#define USER_VSID_RANGE       (1UL << (ESID_BITS + SID_SHIFT))
+ /*
+  * This macro generates asm code to compute the VSID scramble
+@@ -540,9 +540,9 @@ static inline unsigned long get_vsid(uns
+               return 0;
+       if (ssize == MMU_SEGSIZE_256M)
+-              return vsid_scramble((context << USER_ESID_BITS)
++              return vsid_scramble((context << ESID_BITS)
+                                    | (ea >> SID_SHIFT), 256M);
+-      return vsid_scramble((context << USER_ESID_BITS_1T)
++      return vsid_scramble((context << ESID_BITS_1T)
+                            | (ea >> SID_SHIFT_1T), 1T);
+ }
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -1288,7 +1288,7 @@ _GLOBAL(do_stab_bolted)
+       addi    r9,r9,(MAX_USER_CONTEXT + 1)@l
+       srdi    r10,r11,SID_SHIFT
+-      rldimi  r10,r9,USER_ESID_BITS,0 /* proto vsid */
++      rldimi  r10,r9,ESID_BITS,0 /* proto vsid */
+       ASM_VSID_SCRAMBLE(r10, r9, 256M)
+       rldic   r9,r10,12,16    /* r9 = vsid << 12 */
+--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
++++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
+@@ -326,8 +326,8 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcp
+       vcpu3s->context_id[0] = err;
+       vcpu3s->proto_vsid_max = ((vcpu3s->context_id[0] + 1)
+-                                << USER_ESID_BITS) - 1;
+-      vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS;
++                                << ESID_BITS) - 1;
++      vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << ESID_BITS;
+       vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first;
+       kvmppc_mmu_hpte_init(vcpu);
+--- a/arch/powerpc/mm/pgtable_64.c
++++ b/arch/powerpc/mm/pgtable_64.c
+@@ -61,7 +61,7 @@
+ #endif
+ #ifdef CONFIG_PPC_STD_MMU_64
+-#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
++#if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT))
+ #error TASK_SIZE_USER64 exceeds user VSID range
+ #endif
+ #endif
+--- a/arch/powerpc/mm/slb_low.S
++++ b/arch/powerpc/mm/slb_low.S
+@@ -232,7 +232,7 @@ _GLOBAL(slb_allocate_user)
+  * r3 = EA, r9 = context, r10 = ESID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
+  */
+ slb_finish_load:
+-      rldimi  r10,r9,USER_ESID_BITS,0
++      rldimi  r10,r9,ESID_BITS,0
+       ASM_VSID_SCRAMBLE(r10,r9,256M)
+       /*
+        * bits above VSID_BITS_256M need to be ignored from r10
+@@ -301,7 +301,7 @@ _GLOBAL(slb_compare_rr_to_size)
+  */
+ slb_finish_load_1T:
+       srdi    r10,r10,(SID_SHIFT_1T - SID_SHIFT)      /* get 1T ESID */
+-      rldimi  r10,r9,USER_ESID_BITS_1T,0
++      rldimi  r10,r9,ESID_BITS_1T,0
+       ASM_VSID_SCRAMBLE(r10,r9,1T)
+       /*
+        * bits above VSID_BITS_1T need to be ignored from r10
diff --git a/queue-3.8/powerpc-update-kernel-vsid-range.patch b/queue-3.8/powerpc-update-kernel-vsid-range.patch
new file mode 100644 (file)
index 0000000..c6fc81b
--- /dev/null
@@ -0,0 +1,491 @@
+From c60ac5693c47df32a2b4b18af97fca5635def015 Mon Sep 17 00:00:00 2001
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+Date: Wed, 13 Mar 2013 03:34:54 +0000
+Subject: powerpc: Update kernel VSID range
+
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+
+commit c60ac5693c47df32a2b4b18af97fca5635def015 upstream.
+
+This patch changes the kernel VSID range so that we limit VSID_BITS to 37.
+This enables us to support 64TB with a 65 bit VA (37 + 28). Without this
+patch we have boot hangs on platforms that only support a 65 bit VA.
+
+With this patch the proto-VSID is now generated as below:
+
+We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated
+from mmu context id and effective segment id of the address.
+
+For user processes the max context id is limited to ((1ul << 19) - 5).
+For kernel space, we use the top 4 context ids to map addresses as below:
+0x7fffc -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
+0x7fffd -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
+0x7fffe -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
+0x7ffff -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
+
+Acked-by: Paul Mackerras <paulus@samba.org>
+Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+Tested-by: Geoff Levand <geoff@infradead.org>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/mmu-hash64.h |  121 +++++++++++++++++-----------------
+ arch/powerpc/kernel/exceptions-64s.S  |   34 +++++++--
+ arch/powerpc/mm/hash_utils_64.c       |   20 ++++-
+ arch/powerpc/mm/mmu_context_hash64.c  |   11 ---
+ arch/powerpc/mm/slb_low.S             |   50 +++++++-------
+ arch/powerpc/mm/tlb_hash64.c          |    2 
+ 6 files changed, 129 insertions(+), 109 deletions(-)
+
+--- a/arch/powerpc/include/asm/mmu-hash64.h
++++ b/arch/powerpc/include/asm/mmu-hash64.h
+@@ -343,17 +343,16 @@ extern void slb_set_size(u16 size);
+ /*
+  * VSID allocation (256MB segment)
+  *
+- * We first generate a 38-bit "proto-VSID".  For kernel addresses this
+- * is equal to the ESID | 1 << 37, for user addresses it is:
+- *    (context << USER_ESID_BITS) | (esid & ((1U << USER_ESID_BITS) - 1)
+- *
+- * This splits the proto-VSID into the below range
+- *  0 - (2^(CONTEXT_BITS + USER_ESID_BITS) - 1) : User proto-VSID range
+- *  2^(CONTEXT_BITS + USER_ESID_BITS) - 2^(VSID_BITS) : Kernel proto-VSID range
+- *
+- * We also have CONTEXT_BITS + USER_ESID_BITS = VSID_BITS - 1
+- * That is, we assign half of the space to user processes and half
+- * to the kernel.
++ * We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated
++ * from mmu context id and effective segment id of the address.
++ *
++ * For user processes max context id is limited to ((1ul << 19) - 5)
++ * for kernel space, we use the top 4 context ids to map address as below
++ * NOTE: each context only support 64TB now.
++ * 0x7fffc -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
++ * 0x7fffd -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
++ * 0x7fffe -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
++ * 0x7ffff -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
+  *
+  * The proto-VSIDs are then scrambled into real VSIDs with the
+  * multiplicative hash:
+@@ -363,22 +362,19 @@ extern void slb_set_size(u16 size);
+  * VSID_MULTIPLIER is prime, so in particular it is
+  * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
+  * Because the modulus is 2^n-1 we can compute it efficiently without
+- * a divide or extra multiply (see below).
+- *
+- * This scheme has several advantages over older methods:
+- *
+- *    - We have VSIDs allocated for every kernel address
+- * (i.e. everything above 0xC000000000000000), except the very top
+- * segment, which simplifies several things.
+- *
+- *    - We allow for USER_ESID_BITS significant bits of ESID and
+- * CONTEXT_BITS  bits of context for user addresses.
+- *  i.e. 64T (46 bits) of address space for up to half a million contexts.
+- *
+- *    - The scramble function gives robust scattering in the hash
+- * table (at least based on some initial results).  The previous
+- * method was more susceptible to pathological cases giving excessive
+- * hash collisions.
++ * a divide or extra multiply (see below). The scramble function gives
++ * robust scattering in the hash table (at least based on some initial
++ * results).
++ *
++ * We also consider VSID 0 special. We use VSID 0 for slb entries mapping
++ * bad address. This enables us to consolidate bad address handling in
++ * hash_page.
++ *
++ * We also need to avoid the last segment of the last context, because that
++ * would give a protovsid of 0x1fffffffff. That will result in a VSID 0
++ * because of the modulo operation in vsid scramble. But the vmemmap
++ * (which is what uses region 0xf) will never be close to 64TB in size
++ * (it's 56 bytes per page of system memory).
+  */
+ #define CONTEXT_BITS          19
+@@ -386,15 +382,25 @@ extern void slb_set_size(u16 size);
+ #define USER_ESID_BITS_1T     6
+ /*
++ * 256MB segment
++ * The proto-VSID space has 2^(CONTEX_BITS + USER_ESID_BITS) - 1 segments
++ * available for user + kernel mapping. The top 4 contexts are used for
++ * kernel mapping. Each segment contains 2^28 bytes. Each
++ * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
++ * (19 == 37 + 28 - 46).
++ */
++#define MAX_USER_CONTEXT      ((ASM_CONST(1) << CONTEXT_BITS) - 5)
++
++/*
+  * This should be computed such that protovosid * vsid_mulitplier
+  * doesn't overflow 64 bits. It should also be co-prime to vsid_modulus
+  */
+ #define VSID_MULTIPLIER_256M  ASM_CONST(12538073)     /* 24-bit prime */
+-#define VSID_BITS_256M                (CONTEXT_BITS + USER_ESID_BITS + 1)
++#define VSID_BITS_256M                (CONTEXT_BITS + USER_ESID_BITS)
+ #define VSID_MODULUS_256M     ((1UL<<VSID_BITS_256M)-1)
+ #define VSID_MULTIPLIER_1T    ASM_CONST(12538073)     /* 24-bit prime */
+-#define VSID_BITS_1T          (CONTEXT_BITS + USER_ESID_BITS_1T + 1)
++#define VSID_BITS_1T          (CONTEXT_BITS + USER_ESID_BITS_1T)
+ #define VSID_MODULUS_1T               ((1UL<<VSID_BITS_1T)-1)
+@@ -422,7 +428,8 @@ extern void slb_set_size(u16 size);
+       srdi    rx,rt,VSID_BITS_##size;                                 \
+       clrldi  rt,rt,(64-VSID_BITS_##size);                            \
+       add     rt,rt,rx;               /* add high and low bits */     \
+-      /* Now, r3 == VSID (mod 2^36-1), and lies between 0 and         \
++      /* NOTE: explanation based on VSID_BITS_##size = 36             \
++       * Now, r3 == VSID (mod 2^36-1), and lies between 0 and         \
+        * 2^36-1+2^28-1.  That in particular means that if r3 >=       \
+        * 2^36-1, then r3+1 has the 2^36 bit set.  So, if r3+1 has     \
+        * the bit clear, r3 already has the answer we want, if it      \
+@@ -514,34 +521,6 @@ typedef struct {
+       })
+ #endif /* 1 */
+-/*
+- * This is only valid for addresses >= PAGE_OFFSET
+- * The proto-VSID space is divided into two class
+- * User:   0 to 2^(CONTEXT_BITS + USER_ESID_BITS) -1
+- * kernel: 2^(CONTEXT_BITS + USER_ESID_BITS) to 2^(VSID_BITS) - 1
+- *
+- * With KERNEL_START at 0xc000000000000000, the proto vsid for
+- * the kernel ends up with 0xc00000000 (36 bits). With 64TB
+- * support we need to have kernel proto-VSID in the
+- * [2^37 to 2^38 - 1] range due to the increased USER_ESID_BITS.
+- */
+-static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
+-{
+-      unsigned long proto_vsid;
+-      /*
+-       * We need to make sure proto_vsid for the kernel is
+-       * >= 2^(CONTEXT_BITS + USER_ESID_BITS[_1T])
+-       */
+-      if (ssize == MMU_SEGSIZE_256M) {
+-              proto_vsid = ea >> SID_SHIFT;
+-              proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS));
+-              return vsid_scramble(proto_vsid, 256M);
+-      }
+-      proto_vsid = ea >> SID_SHIFT_1T;
+-      proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS_1T));
+-      return vsid_scramble(proto_vsid, 1T);
+-}
+-
+ /* Returns the segment size indicator for a user address */
+ static inline int user_segment_size(unsigned long addr)
+ {
+@@ -551,10 +530,15 @@ static inline int user_segment_size(unsi
+       return MMU_SEGSIZE_256M;
+ }
+-/* This is only valid for user addresses (which are below 2^44) */
+ static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
+                                    int ssize)
+ {
++      /*
++       * Bad address. We return VSID 0 for that
++       */
++      if ((ea & ~REGION_MASK) >= PGTABLE_RANGE)
++              return 0;
++
+       if (ssize == MMU_SEGSIZE_256M)
+               return vsid_scramble((context << USER_ESID_BITS)
+                                    | (ea >> SID_SHIFT), 256M);
+@@ -562,6 +546,25 @@ static inline unsigned long get_vsid(uns
+                            | (ea >> SID_SHIFT_1T), 1T);
+ }
++/*
++ * This is only valid for addresses >= PAGE_OFFSET
++ *
++ * For kernel space, we use the top 4 context ids to map address as below
++ * 0x7fffc -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
++ * 0x7fffd -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
++ * 0x7fffe -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
++ * 0x7ffff -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
++ */
++static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
++{
++      unsigned long context;
++
++      /*
++       * kernel take the top 4 context from the available range
++       */
++      context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1;
++      return get_vsid(context, ea, ssize);
++}
+ #endif /* __ASSEMBLY__ */
+ #endif /* _ASM_POWERPC_MMU_HASH64_H_ */
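Plugging numbers into the kernel-context formula above, with MAX_USER_CONTEXT = (1 << 19) - 5 = 0x7fffb, reproduces the table from the commit message:

    /* context = MAX_USER_CONTEXT + ((ea >> 60) - 0xc) + 1
     * ea in 0xc... : 0x7fffb + 0 + 1 = 0x7fffc
     * ea in 0xd... : 0x7fffb + 1 + 1 = 0x7fffd
     * ea in 0xe... : 0x7fffb + 2 + 1 = 0x7fffe
     * ea in 0xf... : 0x7fffb + 3 + 1 = 0x7ffff
     */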
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -1268,20 +1268,36 @@ do_ste_alloc:
+ _GLOBAL(do_stab_bolted)
+       stw     r9,PACA_EXSLB+EX_CCR(r13)       /* save CR in exc. frame */
+       std     r11,PACA_EXSLB+EX_SRR0(r13)     /* save SRR0 in exc. frame */
++      mfspr   r11,SPRN_DAR                    /* ea */
++      /*
++       * check for bad kernel/user address
++       * (ea & ~REGION_MASK) >= PGTABLE_RANGE
++       */
++      rldicr. r9,r11,4,(63 - 46 - 4)
++      li      r9,0    /* VSID = 0 for bad address */
++      bne-    0f
++
++      /*
++       * Calculate VSID:
++       * This is the kernel vsid, we take the top for context from
++       * the range. context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
++       * Here we know that (ea >> 60) == 0xc
++       */
++      lis     r9,(MAX_USER_CONTEXT + 1)@ha
++      addi    r9,r9,(MAX_USER_CONTEXT + 1)@l
++
++      srdi    r10,r11,SID_SHIFT
++      rldimi  r10,r9,USER_ESID_BITS,0 /* proto vsid */
++      ASM_VSID_SCRAMBLE(r10, r9, 256M)
++      rldic   r9,r10,12,16    /* r9 = vsid << 12 */
++
++0:
+       /* Hash to the primary group */
+       ld      r10,PACASTABVIRT(r13)
+-      mfspr   r11,SPRN_DAR
+-      srdi    r11,r11,28
++      srdi    r11,r11,SID_SHIFT
+       rldimi  r10,r11,7,52    /* r10 = first ste of the group */
+-      /* Calculate VSID */
+-      /* This is a kernel address, so protovsid = ESID | 1 << 37 */
+-      li      r9,0x1
+-      rldimi  r11,r9,(CONTEXT_BITS + USER_ESID_BITS),0
+-      ASM_VSID_SCRAMBLE(r11, r9, 256M)
+-      rldic   r9,r11,12,16    /* r9 = vsid << 12 */
+-
+       /* Search the primary group for a free entry */
+ 1:    ld      r11,0(r10)      /* Test valid bit of the current ste    */
+       andi.   r11,r11,0x80
+--- a/arch/powerpc/mm/hash_utils_64.c
++++ b/arch/powerpc/mm/hash_utils_64.c
+@@ -194,6 +194,11 @@ int htab_bolt_mapping(unsigned long vsta
+               unsigned long vpn  = hpt_vpn(vaddr, vsid, ssize);
+               unsigned long tprot = prot;
++              /*
++               * If we hit a bad address return error.
++               */
++              if (!vsid)
++                      return -1;
+               /* Make kernel text executable */
+               if (overlaps_kernel_text(vaddr, vaddr + step))
+                       tprot &= ~HPTE_R_N;
+@@ -923,11 +928,6 @@ int hash_page(unsigned long ea, unsigned
+       DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
+               ea, access, trap);
+-      if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) {
+-              DBG_LOW(" out of pgtable range !\n");
+-              return 1;
+-      }
+-
+       /* Get region & vsid */
+       switch (REGION_ID(ea)) {
+       case USER_REGION_ID:
+@@ -958,6 +958,11 @@ int hash_page(unsigned long ea, unsigned
+       }
+       DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);
++      /* Bad address. */
++      if (!vsid) {
++              DBG_LOW("Bad address!\n");
++              return 1;
++      }
+       /* Get pgdir */
+       pgdir = mm->pgd;
+       if (pgdir == NULL)
+@@ -1127,6 +1132,8 @@ void hash_preload(struct mm_struct *mm,
+       /* Get VSID */
+       ssize = user_segment_size(ea);
+       vsid = get_vsid(mm->context.id, ea, ssize);
++      if (!vsid)
++              return;
+       /* Hash doesn't like irqs */
+       local_irq_save(flags);
+@@ -1219,6 +1226,9 @@ static void kernel_map_linear_page(unsig
+       hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
+       hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
++      /* Don't create HPTE entries for bad address */
++      if (!vsid)
++              return;
+       ret = ppc_md.hpte_insert(hpteg, vpn, __pa(vaddr),
+                                mode, HPTE_V_BOLTED,
+                                mmu_linear_psize, mmu_kernel_ssize);
+--- a/arch/powerpc/mm/mmu_context_hash64.c
++++ b/arch/powerpc/mm/mmu_context_hash64.c
+@@ -29,15 +29,6 @@
+ static DEFINE_SPINLOCK(mmu_context_lock);
+ static DEFINE_IDA(mmu_context_ida);
+-/*
+- * 256MB segment
+- * The proto-VSID space has 2^(CONTEX_BITS + USER_ESID_BITS) - 1 segments
+- * available for user mappings. Each segment contains 2^28 bytes. Each
+- * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
+- * (19 == 37 + 28 - 46).
+- */
+-#define MAX_CONTEXT   ((1UL << CONTEXT_BITS) - 1)
+-
+ int __init_new_context(void)
+ {
+       int index;
+@@ -56,7 +47,7 @@ again:
+       else if (err)
+               return err;
+-      if (index > MAX_CONTEXT) {
++      if (index > MAX_USER_CONTEXT) {
+               spin_lock(&mmu_context_lock);
+               ida_remove(&mmu_context_ida, index);
+               spin_unlock(&mmu_context_lock);
+--- a/arch/powerpc/mm/slb_low.S
++++ b/arch/powerpc/mm/slb_low.S
+@@ -31,10 +31,15 @@
+  * No other registers are examined or changed.
+  */
+ _GLOBAL(slb_allocate_realmode)
+-      /* r3 = faulting address */
++      /*
++       * check for bad kernel/user address
++       * (ea & ~REGION_MASK) >= PGTABLE_RANGE
++       */
++      rldicr. r9,r3,4,(63 - 46 - 4)
++      bne-    8f
+       srdi    r9,r3,60                /* get region */
+-      srdi    r10,r3,28               /* get esid */
++      srdi    r10,r3,SID_SHIFT        /* get esid */
+       cmpldi  cr7,r9,0xc              /* cmp PAGE_OFFSET for later use */
+       /* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
+@@ -56,12 +61,14 @@ _GLOBAL(slb_allocate_realmode)
+        */
+ _GLOBAL(slb_miss_kernel_load_linear)
+       li      r11,0
+-      li      r9,0x1
+       /*
+-       * for 1T we shift 12 bits more.  slb_finish_load_1T will do
+-       * the necessary adjustment
++       * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
++       * r9 = region id.
+        */
+-      rldimi  r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0
++      addis   r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
++      addi    r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l
++
++
+ BEGIN_FTR_SECTION
+       b       slb_finish_load
+ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
+@@ -91,24 +98,19 @@ _GLOBAL(slb_miss_kernel_load_vmemmap)
+       _GLOBAL(slb_miss_kernel_load_io)
+       li      r11,0
+ 6:
+-      li      r9,0x1
+       /*
+-       * for 1T we shift 12 bits more.  slb_finish_load_1T will do
+-       * the necessary adjustment
++       * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
++       * r9 = region id.
+        */
+-      rldimi  r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0
++      addis   r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
++      addi    r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l
++
+ BEGIN_FTR_SECTION
+       b       slb_finish_load
+ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
+       b       slb_finish_load_1T
+-0:    /* user address: proto-VSID = context << 15 | ESID. First check
+-       * if the address is within the boundaries of the user region
+-       */
+-      srdi.   r9,r10,USER_ESID_BITS
+-      bne-    8f                      /* invalid ea bits set */
+-
+-
++0:
+       /* when using slices, we extract the psize off the slice bitmaps
+        * and then we need to get the sllp encoding off the mmu_psize_defs
+        * array.
+@@ -164,15 +166,13 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEG
+       ld      r9,PACACONTEXTID(r13)
+ BEGIN_FTR_SECTION
+       cmpldi  r10,0x1000
+-END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
+-      rldimi  r10,r9,USER_ESID_BITS,0
+-BEGIN_FTR_SECTION
+       bge     slb_finish_load_1T
+ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
+       b       slb_finish_load
+ 8:    /* invalid EA */
+       li      r10,0                   /* BAD_VSID */
++      li      r9,0                    /* BAD_VSID */
+       li      r11,SLB_VSID_USER       /* flags don't much matter */
+       b       slb_finish_load
+@@ -221,8 +221,6 @@ _GLOBAL(slb_allocate_user)
+       /* get context to calculate proto-VSID */
+       ld      r9,PACACONTEXTID(r13)
+-      rldimi  r10,r9,USER_ESID_BITS,0
+-
+       /* fall through slb_finish_load */
+ #endif /* __DISABLED__ */
+@@ -231,9 +229,10 @@ _GLOBAL(slb_allocate_user)
+ /*
+  * Finish loading of an SLB entry and return
+  *
+- * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
++ * r3 = EA, r9 = context, r10 = ESID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
+  */
+ slb_finish_load:
++      rldimi  r10,r9,USER_ESID_BITS,0
+       ASM_VSID_SCRAMBLE(r10,r9,256M)
+       /*
+        * bits above VSID_BITS_256M need to be ignored from r10
+@@ -298,10 +297,11 @@ _GLOBAL(slb_compare_rr_to_size)
+ /*
+  * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return.
+  *
+- * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9
++ * r3 = EA, r9 = context, r10 = ESID(256MB), r11 = flags, clobbers r9
+  */
+ slb_finish_load_1T:
+-      srdi    r10,r10,40-28           /* get 1T ESID */
++      srdi    r10,r10,(SID_SHIFT_1T - SID_SHIFT)      /* get 1T ESID */
++      rldimi  r10,r9,USER_ESID_BITS_1T,0
+       ASM_VSID_SCRAMBLE(r10,r9,1T)
+       /*
+        * bits above VSID_BITS_1T need to be ignored from r10
+--- a/arch/powerpc/mm/tlb_hash64.c
++++ b/arch/powerpc/mm/tlb_hash64.c
+@@ -82,11 +82,11 @@ void hpte_need_flush(struct mm_struct *m
+       if (!is_kernel_addr(addr)) {
+               ssize = user_segment_size(addr);
+               vsid = get_vsid(mm->context.id, addr, ssize);
+-              WARN_ON(vsid == 0);
+       } else {
+               vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
+               ssize = mmu_kernel_ssize;
+       }
++      WARN_ON(vsid == 0);
+       vpn = hpt_vpn(addr, vsid, ssize);
+       rpte = __real_pte(__pte(pte), ptep);
diff --git a/queue-3.8/s390-critical-section-cleanup-vs.-machine-checks.patch b/queue-3.8/s390-critical-section-cleanup-vs.-machine-checks.patch
new file mode 100644 (file)
index 0000000..267b0b9
--- /dev/null
@@ -0,0 +1,54 @@
+From 6551fbdfd8b85d1ab5822ac98abb4fb449bcfae0 Mon Sep 17 00:00:00 2001
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Date: Thu, 28 Feb 2013 16:28:41 +0100
+Subject: s390: critical section cleanup vs. machine checks
+
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+
+commit 6551fbdfd8b85d1ab5822ac98abb4fb449bcfae0 upstream.
+
+The current machine check code uses the registers stored by the machine
+in the lowcore at __LC_GPREGS_SAVE_AREA as the registers of the interrupted
+context. The registers 0-7 of a user process can get clobbered if a machine
+check interrupts the execution of a critical section in entry[64].S.
+
+The reason is that the critical section cleanup code may need to modify
+the PSW and the registers for the previous context to get to the end of a
+critical section. If registers 0-7 have to be replaced, the relevant copy
+will be in the registers, which invalidates the copy in the lowcore. The
+machine check handler needs to explicitly store registers 0-7 to the stack.
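+
+As an editorial illustration only (not part of the patch): in C terms the fix
+amounts to assembling the saved register set from two sources, taking r0-r7
+from the live registers and only r8-r15 from the machine-saved area. All names
+in the sketch below are hypothetical; the real fix is the stm/stmg plus mvc
+sequence in the diff that follows.
+
+    /* Hypothetical sketch of the register-assembly logic. */
+    struct regs_snapshot {
+            unsigned long gprs[16];
+    };
+
+    static void build_mcck_regs(struct regs_snapshot *pt,
+                                const unsigned long live[8],         /* current r0-r7 */
+                                const struct regs_snapshot *lowcore) /* machine-saved */
+    {
+            int i;
+
+            for (i = 0; i < 8; i++)
+                    pt->gprs[i] = live[i];          /* lowcore copy may be stale */
+            for (i = 8; i < 16; i++)
+                    pt->gprs[i] = lowcore->gprs[i]; /* still authoritative */
+    }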
+
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/kernel/entry.S   |    3 ++-
+ arch/s390/kernel/entry64.S |    5 +++--
+ 2 files changed, 5 insertions(+), 3 deletions(-)
+
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -636,7 +636,8 @@ ENTRY(mcck_int_handler)
+       UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER
+ mcck_skip:
+       SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT
+-      mvc     __PT_R0(64,%r11),__LC_GPREGS_SAVE_AREA
++      stm     %r0,%r7,__PT_R0(%r11)
++      mvc     __PT_R8(32,%r11),__LC_GPREGS_SAVE_AREA+32
+       stm     %r8,%r9,__PT_PSW(%r11)
+       xc      __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
+       l       %r1,BASED(.Ldo_machine_check)
+--- a/arch/s390/kernel/entry64.S
++++ b/arch/s390/kernel/entry64.S
+@@ -678,8 +678,9 @@ ENTRY(mcck_int_handler)
+       UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER
+       LAST_BREAK %r14
+ mcck_skip:
+-      lghi    %r14,__LC_GPREGS_SAVE_AREA
+-      mvc     __PT_R0(128,%r11),0(%r14)
++      lghi    %r14,__LC_GPREGS_SAVE_AREA+64
++      stmg    %r0,%r7,__PT_R0(%r11)
++      mvc     __PT_R8(64,%r11),0(%r14)
+       stmg    %r8,%r9,__PT_PSW(%r11)
+       xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+       lgr     %r2,%r11                # pass pointer to pt_regs
diff --git a/queue-3.8/s390-mm-fix-flush_tlb_kernel_range.patch b/queue-3.8/s390-mm-fix-flush_tlb_kernel_range.patch
new file mode 100644 (file)
index 0000000..10cb059
--- /dev/null
@@ -0,0 +1,44 @@
+From f6a70a07079518280022286a1dceb797d12e1edf Mon Sep 17 00:00:00 2001
+From: Heiko Carstens <heiko.carstens@de.ibm.com>
+Date: Mon, 4 Mar 2013 14:14:11 +0100
+Subject: s390/mm: fix flush_tlb_kernel_range()
+
+From: Heiko Carstens <heiko.carstens@de.ibm.com>
+
+commit f6a70a07079518280022286a1dceb797d12e1edf upstream.
+
+Our flush_tlb_kernel_range() implementation calls __tlb_flush_mm() with
+&init_mm as argument. __tlb_flush_mm() however will only flush tlbs
+for the passed in mm if its mm_cpumask is not empty.
+
+For the init_mm, however, its mm_cpumask never has any bits set, which in
+turn means that our flush_tlb_kernel_range() implementation doesn't
+work at all.
+
+This can be easily verified with a vmalloc/vfree loop which allocates
+a page, writes to it and then frees the page again. A crash will follow
+almost instantly.
+
+To fix this, remove the cpumask_empty() check in __tlb_flush_mm(), since
+there shouldn't be too many mms with a zero mm_cpumask, besides the
+init_mm of course.
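+
+A minimal reproducer along the lines described above might look like the
+following module-init sketch (editorial, not part of the patch; names are
+hypothetical, assuming a standard kernel module build):
+
+    #include <linux/init.h>
+    #include <linux/module.h>
+    #include <linux/mm.h>
+    #include <linux/vmalloc.h>
+
+    static int __init vmalloc_loop_init(void)
+    {
+            int i;
+
+            for (i = 0; i < 100000; i++) {
+                    char *p = vmalloc(PAGE_SIZE);
+
+                    if (!p)
+                            return -ENOMEM;
+                    *p = 0x55;   /* fault the page in, creating a TLB entry */
+                    vfree(p);    /* with a broken kernel TLB flush, the stale
+                                    entry survives the free */
+            }
+            return 0;
+    }
+    module_init(vmalloc_loop_init);
+
+    MODULE_LICENSE("GPL");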
+
+Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/include/asm/tlbflush.h |    2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/arch/s390/include/asm/tlbflush.h
++++ b/arch/s390/include/asm/tlbflush.h
+@@ -74,8 +74,6 @@ static inline void __tlb_flush_idte(unsi
+ static inline void __tlb_flush_mm(struct mm_struct * mm)
+ {
+-      if (unlikely(cpumask_empty(mm_cpumask(mm))))
+-              return;
+       /*
+        * If the machine has IDTE we prefer to do a per mm flush
+        * on all cpus instead of doing a local flush if the mm
diff --git a/queue-3.8/selinux-use-gfp_atomic-under-spin_lock.patch b/queue-3.8/selinux-use-gfp_atomic-under-spin_lock.patch
new file mode 100644 (file)
index 0000000..35b41df
--- /dev/null
@@ -0,0 +1,38 @@
+From 4502403dcf8f5c76abd4dbab8726c8e4ecb5cd34 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Sat, 16 Mar 2013 12:48:11 +0300
+Subject: selinux: use GFP_ATOMIC under spin_lock
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit 4502403dcf8f5c76abd4dbab8726c8e4ecb5cd34 upstream.
+
+The call tree here is:
+
+sk_clone_lock()              <- takes bh_lock_sock(newsk);
+xfrm_sk_clone_policy()
+__xfrm_sk_clone_policy()
+clone_policy()               <- uses GFP_ATOMIC for allocations
+security_xfrm_policy_clone()
+security_ops->xfrm_policy_clone_security()
+selinux_xfrm_policy_clone()
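+
+The innermost allocation therefore runs with the socket bottom-half locked
+and must not sleep. A generic sketch of the rule follows (editorial, not the
+actual selinux code path; names are illustrative):
+
+    #include <linux/slab.h>
+    #include <linux/spinlock.h>
+
+    static DEFINE_SPINLOCK(demo_lock);
+
+    static void *alloc_under_lock(size_t len)
+    {
+            void *p;
+
+            spin_lock(&demo_lock);
+            /* Sleeping is forbidden while the lock is held; GFP_KERNEL may
+             * sleep to reclaim memory, so an atomic allocation is required. */
+            p = kmalloc(len, GFP_ATOMIC);
+            spin_unlock(&demo_lock);
+            return p;
+    }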
+
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: James Morris <james.l.morris@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ security/selinux/xfrm.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/security/selinux/xfrm.c
++++ b/security/selinux/xfrm.c
+@@ -310,7 +310,7 @@ int selinux_xfrm_policy_clone(struct xfr
+       if (old_ctx) {
+               new_ctx = kmalloc(sizeof(*old_ctx) + old_ctx->ctx_len,
+-                                GFP_KERNEL);
++                                GFP_ATOMIC);
+               if (!new_ctx)
+                       return -ENOMEM;
diff --git a/queue-3.8/series b/queue-3.8/series
index b92b946300f8da6bd5bcce073105e7ede47975fa..d158094792f086387fc92b32a77f4576b7056406 100644 (file)
--- a/queue-3.8/series
+++ b/queue-3.8/series
@@ -33,3 +33,20 @@ hwmon-pmbus-ltc2978-fix-temperature-reporting.patch
 hwmon-lineage-pem-add-missing-terminating-entry-for-pem__attributes.patch
 w1-gpio-remove-erroneous-__exit-and-__exit_p.patch
 w1-fix-oops-when-w1_search-is-called-from-netlink-connector.patch
+powerpc-fix-stab-initialization.patch
+powerpc-fix-cputable-entry-for-970mp-rev-1.0.patch
+powerpc-make-vsid_bits-dependency-explicit.patch
+powerpc-update-kernel-vsid-range.patch
+powerpc-rename-user_esid_bits-to-esid_bits.patch
+arm-davinci-edma-fix-dmaengine-induced-null-pointer-dereference-on-da830.patch
+selinux-use-gfp_atomic-under-spin_lock.patch
+perf-x86-fix-wrmsr_on_cpu-warning-on-suspend-resume.patch
+perf-x86-fix-link-failure-for-non-intel-configs.patch
+s390-critical-section-cleanup-vs.-machine-checks.patch
+s390-mm-fix-flush_tlb_kernel_range.patch
+btrfs-use-rcu_barrier-to-wait-for-bdev-puts-at-unmount.patch
+mtd-nand-reintroduce-nand_no_readrdy-as-nand_need_readrdy.patch
+kbuild-fix-make-headers_check-with-make-3.80.patch
+atmel_lcdfb-fix-16-bpp-modes-on-older-socs.patch
+net-mlx4_en-initialize-rfs-filters-lock-and-list-in-init_netdev.patch
+net-mlx4_en-disable-rfs-when-running-in-sriov-mode.patch