--- /dev/null
+From 75e1c70fc31490ef8a373ea2a4bea2524099b478 Mon Sep 17 00:00:00 2001
+From: Jeff Moyer <jmoyer@redhat.com>
+Date: Fri, 10 Sep 2010 14:16:00 -0700
+Subject: aio: check for multiplication overflow in do_io_submit
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jeff Moyer <jmoyer@redhat.com>
+
+commit 75e1c70fc31490ef8a373ea2a4bea2524099b478 upstream.
+
+Tavis Ormandy pointed out that do_io_submit does not do proper bounds
+checking on the passed-in iocb array:
+
+ if (unlikely(nr < 0))
+ return -EINVAL;
+
+ if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(iocbpp)))))
+ return -EFAULT; ^^^^^^^^^^^^^^^^^^
+
+The attached patch checks for overflow, and if it is detected, the
+number of iocbs submitted is scaled down to a number that will fit in
+the long. This is an ok thing to do, as sys_io_submit is documented as
+returning the number of iocbs submitted, so callers should handle a
+return value of less than the 'nr' argument passed in.
+
+Reported-by: Tavis Ormandy <taviso@cmpxchg8b.com>
+Signed-off-by: Jeff Moyer <jmoyer@redhat.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/aio.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -1667,6 +1667,9 @@ long do_io_submit(aio_context_t ctx_id,
+ if (unlikely(nr < 0))
+ return -EINVAL;
+
++ if (unlikely(nr > LONG_MAX/sizeof(*iocbpp)))
++ nr = LONG_MAX/sizeof(*iocbpp);
++
+ if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp)))))
+ return -EFAULT;
+
--- /dev/null
+From a0c42bac79731276c9b2f28d54f9e658fcf843a2 Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Wed, 22 Sep 2010 13:05:03 -0700
+Subject: aio: do not return ERESTARTSYS as a result of AIO
+
+From: Jan Kara <jack@suse.cz>
+
+commit a0c42bac79731276c9b2f28d54f9e658fcf843a2 upstream.
+
+OCFS2 can return ERESTARTSYS from its write function when the process is
+signalled while waiting for a cluster lock (and the filesystem is mounted
+with intr mount option). Generally, it seems reasonable to allow
+filesystems to return this error code from its IO functions. As we must
+not leak ERESTARTSYS (and similar error codes) to userspace as a result of
+an AIO operation, we have to properly convert it to EINTR inside AIO code
+(restarting the syscall isn't really an option because other AIO could
+have been already submitted by the same io_submit syscall).
+
+Signed-off-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Jeff Moyer <jmoyer@redhat.com>
+Cc: Christoph Hellwig <hch@infradead.org>
+Cc: Zach Brown <zach.brown@oracle.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/aio.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -712,8 +712,16 @@ static ssize_t aio_run_iocb(struct kiocb
+ */
+ ret = retry(iocb);
+
+- if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED)
++ if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) {
++ /*
++ * There's no easy way to restart the syscall since other AIO's
++ * may be already running. Just fail this IO with EINTR.
++ */
++ if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR ||
++ ret == -ERESTARTNOHAND || ret == -ERESTART_RESTARTBLOCK))
++ ret = -EINTR;
+ aio_complete(iocb, ret, 0);
++ }
+ out:
+ spin_lock_irq(&ctx->ctx_lock);
+
--- /dev/null
+From 1b0e372d7b52c9fc96348779015a6db7df7f286e Mon Sep 17 00:00:00 2001
+From: Daniel J Blueman <daniel.blueman@gmail.com>
+Date: Tue, 3 Aug 2010 11:09:13 +0100
+Subject: ALSA: hda - Fix beep frequency on IDT 92HD73xx and 92HD71Bxx codecs
+
+From: Daniel J Blueman <daniel.blueman@gmail.com>
+
+commit 1b0e372d7b52c9fc96348779015a6db7df7f286e upstream.
+
+Fix HDA beep frequency on IDT 92HD73xx and 92HD71Bxx codecs.
+These codecs use the standard beep frequency calculation although the
+datasheet says it's linear frequency.
+
+Other IDT/STAC codecs might have the same problem. They should be
+fixed individually later.
+
+Signed-off-by: Daniel J Blueman <daniel.blueman@gmail.com>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Cc: أحمد المحمودي <aelmahmoudy@sabily.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ sound/pci/hda/patch_sigmatel.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -202,6 +202,7 @@ struct sigmatel_spec {
+ unsigned int spdif_mute: 1;
+ unsigned int check_volume_offset:1;
+ unsigned int auto_mic:1;
++ unsigned int linear_tone_beep:1;
+
+ /* gpio lines */
+ unsigned int eapd_mask;
+@@ -3802,7 +3803,7 @@ static int stac92xx_parse_auto_config(st
+ return err;
+ if (codec->beep) {
+ /* IDT/STAC codecs have linear beep tone parameter */
+- codec->beep->linear_tone = 1;
++ codec->beep->linear_tone = spec->linear_tone_beep;
+ /* if no beep switch is available, make its own one */
+ caps = query_amp_caps(codec, nid, HDA_OUTPUT);
+ if (!(caps & AC_AMPCAP_MUTE)) {
+@@ -5005,6 +5006,7 @@ static int patch_stac9200(struct hda_cod
+
+ codec->no_trigger_sense = 1;
+ codec->spec = spec;
++ spec->linear_tone_beep = 1;
+ spec->num_pins = ARRAY_SIZE(stac9200_pin_nids);
+ spec->pin_nids = stac9200_pin_nids;
+ spec->board_config = snd_hda_check_board_config(codec, STAC_9200_MODELS,
+@@ -5068,6 +5070,7 @@ static int patch_stac925x(struct hda_cod
+
+ codec->no_trigger_sense = 1;
+ codec->spec = spec;
++ spec->linear_tone_beep = 1;
+ spec->num_pins = ARRAY_SIZE(stac925x_pin_nids);
+ spec->pin_nids = stac925x_pin_nids;
+
+@@ -5153,6 +5156,7 @@ static int patch_stac92hd73xx(struct hda
+
+ codec->no_trigger_sense = 1;
+ codec->spec = spec;
++ spec->linear_tone_beep = 0;
+ codec->slave_dig_outs = stac92hd73xx_slave_dig_outs;
+ spec->num_pins = ARRAY_SIZE(stac92hd73xx_pin_nids);
+ spec->pin_nids = stac92hd73xx_pin_nids;
+@@ -5300,6 +5304,7 @@ static int patch_stac92hd83xxx(struct hd
+
+ codec->no_trigger_sense = 1;
+ codec->spec = spec;
++ spec->linear_tone_beep = 1;
+ codec->slave_dig_outs = stac92hd83xxx_slave_dig_outs;
+ spec->digbeep_nid = 0x21;
+ spec->mux_nids = stac92hd83xxx_mux_nids;
+@@ -5522,6 +5527,7 @@ static int patch_stac92hd71bxx(struct hd
+
+ codec->no_trigger_sense = 1;
+ codec->spec = spec;
++ spec->linear_tone_beep = 0;
+ codec->patch_ops = stac92xx_patch_ops;
+ spec->num_pins = STAC92HD71BXX_NUM_PINS;
+ switch (codec->vendor_id) {
+@@ -5779,6 +5785,7 @@ static int patch_stac922x(struct hda_cod
+
+ codec->no_trigger_sense = 1;
+ codec->spec = spec;
++ spec->linear_tone_beep = 1;
+ spec->num_pins = ARRAY_SIZE(stac922x_pin_nids);
+ spec->pin_nids = stac922x_pin_nids;
+ spec->board_config = snd_hda_check_board_config(codec, STAC_922X_MODELS,
+@@ -5883,6 +5890,7 @@ static int patch_stac927x(struct hda_cod
+
+ codec->no_trigger_sense = 1;
+ codec->spec = spec;
++ spec->linear_tone_beep = 1;
+ codec->slave_dig_outs = stac927x_slave_dig_outs;
+ spec->num_pins = ARRAY_SIZE(stac927x_pin_nids);
+ spec->pin_nids = stac927x_pin_nids;
+@@ -6018,6 +6026,7 @@ static int patch_stac9205(struct hda_cod
+
+ codec->no_trigger_sense = 1;
+ codec->spec = spec;
++ spec->linear_tone_beep = 1;
+ spec->num_pins = ARRAY_SIZE(stac9205_pin_nids);
+ spec->pin_nids = stac9205_pin_nids;
+ spec->board_config = snd_hda_check_board_config(codec, STAC_9205_MODELS,
+@@ -6174,6 +6183,7 @@ static int patch_stac9872(struct hda_cod
+ return -ENOMEM;
+ codec->no_trigger_sense = 1;
+ codec->spec = spec;
++ spec->linear_tone_beep = 1;
+ spec->num_pins = ARRAY_SIZE(stac9872_pin_nids);
+ spec->pin_nids = stac9872_pin_nids;
+
--- /dev/null
+From 8d2602e0778299e2d6084f03086b716d6e7a1e1e Mon Sep 17 00:00:00 2001
+From: Nicolas Ferre <nicolas.ferre@atmel.com>
+Date: Fri, 20 Aug 2010 16:44:33 +0200
+Subject: AT91: change dma resource index
+
+From: Nicolas Ferre <nicolas.ferre@atmel.com>
+
+commit 8d2602e0778299e2d6084f03086b716d6e7a1e1e upstream.
+
+Reported-by: Dan Liang <dan.liang@atmel.com>
+Signed-off-by: Nicolas Ferre <nicolas.ferre@atmel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/arm/mach-at91/at91sam9g45_devices.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/mach-at91/at91sam9g45_devices.c
++++ b/arch/arm/mach-at91/at91sam9g45_devices.c
+@@ -46,7 +46,7 @@ static struct resource hdmac_resources[]
+ .end = AT91_BASE_SYS + AT91_DMA + SZ_512 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+- [2] = {
++ [1] = {
+ .start = AT91SAM9G45_ID_DMA,
+ .end = AT91SAM9G45_ID_DMA,
+ .flags = IORESOURCE_IRQ,
--- /dev/null
+From 692ebd17c2905313fff3c504c249c6a0faad16ec Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Tue, 21 Sep 2010 11:51:01 +0200
+Subject: bdi: Fix warnings in __mark_inode_dirty for /dev/zero and friends
+
+From: Jan Kara <jack@suse.cz>
+
+commit 692ebd17c2905313fff3c504c249c6a0faad16ec upstream.
+
+Inodes of devices such as /dev/zero can get dirty for example via
+utime(2) syscall or due to atime update. Backing device of such inodes
+(zero_bdi, etc.) is however unable to handle dirty inodes and thus
+__mark_inode_dirty complains. In fact, inode should be rather dirtied
+against backing device of the filesystem holding it. This is generally a
+good rule except for filesystems such as 'bdev' or 'mtd_inodefs'. Inodes
+in these pseudofilesystems are referenced from ordinary filesystem
+inodes and carry mapping with real data of the device. Thus for these
+inodes we have to use inode->i_mapping->backing_dev_info as we did so
+far. We distinguish these filesystems by checking whether sb->s_bdi
+points to a non-trivial backing device or not.
+
+Example: Assume we have an ext3 filesystem on /dev/sda1 mounted on /.
+There's a device inode A described by a path "/dev/sdb" on this
+filesystem. This inode will be dirtied against backing device "8:0"
+after this patch. bdev filesystem contains block device inode B coupled
+with our inode A. When someone modifies a page of /dev/sdb, it's B that
+gets dirtied and the dirtying happens against the backing device "8:16".
+Thus both inodes get filed to a correct bdi list.
+
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/fs-writeback.c | 23 +++++++++++++++++++++--
+ 1 file changed, 21 insertions(+), 2 deletions(-)
+
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -28,8 +28,6 @@
+ #include <linux/buffer_head.h>
+ #include "internal.h"
+
+-#define inode_to_bdi(inode) ((inode)->i_mapping->backing_dev_info)
+-
+ /*
+ * We don't actually have pdflush, but this one is exported though /proc...
+ */
+@@ -62,6 +60,27 @@ int writeback_in_progress(struct backing
+ return !list_empty(&bdi->work_list);
+ }
+
++static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
++{
++ struct super_block *sb = inode->i_sb;
++ struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;
++
++ /*
++ * For inodes on standard filesystems, we use superblock's bdi. For
++ * inodes on virtual filesystems, we want to use inode mapping's bdi
++ * because they can possibly point to something useful (think about
++ * block_dev filesystem).
++ */
++ if (sb->s_bdi && sb->s_bdi != &noop_backing_dev_info) {
++ /* Some device inodes could play dirty tricks. Catch them... */
++ WARN(bdi != sb->s_bdi && bdi_cap_writeback_dirty(bdi),
++ "Dirtiable inode bdi %s != sb bdi %s\n",
++ bdi->name, sb->s_bdi->name);
++ return sb->s_bdi;
++ }
++ return bdi;
++}
++
+ static void bdi_queue_work(struct backing_dev_info *bdi,
+ struct wb_writeback_work *work)
+ {
--- /dev/null
+From 976e48f8a5b02fc33f3e5cad87fb3fcea041a49c Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Tue, 21 Sep 2010 11:48:55 +0200
+Subject: bdi: Initialize noop_backing_dev_info properly
+
+From: Jan Kara <jack@suse.cz>
+
+commit 976e48f8a5b02fc33f3e5cad87fb3fcea041a49c upstream.
+
+Properly initialize this backing dev info so that writeback code does not
+barf when getting to it e.g. via sb->s_bdi.
+
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ mm/backing-dev.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/mm/backing-dev.c
++++ b/mm/backing-dev.c
+@@ -29,6 +29,7 @@ EXPORT_SYMBOL_GPL(default_backing_dev_in
+
+ struct backing_dev_info noop_backing_dev_info = {
+ .name = "noop",
++ .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
+ };
+ EXPORT_SYMBOL_GPL(noop_backing_dev_info);
+
+@@ -254,6 +255,7 @@ static int __init default_bdi_init(void)
+ err = bdi_init(&default_backing_dev_info);
+ if (!err)
+ bdi_register(&default_backing_dev_info, NULL, "default");
++ err = bdi_init(&noop_backing_dev_info);
+
+ return err;
+ }
--- /dev/null
+From 371d217ee1ff8b418b8f73fb2a34990f951ec2d4 Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Tue, 21 Sep 2010 11:49:01 +0200
+Subject: char: Mark /dev/zero and /dev/kmem as not capable of writeback
+
+From: Jan Kara <jack@suse.cz>
+
+commit 371d217ee1ff8b418b8f73fb2a34990f951ec2d4 upstream.
+
+These devices don't do any writeback but their device inodes still can get
+dirty so mark bdi appropriately so that bdi code does the right thing and files
+inodes to lists of bdi carrying the device inodes.
+
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/char/mem.c | 3 ++-
+ fs/char_dev.c | 4 +++-
+ 2 files changed, 5 insertions(+), 2 deletions(-)
+
+--- a/drivers/char/mem.c
++++ b/drivers/char/mem.c
+@@ -788,10 +788,11 @@ static const struct file_operations zero
+ /*
+ * capabilities for /dev/zero
+ * - permits private mappings, "copies" are taken of the source of zeros
++ * - no writeback happens
+ */
+ static struct backing_dev_info zero_bdi = {
+ .name = "char/mem",
+- .capabilities = BDI_CAP_MAP_COPY,
++ .capabilities = BDI_CAP_MAP_COPY | BDI_CAP_NO_ACCT_AND_WRITEBACK,
+ };
+
+ static const struct file_operations full_fops = {
+--- a/fs/char_dev.c
++++ b/fs/char_dev.c
+@@ -40,7 +40,9 @@ struct backing_dev_info directly_mappabl
+ #endif
+ /* permit direct mmap, for read, write or exec */
+ BDI_CAP_MAP_DIRECT |
+- BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP),
++ BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP |
++ /* no writeback happens */
++ BDI_CAP_NO_ACCT_AND_WRITEBACK),
+ };
+
+ static struct kobj_map *cdev_map;
--- /dev/null
+From d5164dbf1f651d1e955b158fb70a9c844cc91cd1 Mon Sep 17 00:00:00 2001
+From: Islam Amer <pharon@gmail.com>
+Date: Thu, 24 Jun 2010 13:39:47 -0400
+Subject: dell-wmi: Add support for eject key on Dell Studio 1555
+
+From: Islam Amer <pharon@gmail.com>
+
+commit d5164dbf1f651d1e955b158fb70a9c844cc91cd1 upstream.
+
+Fixes the problem that pressing the eject key on Dell Studio 1555 does not
+work and produces the message:
+
+dell-wmi: Unknown key 0 pressed
+
+Signed-off-by: Islam Amer <pharon@gmail.com>
+Cc: Kyle McMartin <kyle@mcmartin.ca>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/platform/x86/dell-wmi.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/platform/x86/dell-wmi.c
++++ b/drivers/platform/x86/dell-wmi.c
+@@ -221,7 +221,7 @@ static void dell_wmi_notify(u32 value, v
+ return;
+ }
+
+- if (dell_new_hk_type)
++ if (dell_new_hk_type || buffer_entry[1] == 0x0)
+ reported_key = (int)buffer_entry[2];
+ else
+ reported_key = (int)buffer_entry[1] & 0xffff;
--- /dev/null
+From df08cdc7ef606509debe7677c439be0ca48790e4 Mon Sep 17 00:00:00 2001
+From: Andrew Morton <akpm@linux-foundation.org>
+Date: Wed, 22 Sep 2010 13:05:11 -0700
+Subject: drivers/pci/intel-iommu.c: fix build with older gcc's
+
+From: Andrew Morton <akpm@linux-foundation.org>
+
+commit df08cdc7ef606509debe7677c439be0ca48790e4 upstream.
+
+drivers/pci/intel-iommu.c: In function `__iommu_calculate_agaw':
+drivers/pci/intel-iommu.c:437: sorry, unimplemented: inlining failed in call to 'width_to_agaw': function body not available
+drivers/pci/intel-iommu.c:445: sorry, unimplemented: called from here
+
+Move the offending function (and its siblings) to top-of-file, remove the
+forward declaration.
+
+Addresses https://bugzilla.kernel.org/show_bug.cgi?id=17441
+
+Reported-by: Martin Mokrejs <mmokrejs@ribosome.natur.cuni.cz>
+Cc: David Woodhouse <dwmw2@infradead.org>
+Cc: Jesse Barnes <jbarnes@virtuousgeek.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/pci/intel-iommu.c | 90 +++++++++++++++++++++-------------------------
+ 1 file changed, 43 insertions(+), 47 deletions(-)
+
+--- a/drivers/pci/intel-iommu.c
++++ b/drivers/pci/intel-iommu.c
+@@ -71,6 +71,49 @@
+ #define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
+ #define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
+
++/* page table handling */
++#define LEVEL_STRIDE (9)
++#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
++
++static inline int agaw_to_level(int agaw)
++{
++ return agaw + 2;
++}
++
++static inline int agaw_to_width(int agaw)
++{
++ return 30 + agaw * LEVEL_STRIDE;
++}
++
++static inline int width_to_agaw(int width)
++{
++ return (width - 30) / LEVEL_STRIDE;
++}
++
++static inline unsigned int level_to_offset_bits(int level)
++{
++ return (level - 1) * LEVEL_STRIDE;
++}
++
++static inline int pfn_level_offset(unsigned long pfn, int level)
++{
++ return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
++}
++
++static inline unsigned long level_mask(int level)
++{
++ return -1UL << level_to_offset_bits(level);
++}
++
++static inline unsigned long level_size(int level)
++{
++ return 1UL << level_to_offset_bits(level);
++}
++
++static inline unsigned long align_to_level(unsigned long pfn, int level)
++{
++ return (pfn + level_size(level) - 1) & level_mask(level);
++}
+
+ /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
+ are never going to work. */
+@@ -434,8 +477,6 @@ void free_iova_mem(struct iova *iova)
+ }
+
+
+-static inline int width_to_agaw(int width);
+-
+ static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
+ {
+ unsigned long sagaw;
+@@ -646,51 +687,6 @@ out:
+ spin_unlock_irqrestore(&iommu->lock, flags);
+ }
+
+-/* page table handling */
+-#define LEVEL_STRIDE (9)
+-#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
+-
+-static inline int agaw_to_level(int agaw)
+-{
+- return agaw + 2;
+-}
+-
+-static inline int agaw_to_width(int agaw)
+-{
+- return 30 + agaw * LEVEL_STRIDE;
+-
+-}
+-
+-static inline int width_to_agaw(int width)
+-{
+- return (width - 30) / LEVEL_STRIDE;
+-}
+-
+-static inline unsigned int level_to_offset_bits(int level)
+-{
+- return (level - 1) * LEVEL_STRIDE;
+-}
+-
+-static inline int pfn_level_offset(unsigned long pfn, int level)
+-{
+- return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
+-}
+-
+-static inline unsigned long level_mask(int level)
+-{
+- return -1UL << level_to_offset_bits(level);
+-}
+-
+-static inline unsigned long level_size(int level)
+-{
+- return 1UL << level_to_offset_bits(level);
+-}
+-
+-static inline unsigned long align_to_level(unsigned long pfn, int level)
+-{
+- return (pfn + level_size(level) - 1) & level_mask(level);
+-}
+-
+ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
+ unsigned long pfn)
+ {
--- /dev/null
+From fd02db9de73faebc51240619c7c7f99bee9f65c7 Mon Sep 17 00:00:00 2001
+From: Dan Rosenberg <drosenberg@vsecurity.com>
+Date: Wed, 22 Sep 2010 13:05:09 -0700
+Subject: drivers/video/sis/sis_main.c: prevent reading uninitialized stack memory
+
+From: Dan Rosenberg <drosenberg@vsecurity.com>
+
+commit fd02db9de73faebc51240619c7c7f99bee9f65c7 upstream.
+
+The FBIOGET_VBLANK device ioctl allows unprivileged users to read 16 bytes
+of uninitialized stack memory, because the "reserved" member of the
+fb_vblank struct declared on the stack is not altered or zeroed before
+being copied back to the user. This patch takes care of it.
+
+Signed-off-by: Dan Rosenberg <dan.j.rosenberg@gmail.com>
+Cc: Thomas Winischhofer <thomas@winischhofer.net>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/video/sis/sis_main.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/video/sis/sis_main.c
++++ b/drivers/video/sis/sis_main.c
+@@ -1701,6 +1701,9 @@ static int sisfb_ioctl(struct fb_info *i
+ break;
+
+ case FBIOGET_VBLANK:
++
++ memset(&sisvbblank, 0, sizeof(struct fb_vblank));
++
+ sisvbblank.count = 0;
+ sisvbblank.flags = sisfb_setupvbblankflags(ivideo, &sisvbblank.vcount, &sisvbblank.hcount);
+
--- /dev/null
+From b4aaa78f4c2f9cde2f335b14f4ca30b01f9651ca Mon Sep 17 00:00:00 2001
+From: Dan Rosenberg <drosenberg@vsecurity.com>
+Date: Wed, 15 Sep 2010 19:08:24 -0400
+Subject: drivers/video/via/ioctl.c: prevent reading uninitialized stack memory
+
+From: Dan Rosenberg <drosenberg@vsecurity.com>
+
+commit b4aaa78f4c2f9cde2f335b14f4ca30b01f9651ca upstream.
+
+The VIAFB_GET_INFO device ioctl allows unprivileged users to read 246
+bytes of uninitialized stack memory, because the "reserved" member of
+the viafb_ioctl_info struct declared on the stack is not altered or
+zeroed before being copied back to the user. This patch takes care of
+it.
+
+Signed-off-by: Dan Rosenberg <dan.j.rosenberg@gmail.com>
+Signed-off-by: Florian Tobias Schandinat <FlorianSchandinat@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/video/via/ioctl.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/video/via/ioctl.c
++++ b/drivers/video/via/ioctl.c
+@@ -25,6 +25,8 @@ int viafb_ioctl_get_viafb_info(u_long ar
+ {
+ struct viafb_ioctl_info viainfo;
+
++ memset(&viainfo, 0, sizeof(struct viafb_ioctl_info));
++
+ viainfo.viafb_id = VIAID;
+ viainfo.vendor_id = PCI_VIA_VENDOR_ID;
+
--- /dev/null
+From 41a51428916ab04587bacee2dda61c4a0c4fc02f Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Fri, 17 Sep 2010 08:22:30 +0100
+Subject: drm/i915,agp/intel: Add second set of PCI-IDs for B43
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+commit 41a51428916ab04587bacee2dda61c4a0c4fc02f upstream.
+
+There is a second revision of B43 (a desktop gen4 part) floating around,
+functionally equivalent to the original B43, so simply add the new
+PCI-IDs.
+
+Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=30221
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/char/agp/intel-agp.c | 2 ++
+ drivers/char/agp/intel-agp.h | 2 ++
+ drivers/gpu/drm/i915/i915_drv.c | 1 +
+ 3 files changed, 5 insertions(+)
+
+--- a/drivers/char/agp/intel-agp.c
++++ b/drivers/char/agp/intel-agp.c
+@@ -805,6 +805,8 @@ static const struct intel_driver_descrip
+ "G45/G43", NULL, &intel_i965_driver },
+ { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG,
+ "B43", NULL, &intel_i965_driver },
++ { PCI_DEVICE_ID_INTEL_B43_1_HB, PCI_DEVICE_ID_INTEL_B43_1_IG,
++ "B43", NULL, &intel_i965_driver },
+ { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG,
+ "G41", NULL, &intel_i965_driver },
+ { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
+--- a/drivers/char/agp/intel-agp.h
++++ b/drivers/char/agp/intel-agp.h
+@@ -178,6 +178,8 @@
+ #define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2
+ #define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40
+ #define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42
++#define PCI_DEVICE_ID_INTEL_B43_1_HB 0x2E90
++#define PCI_DEVICE_ID_INTEL_B43_1_IG 0x2E92
+ #define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40
+ #define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42
+ #define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -175,6 +175,7 @@ static const struct pci_device_id pciidl
+ INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),
+ INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),
+ INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),
++ INTEL_VGA_DEVICE(0x2e92, &intel_g45_info), /* B43_G.1 */
+ INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
+ INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
+ INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
--- /dev/null
+From 615661f3948a066fd22a36fe8ea0c528b75ee373 Mon Sep 17 00:00:00 2001
+From: Marcin Slusarz <marcin.slusarz@gmail.com>
+Date: Sun, 22 Aug 2010 20:54:08 +0200
+Subject: drm/nv50: initialize ramht_refs list for faked 0 channel
+
+From: Marcin Slusarz <marcin.slusarz@gmail.com>
+
+commit 615661f3948a066fd22a36fe8ea0c528b75ee373 upstream.
+
+We need it for PFIFO_INTR_CACHE_ERROR interrupt handling,
+because nouveau_fifo_swmthd looks for matching gpuobj in
+ramht_refs list.
+It fixes kernel panic in nouveau_gpuobj_ref_find.
+
+Signed-off-by: Marcin Slusarz <marcin.slusarz@gmail.com>
+Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/nouveau/nv50_instmem.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
++++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
+@@ -141,6 +141,8 @@ nv50_instmem_init(struct drm_device *dev
+ chan->file_priv = (struct drm_file *)-2;
+ dev_priv->fifos[0] = dev_priv->fifos[127] = chan;
+
++ INIT_LIST_HEAD(&chan->ramht_refs);
++
+ /* Channel's PRAMIN object + heap */
+ ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0,
+ NULL, &chan->ramin);
--- /dev/null
+From 59b25ed91400ace98d6cf0d59b1cb6928ad5cd37 Mon Sep 17 00:00:00 2001
+From: Morten H. Larsen <m-larsen@post6.tele.dk>
+Date: Tue, 31 Aug 2010 22:29:13 -0400
+Subject: Fix call to replaced SuperIO functions
+
+From: Morten H. Larsen <m-larsen@post6.tele.dk>
+
+commit 59b25ed91400ace98d6cf0d59b1cb6928ad5cd37 upstream.
+
+This patch fixes the failure to compile Alpha Generic because of
+previously overlooked calls to ns87312_enable_ide(). The function has
+been replaced by newer SuperIO code.
+
+Tested-by: Michael Cree <mcree@orcon.net.nz>
+Signed-off-by: Morten H. Larsen <m-larsen@post6.tele.dk>
+Signed-off-by: Matt Turner <mattst88@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/alpha/kernel/proto.h | 3 ---
+ arch/alpha/kernel/sys_cabriolet.c | 19 ++++++++++++++++---
+ arch/alpha/kernel/sys_takara.c | 11 +++++++++--
+ 3 files changed, 25 insertions(+), 8 deletions(-)
+
+--- a/arch/alpha/kernel/proto.h
++++ b/arch/alpha/kernel/proto.h
+@@ -156,9 +156,6 @@ extern void SMC669_Init(int);
+ /* es1888.c */
+ extern void es1888_init(void);
+
+-/* ns87312.c */
+-extern void ns87312_enable_ide(long ide_base);
+-
+ /* ../lib/fpreg.c */
+ extern void alpha_write_fp_reg (unsigned long reg, unsigned long val);
+ extern unsigned long alpha_read_fp_reg (unsigned long reg);
+--- a/arch/alpha/kernel/sys_cabriolet.c
++++ b/arch/alpha/kernel/sys_cabriolet.c
+@@ -33,7 +33,7 @@
+ #include "irq_impl.h"
+ #include "pci_impl.h"
+ #include "machvec_impl.h"
+-
++#include "pc873xx.h"
+
+ /* Note mask bit is true for DISABLED irqs. */
+ static unsigned long cached_irq_mask = ~0UL;
+@@ -236,17 +236,30 @@ cabriolet_map_irq(struct pci_dev *dev, u
+ }
+
+ static inline void __init
++cabriolet_enable_ide(void)
++{
++ if (pc873xx_probe() == -1) {
++ printk(KERN_ERR "Probing for PC873xx Super IO chip failed.\n");
++ } else {
++ printk(KERN_INFO "Found %s Super IO chip at 0x%x\n",
++ pc873xx_get_model(), pc873xx_get_base());
++
++ pc873xx_enable_ide();
++ }
++}
++
++static inline void __init
+ cabriolet_init_pci(void)
+ {
+ common_init_pci();
+- ns87312_enable_ide(0x398);
++ cabriolet_enable_ide();
+ }
+
+ static inline void __init
+ cia_cab_init_pci(void)
+ {
+ cia_init_pci();
+- ns87312_enable_ide(0x398);
++ cabriolet_enable_ide();
+ }
+
+ /*
+--- a/arch/alpha/kernel/sys_takara.c
++++ b/arch/alpha/kernel/sys_takara.c
+@@ -29,7 +29,7 @@
+ #include "irq_impl.h"
+ #include "pci_impl.h"
+ #include "machvec_impl.h"
+-
++#include "pc873xx.h"
+
+ /* Note mask bit is true for DISABLED irqs. */
+ static unsigned long cached_irq_mask[2] = { -1, -1 };
+@@ -264,7 +264,14 @@ takara_init_pci(void)
+ alpha_mv.pci_map_irq = takara_map_irq_srm;
+
+ cia_init_pci();
+- ns87312_enable_ide(0x26e);
++
++ if (pc873xx_probe() == -1) {
++ printk(KERN_ERR "Probing for PC873xx Super IO chip failed.\n");
++ } else {
++ printk(KERN_INFO "Found %s Super IO chip at 0x%x\n",
++ pc873xx_get_model(), pc873xx_get_base());
++ pc873xx_enable_ide();
++ }
+ }
+
+
--- /dev/null
+From f362b73244fb16ea4ae127ced1467dd8adaa7733 Mon Sep 17 00:00:00 2001
+From: Daniel J Blueman <daniel.blueman@gmail.com>
+Date: Tue, 17 Aug 2010 23:56:55 +0100
+Subject: Fix unprotected access to task credentials in waitid()
+
+From: Daniel J Blueman <daniel.blueman@gmail.com>
+
+commit f362b73244fb16ea4ae127ced1467dd8adaa7733 upstream.
+
+Using a program like the following:
+
+ #include <stdlib.h>
+ #include <unistd.h>
+ #include <sys/types.h>
+ #include <sys/wait.h>
+
+ int main() {
+ id_t id;
+ siginfo_t infop;
+ pid_t res;
+
+ id = fork();
+ if (id == 0) { sleep(1); exit(0); }
+ kill(id, SIGSTOP);
+ alarm(1);
+ waitid(P_PID, id, &infop, WCONTINUED);
+ return 0;
+ }
+
+to call waitid() on a stopped process results in access to the child task's
+credentials without the RCU read lock being held - which may be replaced in the
+meantime - eliciting the following warning:
+
+ ===================================================
+ [ INFO: suspicious rcu_dereference_check() usage. ]
+ ---------------------------------------------------
+ kernel/exit.c:1460 invoked rcu_dereference_check() without protection!
+
+ other info that might help us debug this:
+
+ rcu_scheduler_active = 1, debug_locks = 1
+ 2 locks held by waitid02/22252:
+ #0: (tasklist_lock){.?.?..}, at: [<ffffffff81061ce5>] do_wait+0xc5/0x310
+ #1: (&(&sighand->siglock)->rlock){-.-...}, at: [<ffffffff810611da>]
+ wait_consider_task+0x19a/0xbe0
+
+ stack backtrace:
+ Pid: 22252, comm: waitid02 Not tainted 2.6.35-323cd+ #3
+ Call Trace:
+ [<ffffffff81095da4>] lockdep_rcu_dereference+0xa4/0xc0
+ [<ffffffff81061b31>] wait_consider_task+0xaf1/0xbe0
+ [<ffffffff81061d15>] do_wait+0xf5/0x310
+ [<ffffffff810620b6>] sys_waitid+0x86/0x1f0
+ [<ffffffff8105fce0>] ? child_wait_callback+0x0/0x70
+ [<ffffffff81003282>] system_call_fastpath+0x16/0x1b
+
+This is fixed by holding the RCU read lock in wait_task_continued() to ensure
+that the task's current credentials aren't destroyed between us reading the
+cred pointer and us reading the UID from those credentials.
+
+Furthermore, protect wait_task_stopped() in the same way.
+
+We don't need to keep holding the RCU read lock once we've read the UID from
+the credentials as holding the RCU read lock doesn't stop the target task from
+changing its creds under us - so the credentials may be outdated immediately
+after we've read the pointer, lock or no lock.
+
+Signed-off-by: Daniel J Blueman <daniel.blueman@gmail.com>
+Signed-off-by: David Howells <dhowells@redhat.com>
+Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Acked-by: Oleg Nesterov <oleg@redhat.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/exit.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -1383,8 +1383,7 @@ static int wait_task_stopped(struct wait
+ if (!unlikely(wo->wo_flags & WNOWAIT))
+ *p_code = 0;
+
+- /* don't need the RCU readlock here as we're holding a spinlock */
+- uid = __task_cred(p)->uid;
++ uid = task_uid(p);
+ unlock_sig:
+ spin_unlock_irq(&p->sighand->siglock);
+ if (!exit_code)
+@@ -1457,7 +1456,7 @@ static int wait_task_continued(struct wa
+ }
+ if (!unlikely(wo->wo_flags & WNOWAIT))
+ p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
+- uid = __task_cred(p)->uid;
++ uid = task_uid(p);
+ spin_unlock_irq(&p->sighand->siglock);
+
+ pid = task_pid_vnr(p);
--- /dev/null
+From 5f4874903df3562b9d5649fc1cf7b8c6bb238e42 Mon Sep 17 00:00:00 2001
+From: Steven Whitehouse <swhiteho@redhat.com>
+Date: Thu, 9 Sep 2010 14:45:00 +0100
+Subject: GFS2: gfs2_logd should be using interruptible waits
+
+From: Steven Whitehouse <swhiteho@redhat.com>
+
+commit 5f4874903df3562b9d5649fc1cf7b8c6bb238e42 upstream.
+
+Looks like this crept in, in a recent update.
+
+Reported-by: Krzysztof Urbaniak <urban@bash.org.pl>
+Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/gfs2/log.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/gfs2/log.c
++++ b/fs/gfs2/log.c
+@@ -932,7 +932,7 @@ int gfs2_logd(void *data)
+
+ do {
+ prepare_to_wait(&sdp->sd_logd_waitq, &wait,
+- TASK_UNINTERRUPTIBLE);
++ TASK_INTERRUPTIBLE);
+ if (!gfs2_ail_flush_reqd(sdp) &&
+ !gfs2_jrnl_flush_reqd(sdp) &&
+ !kthread_should_stop())
--- /dev/null
+From 8ca3eb08097f6839b2206e2242db4179aee3cfb3 Mon Sep 17 00:00:00 2001
+From: Luck, Tony <tony.luck@intel.com>
+Date: Tue, 24 Aug 2010 11:44:18 -0700
+Subject: guard page for stacks that grow upwards
+
+From: Luck, Tony <tony.luck@intel.com>
+
+commit 8ca3eb08097f6839b2206e2242db4179aee3cfb3 upstream.
+
+pa-risc and ia64 have stacks that grow upwards. Check that
+they do not run into other mappings. By making VM_GROWSUP
+0x0 on architectures that do not ever use it, we can avoid
+some unpleasant #ifdefs in check_stack_guard_page().
+
+Signed-off-by: Tony Luck <tony.luck@intel.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: dann frazier <dannf@debian.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/linux/mm.h | 8 +++++++-
+ mm/memory.c | 15 +++++++++++----
+ mm/mmap.c | 3 ---
+ 3 files changed, 18 insertions(+), 8 deletions(-)
+
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -78,7 +78,11 @@ extern unsigned int kobjsize(const void
+ #define VM_MAYSHARE 0x00000080
+
+ #define VM_GROWSDOWN 0x00000100 /* general info on the segment */
++#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
+ #define VM_GROWSUP 0x00000200
++#else
++#define VM_GROWSUP 0x00000000
++#endif
+ #define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */
+ #define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */
+
+@@ -1329,8 +1333,10 @@ unsigned long ra_submit(struct file_ra_s
+
+ /* Do stack extension */
+ extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
+-#ifdef CONFIG_IA64
++#if VM_GROWSUP
+ extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
++#else
++ #define expand_upwards(vma, address) do { } while (0)
+ #endif
+ extern int expand_stack_downwards(struct vm_area_struct *vma,
+ unsigned long address);
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -2791,11 +2791,9 @@ out_release:
+ }
+
+ /*
+- * This is like a special single-page "expand_downwards()",
+- * except we must first make sure that 'address-PAGE_SIZE'
++ * This is like a special single-page "expand_{down|up}wards()",
++ * except we must first make sure that 'address{-|+}PAGE_SIZE'
+ * doesn't hit another vma.
+- *
+- * The "find_vma()" will do the right thing even if we wrap
+ */
+ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
+ {
+@@ -2814,6 +2812,15 @@ static inline int check_stack_guard_page
+
+ expand_stack(vma, address - PAGE_SIZE);
+ }
++ if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
++ struct vm_area_struct *next = vma->vm_next;
++
++ /* As VM_GROWSDOWN but s/below/above/ */
++ if (next && next->vm_start == address + PAGE_SIZE)
++ return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
++
++ expand_upwards(vma, address + PAGE_SIZE);
++ }
+ return 0;
+ }
+
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -1704,9 +1704,6 @@ static int acct_stack_growth(struct vm_a
+ * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
+ * vma is the last one with address > vma->vm_end. Have to extend vma.
+ */
+-#ifndef CONFIG_IA64
+-static
+-#endif
+ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+ {
+ int error;
--- /dev/null
+From 068e35eee9ef98eb4cab55181977e24995d273be Mon Sep 17 00:00:00 2001
+From: Matt Helsley <matthltc@us.ibm.com>
+Date: Mon, 13 Sep 2010 13:01:18 -0700
+Subject: hw breakpoints: Fix pid namespace bug
+
+From: Matt Helsley <matthltc@us.ibm.com>
+
+commit 068e35eee9ef98eb4cab55181977e24995d273be upstream.
+
+Hardware breakpoints can't be registered within pid namespaces
+because tsk->pid is passed rather than the pid in the current
+namespace.
+
+(See https://bugzilla.kernel.org/show_bug.cgi?id=17281 )
+
+This is a quick fix demonstrating the problem but is not the
+best method of solving the problem since passing pids internally
+is not the best way to avoid pid namespace bugs. Subsequent patches
+will show a better solution.
+
+Much thanks to Frederic Weisbecker <fweisbec@gmail.com> for doing
+the bulk of the work finding this bug.
+
+Reported-by: Robin Green <greenrd@greenrd.org>
+Signed-off-by: Matt Helsley <matthltc@us.ibm.com>
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: Prasad <prasad@linux.vnet.ibm.com>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
+LKML-Reference: <f63454af09fb1915717251570423eb9ddd338340.1284407762.git.matthltc@us.ibm.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/hw_breakpoint.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/kernel/hw_breakpoint.c
++++ b/kernel/hw_breakpoint.c
+@@ -417,7 +417,8 @@ register_user_hw_breakpoint(struct perf_
+ perf_overflow_handler_t triggered,
+ struct task_struct *tsk)
+ {
+- return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered);
++ return perf_event_create_kernel_counter(attr, -1, task_pid_vnr(tsk),
++ triggered);
+ }
+ EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
+
--- /dev/null
+From f574c843191728d9407b766a027f779dcd27b272 Mon Sep 17 00:00:00 2001
+From: Tony Luck <tony.luck@intel.com>
+Date: Thu, 9 Sep 2010 15:16:56 -0700
+Subject: IA64: fix siglock
+
+From: Tony Luck <tony.luck@intel.com>
+
+commit f574c843191728d9407b766a027f779dcd27b272 upstream.
+
+When ia64 converted to using ticket locks, an inline implementation
+of trylock/unlock in fsys.S was missed. This was not noticed because
+in most circumstances it simply resulted in using the slow path because
+the siglock was apparently not available (under old spinlock rules).
+
+Problems occur when the ticket spinlock has value 0x0 (when first
+initialised, or when it wraps around). At this point the fsys.S
+code acquires the lock (changing the 0x0 to 0x1). If another process
+attempts to get the lock at this point, it will change the value from
+0x1 to 0x2 (using new ticket lock rules). Then the fsys.S code will
+free the lock using old spinlock rules by writing 0x0 to it. From
+here a variety of bad things can happen.
+
+Signed-off-by: Tony Luck <tony.luck@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/ia64/kernel/fsys.S | 46 +++++++++++++++++++++++++++++++++++++++-------
+ 1 file changed, 39 insertions(+), 7 deletions(-)
+
+--- a/arch/ia64/kernel/fsys.S
++++ b/arch/ia64/kernel/fsys.S
+@@ -424,14 +424,26 @@ EX(.fail_efault, ld8 r14=[r33]) // r14
+ andcm r14=r14,r17 // filter out SIGKILL & SIGSTOP
+
+ #ifdef CONFIG_SMP
+- mov r17=1
+- ;;
+- cmpxchg4.acq r18=[r31],r17,ar.ccv // try to acquire the lock
++ // __ticket_spin_trylock(r31)
++ ld4 r17=[r31]
+ mov r8=EINVAL // default to EINVAL
+ ;;
++ extr r9=r17,17,15
++ ;;
++ xor r18=r17,r9
++ adds r19=1,r17
++ ;;
++ extr.u r18=r18,0,15
++ ;;
++ cmp.eq p0,p7=0,r18
++(p7) br.cond.spnt.many .lock_contention
++ mov.m ar.ccv=r17
++ ;;
++ cmpxchg4.acq r9=[r31],r19,ar.ccv
++ ;;
++ cmp4.eq p0,p7=r9,r17
++(p7) br.cond.spnt.many .lock_contention
+ ld8 r3=[r2] // re-read current->blocked now that we hold the lock
+- cmp4.ne p6,p0=r18,r0
+-(p6) br.cond.spnt.many .lock_contention
+ ;;
+ #else
+ ld8 r3=[r2] // re-read current->blocked now that we hold the lock
+@@ -490,7 +502,17 @@ EX(.fail_efault, ld8 r14=[r33]) // r14
+ (p6) br.cond.spnt.few 1b // yes -> retry
+
+ #ifdef CONFIG_SMP
+- st4.rel [r31]=r0 // release the lock
++ // __ticket_spin_unlock(r31)
++ adds r31=2,r31
++ ;;
++ ld2.bias r2=[r31]
++ mov r3=65534
++ ;;
++ adds r2=2,r2
++ ;;
++ and r3=r3,r2
++ ;;
++ st2.rel [r31]=r3
+ #endif
+ SSM_PSR_I(p0, p9, r31)
+ ;;
+@@ -512,7 +534,17 @@ EX(.fail_efault, (p15) st8 [r34]=r3)
+
+ .sig_pending:
+ #ifdef CONFIG_SMP
+- st4.rel [r31]=r0 // release the lock
++ // __ticket_spin_unlock(r31)
++ adds r31=2,r31
++ ;;
++ ld2.bias r2=[r31]
++ mov r3=65534
++ ;;
++ adds r2=2,r2
++ ;;
++ and r3=r3,r2
++ ;;
++ st2.rel [r31]=r3
+ #endif
+ SSM_PSR_I(p0, p9, r17)
+ ;;
--- /dev/null
+From 2d2b6901649a62977452be85df53eda2412def24 Mon Sep 17 00:00:00 2001
+From: Petr Tesarik <ptesarik@suse.cz>
+Date: Wed, 15 Sep 2010 15:35:48 -0700
+Subject: IA64: Optimize ticket spinlocks in fsys_rt_sigprocmask
+
+From: Petr Tesarik <ptesarik@suse.cz>
+
+commit 2d2b6901649a62977452be85df53eda2412def24 upstream.
+
+Tony's fix (f574c843191728d9407b766a027f779dcd27b272) has a small bug,
+it incorrectly uses "r3" as a scratch register in the first of the two
+unlock paths ... it is also inefficient. Optimize the fast path again.
+
+Signed-off-by: Petr Tesarik <ptesarik@suse.cz>
+Signed-off-by: Tony Luck <tony.luck@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/ia64/kernel/fsys.S | 42 +++++++++++-------------------------------
+ 1 file changed, 11 insertions(+), 31 deletions(-)
+
+--- a/arch/ia64/kernel/fsys.S
++++ b/arch/ia64/kernel/fsys.S
+@@ -420,34 +420,31 @@ EX(.fail_efault, ld8 r14=[r33]) // r14
+ ;;
+
+ RSM_PSR_I(p0, r18, r19) // mask interrupt delivery
+- mov ar.ccv=0
+ andcm r14=r14,r17 // filter out SIGKILL & SIGSTOP
++ mov r8=EINVAL // default to EINVAL
+
+ #ifdef CONFIG_SMP
+ // __ticket_spin_trylock(r31)
+ ld4 r17=[r31]
+- mov r8=EINVAL // default to EINVAL
+- ;;
+- extr r9=r17,17,15
+ ;;
+- xor r18=r17,r9
++ mov.m ar.ccv=r17
++ extr.u r9=r17,17,15
+ adds r19=1,r17
++ extr.u r18=r17,0,15
+ ;;
+- extr.u r18=r18,0,15
++ cmp.eq p6,p7=r9,r18
+ ;;
+- cmp.eq p0,p7=0,r18
++(p6) cmpxchg4.acq r9=[r31],r19,ar.ccv
++(p6) dep.z r20=r19,1,15 // next serving ticket for unlock
+ (p7) br.cond.spnt.many .lock_contention
+- mov.m ar.ccv=r17
+- ;;
+- cmpxchg4.acq r9=[r31],r19,ar.ccv
+ ;;
+ cmp4.eq p0,p7=r9,r17
++ adds r31=2,r31
+ (p7) br.cond.spnt.many .lock_contention
+ ld8 r3=[r2] // re-read current->blocked now that we hold the lock
+ ;;
+ #else
+ ld8 r3=[r2] // re-read current->blocked now that we hold the lock
+- mov r8=EINVAL // default to EINVAL
+ #endif
+ add r18=IA64_TASK_PENDING_OFFSET+IA64_SIGPENDING_SIGNAL_OFFSET,r16
+ add r19=IA64_TASK_SIGNAL_OFFSET,r16
+@@ -503,16 +500,8 @@ EX(.fail_efault, ld8 r14=[r33]) // r14
+
+ #ifdef CONFIG_SMP
+ // __ticket_spin_unlock(r31)
+- adds r31=2,r31
+- ;;
+- ld2.bias r2=[r31]
+- mov r3=65534
+- ;;
+- adds r2=2,r2
+- ;;
+- and r3=r3,r2
+- ;;
+- st2.rel [r31]=r3
++ st2.rel [r31]=r20
++ mov r20=0 // i must not leak kernel bits...
+ #endif
+ SSM_PSR_I(p0, p9, r31)
+ ;;
+@@ -535,16 +524,7 @@ EX(.fail_efault, (p15) st8 [r34]=r3)
+ .sig_pending:
+ #ifdef CONFIG_SMP
+ // __ticket_spin_unlock(r31)
+- adds r31=2,r31
+- ;;
+- ld2.bias r2=[r31]
+- mov r3=65534
+- ;;
+- adds r2=2,r2
+- ;;
+- and r3=r3,r2
+- ;;
+- st2.rel [r31]=r3
++ st2.rel [r31]=r20 // release the lock
+ #endif
+ SSM_PSR_I(p0, p9, r17)
+ ;;
--- /dev/null
+From 611da04f7a31b2208e838be55a42c7a1310ae321 Mon Sep 17 00:00:00 2001
+From: Eric Paris <eparis@redhat.com>
+Date: Wed, 28 Jul 2010 10:18:37 -0400
+Subject: inotify: send IN_UNMOUNT events
+
+From: Eric Paris <eparis@redhat.com>
+
+commit 611da04f7a31b2208e838be55a42c7a1310ae321 upstream.
+
+Since the .31 or so notify rewrite inotify has not sent events about
+inodes which are unmounted. This patch restores those events.
+
+Signed-off-by: Eric Paris <eparis@redhat.com>
+Cc: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/notify/inotify/inotify_user.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/fs/notify/inotify/inotify_user.c
++++ b/fs/notify/inotify/inotify_user.c
+@@ -96,8 +96,11 @@ static inline __u32 inotify_arg_to_mask(
+ {
+ __u32 mask;
+
+- /* everything should accept their own ignored and cares about children */
+- mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD);
++ /*
++ * everything should accept their own ignored, cares about children,
++ * and should receive events when the inode is unmounted
++ */
++ mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD | FS_UNMOUNT);
+
+ /* mask off the flags used to open the fd */
+ mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT));
--- /dev/null
+From 3d96406c7da1ed5811ea52a3b0905f4f0e295376 Mon Sep 17 00:00:00 2001
+From: David Howells <dhowells@redhat.com>
+Date: Fri, 10 Sep 2010 09:59:51 +0100
+Subject: KEYS: Fix bug in keyctl_session_to_parent() if parent has no session keyring
+
+From: David Howells <dhowells@redhat.com>
+
+commit 3d96406c7da1ed5811ea52a3b0905f4f0e295376 upstream.
+
+Fix a bug in keyctl_session_to_parent() whereby it tries to check the ownership
+of the parent process's session keyring whether or not the parent has a session
+keyring [CVE-2010-2960].
+
+This results in the following oops:
+
+ BUG: unable to handle kernel NULL pointer dereference at 00000000000000a0
+ IP: [<ffffffff811ae4dd>] keyctl_session_to_parent+0x251/0x443
+ ...
+ Call Trace:
+ [<ffffffff811ae2f3>] ? keyctl_session_to_parent+0x67/0x443
+ [<ffffffff8109d286>] ? __do_fault+0x24b/0x3d0
+ [<ffffffff811af98c>] sys_keyctl+0xb4/0xb8
+ [<ffffffff81001eab>] system_call_fastpath+0x16/0x1b
+
+if the parent process has no session keyring.
+
+If the system is using pam_keyinit then it mostly protected against this as all
+processes derived from a login will have inherited the session keyring created
+by pam_keyinit during the log in procedure.
+
+To test this, pam_keyinit calls need to be commented out in /etc/pam.d/.
+
+Reported-by: Tavis Ormandy <taviso@cmpxchg8b.com>
+Signed-off-by: David Howells <dhowells@redhat.com>
+Acked-by: Tavis Ormandy <taviso@cmpxchg8b.com>
+Cc: dann frazier <dannf@debian.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ security/keys/keyctl.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/security/keys/keyctl.c
++++ b/security/keys/keyctl.c
+@@ -1292,7 +1292,8 @@ long keyctl_session_to_parent(void)
+ goto not_permitted;
+
+ /* the keyrings must have the same UID */
+- if (pcred->tgcred->session_keyring->uid != mycred->euid ||
++ if ((pcred->tgcred->session_keyring &&
++ pcred->tgcred->session_keyring->uid != mycred->euid) ||
+ mycred->tgcred->session_keyring->uid != mycred->euid)
+ goto not_permitted;
+
--- /dev/null
+From 9d1ac65a9698513d00e5608d93fca0c53f536c14 Mon Sep 17 00:00:00 2001
+From: David Howells <dhowells@redhat.com>
+Date: Fri, 10 Sep 2010 09:59:46 +0100
+Subject: KEYS: Fix RCU no-lock warning in keyctl_session_to_parent()
+
+From: David Howells <dhowells@redhat.com>
+
+commit 9d1ac65a9698513d00e5608d93fca0c53f536c14 upstream.
+
+There's an unprotected access to the parent process's credentials in the middle
+of keyctl_session_to_parent(). This results in the following RCU warning:
+
+ ===================================================
+ [ INFO: suspicious rcu_dereference_check() usage. ]
+ ---------------------------------------------------
+ security/keys/keyctl.c:1291 invoked rcu_dereference_check() without protection!
+
+ other info that might help us debug this:
+
+ rcu_scheduler_active = 1, debug_locks = 0
+ 1 lock held by keyctl-session-/2137:
+ #0: (tasklist_lock){.+.+..}, at: [<ffffffff811ae2ec>] keyctl_session_to_parent+0x60/0x236
+
+ stack backtrace:
+ Pid: 2137, comm: keyctl-session- Not tainted 2.6.36-rc2-cachefs+ #1
+ Call Trace:
+ [<ffffffff8105606a>] lockdep_rcu_dereference+0xaa/0xb3
+ [<ffffffff811ae379>] keyctl_session_to_parent+0xed/0x236
+ [<ffffffff811af77e>] sys_keyctl+0xb4/0xb6
+ [<ffffffff81001eab>] system_call_fastpath+0x16/0x1b
+
+The code should take the RCU read lock to make sure the parent's credentials
+don't go away, even though it's holding a spinlock and has IRQ disabled.
+
+Signed-off-by: David Howells <dhowells@redhat.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: dann frazier <dannf@debian.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ security/keys/keyctl.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/security/keys/keyctl.c
++++ b/security/keys/keyctl.c
+@@ -1259,6 +1259,7 @@ long keyctl_session_to_parent(void)
+ keyring_r = NULL;
+
+ me = current;
++ rcu_read_lock();
+ write_lock_irq(&tasklist_lock);
+
+ parent = me->real_parent;
+@@ -1306,6 +1307,7 @@ long keyctl_session_to_parent(void)
+ set_ti_thread_flag(task_thread_info(parent), TIF_NOTIFY_RESUME);
+
+ write_unlock_irq(&tasklist_lock);
++ rcu_read_unlock();
+ if (oldcred)
+ put_cred(oldcred);
+ return 0;
+@@ -1314,6 +1316,7 @@ already_same:
+ ret = 0;
+ not_permitted:
+ write_unlock_irq(&tasklist_lock);
++ rcu_read_unlock();
+ put_cred(cred);
+ return ret;
+
--- /dev/null
+From mtosatti@redhat.com Thu Sep 23 11:11:40 2010
+From: Avi Kivity <avi@redhat.com>
+Date: Fri, 17 Sep 2010 13:13:13 -0300
+Subject: KVM: Keep slot ID in memory slot structure
+To: greg@kroah.com
+Cc: avi@redhat.com, mtosatti@redhat.com, stable@kernel.org
+Message-ID: <1284739998-13454-2-git-send-email-mtosatti@redhat.com>
+
+
+From: Avi Kivity <avi@redhat.com>
+
+commit e36d96f7cfaa71870c407131eb4fbd38ea285c01 upstream.
+
+May be used for distinguishing between internal and user slots, or for sorting
+slots in size order.
+
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Cc: Marcelo Tosatti <mtosatti@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/linux/kvm_host.h | 1 +
+ virt/kvm/kvm_main.c | 1 +
+ 2 files changed, 2 insertions(+)
+
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -123,6 +123,7 @@ struct kvm_memory_slot {
+ } *lpage_info[KVM_NR_PAGE_SIZES - 1];
+ unsigned long userspace_addr;
+ int user_alloc;
++ int id;
+ };
+
+ static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -566,6 +566,7 @@ int __kvm_set_memory_region(struct kvm *
+
+ new = old = *memslot;
+
++ new.id = mem->slot;
+ new.base_gfn = base_gfn;
+ new.npages = npages;
+ new.flags = mem->flags;
--- /dev/null
+From mtosatti@redhat.com Thu Sep 23 11:12:22 2010
+From: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
+Date: Fri, 17 Sep 2010 13:13:15 -0300
+Subject: KVM: MMU: fix direct sp's access corrupted
+To: greg@kroah.com
+Cc: avi@redhat.com, mtosatti@redhat.com, stable@kernel.org
+Message-ID: <1284739998-13454-4-git-send-email-mtosatti@redhat.com>
+
+
+From: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
+
+commit 9e7b0e7fba45ca3c6357aeb7091ebc281f1de365 upstream.
+
+If the mapping is writable but the dirty flag is not set, we will find
+the read-only direct sp and setup the mapping, then if the write #PF
+occur, we will mark this mapping writable in the read-only direct sp,
+now, other real read-only mapping will happily write it without #PF.
+
+It may hurt guest's COW
+
+Fixed by re-installing the mapping when the write #PF occurs.
+
+Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
+Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ arch/x86/kvm/paging_tmpl.h | 28 ++++++++++++++++++++++++++--
+ 1 file changed, 26 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/paging_tmpl.h
++++ b/arch/x86/kvm/paging_tmpl.h
+@@ -324,8 +324,32 @@ static u64 *FNAME(fetch)(struct kvm_vcpu
+ break;
+ }
+
+- if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep))
+- continue;
++ if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
++ struct kvm_mmu_page *child;
++ unsigned direct_access;
++
++ if (level != gw->level)
++ continue;
++
++ /*
++ * For the direct sp, if the guest pte's dirty bit
++ * changed form clean to dirty, it will corrupt the
++ * sp's access: allow writable in the read-only sp,
++ * so we should update the spte at this point to get
++ * a new sp with the correct access.
++ */
++ direct_access = gw->pt_access & gw->pte_access;
++ if (!is_dirty_gpte(gw->ptes[gw->level - 1]))
++ direct_access &= ~ACC_WRITE_MASK;
++
++ child = page_header(*sptep & PT64_BASE_ADDR_MASK);
++ if (child->role.access == direct_access)
++ continue;
++
++ mmu_page_remove_parent_pte(child, sptep);
++ __set_spte(sptep, shadow_trap_nonpresent_pte);
++ kvm_flush_remote_tlbs(vcpu->kvm);
++ }
+
+ if (is_large_pte(*sptep)) {
+ rmap_remove(vcpu->kvm, sptep);
--- /dev/null
+From mtosatti@redhat.com Thu Sep 23 11:12:48 2010
+From: Andrea Arcangeli <aarcange@redhat.com>
+Date: Fri, 17 Sep 2010 13:13:17 -0300
+Subject: KVM: MMU: fix mmu notifier invalidate handler for huge spte
+To: greg@kroah.com
+Cc: mtosatti@redhat.com, avi@redhat.com, stable@kernel.org
+Message-ID: <1284739998-13454-6-git-send-email-mtosatti@redhat.com>
+
+
+From: Andrea Arcangeli <aarcange@redhat.com>
+
+commit 6e3e243c3b6e0bbd18c6ce0fbc12bc3fe2d77b34 upstream.
+
+The index wasn't calculated correctly (off by one) for huge spte so KVM guest
+was unstable with transparent hugepages.
+
+Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
+Reviewed-by: Rik van Riel <riel@redhat.com>
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Cc: Marcelo Tosatti <mtosatti@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ arch/x86/kvm/mmu.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -799,8 +799,12 @@ static int kvm_handle_hva(struct kvm *kv
+ ret = handler(kvm, &memslot->rmap[gfn_offset], data);
+
+ for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) {
+- int idx = gfn_offset;
+- idx /= KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL + j);
++ unsigned long idx;
++ int nr;
++
++ nr = KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL+j);
++ idx = (memslot->base_gfn+gfn_offset) / nr -
++ memslot->base_gfn / nr;
+ ret |= handler(kvm,
+ &memslot->lpage_info[j][idx].rmap_pde,
+ data);
--- /dev/null
+From mtosatti@redhat.com Thu Sep 23 11:12:05 2010
+From: Avi Kivity <avi@redhat.com>
+Date: Fri, 17 Sep 2010 13:13:14 -0300
+Subject: KVM: Prevent internal slots from being COWed
+To: greg@kroah.com
+Cc: avi@redhat.com, mtosatti@redhat.com, stable@kernel.org
+Message-ID: <1284739998-13454-3-git-send-email-mtosatti@redhat.com>
+
+
+From: Avi Kivity <avi@redhat.com>
+
+commit 7ac77099ce88a0c31b75acd0ec5ef3da4415a6d8 upstream.
+
+If a process with a memory slot is COWed, the page will change its address
+(despite having an elevated reference count). This breaks internal memory
+slots which have their physical addresses loaded into vmcs registers (see
+the APIC access memory slot).
+
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Cc: Marcelo Tosatti <mtosatti@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ arch/x86/kvm/x86.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -5438,6 +5438,11 @@ int kvm_arch_prepare_memory_region(struc
+ int user_alloc)
+ {
+ int npages = memslot->npages;
++ int map_flags = MAP_PRIVATE | MAP_ANONYMOUS;
++
++ /* Prevent internal slot pages from being moved by fork()/COW. */
++ if (memslot->id >= KVM_MEMORY_SLOTS)
++ map_flags = MAP_SHARED | MAP_ANONYMOUS;
+
+ /*To keep backward compatibility with older userspace,
+ *x86 needs to hanlde !user_alloc case.
+@@ -5450,7 +5455,7 @@ int kvm_arch_prepare_memory_region(struc
+ userspace_addr = do_mmap(NULL, 0,
+ npages * PAGE_SIZE,
+ PROT_READ | PROT_WRITE,
+- MAP_PRIVATE | MAP_ANONYMOUS,
++ map_flags,
+ 0);
+ up_write(¤t->mm->mmap_sem);
+
--- /dev/null
+From mtosatti@redhat.com Thu Sep 23 11:13:09 2010
+From: Avi Kivity <avi@redhat.com>
+Date: Fri, 17 Sep 2010 13:13:18 -0300
+Subject: KVM: VMX: Fix host GDT.LIMIT corruption
+To: greg@kroah.com
+Cc: avi@redhat.com, mtosatti@redhat.com, stable@kernel.org
+Message-ID: <1284739998-13454-7-git-send-email-mtosatti@redhat.com>
+
+
+From: Avi Kivity <avi@redhat.com>
+
+commit 3444d7da1839b851eefedd372978d8a982316c36 upstream.
+
+vmx does not restore GDT.LIMIT to the host value, instead it sets it to 64KB.
+This means host userspace can learn a few bits of host memory.
+
+Fix by reloading GDTR when we load other host state.
+
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ arch/x86/kvm/vmx.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -177,6 +177,7 @@ static u64 construct_eptp(unsigned long
+ static DEFINE_PER_CPU(struct vmcs *, vmxarea);
+ static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
+ static DEFINE_PER_CPU(struct list_head, vcpus_on_cpu);
++static DEFINE_PER_CPU(struct desc_ptr, host_gdt);
+
+ static unsigned long *vmx_io_bitmap_a;
+ static unsigned long *vmx_io_bitmap_b;
+@@ -812,6 +813,7 @@ static void __vmx_load_host_state(struct
+ wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
+ }
+ #endif
++ load_gdt(&__get_cpu_var(host_gdt));
+ }
+
+ static void vmx_load_host_state(struct vcpu_vmx *vmx)
+@@ -1314,6 +1316,8 @@ static int hardware_enable(void *garbage
+
+ ept_sync_global();
+
++ store_gdt(&__get_cpu_var(host_gdt));
++
+ return 0;
+ }
+
--- /dev/null
+From mtosatti@redhat.com Thu Sep 23 11:12:35 2010
+From: Gleb Natapov <gleb@redhat.com>
+Date: Fri, 17 Sep 2010 13:13:16 -0300
+Subject: KVM: x86: emulator: inc/dec can have lock prefix
+To: greg@kroah.com
+Cc: avi@redhat.com, mtosatti@redhat.com, stable@kernel.org
+Message-ID: <1284739998-13454-5-git-send-email-mtosatti@redhat.com>
+
+
+From: Gleb Natapov <gleb@redhat.com>
+
+commit c0e0608cb902af1a1fd8d413ec0a07ee1e62c652 upstream.
+
+Mark inc (0xfe/0 0xff/0) and dec (0xfe/1 0xff/1) as lock prefix capable.
+
+Signed-off-by: Gleb Natapov <gleb@redhat.com>
+Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ arch/x86/kvm/emulate.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -345,10 +345,10 @@ static u32 group_table[] = {
+ DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
+ 0, 0, 0, 0,
+ [Group4*8] =
+- ByteOp | DstMem | SrcNone | ModRM, ByteOp | DstMem | SrcNone | ModRM,
++ ByteOp | DstMem | SrcNone | ModRM | Lock, ByteOp | DstMem | SrcNone | ModRM | Lock,
+ 0, 0, 0, 0, 0, 0,
+ [Group5*8] =
+- DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
++ DstMem | SrcNone | ModRM | Lock, DstMem | SrcNone | ModRM | Lock,
+ SrcMem | ModRM | Stack, 0,
+ SrcMem | ModRM | Stack, SrcMem | ModRM | Src2Mem16 | ImplicitOps,
+ SrcMem | ModRM | Stack, 0,
--- /dev/null
+From aa45484031ddee09b06350ab8528bfe5b2c76d1c Mon Sep 17 00:00:00 2001
+From: Christoph Lameter <cl@linux.com>
+Date: Thu, 9 Sep 2010 16:38:17 -0700
+Subject: mm: page allocator: calculate a better estimate of NR_FREE_PAGES when memory is low and kswapd is awake
+
+From: Christoph Lameter <cl@linux.com>
+
+commit aa45484031ddee09b06350ab8528bfe5b2c76d1c upstream.
+
+Ordinarily watermark checks are based on the vmstat NR_FREE_PAGES as it is
+cheaper than scanning a number of lists. To avoid synchronization
+overhead, counter deltas are maintained on a per-cpu basis and drained
+both periodically and when the delta is above a threshold. On large CPU
+systems, the difference between the estimated and real value of
+NR_FREE_PAGES can be very high. If NR_FREE_PAGES is much higher than
+number of real free page in buddy, the VM can allocate pages below min
+watermark, at worst reducing the real number of pages to zero. Even if
+the OOM killer kills some victim for freeing memory, it may not free
+memory if the exit path requires a new page resulting in livelock.
+
+This patch introduces a zone_page_state_snapshot() function (courtesy of
+Christoph) that takes a slightly more accurate view of an arbitrary vmstat
+counter. It is used to read NR_FREE_PAGES while kswapd is awake to avoid
+the watermark being accidentally broken. The estimate is not perfect and
+may result in cache line bounces but is expected to be lighter than the
+IPI calls necessary to continually drain the per-cpu counters while kswapd
+is awake.
+
+Signed-off-by: Christoph Lameter <cl@linux.com>
+Signed-off-by: Mel Gorman <mel@csn.ul.ie>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/linux/mmzone.h | 13 +++++++++++++
+ include/linux/vmstat.h | 22 ++++++++++++++++++++++
+ mm/mmzone.c | 21 +++++++++++++++++++++
+ mm/page_alloc.c | 4 ++--
+ mm/vmstat.c | 15 ++++++++++++++-
+ 5 files changed, 72 insertions(+), 3 deletions(-)
+
+--- a/include/linux/mmzone.h
++++ b/include/linux/mmzone.h
+@@ -284,6 +284,13 @@ struct zone {
+ unsigned long watermark[NR_WMARK];
+
+ /*
++ * When free pages are below this point, additional steps are taken
++ * when reading the number of free pages to avoid per-cpu counter
++ * drift allowing watermarks to be breached
++ */
++ unsigned long percpu_drift_mark;
++
++ /*
+ * We don't know if the memory that we're going to allocate will be freeable
+ * or/and it will be released eventually, so to avoid totally wasting several
+ * GB of ram we must reserve some of the lower zone memory (otherwise we risk
+@@ -456,6 +463,12 @@ static inline int zone_is_oom_locked(con
+ return test_bit(ZONE_OOM_LOCKED, &zone->flags);
+ }
+
++#ifdef CONFIG_SMP
++unsigned long zone_nr_free_pages(struct zone *zone);
++#else
++#define zone_nr_free_pages(zone) zone_page_state(zone, NR_FREE_PAGES)
++#endif /* CONFIG_SMP */
++
+ /*
+ * The "priority" of VM scanning is how much of the queues we will scan in one
+ * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
+--- a/include/linux/vmstat.h
++++ b/include/linux/vmstat.h
+@@ -170,6 +170,28 @@ static inline unsigned long zone_page_st
+ return x;
+ }
+
++/*
++ * More accurate version that also considers the currently pending
++ * deltas. For that we need to loop over all cpus to find the current
++ * deltas. There is no synchronization so the result cannot be
++ * exactly accurate either.
++ */
++static inline unsigned long zone_page_state_snapshot(struct zone *zone,
++ enum zone_stat_item item)
++{
++ long x = atomic_long_read(&zone->vm_stat[item]);
++
++#ifdef CONFIG_SMP
++ int cpu;
++ for_each_online_cpu(cpu)
++ x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];
++
++ if (x < 0)
++ x = 0;
++#endif
++ return x;
++}
++
+ extern unsigned long global_reclaimable_pages(void);
+ extern unsigned long zone_reclaimable_pages(struct zone *zone);
+
+--- a/mm/mmzone.c
++++ b/mm/mmzone.c
+@@ -87,3 +87,24 @@ int memmap_valid_within(unsigned long pf
+ return 1;
+ }
+ #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
++
++#ifdef CONFIG_SMP
++/* Called when a more accurate view of NR_FREE_PAGES is needed */
++unsigned long zone_nr_free_pages(struct zone *zone)
++{
++ unsigned long nr_free_pages = zone_page_state(zone, NR_FREE_PAGES);
++
++ /*
++ * While kswapd is awake, it is considered the zone is under some
++ * memory pressure. Under pressure, there is a risk that
++ * per-cpu-counter-drift will allow the min watermark to be breached
++ * potentially causing a live-lock. While kswapd is awake and
++ * free pages are low, get a better estimate for free pages
++ */
++ if (nr_free_pages < zone->percpu_drift_mark &&
++ !waitqueue_active(&zone->zone_pgdat->kswapd_wait))
++ return zone_page_state_snapshot(zone, NR_FREE_PAGES);
++
++ return nr_free_pages;
++}
++#endif /* CONFIG_SMP */
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1461,7 +1461,7 @@ int zone_watermark_ok(struct zone *z, in
+ {
+ /* free_pages my go negative - that's OK */
+ long min = mark;
+- long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
++ long free_pages = zone_nr_free_pages(z) - (1 << order) + 1;
+ int o;
+
+ if (alloc_flags & ALLOC_HIGH)
+@@ -2424,7 +2424,7 @@ void show_free_areas(void)
+ " all_unreclaimable? %s"
+ "\n",
+ zone->name,
+- K(zone_page_state(zone, NR_FREE_PAGES)),
++ K(zone_nr_free_pages(zone)),
+ K(min_wmark_pages(zone)),
+ K(low_wmark_pages(zone)),
+ K(high_wmark_pages(zone)),
+--- a/mm/vmstat.c
++++ b/mm/vmstat.c
+@@ -138,11 +138,24 @@ static void refresh_zone_stat_thresholds
+ int threshold;
+
+ for_each_populated_zone(zone) {
++ unsigned long max_drift, tolerate_drift;
++
+ threshold = calculate_threshold(zone);
+
+ for_each_online_cpu(cpu)
+ per_cpu_ptr(zone->pageset, cpu)->stat_threshold
+ = threshold;
++
++ /*
++ * Only set percpu_drift_mark if there is a danger that
++ * NR_FREE_PAGES reports the low watermark is ok when in fact
++ * the min watermark could be breached by an allocation
++ */
++ tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
++ max_drift = num_online_cpus() * threshold;
++ if (max_drift > tolerate_drift)
++ zone->percpu_drift_mark = high_wmark_pages(zone) +
++ max_drift;
+ }
+ }
+
+@@ -813,7 +826,7 @@ static void zoneinfo_show_print(struct s
+ "\n scanned %lu"
+ "\n spanned %lu"
+ "\n present %lu",
+- zone_page_state(zone, NR_FREE_PAGES),
++ zone_nr_free_pages(zone),
+ min_wmark_pages(zone),
+ low_wmark_pages(zone),
+ high_wmark_pages(zone),
--- /dev/null
+From 9ee493ce0a60bf42c0f8fd0b0fe91df5704a1cbf Mon Sep 17 00:00:00 2001
+From: Mel Gorman <mel@csn.ul.ie>
+Date: Thu, 9 Sep 2010 16:38:18 -0700
+Subject: mm: page allocator: drain per-cpu lists after direct reclaim allocation fails
+
+From: Mel Gorman <mel@csn.ul.ie>
+
+commit 9ee493ce0a60bf42c0f8fd0b0fe91df5704a1cbf upstream.
+
+When under significant memory pressure, a process enters direct reclaim
+and immediately afterwards tries to allocate a page. If it fails and no
+further progress is made, it's possible the system will go OOM. However,
+on systems with large amounts of memory, it's possible that a significant
+number of pages are on per-cpu lists and inaccessible to the calling
+process. This leads to a process entering direct reclaim more often than
+it should increasing the pressure on the system and compounding the
+problem.
+
+This patch notes that if direct reclaim is making progress but allocations
+are still failing that the system is already under heavy pressure. In
+this case, it drains the per-cpu lists and tries the allocation a second
+time before continuing.
+
+Signed-off-by: Mel Gorman <mel@csn.ul.ie>
+Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
+Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
+Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
+Reviewed-by: Christoph Lameter <cl@linux.com>
+Cc: Dave Chinner <david@fromorbit.com>
+Cc: Wu Fengguang <fengguang.wu@intel.com>
+Cc: David Rientjes <rientjes@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ mm/page_alloc.c | 20 ++++++++++++++++----
+ 1 file changed, 16 insertions(+), 4 deletions(-)
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1843,6 +1843,7 @@ __alloc_pages_direct_reclaim(gfp_t gfp_m
+ struct page *page = NULL;
+ struct reclaim_state reclaim_state;
+ struct task_struct *p = current;
++ bool drained = false;
+
+ cond_resched();
+
+@@ -1861,14 +1862,25 @@ __alloc_pages_direct_reclaim(gfp_t gfp_m
+
+ cond_resched();
+
+- if (order != 0)
+- drain_all_pages();
++ if (unlikely(!(*did_some_progress)))
++ return NULL;
+
+- if (likely(*did_some_progress))
+- page = get_page_from_freelist(gfp_mask, nodemask, order,
++retry:
++ page = get_page_from_freelist(gfp_mask, nodemask, order,
+ zonelist, high_zoneidx,
+ alloc_flags, preferred_zone,
+ migratetype);
++
++ /*
++ * If an allocation failed after direct reclaim, it could be because
++ * pages are pinned on the per-cpu lists. Drain them and try again
++ */
++ if (!page && !drained) {
++ drain_all_pages();
++ drained = true;
++ goto retry;
++ }
++
+ return page;
+ }
+
--- /dev/null
+From 72853e2991a2702ae93aaf889ac7db743a415dd3 Mon Sep 17 00:00:00 2001
+From: Mel Gorman <mel@csn.ul.ie>
+Date: Thu, 9 Sep 2010 16:38:16 -0700
+Subject: mm: page allocator: update free page counters after pages are placed on the free list
+
+From: Mel Gorman <mel@csn.ul.ie>
+
+commit 72853e2991a2702ae93aaf889ac7db743a415dd3 upstream.
+
+When allocating a page, the system uses NR_FREE_PAGES counters to
+determine if watermarks would remain intact after the allocation was made.
+This check is made without interrupts disabled or the zone lock held and
+so is race-prone by nature. Unfortunately, when pages are being freed in
+batch, the counters are updated before the pages are added on the list.
+During this window, the counters are misleading as the pages do not exist
+yet. When under significant pressure on systems with large numbers of
+CPUs, it's possible for processes to make progress even though they should
+have been stalled. This is particularly problematic if a number of the
+processes are using GFP_ATOMIC as the min watermark can be accidentally
+breached and in extreme cases, the system can livelock.
+
+This patch updates the counters after the pages have been added to the
+list. This makes the allocator more cautious with respect to preserving
+the watermarks and mitigates livelock possibilities.
+
+[akpm@linux-foundation.org: avoid modifying incoming args]
+Signed-off-by: Mel Gorman <mel@csn.ul.ie>
+Reviewed-by: Rik van Riel <riel@redhat.com>
+Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
+Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
+Reviewed-by: Christoph Lameter <cl@linux.com>
+Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
+Acked-by: Johannes Weiner <hannes@cmpxchg.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ mm/page_alloc.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -588,13 +588,13 @@ static void free_pcppages_bulk(struct zo
+ {
+ int migratetype = 0;
+ int batch_free = 0;
++ int to_free = count;
+
+ spin_lock(&zone->lock);
+ zone->all_unreclaimable = 0;
+ zone->pages_scanned = 0;
+
+- __mod_zone_page_state(zone, NR_FREE_PAGES, count);
+- while (count) {
++ while (to_free) {
+ struct page *page;
+ struct list_head *list;
+
+@@ -619,8 +619,9 @@ static void free_pcppages_bulk(struct zo
+ /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
+ __free_one_page(page, zone, 0, page_private(page));
+ trace_mm_page_pcpu_drain(page, 0, page_private(page));
+- } while (--count && --batch_free && !list_empty(list));
++ } while (--to_free && --batch_free && !list_empty(list));
+ }
++ __mod_zone_page_state(zone, NR_FREE_PAGES, count);
+ spin_unlock(&zone->lock);
+ }
+
+@@ -631,8 +632,8 @@ static void free_one_page(struct zone *z
+ zone->all_unreclaimable = 0;
+ zone->pages_scanned = 0;
+
+- __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
+ __free_one_page(page, zone, order, migratetype);
++ __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
+ spin_unlock(&zone->lock);
+ }
+
--- /dev/null
+From 2aeadc30de45a72648f271603203ab392b80f607 Mon Sep 17 00:00:00 2001
+From: Andrea Arcangeli <aarcange@redhat.com>
+Date: Wed, 22 Sep 2010 13:05:12 -0700
+Subject: mmap: call unlink_anon_vmas() in __split_vma() in case of error
+
+From: Andrea Arcangeli <aarcange@redhat.com>
+
+commit 2aeadc30de45a72648f271603203ab392b80f607 upstream.
+
+If __split_vma fails because of an out of memory condition the
+anon_vma_chain isn't teardown and freed potentially leading to rmap walks
+accessing freed vma information plus there's a memleak.
+
+Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
+Acked-by: Johannes Weiner <jweiner@redhat.com>
+Acked-by: Rik van Riel <riel@redhat.com>
+Acked-by: Hugh Dickins <hughd@google.com>
+Cc: Marcelo Tosatti <mtosatti@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ mm/mmap.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -1997,6 +1997,7 @@ static int __split_vma(struct mm_struct
+ removed_exe_file_vma(mm);
+ fput(new->vm_file);
+ }
++ unlink_anon_vmas(new);
+ out_free_mpol:
+ mpol_put(pol);
+ out_free_vma:
--- /dev/null
+From c33f543d320843e1732534c3931da4bbd18e6c14 Mon Sep 17 00:00:00 2001
+From: Patrick Simmons <linuxrocks123@netscape.net>
+Date: Wed, 8 Sep 2010 10:34:28 -0400
+Subject: oprofile: Add Support for Intel CPU Family 6 / Model 22 (Intel Celeron 540)
+
+From: Patrick Simmons <linuxrocks123@netscape.net>
+
+commit c33f543d320843e1732534c3931da4bbd18e6c14 upstream.
+
+This patch adds CPU type detection for the Intel Celeron 540, which is
+part of the Core 2 family according to Wikipedia; the family and ID pair
+is absent from the Volume 3B table referenced in the source code
+comments. I have tested this patch on an Intel Celeron 540 machine
+reporting itself as Family 6 Model 22, and OProfile runs on the machine
+without issue.
+
+Spec:
+
+ http://download.intel.com/design/mobile/SPECUPDT/317667.pdf
+
+Signed-off-by: Patrick Simmons <linuxrocks123@netscape.net>
+Acked-by: Andi Kleen <ak@linux.intel.com>
+Acked-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Robert Richter <robert.richter@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/oprofile/nmi_int.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/oprofile/nmi_int.c
++++ b/arch/x86/oprofile/nmi_int.c
+@@ -671,7 +671,9 @@ static int __init ppro_init(char **cpu_t
+ case 14:
+ *cpu_type = "i386/core";
+ break;
+- case 15: case 23:
++ case 0x0f:
++ case 0x16:
++ case 0x17:
+ *cpu_type = "i386/core_2";
+ break;
+ case 0x1a:
--- /dev/null
+From 46b30ea9bc3698bc1d1e6fd726c9601d46fa0a91 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Tue, 21 Sep 2010 07:57:19 +0200
+Subject: percpu: fix pcpu_last_unit_cpu
+
+From: Tejun Heo <tj@kernel.org>
+
+commit 46b30ea9bc3698bc1d1e6fd726c9601d46fa0a91 upstream.
+
+pcpu_first/last_unit_cpu are used to track which cpu has the first and
+last units assigned. This in turn is used to determine the span of a
+chunk for map/unmap cache flushes and whether an address belongs to
+the first chunk or not in per_cpu_ptr_to_phys().
+
+When the number of possible CPUs isn't power of two, a chunk may
+contain unassigned units towards the end of a chunk. The logic to
+determine pcpu_last_unit_cpu was incorrect when there was an unused
+unit at the end of a chunk. It failed to ignore the unused unit and
+assigned the unused marker NR_CPUS to pcpu_last_unit_cpu.
+
+This was discovered through kdump failure which was caused by
+malfunctioning per_cpu_ptr_to_phys() on a kvm setup with 50 possible
+CPUs by CAI Qian.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Reported-by: CAI Qian <caiqian@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ mm/percpu.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/percpu.c
++++ b/mm/percpu.c
+@@ -1413,9 +1413,9 @@ int __init pcpu_setup_first_chunk(const
+
+ if (pcpu_first_unit_cpu == NR_CPUS)
+ pcpu_first_unit_cpu = cpu;
++ pcpu_last_unit_cpu = cpu;
+ }
+ }
+- pcpu_last_unit_cpu = cpu;
+ pcpu_nr_units = unit;
+
+ for_each_possible_cpu(cpu)
--- /dev/null
+From 950eaaca681c44aab87a46225c9e44f902c080aa Mon Sep 17 00:00:00 2001
+From: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Date: Tue, 31 Aug 2010 17:00:18 -0700
+Subject: pid: make setpgid() system call use RCU read-side critical section
+
+From: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+commit 950eaaca681c44aab87a46225c9e44f902c080aa upstream.
+
+[ 23.584719]
+[ 23.584720] ===================================================
+[ 23.585059] [ INFO: suspicious rcu_dereference_check() usage. ]
+[ 23.585176] ---------------------------------------------------
+[ 23.585176] kernel/pid.c:419 invoked rcu_dereference_check() without protection!
+[ 23.585176]
+[ 23.585176] other info that might help us debug this:
+[ 23.585176]
+[ 23.585176]
+[ 23.585176] rcu_scheduler_active = 1, debug_locks = 1
+[ 23.585176] 1 lock held by rc.sysinit/728:
+[ 23.585176] #0: (tasklist_lock){.+.+..}, at: [<ffffffff8104771f>] sys_setpgid+0x5f/0x193
+[ 23.585176]
+[ 23.585176] stack backtrace:
+[ 23.585176] Pid: 728, comm: rc.sysinit Not tainted 2.6.36-rc2 #2
+[ 23.585176] Call Trace:
+[ 23.585176] [<ffffffff8105b436>] lockdep_rcu_dereference+0x99/0xa2
+[ 23.585176] [<ffffffff8104c324>] find_task_by_pid_ns+0x50/0x6a
+[ 23.585176] [<ffffffff8104c35b>] find_task_by_vpid+0x1d/0x1f
+[ 23.585176] [<ffffffff81047727>] sys_setpgid+0x67/0x193
+[ 23.585176] [<ffffffff810029eb>] system_call_fastpath+0x16/0x1b
+[ 24.959669] type=1400 audit(1282938522.956:4): avc: denied { module_request } for pid=766 comm="hwclock" kmod="char-major-10-135" scontext=system_u:system_r:hwclock_t:s0 tcontext=system_u:system_r:kernel_t:s0 tclas
+
+It turns out that the setpgid() system call fails to enter an RCU
+read-side critical section before doing a PID-to-task_struct translation.
+This commit therefore does rcu_read_lock() before the translation, and
+also does rcu_read_unlock() after the last use of the returned pointer.
+
+Reported-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Acked-by: David Howells <dhowells@redhat.com>
+Cc: Jiri Slaby <jslaby@suse.cz>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/sys.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -931,6 +931,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid
+ pgid = pid;
+ if (pgid < 0)
+ return -EINVAL;
++ rcu_read_lock();
+
+ /* From this point forward we keep holding onto the tasklist lock
+ * so that our parent does not change from under us. -DaveM
+@@ -984,6 +985,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid
+ out:
+ /* All paths lead to here, thus we are safe. -DaveM */
+ write_unlock_irq(&tasklist_lock);
++ rcu_read_unlock();
+ return err;
+ }
+
--- /dev/null
+From 6715045ddc7472a22be5e49d4047d2d89b391f45 Mon Sep 17 00:00:00 2001
+From: Rafael J. Wysocki <rjw@sisk.pl>
+Date: Sat, 11 Sep 2010 20:58:27 +0200
+Subject: PM / Hibernate: Avoid hitting OOM during preallocation of memory
+
+From: Rafael J. Wysocki <rjw@sisk.pl>
+
+commit 6715045ddc7472a22be5e49d4047d2d89b391f45 upstream.
+
+There is a problem in hibernate_preallocate_memory() that it calls
+preallocate_image_memory() with an argument that may be greater than
+the total number of available non-highmem memory pages. If that's
+the case, the OOM condition is guaranteed to trigger, which in turn
+can cause significant slowdown to occur during hibernation.
+
+To avoid that, make preallocate_image_memory() adjust its argument
+before calling preallocate_image_pages(), so that the total number of
+saveable non-highmem pages left is not less than the minimum size of
+a hibernation image. Change hibernate_preallocate_memory() to try to
+allocate from highmem if the number of pages allocated by
+preallocate_image_memory() is too low.
+
+Modify free_unnecessary_pages() to take all possible memory
+allocation patterns into account.
+
+Reported-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Tested-by: M. Vefa Bicakci <bicave@superonline.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/power/snapshot.c | 85 ++++++++++++++++++++++++++++++++++++------------
+ 1 file changed, 65 insertions(+), 20 deletions(-)
+
+--- a/kernel/power/snapshot.c
++++ b/kernel/power/snapshot.c
+@@ -1121,9 +1121,19 @@ static unsigned long preallocate_image_p
+ return nr_alloc;
+ }
+
+-static unsigned long preallocate_image_memory(unsigned long nr_pages)
++static unsigned long preallocate_image_memory(unsigned long nr_pages,
++ unsigned long avail_normal)
+ {
+- return preallocate_image_pages(nr_pages, GFP_IMAGE);
++ unsigned long alloc;
++
++ if (avail_normal <= alloc_normal)
++ return 0;
++
++ alloc = avail_normal - alloc_normal;
++ if (nr_pages < alloc)
++ alloc = nr_pages;
++
++ return preallocate_image_pages(alloc, GFP_IMAGE);
+ }
+
+ #ifdef CONFIG_HIGHMEM
+@@ -1169,15 +1179,22 @@ static inline unsigned long preallocate_
+ */
+ static void free_unnecessary_pages(void)
+ {
+- unsigned long save_highmem, to_free_normal, to_free_highmem;
++ unsigned long save, to_free_normal, to_free_highmem;
+
+- to_free_normal = alloc_normal - count_data_pages();
+- save_highmem = count_highmem_pages();
+- if (alloc_highmem > save_highmem) {
+- to_free_highmem = alloc_highmem - save_highmem;
++ save = count_data_pages();
++ if (alloc_normal >= save) {
++ to_free_normal = alloc_normal - save;
++ save = 0;
++ } else {
++ to_free_normal = 0;
++ save -= alloc_normal;
++ }
++ save += count_highmem_pages();
++ if (alloc_highmem >= save) {
++ to_free_highmem = alloc_highmem - save;
+ } else {
+ to_free_highmem = 0;
+- to_free_normal -= save_highmem - alloc_highmem;
++ to_free_normal -= save - alloc_highmem;
+ }
+
+ memory_bm_position_reset(&copy_bm);
+@@ -1258,7 +1275,7 @@ int hibernate_preallocate_memory(void)
+ {
+ struct zone *zone;
+ unsigned long saveable, size, max_size, count, highmem, pages = 0;
+- unsigned long alloc, save_highmem, pages_highmem;
++ unsigned long alloc, save_highmem, pages_highmem, avail_normal;
+ struct timeval start, stop;
+ int error;
+
+@@ -1295,6 +1312,7 @@ int hibernate_preallocate_memory(void)
+ else
+ count += zone_page_state(zone, NR_FREE_PAGES);
+ }
++ avail_normal = count;
+ count += highmem;
+ count -= totalreserve_pages;
+
+@@ -1309,12 +1327,21 @@ int hibernate_preallocate_memory(void)
+ */
+ if (size >= saveable) {
+ pages = preallocate_image_highmem(save_highmem);
+- pages += preallocate_image_memory(saveable - pages);
++ pages += preallocate_image_memory(saveable - pages, avail_normal);
+ goto out;
+ }
+
+ /* Estimate the minimum size of the image. */
+ pages = minimum_image_size(saveable);
++ /*
++ * To avoid excessive pressure on the normal zone, leave room in it to
++ * accommodate an image of the minimum size (unless it's already too
++ * small, in which case don't preallocate pages from it at all).
++ */
++ if (avail_normal > pages)
++ avail_normal -= pages;
++ else
++ avail_normal = 0;
+ if (size < pages)
+ size = min_t(unsigned long, pages, max_size);
+
+@@ -1335,16 +1362,34 @@ int hibernate_preallocate_memory(void)
+ */
+ pages_highmem = preallocate_image_highmem(highmem / 2);
+ alloc = (count - max_size) - pages_highmem;
+- pages = preallocate_image_memory(alloc);
+- if (pages < alloc)
+- goto err_out;
+- size = max_size - size;
+- alloc = size;
+- size = preallocate_highmem_fraction(size, highmem, count);
+- pages_highmem += size;
+- alloc -= size;
+- pages += preallocate_image_memory(alloc);
+- pages += pages_highmem;
++ pages = preallocate_image_memory(alloc, avail_normal);
++ if (pages < alloc) {
++ /* We have exhausted non-highmem pages, try highmem. */
++ alloc -= pages;
++ pages += pages_highmem;
++ pages_highmem = preallocate_image_highmem(alloc);
++ if (pages_highmem < alloc)
++ goto err_out;
++ pages += pages_highmem;
++ /*
++ * size is the desired number of saveable pages to leave in
++ * memory, so try to preallocate (all memory - size) pages.
++ */
++ alloc = (count - pages) - size;
++ pages += preallocate_image_highmem(alloc);
++ } else {
++ /*
++ * There are approximately max_size saveable pages at this point
++ * and we want to reduce this number down to size.
++ */
++ alloc = max_size - size;
++ size = preallocate_highmem_fraction(alloc, highmem, count);
++ pages_highmem += size;
++ alloc -= size;
++ size = preallocate_image_memory(alloc, avail_normal);
++ pages_highmem += preallocate_image_highmem(alloc - size);
++ pages += pages_highmem + size;
++ }
+
+ /*
+ * We only need as many page frames for the image as there are saveable
--- /dev/null
+From 152e1d592071c8b312bb898bc1118b64e4aea535 Mon Sep 17 00:00:00 2001
+From: Colin Cross <ccross@android.com>
+Date: Fri, 3 Sep 2010 01:24:07 +0200
+Subject: PM: Prevent waiting forever on asynchronous resume after failing suspend
+
+From: Colin Cross <ccross@android.com>
+
+commit 152e1d592071c8b312bb898bc1118b64e4aea535 upstream.
+
+During suspend, the power.completion is expected to be set when a
+device has not yet started suspending. Set it on init to fix a
+corner case where a device is resumed when its parent has never
+suspended.
+
+Consider three drivers, A, B, and C. The parent of A is C, and C
+has async_suspend set. On boot, C->power.completion is initialized
+to 0.
+
+During the first suspend:
+suspend_devices_and_enter(...)
+ dpm_resume(...)
+ device_suspend(A)
+ device_suspend(B) returns error, aborts suspend
+ dpm_resume_end(...)
+ dpm_resume(...)
+ device_resume(A)
+ dpm_wait(A->parent == C)
+ wait_for_completion(C->power.completion)
+
+The wait_for_completion will never complete, because
+complete_all(C->power.completion) will only be called from
+device_suspend(C) or device_resume(C), neither of which is called
+if suspend is aborted before C.
+
+After a successful suspend->resume cycle, where B doesn't abort
+suspend, C->power.completion is left in the completed state by the
+call to device_resume(C), and the same call path will work if B
+aborts suspend.
+
+Signed-off-by: Colin Cross <ccross@android.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/base/power/main.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/base/power/main.c
++++ b/drivers/base/power/main.c
+@@ -59,6 +59,7 @@ void device_pm_init(struct device *dev)
+ {
+ dev->power.status = DPM_ON;
+ init_completion(&dev->power.completion);
++ complete_all(&dev->power.completion);
+ pm_runtime_init(dev);
+ }
+
--- /dev/null
+From 767b68e96993e29e3480d7ecdd9c4b84667c5762 Mon Sep 17 00:00:00 2001
+From: Dan Rosenberg <drosenberg@vsecurity.com>
+Date: Wed, 22 Sep 2010 14:32:56 -0400
+Subject: Prevent freeing uninitialized pointer in compat_do_readv_writev
+
+From: Dan Rosenberg <drosenberg@vsecurity.com>
+
+commit 767b68e96993e29e3480d7ecdd9c4b84667c5762 upstream.
+
+In 32-bit compatibility mode, the error handling for
+compat_do_readv_writev() may free an uninitialized pointer, potentially
+leading to all sorts of ugly memory corruption. This is reliably
+triggerable by unprivileged users by invoking the readv()/writev()
+syscalls with an invalid iovec pointer. The below patch fixes this to
+emulate the non-compat version.
+
+Introduced by commit b83733639a49 ("compat: factor out
+compat_rw_copy_check_uvector from compat_do_readv_writev")
+
+Signed-off-by: Dan Rosenberg <dan.j.rosenberg@gmail.com>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/compat.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/compat.c
++++ b/fs/compat.c
+@@ -1150,7 +1150,7 @@ static ssize_t compat_do_readv_writev(in
+ {
+ compat_ssize_t tot_len;
+ struct iovec iovstack[UIO_FASTIOV];
+- struct iovec *iov;
++ struct iovec *iov = iovstack;
+ ssize_t ret;
+ io_fn_t fn;
+ iov_fn_t fnv;
--- /dev/null
+From c227e69028473c7c7994a9b0a2cc0034f3f7e0fe Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Wed, 22 Sep 2010 13:04:54 -0700
+Subject: /proc/vmcore: fix seeking
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+commit c227e69028473c7c7994a9b0a2cc0034f3f7e0fe upstream.
+
+Commit 73296bc611 ("procfs: Use generic_file_llseek in /proc/vmcore")
+broke seeking on /proc/vmcore. This changes it back to use default_llseek
+in order to restore the original behaviour.
+
+The problem with generic_file_llseek is that it only allows seeks up to
+inode->i_sb->s_maxbytes, which is zero on procfs and some other virtual
+file systems. We should merge generic_file_llseek and default_llseek some
+day and clean this up in a proper way, but for 2.6.35/36, reverting vmcore
+is the safer solution.
+
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Cc: Frederic Weisbecker <fweisbec@gmail.com>
+Reported-by: CAI Qian <caiqian@redhat.com>
+Tested-by: CAI Qian <caiqian@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/proc/vmcore.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/proc/vmcore.c
++++ b/fs/proc/vmcore.c
+@@ -163,7 +163,7 @@ static ssize_t read_vmcore(struct file *
+
+ static const struct file_operations proc_vmcore_operations = {
+ .read = read_vmcore,
+- .llseek = generic_file_llseek,
++ .llseek = default_llseek,
+ };
+
+ static struct vmcore* __init get_new_element(void)
--- /dev/null
+From f501ed524b26ba1b739b7f7feb0a0e1496878769 Mon Sep 17 00:00:00 2001
+From: Vladimir Zapolskiy <vzapolskiy@gmail.com>
+Date: Wed, 22 Sep 2010 13:05:13 -0700
+Subject: rtc: s3c: balance state changes of wakeup flag
+
+From: Vladimir Zapolskiy <vzapolskiy@gmail.com>
+
+commit f501ed524b26ba1b739b7f7feb0a0e1496878769 upstream.
+
+This change resolves a problem about unbalanced calls of
+enable_irq_wakeup() and disable_irq_wakeup() for alarm interrupt.
+
+Bug reproduction:
+
+root@eb600:~# echo 0 > /sys/class/rtc/rtc0/wakealarm
+
+WARNING: at kernel/irq/manage.c:361 set_irq_wake+0x7c/0xe4()
+ Unbalanced IRQ 46 wake disable
+Modules linked in:
+[<c0025708>] (unwind_backtrace+0x0/0xd8) from [<c003358c>] (warn_slowpath_common+0x44/0x5c)
+[<c003358c>] (warn_slowpath_common+0x44/0x5c) from [<c00335dc>] (warn_slowpath_fmt+0x24/0x30)
+[<c00335dc>] (warn_slowpath_fmt+0x24/0x30) from [<c0058c20>] (set_irq_wake+0x7c/0xe4)
+[<c0058c20>] (set_irq_wake+0x7c/0xe4) from [<c01b5e80>] (s3c_rtc_setalarm+0xa8/0xb8)
+[<c01b5e80>] (s3c_rtc_setalarm+0xa8/0xb8) from [<c01b47a0>] (rtc_set_alarm+0x60/0x74)
+[<c01b47a0>] (rtc_set_alarm+0x60/0x74) from [<c01b5a98>] (rtc_sysfs_set_wakealarm+0xc8/0xd8)
+[<c01b5a98>] (rtc_sysfs_set_wakealarm+0xc8/0xd8) from [<c01891ec>] (dev_attr_store+0x20/0x24)
+[<c01891ec>] (dev_attr_store+0x20/0x24) from [<c00be934>] (sysfs_write_file+0x104/0x13c)
+[<c00be934>] (sysfs_write_file+0x104/0x13c) from [<c0080e7c>] (vfs_write+0xb0/0x158)
+[<c0080e7c>] (vfs_write+0xb0/0x158) from [<c0080fcc>] (sys_write+0x3c/0x68)
+[<c0080fcc>] (sys_write+0x3c/0x68) from [<c0020ec0>] (ret_fast_syscall+0x0/0x28)
+
+Signed-off-by: Vladimir Zapolskiy <vzapolskiy@gmail.com>
+Cc: Alessandro Zummo <a.zummo@towertech.it>
+Cc: Ben Dooks <ben@fluff.org.uk>
+Cc: Atul Dahiya <atul.dahiya@samsung.com>
+Cc: Taekgyun Ko <taeggyun.ko@samsung.com>
+Cc: Kukjin Kim <kgene.kim@samsung.com>
+Cc: <stable@kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/rtc/rtc-s3c.c | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+--- a/drivers/rtc/rtc-s3c.c
++++ b/drivers/rtc/rtc-s3c.c
+@@ -298,11 +298,6 @@ static int s3c_rtc_setalarm(struct devic
+
+ s3c_rtc_setaie(alrm->enabled);
+
+- if (alrm->enabled)
+- enable_irq_wake(s3c_rtc_alarmno);
+- else
+- disable_irq_wake(s3c_rtc_alarmno);
+-
+ return 0;
+ }
+
+@@ -547,6 +542,10 @@ static int s3c_rtc_suspend(struct platfo
+ ticnt_en_save &= S3C64XX_RTCCON_TICEN;
+ }
+ s3c_rtc_enable(pdev, 0);
++
++ if (device_may_wakeup(&pdev->dev))
++ enable_irq_wake(s3c_rtc_alarmno);
++
+ return 0;
+ }
+
+@@ -560,6 +559,10 @@ static int s3c_rtc_resume(struct platfor
+ tmp = readb(s3c_rtc_base + S3C2410_RTCCON);
+ writeb(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON);
+ }
++
++ if (device_may_wakeup(&pdev->dev))
++ disable_irq_wake(s3c_rtc_alarmno);
++
+ return 0;
+ }
+ #else
--- /dev/null
+From e75e863dd5c7d96b91ebbd241da5328fc38a78cc Mon Sep 17 00:00:00 2001
+From: Stanislaw Gruszka <sgruszka@redhat.com>
+Date: Tue, 14 Sep 2010 16:35:14 +0200
+Subject: sched: Fix user time incorrectly accounted as system time on 32-bit
+
+From: Stanislaw Gruszka <sgruszka@redhat.com>
+
+commit e75e863dd5c7d96b91ebbd241da5328fc38a78cc upstream.
+
+We have 32-bit variable overflow possibility when multiply in
+task_times() and thread_group_times() functions. When the
+overflow happens then the scaled utime value becomes erroneously
+small and the scaled stime becomes erroneously big.
+
+Reported here:
+
+ https://bugzilla.redhat.com/show_bug.cgi?id=633037
+ https://bugzilla.kernel.org/show_bug.cgi?id=16559
+
+Reported-by: Michael Chapman <redhat-bugzilla@very.puzzling.org>
+Reported-by: Ciriaco Garcia de Celis <sysman@etherpilot.com>
+Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
+LKML-Reference: <20100914143513.GB8415@redhat.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/sched.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -3349,9 +3349,9 @@ void task_times(struct task_struct *p, c
+ rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
+
+ if (total) {
+- u64 temp;
++ u64 temp = rtime;
+
+- temp = (u64)(rtime * utime);
++ temp *= utime;
+ do_div(temp, total);
+ utime = (cputime_t)temp;
+ } else
+@@ -3382,9 +3382,9 @@ void thread_group_times(struct task_stru
+ rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
+
+ if (total) {
+- u64 temp;
++ u64 temp = rtime;
+
+- temp = (u64)(rtime * cputime.utime);
++ temp *= cputime.utime;
+ do_div(temp, total);
+ utime = (cputime_t)temp;
+ } else
--- /dev/null
+From 2a1b7e575b80ceb19ea50bfa86ce0053ea57181d Mon Sep 17 00:00:00 2001
+From: Ryan Kuester <rkuester@kspace.net>
+Date: Mon, 26 Apr 2010 18:11:54 -0500
+Subject: SCSI: mptsas: fix hangs caused by ATA pass-through
+
+From: Ryan Kuester <rkuester@kspace.net>
+
+commit 2a1b7e575b80ceb19ea50bfa86ce0053ea57181d upstream.
+
+I may have an explanation for the LSI 1068 HBA hangs provoked by ATA
+pass-through commands, in particular by smartctl.
+
+First, my version of the symptoms. On an LSI SAS1068E B3 HBA running
+01.29.00.00 firmware, with SATA disks, and with smartd running, I'm seeing
+occasional task, bus, and host resets, some of which lead to hard faults of
+the HBA requiring a reboot. Abusively looping the smartctl command,
+
+ # while true; do smartctl -a /dev/sdb > /dev/null; done
+
+dramatically increases the frequency of these failures to nearly one per
+minute. A high IO load through the HBA while looping smartctl seems to
+improve the chance of a full scsi host reset or a non-recoverable hang.
+
+I reduced what smartctl was doing down to a simple test case which
+causes the hang with a single IO when pointed at the sd interface. See
+the code at the bottom of this e-mail. It uses an SG_IO ioctl to issue
+a single pass-through ATA identify device command. If the buffer
+userspace gives for the read data has certain alignments, the task is
+issued to the HBA but the HBA fails to respond. If run against the sg
+interface, neither the test code nor smartctl causes a hang.
+
+sd and sg handle the SG_IO ioctl slightly differently. Unless you
+specifically set a flag to do direct IO, sg passes a buffer of its own,
+which is page-aligned, to the block layer and later copies the result
+into the userspace buffer regardless of its alignment. sd, on the other
+hand, always does direct IO unless the userspace buffer fails an
+alignment test at block/blk-map.c line 57, in which case a page-aligned
+buffer is created and used for the transfer.
+
+The alignment test currently checks for word-alignment, the default
+setup by scsi_lib.c; therefore, userspace buffers of almost any
+alignment are given directly to the HBA as DMA targets. The LSI 1068
+hardware doesn't seem to like at least a couple of the alignments which
+cross a page boundary (see the test code below). Curiously, many
+page-boundary-crossing alignments do work just fine.
+
+So, either the hardware has a bug handling certain alignments or the
+hardware has a stricter alignment requirement than the driver is
+advertising. If stricter alignment is required, then in no case should
+misaligned buffers from userspace be allowed through without being
+bounced or at least causing an error to be returned.
+
+It seems the mptsas driver could use blk_queue_dma_alignment() to advertise
+a stricter alignment requirement. If it does, sd does the right thing and
+bounces misaligned buffers (see block/blk-map.c line 57). The following
+patch to 2.6.34-rc5 makes my symptoms go away. I'm sure this is the wrong
+place for this code, but it gets my idea across.
+
+Acked-by: "Desai, Kashyap" <Kashyap.Desai@lsi.com>
+Signed-off-by: James Bottomley <James.Bottomley@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/message/fusion/mptscsih.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/message/fusion/mptscsih.c
++++ b/drivers/message/fusion/mptscsih.c
+@@ -2459,6 +2459,8 @@ mptscsih_slave_configure(struct scsi_dev
+ ioc->name,sdev->tagged_supported, sdev->simple_tags,
+ sdev->ordered_tags));
+
++ blk_queue_dma_alignment (sdev->request_queue, 512 - 1);
++
+ return 0;
+ }
+
intel_agp-drm-i915-add-all-sandybridge-graphics-devices-support.patch
agp-intel-fix-physical-address-mask-bits-for-sandybridge.patch
agp-intel-fix-dma-mask-bits-on-sandybridge.patch
+hw-breakpoints-fix-pid-namespace-bug.patch
+pid-make-setpgid-system-call-use-rcu-read-side-critical-section.patch
+sched-fix-user-time-incorrectly-accounted-as-system-time-on-32-bit.patch
+oprofile-add-support-for-intel-cpu-family-6-model-22-intel-celeron-540.patch
+drm-i915-agp-intel-add-second-set-of-pci-ids-for-b43.patch
+bdi-initialize-noop_backing_dev_info-properly.patch
+bdi-fix-warnings-in-__mark_inode_dirty-for-dev-zero-and-friends.patch
+char-mark-dev-zero-and-dev-kmem-as-not-capable-of-writeback.patch
+drivers-pci-intel-iommu.c-fix-build-with-older-gcc-s.patch
+mmap-call-unlink_anon_vmas-in-__split_vma-in-case-of-error.patch
+drivers-video-sis-sis_main.c-prevent-reading-uninitialized-stack-memory.patch
+rtc-s3c-balance-state-changes-of-wakeup-flag.patch
+prevent-freeing-uninitialized-pointer-in-compat_do_readv_writev.patch
+proc-vmcore-fix-seeking.patch
+vmscan-check-all_unreclaimable-in-direct-reclaim-path.patch
+percpu-fix-pcpu_last_unit_cpu.patch
+aio-do-not-return-erestartsys-as-a-result-of-aio.patch
+aio-check-for-multiplication-overflow-in-do_io_submit.patch
+x86-platform-drivers-hp-wmi-reorder-event-id-processing.patch
+gfs2-gfs2_logd-should-be-using-interruptible-waits.patch
+drm-nv50-initialize-ramht_refs-list-for-faked-0-channel.patch
+inotify-send-in_unmount-events.patch
+scsi-mptsas-fix-hangs-caused-by-ata-pass-through.patch
+kvm-keep-slot-id-in-memory-slot-structure.patch
+kvm-prevent-internal-slots-from-being-cowed.patch
+kvm-mmu-fix-direct-sp-s-access-corrupted.patch
+kvm-x86-emulator-inc-dec-can-have-lock-prefix.patch
+kvm-mmu-fix-mmu-notifier-invalidate-handler-for-huge-spte.patch
+kvm-vmx-fix-host-gdt.limit-corruption.patch
+ia64-fix-siglock.patch
+ia64-optimize-ticket-spinlocks-in-fsys_rt_sigprocmask.patch
+keys-fix-rcu-no-lock-warning-in-keyctl_session_to_parent.patch
+keys-fix-bug-in-keyctl_session_to_parent-if-parent-has-no-session-keyring.patch
+xfs-prevent-reading-uninitialized-stack-memory.patch
+drivers-video-via-ioctl.c-prevent-reading-uninitialized-stack-memory.patch
+at91-change-dma-resource-index.patch
+pm-prevent-waiting-forever-on-asynchronous-resume-after-failing-suspend.patch
+pm-hibernate-avoid-hitting-oom-during-preallocation-of-memory.patch
+x86-asm-use-a-lower-case-name-for-the-end-macro-in-atomic64_386_32.s.patch
+alsa-hda-fix-beep-frequency-on-idt-92hd73xx-and-92hd71bxx-codecs.patch
+fix-call-to-replaced-superio-functions.patch
+dell-wmi-add-support-for-eject-key-on-dell-studio-1555.patch
+mm-page-allocator-drain-per-cpu-lists-after-direct-reclaim-allocation-fails.patch
+mm-page-allocator-calculate-a-better-estimate-of-nr_free_pages-when-memory-is-low-and-kswapd-is-awake.patch
+mm-page-allocator-update-free-page-counters-after-pages-are-placed-on-the-free-list.patch
+guard-page-for-stacks-that-grow-upwards.patch
+fix-unprotected-access-to-task-credentials-in-waitid.patch
--- /dev/null
+From d1908362ae0b97374eb8328fbb471576332f9fb1 Mon Sep 17 00:00:00 2001
+From: Minchan Kim <minchan.kim@gmail.com>
+Date: Wed, 22 Sep 2010 13:05:01 -0700
+Subject: vmscan: check all_unreclaimable in direct reclaim path
+
+From: Minchan Kim <minchan.kim@gmail.com>
+
+commit d1908362ae0b97374eb8328fbb471576332f9fb1 upstream.
+
+M. Vefa Bicakci reported 2.6.35 kernel hang up when hibernation on his
+32bit 3GB mem machine.
+(https://bugzilla.kernel.org/show_bug.cgi?id=16771). Also he bisected
+the regression to
+
+ commit bb21c7ce18eff8e6e7877ca1d06c6db719376e3c
+ Author: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
+ Date: Fri Jun 4 14:15:05 2010 -0700
+
+ vmscan: fix do_try_to_free_pages() return value when priority==0 reclaim failure
+
+At first impression, this seemed very strange because the above commit
+only changed a function return value and hibernate_preallocate_memory()
+ignores the return value of shrink_all_memory(). But it's related.
+
+Now, page allocation from hibernation code may enter an infinite loop if the
+system has highmem. The reason is that vmscan doesn't handle the OOM
+case carefully enough when oom_killer_disabled is set.
+
+The problem sequence is as follows.
+
+1. hibernation
+2. oom_disable
+3. alloc_pages
+4. do_try_to_free_pages
+ if (scanning_global_lru(sc) && !all_unreclaimable)
+ return 1;
+
+If kswapd is not frozen, it would set zone->all_unreclaimable to 1 and
+then shrink_zones may return true (ie, all_unreclaimable is true). At
+last, alloc_pages could go to _nopage_. If it did, there would be no
+problem.
+
+This patch adds all_unreclaimable check to protect in direct reclaim path,
+too. It can care of hibernation OOM case and help bailout
+all_unreclaimable case slightly.
+
+Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
+Signed-off-by: Minchan Kim <minchan.kim@gmail.com>
+Reported-by: M. Vefa Bicakci <bicave@superonline.com>
+Reported-by: <caiqian@redhat.com>
+Reviewed-by: Johannes Weiner <hannes@cmpxchg.org>
+Tested-by: <caiqian@redhat.com>
+Acked-by: Rafael J. Wysocki <rjw@sisk.pl>
+Acked-by: Rik van Riel <riel@redhat.com>
+Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
+Cc: Balbir Singh <balbir@in.ibm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ mm/vmscan.c | 43 +++++++++++++++++++++++++++++++++++--------
+ 1 file changed, 35 insertions(+), 8 deletions(-)
+
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -1726,13 +1726,12 @@ static void shrink_zone(int priority, st
+ * If a zone is deemed to be full of pinned pages then just give it a light
+ * scan then give up on it.
+ */
+-static bool shrink_zones(int priority, struct zonelist *zonelist,
++static void shrink_zones(int priority, struct zonelist *zonelist,
+ struct scan_control *sc)
+ {
+ enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
+ struct zoneref *z;
+ struct zone *zone;
+- bool all_unreclaimable = true;
+
+ for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
+ sc->nodemask) {
+@@ -1759,8 +1758,38 @@ static bool shrink_zones(int priority, s
+ }
+
+ shrink_zone(priority, zone, sc);
+- all_unreclaimable = false;
+ }
++}
++
++static bool zone_reclaimable(struct zone *zone)
++{
++ return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
++}
++
++/*
++ * As hibernation is going on, kswapd is freezed so that it can't mark
++ * the zone into all_unreclaimable. It can't handle OOM during hibernation.
++ * So let's check zone's unreclaimable in direct reclaim as well as kswapd.
++ */
++static bool all_unreclaimable(struct zonelist *zonelist,
++ struct scan_control *sc)
++{
++ struct zoneref *z;
++ struct zone *zone;
++ bool all_unreclaimable = true;
++
++ for_each_zone_zonelist_nodemask(zone, z, zonelist,
++ gfp_zone(sc->gfp_mask), sc->nodemask) {
++ if (!populated_zone(zone))
++ continue;
++ if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
++ continue;
++ if (zone_reclaimable(zone)) {
++ all_unreclaimable = false;
++ break;
++ }
++ }
++
+ return all_unreclaimable;
+ }
+
+@@ -1784,7 +1813,6 @@ static unsigned long do_try_to_free_page
+ struct scan_control *sc)
+ {
+ int priority;
+- bool all_unreclaimable;
+ unsigned long total_scanned = 0;
+ struct reclaim_state *reclaim_state = current->reclaim_state;
+ unsigned long lru_pages = 0;
+@@ -1815,7 +1843,7 @@ static unsigned long do_try_to_free_page
+ sc->nr_scanned = 0;
+ if (!priority)
+ disable_swap_token();
+- all_unreclaimable = shrink_zones(priority, zonelist, sc);
++ shrink_zones(priority, zonelist, sc);
+ /*
+ * Don't shrink slabs when reclaiming memory from
+ * over limit cgroups
+@@ -1879,7 +1907,7 @@ out:
+ return sc->nr_reclaimed;
+
+ /* top priority shrink_zones still had more to do? don't OOM, then */
+- if (scanning_global_lru(sc) && !all_unreclaimable)
++ if (scanning_global_lru(sc) && !all_unreclaimable(zonelist, sc))
+ return 1;
+
+ return 0;
+@@ -2137,8 +2165,7 @@ loop_again:
+ total_scanned += sc.nr_scanned;
+ if (zone->all_unreclaimable)
+ continue;
+- if (nr_slab == 0 &&
+- zone->pages_scanned >= (zone_reclaimable_pages(zone) * 6))
++ if (nr_slab == 0 && !zone_reclaimable(zone))
+ zone->all_unreclaimable = 1;
+ /*
+ * If we've done a decent amount of scanning and
--- /dev/null
+From 417484d47e115774745ef025bce712a102b6f86f Mon Sep 17 00:00:00 2001
+From: Luca Barbieri <luca@luca-barbieri.com>
+Date: Thu, 12 Aug 2010 07:00:35 -0700
+Subject: x86, asm: Use a lower case name for the end macro in atomic64_386_32.S
+
+From: Luca Barbieri <luca@luca-barbieri.com>
+
+commit 417484d47e115774745ef025bce712a102b6f86f upstream.
+
+Use a lowercase name for the end macro, which somehow fixes a binutils 2.16
+problem.
+
+Signed-off-by: Luca Barbieri <luca@luca-barbieri.com>
+LKML-Reference: <tip-30246557a06bb20618bed906a06d1e1e0faa8bb4@git.kernel.org>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/lib/atomic64_386_32.S | 38 ++++++++++++++++++++------------------
+ 1 file changed, 20 insertions(+), 18 deletions(-)
+
+--- a/arch/x86/lib/atomic64_386_32.S
++++ b/arch/x86/lib/atomic64_386_32.S
+@@ -26,35 +26,37 @@
+ .endm
+
+ #define BEGIN(op) \
+-.macro END; \
++.macro endp; \
+ CFI_ENDPROC; \
+ ENDPROC(atomic64_##op##_386); \
+-.purgem END; \
++.purgem endp; \
+ .endm; \
+ ENTRY(atomic64_##op##_386); \
+ CFI_STARTPROC; \
+ LOCK v;
+
++#define ENDP endp
++
+ #define RET \
+ UNLOCK v; \
+ ret
+
+-#define RET_END \
++#define RET_ENDP \
+ RET; \
+- END
++ ENDP
+
+ #define v %ecx
+ BEGIN(read)
+ movl (v), %eax
+ movl 4(v), %edx
+-RET_END
++RET_ENDP
+ #undef v
+
+ #define v %esi
+ BEGIN(set)
+ movl %ebx, (v)
+ movl %ecx, 4(v)
+-RET_END
++RET_ENDP
+ #undef v
+
+ #define v %esi
+@@ -63,14 +65,14 @@ BEGIN(xchg)
+ movl 4(v), %edx
+ movl %ebx, (v)
+ movl %ecx, 4(v)
+-RET_END
++RET_ENDP
+ #undef v
+
+ #define v %ecx
+ BEGIN(add)
+ addl %eax, (v)
+ adcl %edx, 4(v)
+-RET_END
++RET_ENDP
+ #undef v
+
+ #define v %ecx
+@@ -79,14 +81,14 @@ BEGIN(add_return)
+ adcl 4(v), %edx
+ movl %eax, (v)
+ movl %edx, 4(v)
+-RET_END
++RET_ENDP
+ #undef v
+
+ #define v %ecx
+ BEGIN(sub)
+ subl %eax, (v)
+ sbbl %edx, 4(v)
+-RET_END
++RET_ENDP
+ #undef v
+
+ #define v %ecx
+@@ -98,14 +100,14 @@ BEGIN(sub_return)
+ adcl 4(v), %edx
+ movl %eax, (v)
+ movl %edx, 4(v)
+-RET_END
++RET_ENDP
+ #undef v
+
+ #define v %esi
+ BEGIN(inc)
+ addl $1, (v)
+ adcl $0, 4(v)
+-RET_END
++RET_ENDP
+ #undef v
+
+ #define v %esi
+@@ -116,14 +118,14 @@ BEGIN(inc_return)
+ adcl $0, %edx
+ movl %eax, (v)
+ movl %edx, 4(v)
+-RET_END
++RET_ENDP
+ #undef v
+
+ #define v %esi
+ BEGIN(dec)
+ subl $1, (v)
+ sbbl $0, 4(v)
+-RET_END
++RET_ENDP
+ #undef v
+
+ #define v %esi
+@@ -134,7 +136,7 @@ BEGIN(dec_return)
+ sbbl $0, %edx
+ movl %eax, (v)
+ movl %edx, 4(v)
+-RET_END
++RET_ENDP
+ #undef v
+
+ #define v %ecx
+@@ -156,7 +158,7 @@ BEGIN(add_unless)
+ jne 1b
+ xorl %eax, %eax
+ jmp 2b
+-END
++ENDP
+ #undef v
+
+ #define v %esi
+@@ -177,7 +179,7 @@ BEGIN(inc_not_zero)
+ testl %edx, %edx
+ jne 1b
+ jmp 2b
+-END
++ENDP
+ #undef v
+
+ #define v %esi
+@@ -190,5 +192,5 @@ BEGIN(dec_if_positive)
+ movl %eax, (v)
+ movl %edx, 4(v)
+ 1:
+-RET_END
++RET_ENDP
+ #undef v
--- /dev/null
+From 751ae808f6b29803228609f51aa1ae057f5c576e Mon Sep 17 00:00:00 2001
+From: Thomas Renninger <trenn@suse.de>
+Date: Fri, 21 May 2010 16:18:09 +0200
+Subject: x86 platform drivers: hp-wmi Reorder event id processing
+
+From: Thomas Renninger <trenn@suse.de>
+
+commit 751ae808f6b29803228609f51aa1ae057f5c576e upstream.
+
+Event id 0x4 defines the hotkey event.
+No need (or even wrong) to query HPWMI_HOTKEY_QUERY if event id is != 0x4.
+
+Reorder the eventcode conditionals and use switch case instead of if/else.
+Use an enum for the event ids cases.
+
+Signed-off-by: Thomas Renninger <trenn@suse.de>
+Signed-off-by: Matthew Garrett <mjg@redhat.com>
+CC: linux-acpi@vger.kernel.org
+CC: platform-driver-x86@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/platform/x86/hp-wmi.c | 51 ++++++++++++++++++++++++++----------------
+ 1 file changed, 32 insertions(+), 19 deletions(-)
+
+--- a/drivers/platform/x86/hp-wmi.c
++++ b/drivers/platform/x86/hp-wmi.c
+@@ -58,6 +58,12 @@ enum hp_wmi_radio {
+ HPWMI_WWAN = 2,
+ };
+
++enum hp_wmi_event_ids {
++ HPWMI_DOCK_EVENT = 1,
++ HPWMI_BEZEL_BUTTON = 4,
++ HPWMI_WIRELESS = 5,
++};
++
+ static int __devinit hp_wmi_bios_setup(struct platform_device *device);
+ static int __exit hp_wmi_bios_remove(struct platform_device *device);
+ static int hp_wmi_resume_handler(struct device *device);
+@@ -338,7 +344,7 @@ static void hp_wmi_notify(u32 value, voi
+ struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
+ static struct key_entry *key;
+ union acpi_object *obj;
+- int eventcode;
++ int eventcode, key_code;
+ acpi_status status;
+
+ status = wmi_get_event_data(value, &response);
+@@ -357,28 +363,32 @@ static void hp_wmi_notify(u32 value, voi
+
+ eventcode = *((u8 *) obj->buffer.pointer);
+ kfree(obj);
+- if (eventcode == 0x4)
+- eventcode = hp_wmi_perform_query(HPWMI_HOTKEY_QUERY, 0,
+- 0);
+- key = hp_wmi_get_entry_by_scancode(eventcode);
+- if (key) {
+- switch (key->type) {
+- case KE_KEY:
+- input_report_key(hp_wmi_input_dev,
+- key->keycode, 1);
+- input_sync(hp_wmi_input_dev);
+- input_report_key(hp_wmi_input_dev,
+- key->keycode, 0);
+- input_sync(hp_wmi_input_dev);
+- break;
+- }
+- } else if (eventcode == 0x1) {
++ switch (eventcode) {
++ case HPWMI_DOCK_EVENT:
+ input_report_switch(hp_wmi_input_dev, SW_DOCK,
+ hp_wmi_dock_state());
+ input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
+ hp_wmi_tablet_state());
+ input_sync(hp_wmi_input_dev);
+- } else if (eventcode == 0x5) {
++ break;
++ case HPWMI_BEZEL_BUTTON:
++ key_code = hp_wmi_perform_query(HPWMI_HOTKEY_QUERY, 0,
++ 0);
++ key = hp_wmi_get_entry_by_scancode(key_code);
++ if (key) {
++ switch (key->type) {
++ case KE_KEY:
++ input_report_key(hp_wmi_input_dev,
++ key->keycode, 1);
++ input_sync(hp_wmi_input_dev);
++ input_report_key(hp_wmi_input_dev,
++ key->keycode, 0);
++ input_sync(hp_wmi_input_dev);
++ break;
++ }
++ }
++ break;
++ case HPWMI_WIRELESS:
+ if (wifi_rfkill)
+ rfkill_set_states(wifi_rfkill,
+ hp_wmi_get_sw_state(HPWMI_WIFI),
+@@ -391,9 +401,12 @@ static void hp_wmi_notify(u32 value, voi
+ rfkill_set_states(wwan_rfkill,
+ hp_wmi_get_sw_state(HPWMI_WWAN),
+ hp_wmi_get_hw_state(HPWMI_WWAN));
+- } else
++ break;
++ default:
+ printk(KERN_INFO "HP WMI: Unknown key pressed - %x\n",
+ eventcode);
++ break;
++ }
+ }
+
+ static int __init hp_wmi_input_setup(void)
--- /dev/null
+From a122eb2fdfd78b58c6dd992d6f4b1aaef667eef9 Mon Sep 17 00:00:00 2001
+From: Dan Rosenberg <dan.j.rosenberg@gmail.com>
+Date: Mon, 6 Sep 2010 18:24:57 -0400
+Subject: xfs: prevent reading uninitialized stack memory
+
+From: Dan Rosenberg <dan.j.rosenberg@gmail.com>
+
+commit a122eb2fdfd78b58c6dd992d6f4b1aaef667eef9 upstream.
+
+The XFS_IOC_FSGETXATTR ioctl allows unprivileged users to read 12
+bytes of uninitialized stack memory, because the fsxattr struct
+declared on the stack in xfs_ioc_fsgetxattr() does not alter (or zero)
+the 12-byte fsx_pad member before copying it back to the user. This
+patch takes care of it.
+
+Signed-off-by: Dan Rosenberg <dan.j.rosenberg@gmail.com>
+Reviewed-by: Eric Sandeen <sandeen@redhat.com>
+Signed-off-by: Alex Elder <aelder@sgi.com>
+Cc: dann frazier <dannf@debian.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/xfs/linux-2.6/xfs_ioctl.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/fs/xfs/linux-2.6/xfs_ioctl.c
++++ b/fs/xfs/linux-2.6/xfs_ioctl.c
+@@ -794,6 +794,8 @@ xfs_ioc_fsgetxattr(
+ {
+ struct fsxattr fa;
+
++ memset(&fa, 0, sizeof(struct fsxattr));
++
+ xfs_ilock(ip, XFS_ILOCK_SHARED);
+ fa.fsx_xflags = xfs_ip2xflags(ip);
+ fa.fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog;