--- /dev/null
+From a2d6d2fa90c0e1d2cc1d59ccb5bbe93bb28b7413 Mon Sep 17 00:00:00 2001
+From: Lei Xu <B33228@freescale.com>
+Date: Fri, 25 Feb 2011 14:44:23 -0800
+Subject: drivers/rtc/rtc-ds3232.c: fix time range difference between linux and RTC chip
+
+From: Lei Xu <B33228@freescale.com>
+
+commit a2d6d2fa90c0e1d2cc1d59ccb5bbe93bb28b7413 upstream.
+
+In the Linux rtc_time struct, the tm_mon range is 0~11 and the tm_wday
+range is 0~6, while in the RTC hardware registers the month range is
+1~12 and the day-of-the-week range is 1~7. This patch adjusts for the
+difference.
+
+The effect of this bug was that the hardware operated on most months
+as the adjacent month, off by one (and January could be even worse).
+For example, in May the software wrote 4 to the hardware, which
+treated it as April. The logic in software and hardware would then
+disagree, causing weird behaviour.
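+
+For illustration only, the intended conversion after this patch can be
+reproduced in a small standalone program; the local bcd2bin()/bin2bcd()
+helpers below assume the usual <linux/bcd.h> semantics:
+
+	#include <stdio.h>
+
+	/* standalone stand-ins for the kernel's bcd2bin()/bin2bcd() */
+	static unsigned int bcd2bin(unsigned char val) { return (val & 0x0f) + (val >> 4) * 10; }
+	static unsigned char bin2bcd(unsigned int val) { return ((val / 10) << 4) | (val % 10); }
+
+	int main(void)
+	{
+		int tm_mon = 4;				/* May in Linux terms (0~11) */
+		unsigned char reg = bin2bcd(tm_mon + 1);	/* 0x05: May for the chip (1~12) */
+
+		printf("month register written: 0x%02x\n", reg);
+		printf("tm_mon read back: %u\n", bcd2bin(reg & 0x7F) - 1);
+		return 0;
+	}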
+
+Signed-off-by: Lei Xu <B33228@freescale.com>
+Cc: Alessandro Zummo <a.zummo@towertech.it>
+Cc: john stultz <johnstul@us.ibm.com>
+Cc: Jack Lan <jack.lan@freescale.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/rtc/rtc-ds3232.c | 14 +++++++++-----
+ 1 file changed, 9 insertions(+), 5 deletions(-)
+
+--- a/drivers/rtc/rtc-ds3232.c
++++ b/drivers/rtc/rtc-ds3232.c
+@@ -1,7 +1,7 @@
+ /*
+ * RTC client/driver for the Maxim/Dallas DS3232 Real-Time Clock over I2C
+ *
+- * Copyright (C) 2009-2010 Freescale Semiconductor.
++ * Copyright (C) 2009-2011 Freescale Semiconductor.
+ * Author: Jack Lan <jack.lan@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+@@ -141,9 +141,11 @@ static int ds3232_read_time(struct devic
+ time->tm_hour = bcd2bin(hour);
+ }
+
+- time->tm_wday = bcd2bin(week);
++ /* Day of the week in linux range is 0~6 while 1~7 in RTC chip */
++ time->tm_wday = bcd2bin(week) - 1;
+ time->tm_mday = bcd2bin(day);
+- time->tm_mon = bcd2bin(month & 0x7F);
++ /* linux tm_mon range:0~11, while month range is 1~12 in RTC chip */
++ time->tm_mon = bcd2bin(month & 0x7F) - 1;
+ if (century)
+ add_century = 100;
+
+@@ -162,9 +164,11 @@ static int ds3232_set_time(struct device
+ buf[0] = bin2bcd(time->tm_sec);
+ buf[1] = bin2bcd(time->tm_min);
+ buf[2] = bin2bcd(time->tm_hour);
+- buf[3] = bin2bcd(time->tm_wday); /* Day of the week */
++ /* Day of the week in linux range is 0~6 while 1~7 in RTC chip */
++ buf[3] = bin2bcd(time->tm_wday + 1);
+ buf[4] = bin2bcd(time->tm_mday); /* Date */
+- buf[5] = bin2bcd(time->tm_mon);
++ /* linux tm_mon range:0~11, while month range is 1~12 in RTC chip */
++ buf[5] = bin2bcd(time->tm_mon + 1);
+ if (time->tm_year >= 100) {
+ buf[5] |= 0x80;
+ buf[6] = bin2bcd(time->tm_year - 100);
--- /dev/null
+From 22bacca48a1755f79b7e0f192ddb9fbb7fc6e64e Mon Sep 17 00:00:00 2001
+From: Davide Libenzi <davidel@xmailserver.org>
+Date: Fri, 25 Feb 2011 14:44:12 -0800
+Subject: epoll: prevent creating circular epoll structures
+
+From: Davide Libenzi <davidel@xmailserver.org>
+
+commit 22bacca48a1755f79b7e0f192ddb9fbb7fc6e64e upstream.
+
+In several places, an epoll fd can call another file's ->f_op->poll()
+method with ep->mtx held. This is in general unsafe, because that other
+file could itself be an epoll fd that contains the original epoll fd.
+
+The code defends against this possibility in its own ->poll() method using
+ep_call_nested, but there are several other unsafe calls to ->poll
+elsewhere that can be made to deadlock. For example, the following simple
+program causes the ->poll() call in ep_insert() to recursively call the
+original fd's ->poll(), leading to deadlock:
+
+ #include <unistd.h>
+ #include <sys/epoll.h>
+
+ int main(void) {
+ int e1, e2, p[2];
+ struct epoll_event evt = {
+ .events = EPOLLIN
+ };
+
+ e1 = epoll_create(1);
+ e2 = epoll_create(2);
+ pipe(p);
+
+ epoll_ctl(e2, EPOLL_CTL_ADD, e1, &evt);
+ epoll_ctl(e1, EPOLL_CTL_ADD, p[0], &evt);
+ write(p[1], p, sizeof p);
+ epoll_ctl(e1, EPOLL_CTL_ADD, e2, &evt);
+
+ return 0;
+ }
+
+On insertion, check whether the inserted file is itself a struct epoll,
+and if so, do a recursive walk to detect whether inserting this file would
+create a loop of epoll structures, which could lead to deadlock.
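+
+With the loop check in place, the final epoll_ctl() in the program above
+should fail with ELOOP rather than deadlocking; an illustrative check
+(with <errno.h> and <stdio.h> added to the includes) would be:
+
+	if (epoll_ctl(e1, EPOLL_CTL_ADD, e2, &evt) < 0 && errno == ELOOP)
+		perror("epoll_ctl");	/* rejected with ELOOP instead of hanging */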
+
+[nelhage@ksplice.com: Use epmutex to serialize concurrent inserts]
+Signed-off-by: Davide Libenzi <davidel@xmailserver.org>
+Signed-off-by: Nelson Elhage <nelhage@ksplice.com>
+Reported-by: Nelson Elhage <nelhage@ksplice.com>
+Tested-by: Nelson Elhage <nelhage@ksplice.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/eventpoll.c | 95 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 95 insertions(+)
+
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -63,6 +63,13 @@
+ * cleanup path and it is also acquired by eventpoll_release_file()
+ * if a file has been pushed inside an epoll set and it is then
+ * close()d without a previous call toepoll_ctl(EPOLL_CTL_DEL).
++ * It is also acquired when inserting an epoll fd onto another epoll
++ * fd. We do this so that we walk the epoll tree and ensure that this
++ * insertion does not create a cycle of epoll file descriptors, which
++ * could lead to deadlock. We need a global mutex to prevent two
++ * simultaneous inserts (A into B and B into A) from racing and
++ * constructing a cycle without either insert observing that it is
++ * going to.
+ * It is possible to drop the "ep->mtx" and to use the global
+ * mutex "epmutex" (together with "ep->lock") to have it working,
+ * but having "ep->mtx" will make the interface more scalable.
+@@ -224,6 +231,9 @@ static int max_user_watches __read_mostl
+ */
+ static DEFINE_MUTEX(epmutex);
+
++/* Used to check for epoll file descriptor inclusion loops */
++static struct nested_calls poll_loop_ncalls;
++
+ /* Used for safe wake up implementation */
+ static struct nested_calls poll_safewake_ncalls;
+
+@@ -1195,6 +1205,62 @@ retry:
+ return res;
+ }
+
++/**
++ * ep_loop_check_proc - Callback function to be passed to the @ep_call_nested()
++ * API, to verify that adding an epoll file inside another
++ * epoll structure, does not violate the constraints, in
++ * terms of closed loops, or too deep chains (which can
++ * result in excessive stack usage).
++ *
++ * @priv: Pointer to the epoll file to be currently checked.
++ * @cookie: Original cookie for this call. This is the top-of-the-chain epoll
++ * data structure pointer.
++ * @call_nests: Current dept of the @ep_call_nested() call stack.
++ *
++ * Returns: Returns zero if adding the epoll @file inside current epoll
++ * structure @ep does not violate the constraints, or -1 otherwise.
++ */
++static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
++{
++ int error = 0;
++ struct file *file = priv;
++ struct eventpoll *ep = file->private_data;
++ struct rb_node *rbp;
++ struct epitem *epi;
++
++ mutex_lock(&ep->mtx);
++ for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
++ epi = rb_entry(rbp, struct epitem, rbn);
++ if (unlikely(is_file_epoll(epi->ffd.file))) {
++ error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
++ ep_loop_check_proc, epi->ffd.file,
++ epi->ffd.file->private_data, current);
++ if (error != 0)
++ break;
++ }
++ }
++ mutex_unlock(&ep->mtx);
++
++ return error;
++}
++
++/**
++ * ep_loop_check - Performs a check to verify that adding an epoll file (@file)
++ * another epoll file (represented by @ep) does not create
++ * closed loops or too deep chains.
++ *
++ * @ep: Pointer to the epoll private data structure.
++ * @file: Pointer to the epoll file to be checked.
++ *
++ * Returns: Returns zero if adding the epoll @file inside current epoll
++ * structure @ep does not violate the constraints, or -1 otherwise.
++ */
++static int ep_loop_check(struct eventpoll *ep, struct file *file)
++{
++ return ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
++ ep_loop_check_proc, file, ep, current);
++}
++
+ /*
+ * Open an eventpoll file descriptor.
+ */
+@@ -1243,6 +1309,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, in
+ struct epoll_event __user *, event)
+ {
+ int error;
++ int did_lock_epmutex = 0;
+ struct file *file, *tfile;
+ struct eventpoll *ep;
+ struct epitem *epi;
+@@ -1284,6 +1351,25 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, in
+ */
+ ep = file->private_data;
+
++ /*
++ * When we insert an epoll file descriptor, inside another epoll file
++ * descriptor, there is the change of creating closed loops, which are
++ * better be handled here, than in more critical paths.
++ *
++ * We hold epmutex across the loop check and the insert in this case, in
++ * order to prevent two separate inserts from racing and each doing the
++ * insert "at the same time" such that ep_loop_check passes on both
++ * before either one does the insert, thereby creating a cycle.
++ */
++ if (unlikely(is_file_epoll(tfile) && op == EPOLL_CTL_ADD)) {
++ mutex_lock(&epmutex);
++ did_lock_epmutex = 1;
++ error = -ELOOP;
++ if (ep_loop_check(ep, tfile) != 0)
++ goto error_tgt_fput;
++ }
++
++
+ mutex_lock(&ep->mtx);
+
+ /*
+@@ -1319,6 +1405,9 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, in
+ mutex_unlock(&ep->mtx);
+
+ error_tgt_fput:
++ if (unlikely(did_lock_epmutex))
++ mutex_unlock(&epmutex);
++
+ fput(tfile);
+ error_fput:
+ fput(file);
+@@ -1437,6 +1526,12 @@ static int __init eventpoll_init(void)
+ max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) /
+ EP_ITEM_COST;
+
++ /*
++ * Initialize the structure used to perform epoll file descriptor
++ * inclusion loops checks.
++ */
++ ep_nested_calls_init(&poll_loop_ncalls);
++
+ /* Initialize the structure used to perform safe poll wait head wake ups */
+ ep_nested_calls_init(&poll_safewake_ncalls);
+
--- /dev/null
+From 93b270f76e7ef3b81001576860c2701931cdc78b Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.de>
+Date: Thu, 24 Feb 2011 17:25:47 +1100
+Subject: Fix over-zealous flush_disk when changing device size.
+
+From: NeilBrown <neilb@suse.de>
+
+commit 93b270f76e7ef3b81001576860c2701931cdc78b upstream.
+
+There are two cases when we call flush_disk.
+In one, the device has disappeared (check_disk_change), so any
+data we hold becomes irrelevant.
+In the other, the device has changed size (check_disk_size_change),
+so data we hold may be irrelevant.
+
+In both cases it makes sense to discard any 'clean' buffers,
+so they will be read back from the device if needed.
+
+In the former case it makes sense to discard 'dirty' buffers
+as there will never be anywhere safe to write the data. In the
+second case it does *not* make sense to discard dirty buffers,
+as that would lead to filesystem corruption when you simply enlarge
+the containing device.
+
+flush_disk calls __invalidate_device.
+__invalidate_device calls both invalidate_inodes and invalidate_bdev.
+
+invalidate_inodes *does* discard I_DIRTY inodes and this does lead
+to fs corruption.
+
+invalidate_bdev does *not* discard dirty pages, but I don't really care
+about that at present.
+
+So this patch adds a flag to __invalidate_device (calling it
+__invalidate_device2) to indicate whether dirty buffers should be
+killed, and this is passed to invalidate_inodes which can choose to
+skip dirty inodes.
+
+flush_disk then passes true from check_disk_change and false from
+check_disk_size_change.
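+
+Summarising the resulting call-site behaviour (a sketch, matching the
+hunks below):
+
+	/* media change / device gone: clean and dirty data are both useless */
+	flush_disk(bdev, true);
+
+	/* size change only: keep dirty data, drop clean buffers */
+	flush_disk(bdev, false);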
+
+dm avoids tripping over this problem by calling i_size_write directly
+rather than using check_disk_size_change.
+
+md does use check_disk_size_change and so is affected.
+
+This regression was introduced by commit 608aeef17a which causes
+check_disk_size_change to call flush_disk, so it is suitable for any
+kernel since 2.6.27.
+
+Acked-by: Jeff Moyer <jmoyer@redhat.com>
+Cc: Andrew Patterson <andrew.patterson@hp.com>
+Cc: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ block/genhd.c | 2 +-
+ drivers/block/floppy.c | 2 +-
+ fs/block_dev.c | 12 ++++++------
+ fs/inode.c | 9 ++++++++-
+ fs/internal.h | 2 +-
+ include/linux/fs.h | 2 +-
+ 6 files changed, 18 insertions(+), 11 deletions(-)
+
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -1285,7 +1285,7 @@ int invalidate_partition(struct gendisk
+ struct block_device *bdev = bdget_disk(disk, partno);
+ if (bdev) {
+ fsync_bdev(bdev);
+- res = __invalidate_device(bdev);
++ res = __invalidate_device(bdev, true);
+ bdput(bdev);
+ }
+ return res;
+--- a/drivers/block/floppy.c
++++ b/drivers/block/floppy.c
+@@ -3276,7 +3276,7 @@ static int set_geometry(unsigned int cmd
+ struct block_device *bdev = opened_bdev[cnt];
+ if (!bdev || ITYPE(drive_state[cnt].fd_device) != type)
+ continue;
+- __invalidate_device(bdev);
++ __invalidate_device(bdev, true);
+ }
+ mutex_unlock(&open_lock);
+ } else {
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -1217,9 +1217,9 @@ EXPORT_SYMBOL(open_by_devnum);
+ * when a disk has been changed -- either by a media change or online
+ * resize.
+ */
+-static void flush_disk(struct block_device *bdev)
++static void flush_disk(struct block_device *bdev, bool kill_dirty)
+ {
+- if (__invalidate_device(bdev)) {
++ if (__invalidate_device(bdev, kill_dirty)) {
+ char name[BDEVNAME_SIZE] = "";
+
+ if (bdev->bd_disk)
+@@ -1256,7 +1256,7 @@ void check_disk_size_change(struct gendi
+ "%s: detected capacity change from %lld to %lld\n",
+ name, bdev_size, disk_size);
+ i_size_write(bdev->bd_inode, disk_size);
+- flush_disk(bdev);
++ flush_disk(bdev, false);
+ }
+ }
+ EXPORT_SYMBOL(check_disk_size_change);
+@@ -1308,7 +1308,7 @@ int check_disk_change(struct block_devic
+ if (!bdops->media_changed(bdev->bd_disk))
+ return 0;
+
+- flush_disk(bdev);
++ flush_disk(bdev, true);
+ if (bdops->revalidate_disk)
+ bdops->revalidate_disk(bdev->bd_disk);
+ return 1;
+@@ -1776,7 +1776,7 @@ void close_bdev_exclusive(struct block_d
+
+ EXPORT_SYMBOL(close_bdev_exclusive);
+
+-int __invalidate_device(struct block_device *bdev)
++int __invalidate_device(struct block_device *bdev, bool kill_dirty)
+ {
+ struct super_block *sb = get_super(bdev);
+ int res = 0;
+@@ -1789,7 +1789,7 @@ int __invalidate_device(struct block_dev
+ * hold).
+ */
+ shrink_dcache_sb(sb);
+- res = invalidate_inodes(sb);
++ res = invalidate_inodes(sb, kill_dirty);
+ drop_super(sb);
+ }
+ invalidate_bdev(bdev);
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -532,11 +532,14 @@ void evict_inodes(struct super_block *sb
+ /**
+ * invalidate_inodes - attempt to free all inodes on a superblock
+ * @sb: superblock to operate on
++ * @kill_dirty: flag to guide handling of dirty inodes
+ *
+ * Attempts to free all inodes for a given superblock. If there were any
+ * busy inodes return a non-zero value, else zero.
++ * If @kill_dirty is set, discard dirty inodes too, otherwise treat
++ * them as busy.
+ */
+-int invalidate_inodes(struct super_block *sb)
++int invalidate_inodes(struct super_block *sb, bool kill_dirty)
+ {
+ int busy = 0;
+ struct inode *inode, *next;
+@@ -548,6 +551,10 @@ int invalidate_inodes(struct super_block
+ list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
+ if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE))
+ continue;
++ if (inode->i_state & I_DIRTY && !kill_dirty) {
++ busy = 1;
++ continue;
++ }
+ if (atomic_read(&inode->i_count)) {
+ busy = 1;
+ continue;
+--- a/fs/internal.h
++++ b/fs/internal.h
+@@ -107,4 +107,4 @@ extern void release_open_intent(struct n
+ */
+ extern int get_nr_dirty_inodes(void);
+ extern void evict_inodes(struct super_block *);
+-extern int invalidate_inodes(struct super_block *);
++extern int invalidate_inodes(struct super_block *, bool);
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -2118,7 +2118,7 @@ extern void check_disk_size_change(struc
+ struct block_device *bdev);
+ extern int revalidate_disk(struct gendisk *);
+ extern int check_disk_change(struct block_device *);
+-extern int __invalidate_device(struct block_device *);
++extern int __invalidate_device(struct block_device *, bool);
+ extern int invalidate_partition(struct gendisk *, int);
+ #endif
+ unsigned long invalidate_mapping_pages(struct address_space *mapping,
--- /dev/null
+From 294f6cf48666825d23c9372ef37631232746e40d Mon Sep 17 00:00:00 2001
+From: Timo Warns <Warns@pre-sense.de>
+Date: Fri, 25 Feb 2011 14:44:21 -0800
+Subject: ldm: corrupted partition table can cause kernel oops
+
+From: Timo Warns <Warns@pre-sense.de>
+
+commit 294f6cf48666825d23c9372ef37631232746e40d upstream.
+
+The kernel automatically evaluates partition tables of storage devices.
+The code for evaluating LDM partitions (in fs/partitions/ldm.c) contains
+a bug that causes a kernel oops on certain corrupted LDM partitions.
+The oops appears to take down a kernel subsystem, because after it
+occurs the kernel no longer recognizes newly connected storage devices.
+
+The patch changes ldm_parse_vmdb() to validate the value of vblk_size.
+
+Signed-off-by: Timo Warns <warns@pre-sense.de>
+Cc: Eugene Teo <eugeneteo@kernel.sg>
+Acked-by: Richard Russon <ldm@flatcap.org>
+Cc: Harvey Harrison <harvey.harrison@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/partitions/ldm.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/fs/partitions/ldm.c
++++ b/fs/partitions/ldm.c
+@@ -251,6 +251,11 @@ static bool ldm_parse_vmdb (const u8 *da
+ }
+
+ vm->vblk_size = get_unaligned_be32(data + 0x08);
++ if (vm->vblk_size == 0) {
++ ldm_error ("Illegal VBLK size");
++ return false;
++ }
++
+ vm->vblk_offset = get_unaligned_be32(data + 0x0C);
+ vm->last_vblk_seq = get_unaligned_be32(data + 0x04);
+
--- /dev/null
+From da9cf5050a2e3dbc3cf26a8d908482eb4485ed49 Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.de>
+Date: Mon, 21 Feb 2011 18:25:57 +1100
+Subject: md: avoid spinlock problem in blk_throtl_exit
+
+From: NeilBrown <neilb@suse.de>
+
+commit da9cf5050a2e3dbc3cf26a8d908482eb4485ed49 upstream.
+
+blk_throtl_exit assumes that ->queue_lock still exists,
+so make sure that it does.
+To do this, we stop redirecting ->queue_lock to conf->device_lock
+and leave it pointing where it is initialised - __queue_lock.
+
+As the blk_plug functions check the ->queue_lock is held, we now
+take that spin_lock explicitly around the plug functions. We don't
+need the locking, just the warning removal.
+
+This is needed for any kernel with the blk_throtl code, which is
+2.6.37 and later.
+
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/md/linear.c | 1 -
+ drivers/md/multipath.c | 1 -
+ drivers/md/raid0.c | 1 -
+ drivers/md/raid1.c | 6 ++++--
+ drivers/md/raid10.c | 7 ++++---
+ drivers/md/raid5.c | 1 -
+ 6 files changed, 8 insertions(+), 9 deletions(-)
+
+--- a/drivers/md/linear.c
++++ b/drivers/md/linear.c
+@@ -216,7 +216,6 @@ static int linear_run (mddev_t *mddev)
+
+ if (md_check_no_bitmap(mddev))
+ return -EINVAL;
+- mddev->queue->queue_lock = &mddev->queue->__queue_lock;
+ conf = linear_conf(mddev, mddev->raid_disks);
+
+ if (!conf)
+--- a/drivers/md/multipath.c
++++ b/drivers/md/multipath.c
+@@ -435,7 +435,6 @@ static int multipath_run (mddev_t *mddev
+ * bookkeeping area. [whatever we allocate in multipath_run(),
+ * should be freed in multipath_stop()]
+ */
+- mddev->queue->queue_lock = &mddev->queue->__queue_lock;
+
+ conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL);
+ mddev->private = conf;
+--- a/drivers/md/raid0.c
++++ b/drivers/md/raid0.c
+@@ -353,7 +353,6 @@ static int raid0_run(mddev_t *mddev)
+ if (md_check_no_bitmap(mddev))
+ return -EINVAL;
+ blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
+- mddev->queue->queue_lock = &mddev->queue->__queue_lock;
+
+ /* if private is not null, we are here after takeover */
+ if (mddev->private == NULL) {
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -593,7 +593,10 @@ static int flush_pending_writes(conf_t *
+ if (conf->pending_bio_list.head) {
+ struct bio *bio;
+ bio = bio_list_get(&conf->pending_bio_list);
++ /* Only take the spinlock to quiet a warning */
++ spin_lock(conf->mddev->queue->queue_lock);
+ blk_remove_plug(conf->mddev->queue);
++ spin_unlock(conf->mddev->queue->queue_lock);
+ spin_unlock_irq(&conf->device_lock);
+ /* flush any pending bitmap writes to
+ * disk before proceeding w/ I/O */
+@@ -959,7 +962,7 @@ static int make_request(mddev_t *mddev,
+ atomic_inc(&r1_bio->remaining);
+ spin_lock_irqsave(&conf->device_lock, flags);
+ bio_list_add(&conf->pending_bio_list, mbio);
+- blk_plug_device(mddev->queue);
++ blk_plug_device_unlocked(mddev->queue);
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ }
+ r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL);
+@@ -2024,7 +2027,6 @@ static int run(mddev_t *mddev)
+ if (IS_ERR(conf))
+ return PTR_ERR(conf);
+
+- mddev->queue->queue_lock = &conf->device_lock;
+ list_for_each_entry(rdev, &mddev->disks, same_set) {
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -662,7 +662,10 @@ static int flush_pending_writes(conf_t *
+ if (conf->pending_bio_list.head) {
+ struct bio *bio;
+ bio = bio_list_get(&conf->pending_bio_list);
++ /* Spinlock only taken to quiet a warning */
++ spin_lock(conf->mddev->queue->queue_lock);
+ blk_remove_plug(conf->mddev->queue);
++ spin_unlock(conf->mddev->queue->queue_lock);
+ spin_unlock_irq(&conf->device_lock);
+ /* flush any pending bitmap writes to disk
+ * before proceeding w/ I/O */
+@@ -971,7 +974,7 @@ static int make_request(mddev_t *mddev,
+ atomic_inc(&r10_bio->remaining);
+ spin_lock_irqsave(&conf->device_lock, flags);
+ bio_list_add(&conf->pending_bio_list, mbio);
+- blk_plug_device(mddev->queue);
++ blk_plug_device_unlocked(mddev->queue);
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ }
+
+@@ -2303,8 +2306,6 @@ static int run(mddev_t *mddev)
+ if (!conf)
+ goto out;
+
+- mddev->queue->queue_lock = &conf->device_lock;
+-
+ mddev->thread = conf->thread;
+ conf->thread = NULL;
+
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -5205,7 +5205,6 @@ static int run(mddev_t *mddev)
+
+ mddev->queue->backing_dev_info.congested_data = mddev;
+ mddev->queue->backing_dev_info.congested_fn = raid5_congested;
+- mddev->queue->queue_lock = &conf->device_lock;
+ mddev->queue->unplug_fn = raid5_unplug_queue;
+
+ chunk_size = mddev->chunk_sectors << 9;
--- /dev/null
+From 8f5f02c460b7ca74ce55ce126ce0c1e58a3f923d Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.de>
+Date: Wed, 16 Feb 2011 13:58:51 +1100
+Subject: md: correctly handle probe of an 'mdp' device.
+
+From: NeilBrown <neilb@suse.de>
+
+commit 8f5f02c460b7ca74ce55ce126ce0c1e58a3f923d upstream.
+
+'mdp' devices are md devices with preallocated device numbers
+for partitions. As such it is possible to mknod and open a partition
+before opening the whole device.
+
+This causes md_probe() to be called with the device number of a
+partition, which in turn calls mddev_find() with that number.
+
+However mddev_find expects the number of a 'whole device' and
+does the wrong thing with partition numbers.
+
+So add code to mddev_find to remove the 'partition' part of
+a device number and just work with the 'whole device'.
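+
+A tiny standalone illustration of the masking (taking MdpMinorShift to
+be 6, its md.h value, i.e. 64 minors per mdp device; the real code
+operates on the full dev_t and only when the major is not MD_MAJOR):
+
+	#include <stdio.h>
+
+	#define MdpMinorShift 6
+
+	int main(void)
+	{
+		unsigned int minor = 130;	/* e.g. partition 2 of the third mdp device */
+		unsigned int whole = minor & ~((1U << MdpMinorShift) - 1);
+
+		printf("probe of minor %u is treated as whole device minor %u\n",
+		       minor, whole);
+		return 0;
+	}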
+
+This patch addresses https://bugzilla.kernel.org/show_bug.cgi?id=28652
+
+Reported-by: hkmaly@bigfoot.com
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/md/md.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -553,6 +553,9 @@ static mddev_t * mddev_find(dev_t unit)
+ {
+ mddev_t *mddev, *new = NULL;
+
++ if (unit && MAJOR(unit) != MD_MAJOR)
++ unit &= ~((1<<MdpMinorShift)-1);
++
+ retry:
+ spin_lock(&all_mddevs_lock);
+
--- /dev/null
+From f0b4f7e2f29af678bd9af43422c537dcb6008603 Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.de>
+Date: Thu, 24 Feb 2011 17:26:41 +1100
+Subject: md: Fix - again - partition detection when array becomes active
+
+From: NeilBrown <neilb@suse.de>
+
+commit f0b4f7e2f29af678bd9af43422c537dcb6008603 upstream.
+
+Revert
+ b821eaa572fd737faaf6928ba046e571526c36c6
+and
+ f3b99be19ded511a1bf05a148276239d9f13eefa
+
+When I wrote the first of these I had a wrong idea about the
+lifetime of 'struct block_device'. It can disappear at any time that
+the block device is not open if it falls out of the inode cache.
+
+So relying on the 'size' recorded with it to detect when the
+device size has changed, and hence that we need to revalidate, is wrong.
+
+Rather, we really do need the 'changed' attribute stored directly in
+the mddev and set/tested as appropriate.
+
+Without this patch, a sequence of:
+ mknod / open / close / unlink
+
+(which can cause a block_device to be created and then destroyed)
+will result in a rescan of the partition table and consequent removal
+and addition of partitions.
+Several of these in a row can get udev racing to create and unlink and
+other code can get confused.
+
+With the patch, the rescan is only performed when needed and so there
+are no races.
+
+This is suitable for any stable kernel from 2.6.35.
+
+Reported-by: "Wojcik, Krzysztof" <krzysztof.wojcik@intel.com>
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/md/md.c | 22 +++++++++++++++++++++-
+ drivers/md/md.h | 2 ++
+ 2 files changed, 23 insertions(+), 1 deletion(-)
+
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -4614,6 +4614,7 @@ static int do_md_run(mddev_t *mddev)
+ }
+ set_capacity(mddev->gendisk, mddev->array_sectors);
+ revalidate_disk(mddev->gendisk);
++ mddev->changed = 1;
+ kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
+ out:
+ return err;
+@@ -4702,6 +4703,7 @@ static void md_clean(mddev_t *mddev)
+ mddev->sync_speed_min = mddev->sync_speed_max = 0;
+ mddev->recovery = 0;
+ mddev->in_sync = 0;
++ mddev->changed = 0;
+ mddev->degraded = 0;
+ mddev->safemode = 0;
+ mddev->bitmap_info.offset = 0;
+@@ -4811,6 +4813,7 @@ static int do_md_stop(mddev_t * mddev, i
+
+ set_capacity(disk, 0);
+ mutex_unlock(&mddev->open_mutex);
++ mddev->changed = 1;
+ revalidate_disk(disk);
+
+ if (mddev->ro)
+@@ -5994,7 +5997,7 @@ static int md_open(struct block_device *
+ atomic_inc(&mddev->openers);
+ mutex_unlock(&mddev->open_mutex);
+
+- check_disk_size_change(mddev->gendisk, bdev);
++ check_disk_change(bdev);
+ out:
+ return err;
+ }
+@@ -6009,6 +6012,21 @@ static int md_release(struct gendisk *di
+
+ return 0;
+ }
++
++static int md_media_changed(struct gendisk *disk)
++{
++ mddev_t *mddev = disk->private_data;
++
++ return mddev->changed;
++}
++
++static int md_revalidate(struct gendisk *disk)
++{
++ mddev_t *mddev = disk->private_data;
++
++ mddev->changed = 0;
++ return 0;
++}
+ static const struct block_device_operations md_fops =
+ {
+ .owner = THIS_MODULE,
+@@ -6019,6 +6037,8 @@ static const struct block_device_operati
+ .compat_ioctl = md_compat_ioctl,
+ #endif
+ .getgeo = md_getgeo,
++ .media_changed = md_media_changed,
++ .revalidate_disk= md_revalidate,
+ };
+
+ static int md_thread(void * arg)
+--- a/drivers/md/md.h
++++ b/drivers/md/md.h
+@@ -270,6 +270,8 @@ struct mddev_s
+ atomic_t active; /* general refcount */
+ atomic_t openers; /* number of active opens */
+
++ int changed; /* True if we might need to
++ * reread partition info */
+ int degraded; /* whether md should consider
+ * adding a spare
+ */
--- /dev/null
+From 29723fccc837d20039078f7a571e8d457eb0d6c6 Mon Sep 17 00:00:00 2001
+From: Namhyung Kim <namhyung@gmail.com>
+Date: Fri, 25 Feb 2011 14:44:25 -0800
+Subject: mm: fix dubious code in __count_immobile_pages()
+
+From: Namhyung Kim <namhyung@gmail.com>
+
+commit 29723fccc837d20039078f7a571e8d457eb0d6c6 upstream.
+
+When pfn_valid_within() failed, 'iter' was incremented twice.
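+
+The practical effect was that the pfn immediately after an invalid one
+was never examined. A minimal standalone illustration (the odd/even
+test merely stands in for pfn_valid_within() failing):
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		unsigned long iter;
+
+		for (iter = 0; iter < 8; iter++) {
+			if (iter % 2) {	/* stand-in for !pfn_valid_within(check) */
+				iter++;	/* old code: the next pfn is silently skipped too */
+				continue;
+			}
+			printf("checked pfn offset %lu\n", iter);
+		}
+		return 0;
+	}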
+
+Signed-off-by: Namhyung Kim <namhyung@gmail.com>
+Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
+Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ mm/page_alloc.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -5335,10 +5335,9 @@ __count_immobile_pages(struct zone *zone
+ for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
+ unsigned long check = pfn + iter;
+
+- if (!pfn_valid_within(check)) {
+- iter++;
++ if (!pfn_valid_within(check))
+ continue;
+- }
++
+ page = pfn_to_page(check);
+ if (!page_count(page)) {
+ if (PageBuddy(page))
--- /dev/null
+From 805bdaec1a44155db35f6ee5410d6bbc365324a8 Mon Sep 17 00:00:00 2001
+From: Rafael J. Wysocki <rjw@sisk.pl>
+Date: Thu, 24 Feb 2011 11:10:01 +0100
+Subject: PM: Make ACPI wakeup from S5 work again when CONFIG_PM_SLEEP is unset
+
+From: Rafael J. Wysocki <rjw@sisk.pl>
+
+commit 805bdaec1a44155db35f6ee5410d6bbc365324a8 upstream.
+
+Commit 074037e (PM / Wakeup: Introduce wakeup source objects and
+event statistics (v3)) caused ACPI wakeup to work only if
+CONFIG_PM_SLEEP is set, although it also worked with CONFIG_PM_SLEEP
+unset before. This can be fixed by making device_set_wakeup_enable(),
+device_init_wakeup() and device_may_wakeup() work in the same way
+as before commit 074037e when CONFIG_PM_SLEEP is unset.
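+
+The driver-side pattern that this keeps working with CONFIG_PM_SLEEP
+unset looks roughly like the following (a generic sketch; pdev and irq
+are placeholders, not taken from this patch):
+
+	/* probe: mark the device wakeup-capable and enabled by default */
+	device_init_wakeup(&pdev->dev, true);
+
+	/* suspend: arm the wakeup source only if wakeup is still enabled */
+	if (device_may_wakeup(&pdev->dev))
+		enable_irq_wake(irq);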
+
+Reported-and-tested-by: Justin Maggard <jmaggard10@gmail.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/linux/pm.h | 2 ++
+ include/linux/pm_wakeup.h | 25 ++++++++++++++-----------
+ 2 files changed, 16 insertions(+), 11 deletions(-)
+
+--- a/include/linux/pm.h
++++ b/include/linux/pm.h
+@@ -470,6 +470,8 @@ struct dev_pm_info {
+ struct list_head entry;
+ struct completion completion;
+ struct wakeup_source *wakeup;
++#else
++ unsigned int should_wakeup:1;
+ #endif
+ #ifdef CONFIG_PM_RUNTIME
+ struct timer_list suspend_timer;
+--- a/include/linux/pm_wakeup.h
++++ b/include/linux/pm_wakeup.h
+@@ -109,11 +109,6 @@ static inline bool device_can_wakeup(str
+ return dev->power.can_wakeup;
+ }
+
+-static inline bool device_may_wakeup(struct device *dev)
+-{
+- return false;
+-}
+-
+ static inline struct wakeup_source *wakeup_source_create(const char *name)
+ {
+ return NULL;
+@@ -134,24 +129,32 @@ static inline void wakeup_source_unregis
+
+ static inline int device_wakeup_enable(struct device *dev)
+ {
+- return -EINVAL;
++ dev->power.should_wakeup = true;
++ return 0;
+ }
+
+ static inline int device_wakeup_disable(struct device *dev)
+ {
++ dev->power.should_wakeup = false;
+ return 0;
+ }
+
+-static inline int device_init_wakeup(struct device *dev, bool val)
++static inline int device_set_wakeup_enable(struct device *dev, bool enable)
+ {
+- dev->power.can_wakeup = val;
+- return val ? -EINVAL : 0;
++ dev->power.should_wakeup = enable;
++ return 0;
+ }
+
++static inline int device_init_wakeup(struct device *dev, bool val)
++{
++ device_set_wakeup_capable(dev, val);
++ device_set_wakeup_enable(dev, val);
++ return 0;
++}
+
+-static inline int device_set_wakeup_enable(struct device *dev, bool enable)
++static inline bool device_may_wakeup(struct device *dev)
+ {
+- return -EINVAL;
++ return dev->power.can_wakeup && dev->power.should_wakeup;
+ }
+
+ static inline void __pm_stay_awake(struct wakeup_source *ws) {}
staging-usbip-vhci-give-back-urbs-from-in-flight-unlink-requests.patch
staging-usbip-vhci-refuse-to-enqueue-for-dead-connections.patch
staging-usbip-vhci-use-urb-dev-portnum-to-find-port.patch
+epoll-prevent-creating-circular-epoll-structures.patch
+swiotlb-fix-wrong-panic.patch
+ldm-corrupted-partition-table-can-cause-kernel-oops.patch
+drivers-rtc-rtc-ds3232.c-fix-time-range-difference-between-linux-and-rtc-chip.patch
+mm-fix-dubious-code-in-__count_immobile_pages.patch
+md-correctly-handle-probe-of-an-mdp-device.patch
+md-avoid-spinlock-problem-in-blk_throtl_exit.patch
+md-fix-again-partition-detection-when-array-becomes-active.patch
+fix-over-zealous-flush_disk-when-changing-device-size.patch
+pm-make-acpi-wakeup-from-s5-work-again-when-config_pm_sleep-is-unset.patch
+x86-quirk-fix-polarity-for-irq0-pin2-override-on-sb800-systems.patch
+xhci-avoid-bug-in-interrupt-context.patch
+xhci-clarify-some-expressions-in-the-trb-math.patch
+xhci-fix-errors-in-the-running-total-calculations-in-the-trb-math.patch
+xhci-fix-an-error-in-count_sg_trbs_needed.patch
+usb-reset-usb-3.0-devices-on-re-discovery.patch
+usb-prevent-buggy-hubs-from-crashing-the-usb-stack.patch
+usb-musb-core-set-has_tt-flag.patch
--- /dev/null
+From fba99fa38b023224680308a482e12a0eca87e4e1 Mon Sep 17 00:00:00 2001
+From: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+Date: Fri, 25 Feb 2011 14:44:16 -0800
+Subject: swiotlb: fix wrong panic
+
+From: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+
+commit fba99fa38b023224680308a482e12a0eca87e4e1 upstream.
+
+swiotlb's map_page wrongly calls panic() when it can't find a buffer that
+fits the device's DMA mask. It should return an error instead.
+
+Devices with an odd DMA mask (i.e. under 4G), like the b44 network card,
+hit this bug (the system crashes):
+
+ http://marc.info/?l=linux-kernel&m=129648943830106&w=2
+
+If swiotlb returns an error, the b44 driver can use its own bouncing
+mechanism.
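+
+The driver-side fallback then works through the usual mapping-error
+check (a generic sketch, not the actual b44 code; dev, buf and len are
+placeholders):
+
+	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
+
+	if (dma_mapping_error(dev, addr)) {
+		/* mapping beyond the device's DMA mask failed:
+		 * fall back to the driver's own bounce buffer */
+	}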
+
+Reported-by: Chuck Ebbert <cebbert@redhat.com>
+Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+Tested-by: Arkadiusz Miskiewicz <arekm@maven.pl>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ lib/swiotlb.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/lib/swiotlb.c
++++ b/lib/swiotlb.c
+@@ -686,8 +686,10 @@ dma_addr_t swiotlb_map_page(struct devic
+ /*
+ * Ensure that the address returned is DMA'ble
+ */
+- if (!dma_capable(dev, dev_addr, size))
+- panic("map_single: bounce buffer is not DMA'ble");
++ if (!dma_capable(dev, dev_addr, size)) {
++ swiotlb_tbl_unmap_single(dev, map, size, dir);
++ dev_addr = swiotlb_virt_to_bus(dev, io_tlb_overflow_buffer);
++ }
+
+ return dev_addr;
+ }
--- /dev/null
+From ec95d35a6bd0047f05fe8a21e6c52f8bb418da55 Mon Sep 17 00:00:00 2001
+From: Felipe Balbi <balbi@ti.com>
+Date: Thu, 24 Feb 2011 10:36:53 +0200
+Subject: usb: musb: core: set has_tt flag
+
+From: Felipe Balbi <balbi@ti.com>
+
+commit ec95d35a6bd0047f05fe8a21e6c52f8bb418da55 upstream.
+
+MUSB is a non-standard host implementation which
+can handle all speeds with the same core. We need
+to set the has_tt flag after commit
+d199c96d41d80a567493e12b8e96ea056a1350c1 (USB: prevent
+buggy hubs from crashing the USB stack) in order for
+MUSB HCD to continue working.
+
+Signed-off-by: Felipe Balbi <balbi@ti.com>
+Cc: Alan Stern <stern@rowland.harvard.edu>
+Tested-by: Michael Jones <michael.jones@matrix-vision.de>
+Tested-by: Alexander Holler <holler@ahsoftware.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/usb/musb/musb_core.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/usb/musb/musb_core.c
++++ b/drivers/usb/musb/musb_core.c
+@@ -1880,6 +1880,7 @@ allocate_instance(struct device *dev,
+ INIT_LIST_HEAD(&musb->out_bulk);
+
+ hcd->uses_new_polling = 1;
++ hcd->has_tt = 1;
+
+ musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
+ musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
--- /dev/null
+From d199c96d41d80a567493e12b8e96ea056a1350c1 Mon Sep 17 00:00:00 2001
+From: Alan Stern <stern@rowland.harvard.edu>
+Date: Mon, 31 Jan 2011 10:56:37 -0500
+Subject: USB: prevent buggy hubs from crashing the USB stack
+
+From: Alan Stern <stern@rowland.harvard.edu>
+
+commit d199c96d41d80a567493e12b8e96ea056a1350c1 upstream.
+
+If anyone comes across a high-speed hub that (by mistake or by design)
+claims to have no Transaction Translators, plugging a full- or
+low-speed device into it will cause the USB stack to crash. This
+patch (as1446) prevents the problem by ignoring such devices, since
+the kernel has no way to communicate with them.
+
+Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
+Tested-by: Perry Neben <neben@vmware.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/usb/core/hub.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -2740,6 +2740,11 @@ hub_port_init (struct usb_hub *hub, stru
+ udev->ttport = hdev->ttport;
+ } else if (udev->speed != USB_SPEED_HIGH
+ && hdev->speed == USB_SPEED_HIGH) {
++ if (!hub->tt.hub) {
++ dev_err(&udev->dev, "parent hub has no TT\n");
++ retval = -EINVAL;
++ goto fail;
++ }
+ udev->tt = &hub->tt;
+ udev->ttport = port1;
+ }
--- /dev/null
+From 07194ab7be63a972096309ab0ea747df455c6a20 Mon Sep 17 00:00:00 2001
+From: Luben Tuikov <ltuikov@yahoo.com>
+Date: Fri, 11 Feb 2011 11:33:10 -0800
+Subject: USB: Reset USB 3.0 devices on (re)discovery
+
+From: Luben Tuikov <ltuikov@yahoo.com>
+
+commit 07194ab7be63a972096309ab0ea747df455c6a20 upstream.
+
+If the device isn't reset, the XHCI HCD sends
+SET ADDRESS to address 0 while the device is
+already in Addressed state, and the request is
+dropped on the floor as it is addressed to the
+default address. This sequence of events, which this
+patch fixes, looks like this:
+
+usb_reset_and_verify_device()
+ hub_port_init()
+ hub_set_address()
+ SET_ADDRESS to 0 with 1
+ usb_get_device_descriptor(udev, 8)
+ usb_get_device_descriptor(udev, 18)
+ descriptors_changed() --> goto re_enumerate:
+ hub_port_logical_disconnect()
+ kick_khubd()
+
+And then:
+
+hub_events()
+ hub_port_connect_change()
+ usb_disconnect()
+ usb_disable_device()
+ new device struct
+ sets device state to Powered
+ choose_address()
+ hub_port_init() <-- no reset, but SET ADDRESS to 0 with 1, timeout!
+
+The solution is to always reset the device in
+hub_port_init() to put it in a known state.
+
+Note from Sarah Sharp:
+
+This patch should be queued for stable trees all the way back to 2.6.34,
+since that was the first kernel that supported configured device reset.
+The code this patch touches has been there since 2.6.32, but the bug
+would never be hit before 2.6.34 because the xHCI driver would
+completely reject an attempt to reset a configured device under xHCI.
+
+Signed-off-by: Luben Tuikov <ltuikov@yahoo.com>
+Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/usb/core/hub.c | 18 +++++++-----------
+ 1 file changed, 7 insertions(+), 11 deletions(-)
+
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -2672,17 +2672,13 @@ hub_port_init (struct usb_hub *hub, stru
+
+ mutex_lock(&usb_address0_mutex);
+
+- if (!udev->config && oldspeed == USB_SPEED_SUPER) {
+- /* Don't reset USB 3.0 devices during an initial setup */
+- usb_set_device_state(udev, USB_STATE_DEFAULT);
+- } else {
+- /* Reset the device; full speed may morph to high speed */
+- /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
+- retval = hub_port_reset(hub, port1, udev, delay);
+- if (retval < 0) /* error or disconnect */
+- goto fail;
+- /* success, speed is known */
+- }
++ /* Reset the device; full speed may morph to high speed */
++ /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
++ retval = hub_port_reset(hub, port1, udev, delay);
++ if (retval < 0) /* error or disconnect */
++ goto fail;
++ /* success, speed is known */
++
+ retval = -ENODEV;
+
+ if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed) {
--- /dev/null
+From 7f74f8f28a2bd9db9404f7d364e2097a0c42cc12 Mon Sep 17 00:00:00 2001
+From: Andreas Herrmann <andreas.herrmann3@amd.com>
+Date: Thu, 24 Feb 2011 15:53:46 +0100
+Subject: x86 quirk: Fix polarity for IRQ0 pin2 override on SB800 systems
+
+From: Andreas Herrmann <andreas.herrmann3@amd.com>
+
+commit 7f74f8f28a2bd9db9404f7d364e2097a0c42cc12 upstream.
+
+On some SB800 systems polarity for IOAPIC pin2 is wrongly
+specified as low active by BIOS. This caused system hangs after
+resume from S3 when HPET was used in one-shot mode on such
+systems because a timer interrupt was missed (HPET signal is
+high active).
+
+For more details see:
+
+ http://marc.info/?l=linux-kernel&m=129623757413868
+
+Tested-by: Manoj Iyer <manoj.iyer@canonical.com>
+Tested-by: Andre Przywara <andre.przywara@amd.com>
+Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
+Cc: Borislav Petkov <borislav.petkov@amd.com>
+LKML-Reference: <20110224145346.GD3658@alberich.amd.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/include/asm/acpi.h | 1 +
+ arch/x86/kernel/acpi/boot.c | 14 ++++++++++----
+ arch/x86/kernel/early-quirks.c | 16 +++++++---------
+ 3 files changed, 18 insertions(+), 13 deletions(-)
+
+--- a/arch/x86/include/asm/acpi.h
++++ b/arch/x86/include/asm/acpi.h
+@@ -88,6 +88,7 @@ extern int acpi_disabled;
+ extern int acpi_pci_disabled;
+ extern int acpi_skip_timer_override;
+ extern int acpi_use_timer_override;
++extern int acpi_fix_pin2_polarity;
+
+ extern u8 acpi_sci_flags;
+ extern int acpi_sci_override_gsi;
+--- a/arch/x86/kernel/acpi/boot.c
++++ b/arch/x86/kernel/acpi/boot.c
+@@ -72,6 +72,7 @@ u8 acpi_sci_flags __initdata;
+ int acpi_sci_override_gsi __initdata;
+ int acpi_skip_timer_override __initdata;
+ int acpi_use_timer_override __initdata;
++int acpi_fix_pin2_polarity __initdata;
+
+ #ifdef CONFIG_X86_LOCAL_APIC
+ static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
+@@ -410,10 +411,15 @@ acpi_parse_int_src_ovr(struct acpi_subta
+ return 0;
+ }
+
+- if (acpi_skip_timer_override &&
+- intsrc->source_irq == 0 && intsrc->global_irq == 2) {
+- printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
+- return 0;
++ if (intsrc->source_irq == 0 && intsrc->global_irq == 2) {
++ if (acpi_skip_timer_override) {
++ printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
++ return 0;
++ }
++ if (acpi_fix_pin2_polarity && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
++ intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK;
++ printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n");
++ }
+ }
+
+ mp_override_legacy_irq(intsrc->source_irq,
+--- a/arch/x86/kernel/early-quirks.c
++++ b/arch/x86/kernel/early-quirks.c
+@@ -143,15 +143,10 @@ static void __init ati_bugs(int num, int
+
+ static u32 __init ati_sbx00_rev(int num, int slot, int func)
+ {
+- u32 old, d;
++ u32 d;
+
+- d = read_pci_config(num, slot, func, 0x70);
+- old = d;
+- d &= ~(1<<8);
+- write_pci_config(num, slot, func, 0x70, d);
+ d = read_pci_config(num, slot, func, 0x8);
+ d &= 0xff;
+- write_pci_config(num, slot, func, 0x70, old);
+
+ return d;
+ }
+@@ -160,13 +155,16 @@ static void __init ati_bugs_contd(int nu
+ {
+ u32 d, rev;
+
+- if (acpi_use_timer_override)
+- return;
+-
+ rev = ati_sbx00_rev(num, slot, func);
++ if (rev >= 0x40)
++ acpi_fix_pin2_polarity = 1;
++
+ if (rev > 0x13)
+ return;
+
++ if (acpi_use_timer_override)
++ return;
++
+ /* check for IRQ0 interrupt swap */
+ d = read_pci_config(num, slot, func, 0x64);
+ if (!(d & (1<<14)))
--- /dev/null
+From 68e41c5d032668e2905404afbef75bc58be179d6 Mon Sep 17 00:00:00 2001
+From: Paul Zimmerman <Paul.Zimmerman@synopsys.com>
+Date: Sat, 12 Feb 2011 14:06:06 -0800
+Subject: xhci: Avoid BUG() in interrupt context
+
+From: Paul Zimmerman <Paul.Zimmerman@synopsys.com>
+
+commit 68e41c5d032668e2905404afbef75bc58be179d6 upstream.
+
+Change the BUGs in xhci_find_new_dequeue_state() to WARN_ONs, to avoid
+bringing down the box if one of them is hit.
+
+This patch should be queued for stable kernels back to 2.6.31.
+
+Signed-off-by: Paul Zimmerman <paulz@synopsys.com>
+Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/usb/host/xhci-ring.c | 13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -479,8 +479,11 @@ void xhci_find_new_dequeue_state(struct
+ state->new_deq_seg = find_trb_seg(cur_td->start_seg,
+ dev->eps[ep_index].stopped_trb,
+ &state->new_cycle_state);
+- if (!state->new_deq_seg)
+- BUG();
++ if (!state->new_deq_seg) {
++ WARN_ON(1);
++ return;
++ }
++
+ /* Dig out the cycle state saved by the xHC during the stop ep cmd */
+ xhci_dbg(xhci, "Finding endpoint context\n");
+ ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
+@@ -491,8 +494,10 @@ void xhci_find_new_dequeue_state(struct
+ state->new_deq_seg = find_trb_seg(state->new_deq_seg,
+ state->new_deq_ptr,
+ &state->new_cycle_state);
+- if (!state->new_deq_seg)
+- BUG();
++ if (!state->new_deq_seg) {
++ WARN_ON(1);
++ return;
++ }
+
+ trb = &state->new_deq_ptr->generic;
+ if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
--- /dev/null
+From a2490187011cc2263117626615a581927d19f1d3 Mon Sep 17 00:00:00 2001
+From: Paul Zimmerman <Paul.Zimmerman@synopsys.com>
+Date: Sat, 12 Feb 2011 14:06:44 -0800
+Subject: xhci: Clarify some expressions in the TRB math
+
+From: Paul Zimmerman <Paul.Zimmerman@synopsys.com>
+
+commit a2490187011cc2263117626615a581927d19f1d3 upstream.
+
+This makes it easier to spot some problems, which will be fixed by the
+next patch in the series. Also change dev_dbg to dev_err in
+check_trb_math(), so any math errors will be visible even when running
+with debug disabled.
+
+Note: This patch changes the expressions containing
+"((1 << TRB_MAX_BUFF_SHIFT) - 1)" to use the equivalent
+"(TRB_MAX_BUFF_SIZE - 1)". No change in behavior is intended for
+those expressions.
+
+This patch should be queued for stable kernels back to 2.6.31.
+
+Signed-off-by: Paul Zimmerman <paulz@synopsys.com>
+Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/usb/host/xhci-ring.c | 22 ++++++++++------------
+ 1 file changed, 10 insertions(+), 12 deletions(-)
+
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -2374,7 +2374,7 @@ static unsigned int count_sg_trbs_needed
+
+ /* Scatter gather list entries may cross 64KB boundaries */
+ running_total = TRB_MAX_BUFF_SIZE -
+- (sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
++ (sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
+ if (running_total != 0)
+ num_trbs++;
+
+@@ -2404,11 +2404,11 @@ static unsigned int count_sg_trbs_needed
+ static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
+ {
+ if (num_trbs != 0)
+- dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
++ dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
+ "TRBs, %d left\n", __func__,
+ urb->ep->desc.bEndpointAddress, num_trbs);
+ if (running_total != urb->transfer_buffer_length)
+- dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
++ dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
+ "queued %#x (%d), asked for %#x (%d)\n",
+ __func__,
+ urb->ep->desc.bEndpointAddress,
+@@ -2540,8 +2540,7 @@ static int queue_bulk_sg_tx(struct xhci_
+ sg = urb->sg;
+ addr = (u64) sg_dma_address(sg);
+ this_sg_len = sg_dma_len(sg);
+- trb_buff_len = TRB_MAX_BUFF_SIZE -
+- (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
++ trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
+ trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
+ if (trb_buff_len > urb->transfer_buffer_length)
+ trb_buff_len = urb->transfer_buffer_length;
+@@ -2577,7 +2576,7 @@ static int queue_bulk_sg_tx(struct xhci_
+ (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
+ (unsigned int) addr + trb_buff_len);
+ if (TRB_MAX_BUFF_SIZE -
+- (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) {
++ (addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
+ xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
+ xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
+ (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
+@@ -2621,7 +2620,7 @@ static int queue_bulk_sg_tx(struct xhci_
+ }
+
+ trb_buff_len = TRB_MAX_BUFF_SIZE -
+- (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
++ (addr & (TRB_MAX_BUFF_SIZE - 1));
+ trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
+ if (running_total + trb_buff_len > urb->transfer_buffer_length)
+ trb_buff_len =
+@@ -2661,7 +2660,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *
+ num_trbs = 0;
+ /* How much data is (potentially) left before the 64KB boundary? */
+ running_total = TRB_MAX_BUFF_SIZE -
+- (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
++ (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
+
+ /* If there's some data on this 64KB chunk, or we have to send a
+ * zero-length transfer, we need at least one TRB
+@@ -2704,8 +2703,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *
+ /* How much data is in the first TRB? */
+ addr = (u64) urb->transfer_dma;
+ trb_buff_len = TRB_MAX_BUFF_SIZE -
+- (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+- if (urb->transfer_buffer_length < trb_buff_len)
++ (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
++ if (trb_buff_len > urb->transfer_buffer_length)
+ trb_buff_len = urb->transfer_buffer_length;
+
+ first_trb = true;
+@@ -2877,8 +2876,7 @@ static int count_isoc_trbs_needed(struct
+ addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
+ td_len = urb->iso_frame_desc[i].length;
+
+- running_total = TRB_MAX_BUFF_SIZE -
+- (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
++ running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
+ if (running_total != 0)
+ num_trbs++;
+
--- /dev/null
+From bcd2fde05341cef0052e49566ec88b406a521cf3 Mon Sep 17 00:00:00 2001
+From: Paul Zimmerman <Paul.Zimmerman@synopsys.com>
+Date: Sat, 12 Feb 2011 14:07:57 -0800
+Subject: xhci: Fix an error in count_sg_trbs_needed()
+
+From: Paul Zimmerman <Paul.Zimmerman@synopsys.com>
+
+commit bcd2fde05341cef0052e49566ec88b406a521cf3 upstream.
+
+The expression
+
+ while (running_total < sg_dma_len(sg))
+
+does not take into account that the remaining data length can be less
+than sg_dma_len(sg). In that case, running_total can end up being
+greater than the total data length, so an extra TRB is counted.
+Changing the expression to
+
+ while (running_total < sg_dma_len(sg) && running_total < temp)
+
+fixes that.
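+
+A standalone illustration with made-up sizes (64KB TRB segments, and an
+s/g entry longer than the data actually left; simplified, since the real
+code seeds running_total from the first, partially filled TRB):
+
+	#include <stdio.h>
+
+	#define TRB_MAX_BUFF_SIZE (1 << 16)	/* 64KB */
+
+	int main(void)
+	{
+		unsigned int sg_len = 3 * TRB_MAX_BUFF_SIZE;	/* s/g entry: 192KB */
+		unsigned int temp = TRB_MAX_BUFF_SIZE + 4096;	/* data left: 68KB */
+		unsigned int running_total, before = 0, after = 0;
+
+		for (running_total = 0; running_total < sg_len;
+		     running_total += TRB_MAX_BUFF_SIZE)
+			before++;	/* old condition: keeps counting past the data */
+		for (running_total = 0; running_total < sg_len && running_total < temp;
+		     running_total += TRB_MAX_BUFF_SIZE)
+			after++;
+		printf("TRBs counted: %u before the fix, %u after\n", before, after);
+		return 0;
+	}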
+
+This patch should be queued for stable kernels back to 2.6.31.
+
+Signed-off-by: Paul Zimmerman <paulz@synopsys.com>
+Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/usb/host/xhci-ring.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -2380,7 +2380,7 @@ static unsigned int count_sg_trbs_needed
+ num_trbs++;
+
+ /* How many more 64KB chunks to transfer, how many more TRBs? */
+- while (running_total < sg_dma_len(sg)) {
++ while (running_total < sg_dma_len(sg) && running_total < temp) {
+ num_trbs++;
+ running_total += TRB_MAX_BUFF_SIZE;
+ }
--- /dev/null
+From 5807795bd4dececdf553719cc02869e633395787 Mon Sep 17 00:00:00 2001
+From: Paul Zimmerman <Paul.Zimmerman@synopsys.com>
+Date: Sat, 12 Feb 2011 14:07:20 -0800
+Subject: xhci: Fix errors in the running total calculations in the TRB math
+
+From: Paul Zimmerman <Paul.Zimmerman@synopsys.com>
+
+commit 5807795bd4dececdf553719cc02869e633395787 upstream.
+
+Calculations like
+
+ running_total = TRB_MAX_BUFF_SIZE -
+ (sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
+ if (running_total != 0)
+ num_trbs++;
+
+are incorrect, because running_total can never be zero, so the if()
+expression will never be true. I think the intention was that
+running_total be in the range of 0 to TRB_MAX_BUFF_SIZE-1, not 1
+to TRB_MAX_BUFF_SIZE. So adding a
+
+ running_total &= TRB_MAX_BUFF_SIZE - 1;
+
+fixes the problem.
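+
+A standalone illustration of the masking, assuming the 64KB TRB segment
+size the code refers to:
+
+	#include <stdio.h>
+
+	#define TRB_MAX_BUFF_SIZE (1 << 16)	/* 64KB */
+
+	int main(void)
+	{
+		unsigned long long addr = 0x20000;	/* buffer already 64KB aligned */
+		unsigned int running_total;
+
+		running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
+		printf("without the mask: %u (non-zero, so an extra TRB is counted)\n",
+		       running_total);
+
+		running_total &= TRB_MAX_BUFF_SIZE - 1;
+		printf("with the mask:    %u\n", running_total);
+		return 0;
+	}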
+
+This patch should be queued for stable kernels back to 2.6.31.
+
+Signed-off-by: Paul Zimmerman <paulz@synopsys.com>
+Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/usb/host/xhci-ring.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -2375,6 +2375,7 @@ static unsigned int count_sg_trbs_needed
+ /* Scatter gather list entries may cross 64KB boundaries */
+ running_total = TRB_MAX_BUFF_SIZE -
+ (sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
++ running_total &= TRB_MAX_BUFF_SIZE - 1;
+ if (running_total != 0)
+ num_trbs++;
+
+@@ -2661,6 +2662,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *
+ /* How much data is (potentially) left before the 64KB boundary? */
+ running_total = TRB_MAX_BUFF_SIZE -
+ (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
++ running_total &= TRB_MAX_BUFF_SIZE - 1;
+
+ /* If there's some data on this 64KB chunk, or we have to send a
+ * zero-length transfer, we need at least one TRB
+@@ -2877,6 +2879,7 @@ static int count_isoc_trbs_needed(struct
+ td_len = urb->iso_frame_desc[i].length;
+
+ running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
++ running_total &= TRB_MAX_BUFF_SIZE - 1;
+ if (running_total != 0)
+ num_trbs++;
+