--- /dev/null
+From 76b99699a2bbf9efdb578f9a38a202af2ecb354b Mon Sep 17 00:00:00 2001
+From: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+Date: Wed, 19 May 2010 23:21:38 -0400
+Subject: Blackfin: set ARCH_KMALLOC_MINALIGN
+
+From: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+
+commit 76b99699a2bbf9efdb578f9a38a202af2ecb354b upstream.
+
+Architectures that handle DMA-non-coherent memory need to set
+ARCH_KMALLOC_MINALIGN to make sure that a kmalloc'ed buffer is
+DMA-safe: the buffer must not share a cache line with other data.
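+
+As a minimal sketch (a hypothetical driver fragment, not part of this
+patch, with dev standing in for the driver's struct device) of the
+failure mode this guards against:
+
+    /* Two small kmalloc() objects; without ARCH_KMALLOC_MINALIGN
+     * they may share one L1 cache line. */
+    char *dma_buf = kmalloc(16, GFP_KERNEL);    /* DMA target */
+    char *cpu_buf = kmalloc(16, GFP_KERNEL);    /* CPU-owned data */
+    dma_addr_t handle;
+
+    if (!dma_buf || !cpu_buf)
+        return -ENOMEM;
+
+    /* On a non-coherent architecture, mapping for DMA invalidates
+     * or flushes whole cache lines.  If cpu_buf shared dma_buf's
+     * line, a dirty write-back of cpu_buf could overwrite device
+     * data in dma_buf, and the invalidate could discard a CPU store
+     * to cpu_buf.  With ARCH_KMALLOC_MINALIGN == L1_CACHE_BYTES,
+     * each allocation owns its cache lines outright. */
+    handle = dma_map_single(dev, dma_buf, 16, DMA_FROM_DEVICE);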
+
+Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
+Signed-off-by: Mike Frysinger <vapier@gentoo.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/blackfin/include/asm/cache.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/blackfin/include/asm/cache.h
++++ b/arch/blackfin/include/asm/cache.h
+@@ -15,6 +15,8 @@
+ #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
+
++#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
++
+ #ifdef CONFIG_SMP
+ #define __cacheline_aligned
+ #else
--- /dev/null
+From ddf08f4b90a413892bbb9bb2e8a57aed991cd47d Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <error27@gmail.com>
+Date: Fri, 7 May 2010 11:05:33 +0200
+Subject: exofs: confusion between kmap() and kmap_atomic() api
+
+From: Dan Carpenter <error27@gmail.com>
+
+commit ddf08f4b90a413892bbb9bb2e8a57aed991cd47d upstream.
+
+For kmap_atomic() we call kunmap_atomic() on the returned pointer,
+whereas kunmap() takes the struct page that was passed to kmap(), so
+it's easy to get the two backwards.
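+
+For reference, the two APIs pair up like this (a sketch against this
+kernel's KM-slot API, not code from the patch):
+
+    void *kaddr;
+
+    kaddr = kmap(page);
+    memset(kaddr, 0, PAGE_SIZE);
+    kunmap(page);                      /* kunmap() takes the page */
+
+    kaddr = kmap_atomic(page, KM_USER0);
+    memset(kaddr, 0, PAGE_SIZE);
+    kunmap_atomic(kaddr, KM_USER0);    /* takes the mapped address */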
+
+Signed-off-by: Dan Carpenter <error27@gmail.com>
+Signed-off-by: Boaz Harrosh <bharrosh@panasas.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/exofs/dir.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/exofs/dir.c
++++ b/fs/exofs/dir.c
+@@ -608,7 +608,7 @@ int exofs_make_empty(struct inode *inode
+ de->inode_no = cpu_to_le64(parent->i_ino);
+ memcpy(de->name, PARENT_DIR, sizeof(PARENT_DIR));
+ exofs_set_de_type(de, inode);
+- kunmap_atomic(page, KM_USER0);
++ kunmap_atomic(kaddr, KM_USER0);
+ err = exofs_commit_chunk(page, 0, chunk_size);
+ fail:
+ page_cache_release(page);
--- /dev/null
+From 69dcf3db03626c4f18de624e8632454ea12ff260 Mon Sep 17 00:00:00 2001
+From: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+Date: Mon, 24 May 2010 14:32:54 -0700
+Subject: frv: set ARCH_KMALLOC_MINALIGN
+
+From: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+
+commit 69dcf3db03626c4f18de624e8632454ea12ff260 upstream.
+
+Architectures that handle DMA-non-coherent memory need to set
+ARCH_KMALLOC_MINALIGN to make sure that a kmalloc'ed buffer is
+DMA-safe: the buffer must not share a cache line with other data.
+
+Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+Acked-by: David Howells <dhowells@redhat.com>
+Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/frv/include/asm/cache.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/frv/include/asm/cache.h
++++ b/arch/frv/include/asm/cache.h
+@@ -17,6 +17,8 @@
+ #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
+ #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
++#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
++
+ #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
+ #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
+
--- /dev/null
+From dd6c26a66bdc629a500174ffe73b010b070b9f1b Mon Sep 17 00:00:00 2001
+From: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+Date: Sun, 23 May 2010 19:38:14 +0200
+Subject: m68k: set ARCH_KMALLOC_MINALIGN
+
+From: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+
+commit dd6c26a66bdc629a500174ffe73b010b070b9f1b upstream.
+
+Architectures that handle DMA-non-coherent memory need to set
+ARCH_KMALLOC_MINALIGN to make sure that a kmalloc'ed buffer is
+DMA-safe: the buffer must not share a cache line with other data.
+
+Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+Cc: Geert Uytterhoeven <geert@linux-m68k.org>
+Cc: Roman Zippel <zippel@linux-m68k.org>
+Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/m68k/include/asm/cache.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/m68k/include/asm/cache.h
++++ b/arch/m68k/include/asm/cache.h
+@@ -8,4 +8,6 @@
+ #define L1_CACHE_SHIFT 4
+ #define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
+
++#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
++
+ #endif
--- /dev/null
+From af3a2cd6b8a479345786e7fe5e199ad2f6240e56 Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.de>
+Date: Sat, 8 May 2010 08:20:17 +1000
+Subject: md: Fix read balancing in RAID1 and RAID10 on drives > 2TB
+
+From: NeilBrown <neilb@suse.de>
+
+commit af3a2cd6b8a479345786e7fe5e199ad2f6240e56 upstream.
+
+read_balance() uses an "unsigned long" for a sector number, which on
+32-bit hosts gets truncated for sectors beyond 2TB.
+This makes read-balancing non-optimal, and can cause data to be read
+from the 'wrong' branch during a resync. This has a very small chance
+of returning wrong data.
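+
+A worked example of the truncation (a sketch, not from the patch;
+sector_t is 64 bits wide with large-block-device support):
+
+    /* With 512-byte sectors, 2TB == 2^32 sectors, which no longer
+     * fits in a 32-bit unsigned long. */
+    sector_t sector = 0x100000080ULL;   /* ~2TB + 64KB into the array */
+    unsigned long truncated = sector;   /* 0x80 on a 32-bit host */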
+
+Reported-by: Jordan Russell <jr-list-2010@quo.to>
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/md/raid1.c | 4 ++--
+ drivers/md/raid10.c | 2 +-
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -417,7 +417,7 @@ static void raid1_end_write_request(stru
+ */
+ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
+ {
+- const unsigned long this_sector = r1_bio->sector;
++ const sector_t this_sector = r1_bio->sector;
+ int new_disk = conf->last_used, disk = new_disk;
+ int wonly_disk = -1;
+ const int sectors = r1_bio->sectors;
+@@ -433,7 +433,7 @@ static int read_balance(conf_t *conf, r1
+ retry:
+ if (conf->mddev->recovery_cp < MaxSector &&
+ (this_sector + sectors >= conf->next_resync)) {
+- /* Choose the first operation device, for consistancy */
++ /* Choose the first operational device, for consistancy */
+ new_disk = 0;
+
+ for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -493,7 +493,7 @@ static int raid10_mergeable_bvec(struct
+ */
+ static int read_balance(conf_t *conf, r10bio_t *r10_bio)
+ {
+- const unsigned long this_sector = r10_bio->sector;
++ const sector_t this_sector = r10_bio->sector;
+ int disk, slot, nslot;
+ const int sectors = r10_bio->sectors;
+ sector_t new_distance, current_distance;
--- /dev/null
+From ef2f80ff7325b2c1888ff02ead28957b5840bf51 Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.de>
+Date: Mon, 17 May 2010 11:27:00 +1000
+Subject: md/linear: avoid possible oops and array stop
+
+From: NeilBrown <neilb@suse.de>
+
+commit ef2f80ff7325b2c1888ff02ead28957b5840bf51 upstream.
+
+Since commit ef286f6fa673cd7fb367e1b145069d8dbfcc6081
+it has been important that each personality clears
+->private in its ->stop() function, or sets it to an
+attribute group to be removed.
+linear.c doesn't, which can sometimes lead to an oops
+(though not always).
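+
+A sketch of that convention for a hypothetical personality (the
+example_* names are illustrative, not code from this patch):
+
+    static int example_stop(mddev_t *mddev)
+    {
+        struct example_conf *conf = mddev->private;
+
+        kfree(conf);
+        /* Required: mddev_unlock() treats a non-NULL ->private of a
+         * stopped array as sysfs state to tear down, so leaving the
+         * stale pointer here hands freed memory to
+         * sysfs_remove_group(). */
+        mddev->private = NULL;
+        return 0;
+    }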
+
+Suitable for 2.6.33-stable and 2.6.34.
+
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/md/linear.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/md/linear.c
++++ b/drivers/md/linear.c
+@@ -281,6 +281,7 @@ static int linear_stop (mddev_t *mddev)
+ rcu_barrier();
+ blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
+ kfree(conf);
++ mddev->private = NULL;
+
+ return 0;
+ }
--- /dev/null
+From 964147d5c86d63be79b442c30f3783d49860c078 Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.de>
+Date: Tue, 18 May 2010 15:27:13 +1000
+Subject: md/raid1: fix counting of write targets.
+
+From: NeilBrown <neilb@suse.de>
+
+commit 964147d5c86d63be79b442c30f3783d49860c078 upstream.
+
+There is a very small race window when writing to a
+RAID1 such that if a device is marked faulty at exactly the wrong
+time, the write-in-progress will not be sent to the device,
+but the bitmap (if present) will be updated to say that
+the write was sent.
+
+Then if the device turned out to still be usable and was re-added
+to the array, the bitmap-based resync would skip resyncing that
+block, possibly leading to corruption. This would only be a problem
+if no further writes were issued to that area of the device (i.e.
+that bitmap chunk).
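+
+The bug was a missing-braces error in which the indentation did not
+match the control flow; reduced to its shape:
+
+    if (test_bit(Faulty, &rdev->flags))
+        r1_bio->bios[i] = NULL;
+    else
+        r1_bio->bios[i] = bio;
+        targets++;    /* looks guarded by the else, but without
+                       * braces it also runs for faulty devices */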
+
+Suitable for any pending -stable kernel.
+
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/md/raid1.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -911,9 +911,10 @@ static int make_request(struct request_q
+ if (test_bit(Faulty, &rdev->flags)) {
+ rdev_dec_pending(rdev, mddev);
+ r1_bio->bios[i] = NULL;
+- } else
++ } else {
+ r1_bio->bios[i] = bio;
+- targets++;
++ targets++;
++ }
+ } else
+ r1_bio->bios[i] = NULL;
+ }
--- /dev/null
+From b6eb127d274385d81ce8dd45c98190f097bce1b4 Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.de>
+Date: Thu, 15 Apr 2010 10:13:47 +1000
+Subject: md: remove unneeded sysfs files more promptly
+
+From: NeilBrown <neilb@suse.de>
+
+commit b6eb127d274385d81ce8dd45c98190f097bce1b4 upstream.
+
+When an array is stopped we need to remove some
+sysfs files which are dependent on the type of array.
+
+We need to delay that deletion as deleting them while holding
+reconfig_mutex can lead to deadlocks.
+
+We currently delay them until the array is completely destroyed.
+However it is possible to deactivate and then reactivate the array.
+It is also possible to need to remove sysfs files when changing level,
+which can potentially happen several times before an array is
+destroyed.
+
+So we need to delete these files more promptly: as soon as
+reconfig_mutex is dropped.
+
+We need to ensure this happens before do_md_run can restart the array,
+so we use open_mutex for some extra locking. This is not deadlock
+prone.
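+
+The do_md_run() side relies on a standard idiom: taking and
+immediately releasing a mutex acts as a barrier that waits out any
+critical section still holding it. In sketch form (condensed from the
+patch below):
+
+    /* mddev_unlock(): remove the files with reconfig_mutex dropped
+     * but open_mutex held */
+    mutex_lock(&mddev->open_mutex);
+    mutex_unlock(&mddev->reconfig_mutex);
+    sysfs_remove_group(&mddev->kobj, mddev->private);
+    mutex_unlock(&mddev->open_mutex);
+
+    /* do_md_run(): the empty lock/unlock pair cannot complete until
+     * any removal still in flight above has finished */
+    mutex_lock(&mddev->open_mutex);
+    mutex_unlock(&mddev->open_mutex);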
+
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/md/md.c | 41 +++++++++++++++++++++++++++++++----------
+ 1 file changed, 31 insertions(+), 10 deletions(-)
+
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -507,9 +507,32 @@ static inline int mddev_trylock(mddev_t
+ return mutex_trylock(&mddev->reconfig_mutex);
+ }
+
++static struct attribute_group md_redundancy_group;
++
+ static inline void mddev_unlock(mddev_t * mddev)
+ {
+- mutex_unlock(&mddev->reconfig_mutex);
++ if (mddev->pers == NULL && mddev->private) {
++ /* These cannot be removed under reconfig_mutex as
++ * an access to the files will try to take reconfig_mutex
++ * while holding the file unremovable, which leads to
++ * a deadlock.
++ * So hold open_mutex instead - we are allowed to take
++ * it while holding reconfig_mutex, and md_run can
++ * use it to wait for the remove to complete.
++ */
++ mutex_lock(&mddev->open_mutex);
++ mutex_unlock(&mddev->reconfig_mutex);
++
++ sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
++ if (mddev->private != (void*)1)
++ sysfs_remove_group(&mddev->kobj, mddev->private);
++ if (mddev->sysfs_action)
++ sysfs_put(mddev->sysfs_action);
++ mddev->sysfs_action = NULL;
++ mddev->private = NULL;
++ mutex_unlock(&mddev->open_mutex);
++ } else
++ mutex_unlock(&mddev->reconfig_mutex);
+
+ md_wakeup_thread(mddev->thread);
+ }
+@@ -4081,15 +4104,6 @@ static void mddev_delayed_delete(struct
+ {
+ mddev_t *mddev = container_of(ws, mddev_t, del_work);
+
+- if (mddev->private) {
+- sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
+- if (mddev->private != (void*)1)
+- sysfs_remove_group(&mddev->kobj, mddev->private);
+- if (mddev->sysfs_action)
+- sysfs_put(mddev->sysfs_action);
+- mddev->sysfs_action = NULL;
+- mddev->private = NULL;
+- }
+ sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
+ kobject_del(&mddev->kobj);
+ kobject_put(&mddev->kobj);
+@@ -4247,6 +4261,13 @@ static int do_md_run(mddev_t * mddev)
+ if (mddev->pers)
+ return -EBUSY;
+
++ /* These two calls synchronise us with the
++ * sysfs_remove_group calls in mddev_unlock,
++ * so they must have completed.
++ */
++ mutex_lock(&mddev->open_mutex);
++ mutex_unlock(&mddev->open_mutex);
++
+ /*
+ * Analyze all RAID superblock(s)
+ */
--- /dev/null
+From e2218350465e7e0931676b4849b594c978437bce Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Wed, 12 May 2010 08:25:37 +1000
+Subject: md: set mddev readonly flag on blkdev BLKROSET ioctl
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+commit e2218350465e7e0931676b4849b594c978437bce upstream.
+
+When the user sets the block device to read-write then the mddev should
+follow suit. Otherwise, the BUG_ON() in md_write_start() will trigger.
+
+The reverse direction, setting mddev->ro to match a set-readonly
+request, can be ignored because the blkdev-level readonly flag precludes
+the need to have mddev->ro set correctly. Never mind the fact that
+setting mddev->ro to 1 may fail if the array is in use.
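+
+For context, this handles the ioctl userspace issues to flip a block
+device read-write (what blockdev --setrw sends); a minimal sketch:
+
+    #include <fcntl.h>
+    #include <sys/ioctl.h>
+    #include <linux/fs.h>
+
+    int fd = open("/dev/md0", O_RDONLY);    /* example md array */
+    int ro = 0;                             /* 0 = read-write */
+
+    if (ioctl(fd, BLKROSET, &ro) < 0)       /* md then clears ->ro */
+        perror("BLKROSET");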
+
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/md/md.c | 29 +++++++++++++++++++++++++++++
+ 1 file changed, 29 insertions(+)
+
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -5516,6 +5516,7 @@ static int md_ioctl(struct block_device
+ int err = 0;
+ void __user *argp = (void __user *)arg;
+ mddev_t *mddev = NULL;
++ int ro;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+@@ -5651,6 +5652,34 @@ static int md_ioctl(struct block_device
+ err = do_md_stop(mddev, 1, 1);
+ goto done_unlock;
+
++ case BLKROSET:
++ if (get_user(ro, (int __user *)(arg))) {
++ err = -EFAULT;
++ goto done_unlock;
++ }
++ err = -EINVAL;
++
++ /* if the bdev is going readonly the value of mddev->ro
++ * does not matter, no writes are coming
++ */
++ if (ro)
++ goto done_unlock;
++
++ /* are we are already prepared for writes? */
++ if (mddev->ro != 1)
++ goto done_unlock;
++
++ /* transitioning to readauto need only happen for
++ * arrays that call md_write_start
++ */
++ if (mddev->pers) {
++ err = restart_array(mddev);
++ if (err == 0) {
++ mddev->ro = 2;
++ set_disk_ro(mddev->gendisk, 0);
++ }
++ }
++ goto done_unlock;
+ }
+
+ /*
--- /dev/null
+From 6cdafaae41d52e6ef9a5c5be23602ef083e4d0f9 Mon Sep 17 00:00:00 2001
+From: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+Date: Mon, 24 May 2010 14:32:58 -0700
+Subject: mn10300: set ARCH_KMALLOC_MINALIGN
+
+From: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+
+commit 6cdafaae41d52e6ef9a5c5be23602ef083e4d0f9 upstream.
+
+Architectures that handle DMA-non-coherent memory need to set
+ARCH_KMALLOC_MINALIGN to make sure that a kmalloc'ed buffer is
+DMA-safe: the buffer must not share a cache line with other data.
+
+Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+Acked-by: David Howells <dhowells@redhat.com>
+Cc: Koichi Yasutake <yasutake.koichi@jp.panasonic.com>
+Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/mn10300/include/asm/cache.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/mn10300/include/asm/cache.h
++++ b/arch/mn10300/include/asm/cache.h
+@@ -21,6 +21,8 @@
+ #define L1_CACHE_DISPARITY L1_CACHE_NENTRIES * L1_CACHE_BYTES
+ #endif
+
++#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
++
+ /* data cache purge registers
+ * - read from the register to unconditionally purge that cache line
+ * - write address & 0xffffff00 to conditionally purge that cache line
--- /dev/null
+From 6ba8bcd457d9fc793ac9435aa2e4138f571d4ec5 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <error27@gmail.com>
+Date: Mon, 24 May 2010 14:33:49 -0700
+Subject: rtc-cmos: do dev_set_drvdata() earlier in the initialization
+
+From: Dan Carpenter <error27@gmail.com>
+
+commit 6ba8bcd457d9fc793ac9435aa2e4138f571d4ec5 upstream.
+
+The bug is an oops when dev_get_drvdata() returns NULL in
+cmos_update_irq_enable(). The call tree looks like this:
+ rtc_dev_ioctl()
+ => rtc_update_irq_enable()
+ => cmos_update_irq_enable()
+
+It's caused by a race condition in the module initialization: it is
+rtc_device_register() which makes the ioctl operations live, so I moved
+the call to dev_set_drvdata() before the call to rtc_device_register().
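+
+The general rule being applied: any state a callback may read must be
+published before the registration call that makes the callback
+reachable. A reduced sketch of the probe path (name and ops stand in
+for the driver's actual identifiers):
+
+    /* WRONG: ioctls can run as soon as rtc_device_register()
+     * returns, and may find NULL drvdata */
+    cmos_rtc.rtc = rtc_device_register(name, dev, &ops, THIS_MODULE);
+    dev_set_drvdata(dev, &cmos_rtc);
+
+    /* RIGHT: publish the state first */
+    dev_set_drvdata(dev, &cmos_rtc);
+    cmos_rtc.rtc = rtc_device_register(name, dev, &ops, THIS_MODULE);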
+
+Addresses https://bugzilla.kernel.org/show_bug.cgi?id=15963
+
+Reported-by: Randy Dunlap <randy.dunlap@oracle.com>
+Signed-off-by: Dan Carpenter <error27@gmail.com>
+Tested-by: Randy Dunlap <randy.dunlap@oracle.com>
+Cc: Alessandro Zummo <a.zummo@towertech.it>
+Cc: Paul Gortmaker <p_gortmaker@yahoo.com>
+Cc: Malte Schroder <maltesch@gmx.de>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Cc: Herton Ronaldo Krzesinski <herton@mandriva.com.br>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/rtc/rtc-cmos.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/rtc/rtc-cmos.c
++++ b/drivers/rtc/rtc-cmos.c
+@@ -719,6 +719,9 @@ cmos_do_probe(struct device *dev, struct
+ }
+ }
+
++ cmos_rtc.dev = dev;
++ dev_set_drvdata(dev, &cmos_rtc);
++
+ cmos_rtc.rtc = rtc_device_register(driver_name, dev,
+ &cmos_rtc_ops, THIS_MODULE);
+ if (IS_ERR(cmos_rtc.rtc)) {
+@@ -726,8 +729,6 @@ cmos_do_probe(struct device *dev, struct
+ goto cleanup0;
+ }
+
+- cmos_rtc.dev = dev;
+- dev_set_drvdata(dev, &cmos_rtc);
+ rename_region(ports, dev_name(&cmos_rtc.rtc->dev));
+
+ spin_lock_irq(&rtc_lock);
--- /dev/null
+From e893de59a4982791368b3ce412bc67dd601a88a0 Mon Sep 17 00:00:00 2001
+From: Maurus Cuelenaere <mcuelenaere@gmail.com>
+Date: Fri, 4 Jun 2010 14:14:44 -0700
+Subject: rtc: s3c: initialize driver data before using it
+
+From: Maurus Cuelenaere <mcuelenaere@gmail.com>
+
+commit e893de59a4982791368b3ce412bc67dd601a88a0 upstream.
+
+s3c_rtc_setfreq() uses the platform driver data to derive struct rtc_device,
+so make sure drvdata is set _before_ s3c_rtc_setfreq() is called.
+
+Signed-off-by: Maurus Cuelenaere <mcuelenaere@gmail.com>
+Cc: Paul Gortmaker <p_gortmaker@yahoo.com>
+Cc: Alessandro Zummo <a.zummo@towertech.it>
+Cc: Maurus Cuelenaere <mcuelenaere@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/rtc/rtc-s3c.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/rtc/rtc-s3c.c
++++ b/drivers/rtc/rtc-s3c.c
+@@ -456,8 +456,6 @@ static int __devinit s3c_rtc_probe(struc
+ pr_debug("s3c2410_rtc: RTCCON=%02x\n",
+ readb(s3c_rtc_base + S3C2410_RTCCON));
+
+- s3c_rtc_setfreq(&pdev->dev, 1);
+-
+ device_init_wakeup(&pdev->dev, 1);
+
+ /* register RTC and exit */
+@@ -474,6 +472,9 @@ static int __devinit s3c_rtc_probe(struc
+ rtc->max_user_freq = 128;
+
+ platform_set_drvdata(pdev, rtc);
++
++ s3c_rtc_setfreq(&pdev->dev, 1);
++
+ return 0;
+
+ err_nortc:
powerpc-pseries-make-query_cpu_stopped-callable-outside-hotplug-cpu.patch
powerpc-oprofile-fix-potential-buffer-overrun-in-op_model_cell.c.patch
writeback-disable-periodic-old-data-writeback-for-dirty_writeback_centisecs.patch
+md-raid1-fix-counting-of-write-targets.patch
+md-fix-read-balancing-in-raid1-and-raid10-on-drives-2tb.patch
+md-linear-avoid-possible-oops-and-array-stop.patch
+md-remove-unneeded-sysfs-files-more-promptly.patch
+md-set-mddev-readonly-flag-on-blkdev-blkroset-ioctl.patch
+x86-amd-iommu-fix-crash-when-request_mem_region-fails.patch
+x86-amd-iommu-fall-back-to-gart-if-initialization-fails.patch
+exofs-confusion-between-kmap-and-kmap_atomic-api.patch
+mn10300-set-arch_kmalloc_minalign.patch
+m68k-set-arch_kmalloc_minalign.patch
+rtc-cmos-do-dev_set_drvdata-earlier-in-the-initialization.patch
+rtc-s3c-initialize-driver-data-before-using-it.patch
+frv-set-arch_kmalloc_minalign.patch
+xtensa-set-arch_kmalloc_minalign.patch
+blackfin-set-arch_kmalloc_minalign.patch
+tmpfs-insert-tmpfs-cache-pages-to-inactive-list-at-first.patch
--- /dev/null
+From e9d6c157385e4efa61cb8293e425c9d8beba70d3 Mon Sep 17 00:00:00 2001
+From: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
+Date: Mon, 24 May 2010 14:31:48 -0700
+Subject: tmpfs: insert tmpfs cache pages to inactive list at first
+
+From: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
+
+commit e9d6c157385e4efa61cb8293e425c9d8beba70d3 upstream.
+
+Shaohua Li reported that parallel file copy on tmpfs can lead to the
+OOM killer. This is a regression caused by commit 9ff473b9a7 ("vmscan:
+evict streaming IO first"). Wow, it is a 2-year-old patch!
+
+Currently, tmpfs file cache is inserted into the active list at first.
+This means that the insertion doesn't only increase the number of pages
+on the anon LRU, it also reduces the anon scanning ratio. Therefore,
+vmscan gets totally confused: it scans almost only the file LRU even
+though the system has plenty of unused tmpfs pages.
+
+Historically, lru_cache_add_active_anon() was used for two reasons:
+1) to prioritize shmem pages over regular file cache;
+2) to avoid reclaim-priority inversion of used-once pages.
+
+But we've lost both motivations. (1) Now we have separate anon and file
+LRU lists, so inserting into the active list doesn't provide that
+prioritization. (2) In the past, one pte access bit would cause page
+activation, so inserting into the inactive list with the pte access bit
+set meant a higher priority than inserting into the active list. That
+priority inversion could lead to unintended LRU churn, but it was
+already solved by commit 645747462 ("vmscan: detect mapped file pages
+used only once"). (Thanks Hannes, you are great!)
+
+Thus, we can now use lru_cache_add_anon() instead.
+
+Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
+Reported-by: Shaohua Li <shaohua.li@intel.com>
+Reviewed-by: Wu Fengguang <fengguang.wu@intel.com>
+Reviewed-by: Johannes Weiner <hannes@cmpxchg.org>
+Reviewed-by: Rik van Riel <riel@redhat.com>
+Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
+Acked-by: Hugh Dickins <hughd@google.com>
+Cc: Henrique de Moraes Holschuh <hmh@hmh.eng.br>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/linux/swap.h | 10 ----------
+ mm/filemap.c | 4 ++--
+ 2 files changed, 2 insertions(+), 12 deletions(-)
+
+--- a/include/linux/swap.h
++++ b/include/linux/swap.h
+@@ -223,21 +223,11 @@ static inline void lru_cache_add_anon(st
+ __lru_cache_add(page, LRU_INACTIVE_ANON);
+ }
+
+-static inline void lru_cache_add_active_anon(struct page *page)
+-{
+- __lru_cache_add(page, LRU_ACTIVE_ANON);
+-}
+-
+ static inline void lru_cache_add_file(struct page *page)
+ {
+ __lru_cache_add(page, LRU_INACTIVE_FILE);
+ }
+
+-static inline void lru_cache_add_active_file(struct page *page)
+-{
+- __lru_cache_add(page, LRU_ACTIVE_FILE);
+-}
+-
+ /* linux/mm/vmscan.c */
+ extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
+ gfp_t gfp_mask, nodemask_t *mask);
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -441,7 +441,7 @@ int add_to_page_cache_lru(struct page *p
+ /*
+ * Splice_read and readahead add shmem/tmpfs pages into the page cache
+ * before shmem_readpage has a chance to mark them as SwapBacked: they
+- * need to go on the active_anon lru below, and mem_cgroup_cache_charge
++ * need to go on the anon lru below, and mem_cgroup_cache_charge
+ * (called in add_to_page_cache) needs to know where they're going too.
+ */
+ if (mapping_cap_swap_backed(mapping))
+@@ -452,7 +452,7 @@ int add_to_page_cache_lru(struct page *p
+ if (page_is_file_cache(page))
+ lru_cache_add_file(page);
+ else
+- lru_cache_add_active_anon(page);
++ lru_cache_add_anon(page);
+ }
+ return ret;
+ }
--- /dev/null
+From d7f0776975334070a93370ae048fda0c31a91c38 Mon Sep 17 00:00:00 2001
+From: Joerg Roedel <joerg.roedel@amd.com>
+Date: Mon, 31 May 2010 15:05:20 +0200
+Subject: x86/amd-iommu: Fall back to GART if initialization fails
+
+From: Joerg Roedel <joerg.roedel@amd.com>
+
+commit d7f0776975334070a93370ae048fda0c31a91c38 upstream.
+
+This patch implements a fallback to the GART IOMMU, where
+possible, if the AMD IOMMU initialization fails.
+Otherwise the fallback would be nommu, which is very
+problematic on machines with more than 4GB of memory, or
+swiotlb, which hurts IO performance.
+
+Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/amd_iommu.c | 4 ----
+ arch/x86/kernel/amd_iommu_init.c | 9 +++++++++
+ 2 files changed, 9 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/kernel/amd_iommu.c
++++ b/arch/x86/kernel/amd_iommu.c
+@@ -2256,10 +2256,6 @@ int __init amd_iommu_init_dma_ops(void)
+
+ iommu_detected = 1;
+ swiotlb = 0;
+-#ifdef CONFIG_GART_IOMMU
+- gart_iommu_aperture_disabled = 1;
+- gart_iommu_aperture = 0;
+-#endif
+
+ /* Make the driver finally visible to the drivers */
+ dma_ops = &amd_iommu_dma_ops;
+--- a/arch/x86/kernel/amd_iommu_init.c
++++ b/arch/x86/kernel/amd_iommu_init.c
+@@ -1340,6 +1340,15 @@ free:
+
+ free_unity_maps();
+
++#ifdef CONFIG_GART_IOMMU
++ /*
++ * We failed to initialize the AMD IOMMU - try fallback to GART
++ * if possible.
++ */
++ gart_iommu_init();
++
++#endif
++
+ goto out;
+ }
+
--- /dev/null
+From e82752d8b5a7e0a5e4d607fd8713549e2a4e2741 Mon Sep 17 00:00:00 2001
+From: Joerg Roedel <joerg.roedel@amd.com>
+Date: Fri, 28 May 2010 14:26:48 +0200
+Subject: x86/amd-iommu: Fix crash when request_mem_region fails
+
+From: Joerg Roedel <joerg.roedel@amd.com>
+
+commit e82752d8b5a7e0a5e4d607fd8713549e2a4e2741 upstream.
+
+When request_mem_region() fails, the error path tries to
+disable the IOMMUs. This accesses the MMIO region, which was
+never allocated, leading to a kernel crash. This patch fixes
+the issue.
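+
+The underlying rule: an error path may only undo what has actually
+been set up, and the MMIO space is only touchable once both calls
+below have succeeded. A reduced sketch (CTRL_REG and ctrl are
+placeholders, not the driver's names):
+
+    if (!request_mem_region(addr, MMIO_REGION_LENGTH, "amd_iommu"))
+        return NULL;            /* report and bail; never touch MMIO */
+
+    mmio = ioremap_nocache(addr, MMIO_REGION_LENGTH);
+    if (!mmio) {
+        release_mem_region(addr, MMIO_REGION_LENGTH);
+        return NULL;            /* safe: nothing mapped to touch */
+    }
+
+    writel(ctrl, mmio + CTRL_REG);  /* only valid after ioremap */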
+
+Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/amd_iommu_init.c | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/kernel/amd_iommu_init.c
++++ b/arch/x86/kernel/amd_iommu_init.c
+@@ -286,8 +286,12 @@ static u8 * __init iommu_map_mmio_space(
+ {
+ u8 *ret;
+
+- if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu"))
++ if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu")) {
++ pr_err("AMD-Vi: Can not reserve memory region %llx for mmio\n",
++ address);
++ pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n");
+ return NULL;
++ }
+
+ ret = ioremap_nocache(address, MMIO_REGION_LENGTH);
+ if (ret != NULL)
+@@ -1296,7 +1300,7 @@ static int __init amd_iommu_init(void)
+ ret = amd_iommu_init_dma_ops();
+
+ if (ret)
+- goto free;
++ goto free_disable;
+
+ amd_iommu_init_api();
+
+@@ -1314,9 +1318,10 @@ static int __init amd_iommu_init(void)
+ out:
+ return ret;
+
+-free:
++free_disable:
+ disable_iommus();
+
++free:
+ amd_iommu_uninit_devices();
+
+ free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
--- /dev/null
+From 498900fc9cd1adbad1ba6b55ed9d8f2f5d655ca3 Mon Sep 17 00:00:00 2001
+From: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+Date: Mon, 24 May 2010 14:31:45 -0700
+Subject: xtensa: set ARCH_KMALLOC_MINALIGN
+
+From: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+
+commit 498900fc9cd1adbad1ba6b55ed9d8f2f5d655ca3 upstream.
+
+Architectures that handle DMA-non-coherent memory need to set
+ARCH_KMALLOC_MINALIGN to make sure that a kmalloc'ed buffer is
+DMA-safe: the buffer must not share a cache line with other data.
+
+Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+Cc: Chris Zankel <chris@zankel.net>
+Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/xtensa/include/asm/cache.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/xtensa/include/asm/cache.h
++++ b/arch/xtensa/include/asm/cache.h
+@@ -29,5 +29,6 @@
+ # define CACHE_WAY_SIZE ICACHE_WAY_SIZE
+ #endif
+
++#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+
+ #endif /* _XTENSA_CACHE_H */