--- /dev/null
+From 76b99699a2bbf9efdb578f9a38a202af2ecb354b Mon Sep 17 00:00:00 2001
+From: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+Date: Wed, 19 May 2010 23:21:38 -0400
+Subject: Blackfin: set ARCH_KMALLOC_MINALIGN
+
+From: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+
+commit 76b99699a2bbf9efdb578f9a38a202af2ecb354b upstream.
+
+Architectures that handle DMA-non-coherent memory need to set
+ARCH_KMALLOC_MINALIGN to make sure that kmalloc'ed buffer is DMA-safe:
+the buffer doesn't share a cache with the others.
+
+Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
+Signed-off-by: Mike Frysinger <vapier@gentoo.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/blackfin/include/asm/cache.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/blackfin/include/asm/cache.h
++++ b/arch/blackfin/include/asm/cache.h
+@@ -15,6 +15,8 @@
+ #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
+
++#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
++
+ #ifdef CONFIG_SMP
+ #define __cacheline_aligned
+ #else
--- /dev/null
+From ddf08f4b90a413892bbb9bb2e8a57aed991cd47d Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <error27@gmail.com>
+Date: Fri, 7 May 2010 11:05:33 +0200
+Subject: exofs: confusion between kmap() and kmap_atomic() api
+
+From: Dan Carpenter <error27@gmail.com>
+
+commit ddf08f4b90a413892bbb9bb2e8a57aed991cd47d upstream.
+
+For kmap_atomic() we call kunmap_atomic() on the returned pointer.
+That's different from kmap() and kunmap() and so it's easy to get them
+backwards.
+
+Signed-off-by: Dan Carpenter <error27@gmail.com>
+Signed-off-by: Boaz Harrosh <bharrosh@panasas.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/exofs/dir.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/exofs/dir.c
++++ b/fs/exofs/dir.c
+@@ -608,7 +608,7 @@ int exofs_make_empty(struct inode *inode
+ de->inode_no = cpu_to_le64(parent->i_ino);
+ memcpy(de->name, PARENT_DIR, sizeof(PARENT_DIR));
+ exofs_set_de_type(de, inode);
+- kunmap_atomic(page, KM_USER0);
++ kunmap_atomic(kaddr, KM_USER0);
+ err = exofs_commit_chunk(page, 0, chunk_size);
+ fail:
+ page_cache_release(page);
--- /dev/null
+From 69dcf3db03626c4f18de624e8632454ea12ff260 Mon Sep 17 00:00:00 2001
+From: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+Date: Mon, 24 May 2010 14:32:54 -0700
+Subject: frv: set ARCH_KMALLOC_MINALIGN
+
+From: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+
+commit 69dcf3db03626c4f18de624e8632454ea12ff260 upstream.
+
+Architectures that handle DMA-non-coherent memory need to set
+ARCH_KMALLOC_MINALIGN to make sure that kmalloc'ed buffer is DMA-safe: the
+buffer doesn't share a cache with the others.
+
+Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+Acked-by: David Howells <dhowells@redhat.com>
+Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/frv/include/asm/cache.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/frv/include/asm/cache.h
++++ b/arch/frv/include/asm/cache.h
+@@ -17,6 +17,8 @@
+ #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
+ #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
++#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
++
+ #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
+ #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
+
--- /dev/null
+From dd6c26a66bdc629a500174ffe73b010b070b9f1b Mon Sep 17 00:00:00 2001
+From: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+Date: Sun, 23 May 2010 19:38:14 +0200
+Subject: m68k: set ARCH_KMALLOC_MINALIGN
+
+From: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+
+commit dd6c26a66bdc629a500174ffe73b010b070b9f1b upstream.
+
+Architectures that handle DMA-non-coherent memory need to set
+ARCH_KMALLOC_MINALIGN to make sure that kmalloc'ed buffer is DMA-safe: the
+buffer doesn't share a cache with the others.
+
+Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+Cc: Geert Uytterhoeven <geert@linux-m68k.org>
+Cc: Roman Zippel <zippel@linux-m68k.org>
+Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/m68k/include/asm/cache.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/m68k/include/asm/cache.h
++++ b/arch/m68k/include/asm/cache.h
+@@ -8,4 +8,6 @@
+ #define L1_CACHE_SHIFT 4
+ #define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
+
++#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
++
+ #endif
--- /dev/null
+From af3a2cd6b8a479345786e7fe5e199ad2f6240e56 Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.de>
+Date: Sat, 8 May 2010 08:20:17 +1000
+Subject: md: Fix read balancing in RAID1 and RAID10 on drives > 2TB
+
+From: NeilBrown <neilb@suse.de>
+
+commit af3a2cd6b8a479345786e7fe5e199ad2f6240e56 upstream.
+
+read_balance uses a "unsigned long" for a sector number which
+will get truncated beyond 2TB.
+This will cause read-balancing to be non-optimal, and can cause
+data to be read from the 'wrong' branch during a resync. This has a
+very small chance of returning wrong data.
+
+Reported-by: Jordan Russell <jr-list-2010@quo.to>
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/md/raid1.c | 4 ++--
+ drivers/md/raid10.c | 2 +-
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -417,7 +417,7 @@ static void raid1_end_write_request(stru
+ */
+ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
+ {
+- const unsigned long this_sector = r1_bio->sector;
++ const sector_t this_sector = r1_bio->sector;
+ int new_disk = conf->last_used, disk = new_disk;
+ int wonly_disk = -1;
+ const int sectors = r1_bio->sectors;
+@@ -433,7 +433,7 @@ static int read_balance(conf_t *conf, r1
+ retry:
+ if (conf->mddev->recovery_cp < MaxSector &&
+ (this_sector + sectors >= conf->next_resync)) {
+- /* Choose the first operation device, for consistancy */
++ /* Choose the first operational device, for consistancy */
+ new_disk = 0;
+
+ for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -493,7 +493,7 @@ static int raid10_mergeable_bvec(struct
+ */
+ static int read_balance(conf_t *conf, r10bio_t *r10_bio)
+ {
+- const unsigned long this_sector = r10_bio->sector;
++ const sector_t this_sector = r10_bio->sector;
+ int disk, slot, nslot;
+ const int sectors = r10_bio->sectors;
+ sector_t new_distance, current_distance;
--- /dev/null
+From 964147d5c86d63be79b442c30f3783d49860c078 Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.de>
+Date: Tue, 18 May 2010 15:27:13 +1000
+Subject: md/raid1: fix counting of write targets.
+
+From: NeilBrown <neilb@suse.de>
+
+commit 964147d5c86d63be79b442c30f3783d49860c078 upstream.
+
+There is a very small race window when writing to a
+RAID1 such that if a device is marked faulty at exactly the wrong
+time, the write-in-progress will not be sent to the device,
+but the bitmap (if present) will be updated to say that
+the write was sent.
+
+Then if the device turned out to still be usable as was re-added
+to the array, the bitmap-based-resync would skip resyncing that
+block, possibly leading to corruption. This would only be a problem
+if no further writes were issued to that area of the device (i.e.
+that bitmap chunk).
+
+Suitable for any pending -stable kernel.
+
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/md/raid1.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -891,9 +891,10 @@ static int make_request(struct request_q
+ if (test_bit(Faulty, &rdev->flags)) {
+ rdev_dec_pending(rdev, mddev);
+ r1_bio->bios[i] = NULL;
+- } else
++ } else {
+ r1_bio->bios[i] = bio;
+- targets++;
++ targets++;
++ }
+ } else
+ r1_bio->bios[i] = NULL;
+ }
--- /dev/null
+From e2218350465e7e0931676b4849b594c978437bce Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Wed, 12 May 2010 08:25:37 +1000
+Subject: md: set mddev readonly flag on blkdev BLKROSET ioctl
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+commit e2218350465e7e0931676b4849b594c978437bce upstream.
+
+When the user sets the block device to readwrite then the mddev should
+follow suit. Otherwise, the BUG_ON in md_write_start() will be
+triggered.
+
+The reverse direction, setting mddev->ro to match a set readonly
+request, can be ignored because the blkdev level readonly flag precludes
+the need to have mddev->ro set correctly. Nevermind the fact that
+setting mddev->ro to 1 may fail if the array is in use.
+
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/md/md.c | 29 +++++++++++++++++++++++++++++
+ 1 file changed, 29 insertions(+)
+
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -5334,6 +5334,7 @@ static int md_ioctl(struct block_device
+ int err = 0;
+ void __user *argp = (void __user *)arg;
+ mddev_t *mddev = NULL;
++ int ro;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+@@ -5469,6 +5470,34 @@ static int md_ioctl(struct block_device
+ err = do_md_stop(mddev, 1, 1);
+ goto done_unlock;
+
++ case BLKROSET:
++ if (get_user(ro, (int __user *)(arg))) {
++ err = -EFAULT;
++ goto done_unlock;
++ }
++ err = -EINVAL;
++
++ /* if the bdev is going readonly the value of mddev->ro
++ * does not matter, no writes are coming
++ */
++ if (ro)
++ goto done_unlock;
++
++ /* are we are already prepared for writes? */
++ if (mddev->ro != 1)
++ goto done_unlock;
++
++ /* transitioning to readauto need only happen for
++ * arrays that call md_write_start
++ */
++ if (mddev->pers) {
++ err = restart_array(mddev);
++ if (err == 0) {
++ mddev->ro = 2;
++ set_disk_ro(mddev->gendisk, 0);
++ }
++ }
++ goto done_unlock;
+ }
+
+ /*
--- /dev/null
+From 6cdafaae41d52e6ef9a5c5be23602ef083e4d0f9 Mon Sep 17 00:00:00 2001
+From: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+Date: Mon, 24 May 2010 14:32:58 -0700
+Subject: mn10300: set ARCH_KMALLOC_MINALIGN
+
+From: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+
+commit 6cdafaae41d52e6ef9a5c5be23602ef083e4d0f9 upstream.
+
+Architectures that handle DMA-non-coherent memory need to set
+ARCH_KMALLOC_MINALIGN to make sure that kmalloc'ed buffer is DMA-safe: the
+buffer doesn't share a cache with the others.
+
+Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+Acked-by: David Howells <dhowells@redhat.com>
+Cc: Koichi Yasutake <yasutake.koichi@jp.panasonic.com>
+Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/mn10300/include/asm/cache.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/mn10300/include/asm/cache.h
++++ b/arch/mn10300/include/asm/cache.h
+@@ -21,6 +21,8 @@
+ #define L1_CACHE_DISPARITY L1_CACHE_NENTRIES * L1_CACHE_BYTES
+ #endif
+
++#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
++
+ /* data cache purge registers
+ * - read from the register to unconditionally purge that cache line
+ * - write address & 0xffffff00 to conditionally purge that cache line
--- /dev/null
+From 6ba8bcd457d9fc793ac9435aa2e4138f571d4ec5 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <error27@gmail.com>
+Date: Mon, 24 May 2010 14:33:49 -0700
+Subject: rtc-cmos: do dev_set_drvdata() earlier in the initialization
+
+From: Dan Carpenter <error27@gmail.com>
+
+commit 6ba8bcd457d9fc793ac9435aa2e4138f571d4ec5 upstream.
+
+The bug is an oops when dev_get_drvdata() returned null in
+cmos_update_irq_enable(). The call tree looks like this:
+ rtc_dev_ioctl()
+ => rtc_update_irq_enable()
+ => cmos_update_irq_enable()
+
+It's caused by a race condition in the module initialization. It is
+rtc_device_register() which makes the ioctl operations live so I moved
+the call to dev_set_drvdata() before the call to rtc_device_register().
+
+Addresses https://bugzilla.kernel.org/show_bug.cgi?id=15963
+
+Reported-by: Randy Dunlap <randy.dunlap@oracle.com>
+Signed-off-by: Dan Carpenter <error27@gmail.com>
+Tested-by: Randy Dunlap <randy.dunlap@oracle.com>
+Cc: Alessandro Zummo <a.zummo@towertech.it>
+Cc: Paul Gortmaker <p_gortmaker@yahoo.com>
+Cc: Malte Schroder <maltesch@gmx.de>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Cc: Herton Ronaldo Krzesinski <herton@mandriva.com.br>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/rtc/rtc-cmos.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/rtc/rtc-cmos.c
++++ b/drivers/rtc/rtc-cmos.c
+@@ -723,6 +723,9 @@ cmos_do_probe(struct device *dev, struct
+ }
+ }
+
++ cmos_rtc.dev = dev;
++ dev_set_drvdata(dev, &cmos_rtc);
++
+ cmos_rtc.rtc = rtc_device_register(driver_name, dev,
+ &cmos_rtc_ops, THIS_MODULE);
+ if (IS_ERR(cmos_rtc.rtc)) {
+@@ -730,8 +733,6 @@ cmos_do_probe(struct device *dev, struct
+ goto cleanup0;
+ }
+
+- cmos_rtc.dev = dev;
+- dev_set_drvdata(dev, &cmos_rtc);
+ rename_region(ports, dev_name(&cmos_rtc.rtc->dev));
+
+ spin_lock_irq(&rtc_lock);
--- /dev/null
+From e893de59a4982791368b3ce412bc67dd601a88a0 Mon Sep 17 00:00:00 2001
+From: Maurus Cuelenaere <mcuelenaere@gmail.com>
+Date: Fri, 4 Jun 2010 14:14:44 -0700
+Subject: rtc: s3c: initialize driver data before using it
+
+From: Maurus Cuelenaere <mcuelenaere@gmail.com>
+
+commit e893de59a4982791368b3ce412bc67dd601a88a0 upstream.
+
+s3c_rtc_setfreq() uses the platform driver data to derive struct rtc_device,
+so make sure drvdata is set _before_ s3c_rtc_setfreq() is called.
+
+Signed-off-by: Maurus Cuelenaere <mcuelenaere@gmail.com>
+Cc: Paul Gortmaker <p_gortmaker@yahoo.com>
+Cc: Alessandro Zummo <a.zummo@towertech.it>
+Cc: Maurus Cuelenaere <mcuelenaere@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/rtc/rtc-s3c.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/rtc/rtc-s3c.c
++++ b/drivers/rtc/rtc-s3c.c
+@@ -456,8 +456,6 @@ static int __devinit s3c_rtc_probe(struc
+ pr_debug("s3c2410_rtc: RTCCON=%02x\n",
+ readb(s3c_rtc_base + S3C2410_RTCCON));
+
+- s3c_rtc_setfreq(&pdev->dev, 1);
+-
+ device_init_wakeup(&pdev->dev, 1);
+
+ /* register RTC and exit */
+@@ -474,6 +472,9 @@ static int __devinit s3c_rtc_probe(struc
+ rtc->max_user_freq = 128;
+
+ platform_set_drvdata(pdev, rtc);
++
++ s3c_rtc_setfreq(&pdev->dev, 1);
++
+ return 0;
+
+ err_nortc:
powerpc-pseries-only-call-start-cpu-when-a-cpu-is-stopped.patch
powerpc-oprofile-fix-potential-buffer-overrun-in-op_model_cell.c.patch
writeback-disable-periodic-old-data-writeback-for-dirty_writeback_centisecs.patch
+md-raid1-fix-counting-of-write-targets.patch
+md-fix-read-balancing-in-raid1-and-raid10-on-drives-2tb.patch
+md-set-mddev-readonly-flag-on-blkdev-blkroset-ioctl.patch
+x86-amd-iommu-fix-suspend-resume-with-iommu.patch
+exofs-confusion-between-kmap-and-kmap_atomic-api.patch
+mn10300-set-arch_kmalloc_minalign.patch
+m68k-set-arch_kmalloc_minalign.patch
+rtc-cmos-do-dev_set_drvdata-earlier-in-the-initialization.patch
+rtc-s3c-initialize-driver-data-before-using-it.patch
+frv-set-arch_kmalloc_minalign.patch
+xtensa-set-arch_kmalloc_minalign.patch
+blackfin-set-arch_kmalloc_minalign.patch
+tmpfs-insert-tmpfs-cache-pages-to-inactive-list-at-first.patch
--- /dev/null
+From e9d6c157385e4efa61cb8293e425c9d8beba70d3 Mon Sep 17 00:00:00 2001
+From: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
+Date: Mon, 24 May 2010 14:31:48 -0700
+Subject: tmpfs: insert tmpfs cache pages to inactive list at first
+
+From: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
+
+commit e9d6c157385e4efa61cb8293e425c9d8beba70d3 upstream.
+
+Shaohua Li reported parallel file copy on tmpfs can lead to OOM killer.
+This is a regression caused by commit 9ff473b9a7 ("vmscan: evict
+streaming IO first"). Wow, it is a 2-year-old patch!
+
+Currently, tmpfs file cache is inserted into the active list at first.
+This means that the insertion doesn't only increase the number of pages in
+the anon LRU, but it also reduces the anon scanning ratio. Therefore, vmscan will get totally
+confused. It scans almost only file LRU even though the system has plenty
+unused tmpfs pages.
+
+Historically, lru_cache_add_active_anon() was used for two reasons.
+1) Intending to prioritize shmem pages rather than regular file cache.
+2) Intending to avoid reclaim priority inversion of used-once pages.
+
+But we've lost both motivations because (1) we now have separate anon and
+file LRU lists, so inserting into the active list doesn't provide such
+prioritization. (2) In the past, one pte access bit would cause page
+activation, so inserting into the inactive list with the pte access bit
+set meant higher priority than inserting into the active list. That
+priority inversion could lead to unintended lru churn, but it was already
+solved by commit 645747462 (vmscan: detect
+mapped file pages used only once). (Thanks Hannes, you are great!)
+
+Thus, now we can use lru_cache_add_anon() instead.
+
+Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
+Reported-by: Shaohua Li <shaohua.li@intel.com>
+Reviewed-by: Wu Fengguang <fengguang.wu@intel.com>
+Reviewed-by: Johannes Weiner <hannes@cmpxchg.org>
+Reviewed-by: Rik van Riel <riel@redhat.com>
+Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
+Acked-by: Hugh Dickins <hughd@google.com>
+Cc: Henrique de Moraes Holschuh <hmh@hmh.eng.br>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/linux/swap.h | 10 ----------
+ mm/filemap.c | 4 ++--
+ 2 files changed, 2 insertions(+), 12 deletions(-)
+
+--- a/include/linux/swap.h
++++ b/include/linux/swap.h
+@@ -218,21 +218,11 @@ static inline void lru_cache_add_anon(st
+ __lru_cache_add(page, LRU_INACTIVE_ANON);
+ }
+
+-static inline void lru_cache_add_active_anon(struct page *page)
+-{
+- __lru_cache_add(page, LRU_ACTIVE_ANON);
+-}
+-
+ static inline void lru_cache_add_file(struct page *page)
+ {
+ __lru_cache_add(page, LRU_INACTIVE_FILE);
+ }
+
+-static inline void lru_cache_add_active_file(struct page *page)
+-{
+- __lru_cache_add(page, LRU_ACTIVE_FILE);
+-}
+-
+ /* linux/mm/vmscan.c */
+ extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
+ gfp_t gfp_mask, nodemask_t *mask);
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -462,7 +462,7 @@ int add_to_page_cache_lru(struct page *p
+ /*
+ * Splice_read and readahead add shmem/tmpfs pages into the page cache
+ * before shmem_readpage has a chance to mark them as SwapBacked: they
+- * need to go on the active_anon lru below, and mem_cgroup_cache_charge
++ * need to go on the anon lru below, and mem_cgroup_cache_charge
+ * (called in add_to_page_cache) needs to know where they're going too.
+ */
+ if (mapping_cap_swap_backed(mapping))
+@@ -473,7 +473,7 @@ int add_to_page_cache_lru(struct page *p
+ if (page_is_file_cache(page))
+ lru_cache_add_file(page);
+ else
+- lru_cache_add_active_anon(page);
++ lru_cache_add_anon(page);
+ }
+ return ret;
+ }
--- /dev/null
+From joerg.roedel@amd.com Wed Jun 23 13:58:22 2010
+From: Joerg Roedel <joerg.roedel@amd.com>
+Date: Tue, 1 Jun 2010 11:41:44 +0200
+Subject: x86/amd-iommu: Fix suspend/resume with IOMMU
+To: Greg KH <gregkh@suse.de>
+Cc: stable@kernel.org, iommu@lists.linux-foundation.org
+Message-ID: <20100601094143.GA20522@amd.com>
+
+From: Joerg Roedel <joerg.roedel@amd.com>
+
+This is a suspend/resume fix for 2.6.32-stable inclusion. The
+problem with this patch is that it is not upstream, because the code
+changed with 2.6.33 and the function containing this bug was
+removed. So this fix does not make sense anymore for anything later than
+2.6.32. The patch was tested by multiple parties and is confirmed to fix
+the broken suspend/resume issue with the 2.6.32 kernel.
+
+This patch fixes suspend/resume with AMD IOMMU enabled.
+
+Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ arch/x86/kernel/amd_iommu.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/amd_iommu.c
++++ b/arch/x86/kernel/amd_iommu.c
+@@ -544,7 +544,7 @@ static void flush_devices_by_domain(stru
+
+ for (i = 0; i <= amd_iommu_last_bdf; ++i) {
+ if ((domain == NULL && amd_iommu_pd_table[i] == NULL) ||
+- (amd_iommu_pd_table[i] != domain))
++ (domain != NULL && amd_iommu_pd_table[i] != domain))
+ continue;
+
+ iommu = amd_iommu_rlookup_table[i];
--- /dev/null
+From 498900fc9cd1adbad1ba6b55ed9d8f2f5d655ca3 Mon Sep 17 00:00:00 2001
+From: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+Date: Mon, 24 May 2010 14:31:45 -0700
+Subject: xtensa: set ARCH_KMALLOC_MINALIGN
+
+From: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+
+commit 498900fc9cd1adbad1ba6b55ed9d8f2f5d655ca3 upstream.
+
+Architectures that handle DMA-non-coherent memory need to set
+ARCH_KMALLOC_MINALIGN to make sure that kmalloc'ed buffer is DMA-safe: the
+buffer doesn't share a cache with the others.
+
+Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+Cc: Chris Zankel <chris@zankel.net>
+Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/xtensa/include/asm/cache.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/xtensa/include/asm/cache.h
++++ b/arch/xtensa/include/asm/cache.h
+@@ -29,5 +29,6 @@
+ # define CACHE_WAY_SIZE ICACHE_WAY_SIZE
+ #endif
+
++#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+
+ #endif /* _XTENSA_CACHE_H */