From: Greg Kroah-Hartman
Date: Wed, 23 Jun 2010 21:36:19 +0000 (-0700)
Subject: .34 patches
X-Git-Tag: v2.6.31.14~34
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=c189c96ada99145a29e933c883eaf2e0b58a3d5e;p=thirdparty%2Fkernel%2Fstable-queue.git

.34 patches
---

diff --git a/queue-2.6.34/blackfin-set-arch_kmalloc_minalign.patch b/queue-2.6.34/blackfin-set-arch_kmalloc_minalign.patch
new file mode 100644
index 00000000000..f622ff5cd76
--- /dev/null
+++ b/queue-2.6.34/blackfin-set-arch_kmalloc_minalign.patch
@@ -0,0 +1,33 @@
+From 76b99699a2bbf9efdb578f9a38a202af2ecb354b Mon Sep 17 00:00:00 2001
+From: FUJITA Tomonori
+Date: Wed, 19 May 2010 23:21:38 -0400
+Subject: Blackfin: set ARCH_KMALLOC_MINALIGN
+
+From: FUJITA Tomonori
+
+commit 76b99699a2bbf9efdb578f9a38a202af2ecb354b upstream.
+
+Architectures that handle DMA-non-coherent memory need to set
+ARCH_KMALLOC_MINALIGN to make sure that kmalloc'ed buffer is DMA-safe:
+the buffer doesn't share a cache with the others.
+
+Signed-off-by: FUJITA Tomonori
+Acked-by: Pekka Enberg
+Signed-off-by: Mike Frysinger
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/blackfin/include/asm/cache.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/blackfin/include/asm/cache.h
++++ b/arch/blackfin/include/asm/cache.h
+@@ -15,6 +15,8 @@
+ #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
+
++#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
++
+ #ifdef CONFIG_SMP
+ #define __cacheline_aligned
+ #else
diff --git a/queue-2.6.34/clean-dcache_cant_mount-in-d_delete.patch b/queue-2.6.34/clean-dcache_cant_mount-in-d_delete.patch
new file mode 100644
index 00000000000..39a96ff6ba1
--- /dev/null
+++ b/queue-2.6.34/clean-dcache_cant_mount-in-d_delete.patch
@@ -0,0 +1,38 @@
+From 13e3c5e5b9c67e59074d24e29f3ff794bb4dfef0 Mon Sep 17 00:00:00 2001
+From: Al Viro
+Date: Fri, 21 May 2010 16:11:04 -0400
+Subject: clean DCACHE_CANT_MOUNT in d_delete()
+
+From: Al Viro
+
+commit 13e3c5e5b9c67e59074d24e29f3ff794bb4dfef0 upstream.
+
+We set the "it's dead, don't mount on it" flag _and_ do not remove it if
+we turn the damn thing negative and leave it around. And if it goes
+positive afterwards, well...
+
+Fortunately, there's only one place where that needs to be caught:
+only d_delete() can turn the sucker negative without immediately freeing
+it; all other places that can lead to ->d_iput() call are followed by
+unconditionally freeing struct dentry in question. So the fix is obvious:
+
+Addresses https://bugzilla.kernel.org/show_bug.cgi?id=16014
+Reported-by: Adam Tkac
+Tested-by: Adam Tkac
+Signed-off-by: Al Viro
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ fs/dcache.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -1529,6 +1529,7 @@ void d_delete(struct dentry * dentry)
+ spin_lock(&dentry->d_lock);
+ isdir = S_ISDIR(dentry->d_inode->i_mode);
+ if (atomic_read(&dentry->d_count) == 1) {
++ dentry->d_flags &= ~DCACHE_CANT_MOUNT;
+ dentry_iput(dentry);
+ fsnotify_nameremove(dentry, isdir);
+ return;
diff --git a/queue-2.6.34/eeepc-wmi-depends-on-backlight_class_device.patch b/queue-2.6.34/eeepc-wmi-depends-on-backlight_class_device.patch
new file mode 100644
index 00000000000..aec29af2ca1
--- /dev/null
+++ b/queue-2.6.34/eeepc-wmi-depends-on-backlight_class_device.patch
@@ -0,0 +1,34 @@
+From 89a7644be2c59eea443b0db2514fd42d5de909f8 Mon Sep 17 00:00:00 2001
+From: Randy Dunlap
+Date: Fri, 7 May 2010 11:24:11 -0700
+Subject: eeepc-wmi: depends on BACKLIGHT_CLASS_DEVICE
+
+From: Randy Dunlap
+
+commit 89a7644be2c59eea443b0db2514fd42d5de909f8 upstream.
+
+eeepc-wmi uses backlight*() interfaces so it should depend on
+BACKLIGHT_CLASS_DEVICE.
+
+eeepc-wmi.c:(.text+0x2d7f54): undefined reference to `backlight_force_update'
+eeepc-wmi.c:(.text+0x2d8012): undefined reference to `backlight_device_register'
+eeepc-wmi.c:(.devinit.text+0x1c31c): undefined reference to `backlight_device_unregister'
+eeepc-wmi.c:(.devexit.text+0x2f8b): undefined reference to `backlight_device_unregister'
+
+Signed-off-by: Randy Dunlap
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/platform/x86/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/platform/x86/Kconfig
++++ b/drivers/platform/x86/Kconfig
+@@ -390,6 +390,7 @@ config EEEPC_WMI
+ depends on ACPI_WMI
+ depends on INPUT
+ depends on EXPERIMENTAL
++ depends on BACKLIGHT_CLASS_DEVICE
+ select INPUT_SPARSEKMAP
+ ---help---
+ Say Y here if you want to support WMI-based hotkeys on Eee PC laptops.
diff --git a/queue-2.6.34/exofs-confusion-between-kmap-and-kmap_atomic-api.patch b/queue-2.6.34/exofs-confusion-between-kmap-and-kmap_atomic-api.patch
new file mode 100644
index 00000000000..54f8c5bebe0
--- /dev/null
+++ b/queue-2.6.34/exofs-confusion-between-kmap-and-kmap_atomic-api.patch
@@ -0,0 +1,32 @@
+From ddf08f4b90a413892bbb9bb2e8a57aed991cd47d Mon Sep 17 00:00:00 2001
+From: Dan Carpenter
+Date: Fri, 7 May 2010 11:05:33 +0200
+Subject: exofs: confusion between kmap() and kmap_atomic() api
+
+From: Dan Carpenter
+
+commit ddf08f4b90a413892bbb9bb2e8a57aed991cd47d upstream.
+
+For kmap_atomic() we call kunmap_atomic() on the returned pointer.
+That's different from kmap() and kunmap() and so it's easy to get them
+backwards.
+
+Signed-off-by: Dan Carpenter
+Signed-off-by: Boaz Harrosh
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ fs/exofs/dir.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/exofs/dir.c
++++ b/fs/exofs/dir.c
+@@ -608,7 +608,7 @@ int exofs_make_empty(struct inode *inode
+ de->inode_no = cpu_to_le64(parent->i_ino);
+ memcpy(de->name, PARENT_DIR, sizeof(PARENT_DIR));
+ exofs_set_de_type(de, inode);
+- kunmap_atomic(page, KM_USER0);
++ kunmap_atomic(kaddr, KM_USER0);
+ err = exofs_commit_chunk(page, 0, chunk_size);
+ fail:
+ page_cache_release(page);
diff --git a/queue-2.6.34/frv-set-arch_kmalloc_minalign.patch b/queue-2.6.34/frv-set-arch_kmalloc_minalign.patch
new file mode 100644
index 00000000000..1fd49fb8cbe
--- /dev/null
+++ b/queue-2.6.34/frv-set-arch_kmalloc_minalign.patch
@@ -0,0 +1,35 @@
+From 69dcf3db03626c4f18de624e8632454ea12ff260 Mon Sep 17 00:00:00 2001
+From: FUJITA Tomonori
+Date: Mon, 24 May 2010 14:32:54 -0700
+Subject: frv: set ARCH_KMALLOC_MINALIGN
+
+From: FUJITA Tomonori
+
+commit 69dcf3db03626c4f18de624e8632454ea12ff260 upstream.
+
+Architectures that handle DMA-non-coherent memory need to set
+ARCH_KMALLOC_MINALIGN to make sure that kmalloc'ed buffer is DMA-safe: the
+buffer doesn't share a cache with the others.
+
+Signed-off-by: FUJITA Tomonori
+Acked-by: David Howells
+Acked-by: Pekka Enberg
+Signed-off-by: Andrew Morton
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/frv/include/asm/cache.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/frv/include/asm/cache.h
++++ b/arch/frv/include/asm/cache.h
+@@ -17,6 +17,8 @@
+ #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
+ #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
++#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
++
+ #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
+ #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
+
diff --git a/queue-2.6.34/m68k-set-arch_kmalloc_minalign.patch b/queue-2.6.34/m68k-set-arch_kmalloc_minalign.patch
new file mode 100644
index 00000000000..d511a378cba
--- /dev/null
+++ b/queue-2.6.34/m68k-set-arch_kmalloc_minalign.patch
@@ -0,0 +1,34 @@
+From dd6c26a66bdc629a500174ffe73b010b070b9f1b Mon Sep 17 00:00:00 2001
+From: FUJITA Tomonori
+Date: Sun, 23 May 2010 19:38:14 +0200
+Subject: m68k: set ARCH_KMALLOC_MINALIGN
+
+From: FUJITA Tomonori
+
+commit dd6c26a66bdc629a500174ffe73b010b070b9f1b upstream.
+
+Architectures that handle DMA-non-coherent memory need to set
+ARCH_KMALLOC_MINALIGN to make sure that kmalloc'ed buffer is DMA-safe: the
+buffer doesn't share a cache with the others.
+
+Signed-off-by: FUJITA Tomonori
+Cc: Geert Uytterhoeven
+Cc: Roman Zippel
+Acked-by: Pekka Enberg
+Signed-off-by: Andrew Morton
+Signed-off-by: Geert Uytterhoeven
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/m68k/include/asm/cache.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/m68k/include/asm/cache.h
++++ b/arch/m68k/include/asm/cache.h
+@@ -8,4 +8,6 @@
+ #define L1_CACHE_SHIFT 4
+ #define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
+
++#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
++
+ #endif
diff --git a/queue-2.6.34/m68knommu-fix-broken-use-of-buad_table_size-in-68328serial-driver.patch b/queue-2.6.34/m68knommu-fix-broken-use-of-buad_table_size-in-68328serial-driver.patch
new file mode 100644
index 00000000000..22328808888
--- /dev/null
+++ b/queue-2.6.34/m68knommu-fix-broken-use-of-buad_table_size-in-68328serial-driver.patch
@@ -0,0 +1,42 @@
+From e9a137cb00d8b0f71c08a9e67d993f53e7713d21 Mon Sep 17 00:00:00 2001
+From: Greg Ungerer
+Date: Mon, 24 May 2010 14:32:55 -0700
+Subject: m68knommu: fix broken use of BUAD_TABLE_SIZE in 68328serial driver
+
+From: Greg Ungerer
+
+commit e9a137cb00d8b0f71c08a9e67d993f53e7713d21 upstream.
+
+Commit 8b505ca8e2600eb9e7dd2d6b2682a81717671374 ("serial: 68328serial.c:
+remove BAUD_TABLE_SIZE macro") misses one use of BAUD_TABLE_SIZE. So the
+resulting 68328serial.c does not compile:
+
+drivers/serial/68328serial.c: In function `m68328_console_setup':
+drivers/serial/68328serial.c:1439: error: `BAUD_TABLE_SIZE' undeclared (first use in this function)
+drivers/serial/68328serial.c:1439: error: (Each undeclared identifier is reported only once
+drivers/serial/68328serial.c:1439: error: for each function it appears in.)
+
+Fix that last use of it.
+
+Signed-off-by: Greg Ungerer
+Cc: Thiago Farina
+Cc: Alan Cox
+Signed-off-by: Andrew Morton
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/serial/68328serial.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/serial/68328serial.c
++++ b/drivers/serial/68328serial.c
+@@ -1437,7 +1437,7 @@ int m68328_console_setup(struct console
+ for (i = 0; i < ARRAY_SIZE(baud_table); i++)
+ if (baud_table[i] == n)
+ break;
+- if (i < BAUD_TABLE_SIZE) {
++ if (i < ARRAY_SIZE(baud_table)) {
+ m68328_console_baud = n;
+ m68328_console_cbaud = 0;
+ if (i > 15) {
diff --git a/queue-2.6.34/md-fix-read-balancing-in-raid1-and-raid10-on-drives-2tb.patch b/queue-2.6.34/md-fix-read-balancing-in-raid1-and-raid10-on-drives-2tb.patch
new file mode 100644
index 00000000000..e0ea8791ddf
--- /dev/null
+++ b/queue-2.6.34/md-fix-read-balancing-in-raid1-and-raid10-on-drives-2tb.patch
@@ -0,0 +1,55 @@
+From af3a2cd6b8a479345786e7fe5e199ad2f6240e56 Mon Sep 17 00:00:00 2001
+From: NeilBrown
+Date: Sat, 8 May 2010 08:20:17 +1000
+Subject: md: Fix read balancing in RAID1 and RAID10 on drives > 2TB
+
+From: NeilBrown
+
+commit af3a2cd6b8a479345786e7fe5e199ad2f6240e56 upstream.
+
+read_balance uses a "unsigned long" for a sector number which
+will get truncated beyond 2TB.
+This will cause read-balancing to be non-optimal, and can cause
+data to be read from the 'wrong' branch during a resync. This has a
+very small chance of returning wrong data.
+
+Reported-by: Jordan Russell
+Signed-off-by: NeilBrown
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/md/raid1.c | 4 ++--
+ drivers/md/raid10.c | 2 +-
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -418,7 +418,7 @@ static void raid1_end_write_request(stru
+ */
+ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
+ {
+- const unsigned long this_sector = r1_bio->sector;
++ const sector_t this_sector = r1_bio->sector;
+ int new_disk = conf->last_used, disk = new_disk;
+ int wonly_disk = -1;
+ const int sectors = r1_bio->sectors;
+@@ -434,7 +434,7 @@ static int read_balance(conf_t *conf, r1
+ retry:
+ if (conf->mddev->recovery_cp < MaxSector &&
+ (this_sector + sectors >= conf->next_resync)) {
+- /* Choose the first operation device, for consistancy */
++ /* Choose the first operational device, for consistancy */
+ new_disk = 0;
+
+ for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -494,7 +494,7 @@ static int raid10_mergeable_bvec(struct
+ */
+ static int read_balance(conf_t *conf, r10bio_t *r10_bio)
+ {
+- const unsigned long this_sector = r10_bio->sector;
++ const sector_t this_sector = r10_bio->sector;
+ int disk, slot, nslot;
+ const int sectors = r10_bio->sectors;
+ sector_t new_distance, current_distance;
diff --git a/queue-2.6.34/md-linear-avoid-possible-oops-and-array-stop.patch b/queue-2.6.34/md-linear-avoid-possible-oops-and-array-stop.patch
new file mode 100644
index 00000000000..af8378951d4
--- /dev/null
+++ b/queue-2.6.34/md-linear-avoid-possible-oops-and-array-stop.patch
@@ -0,0 +1,35 @@
+From ef2f80ff7325b2c1888ff02ead28957b5840bf51 Mon Sep 17 00:00:00 2001
+From: NeilBrown
+Date: Mon, 17 May 2010 11:27:00 +1000
+Subject: md/linear: avoid possible oops and array stop
+
+From: NeilBrown
+
+commit ef2f80ff7325b2c1888ff02ead28957b5840bf51 upstream.
+
+Since commit ef286f6fa673cd7fb367e1b145069d8dbfcc6081
+it has been important that each personality clears
+->private in the ->stop() function, or sets it to a
+attribute group to be removed.
+linear.c doesn't. This can sometimes lead to an oops,
+though it doesn't always.
+
+Suitable for 2.6.33-stable and 2.6.34.
+
+Signed-off-by: NeilBrown
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/md/linear.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/md/linear.c
++++ b/drivers/md/linear.c
+@@ -282,6 +282,7 @@ static int linear_stop (mddev_t *mddev)
+ rcu_barrier();
+ blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
+ kfree(conf);
++ mddev->private = NULL;
+
+ return 0;
+ }
diff --git a/queue-2.6.34/md-raid1-fix-counting-of-write-targets.patch b/queue-2.6.34/md-raid1-fix-counting-of-write-targets.patch
new file mode 100644
index 00000000000..115ecc2a05e
--- /dev/null
+++ b/queue-2.6.34/md-raid1-fix-counting-of-write-targets.patch
@@ -0,0 +1,45 @@
+From 964147d5c86d63be79b442c30f3783d49860c078 Mon Sep 17 00:00:00 2001
+From: NeilBrown
+Date: Tue, 18 May 2010 15:27:13 +1000
+Subject: md/raid1: fix counting of write targets.
+
+From: NeilBrown
+
+commit 964147d5c86d63be79b442c30f3783d49860c078 upstream.
+
+There is a very small race window when writing to a
+RAID1 such that if a device is marked faulty at exactly the wrong
+time, the write-in-progress will not be sent to the device,
+but the bitmap (if present) will be updated to say that
+the write was sent.
+
+Then if the device turned out to still be usable as was re-added
+to the array, the bitmap-based-resync would skip resyncing that
+block, possibly leading to corruption. This would only be a problem
+if no further writes were issued to that area of the device (i.e.
+that bitmap chunk).
+
+Suitable for any pending -stable kernel.
+
+Signed-off-by: NeilBrown
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/md/raid1.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -912,9 +912,10 @@ static int make_request(struct request_q
+ if (test_bit(Faulty, &rdev->flags)) {
+ rdev_dec_pending(rdev, mddev);
+ r1_bio->bios[i] = NULL;
+- } else
++ } else {
+ r1_bio->bios[i] = bio;
+- targets++;
++ targets++;
++ }
+ } else
+ r1_bio->bios[i] = NULL;
+ }
diff --git a/queue-2.6.34/md-remove-unneeded-sysfs-files-more-promptly.patch b/queue-2.6.34/md-remove-unneeded-sysfs-files-more-promptly.patch
new file mode 100644
index 00000000000..69df6a39d9a
--- /dev/null
+++ b/queue-2.6.34/md-remove-unneeded-sysfs-files-more-promptly.patch
@@ -0,0 +1,101 @@
+From b6eb127d274385d81ce8dd45c98190f097bce1b4 Mon Sep 17 00:00:00 2001
+From: NeilBrown
+Date: Thu, 15 Apr 2010 10:13:47 +1000
+Subject: md: remove unneeded sysfs files more promptly
+
+From: NeilBrown
+
+commit b6eb127d274385d81ce8dd45c98190f097bce1b4 upstream.
+
+When an array is stopped we need to remove some
+sysfs files which are dependent on the type of array.
+
+We need to delay that deletion as deleting them while holding
+reconfig_mutex can lead to deadlocks.
+
+We currently delay them until the array is completely destroyed.
+However it is possible to deactivate and then reactivate the array.
+It is also possible to need to remove sysfs files when changing level,
+which can potentially happen several times before an array is
+destroyed.
+
+So we need to delete these files more promptly: as soon as
+reconfig_mutex is dropped.
+
+We need to ensure this happens before do_md_run can restart the array,
+so we use open_mutex for some extra locking. This is not deadlock
+prone.
+
+Signed-off-by: NeilBrown
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/md/md.c | 41 +++++++++++++++++++++++++++++++++++----------
+ 1 file changed, 31 insertions(+), 10 deletions(-)
+
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -508,9 +508,32 @@ static inline int mddev_trylock(mddev_t
+ return mutex_trylock(&mddev->reconfig_mutex);
+ }
+
++static struct attribute_group md_redundancy_group;
++
+ static inline void mddev_unlock(mddev_t * mddev)
+ {
+- mutex_unlock(&mddev->reconfig_mutex);
++ if (mddev->pers == NULL && mddev->private) {
++ /* These cannot be removed under reconfig_mutex as
++ * an access to the files will try to take reconfig_mutex
++ * while holding the file unremovable, which leads to
++ * a deadlock.
++ * So hold open_mutex instead - we are allowed to take
++ * it while holding reconfig_mutex, and md_run can
++ * use it to wait for the remove to complete.
++ */
++ mutex_lock(&mddev->open_mutex);
++ mutex_unlock(&mddev->reconfig_mutex);
++
++ sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
++ if (mddev->private != (void*)1)
++ sysfs_remove_group(&mddev->kobj, mddev->private);
++ if (mddev->sysfs_action)
++ sysfs_put(mddev->sysfs_action);
++ mddev->sysfs_action = NULL;
++ mddev->private = NULL;
++ mutex_unlock(&mddev->open_mutex);
++ } else
++ mutex_unlock(&mddev->reconfig_mutex);
+
+ md_wakeup_thread(mddev->thread);
+ }
+@@ -4082,15 +4105,6 @@ static void mddev_delayed_delete(struct
+ {
+ mddev_t *mddev = container_of(ws, mddev_t, del_work);
+
+- if (mddev->private) {
+- sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
+- if (mddev->private != (void*)1)
+- sysfs_remove_group(&mddev->kobj, mddev->private);
+- if (mddev->sysfs_action)
+- sysfs_put(mddev->sysfs_action);
+- mddev->sysfs_action = NULL;
+- mddev->private = NULL;
+- }
+ sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
+ kobject_del(&mddev->kobj);
+ kobject_put(&mddev->kobj);
+@@ -4248,6 +4262,13 @@ static int do_md_run(mddev_t * mddev)
+ if (mddev->pers)
+ return -EBUSY;
+
++ /* These two calls synchronise us with the
++ * sysfs_remove_group calls in mddev_unlock,
++ * so they must have completed.
++ */
++ mutex_lock(&mddev->open_mutex);
++ mutex_unlock(&mddev->open_mutex);
++
+ /*
+ * Analyze all RAID superblock(s)
+ */
diff --git a/queue-2.6.34/md-set-mddev-readonly-flag-on-blkdev-blkroset-ioctl.patch b/queue-2.6.34/md-set-mddev-readonly-flag-on-blkdev-blkroset-ioctl.patch
new file mode 100644
index 00000000000..61ca66e9c6f
--- /dev/null
+++ b/queue-2.6.34/md-set-mddev-readonly-flag-on-blkdev-blkroset-ioctl.patch
@@ -0,0 +1,71 @@
+From e2218350465e7e0931676b4849b594c978437bce Mon Sep 17 00:00:00 2001
+From: Dan Williams
+Date: Wed, 12 May 2010 08:25:37 +1000
+Subject: md: set mddev readonly flag on blkdev BLKROSET ioctl
+
+From: Dan Williams
+
+commit e2218350465e7e0931676b4849b594c978437bce upstream.
+
+When the user sets the block device to readwrite then the mddev should
+follow suit. Otherwise, the BUG_ON in md_write_start() will be set to
+trigger.
+
+The reverse direction, setting mddev->ro to match a set readonly
+request, can be ignored because the blkdev level readonly flag precludes
+the need to have mddev->ro set correctly. Nevermind the fact that
+setting mddev->ro to 1 may fail if the array is in use.
+
+Signed-off-by: Dan Williams
+Signed-off-by: NeilBrown
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/md/md.c | 29 +++++++++++++++++++++++++++++
+ 1 file changed, 29 insertions(+)
+
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -5517,6 +5517,7 @@ static int md_ioctl(struct block_device
+ int err = 0;
+ void __user *argp = (void __user *)arg;
+ mddev_t *mddev = NULL;
++ int ro;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+@@ -5652,6 +5653,34 @@ static int md_ioctl(struct block_device
+ err = do_md_stop(mddev, 1, 1);
+ goto done_unlock;
+
++ case BLKROSET:
++ if (get_user(ro, (int __user *)(arg))) {
++ err = -EFAULT;
++ goto done_unlock;
++ }
++ err = -EINVAL;
++
++ /* if the bdev is going readonly the value of mddev->ro
++ * does not matter, no writes are coming
++ */
++ if (ro)
++ goto done_unlock;
++
++ /* are we are already prepared for writes? */
++ if (mddev->ro != 1)
++ goto done_unlock;
++
++ /* transitioning to readauto need only happen for
++ * arrays that call md_write_start
++ */
++ if (mddev->pers) {
++ err = restart_array(mddev);
++ if (err == 0) {
++ mddev->ro = 2;
++ set_disk_ro(mddev->gendisk, 0);
++ }
++ }
++ goto done_unlock;
+ }
+
+ /*
diff --git a/queue-2.6.34/mn10300-set-arch_kmalloc_minalign.patch b/queue-2.6.34/mn10300-set-arch_kmalloc_minalign.patch
new file mode 100644
index 00000000000..fbbdb6408d4
--- /dev/null
+++ b/queue-2.6.34/mn10300-set-arch_kmalloc_minalign.patch
@@ -0,0 +1,36 @@
+From 6cdafaae41d52e6ef9a5c5be23602ef083e4d0f9 Mon Sep 17 00:00:00 2001
+From: FUJITA Tomonori
+Date: Mon, 24 May 2010 14:32:58 -0700
+Subject: mn10300: set ARCH_KMALLOC_MINALIGN
+
+From: FUJITA Tomonori
+
+commit 6cdafaae41d52e6ef9a5c5be23602ef083e4d0f9 upstream.
+
+Architectures that handle DMA-non-coherent memory need to set
+ARCH_KMALLOC_MINALIGN to make sure that kmalloc'ed buffer is DMA-safe: the
+buffer doesn't share a cache with the others.
+
+Signed-off-by: FUJITA Tomonori
+Acked-by: David Howells
+Cc: Koichi Yasutake
+Acked-by: Pekka Enberg
+Signed-off-by: Andrew Morton
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/mn10300/include/asm/cache.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/mn10300/include/asm/cache.h
++++ b/arch/mn10300/include/asm/cache.h
+@@ -21,6 +21,8 @@
+ #define L1_CACHE_DISPARITY L1_CACHE_NENTRIES * L1_CACHE_BYTES
+ #endif
+
++#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
++
+ /* data cache purge registers
+ * - read from the register to unconditionally purge that cache line
+ * - write address & 0xffffff00 to conditionally purge that cache line
diff --git a/queue-2.6.34/rtc-cmos-do-dev_set_drvdata-earlier-in-the-initialization.patch b/queue-2.6.34/rtc-cmos-do-dev_set_drvdata-earlier-in-the-initialization.patch
new file mode 100644
index 00000000000..c8c0b79a03a
--- /dev/null
+++ b/queue-2.6.34/rtc-cmos-do-dev_set_drvdata-earlier-in-the-initialization.patch
@@ -0,0 +1,58 @@
+From 6ba8bcd457d9fc793ac9435aa2e4138f571d4ec5 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter
+Date: Mon, 24 May 2010 14:33:49 -0700
+Subject: rtc-cmos: do dev_set_drvdata() earlier in the initialization
+
+From: Dan Carpenter
+
+commit 6ba8bcd457d9fc793ac9435aa2e4138f571d4ec5 upstream.
+
+The bug is an oops when dev_get_drvdata() returned null in
+cmos_update_irq_enable(). The call tree looks like this:
+ rtc_dev_ioctl()
+ => rtc_update_irq_enable()
+ => cmos_update_irq_enable()
+
+It's caused by a race condition in the module initialization. It is
+rtc_device_register() which makes the ioctl operations live so I moved
+the call to dev_set_drvdata() before the call to rtc_device_register().
+
+Addresses https://bugzilla.kernel.org/show_bug.cgi?id=15963
+
+Reported-by: Randy Dunlap
+Signed-off-by: Dan Carpenter
+Tested-by: Randy Dunlap
+Cc: Alessandro Zummo
+Cc: Paul Gortmaker
+Cc: Malte Schroder
+Cc: Ralf Baechle
+Cc: Herton Ronaldo Krzesinski
+Signed-off-by: Andrew Morton
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/rtc/rtc-cmos.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/rtc/rtc-cmos.c
++++ b/drivers/rtc/rtc-cmos.c
+@@ -719,6 +719,9 @@ cmos_do_probe(struct device *dev, struct
+ }
+ }
+
++ cmos_rtc.dev = dev;
++ dev_set_drvdata(dev, &cmos_rtc);
++
+ cmos_rtc.rtc = rtc_device_register(driver_name, dev,
+ &cmos_rtc_ops, THIS_MODULE);
+ if (IS_ERR(cmos_rtc.rtc)) {
+@@ -726,8 +729,6 @@ cmos_do_probe(struct device *dev, struct
+ goto cleanup0;
+ }
+
+- cmos_rtc.dev = dev;
+- dev_set_drvdata(dev, &cmos_rtc);
+ rename_region(ports, dev_name(&cmos_rtc.rtc->dev));
+
+ spin_lock_irq(&rtc_lock);
diff --git a/queue-2.6.34/rtc-s3c-initialize-driver-data-before-using-it.patch b/queue-2.6.34/rtc-s3c-initialize-driver-data-before-using-it.patch
new file mode 100644
index 00000000000..2569ec18a82
--- /dev/null
+++ b/queue-2.6.34/rtc-s3c-initialize-driver-data-before-using-it.patch
@@ -0,0 +1,45 @@
+From e893de59a4982791368b3ce412bc67dd601a88a0 Mon Sep 17 00:00:00 2001
+From: Maurus Cuelenaere
+Date: Fri, 4 Jun 2010 14:14:44 -0700
+Subject: rtc: s3c: initialize driver data before using it
+
+From: Maurus Cuelenaere
+
+commit e893de59a4982791368b3ce412bc67dd601a88a0 upstream.
+
+s3c_rtc_setfreq() uses the platform driver data to derive struct rtc_device,
+so make sure drvdata is set _before_ s3c_rtc_setfreq() is called.
+
+Signed-off-by: Maurus Cuelenaere
+Cc: Paul Gortmaker
+Cc: Alessandro Zummo
+Cc: Maurus Cuelenaere
+Signed-off-by: Andrew Morton
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/rtc/rtc-s3c.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/rtc/rtc-s3c.c
++++ b/drivers/rtc/rtc-s3c.c
+@@ -457,8 +457,6 @@ static int __devinit s3c_rtc_probe(struc
+ pr_debug("s3c2410_rtc: RTCCON=%02x\n",
+ readb(s3c_rtc_base + S3C2410_RTCCON));
+
+- s3c_rtc_setfreq(&pdev->dev, 1);
+-
+ device_init_wakeup(&pdev->dev, 1);
+
+ /* register RTC and exit */
+@@ -475,6 +473,9 @@ static int __devinit s3c_rtc_probe(struc
+ rtc->max_user_freq = 128;
+
+ platform_set_drvdata(pdev, rtc);
++
++ s3c_rtc_setfreq(&pdev->dev, 1);
++
+ return 0;
+
+ err_nortc:
diff --git a/queue-2.6.34/series b/queue-2.6.34/series
index f913761c56e..8ba9a25d2ea 100644
--- a/queue-2.6.34/series
+++ b/queue-2.6.34/series
@@ -83,3 +83,22 @@ powerpc-fsl-booke-fix-instructiontlberror-execute-permission-check.patch
 powerpc-fsl-booke-move-loadcam_entry-back-to-asm-code-to-fix-smp-ftrace.patch
 powerpc-oprofile-fix-potential-buffer-overrun-in-op_model_cell.c.patch
 writeback-disable-periodic-old-data-writeback-for-dirty_writeback_centisecs.patch
+md-raid1-fix-counting-of-write-targets.patch
+md-fix-read-balancing-in-raid1-and-raid10-on-drives-2tb.patch
+md-linear-avoid-possible-oops-and-array-stop.patch
+md-remove-unneeded-sysfs-files-more-promptly.patch
+md-set-mddev-readonly-flag-on-blkdev-blkroset-ioctl.patch
+x86-amd-iommu-fix-crash-when-request_mem_region-fails.patch
+x86-amd-iommu-fall-back-to-gart-if-initialization-fails.patch
+eeepc-wmi-depends-on-backlight_class_device.patch
+clean-dcache_cant_mount-in-d_delete.patch
+exofs-confusion-between-kmap-and-kmap_atomic-api.patch
+mn10300-set-arch_kmalloc_minalign.patch
+m68knommu-fix-broken-use-of-buad_table_size-in-68328serial-driver.patch
+m68k-set-arch_kmalloc_minalign.patch
+rtc-cmos-do-dev_set_drvdata-earlier-in-the-initialization.patch
+rtc-s3c-initialize-driver-data-before-using-it.patch
+frv-set-arch_kmalloc_minalign.patch
+xtensa-set-arch_kmalloc_minalign.patch
+blackfin-set-arch_kmalloc_minalign.patch
+tmpfs-insert-tmpfs-cache-pages-to-inactive-list-at-first.patch
diff --git a/queue-2.6.34/tmpfs-insert-tmpfs-cache-pages-to-inactive-list-at-first.patch b/queue-2.6.34/tmpfs-insert-tmpfs-cache-pages-to-inactive-list-at-first.patch
new file mode 100644
index 00000000000..b6d43d919aa
--- /dev/null
+++ b/queue-2.6.34/tmpfs-insert-tmpfs-cache-pages-to-inactive-list-at-first.patch
@@ -0,0 +1,94 @@
+From e9d6c157385e4efa61cb8293e425c9d8beba70d3 Mon Sep 17 00:00:00 2001
+From: KOSAKI Motohiro
+Date: Mon, 24 May 2010 14:31:48 -0700
+Subject: tmpfs: insert tmpfs cache pages to inactive list at first
+
+From: KOSAKI Motohiro
+
+commit e9d6c157385e4efa61cb8293e425c9d8beba70d3 upstream.
+
+Shaohua Li reported parallel file copy on tmpfs can lead to OOM killer.
+This is regression of caused by commit 9ff473b9a7 ("vmscan: evict
+streaming IO first"). Wow, It is 2 years old patch!
+
+Currently, tmpfs file cache is inserted active list at first. This means
+that the insertion doesn't only increase numbers of pages in anon LRU, but
+it also reduces anon scanning ratio. Therefore, vmscan will get totally
+confused. It scans almost only file LRU even though the system has plenty
+unused tmpfs pages.
+
+Historically, lru_cache_add_active_anon() was used for two reasons.
+1) Intend to priotize shmem page rather than regular file cache.
+2) Intend to avoid reclaim priority inversion of used once pages.
+
+But we've lost both motivation because (1) Now we have separate anon and
+file LRU list. then, to insert active list doesn't help such priotize.
+(2) In past, one pte access bit will cause page activation. then to
+insert inactive list with pte access bit mean higher priority than to
+insert active list. Its priority inversion may lead to uninteded lru
+chun. but it was already solved by commit 645747462 (vmscan: detect
+mapped file pages used only once). (Thanks Hannes, you are great!)
+
+Thus, now we can use lru_cache_add_anon() instead.
+
+Signed-off-by: KOSAKI Motohiro
+Reported-by: Shaohua Li
+Reviewed-by: Wu Fengguang
+Reviewed-by: Johannes Weiner
+Reviewed-by: Rik van Riel
+Reviewed-by: Minchan Kim
+Acked-by: Hugh Dickins
+Cc: Henrique de Moraes Holschuh
+Signed-off-by: Andrew Morton
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ include/linux/swap.h | 10 ----------
+ mm/filemap.c | 4 ++--
+ 2 files changed, 2 insertions(+), 12 deletions(-)
+
+--- a/include/linux/swap.h
++++ b/include/linux/swap.h
+@@ -223,21 +223,11 @@ static inline void lru_cache_add_anon(st
+ __lru_cache_add(page, LRU_INACTIVE_ANON);
+ }
+
+-static inline void lru_cache_add_active_anon(struct page *page)
+-{
+- __lru_cache_add(page, LRU_ACTIVE_ANON);
+-}
+-
+ static inline void lru_cache_add_file(struct page *page)
+ {
+ __lru_cache_add(page, LRU_INACTIVE_FILE);
+ }
+
+-static inline void lru_cache_add_active_file(struct page *page)
+-{
+- __lru_cache_add(page, LRU_ACTIVE_FILE);
+-}
+-
+ /* linux/mm/vmscan.c */
+ extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
+ gfp_t gfp_mask, nodemask_t *mask);
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -441,7 +441,7 @@ int add_to_page_cache_lru(struct page *p
+ /*
+ * Splice_read and readahead add shmem/tmpfs pages into the page cache
+ * before shmem_readpage has a chance to mark them as SwapBacked: they
+- * need to go on the active_anon lru below, and mem_cgroup_cache_charge
++ * need to go on the anon lru below, and mem_cgroup_cache_charge
+ * (called in add_to_page_cache) needs to know where they're going too.
+ */
+ if (mapping_cap_swap_backed(mapping))
+@@ -452,7 +452,7 @@ int add_to_page_cache_lru(struct page *p
+ if (page_is_file_cache(page))
+ lru_cache_add_file(page);
+ else
+- lru_cache_add_active_anon(page);
++ lru_cache_add_anon(page);
+ }
+ return ret;
+ }
diff --git a/queue-2.6.34/x86-amd-iommu-fall-back-to-gart-if-initialization-fails.patch b/queue-2.6.34/x86-amd-iommu-fall-back-to-gart-if-initialization-fails.patch
new file mode 100644
index 00000000000..c35c8019bb2
--- /dev/null
+++ b/queue-2.6.34/x86-amd-iommu-fall-back-to-gart-if-initialization-fails.patch
@@ -0,0 +1,54 @@
+From d7f0776975334070a93370ae048fda0c31a91c38 Mon Sep 17 00:00:00 2001
+From: Joerg Roedel
+Date: Mon, 31 May 2010 15:05:20 +0200
+Subject: x86/amd-iommu: Fall back to GART if initialization fails
+
+From: Joerg Roedel
+
+commit d7f0776975334070a93370ae048fda0c31a91c38 upstream.
+
+This patch implements a fallback to the GART IOMMU if this
+is possible and the AMD IOMMU initialization failed.
+Otherwise the fallback would be nommu which is very
+problematic on machines with more than 4GB of memory or
+swiotlb which hurts io-performance.
+
+Signed-off-by: Joerg Roedel
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/x86/kernel/amd_iommu.c | 4 ----
+ arch/x86/kernel/amd_iommu_init.c | 9 +++++++++
+ 2 files changed, 9 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/kernel/amd_iommu.c
++++ b/arch/x86/kernel/amd_iommu.c
+@@ -2257,10 +2257,6 @@ int __init amd_iommu_init_dma_ops(void)
+
+ iommu_detected = 1;
+ swiotlb = 0;
+-#ifdef CONFIG_GART_IOMMU
+- gart_iommu_aperture_disabled = 1;
+- gart_iommu_aperture = 0;
+-#endif
+
+ /* Make the driver finally visible to the drivers */
+ dma_ops = &amd_iommu_dma_ops;
+--- a/arch/x86/kernel/amd_iommu_init.c
++++ b/arch/x86/kernel/amd_iommu_init.c
+@@ -1357,6 +1357,15 @@ free:
+
+ free_unity_maps();
+
++#ifdef CONFIG_GART_IOMMU
++ /*
++ * We failed to initialize the AMD IOMMU - try fallback to GART
++ * if possible.
++ */
++ gart_iommu_init();
++
++#endif
++
+ goto out;
+ }
+
diff --git a/queue-2.6.34/x86-amd-iommu-fix-crash-when-request_mem_region-fails.patch b/queue-2.6.34/x86-amd-iommu-fix-crash-when-request_mem_region-fails.patch
new file mode 100644
index 00000000000..d3e26a10922
--- /dev/null
+++ b/queue-2.6.34/x86-amd-iommu-fix-crash-when-request_mem_region-fails.patch
@@ -0,0 +1,58 @@
+From e82752d8b5a7e0a5e4d607fd8713549e2a4e2741 Mon Sep 17 00:00:00 2001
+From: Joerg Roedel
+Date: Fri, 28 May 2010 14:26:48 +0200
+Subject: x86/amd-iommu: Fix crash when request_mem_region fails
+
+From: Joerg Roedel
+
+commit e82752d8b5a7e0a5e4d607fd8713549e2a4e2741 upstream.
+
+When request_mem_region fails the error path tries to
+disable the IOMMUs. This accesses the mmio-region which was
+not allocated leading to a kernel crash. This patch fixes
+the issue.
+
+Signed-off-by: Joerg Roedel
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/x86/kernel/amd_iommu_init.c | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/kernel/amd_iommu_init.c
++++ b/arch/x86/kernel/amd_iommu_init.c
+@@ -286,8 +286,12 @@ static u8 * __init iommu_map_mmio_space(
+ {
+ u8 *ret;
+
+- if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu"))
++ if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu")) {
++ pr_err("AMD-Vi: Can not reserve memory region %llx for mmio\n",
++ address);
++ pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n");
+ return NULL;
++ }
+
+ ret = ioremap_nocache(address, MMIO_REGION_LENGTH);
+ if (ret != NULL)
+@@ -1313,7 +1317,7 @@ static int __init amd_iommu_init(void)
+ ret = amd_iommu_init_dma_ops();
+
+ if (ret)
+- goto free;
++ goto free_disable;
+
+ amd_iommu_init_api();
+
+@@ -1331,9 +1335,10 @@ static int __init amd_iommu_init(void)
+ out:
+ return ret;
+
+-free:
++free_disable:
+ disable_iommus();
+
++free:
+ amd_iommu_uninit_devices();
+
+ free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
diff --git a/queue-2.6.34/xtensa-set-arch_kmalloc_minalign.patch b/queue-2.6.34/xtensa-set-arch_kmalloc_minalign.patch
new file mode 100644
index 00000000000..2c022f7296f
--- /dev/null
+++ b/queue-2.6.34/xtensa-set-arch_kmalloc_minalign.patch
@@ -0,0 +1,33 @@
+From 498900fc9cd1adbad1ba6b55ed9d8f2f5d655ca3 Mon Sep 17 00:00:00 2001
+From: FUJITA Tomonori
+Date: Mon, 24 May 2010 14:31:45 -0700
+Subject: xtensa: set ARCH_KMALLOC_MINALIGN
+
+From: FUJITA Tomonori
+
+commit 498900fc9cd1adbad1ba6b55ed9d8f2f5d655ca3 upstream.
+
+Architectures that handle DMA-non-coherent memory need to set
+ARCH_KMALLOC_MINALIGN to make sure that kmalloc'ed buffer is DMA-safe: the
+buffer doesn't share a cache with the others.
+
+Signed-off-by: FUJITA Tomonori
+Cc: Chris Zankel
+Acked-by: Pekka Enberg
+Signed-off-by: Andrew Morton
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/xtensa/include/asm/cache.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/xtensa/include/asm/cache.h
++++ b/arch/xtensa/include/asm/cache.h
+@@ -29,5 +29,6 @@
+ # define CACHE_WAY_SIZE ICACHE_WAY_SIZE
+ #endif
+
++#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+
+ #endif /* _XTENSA_CACHE_H */
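
Five of the patches queued above (blackfin, frv, m68k, mn10300 and xtensa) make the same change: they define ARCH_KMALLOC_MINALIGN as L1_CACHE_BYTES so that, on hardware without coherent DMA, kmalloc() never hands out a buffer that shares a cache line with an unrelated object. The sketch below only illustrates that failure mode; it is not code from any of these patches, and the 32-byte line size and the toy slab layout are assumptions picked for the demo.

/*
 * Illustration (user space, hypothetical values): why a kmalloc minimum
 * alignment smaller than the cache line size is unsafe for DMA on
 * non-coherent hardware.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define LINE 32u  /* assumed L1 cache line size for the demo */

static int same_line(const void *a, const void *b)
{
    return ((uintptr_t)a / LINE) == ((uintptr_t)b / LINE);
}

int main(void)
{
    /* A "slab" that starts on a cache-line boundary. */
    unsigned char *slab = aligned_alloc(LINE, 4 * LINE);
    if (!slab)
        return 1;

    /* With an 8-byte minimum alignment a DMA buffer and an unrelated
     * object can occupy the same line, so cache maintenance done for
     * the DMA transfer also hits the neighbour's data. */
    void *dma_buf = slab;
    void *neighbour = slab + 8;
    printf("minalign 8  -> share a line: %s\n",
           same_line(dma_buf, neighbour) ? "yes (DMA-unsafe)" : "no");

    /* With the minimum alignment raised to the line size, as the
     * ARCH_KMALLOC_MINALIGN patches do, every object starts on its
     * own cache line and the sharing disappears. */
    neighbour = slab + LINE;
    printf("minalign 32 -> share a line: %s\n",
           same_line(dma_buf, neighbour) ? "yes (DMA-unsafe)" : "no");

    free(slab);
    return 0;
}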