From: Greg Kroah-Hartman Date: Wed, 3 Oct 2012 17:23:29 +0000 (-0700) Subject: 3.5-stable patches X-Git-Tag: v3.0.45~21 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=fbdda0ad7259c368c874f5ad176743076c54b443;p=thirdparty%2Fkernel%2Fstable-queue.git 3.5-stable patches added patches: dm-handle-requests-beyond-end-of-device-instead-of-using-bug_on.patch dm-mpath-only-retry-ioctl-when-no-paths-if-queue_if_no_path-set.patch dm-table-clear-add_random-unless-all-devices-have-it-set.patch dm-verity-fix-overflow-check.patch vfs-dcache-fix-deadlock-in-tree-traversal.patch --- diff --git a/queue-3.5/dm-handle-requests-beyond-end-of-device-instead-of-using-bug_on.patch b/queue-3.5/dm-handle-requests-beyond-end-of-device-instead-of-using-bug_on.patch new file mode 100644 index 00000000000..021db1d3554 --- /dev/null +++ b/queue-3.5/dm-handle-requests-beyond-end-of-device-instead-of-using-bug_on.patch @@ -0,0 +1,131 @@ +From ba1cbad93dd47223b1f3b8edd50dd9ef2abcb2ed Mon Sep 17 00:00:00 2001 +From: Mike Snitzer +Date: Wed, 26 Sep 2012 23:45:42 +0100 +Subject: dm: handle requests beyond end of device instead of using BUG_ON + +From: Mike Snitzer + +commit ba1cbad93dd47223b1f3b8edd50dd9ef2abcb2ed upstream. + +The access beyond the end of device BUG_ON that was introduced to +dm_request_fn via commit 29e4013de7ad950280e4b2208 ("dm: implement +REQ_FLUSH/FUA support for request-based dm") was an overly +drastic (but simple) response to this situation. + +I have received a report that this BUG_ON was hit and now think +it would be better to use dm_kill_unmapped_request() to fail the clone +and original request with -EIO. + +map_request() will assign the valid target returned by +dm_table_find_target to tio->ti. But when the target +isn't valid tio->ti is never assigned (because map_request isn't +called); so add a check for tio->ti != NULL to dm_done(). + +Reported-by: Mike Christie +Signed-off-by: Mike Snitzer +Signed-off-by: Jun'ichi Nomura +Signed-off-by: Alasdair G Kergon +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/md/dm.c | 56 ++++++++++++++++++++++++++++++++++++++------------------ + 1 file changed, 38 insertions(+), 18 deletions(-) + +--- a/drivers/md/dm.c ++++ b/drivers/md/dm.c +@@ -865,10 +865,14 @@ static void dm_done(struct request *clon + { + int r = error; + struct dm_rq_target_io *tio = clone->end_io_data; +- dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io; ++ dm_request_endio_fn rq_end_io = NULL; + +- if (mapped && rq_end_io) +- r = rq_end_io(tio->ti, clone, error, &tio->info); ++ if (tio->ti) { ++ rq_end_io = tio->ti->type->rq_end_io; ++ ++ if (mapped && rq_end_io) ++ r = rq_end_io(tio->ti, clone, error, &tio->info); ++ } + + if (r <= 0) + /* The target wants to complete the I/O */ +@@ -1566,15 +1570,6 @@ static int map_request(struct dm_target + int r, requeued = 0; + struct dm_rq_target_io *tio = clone->end_io_data; + +- /* +- * Hold the md reference here for the in-flight I/O. +- * We can't rely on the reference count by device opener, +- * because the device may be closed during the request completion +- * when all bios are completed. +- * See the comment in rq_completed() too. 
+- */ +- dm_get(md); +- + tio->ti = ti; + r = ti->type->map_rq(ti, clone, &tio->info); + switch (r) { +@@ -1606,6 +1601,26 @@ static int map_request(struct dm_target + return requeued; + } + ++static struct request *dm_start_request(struct mapped_device *md, struct request *orig) ++{ ++ struct request *clone; ++ ++ blk_start_request(orig); ++ clone = orig->special; ++ atomic_inc(&md->pending[rq_data_dir(clone)]); ++ ++ /* ++ * Hold the md reference here for the in-flight I/O. ++ * We can't rely on the reference count by device opener, ++ * because the device may be closed during the request completion ++ * when all bios are completed. ++ * See the comment in rq_completed() too. ++ */ ++ dm_get(md); ++ ++ return clone; ++} ++ + /* + * q->request_fn for request-based dm. + * Called with the queue lock held. +@@ -1635,14 +1650,21 @@ static void dm_request_fn(struct request + pos = blk_rq_pos(rq); + + ti = dm_table_find_target(map, pos); +- BUG_ON(!dm_target_is_valid(ti)); ++ if (!dm_target_is_valid(ti)) { ++ /* ++ * Must perform setup, that dm_done() requires, ++ * before calling dm_kill_unmapped_request ++ */ ++ DMERR_LIMIT("request attempted access beyond the end of device"); ++ clone = dm_start_request(md, rq); ++ dm_kill_unmapped_request(clone, -EIO); ++ continue; ++ } + + if (ti->type->busy && ti->type->busy(ti)) + goto delay_and_out; + +- blk_start_request(rq); +- clone = rq->special; +- atomic_inc(&md->pending[rq_data_dir(clone)]); ++ clone = dm_start_request(md, rq); + + spin_unlock(q->queue_lock); + if (map_request(ti, clone, md)) +@@ -1662,8 +1684,6 @@ delay_and_out: + blk_delay_queue(q, HZ / 10); + out: + dm_table_put(map); +- +- return; + } + + int dm_underlying_device_busy(struct request_queue *q) diff --git a/queue-3.5/dm-mpath-only-retry-ioctl-when-no-paths-if-queue_if_no_path-set.patch b/queue-3.5/dm-mpath-only-retry-ioctl-when-no-paths-if-queue_if_no_path-set.patch new file mode 100644 index 00000000000..9fed299df02 --- /dev/null +++ b/queue-3.5/dm-mpath-only-retry-ioctl-when-no-paths-if-queue_if_no_path-set.patch @@ -0,0 +1,84 @@ +From 7ba10aa6fbac7158a50bec142132b04bc480bb29 Mon Sep 17 00:00:00 2001 +From: Mike Snitzer +Date: Wed, 26 Sep 2012 23:45:41 +0100 +Subject: dm mpath: only retry ioctl when no paths if queue_if_no_path set + +From: Mike Snitzer + +commit 7ba10aa6fbac7158a50bec142132b04bc480bb29 upstream. + +When there are no paths and multipath receives an ioctl, it waits until +a path becomes available. This behaviour is incorrect if the +"queue_if_no_path" setting was not specified, as then the ioctl should +be rejected immediately, which this patch now does. + +commit 35991652b ("dm mpath: allow ioctls to trigger pg init") should +have checked if queue_if_no_path was configured before queueing IO. + +Checking for the queue_if_no_path feature, like is done in map_io(), +allows the following table load to work without blocking in the +multipath_ioctl retry loop: + + echo "0 1024 multipath 0 0 0 0" | dmsetup create mpath_nodevs + +Without this fix the multipath_ioctl will block with the following stack +trace: + + blkid D 0000000000000002 0 23936 1 0x00000000 + ffff8802b89e5cd8 0000000000000082 ffff8802b89e5fd8 0000000000012440 + ffff8802b89e4010 0000000000012440 0000000000012440 0000000000012440 + ffff8802b89e5fd8 0000000000012440 ffff88030c2aab30 ffff880325794040 + Call Trace: + [] schedule+0x29/0x70 + [] schedule_timeout+0x182/0x2e0 + [] ? 
lock_timer_base+0x70/0x70 + [] schedule_timeout_uninterruptible+0x1e/0x20 + [] msleep+0x20/0x30 + [] multipath_ioctl+0x109/0x170 [dm_multipath] + [] dm_blk_ioctl+0xbc/0xd0 [dm_mod] + [] __blkdev_driver_ioctl+0x28/0x30 + [] blkdev_ioctl+0xce/0x730 + [] block_ioctl+0x3c/0x40 + [] do_vfs_ioctl+0x8c/0x340 + [] ? sys_newfstat+0x33/0x40 + [] sys_ioctl+0xa1/0xb0 + [] system_call_fastpath+0x16/0x1b + +Signed-off-by: Mike Snitzer +Acked-by: Mikulas Patocka +Signed-off-by: Alasdair G Kergon +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/md/dm-mpath.c | 11 +++++++---- + 1 file changed, 7 insertions(+), 4 deletions(-) + +--- a/drivers/md/dm-mpath.c ++++ b/drivers/md/dm-mpath.c +@@ -1520,6 +1520,7 @@ static int multipath_ioctl(struct dm_tar + unsigned long arg) + { + struct multipath *m = ti->private; ++ struct pgpath *pgpath; + struct block_device *bdev; + fmode_t mode; + unsigned long flags; +@@ -1535,12 +1536,14 @@ again: + if (!m->current_pgpath) + __choose_pgpath(m, 0); + +- if (m->current_pgpath) { +- bdev = m->current_pgpath->path.dev->bdev; +- mode = m->current_pgpath->path.dev->mode; ++ pgpath = m->current_pgpath; ++ ++ if (pgpath) { ++ bdev = pgpath->path.dev->bdev; ++ mode = pgpath->path.dev->mode; + } + +- if (m->queue_io) ++ if ((pgpath && m->queue_io) || (!pgpath && m->queue_if_no_path)) + r = -EAGAIN; + else if (!bdev) + r = -EIO; diff --git a/queue-3.5/dm-table-clear-add_random-unless-all-devices-have-it-set.patch b/queue-3.5/dm-table-clear-add_random-unless-all-devices-have-it-set.patch new file mode 100644 index 00000000000..4038e542191 --- /dev/null +++ b/queue-3.5/dm-table-clear-add_random-unless-all-devices-have-it-set.patch @@ -0,0 +1,94 @@ +From c3c4555edd10dbc0b388a0125b9c50de5e79af05 Mon Sep 17 00:00:00 2001 +From: Milan Broz +Date: Wed, 26 Sep 2012 23:45:43 +0100 +Subject: dm table: clear add_random unless all devices have it set + +From: Milan Broz + +commit c3c4555edd10dbc0b388a0125b9c50de5e79af05 upstream. + +Always clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not +have it set. Otherwise devices with predictable characteristics may +contribute entropy. + +QUEUE_FLAG_ADD_RANDOM specifies whether or not queue IO timings +contribute to the random pool. + +For bio-based targets this flag is always 0 because such devices have no +real queue. + +For request-based devices this flag was always set to 1 by default. + +Now set it according to the flags on underlying devices. If there is at +least one device which should not contribute, set the flag to zero: If a +device, such as fast SSD storage, is not suitable for supplying entropy, +a request-based queue stacked over it will not be either. + +Because the checking logic is exactly same as for the rotational flag, +share the iteration function with device_is_nonrot(). 
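
The rule reduces to a tiny predicate. A minimal standalone sketch (the
struct, the FLAG_* bits and all_devices_have() below are invented
stand-ins, not the kernel API): a stacked queue keeps an attribute only
when every member device has it, and the same loop serves the rotational
flag and the entropy flag alike.

  /* Minimal sketch with invented names; not the dm-table code itself. */
  #include <stdbool.h>
  #include <stdio.h>

  #define FLAG_NONROT     (1u << 0)  /* non-rotational medium */
  #define FLAG_ADD_RANDOM (1u << 1)  /* timings may feed the entropy pool */

  struct device {
      const char *name;
      unsigned int flags;
  };

  /* A stacked queue keeps a flag only if every member device has it;
   * one loop serves both attributes, as the patch does by sharing the
   * iteration function. */
  static bool all_devices_have(const struct device *devs, int n,
                               unsigned int flag)
  {
      for (int i = 0; i < n; i++)
          if (!(devs[i].flags & flag))
              return false;
      return true;
  }

  int main(void)
  {
      struct device devs[] = {
          { "hdd", FLAG_ADD_RANDOM },  /* rotational, contributes entropy */
          { "ssd", FLAG_NONROT },      /* predictable timings, no entropy */
      };
      int n = sizeof(devs) / sizeof(devs[0]);
      unsigned int qflags = FLAG_NONROT | FLAG_ADD_RANDOM;

      if (!all_devices_have(devs, n, FLAG_NONROT))
          qflags &= ~FLAG_NONROT;      /* cleared: the hdd is rotational */
      if (!all_devices_have(devs, n, FLAG_ADD_RANDOM))
          qflags &= ~FLAG_ADD_RANDOM;  /* cleared: the ssd must not contribute */

      printf("nonrot=%d add_random=%d\n",
             !!(qflags & FLAG_NONROT), !!(qflags & FLAG_ADD_RANDOM));
      return 0;
  }

With one rotational disk and one SSD in the table both flags end up
clear: the disk defeats NONROT and the SSD defeats ADD_RANDOM.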
+ +Signed-off-by: Milan Broz +Signed-off-by: Alasdair G Kergon +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/md/dm-table.c | 26 ++++++++++++++++++++++---- + 1 file changed, 22 insertions(+), 4 deletions(-) + +--- a/drivers/md/dm-table.c ++++ b/drivers/md/dm-table.c +@@ -1351,17 +1351,25 @@ static int device_is_nonrot(struct dm_ta + return q && blk_queue_nonrot(q); + } + +-static bool dm_table_is_nonrot(struct dm_table *t) ++static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev, ++ sector_t start, sector_t len, void *data) ++{ ++ struct request_queue *q = bdev_get_queue(dev->bdev); ++ ++ return q && !blk_queue_add_random(q); ++} ++ ++static bool dm_table_all_devices_attribute(struct dm_table *t, ++ iterate_devices_callout_fn func) + { + struct dm_target *ti; + unsigned i = 0; + +- /* Ensure that all underlying device are non-rotational. */ + while (i < dm_table_get_num_targets(t)) { + ti = dm_table_get_target(t, i++); + + if (!ti->type->iterate_devices || +- !ti->type->iterate_devices(ti, device_is_nonrot, NULL)) ++ !ti->type->iterate_devices(ti, func, NULL)) + return 0; + } + +@@ -1393,7 +1401,8 @@ void dm_table_set_restrictions(struct dm + if (!dm_table_discard_zeroes_data(t)) + q->limits.discard_zeroes_data = 0; + +- if (dm_table_is_nonrot(t)) ++ /* Ensure that all underlying devices are non-rotational. */ ++ if (dm_table_all_devices_attribute(t, device_is_nonrot)) + queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); + else + queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q); +@@ -1401,6 +1410,15 @@ void dm_table_set_restrictions(struct dm + dm_table_set_integrity(t); + + /* ++ * Determine whether or not this queue's I/O timings contribute ++ * to the entropy pool, Only request-based targets use this. ++ * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not ++ * have it set. ++ */ ++ if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random)) ++ queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q); ++ ++ /* + * QUEUE_FLAG_STACKABLE must be set after all queue settings are + * visible to other CPUs because, once the flag is set, incoming bios + * are processed by request-based dm, which refers to the queue diff --git a/queue-3.5/dm-verity-fix-overflow-check.patch b/queue-3.5/dm-verity-fix-overflow-check.patch new file mode 100644 index 00000000000..153c3311413 --- /dev/null +++ b/queue-3.5/dm-verity-fix-overflow-check.patch @@ -0,0 +1,46 @@ +From 1d55f6bcc0331d744cd5b56c4ee79e3809438161 Mon Sep 17 00:00:00 2001 +From: Mikulas Patocka +Date: Wed, 26 Sep 2012 23:45:48 +0100 +Subject: dm verity: fix overflow check + +From: Mikulas Patocka + +commit 1d55f6bcc0331d744cd5b56c4ee79e3809438161 upstream. + +This patch fixes sector_t overflow checking in dm-verity. + +Without this patch, the code checks for overflow only if sector_t is +smaller than long long, not if sector_t and long long have the same size. 
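
To make the failure mode concrete, a minimal standalone sketch, assuming
a configuration where sector_t is 64 bits wide (the names below are
invented; this is not the dm-verity constructor itself):

  /* Minimal sketch, assuming sector_t is a 64-bit unsigned type. */
  #include <stdint.h>
  #include <stdio.h>

  typedef uint64_t sector_t;      /* model: same width as long long */

  int main(void)
  {
      /* 4096-byte blocks, 512-byte sectors: shift by 12 - 9 = 3 */
      int shift = 3;
      /* a block count whose sector equivalent does not fit in 64 bits */
      unsigned long long num_ll = 0x3000000000000000ULL;

      /*
       * Old check: were sector_t narrower than long long, the cast
       * would truncate and the two sides would differ; at equal width
       * both sides lose the same high bits and compare equal, so the
       * overflow slips through.
       */
      int old_catches = (num_ll << shift) != ((sector_t)num_ll << shift);

      /*
       * Fixed check: shift back and compare with the original value;
       * the bits discarded by the left shift make the round trip lossy,
       * exposing the overflow regardless of sector_t's width.
       */
      int new_catches = (((sector_t)(num_ll << shift)) >> shift) != num_ll;

      printf("old check flags overflow: %d\n", old_catches); /* prints 0 */
      printf("new check flags overflow: %d\n", new_catches); /* prints 1 */
      return 0;
  }

The old comparison loses the same high bits on both sides and so reports
no overflow; shifting back and comparing against the original value makes
the loss visible.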
+ +Signed-off-by: Mikulas Patocka +Signed-off-by: Alasdair G Kergon +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/md/dm-verity.c | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +--- a/drivers/md/dm-verity.c ++++ b/drivers/md/dm-verity.c +@@ -718,8 +718,8 @@ static int verity_ctr(struct dm_target * + v->hash_dev_block_bits = ffs(num) - 1; + + if (sscanf(argv[5], "%llu%c", &num_ll, &dummy) != 1 || +- num_ll << (v->data_dev_block_bits - SECTOR_SHIFT) != +- (sector_t)num_ll << (v->data_dev_block_bits - SECTOR_SHIFT)) { ++ (sector_t)(num_ll << (v->data_dev_block_bits - SECTOR_SHIFT)) ++ >> (v->data_dev_block_bits - SECTOR_SHIFT) != num_ll) { + ti->error = "Invalid data blocks"; + r = -EINVAL; + goto bad; +@@ -733,8 +733,8 @@ static int verity_ctr(struct dm_target * + } + + if (sscanf(argv[6], "%llu%c", &num_ll, &dummy) != 1 || +- num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT) != +- (sector_t)num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT)) { ++ (sector_t)(num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT)) ++ >> (v->hash_dev_block_bits - SECTOR_SHIFT) != num_ll) { + ti->error = "Invalid hash start"; + r = -EINVAL; + goto bad; diff --git a/queue-3.5/series b/queue-3.5/series new file mode 100644 index 00000000000..375bd0b832a --- /dev/null +++ b/queue-3.5/series @@ -0,0 +1,5 @@ +vfs-dcache-fix-deadlock-in-tree-traversal.patch +dm-mpath-only-retry-ioctl-when-no-paths-if-queue_if_no_path-set.patch +dm-handle-requests-beyond-end-of-device-instead-of-using-bug_on.patch +dm-table-clear-add_random-unless-all-devices-have-it-set.patch +dm-verity-fix-overflow-check.patch diff --git a/queue-3.5/vfs-dcache-fix-deadlock-in-tree-traversal.patch b/queue-3.5/vfs-dcache-fix-deadlock-in-tree-traversal.patch new file mode 100644 index 00000000000..052fdc6bd23 --- /dev/null +++ b/queue-3.5/vfs-dcache-fix-deadlock-in-tree-traversal.patch @@ -0,0 +1,70 @@ +From 8110e16d42d587997bcaee0c864179e6d93603fe Mon Sep 17 00:00:00 2001 +From: Miklos Szeredi +Date: Mon, 17 Sep 2012 22:23:30 +0200 +Subject: vfs: dcache: fix deadlock in tree traversal + +From: Miklos Szeredi + +commit 8110e16d42d587997bcaee0c864179e6d93603fe upstream. + +IBM reported a deadlock in select_parent(). This was found to be caused +by taking rename_lock when already locked when restarting the tree +traversal. + +There are two cases when the traversal needs to be restarted: + + 1) concurrent d_move(); this can only happen when not already locked, + since taking rename_lock protects against concurrent d_move(). + + 2) racing with final d_put() on child just at the moment of ascending + to parent; rename_lock doesn't protect against this rare race, so it + can happen when already locked. + +Because of case 2, we need to be able to handle restarting the traversal +when rename_lock is already held. This patch fixes all three callers of +try_to_ascend(). + +IBM reported that the deadlock is gone with this patch. + +[ I rewrote the patch to be smaller and just do the "goto again" if the + lock was already held, but credit goes to Miklos for the real work. 
+ - Linus ] + +Signed-off-by: Miklos Szeredi +Cc: Al Viro +Signed-off-by: Linus Torvalds +Signed-off-by: Greg Kroah-Hartman + +--- + fs/dcache.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +--- a/fs/dcache.c ++++ b/fs/dcache.c +@@ -1132,6 +1132,8 @@ positive: + return 1; + + rename_retry: ++ if (locked) ++ goto again; + locked = 1; + write_seqlock(&rename_lock); + goto again; +@@ -1234,6 +1236,8 @@ out: + rename_retry: + if (found) + return found; ++ if (locked) ++ goto again; + locked = 1; + write_seqlock(&rename_lock); + goto again; +@@ -3031,6 +3035,8 @@ resume: + return; + + rename_retry: ++ if (locked) ++ goto again; + locked = 1; + write_seqlock(&rename_lock); + goto again;
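
The retry pattern the fix establishes can be shown in miniature. A
minimal standalone sketch (all names are invented stand-ins; a held-flag
plus an assertion plays the role of the non-recursive rename_lock):

  /* Minimal sketch with invented names; not the fs/dcache.c code. */
  #include <assert.h>
  #include <stdbool.h>
  #include <stdio.h>

  static bool rename_lock_held;   /* models the non-recursive rename_lock */

  static void write_seqlock_model(void)
  {
      /* Write-locking a seqlock you already hold deadlocks; the model
       * turns that into an immediate, visible failure instead. */
      assert(!rename_lock_held && "deadlock: rename_lock taken twice");
      rename_lock_held = true;
  }

  /* Stub traversal: fails once while unlocked (concurrent d_move,
   * case 1) and once more while locked (racing a final dput on the
   * child just when ascending to the parent, case 2). */
  static bool traverse(int attempt)
  {
      return attempt >= 3;
  }

  static void select_parent_like(void)
  {
      bool locked = false;
      int attempt = 0;

  again:
      attempt++;
      if (traverse(attempt))
          goto out;

      /* rename_retry: */
      if (locked)          /* the added check: we already hold the lock, */
          goto again;      /* so restart instead of re-taking it         */
      locked = true;
      write_seqlock_model();
      goto again;

  out:
      if (locked)
          rename_lock_held = false;   /* write_sequnlock */
      printf("traversal finished after %d attempt(s)\n", attempt);
  }

  int main(void)
  {
      select_parent_like();
      return 0;
  }

Removing the "if (locked)" short-circuit makes the second restart re-take
a lock the thread already holds, which is exactly the case 2 deadlock
described above.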