--- /dev/null
+From ba1cbad93dd47223b1f3b8edd50dd9ef2abcb2ed Mon Sep 17 00:00:00 2001
+From: Mike Snitzer <snitzer@redhat.com>
+Date: Wed, 26 Sep 2012 23:45:42 +0100
+Subject: dm: handle requests beyond end of device instead of using BUG_ON
+
+From: Mike Snitzer <snitzer@redhat.com>
+
+commit ba1cbad93dd47223b1f3b8edd50dd9ef2abcb2ed upstream.
+
+The BUG_ON for access beyond the end of the device, introduced into
+dm_request_fn via commit 29e4013de7ad950280e4b2208 ("dm: implement
+REQ_FLUSH/FUA support for request-based dm"), was an overly drastic
+(but simple) response to such out-of-range requests.
+
+I have received a report that this BUG_ON was hit and now think
+it would be better to use dm_kill_unmapped_request() to fail the clone
+and original request with -EIO.
+
+map_request() will assign the valid target returned by
+dm_table_find_target() to tio->ti. But when the target
+isn't valid, tio->ti is never assigned (because map_request() isn't
+called), so add a check for tio->ti != NULL to dm_done().
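+
+As a rough illustration (a userspace model, not the kernel code; the
+device size and helper name below are invented for this sketch), the
+change turns a fatal assertion into an ordinary I/O error:
+
+  #include <errno.h>
+  #include <stdio.h>
+
+  #define DEV_SECTORS 1024ULL  /* pretend device capacity in sectors */
+
+  /* Models dm_request_fn's target lookup: requests starting beyond the
+   * end of the device are failed with -EIO instead of crashing. */
+  static int handle_request(unsigned long long pos)
+  {
+          if (pos >= DEV_SECTORS) {
+                  /* was: BUG_ON(!dm_target_is_valid(ti)) */
+                  fprintf(stderr, "access beyond end of device\n");
+                  return -EIO;  /* the dm_kill_unmapped_request() outcome */
+          }
+          return 0;  /* the map_request() path; tio->ti gets assigned */
+  }
+
+  int main(void)
+  {
+          printf("in range:   %d\n", handle_request(10));    /* 0 */
+          printf("beyond end: %d\n", handle_request(4096));  /* -5 (-EIO) */
+          return 0;
+  }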
+
+Reported-by: Mike Christie <michaelc@cs.wisc.edu>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
+Signed-off-by: Alasdair G Kergon <agk@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm.c | 56 ++++++++++++++++++++++++++++++++++++++------------------
+ 1 file changed, 38 insertions(+), 18 deletions(-)
+
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -865,10 +865,14 @@ static void dm_done(struct request *clon
+ {
+ int r = error;
+ struct dm_rq_target_io *tio = clone->end_io_data;
+- dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;
++ dm_request_endio_fn rq_end_io = NULL;
+
+- if (mapped && rq_end_io)
+- r = rq_end_io(tio->ti, clone, error, &tio->info);
++ if (tio->ti) {
++ rq_end_io = tio->ti->type->rq_end_io;
++
++ if (mapped && rq_end_io)
++ r = rq_end_io(tio->ti, clone, error, &tio->info);
++ }
+
+ if (r <= 0)
+ /* The target wants to complete the I/O */
+@@ -1566,15 +1570,6 @@ static int map_request(struct dm_target
+ int r, requeued = 0;
+ struct dm_rq_target_io *tio = clone->end_io_data;
+
+- /*
+- * Hold the md reference here for the in-flight I/O.
+- * We can't rely on the reference count by device opener,
+- * because the device may be closed during the request completion
+- * when all bios are completed.
+- * See the comment in rq_completed() too.
+- */
+- dm_get(md);
+-
+ tio->ti = ti;
+ r = ti->type->map_rq(ti, clone, &tio->info);
+ switch (r) {
+@@ -1606,6 +1601,26 @@ static int map_request(struct dm_target
+ return requeued;
+ }
+
++static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
++{
++ struct request *clone;
++
++ blk_start_request(orig);
++ clone = orig->special;
++ atomic_inc(&md->pending[rq_data_dir(clone)]);
++
++ /*
++ * Hold the md reference here for the in-flight I/O.
++ * We can't rely on the reference count by device opener,
++ * because the device may be closed during the request completion
++ * when all bios are completed.
++ * See the comment in rq_completed() too.
++ */
++ dm_get(md);
++
++ return clone;
++}
++
+ /*
+ * q->request_fn for request-based dm.
+ * Called with the queue lock held.
+@@ -1635,14 +1650,21 @@ static void dm_request_fn(struct request
+ pos = blk_rq_pos(rq);
+
+ ti = dm_table_find_target(map, pos);
+- BUG_ON(!dm_target_is_valid(ti));
++ if (!dm_target_is_valid(ti)) {
++ /*
++ * Must perform the setup that dm_done() requires
++ * before calling dm_kill_unmapped_request
++ */
++ DMERR_LIMIT("request attempted access beyond the end of device");
++ clone = dm_start_request(md, rq);
++ dm_kill_unmapped_request(clone, -EIO);
++ continue;
++ }
+
+ if (ti->type->busy && ti->type->busy(ti))
+ goto delay_and_out;
+
+- blk_start_request(rq);
+- clone = rq->special;
+- atomic_inc(&md->pending[rq_data_dir(clone)]);
++ clone = dm_start_request(md, rq);
+
+ spin_unlock(q->queue_lock);
+ if (map_request(ti, clone, md))
+@@ -1662,8 +1684,6 @@ delay_and_out:
+ blk_delay_queue(q, HZ / 10);
+ out:
+ dm_table_put(map);
+-
+- return;
+ }
+
+ int dm_underlying_device_busy(struct request_queue *q)
--- /dev/null
+From c3c4555edd10dbc0b388a0125b9c50de5e79af05 Mon Sep 17 00:00:00 2001
+From: Milan Broz <mbroz@redhat.com>
+Date: Wed, 26 Sep 2012 23:45:43 +0100
+Subject: dm table: clear add_random unless all devices have it set
+
+From: Milan Broz <mbroz@redhat.com>
+
+commit c3c4555edd10dbc0b388a0125b9c50de5e79af05 upstream.
+
+Always clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
+have it set. Otherwise devices with predictable characteristics may
+contribute entropy.
+
+QUEUE_FLAG_ADD_RANDOM specifies whether or not a queue's I/O timings
+contribute to the entropy pool.
+
+For bio-based targets this flag is always 0 because such devices have no
+real queue.
+
+For request-based devices this flag was always set to 1 by default.
+
+Now set it according to the flags on underlying devices. If there is at
+least one device which should not contribute, set the flag to zero: if a
+device, such as fast SSD storage, is not suitable for supplying entropy,
+a request-based queue stacked over it will not be either.
+
+Because the checking logic is exactly the same as for the rotational
+flag, share the iteration function with device_is_nonrot().
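+
+Leaving aside the iterate_devices plumbing, the stacking rule reduces
+to an AND across the underlying devices. A minimal userspace model
+(the device array and function name here are invented):
+
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  /* The stacked queue may feed the entropy pool only if every
+   * underlying device may; one unsuitable device clears the flag. */
+  static bool stacked_add_random(const bool *devs, int n)
+  {
+          for (int i = 0; i < n; i++)
+                  if (!devs[i])
+                          return false;
+          return true;
+  }
+
+  int main(void)
+  {
+          bool all_hdd[] = { true, true };   /* rotational disks */
+          bool mixed[]   = { true, false };  /* HDD stacked with SSD */
+
+          printf("%d\n", stacked_add_random(all_hdd, 2)); /* 1: keep flag */
+          printf("%d\n", stacked_add_random(mixed, 2));   /* 0: clear it */
+          return 0;
+  }
+
+In the patch itself this is expressed via dm_table_all_devices_attribute()
+with a negated callout, device_is_not_random(), so the same iterator also
+serves the existing non-rotational check.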
+
+Signed-off-by: Milan Broz <mbroz@redhat.com>
+Signed-off-by: Alasdair G Kergon <agk@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-table.c | 26 ++++++++++++++++++++++----
+ 1 file changed, 22 insertions(+), 4 deletions(-)
+
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -1351,17 +1351,25 @@ static int device_is_nonrot(struct dm_ta
+ return q && blk_queue_nonrot(q);
+ }
+
+-static bool dm_table_is_nonrot(struct dm_table *t)
++static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
++ sector_t start, sector_t len, void *data)
++{
++ struct request_queue *q = bdev_get_queue(dev->bdev);
++
++ return q && !blk_queue_add_random(q);
++}
++
++static bool dm_table_all_devices_attribute(struct dm_table *t,
++ iterate_devices_callout_fn func)
+ {
+ struct dm_target *ti;
+ unsigned i = 0;
+
+- /* Ensure that all underlying device are non-rotational. */
+ while (i < dm_table_get_num_targets(t)) {
+ ti = dm_table_get_target(t, i++);
+
+ if (!ti->type->iterate_devices ||
+- !ti->type->iterate_devices(ti, device_is_nonrot, NULL))
++ !ti->type->iterate_devices(ti, func, NULL))
+ return 0;
+ }
+
+@@ -1393,7 +1401,8 @@ void dm_table_set_restrictions(struct dm
+ if (!dm_table_discard_zeroes_data(t))
+ q->limits.discard_zeroes_data = 0;
+
+- if (dm_table_is_nonrot(t))
++ /* Ensure that all underlying devices are non-rotational. */
++ if (dm_table_all_devices_attribute(t, device_is_nonrot))
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+ else
+ queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);
+@@ -1401,6 +1410,15 @@ void dm_table_set_restrictions(struct dm
+ dm_table_set_integrity(t);
+
+ /*
++ * Determine whether or not this queue's I/O timings contribute
++ * to the entropy pool. Only request-based targets use this.
++ * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
++ * have it set.
++ */
++ if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
++ queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
++
++ /*
+ * QUEUE_FLAG_STACKABLE must be set after all queue settings are
+ * visible to other CPUs because, once the flag is set, incoming bios
+ * are processed by request-based dm, which refers to the queue
--- /dev/null
+From 1d55f6bcc0331d744cd5b56c4ee79e3809438161 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Wed, 26 Sep 2012 23:45:48 +0100
+Subject: dm verity: fix overflow check
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit 1d55f6bcc0331d744cd5b56c4ee79e3809438161 upstream.
+
+This patch fixes sector_t overflow checking in dm-verity.
+
+Without this patch, the code detects overflow only if sector_t is
+smaller than long long; when sector_t and long long have the same size,
+both sides of the old comparison shift (and wrap) identically, so the
+check never fires.
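+
+A standalone sketch of the difference (userspace C, with sector_t
+modelled as a 64-bit type, which is exactly the case the old test
+missed):
+
+  #include <stdint.h>
+  #include <stdio.h>
+
+  typedef uint64_t sector_t;  /* same width as unsigned long long here */
+
+  int main(void)
+  {
+          unsigned long long num_ll = 1ULL << 62; /* overflows when shifted */
+          int shift = 3;  /* data_dev_block_bits - SECTOR_SHIFT */
+
+          /* Old check: when the types have the same size, both sides
+           * shift (and wrap) identically, so it never fires. */
+          int old_catches = (num_ll << shift) !=
+                            ((sector_t)num_ll << shift);
+
+          /* New check: shift, then shift back; bits lost to overflow
+           * make the round trip disagree with the original value. */
+          int new_catches = ((sector_t)(num_ll << shift) >> shift) != num_ll;
+
+          printf("old check catches overflow: %d\n", old_catches); /* 0 */
+          printf("new check catches overflow: %d\n", new_catches); /* 1 */
+          return 0;
+  }
+
+The same round-trip pattern is applied to both the data-blocks and the
+hash-start arguments below.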
+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Alasdair G Kergon <agk@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-verity.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/md/dm-verity.c
++++ b/drivers/md/dm-verity.c
+@@ -718,8 +718,8 @@ static int verity_ctr(struct dm_target *
+ v->hash_dev_block_bits = ffs(num) - 1;
+
+ if (sscanf(argv[5], "%llu%c", &num_ll, &dummy) != 1 ||
+- num_ll << (v->data_dev_block_bits - SECTOR_SHIFT) !=
+- (sector_t)num_ll << (v->data_dev_block_bits - SECTOR_SHIFT)) {
++ (sector_t)(num_ll << (v->data_dev_block_bits - SECTOR_SHIFT))
++ >> (v->data_dev_block_bits - SECTOR_SHIFT) != num_ll) {
+ ti->error = "Invalid data blocks";
+ r = -EINVAL;
+ goto bad;
+@@ -733,8 +733,8 @@ static int verity_ctr(struct dm_target *
+ }
+
+ if (sscanf(argv[6], "%llu%c", &num_ll, &dummy) != 1 ||
+- num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT) !=
+- (sector_t)num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT)) {
++ (sector_t)(num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT))
++ >> (v->hash_dev_block_bits - SECTOR_SHIFT) != num_ll) {
+ ti->error = "Invalid hash start";
+ r = -EINVAL;
+ goto bad;
--- /dev/null
+vfs-dcache-fix-deadlock-in-tree-traversal.patch
+dm-handle-requests-beyond-end-of-device-instead-of-using-bug_on.patch
+dm-table-clear-add_random-unless-all-devices-have-it-set.patch
+dm-verity-fix-overflow-check.patch
--- /dev/null
+From 8110e16d42d587997bcaee0c864179e6d93603fe Mon Sep 17 00:00:00 2001
+From: Miklos Szeredi <miklos@szeredi.hu>
+Date: Mon, 17 Sep 2012 22:23:30 +0200
+Subject: vfs: dcache: fix deadlock in tree traversal
+
+From: Miklos Szeredi <miklos@szeredi.hu>
+
+commit 8110e16d42d587997bcaee0c864179e6d93603fe upstream.
+
+IBM reported a deadlock in select_parent(). It was found to be caused
+by re-taking rename_lock, while it was already held, when restarting
+the tree traversal.
+
+There are two cases when the traversal needs to be restarted:
+
+ 1) concurrent d_move(); this can only happen when not already locked,
+ since taking rename_lock protects against concurrent d_move().
+
+ 2) racing with final d_put() on child just at the moment of ascending
+ to parent; rename_lock doesn't protect against this rare race, so it
+ can happen when already locked.
+
+Because of case 2, we need to be able to handle restarting the traversal
+when rename_lock is already held. This patch fixes all three callers of
+try_to_ascend().
+
+IBM reported that the deadlock is gone with this patch.
+
+[ I rewrote the patch to be smaller and just do the "goto again" if the
+ lock was already held, but credit goes to Miklos for the real work.
+ - Linus ]
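+
+The failure mode can be modelled in userspace with a non-recursive lock
+(an analogy only; rename_lock is a seqlock, not a pthread mutex):
+
+  #include <pthread.h>
+  #include <stdio.h>
+  #include <string.h>
+
+  int main(void)
+  {
+          pthread_mutex_t lock;
+          pthread_mutexattr_t attr;
+
+          /* An error-checking mutex reports the self-deadlock instead
+           * of hanging, which is what re-taking rename_lock caused. */
+          pthread_mutexattr_init(&attr);
+          pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
+          pthread_mutex_init(&lock, &attr);
+
+          pthread_mutex_lock(&lock);          /* first acquisition: fine */
+          int r = pthread_mutex_lock(&lock);  /* restart path re-locks */
+          printf("second lock: %s\n", strerror(r)); /* EDEADLK */
+
+          pthread_mutex_unlock(&lock);
+          pthread_mutex_destroy(&lock);
+          pthread_mutexattr_destroy(&attr);
+          return 0;
+  }
+
+The two lines added at each rename_retry label below implement the fix:
+when rename_lock is already held, jump straight back to the traversal
+instead of taking the lock a second time.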
+
+Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
+Cc: Al Viro <viro@ZenIV.linux.org.uk>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/dcache.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -1116,6 +1116,8 @@ positive:
+ return 1;
+
+ rename_retry:
++ if (locked)
++ goto again;
+ locked = 1;
+ write_seqlock(&rename_lock);
+ goto again;
+@@ -1218,6 +1220,8 @@ out:
+ rename_retry:
+ if (found)
+ return found;
++ if (locked)
++ goto again;
+ locked = 1;
+ write_seqlock(&rename_lock);
+ goto again;
+@@ -2963,6 +2967,8 @@ resume:
+ return;
+
+ rename_retry:
++ if (locked)
++ goto again;
+ locked = 1;
+ write_seqlock(&rename_lock);
+ goto again;