1 From: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
2 Subject: Request-based multipath patches
3 References: FATE#302108
5 This is the latest version of the request-based multipathing patches,
6 posted to dm-devel and linux-scsi on 03.10.2008.
8 Signed-off-by: Hannes Reinecke <hare@suse.de>
11 drivers/md/dm-ioctl.c | 13
12 drivers/md/dm-mpath.c | 192 +++++---
13 drivers/md/dm-table.c | 82 +++
14 drivers/md/dm.c | 952 +++++++++++++++++++++++++++++++++++++++---
16 include/linux/device-mapper.h | 24 +
17 6 files changed, 1158 insertions(+), 122 deletions(-)
21 @@ -32,6 +32,7 @@ static unsigned int _major = 0;
23 static DEFINE_SPINLOCK(_minor_lock);
26 * One of these is allocated per bio.
29 @@ -43,6 +44,7 @@ struct dm_io {
34 * One of these is allocated per target within a bio. Hopefully
35 * this will be simplified out one day.
37 @@ -52,6 +54,31 @@ struct dm_target_io {
42 + * For request based dm.
43 + * One of these is allocated per request.
45 + * Since we assume "original request : cloned request = 1 : 1" and
46 + * a counter for the number of clones like struct dm_io.io_count isn't needed,
47 + * struct dm_io and struct target_io can be merged.
49 +struct dm_rq_target_io {
50 + struct mapped_device *md;
51 + struct dm_target *ti;
52 + struct request *orig, clone;
54 + union map_info info;
58 + * For request based dm.
59 + * One of these is allocated per bio.
61 +struct dm_clone_bio_info {
66 union map_info *dm_get_mapinfo(struct bio *bio)
68 if (bio && bio->bi_private)
69 @@ -59,6 +86,14 @@ union map_info *dm_get_mapinfo(struct bi
73 +union map_info *dm_get_rq_mapinfo(struct request *rq)
75 + if (rq && rq->end_io_data)
76 + return &((struct dm_rq_target_io *)rq->end_io_data)->info;
79 +EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
81 #define MINOR_ALLOCED ((void *)-1)
84 @@ -76,7 +111,6 @@ union map_info *dm_get_mapinfo(struct bi
91 struct work_struct work;
92 @@ -126,6 +160,8 @@ struct mapped_device {
96 + unsigned int mempool_type; /* Type of mempools above. */
101 @@ -143,52 +179,74 @@ struct mapped_device {
103 /* forced geometry settings */
104 struct hd_geometry geometry;
106 + /* marker of flush suspend for request-based dm */
107 + struct request suspend_rq;
109 + /* For saving the address of __make_request for request based dm */
110 + make_request_fn *saved_make_request_fn;
114 static struct kmem_cache *_io_cache;
115 static struct kmem_cache *_tio_cache;
116 +static struct kmem_cache *_rq_tio_cache;
117 +static struct kmem_cache *_bio_info_cache;
119 static int __init local_init(void)
124 /* allocate a slab for the dm_ios */
125 _io_cache = KMEM_CACHE(dm_io, 0);
130 /* allocate a slab for the target ios */
131 _tio_cache = KMEM_CACHE(dm_target_io, 0);
133 - kmem_cache_destroy(_io_cache);
137 + goto out_free_io_cache;
139 + _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
140 + if (!_rq_tio_cache)
141 + goto out_free_tio_cache;
143 + _bio_info_cache = KMEM_CACHE(dm_clone_bio_info, 0);
144 + if (!_bio_info_cache)
145 + goto out_free_rq_tio_cache;
147 r = dm_uevent_init();
149 - kmem_cache_destroy(_tio_cache);
150 - kmem_cache_destroy(_io_cache);
154 + goto out_free_bio_info_cache;
157 r = register_blkdev(_major, _name);
159 - kmem_cache_destroy(_tio_cache);
160 - kmem_cache_destroy(_io_cache);
165 + goto out_uevent_exit;
174 +out_free_bio_info_cache:
175 + kmem_cache_destroy(_bio_info_cache);
176 +out_free_rq_tio_cache:
177 + kmem_cache_destroy(_rq_tio_cache);
179 + kmem_cache_destroy(_tio_cache);
181 + kmem_cache_destroy(_io_cache);
186 static void local_exit(void)
188 + kmem_cache_destroy(_bio_info_cache);
189 + kmem_cache_destroy(_rq_tio_cache);
190 kmem_cache_destroy(_tio_cache);
191 kmem_cache_destroy(_io_cache);
192 unregister_blkdev(_major, _name);
193 @@ -380,6 +438,28 @@ static void free_tio(struct mapped_devic
194 mempool_free(tio, md->tio_pool);
197 +static inline struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md)
199 + return mempool_alloc(md->tio_pool, GFP_ATOMIC);
202 +static inline void free_rq_tio(struct mapped_device *md,
203 + struct dm_rq_target_io *tio)
205 + mempool_free(tio, md->tio_pool);
208 +static inline struct dm_clone_bio_info *alloc_bio_info(struct mapped_device *md)
210 + return mempool_alloc(md->io_pool, GFP_ATOMIC);
213 +static inline void free_bio_info(struct mapped_device *md,
214 + struct dm_clone_bio_info *info)
216 + mempool_free(info, md->io_pool);
219 static void start_io_acct(struct dm_io *io)
221 struct mapped_device *md = io->md;
222 @@ -568,6 +648,266 @@ static void clone_endio(struct bio *bio,
227 + * Partial completion handling for request-based dm
229 +static void end_clone_bio(struct bio *clone, int error)
231 + struct dm_clone_bio_info *info = clone->bi_private;
232 + struct dm_rq_target_io *tio = info->rq->end_io_data;
233 + struct bio *bio = info->orig;
234 + unsigned int nr_bytes = info->orig->bi_size;
236 + free_bio_info(tio->md, info);
237 + clone->bi_private = tio->md->bs;
242 + * An error has already been detected on the request.
243 + * Once error occurred, just let clone->end_io() handle
247 + } else if (error) {
249 + * Do not report the error to the upper layer yet.
250 + * The error handling decision is made by the target driver
251 + * when the request is completed.
253 + tio->error = error;
258 + * I/O for the bio completed successfully.
259 + * Report the data completion to the upper layer.
263 + * bios are processed from the head of the list.
264 + * So the completing bio should always be rq->bio.
265 + * If it's not, something is going wrong.
267 + if (tio->orig->bio != bio)
268 + DMERR("bio completion is going in the middle of the request");
271 + * Update the original request.
272 + * Do not use blk_end_request() here, because it may complete
273 + * the original request before the clone, and break the ordering.
275 + blk_update_request(tio->orig, 0, nr_bytes);
278 +static void free_bio_clone(struct request *clone)
280 + struct dm_rq_target_io *tio = clone->end_io_data;
281 + struct mapped_device *md = tio->md;
283 + struct dm_clone_bio_info *info;
285 + while ((bio = clone->bio) != NULL) {
286 + clone->bio = bio->bi_next;
288 + info = bio->bi_private;
289 + free_bio_info(md, info);
291 + bio->bi_private = md->bs;
296 +static void dec_rq_pending(struct dm_rq_target_io *tio)
298 + if (!atomic_dec_return(&tio->md->pending))
299 + /* nudge anyone waiting on suspend queue */
300 + wake_up(&tio->md->wait);
303 +static void dm_unprep_request(struct request *rq)
305 + struct request *clone = rq->special;
306 + struct dm_rq_target_io *tio = clone->end_io_data;
308 + rq->special = NULL;
309 + rq->cmd_flags &= ~REQ_DONTPREP;
311 + free_bio_clone(clone);
312 + dec_rq_pending(tio);
313 + free_rq_tio(tio->md, tio);
317 + * Requeue the original request of a clone.
319 +void dm_requeue_request(struct request *clone)
321 + struct dm_rq_target_io *tio = clone->end_io_data;
322 + struct request *rq = tio->orig;
323 + struct request_queue *q = rq->q;
324 + unsigned long flags;
326 + dm_unprep_request(rq);
328 + spin_lock_irqsave(q->queue_lock, flags);
329 + if (elv_queue_empty(q))
330 + blk_plug_device(q);
331 + blk_requeue_request(q, rq);
332 + spin_unlock_irqrestore(q->queue_lock, flags);
334 +EXPORT_SYMBOL_GPL(dm_requeue_request);
336 +static inline void __stop_queue(struct request_queue *q)
341 +static void stop_queue(struct request_queue *q)
343 + unsigned long flags;
345 + spin_lock_irqsave(q->queue_lock, flags);
347 + spin_unlock_irqrestore(q->queue_lock, flags);
350 +static inline void __start_queue(struct request_queue *q)
352 + if (blk_queue_stopped(q))
353 + blk_start_queue(q);
356 +static void start_queue(struct request_queue *q)
358 + unsigned long flags;
360 + spin_lock_irqsave(q->queue_lock, flags);
362 + spin_unlock_irqrestore(q->queue_lock, flags);
366 + * Complete the clone and the original request
368 +static void dm_end_request(struct request *clone, int error)
370 + struct dm_rq_target_io *tio = clone->end_io_data;
371 + struct request *rq = tio->orig;
372 + struct request_queue *q = rq->q;
373 + unsigned int nr_bytes = blk_rq_bytes(rq);
375 + if (blk_pc_request(rq)) {
376 + rq->errors = clone->errors;
377 + rq->data_len = clone->data_len;
381 + * We are using the sense buffer of the original
383 + * So setting the length of the sense data is enough.
385 + rq->sense_len = clone->sense_len;
388 + free_bio_clone(clone);
389 + dec_rq_pending(tio);
390 + free_rq_tio(tio->md, tio);
392 + if (unlikely(blk_end_request(rq, error, nr_bytes)))
399 + * Request completion handler for request-based dm
401 +static void dm_softirq_done(struct request *rq)
403 + struct request *clone = rq->completion_data;
404 + struct dm_rq_target_io *tio = clone->end_io_data;
405 + dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;
406 + int error = tio->error;
409 + if (rq->cmd_flags & REQ_FAILED)
413 + r = rq_end_io(tio->ti, clone, error, &tio->info);
415 + /* The target wants to complete the I/O */
417 + else if (r == DM_ENDIO_INCOMPLETE)
418 + /* The target will handle the I/O */
420 + else if (r == DM_ENDIO_REQUEUE) {
422 + * The target wants to requeue the I/O.
423 + * Don't invoke blk_run_queue() so that the requeued
424 + * request won't be dispatched again soon.
426 + dm_requeue_request(clone);
429 + DMWARN("unimplemented target endio return value: %d",
436 + dm_end_request(clone, error);
440 + * Called with the queue lock held
442 +static void end_clone_request(struct request *clone, int error)
444 + struct dm_rq_target_io *tio = clone->end_io_data;
445 + struct request *rq = tio->orig;
448 + * This is just for cleaning up the information of the queue in which
449 + * the clone was dispatched.
450 + * The clone is *NOT* actually freed here because it is allocated from
451 + * dm's own mempool and REQ_ALLOCED isn't set in clone->cmd_flags.
453 + __blk_put_request(clone->q, clone);
456 + * Actual request completion is done in a softirq context which doesn't
457 + * hold the queue lock. Otherwise, deadlock could occur because:
458 + * - another request may be submitted by the upper level driver
459 + * of the stacking during the completion
460 + * - the submission which requires queue lock may be done
461 + * against this queue
463 + tio->error = error;
464 + rq->completion_data = clone;
465 + blk_complete_request(rq);
469 + * Complete the original request of a clone with an error status.
470 + * Target's rq_end_io() function isn't called.
471 + * This may be used by target's map_rq() function when the mapping fails.
473 +void dm_kill_request(struct request *clone, int error)
475 + struct dm_rq_target_io *tio = clone->end_io_data;
476 + struct request *rq = tio->orig;
478 + tio->error = error;
479 + /* Avoid printing the "I/O error" message, since we didn't actually do any I/O */
480 + rq->cmd_flags |= (REQ_FAILED | REQ_QUIET);
481 + rq->completion_data = clone;
482 + blk_complete_request(rq);
484 +EXPORT_SYMBOL_GPL(dm_kill_request);
486 static sector_t max_io_len(struct mapped_device *md,
487 sector_t sector, struct dm_target *ti)
489 @@ -886,7 +1226,7 @@ out:
490 * The request function that just remaps the bio built up by
493 -static int dm_request(struct request_queue *q, struct bio *bio)
494 +static int _dm_request(struct request_queue *q, struct bio *bio)
497 int rw = bio_data_dir(bio);
498 @@ -936,12 +1276,335 @@ out_req:
502 +static int dm_make_request(struct request_queue *q, struct bio *bio)
504 + struct mapped_device *md = (struct mapped_device *)q->queuedata;
506 + if (unlikely(bio_barrier(bio))) {
507 + bio_endio(bio, -EOPNOTSUPP);
511 + if (unlikely(!md->map)) {
512 + bio_endio(bio, -EIO);
516 + return md->saved_make_request_fn(q, bio); /* call __make_request() */
519 +static inline int dm_request_based(struct mapped_device *md)
521 + return blk_queue_stackable(md->queue);
524 +static int dm_request(struct request_queue *q, struct bio *bio)
526 + struct mapped_device *md = q->queuedata;
528 + if (dm_request_based(md))
529 + return dm_make_request(q, bio);
531 + return _dm_request(q, bio);
534 +void dm_dispatch_request(struct request *rq)
538 + rq->start_time = jiffies;
539 + r = blk_insert_cloned_request(rq->q, rq);
541 + dm_kill_request(rq, r);
543 +EXPORT_SYMBOL_GPL(dm_dispatch_request);
545 +static void copy_request_info(struct request *clone, struct request *rq)
547 + clone->cmd_flags = (rq_data_dir(rq) | REQ_NOMERGE);
548 + clone->cmd_type = rq->cmd_type;
549 + clone->sector = rq->sector;
550 + clone->hard_sector = rq->hard_sector;
551 + clone->nr_sectors = rq->nr_sectors;
552 + clone->hard_nr_sectors = rq->hard_nr_sectors;
553 + clone->current_nr_sectors = rq->current_nr_sectors;
554 + clone->hard_cur_sectors = rq->hard_cur_sectors;
555 + clone->nr_phys_segments = rq->nr_phys_segments;
556 + clone->ioprio = rq->ioprio;
557 + clone->buffer = rq->buffer;
558 + clone->cmd_len = rq->cmd_len;
560 + clone->cmd = rq->cmd;
561 + clone->data_len = rq->data_len;
562 + clone->extra_len = rq->extra_len;
563 + clone->sense_len = rq->sense_len;
564 + clone->data = rq->data;
565 + clone->sense = rq->sense;
568 +static int clone_request_bios(struct request *clone, struct request *rq,
569 + struct mapped_device *md)
571 + struct bio *bio, *clone_bio;
572 + struct dm_clone_bio_info *info;
574 + for (bio = rq->bio; bio; bio = bio->bi_next) {
575 + info = alloc_bio_info(md);
579 + clone_bio = bio_alloc_bioset(GFP_ATOMIC, bio->bi_max_vecs,
582 + free_bio_info(md, info);
586 + __bio_clone(clone_bio, bio);
587 + clone_bio->bi_destructor = dm_bio_destructor;
588 + clone_bio->bi_end_io = end_clone_bio;
591 + clone_bio->bi_private = info;
594 + clone->biotail->bi_next = clone_bio;
595 + clone->biotail = clone_bio;
597 + clone->bio = clone->biotail = clone_bio;
603 + free_bio_clone(clone);
608 +static int setup_clone(struct request *clone, struct request *rq,
609 + struct dm_rq_target_io *tio)
613 + blk_rq_init(NULL, clone);
615 + r = clone_request_bios(clone, rq, tio->md);
619 + copy_request_info(clone, rq);
620 + clone->start_time = jiffies;
621 + clone->end_io = end_clone_request;
622 + clone->end_io_data = tio;
627 +static inline int dm_flush_suspending(struct mapped_device *md)
629 + return !md->suspend_rq.data;
633 + * Called with the queue lock held.
635 +static int dm_prep_fn(struct request_queue *q, struct request *rq)
637 + struct mapped_device *md = (struct mapped_device *)q->queuedata;
638 + struct dm_rq_target_io *tio;
639 + struct request *clone;
641 + if (unlikely(rq == &md->suspend_rq)) { /* Flush suspend marker */
642 + if (dm_flush_suspending(md)) {
644 + return BLKPREP_DEFER;
646 + /* This device should be quiet now */
649 + BUG_ON(atomic_read(&md->pending));
650 + wake_up(&md->wait);
651 + return BLKPREP_KILL;
655 + * The suspend process was interrupted.
656 + * So no need to suspend now.
658 + return BLKPREP_KILL;
661 + if (unlikely(rq->special)) {
662 + DMWARN("Already has something in rq->special.");
663 + return BLKPREP_KILL;
666 + if (unlikely(!dm_request_based(md))) {
667 + DMWARN("Request was queued into bio-based device");
668 + return BLKPREP_KILL;
671 + tio = alloc_rq_tio(md); /* Only one for each original request */
674 + return BLKPREP_DEFER;
680 + memset(&tio->info, 0, sizeof(tio->info));
682 + clone = &tio->clone;
683 + if (setup_clone(clone, rq, tio)) {
685 + free_rq_tio(md, tio);
686 + return BLKPREP_DEFER;
689 + rq->special = clone;
690 + rq->cmd_flags |= REQ_DONTPREP;
695 +static void map_request(struct dm_target *ti, struct request *rq,
696 + struct mapped_device *md)
699 + struct request *clone = rq->special;
700 + struct dm_rq_target_io *tio = clone->end_io_data;
703 + atomic_inc(&md->pending);
706 + * Although submitted requests to the md->queue are checked against
707 + * the table/queue limitations at the submission time, the limitations
708 + * may be changed by a table swapping while those already checked
709 + * requests are in the md->queue.
710 + * If the limitations have been shrunk in such situations, we may be
711 + * dispatching requests violating the current limitations here.
712 + * Since struct request is a reliable one in the block-layer
713 + * and device drivers, dispatching such requests is dangerous.
714 + * (e.g. it may cause kernel panic easily.)
715 + * Avoid dispatching such problematic requests in request-based dm.
717 + * Since dm_kill_request() decrements the md->pending, this has to
718 + * be done after incrementing the md->pending.
720 + r = blk_rq_check_limits(rq->q, rq);
722 + DMWARN("violating the queue limitation. the limitation may be"
723 + " shrunk while there are some requests in the queue.");
724 + dm_kill_request(clone, r);
728 + r = ti->type->map_rq(ti, clone, &tio->info);
730 + case DM_MAPIO_SUBMITTED:
731 + /* The target has taken the I/O to submit by itself later */
733 + case DM_MAPIO_REMAPPED:
734 + /* The target has remapped the I/O so dispatch it */
735 + dm_dispatch_request(clone);
737 + case DM_MAPIO_REQUEUE:
738 + /* The target wants to requeue the I/O */
739 + dm_requeue_request(clone);
743 + DMWARN("unimplemented target map return value: %d", r);
747 + /* The target wants to complete the I/O */
748 + dm_kill_request(clone, r);
754 + * q->request_fn for request-based dm.
755 + * Called with the queue lock held.
757 +static void dm_request_fn(struct request_queue *q)
759 + struct mapped_device *md = (struct mapped_device *)q->queuedata;
760 + struct dm_table *map = dm_get_table(md);
761 + struct dm_target *ti;
762 + struct request *rq;
765 + * The check for blk_queue_stopped() is needed here, because:
766 + * - device suspend uses blk_stop_queue() and expects that
767 + * no I/O will be dispatched any more after the queue stop
768 + * - generic_unplug_device() doesn't call q->request_fn()
769 + * when the queue is stopped, so no problem
770 + * - but underlying device drivers may call q->request_fn()
771 + * without the check through blk_run_queue()
773 + while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) {
774 + rq = elv_next_request(q);
778 + ti = dm_table_find_target(map, rq->sector);
779 + if (ti->type->busy && ti->type->busy(ti))
782 + blkdev_dequeue_request(rq);
783 + spin_unlock(q->queue_lock);
784 + map_request(ti, rq, md);
785 + spin_lock_irq(q->queue_lock);
791 + if (!elv_queue_empty(q))
792 + /* Some requests still remain, retry later */
793 + blk_plug_device(q);
801 +int dm_underlying_device_busy(struct request_queue *q)
803 + return blk_lld_busy(q);
805 +EXPORT_SYMBOL_GPL(dm_underlying_device_busy);
807 +static int dm_lld_busy(struct request_queue *q)
810 + struct mapped_device *md = q->queuedata;
811 + struct dm_table *map = dm_get_table(md);
813 + if (!map || test_bit(DMF_BLOCK_IO, &md->flags))
816 + r = dm_table_any_busy_target(map);
822 static void dm_unplug_all(struct request_queue *q)
824 struct mapped_device *md = q->queuedata;
825 struct dm_table *map = dm_get_table(md);
828 + if (dm_request_based(md))
829 + generic_unplug_device(q);
831 dm_table_unplug_all(map);
834 @@ -955,6 +1618,12 @@ static int dm_any_congested(void *conges
836 if (!map || test_bit(DMF_BLOCK_IO, &md->flags))
838 + else if (dm_request_based(md))
840 + * Request-based dm cares only about its own queue for
841 + * the query about the congestion status of request_queue
843 + r = md->queue->backing_dev_info.state & bdi_bits;
845 r = dm_table_any_congested(map, bdi_bits);
847 @@ -1075,10 +1744,22 @@ static struct mapped_device *alloc_dev(i
848 INIT_LIST_HEAD(&md->uevent_list);
849 spin_lock_init(&md->uevent_lock);
851 - md->queue = blk_alloc_queue(GFP_KERNEL);
852 + md->queue = blk_init_queue(dm_request_fn, NULL);
857 + * Request-based dm devices cannot be stacked on top of bio-based dm
858 + * devices. The type of this dm device has not been decided yet,
859 + * although we initialized the queue using blk_init_queue().
860 + * The type is decided at the first table loading time.
861 + * To prevent problematic device stacking, clear the queue flag
862 + * for request stacking support until then.
864 + * This queue is new, so no concurrency on the queue_flags.
866 + queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
867 + md->saved_make_request_fn = md->queue->make_request_fn;
868 md->queue->queuedata = md;
869 md->queue->backing_dev_info.congested_fn = dm_any_congested;
870 md->queue->backing_dev_info.congested_data = md;
871 @@ -1086,18 +1767,9 @@ static struct mapped_device *alloc_dev(i
872 blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
873 md->queue->unplug_fn = dm_unplug_all;
874 blk_queue_merge_bvec(md->queue, dm_merge_bvec);
876 - md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
880 - md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
884 - md->bs = bioset_create(16, 16);
886 - goto bad_no_bioset;
887 + blk_queue_softirq_done(md->queue, dm_softirq_done);
888 + blk_queue_prep_rq(md->queue, dm_prep_fn);
889 + blk_queue_lld_busy(md->queue, dm_lld_busy);
891 md->disk = alloc_disk(1);
893 @@ -1132,12 +1804,6 @@ static struct mapped_device *alloc_dev(i
897 - bioset_free(md->bs);
899 - mempool_destroy(md->tio_pool);
901 - mempool_destroy(md->io_pool);
903 blk_cleanup_queue(md->queue);
906 @@ -1159,9 +1825,12 @@ static void free_dev(struct mapped_devic
907 bdput(md->suspended_bdev);
909 destroy_workqueue(md->wq);
910 - mempool_destroy(md->tio_pool);
911 - mempool_destroy(md->io_pool);
912 - bioset_free(md->bs);
914 + mempool_destroy(md->tio_pool);
916 + mempool_destroy(md->io_pool);
918 + bioset_free(md->bs);
919 del_gendisk(md->disk);
922 @@ -1224,6 +1893,16 @@ static int __bind(struct mapped_device *
924 dm_table_event_callback(t, event_callback, md);
927 + * The queue hasn't been stopped yet, if the old table type wasn't
928 + * for request-based during suspension. So stop it to prevent
929 + * I/O mapping before resume.
930 + * This must be done before setting the queue restrictions,
931 + * because request-based dm may be run just after the setting.
933 + if (dm_table_request_based(t) && !blk_queue_stopped(q))
936 write_lock(&md->map_lock);
938 dm_table_set_restrictions(t, q);
939 @@ -1346,7 +2025,11 @@ static int dm_wait_for_completion(struct
940 set_current_state(TASK_INTERRUPTIBLE);
943 - if (!atomic_read(&md->pending))
944 + if (dm_request_based(md)) {
945 + if (!atomic_read(&md->pending) &&
946 + blk_queue_stopped(md->queue))
948 + } else if (!atomic_read(&md->pending))
951 if (signal_pending(current)) {
952 @@ -1369,7 +2052,13 @@ static void __flush_deferred_io(struct m
955 while ((c = bio_list_pop(&md->deferred))) {
956 - if (__split_bio(md, c))
958 + * Some bios might have been queued here during suspension
959 + * before setting of request-based dm in resume
961 + if (dm_request_based(md))
962 + generic_make_request(c);
963 + else if (__split_bio(md, c))
967 @@ -1394,9 +2083,6 @@ static void dm_wq_work(struct work_struc
969 down_write(&md->io_lock);
971 - case DM_WQ_FLUSH_ALL:
972 - __merge_pushback_list(md);
974 case DM_WQ_FLUSH_DEFERRED:
975 __flush_deferred_io(md);
977 @@ -1451,6 +2137,88 @@ out:
981 +static inline void dm_invalidate_flush_suspend(struct mapped_device *md)
983 + md->suspend_rq.data = (void *)0x1;
986 +static void dm_abort_suspend(struct mapped_device *md, int noflush)
988 + struct request_queue *q = md->queue;
989 + unsigned long flags;
992 + * For flush suspend, invalidation and queue restart must be protected
993 + * by a single queue lock to prevent a race with dm_prep_fn().
995 + spin_lock_irqsave(q->queue_lock, flags);
997 + dm_invalidate_flush_suspend(md);
999 + spin_unlock_irqrestore(q->queue_lock, flags);
1003 + * Additional suspend work for request-based dm.
1005 + * In request-based dm, stopping request_queue prevents mapping.
1006 + * Even after stopping the request_queue, submitted requests from upper-layer
1007 + * can be inserted to the request_queue. So original (unmapped) requests are
1008 + * kept in the request_queue during suspension.
1010 +static void dm_start_suspend(struct mapped_device *md, int noflush)
1012 + struct request *rq = &md->suspend_rq;
1013 + struct request_queue *q = md->queue;
1014 + unsigned long flags;
1022 + * For flush suspend, we need a marker to indicate the border line
1023 + * between flush needed I/Os and deferred I/Os, since all I/Os are
1024 + * queued in the request_queue during suspension.
1026 + * This marker must be inserted after setting DMF_BLOCK_IO,
1027 + * because dm_prep_fn() considers no DMF_BLOCK_IO to be
1028 + * a suspend interruption.
1030 + spin_lock_irqsave(q->queue_lock, flags);
1031 + if (unlikely(rq->ref_count)) {
1033 + * This can happen when the previous suspend was interrupted,
1034 + * the suspend_rq inserted for the previous suspend is still
1035 + * in the queue, and this suspend has been invoked.
1037 + * We could re-insert the suspend_rq by deleting it from
1038 + * the queue forcibly using list_del_init(&rq->queuelist).
1039 + * But it would break the block-layer easily.
1040 + * So we don't re-insert the suspend_rq again in such a case.
1041 + * The suspend_rq should be already invalidated during
1042 + * the previous suspend interruption, so just wait for it
1043 + * to be completed.
1045 + * This suspend will never complete, so warn the user to
1046 + * interrupt this suspend and retry later.
1048 + BUG_ON(!rq->data);
1049 + spin_unlock_irqrestore(q->queue_lock, flags);
1051 + DMWARN("Invalidating the previous suspend is still in"
1052 + " progress. This suspend will be never done."
1053 + " Please interrupt this suspend and retry later.");
1056 + spin_unlock_irqrestore(q->queue_lock, flags);
1058 + /* Now no user of the suspend_rq */
1059 + blk_rq_init(q, rq);
1060 + blk_insert_request(q, rq, 0, NULL);
1064 * Functions to lock and unlock any filesystem running on the
1066 @@ -1526,7 +2294,7 @@ int dm_suspend(struct mapped_device *md,
1067 if (!md->suspended_bdev) {
1068 DMWARN("bdget failed in dm_suspend");
1070 - goto flush_and_out;
1075 @@ -1549,6 +2317,9 @@ int dm_suspend(struct mapped_device *md,
1076 add_wait_queue(&md->wait, &wait);
1077 up_write(&md->io_lock);
1079 + if (dm_request_based(md))
1080 + dm_start_suspend(md, noflush);
1084 dm_table_unplug_all(map);
1085 @@ -1561,14 +2332,22 @@ int dm_suspend(struct mapped_device *md,
1086 down_write(&md->io_lock);
1087 remove_wait_queue(&md->wait, &wait);
1090 - __merge_pushback_list(md);
1092 + if (dm_request_based(md))
1093 + /* All requeued requests are already in md->queue */
1094 + clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
1096 + __merge_pushback_list(md);
1098 up_write(&md->io_lock);
1100 /* were we interrupted ? */
1102 dm_queue_flush(md, DM_WQ_FLUSH_DEFERRED, NULL);
1104 + if (dm_request_based(md))
1105 + dm_abort_suspend(md, noflush);
1108 goto out; /* pushback list is already flushed, so skip flush */
1110 @@ -1577,14 +2356,6 @@ int dm_suspend(struct mapped_device *md,
1112 set_bit(DMF_SUSPENDED, &md->flags);
1117 - * Because there may be already I/Os in the pushback list,
1118 - * flush them before return.
1120 - dm_queue_flush(md, DM_WQ_FLUSH_ALL, NULL);
1123 if (r && md->suspended_bdev) {
1124 bdput(md->suspended_bdev);
1125 @@ -1617,6 +2388,14 @@ int dm_resume(struct mapped_device *md)
1127 dm_queue_flush(md, DM_WQ_FLUSH_DEFERRED, NULL);
1130 + * Flushing deferred I/Os must be done after targets are resumed
1131 + * so that mapping of targets can work correctly.
1132 + * Request-based dm is queueing the deferred I/Os in its request_queue.
1134 + if (dm_request_based(md))
1135 + start_queue(md->queue);
1139 if (md->suspended_bdev) {
1140 @@ -1698,6 +2477,65 @@ int dm_noflush_suspending(struct dm_targ
1142 EXPORT_SYMBOL_GPL(dm_noflush_suspending);
1144 +int dm_init_md_mempool(struct mapped_device *md, int type)
1146 + if (unlikely(type == DM_TYPE_NONE)) {
1147 + DMWARN("no type is specified, can't initialize mempool");
1151 + if (md->mempool_type == type)
1155 + /* The md is already in use; can't change the mempool type */
1156 + DMWARN("can't change mempool type after a table is bound");
1160 + /* Not using the md yet, we can still change the mempool type */
1161 + if (md->mempool_type != DM_TYPE_NONE) {
1162 + mempool_destroy(md->io_pool);
1163 + md->io_pool = NULL;
1164 + mempool_destroy(md->tio_pool);
1165 + md->tio_pool = NULL;
1166 + bioset_free(md->bs);
1168 + md->mempool_type = DM_TYPE_NONE;
1171 + md->io_pool = (type == DM_TYPE_BIO_BASED) ?
1172 + mempool_create_slab_pool(MIN_IOS, _io_cache) :
1173 + mempool_create_slab_pool(MIN_IOS, _bio_info_cache);
1177 + md->tio_pool = (type == DM_TYPE_BIO_BASED) ?
1178 + mempool_create_slab_pool(MIN_IOS, _tio_cache) :
1179 + mempool_create_slab_pool(MIN_IOS, _rq_tio_cache);
1180 + if (!md->tio_pool)
1181 + goto free_io_pool_and_out;
1183 + md->bs = (type == DM_TYPE_BIO_BASED) ?
1184 + bioset_create(16, 16) : bioset_create(MIN_IOS, MIN_IOS);
1186 + goto free_tio_pool_and_out;
1188 + md->mempool_type = type;
1192 +free_tio_pool_and_out:
1193 + mempool_destroy(md->tio_pool);
1194 + md->tio_pool = NULL;
1196 +free_io_pool_and_out:
1197 + mempool_destroy(md->io_pool);
1198 + md->io_pool = NULL;
1203 static struct block_device_operations dm_blk_dops = {
1204 .open = dm_blk_open,
1205 .release = dm_blk_close,
1206 --- a/drivers/md/dm.h
1207 +++ b/drivers/md/dm.h
1209 #define DM_SUSPEND_NOFLUSH_FLAG (1 << 1)
1212 + * Type of table and mapped_device's mempool
1214 +#define DM_TYPE_NONE 0
1215 +#define DM_TYPE_BIO_BASED 1
1216 +#define DM_TYPE_REQUEST_BASED 2
1219 * List of devices that a metadevice uses and should open/close.
1222 @@ -49,6 +56,10 @@ void dm_table_presuspend_targets(struct
1223 void dm_table_postsuspend_targets(struct dm_table *t);
1224 int dm_table_resume_targets(struct dm_table *t);
1225 int dm_table_any_congested(struct dm_table *t, int bdi_bits);
1226 +int dm_table_any_busy_target(struct dm_table *t);
1227 +int dm_table_set_type(struct dm_table *t);
1228 +int dm_table_get_type(struct dm_table *t);
1229 +int dm_table_request_based(struct dm_table *t);
1230 void dm_table_unplug_all(struct dm_table *t);
1233 @@ -97,10 +108,16 @@ void *dm_vcalloc(unsigned long nmemb, un
1234 union map_info *dm_get_mapinfo(struct bio *bio);
1235 int dm_open_count(struct mapped_device *md);
1236 int dm_lock_for_deletion(struct mapped_device *md);
1237 +union map_info *dm_get_rq_mapinfo(struct request *rq);
1239 void dm_kobject_uevent(struct mapped_device *md);
1241 int dm_kcopyd_init(void);
1242 void dm_kcopyd_exit(void);
1245 + * Mempool initializer for a mapped_device
1247 +int dm_init_md_mempool(struct mapped_device *md, int type);
1250 --- a/drivers/md/dm-ioctl.c
1251 +++ b/drivers/md/dm-ioctl.c
1252 @@ -1046,6 +1046,12 @@ static int populate_table(struct dm_tabl
1256 + r = dm_table_set_type(table);
1258 + DMWARN("unable to set table type");
1262 return dm_table_complete(table);
1265 @@ -1070,6 +1076,13 @@ static int table_load(struct dm_ioctl *p
1269 + r = dm_init_md_mempool(md, dm_table_get_type(t));
1271 + DMWARN("unable to initialize the md mempools for this table");
1276 down_write(&_hash_lock);
1277 hc = dm_get_mdptr(md);
1278 if (!hc || hc->md != md) {
1279 --- a/drivers/md/dm-mpath.c
1280 +++ b/drivers/md/dm-mpath.c
1284 #include "dm-path-selector.h"
1285 -#include "dm-bio-list.h"
1286 -#include "dm-bio-record.h"
1287 #include "dm-uevent.h"
1289 #include <linux/ctype.h>
1290 @@ -83,7 +81,7 @@ struct multipath {
1291 unsigned pg_init_count; /* Number of times pg_init called */
1293 struct work_struct process_queued_ios;
1294 - struct bio_list queued_ios;
1295 + struct list_head queued_ios;
1296 unsigned queue_size;
1298 struct work_struct trigger_event;
1299 @@ -100,7 +98,6 @@ struct multipath {
1301 struct dm_mpath_io {
1302 struct pgpath *pgpath;
1303 - struct dm_bio_details details;
1306 typedef int (*action_fn) (struct pgpath *pgpath);
1307 @@ -197,6 +194,7 @@ static struct multipath *alloc_multipath
1308 m = kzalloc(sizeof(*m), GFP_KERNEL);
1310 INIT_LIST_HEAD(&m->priority_groups);
1311 + INIT_LIST_HEAD(&m->queued_ios);
1312 spin_lock_init(&m->lock);
1314 INIT_WORK(&m->process_queued_ios, process_queued_ios);
1315 @@ -321,12 +319,13 @@ static int __must_push_back(struct multi
1316 dm_noflush_suspending(m->ti));
1319 -static int map_io(struct multipath *m, struct bio *bio,
1320 +static int map_io(struct multipath *m, struct request *clone,
1321 struct dm_mpath_io *mpio, unsigned was_queued)
1323 int r = DM_MAPIO_REMAPPED;
1324 unsigned long flags;
1325 struct pgpath *pgpath;
1326 + struct block_device *bdev;
1328 spin_lock_irqsave(&m->lock, flags);
1330 @@ -343,16 +342,18 @@ static int map_io(struct multipath *m, s
1331 if ((pgpath && m->queue_io) ||
1332 (!pgpath && m->queue_if_no_path)) {
1333 /* Queue for the daemon to resubmit */
1334 - bio_list_add(&m->queued_ios, bio);
1335 + list_add_tail(&clone->queuelist, &m->queued_ios);
1337 if ((m->pg_init_required && !m->pg_init_in_progress) ||
1339 queue_work(kmultipathd, &m->process_queued_ios);
1341 r = DM_MAPIO_SUBMITTED;
1342 - } else if (pgpath)
1343 - bio->bi_bdev = pgpath->path.dev->bdev;
1344 - else if (__must_push_back(m))
1345 + } else if (pgpath) {
1346 + bdev = pgpath->path.dev->bdev;
1347 + clone->q = bdev_get_queue(bdev);
1348 + clone->rq_disk = bdev->bd_disk;
1349 + } else if (__must_push_back(m))
1350 r = DM_MAPIO_REQUEUE;
1352 r = -EIO; /* Failed */
1353 @@ -395,30 +396,31 @@ static void dispatch_queued_ios(struct m
1356 unsigned long flags;
1357 - struct bio *bio = NULL, *next;
1358 struct dm_mpath_io *mpio;
1359 union map_info *info;
1360 + struct request *clone, *n;
1363 spin_lock_irqsave(&m->lock, flags);
1364 - bio = bio_list_get(&m->queued_ios);
1365 + list_splice_init(&m->queued_ios, &cl);
1366 spin_unlock_irqrestore(&m->lock, flags);
1369 - next = bio->bi_next;
1370 - bio->bi_next = NULL;
1371 + list_for_each_entry_safe(clone, n, &cl, queuelist) {
1372 + list_del_init(&clone->queuelist);
1374 - info = dm_get_mapinfo(bio);
1375 + info = dm_get_rq_mapinfo(clone);
1378 - r = map_io(m, bio, mpio, 1);
1380 - bio_endio(bio, r);
1381 - else if (r == DM_MAPIO_REMAPPED)
1382 - generic_make_request(bio);
1383 - else if (r == DM_MAPIO_REQUEUE)
1384 - bio_endio(bio, -EIO);
1387 + r = map_io(m, clone, mpio, 1);
1389 + mempool_free(mpio, m->mpio_pool);
1390 + dm_kill_request(clone, r);
1391 + } else if (r == DM_MAPIO_REMAPPED)
1392 + dm_dispatch_request(clone);
1393 + else if (r == DM_MAPIO_REQUEUE) {
1394 + mempool_free(mpio, m->mpio_pool);
1395 + dm_requeue_request(clone);
1400 @@ -833,21 +835,24 @@ static void multipath_dtr(struct dm_targ
1404 - * Map bios, recording original fields for later in case we have to resubmit
1405 + * Map cloned requests
1407 -static int multipath_map(struct dm_target *ti, struct bio *bio,
1408 +static int multipath_map(struct dm_target *ti, struct request *clone,
1409 union map_info *map_context)
1412 struct dm_mpath_io *mpio;
1413 struct multipath *m = (struct multipath *) ti->private;
1415 - mpio = mempool_alloc(m->mpio_pool, GFP_NOIO);
1416 - dm_bio_record(&mpio->details, bio);
1417 + mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
1419 + /* ENOMEM, requeue */
1420 + return DM_MAPIO_REQUEUE;
1421 + memset(mpio, 0, sizeof(*mpio));
1423 map_context->ptr = mpio;
1424 - bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
1425 - r = map_io(m, bio, mpio, 0);
1426 + clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
1427 + r = map_io(m, clone, mpio, 0);
1428 if (r < 0 || r == DM_MAPIO_REQUEUE)
1429 mempool_free(mpio, m->mpio_pool);
1431 @@ -1129,53 +1134,41 @@ static void activate_path(struct work_st
1435 -static int do_end_io(struct multipath *m, struct bio *bio,
1436 +static int do_end_io(struct multipath *m, struct request *clone,
1437 int error, struct dm_mpath_io *mpio)
1440 + * We don't queue any clone request inside the multipath target
1441 + * during end I/O handling, since those clone requests don't have
1442 + * bio clones. If we queue them inside the multipath target,
1443 + * we need to make bio clones, which requires memory allocation.
1444 + * (See drivers/md/dm.c:end_clone_bio() about why the clone requests
1445 + * don't have bio clones.)
1446 + * Instead of queueing the clone request here, we queue the original
1447 + * request into dm core, which will remake a clone request and
1448 + * clone bios for it and resubmit it later.
1450 + int r = DM_ENDIO_REQUEUE;
1451 unsigned long flags;
1454 + if (!error && !clone->errors)
1455 return 0; /* I/O complete */
1457 - if ((error == -EWOULDBLOCK) && bio_rw_ahead(bio))
1460 if (error == -EOPNOTSUPP)
1463 - spin_lock_irqsave(&m->lock, flags);
1464 - if (!m->nr_valid_paths) {
1465 - if (__must_push_back(m)) {
1466 - spin_unlock_irqrestore(&m->lock, flags);
1467 - return DM_ENDIO_REQUEUE;
1468 - } else if (!m->queue_if_no_path) {
1469 - spin_unlock_irqrestore(&m->lock, flags);
1472 - spin_unlock_irqrestore(&m->lock, flags);
1476 - spin_unlock_irqrestore(&m->lock, flags);
1479 fail_path(mpio->pgpath);
1482 - dm_bio_restore(&mpio->details, bio);
1484 - /* queue for the daemon to resubmit or fail */
1485 spin_lock_irqsave(&m->lock, flags);
1486 - bio_list_add(&m->queued_ios, bio);
1489 - queue_work(kmultipathd, &m->process_queued_ios);
1490 + if (!m->nr_valid_paths && !m->queue_if_no_path && !__must_push_back(m))
1492 spin_unlock_irqrestore(&m->lock, flags);
1494 - return DM_ENDIO_INCOMPLETE; /* io not complete */
1498 -static int multipath_end_io(struct dm_target *ti, struct bio *bio,
1499 +static int multipath_end_io(struct dm_target *ti, struct request *clone,
1500 int error, union map_info *map_context)
1502 struct multipath *m = ti->private;
1503 @@ -1184,14 +1177,13 @@ static int multipath_end_io(struct dm_ta
1504 struct path_selector *ps;
1507 - r = do_end_io(m, bio, error, mpio);
1508 + r = do_end_io(m, clone, error, mpio);
1510 ps = &pgpath->pg->ps;
1511 if (ps->type->end_io)
1512 ps->type->end_io(ps, &pgpath->path);
1514 - if (r != DM_ENDIO_INCOMPLETE)
1515 - mempool_free(mpio, m->mpio_pool);
1516 + mempool_free(mpio, m->mpio_pool);
1520 @@ -1427,6 +1419,75 @@ static int multipath_ioctl(struct dm_tar
1521 bdev->bd_disk, cmd, arg);
1524 +static int __pgpath_busy(struct pgpath *pgpath)
1526 + struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
1528 + return dm_underlying_device_busy(q);
1532 + * We return "busy" only when we can map I/Os but underlying devices
1533 + * are busy (so even if we map I/Os now, the I/Os will wait on
1534 + * the underlying queue).
1535 + * In other words, if we want to kill I/Os or queue them inside us
1536 + * due to map unavailability, we don't return "busy". Otherwise,
1537 + * dm core won't give us the I/Os and we can't do what we want.
1539 +static int multipath_busy(struct dm_target *ti)
1541 + int busy = 0, has_active = 0;
1542 + struct multipath *m = (struct multipath *) ti->private;
1543 + struct priority_group *pg;
1544 + struct pgpath *pgpath;
1545 + unsigned long flags;
1547 + spin_lock_irqsave(&m->lock, flags);
1549 + /* Guess which priority_group will be used at next mapping time */
1550 + if (unlikely(!m->current_pgpath && m->next_pg))
1552 + else if (likely(m->current_pg))
1553 + pg = m->current_pg;
1556 + * We don't know which pg will be used at next mapping time.
1557 + * We don't call __choose_pgpath() here to avoid triggering
1558 + * pg_init just by busy checking.
1559 + * So we don't know whether underlying devices we will be using
1560 + * at next mapping time are busy or not. Just try mapping.
1565 + * If there is at least one non-busy active path, the path selector
1566 + * will be able to select it. So we consider such a pg as not busy.
1569 + list_for_each_entry(pgpath, &pg->pgpaths, list)
1570 + if (pgpath->is_active) {
1573 + if (!__pgpath_busy(pgpath)) {
1581 + * No active path in this pg, so this pg won't be used and
1582 + * the current_pg will be changed at next mapping time.
1583 + * We need to try mapping to determine it.
1588 + spin_unlock_irqrestore(&m->lock, flags);
1593 /*-----------------------------------------------------------------
1595 *---------------------------------------------------------------*/
1596 @@ -1436,13 +1497,14 @@ static struct target_type multipath_targ
1597 .module = THIS_MODULE,
1598 .ctr = multipath_ctr,
1599 .dtr = multipath_dtr,
1600 - .map = multipath_map,
1601 - .end_io = multipath_end_io,
1602 + .map_rq = multipath_map,
1603 + .rq_end_io = multipath_end_io,
1604 .presuspend = multipath_presuspend,
1605 .resume = multipath_resume,
1606 .status = multipath_status,
1607 .message = multipath_message,
1608 .ioctl = multipath_ioctl,
1609 + .busy = multipath_busy,
1612 static int __init dm_multipath_init(void)
1613 --- a/drivers/md/dm-table.c
1614 +++ b/drivers/md/dm-table.c
1615 @@ -108,6 +108,8 @@ static void combine_restrictions_low(str
1616 lhs->bounce_pfn = min_not_zero(lhs->bounce_pfn, rhs->bounce_pfn);
1618 lhs->no_cluster |= rhs->no_cluster;
1620 + lhs->no_request_stacking |= rhs->no_request_stacking;
1624 @@ -522,6 +524,8 @@ void dm_set_device_limits(struct dm_targ
1625 rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn);
1627 rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
1629 + rs->no_request_stacking |= !blk_queue_stackable(q);
1631 EXPORT_SYMBOL_GPL(dm_set_device_limits);
1633 @@ -731,6 +735,66 @@ int dm_table_add_target(struct dm_table
1637 +int dm_table_set_type(struct dm_table *t)
1640 + int bio_based = 0, request_based = 0;
1641 + struct dm_target *tgt;
1643 + for (i = 0; i < t->num_targets; i++) {
1644 + tgt = t->targets + i;
1645 + if (tgt->type->map_rq)
1646 + request_based = 1;
1650 + if (bio_based && request_based) {
1651 + DMWARN("Inconsistent table: different target types"
1652 + " can't be mixed up");
1658 + /* We must use this table as bio-based */
1659 + t->limits.no_request_stacking = 1;
1663 + BUG_ON(!request_based); /* No targets in this table */
1665 + /* Non-request-stackable devices can't be used for request-based dm */
1666 + if (t->limits.no_request_stacking) {
1667 + DMWARN("table load rejected: including non-request-stackable"
1673 + * Request-based dm supports only tables that have a single target now.
1674 + * To support multiple targets, request splitting support is needed,
1675 + * and that needs lots of changes in the block-layer.
1676 + * (e.g. request completion process for partial completion.)
1678 + if (t->num_targets > 1) {
1679 + DMWARN("Request-based dm doesn't support multiple targets yet");
1686 +int dm_table_get_type(struct dm_table *t)
1688 + return t->limits.no_request_stacking ?
1689 + DM_TYPE_BIO_BASED : DM_TYPE_REQUEST_BASED;
1692 +int dm_table_request_based(struct dm_table *t)
1694 + return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED;
1697 static int setup_indexes(struct dm_table *t)
1700 @@ -861,6 +925,10 @@ void dm_table_set_restrictions(struct dm
1702 queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);
1704 + if (t->limits.no_request_stacking)
1705 + queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, q);
1707 + queue_flag_set_unlocked(QUEUE_FLAG_STACKABLE, q);
1710 unsigned int dm_table_get_num_targets(struct dm_table *t)
1711 @@ -949,6 +1017,20 @@ int dm_table_any_congested(struct dm_tab
1715 +int dm_table_any_busy_target(struct dm_table *t)
1718 + struct dm_target *ti;
1720 + for (i = 0; i < t->num_targets; i++) {
1721 + ti = t->targets + i;
1722 + if (ti->type->busy && ti->type->busy(ti))
1729 void dm_table_unplug_all(struct dm_table *t)
1732 --- a/include/linux/device-mapper.h
1733 +++ b/include/linux/device-mapper.h
1734 @@ -46,6 +46,8 @@ typedef void (*dm_dtr_fn) (struct dm_tar
1736 typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio,
1737 union map_info *map_context);
1738 +typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone,
1739 + union map_info *map_context);
1743 @@ -58,6 +60,9 @@ typedef int (*dm_map_fn) (struct dm_targ
1744 typedef int (*dm_endio_fn) (struct dm_target *ti,
1745 struct bio *bio, int error,
1746 union map_info *map_context);
1747 +typedef int (*dm_request_endio_fn) (struct dm_target *ti,
1748 + struct request *clone, int error,
1749 + union map_info *map_context);
1751 typedef void (*dm_flush_fn) (struct dm_target *ti);
1752 typedef void (*dm_presuspend_fn) (struct dm_target *ti);
1753 @@ -77,6 +82,13 @@ typedef int (*dm_ioctl_fn) (struct dm_ta
1754 typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm,
1755 struct bio_vec *biovec, int max_size);
1759 + * 0: The target can handle the next I/O immediately.
1760 + * 1: The target can't handle the next I/O immediately.
1762 +typedef int (*dm_busy_fn) (struct dm_target *ti);
1764 void dm_error(const char *message);
1767 @@ -103,7 +115,9 @@ struct target_type {
1771 + dm_map_request_fn map_rq;
1773 + dm_request_endio_fn rq_end_io;
1775 dm_presuspend_fn presuspend;
1776 dm_postsuspend_fn postsuspend;
1777 @@ -113,6 +127,7 @@ struct target_type {
1778 dm_message_fn message;
1784 struct io_restrictions {
1785 @@ -125,6 +140,7 @@ struct io_restrictions {
1786 unsigned short max_hw_segments;
1787 unsigned short max_phys_segments;
1788 unsigned char no_cluster; /* inverted so that 0 is default */
1789 + unsigned char no_request_stacking;
1793 @@ -348,4 +364,12 @@ static inline unsigned long to_bytes(sec
1794 return (n << SECTOR_SHIFT);
1797 +/*-----------------------------------------------------------------
1798 + * Helper for block layer and dm core operations
1799 + *---------------------------------------------------------------*/
1800 +void dm_dispatch_request(struct request *rq);
1801 +void dm_requeue_request(struct request *rq);
1802 +void dm_kill_request(struct request *rq, int error);
1803 +int dm_underlying_device_busy(struct request_queue *q);
1805 #endif /* _LINUX_DEVICE_MAPPER_H */