1From: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
2Subject: Request-based multipath patches
3References: FATE#302108
4
5This is the latest version of the request-based multipathing patches,
6posted to dm-devel and linux-scsi on 03.10.2008.
7
8Signed-off-by: Hannes Reinecke <hare@suse.de>
9
10---
11 drivers/md/dm-ioctl.c | 13
12 drivers/md/dm-mpath.c | 192 +++++---
13 drivers/md/dm-table.c | 82 +++
14 drivers/md/dm.c | 952 +++++++++++++++++++++++++++++++++++++++---
15 drivers/md/dm.h | 17
16 include/linux/device-mapper.h | 24 +
17 6 files changed, 1158 insertions(+), 122 deletions(-)
18
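Before the diff itself, a minimal sketch (editor's illustration, not part of the patch) of the shape of a request-based target written against the new target_type hooks this series adds (.map_rq, .rq_end_io and .busy, declared in the include/linux/device-mapper.h hunk at the end). The "example-rq" target name, its trivial pass-through policy and the ti->private layout are assumptions for illustration only; dm-mpath.c further down is the real user of these hooks.

/*
 * Illustrative sketch only, not part of the patch: a minimal request-based
 * target against the hooks added by this series. Constructor/destructor and
 * dm_register_target() are omitted for brevity.
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/device-mapper.h>

/* Map a cloned request: point it at the underlying queue and let dm core
 * dispatch it via dm_dispatch_request() (DM_MAPIO_REMAPPED). */
static int example_map_rq(struct dm_target *ti, struct request *clone,
			  union map_info *map_context)
{
	struct dm_dev *dev = ti->private;	/* assumed to be set up in .ctr */
	struct block_device *bdev = dev->bdev;

	clone->q = bdev_get_queue(bdev);
	clone->rq_disk = bdev->bd_disk;

	return DM_MAPIO_REMAPPED;
}

/* Completion hook for the clone: returning error (<= 0) lets dm core finish
 * the original request; DM_ENDIO_REQUEUE would ask dm core to retry it. */
static int example_rq_end_io(struct dm_target *ti, struct request *clone,
			     int error, union map_info *map_context)
{
	return error;
}

/* Tell dm core not to dequeue further requests while the underlying
 * device cannot make progress. */
static int example_busy(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;

	return dm_underlying_device_busy(bdev_get_queue(dev->bdev));
}

static struct target_type example_target = {
	.name      = "example-rq",
	.version   = {0, 0, 1},
	.module    = THIS_MODULE,
	.map_rq    = example_map_rq,
	.rq_end_io = example_rq_end_io,
	.busy      = example_busy,
};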
19--- a/drivers/md/dm.c
20+++ b/drivers/md/dm.c
21@@ -32,6 +32,7 @@ static unsigned int _major = 0;
22
23 static DEFINE_SPINLOCK(_minor_lock);
24 /*
25+ * For bio based dm.
26 * One of these is allocated per bio.
27 */
28 struct dm_io {
29@@ -43,6 +44,7 @@ struct dm_io {
30 };
31
32 /*
33+ * For bio based dm.
34 * One of these is allocated per target within a bio. Hopefully
35 * this will be simplified out one day.
36 */
37@@ -52,6 +54,31 @@ struct dm_target_io {
38 union map_info info;
39 };
40
41+/*
42+ * For request based dm.
43+ * One of these is allocated per request.
44+ *
45+ * Since the original request and its clone are assumed to map 1:1,
46+ * a clone counter like struct dm_io.io_count isn't needed, so
47+ * struct dm_io and struct dm_target_io can be merged into this structure.
48+ */
49+struct dm_rq_target_io {
50+ struct mapped_device *md;
51+ struct dm_target *ti;
52+ struct request *orig, clone;
53+ int error;
54+ union map_info info;
55+};
56+
57+/*
58+ * For request based dm.
59+ * One of these is allocated per bio.
60+ */
61+struct dm_clone_bio_info {
62+ struct bio *orig;
63+ struct request *rq;
64+};
65+
66 union map_info *dm_get_mapinfo(struct bio *bio)
67 {
68 if (bio && bio->bi_private)
69@@ -59,6 +86,14 @@ union map_info *dm_get_mapinfo(struct bi
70 return NULL;
71 }
72
73+union map_info *dm_get_rq_mapinfo(struct request *rq)
74+{
75+ if (rq && rq->end_io_data)
76+ return &((struct dm_rq_target_io *)rq->end_io_data)->info;
77+ return NULL;
78+}
79+EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
80+
81 #define MINOR_ALLOCED ((void *)-1)
82
83 /*
84@@ -76,7 +111,6 @@ union map_info *dm_get_mapinfo(struct bi
85 */
86 struct dm_wq_req {
87 enum {
88- DM_WQ_FLUSH_ALL,
89 DM_WQ_FLUSH_DEFERRED,
90 } type;
91 struct work_struct work;
92@@ -126,6 +160,8 @@ struct mapped_device {
93
94 struct bio_set *bs;
95
96+ unsigned int mempool_type; /* Type of mempools above. */
97+
98 /*
99 * Event handling.
100 */
101@@ -143,52 +179,74 @@ struct mapped_device {
102
103 /* forced geometry settings */
104 struct hd_geometry geometry;
105+
106+ /* marker of flush suspend for request-based dm */
107+ struct request suspend_rq;
108+
109+ /* For saving the address of __make_request for request based dm */
110+ make_request_fn *saved_make_request_fn;
111 };
112
113 #define MIN_IOS 256
114 static struct kmem_cache *_io_cache;
115 static struct kmem_cache *_tio_cache;
116+static struct kmem_cache *_rq_tio_cache;
117+static struct kmem_cache *_bio_info_cache;
118
119 static int __init local_init(void)
120 {
121- int r;
122+ int r = -ENOMEM;
123
124 /* allocate a slab for the dm_ios */
125 _io_cache = KMEM_CACHE(dm_io, 0);
126 if (!_io_cache)
127- return -ENOMEM;
128+ return r;
129
130 /* allocate a slab for the target ios */
131 _tio_cache = KMEM_CACHE(dm_target_io, 0);
132- if (!_tio_cache) {
133- kmem_cache_destroy(_io_cache);
134- return -ENOMEM;
135- }
136+ if (!_tio_cache)
137+ goto out_free_io_cache;
138+
139+ _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
140+ if (!_rq_tio_cache)
141+ goto out_free_tio_cache;
142+
143+ _bio_info_cache = KMEM_CACHE(dm_clone_bio_info, 0);
144+ if (!_bio_info_cache)
145+ goto out_free_rq_tio_cache;
146
147 r = dm_uevent_init();
148- if (r) {
149- kmem_cache_destroy(_tio_cache);
150- kmem_cache_destroy(_io_cache);
151- return r;
152- }
153+ if (r)
154+ goto out_free_bio_info_cache;
155
156 _major = major;
157 r = register_blkdev(_major, _name);
158- if (r < 0) {
159- kmem_cache_destroy(_tio_cache);
160- kmem_cache_destroy(_io_cache);
161- dm_uevent_exit();
162- return r;
163- }
164+ if (r < 0)
165+ goto out_uevent_exit;
166
167 if (!_major)
168 _major = r;
169
170 return 0;
171+
172+out_uevent_exit:
173+ dm_uevent_exit();
174+out_free_bio_info_cache:
175+ kmem_cache_destroy(_bio_info_cache);
176+out_free_rq_tio_cache:
177+ kmem_cache_destroy(_rq_tio_cache);
178+out_free_tio_cache:
179+ kmem_cache_destroy(_tio_cache);
180+out_free_io_cache:
181+ kmem_cache_destroy(_io_cache);
182+
183+ return r;
184 }
185
186 static void local_exit(void)
187 {
188+ kmem_cache_destroy(_bio_info_cache);
189+ kmem_cache_destroy(_rq_tio_cache);
190 kmem_cache_destroy(_tio_cache);
191 kmem_cache_destroy(_io_cache);
192 unregister_blkdev(_major, _name);
193@@ -380,6 +438,28 @@ static void free_tio(struct mapped_devic
194 mempool_free(tio, md->tio_pool);
195 }
196
197+static inline struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md)
198+{
199+ return mempool_alloc(md->tio_pool, GFP_ATOMIC);
200+}
201+
202+static inline void free_rq_tio(struct mapped_device *md,
203+ struct dm_rq_target_io *tio)
204+{
205+ mempool_free(tio, md->tio_pool);
206+}
207+
208+static inline struct dm_clone_bio_info *alloc_bio_info(struct mapped_device *md)
209+{
210+ return mempool_alloc(md->io_pool, GFP_ATOMIC);
211+}
212+
213+static inline void free_bio_info(struct mapped_device *md,
214+ struct dm_clone_bio_info *info)
215+{
216+ mempool_free(info, md->io_pool);
217+}
218+
219 static void start_io_acct(struct dm_io *io)
220 {
221 struct mapped_device *md = io->md;
222@@ -568,6 +648,266 @@ static void clone_endio(struct bio *bio,
223 free_tio(md, tio);
224 }
225
226+/*
227+ * Partial completion handling for request-based dm
228+ */
229+static void end_clone_bio(struct bio *clone, int error)
230+{
231+ struct dm_clone_bio_info *info = clone->bi_private;
232+ struct dm_rq_target_io *tio = info->rq->end_io_data;
233+ struct bio *bio = info->orig;
234+ unsigned int nr_bytes = info->orig->bi_size;
235+
236+ free_bio_info(tio->md, info);
237+ clone->bi_private = tio->md->bs;
238+ bio_put(clone);
239+
240+ if (tio->error) {
241+ /*
242+ * An error has already been detected on this request.
243+ * Once an error has occurred, just let clone->end_io() handle
244+ * the remainder.
245+ */
246+ return;
247+ } else if (error) {
248+ /*
249+ * Don't report the error to the upper layer yet.
250+ * The error handling decision is made by the target driver
251+ * when the request is completed.
252+ */
253+ tio->error = error;
254+ return;
255+ }
256+
257+ /*
258+ * I/O for the bio successfully completed.
259+ * Report the data completion to the upper layer.
260+ */
261+
262+ /*
263+ * bios are processed from the head of the list.
264+ * So the completing bio should always be rq->bio.
266+ * If it's not, something is going wrong.
266+ */
267+ if (tio->orig->bio != bio)
268+ DMERR("bio completion is going in the middle of the request");
269+
270+ /*
271+ * Update the original request.
272+ * Do not use blk_end_request() here, because it may complete
273+ * the original request before the clone, and break the ordering.
274+ */
275+ blk_update_request(tio->orig, 0, nr_bytes);
276+}
277+
278+static void free_bio_clone(struct request *clone)
279+{
280+ struct dm_rq_target_io *tio = clone->end_io_data;
281+ struct mapped_device *md = tio->md;
282+ struct bio *bio;
283+ struct dm_clone_bio_info *info;
284+
285+ while ((bio = clone->bio) != NULL) {
286+ clone->bio = bio->bi_next;
287+
288+ info = bio->bi_private;
289+ free_bio_info(md, info);
290+
291+ bio->bi_private = md->bs;
292+ bio_put(bio);
293+ }
294+}
295+
296+static void dec_rq_pending(struct dm_rq_target_io *tio)
297+{
298+ if (!atomic_dec_return(&tio->md->pending))
299+ /* nudge anyone waiting on suspend queue */
300+ wake_up(&tio->md->wait);
301+}
302+
303+static void dm_unprep_request(struct request *rq)
304+{
305+ struct request *clone = rq->special;
306+ struct dm_rq_target_io *tio = clone->end_io_data;
307+
308+ rq->special = NULL;
309+ rq->cmd_flags &= ~REQ_DONTPREP;
310+
311+ free_bio_clone(clone);
312+ dec_rq_pending(tio);
313+ free_rq_tio(tio->md, tio);
314+}
315+
316+/*
317+ * Requeue the original request of a clone.
318+ */
319+void dm_requeue_request(struct request *clone)
320+{
321+ struct dm_rq_target_io *tio = clone->end_io_data;
322+ struct request *rq = tio->orig;
323+ struct request_queue *q = rq->q;
324+ unsigned long flags;
325+
326+ dm_unprep_request(rq);
327+
328+ spin_lock_irqsave(q->queue_lock, flags);
329+ if (elv_queue_empty(q))
330+ blk_plug_device(q);
331+ blk_requeue_request(q, rq);
332+ spin_unlock_irqrestore(q->queue_lock, flags);
333+}
334+EXPORT_SYMBOL_GPL(dm_requeue_request);
335+
336+static inline void __stop_queue(struct request_queue *q)
337+{
338+ blk_stop_queue(q);
339+}
340+
341+static void stop_queue(struct request_queue *q)
342+{
343+ unsigned long flags;
344+
345+ spin_lock_irqsave(q->queue_lock, flags);
346+ __stop_queue(q);
347+ spin_unlock_irqrestore(q->queue_lock, flags);
348+}
349+
350+static inline void __start_queue(struct request_queue *q)
351+{
352+ if (blk_queue_stopped(q))
353+ blk_start_queue(q);
354+}
355+
356+static void start_queue(struct request_queue *q)
357+{
358+ unsigned long flags;
359+
360+ spin_lock_irqsave(q->queue_lock, flags);
361+ __start_queue(q);
362+ spin_unlock_irqrestore(q->queue_lock, flags);
363+}
364+
365+/*
366+ * Complete the clone and the original request
367+ */
368+static void dm_end_request(struct request *clone, int error)
369+{
370+ struct dm_rq_target_io *tio = clone->end_io_data;
371+ struct request *rq = tio->orig;
372+ struct request_queue *q = rq->q;
373+ unsigned int nr_bytes = blk_rq_bytes(rq);
374+
375+ if (blk_pc_request(rq)) {
376+ rq->errors = clone->errors;
377+ rq->data_len = clone->data_len;
378+
379+ if (rq->sense)
380+ /*
381+ * We are using the sense buffer of the original
382+ * request.
383+ * So setting the length of the sense data is enough.
384+ */
385+ rq->sense_len = clone->sense_len;
386+ }
387+
388+ free_bio_clone(clone);
389+ dec_rq_pending(tio);
390+ free_rq_tio(tio->md, tio);
391+
392+ if (unlikely(blk_end_request(rq, error, nr_bytes)))
393+ BUG();
394+
395+ blk_run_queue(q);
396+}
397+
398+/*
399+ * Request completion handler for request-based dm
400+ */
401+static void dm_softirq_done(struct request *rq)
402+{
403+ struct request *clone = rq->completion_data;
404+ struct dm_rq_target_io *tio = clone->end_io_data;
405+ dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;
406+ int error = tio->error;
407+ int r;
408+
409+ if (rq->cmd_flags & REQ_FAILED)
410+ goto end_request;
411+
412+ if (rq_end_io) {
413+ r = rq_end_io(tio->ti, clone, error, &tio->info);
414+ if (r <= 0)
415+ /* The target wants to complete the I/O */
416+ error = r;
417+ else if (r == DM_ENDIO_INCOMPLETE)
418+ /* The target will handle the I/O */
419+ return;
420+ else if (r == DM_ENDIO_REQUEUE) {
421+ /*
422+ * The target wants to requeue the I/O.
423+ * Don't invoke blk_run_queue() so that the requeued
424+ * request won't be dispatched again soon.
425+ */
426+ dm_requeue_request(clone);
427+ return;
428+ } else {
429+ DMWARN("unimplemented target endio return value: %d",
430+ r);
431+ BUG();
432+ }
433+ }
434+
435+end_request:
436+ dm_end_request(clone, error);
437+}
438+
439+/*
440+ * Called with the queue lock held
441+ */
442+static void end_clone_request(struct request *clone, int error)
443+{
444+ struct dm_rq_target_io *tio = clone->end_io_data;
445+ struct request *rq = tio->orig;
446+
447+ /*
448+ * This just cleans up the bookkeeping of the queue in which
449+ * the clone was dispatched.
450+ * The clone is *NOT* actually freed here, because it is allocated from
451+ * dm's own mempool and REQ_ALLOCED isn't set in clone->cmd_flags.
452+ */
453+ __blk_put_request(clone->q, clone);
454+
455+ /*
456+ * Actual request completion is done in a softirq context which doesn't
457+ * hold the queue lock. Otherwise, deadlock could occur because:
458+ * - another request may be submitted by the upper level driver
459+ * of the stack during the completion
460+ * - that submission, which requires the queue lock, may be
461+ * directed at this same queue
462+ */
463+ tio->error = error;
464+ rq->completion_data = clone;
465+ blk_complete_request(rq);
466+}
467+
468+/*
469+ * Complete the original request of a clone with an error status.
470+ * The target's rq_end_io() function isn't called.
471+ * This may be used by a target's map_rq() function when the mapping fails.
472+ */
473+void dm_kill_request(struct request *clone, int error)
474+{
475+ struct dm_rq_target_io *tio = clone->end_io_data;
476+ struct request *rq = tio->orig;
477+
478+ tio->error = error;
479+ /* Avoid printing the "I/O error" message, since we did not actually do any I/O */
480+ rq->cmd_flags |= (REQ_FAILED | REQ_QUIET);
481+ rq->completion_data = clone;
482+ blk_complete_request(rq);
483+}
484+EXPORT_SYMBOL_GPL(dm_kill_request);
485+
486 static sector_t max_io_len(struct mapped_device *md,
487 sector_t sector, struct dm_target *ti)
488 {
489@@ -886,7 +1226,7 @@ out:
490 * The request function that just remaps the bio built up by
491 * dm_merge_bvec.
492 */
493-static int dm_request(struct request_queue *q, struct bio *bio)
494+static int _dm_request(struct request_queue *q, struct bio *bio)
495 {
496 int r = -EIO;
497 int rw = bio_data_dir(bio);
498@@ -936,12 +1276,335 @@ out_req:
499 return 0;
500 }
501
502+static int dm_make_request(struct request_queue *q, struct bio *bio)
503+{
504+ struct mapped_device *md = (struct mapped_device *)q->queuedata;
505+
506+ if (unlikely(bio_barrier(bio))) {
507+ bio_endio(bio, -EOPNOTSUPP);
508+ return 0;
509+ }
510+
511+ if (unlikely(!md->map)) {
512+ bio_endio(bio, -EIO);
513+ return 0;
514+ }
515+
516+ return md->saved_make_request_fn(q, bio); /* call __make_request() */
517+}
518+
519+static inline int dm_request_based(struct mapped_device *md)
520+{
521+ return blk_queue_stackable(md->queue);
522+}
523+
524+static int dm_request(struct request_queue *q, struct bio *bio)
525+{
526+ struct mapped_device *md = q->queuedata;
527+
528+ if (dm_request_based(md))
529+ return dm_make_request(q, bio);
530+
531+ return _dm_request(q, bio);
532+}
533+
534+void dm_dispatch_request(struct request *rq)
535+{
536+ int r;
537+
538+ rq->start_time = jiffies;
539+ r = blk_insert_cloned_request(rq->q, rq);
540+ if (r)
541+ dm_kill_request(rq, r);
542+}
543+EXPORT_SYMBOL_GPL(dm_dispatch_request);
544+
545+static void copy_request_info(struct request *clone, struct request *rq)
546+{
547+ clone->cmd_flags = (rq_data_dir(rq) | REQ_NOMERGE);
548+ clone->cmd_type = rq->cmd_type;
549+ clone->sector = rq->sector;
550+ clone->hard_sector = rq->hard_sector;
551+ clone->nr_sectors = rq->nr_sectors;
552+ clone->hard_nr_sectors = rq->hard_nr_sectors;
553+ clone->current_nr_sectors = rq->current_nr_sectors;
554+ clone->hard_cur_sectors = rq->hard_cur_sectors;
555+ clone->nr_phys_segments = rq->nr_phys_segments;
556+ clone->ioprio = rq->ioprio;
557+ clone->buffer = rq->buffer;
558+ clone->cmd_len = rq->cmd_len;
559+ if (rq->cmd_len)
560+ clone->cmd = rq->cmd;
561+ clone->data_len = rq->data_len;
562+ clone->extra_len = rq->extra_len;
563+ clone->sense_len = rq->sense_len;
564+ clone->data = rq->data;
565+ clone->sense = rq->sense;
566+}
567+
568+static int clone_request_bios(struct request *clone, struct request *rq,
569+ struct mapped_device *md)
570+{
571+ struct bio *bio, *clone_bio;
572+ struct dm_clone_bio_info *info;
573+
574+ for (bio = rq->bio; bio; bio = bio->bi_next) {
575+ info = alloc_bio_info(md);
576+ if (!info)
577+ goto free_and_out;
578+
579+ clone_bio = bio_alloc_bioset(GFP_ATOMIC, bio->bi_max_vecs,
580+ md->bs);
581+ if (!clone_bio) {
582+ free_bio_info(md, info);
583+ goto free_and_out;
584+ }
585+
586+ __bio_clone(clone_bio, bio);
587+ clone_bio->bi_destructor = dm_bio_destructor;
588+ clone_bio->bi_end_io = end_clone_bio;
589+ info->rq = clone;
590+ info->orig = bio;
591+ clone_bio->bi_private = info;
592+
593+ if (clone->bio) {
594+ clone->biotail->bi_next = clone_bio;
595+ clone->biotail = clone_bio;
596+ } else
597+ clone->bio = clone->biotail = clone_bio;
598+ }
599+
600+ return 0;
601+
602+free_and_out:
603+ free_bio_clone(clone);
604+
605+ return -ENOMEM;
606+}
607+
608+static int setup_clone(struct request *clone, struct request *rq,
609+ struct dm_rq_target_io *tio)
610+{
611+ int r;
612+
613+ blk_rq_init(NULL, clone);
614+
615+ r = clone_request_bios(clone, rq, tio->md);
616+ if (r)
617+ return r;
618+
619+ copy_request_info(clone, rq);
620+ clone->start_time = jiffies;
621+ clone->end_io = end_clone_request;
622+ clone->end_io_data = tio;
623+
624+ return 0;
625+}
626+
627+static inline int dm_flush_suspending(struct mapped_device *md)
628+{
629+ return !md->suspend_rq.data;
630+}
631+
632+/*
633+ * Called with the queue lock held.
634+ */
635+static int dm_prep_fn(struct request_queue *q, struct request *rq)
636+{
637+ struct mapped_device *md = (struct mapped_device *)q->queuedata;
638+ struct dm_rq_target_io *tio;
639+ struct request *clone;
640+
641+ if (unlikely(rq == &md->suspend_rq)) { /* Flush suspend marker */
642+ if (dm_flush_suspending(md)) {
643+ if (q->in_flight)
644+ return BLKPREP_DEFER;
645+ else {
646+ /* This device should be quiet now */
647+ __stop_queue(q);
648+ smp_mb();
649+ BUG_ON(atomic_read(&md->pending));
650+ wake_up(&md->wait);
651+ return BLKPREP_KILL;
652+ }
653+ } else
654+ /*
655+ * The suspend process was interrupted.
656+ * So no need to suspend now.
657+ */
658+ return BLKPREP_KILL;
659+ }
660+
661+ if (unlikely(rq->special)) {
662+ DMWARN("Already has something in rq->special.");
663+ return BLKPREP_KILL;
664+ }
665+
666+ if (unlikely(!dm_request_based(md))) {
667+ DMWARN("Request was queued into bio-based device");
668+ return BLKPREP_KILL;
669+ }
670+
671+ tio = alloc_rq_tio(md); /* Only one for each original request */
672+ if (!tio)
673+ /* -ENOMEM */
674+ return BLKPREP_DEFER;
675+
676+ tio->md = md;
677+ tio->ti = NULL;
678+ tio->orig = rq;
679+ tio->error = 0;
680+ memset(&tio->info, 0, sizeof(tio->info));
681+
682+ clone = &tio->clone;
683+ if (setup_clone(clone, rq, tio)) {
684+ /* -ENOMEM */
685+ free_rq_tio(md, tio);
686+ return BLKPREP_DEFER;
687+ }
688+
689+ rq->special = clone;
690+ rq->cmd_flags |= REQ_DONTPREP;
691+
692+ return BLKPREP_OK;
693+}
694+
695+static void map_request(struct dm_target *ti, struct request *rq,
696+ struct mapped_device *md)
697+{
698+ int r;
699+ struct request *clone = rq->special;
700+ struct dm_rq_target_io *tio = clone->end_io_data;
701+
702+ tio->ti = ti;
703+ atomic_inc(&md->pending);
704+
705+ /*
706+ * Although requests submitted to the md->queue are checked against
707+ * the table/queue limitations at submission time, the limitations
708+ * may be changed by a table swap while those already-checked
709+ * requests are still in the md->queue.
710+ * If the limitations have been shrunk in such a situation, we may be
711+ * dispatching requests that violate the current limitations here.
712+ * Since struct request is trusted by the block layer
713+ * and device drivers, dispatching such requests is dangerous
714+ * (e.g. it may easily cause a kernel panic),
715+ * so avoid dispatching such problematic requests in request-based dm.
716+ *
717+ * Since dm_kill_request() decrements md->pending, this check has to
718+ * be done after incrementing md->pending.
719+ */
720+ r = blk_rq_check_limits(rq->q, rq);
721+ if (unlikely(r)) {
722+ DMWARN("violating the queue limitation. the limitation may be"
723+ " shrunk while there are some requests in the queue.");
724+ dm_kill_request(clone, r);
725+ return;
726+ }
727+
728+ r = ti->type->map_rq(ti, clone, &tio->info);
729+ switch (r) {
730+ case DM_MAPIO_SUBMITTED:
731+ /* The target has taken the I/O to submit by itself later */
732+ break;
733+ case DM_MAPIO_REMAPPED:
734+ /* The target has remapped the I/O so dispatch it */
735+ dm_dispatch_request(clone);
736+ break;
737+ case DM_MAPIO_REQUEUE:
738+ /* The target wants to requeue the I/O */
739+ dm_requeue_request(clone);
740+ break;
741+ default:
742+ if (r > 0) {
743+ DMWARN("unimplemented target map return value: %d", r);
744+ BUG();
745+ }
746+
747+ /* The target wants to complete the I/O */
748+ dm_kill_request(clone, r);
749+ break;
750+ }
751+}
752+
753+/*
754+ * q->request_fn for request-based dm.
755+ * Called with the queue lock held.
756+ */
757+static void dm_request_fn(struct request_queue *q)
758+{
759+ struct mapped_device *md = (struct mapped_device *)q->queuedata;
760+ struct dm_table *map = dm_get_table(md);
761+ struct dm_target *ti;
762+ struct request *rq;
763+
764+ /*
765+ * The check for blk_queue_stopped() is needed here, because:
766+ * - device suspend uses blk_stop_queue() and expects that
767+ * no I/O will be dispatched any more after the queue stop
768+ * - generic_unplug_device() doesn't call q->request_fn()
769+ * when the queue is stopped, so no problem
770+ * - but underlying device drivers may call q->request_fn()
771+ * without the check through blk_run_queue()
772+ */
773+ while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) {
774+ rq = elv_next_request(q);
775+ if (!rq)
776+ goto plug_and_out;
777+
778+ ti = dm_table_find_target(map, rq->sector);
779+ if (ti->type->busy && ti->type->busy(ti))
780+ goto plug_and_out;
781+
782+ blkdev_dequeue_request(rq);
783+ spin_unlock(q->queue_lock);
784+ map_request(ti, rq, md);
785+ spin_lock_irq(q->queue_lock);
786+ }
787+
788+ goto out;
789+
790+plug_and_out:
791+ if (!elv_queue_empty(q))
792+ /* Some requests still remain, retry later */
793+ blk_plug_device(q);
794+
795+out:
796+ dm_table_put(map);
797+
798+ return;
799+}
800+
801+int dm_underlying_device_busy(struct request_queue *q)
802+{
803+ return blk_lld_busy(q);
804+}
805+EXPORT_SYMBOL_GPL(dm_underlying_device_busy);
806+
807+static int dm_lld_busy(struct request_queue *q)
808+{
809+ int r;
810+ struct mapped_device *md = q->queuedata;
811+ struct dm_table *map = dm_get_table(md);
812+
813+ if (!map || test_bit(DMF_BLOCK_IO, &md->flags))
814+ r = 1;
815+ else
816+ r = dm_table_any_busy_target(map);
817+
818+ dm_table_put(map);
819+ return r;
820+}
821+
822 static void dm_unplug_all(struct request_queue *q)
823 {
824 struct mapped_device *md = q->queuedata;
825 struct dm_table *map = dm_get_table(md);
826
827 if (map) {
828+ if (dm_request_based(md))
829+ generic_unplug_device(q);
830+
831 dm_table_unplug_all(map);
832 dm_table_put(map);
833 }
834@@ -955,6 +1618,12 @@ static int dm_any_congested(void *conges
835
836 if (!map || test_bit(DMF_BLOCK_IO, &md->flags))
837 r = bdi_bits;
838+ else if (dm_request_based(md))
839+ /*
840+ * Request-based dm only cares about its own queue when
841+ * queried for the congestion status of the request_queue
842+ */
843+ r = md->queue->backing_dev_info.state & bdi_bits;
844 else
845 r = dm_table_any_congested(map, bdi_bits);
846
847@@ -1075,10 +1744,22 @@ static struct mapped_device *alloc_dev(i
848 INIT_LIST_HEAD(&md->uevent_list);
849 spin_lock_init(&md->uevent_lock);
850
851- md->queue = blk_alloc_queue(GFP_KERNEL);
852+ md->queue = blk_init_queue(dm_request_fn, NULL);
853 if (!md->queue)
854 goto bad_queue;
855
856+ /*
857+ * Request-based dm devices cannot be stacked on top of bio-based dm
858+ * devices. The type of this dm device has not been decided yet,
859+ * although we initialized the queue using blk_init_queue().
860+ * The type is decided at the first table loading time.
861+ * To prevent problematic device stacking, clear the queue flag
862+ * for request stacking support until then.
863+ *
864+ * This queue is new, so no concurrency on the queue_flags.
865+ */
866+ queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
867+ md->saved_make_request_fn = md->queue->make_request_fn;
868 md->queue->queuedata = md;
869 md->queue->backing_dev_info.congested_fn = dm_any_congested;
870 md->queue->backing_dev_info.congested_data = md;
871@@ -1086,18 +1767,9 @@ static struct mapped_device *alloc_dev(i
872 blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
873 md->queue->unplug_fn = dm_unplug_all;
874 blk_queue_merge_bvec(md->queue, dm_merge_bvec);
875-
876- md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
877- if (!md->io_pool)
878- goto bad_io_pool;
879-
880- md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
881- if (!md->tio_pool)
882- goto bad_tio_pool;
883-
884- md->bs = bioset_create(16, 16);
885- if (!md->bs)
886- goto bad_no_bioset;
887+ blk_queue_softirq_done(md->queue, dm_softirq_done);
888+ blk_queue_prep_rq(md->queue, dm_prep_fn);
889+ blk_queue_lld_busy(md->queue, dm_lld_busy);
890
891 md->disk = alloc_disk(1);
892 if (!md->disk)
893@@ -1132,12 +1804,6 @@ static struct mapped_device *alloc_dev(i
894 bad_thread:
895 put_disk(md->disk);
896 bad_disk:
897- bioset_free(md->bs);
898-bad_no_bioset:
899- mempool_destroy(md->tio_pool);
900-bad_tio_pool:
901- mempool_destroy(md->io_pool);
902-bad_io_pool:
903 blk_cleanup_queue(md->queue);
904 bad_queue:
905 free_minor(minor);
906@@ -1159,9 +1825,12 @@ static void free_dev(struct mapped_devic
907 bdput(md->suspended_bdev);
908 }
909 destroy_workqueue(md->wq);
910- mempool_destroy(md->tio_pool);
911- mempool_destroy(md->io_pool);
912- bioset_free(md->bs);
913+ if (md->tio_pool)
914+ mempool_destroy(md->tio_pool);
915+ if (md->io_pool)
916+ mempool_destroy(md->io_pool);
917+ if (md->bs)
918+ bioset_free(md->bs);
919 del_gendisk(md->disk);
920 free_minor(minor);
921
922@@ -1224,6 +1893,16 @@ static int __bind(struct mapped_device *
923 dm_table_get(t);
924 dm_table_event_callback(t, event_callback, md);
925
926+ /*
927+ * If the old table type wasn't request-based, the queue hasn't
928+ * been stopped yet during suspension. So stop it now to prevent
929+ * I/O from being mapped before resume.
930+ * This must be done before setting the queue restrictions,
931+ * because request-based dm may start running right after they are set.
932+ */
933+ if (dm_table_request_based(t) && !blk_queue_stopped(q))
934+ stop_queue(q);
935+
936 write_lock(&md->map_lock);
937 md->map = t;
938 dm_table_set_restrictions(t, q);
939@@ -1346,7 +2025,11 @@ static int dm_wait_for_completion(struct
940 set_current_state(TASK_INTERRUPTIBLE);
941
942 smp_mb();
943- if (!atomic_read(&md->pending))
944+ if (dm_request_based(md)) {
945+ if (!atomic_read(&md->pending) &&
946+ blk_queue_stopped(md->queue))
947+ break;
948+ } else if (!atomic_read(&md->pending))
949 break;
950
951 if (signal_pending(current)) {
952@@ -1369,7 +2052,13 @@ static void __flush_deferred_io(struct m
953 struct bio *c;
954
955 while ((c = bio_list_pop(&md->deferred))) {
956- if (__split_bio(md, c))
957+ /*
958+ * Some bios might have been queued here during suspension,
959+ * before the device was switched to request-based dm at resume time
960+ */
961+ if (dm_request_based(md))
962+ generic_make_request(c);
963+ else if (__split_bio(md, c))
964 bio_io_error(c);
965 }
966
967@@ -1394,9 +2083,6 @@ static void dm_wq_work(struct work_struc
968
969 down_write(&md->io_lock);
970 switch (req->type) {
971- case DM_WQ_FLUSH_ALL:
972- __merge_pushback_list(md);
973- /* pass through */
974 case DM_WQ_FLUSH_DEFERRED:
975 __flush_deferred_io(md);
976 break;
977@@ -1451,6 +2137,88 @@ out:
978 return r;
979 }
980
981+static inline void dm_invalidate_flush_suspend(struct mapped_device *md)
982+{
983+ md->suspend_rq.data = (void *)0x1;
984+}
985+
986+static void dm_abort_suspend(struct mapped_device *md, int noflush)
987+{
988+ struct request_queue *q = md->queue;
989+ unsigned long flags;
990+
991+ /*
992+ * For flush suspend, invalidation and queue restart must be protected
993+ * by a single queue lock to prevent a race with dm_prep_fn().
994+ */
995+ spin_lock_irqsave(q->queue_lock, flags);
996+ if (!noflush)
997+ dm_invalidate_flush_suspend(md);
998+ __start_queue(q);
999+ spin_unlock_irqrestore(q->queue_lock, flags);
1000+}
1001+
1002+/*
1003+ * Additional suspend work for request-based dm.
1004+ *
1005+ * In request-based dm, stopping the request_queue prevents mapping.
1006+ * Even after the request_queue is stopped, requests submitted by the
1007+ * upper layer can still be inserted into it. So original (unmapped)
1008+ * requests are kept in the request_queue during suspension.
1009+ */
1010+static void dm_start_suspend(struct mapped_device *md, int noflush)
1011+{
1012+ struct request *rq = &md->suspend_rq;
1013+ struct request_queue *q = md->queue;
1014+ unsigned long flags;
1015+
1016+ if (noflush) {
1017+ stop_queue(q);
1018+ return;
1019+ }
1020+
1021+ /*
1022+ * For flush suspend, we need a marker to indicate the boundary
1023+ * between I/Os that need to be flushed and deferred I/Os, since
1024+ * all I/Os are queued in the request_queue during suspension.
1025+ *
1026+ * This marker must be inserted after setting DMF_BLOCK_IO,
1027+ * because dm_prep_fn() treats a missing DMF_BLOCK_IO as
1028+ * a suspend interruption.
1029+ */
1030+ spin_lock_irqsave(q->queue_lock, flags);
1031+ if (unlikely(rq->ref_count)) {
1032+ /*
1033+ * This can happen when the previous suspend was interrupted:
1034+ * the suspend_rq inserted for the previous suspend is still
1035+ * in the queue when this suspend is invoked.
1036+ *
1037+ * We could re-insert the suspend_rq by forcibly deleting it from
1038+ * the queue using list_del_init(&rq->queuelist),
1039+ * but that could easily break the block layer.
1040+ * So we don't re-insert the suspend_rq in such a case.
1041+ * The suspend_rq should already have been invalidated during
1042+ * the previous suspend interruption, so just wait for it
1043+ * to complete.
1044+ *
1045+ * This suspend will never complete, so warn the user to
1046+ * interrupt it and retry later.
1047+ */
1048+ BUG_ON(!rq->data);
1049+ spin_unlock_irqrestore(q->queue_lock, flags);
1050+
1051+ DMWARN("Invalidating the previous suspend is still in"
1052+ " progress. This suspend will be never done."
1053+ " Please interrupt this suspend and retry later.");
1054+ return;
1055+ }
1056+ spin_unlock_irqrestore(q->queue_lock, flags);
1057+
1058+ /* Now no user of the suspend_rq */
1059+ blk_rq_init(q, rq);
1060+ blk_insert_request(q, rq, 0, NULL);
1061+}
1062+
1063 /*
1064 * Functions to lock and unlock any filesystem running on the
1065 * device.
1066@@ -1526,7 +2294,7 @@ int dm_suspend(struct mapped_device *md,
1067 if (!md->suspended_bdev) {
1068 DMWARN("bdget failed in dm_suspend");
1069 r = -ENOMEM;
1070- goto flush_and_out;
1071+ goto out;
1072 }
1073
1074 /*
1075@@ -1549,6 +2317,9 @@ int dm_suspend(struct mapped_device *md,
1076 add_wait_queue(&md->wait, &wait);
1077 up_write(&md->io_lock);
1078
1079+ if (dm_request_based(md))
1080+ dm_start_suspend(md, noflush);
1081+
1082 /* unplug */
1083 if (map)
1084 dm_table_unplug_all(map);
1085@@ -1561,14 +2332,22 @@ int dm_suspend(struct mapped_device *md,
1086 down_write(&md->io_lock);
1087 remove_wait_queue(&md->wait, &wait);
1088
1089- if (noflush)
1090- __merge_pushback_list(md);
1091+ if (noflush) {
1092+ if (dm_request_based(md))
1093+ /* All requeued requests are already in md->queue */
1094+ clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
1095+ else
1096+ __merge_pushback_list(md);
1097+ }
1098 up_write(&md->io_lock);
1099
1100 /* were we interrupted ? */
1101 if (r < 0) {
1102 dm_queue_flush(md, DM_WQ_FLUSH_DEFERRED, NULL);
1103
1104+ if (dm_request_based(md))
1105+ dm_abort_suspend(md, noflush);
1106+
1107 unlock_fs(md);
1108 goto out; /* pushback list is already flushed, so skip flush */
1109 }
1110@@ -1577,14 +2356,6 @@ int dm_suspend(struct mapped_device *md,
1111
1112 set_bit(DMF_SUSPENDED, &md->flags);
1113
1114-flush_and_out:
1115- if (r && noflush)
1116- /*
1117- * Because there may be already I/Os in the pushback list,
1118- * flush them before return.
1119- */
1120- dm_queue_flush(md, DM_WQ_FLUSH_ALL, NULL);
1121-
1122 out:
1123 if (r && md->suspended_bdev) {
1124 bdput(md->suspended_bdev);
1125@@ -1617,6 +2388,14 @@ int dm_resume(struct mapped_device *md)
1126
1127 dm_queue_flush(md, DM_WQ_FLUSH_DEFERRED, NULL);
1128
1129+ /*
1130+ * Flushing deferred I/Os must be done after targets are resumed
1131+ * so that the targets can map them correctly.
1132+ * Request-based dm queues the deferred I/Os in its request_queue.
1133+ */
1134+ if (dm_request_based(md))
1135+ start_queue(md->queue);
1136+
1137 unlock_fs(md);
1138
1139 if (md->suspended_bdev) {
1140@@ -1698,6 +2477,65 @@ int dm_noflush_suspending(struct dm_targ
1141 }
1142 EXPORT_SYMBOL_GPL(dm_noflush_suspending);
1143
1144+int dm_init_md_mempool(struct mapped_device *md, int type)
1145+{
1146+ if (unlikely(type == DM_TYPE_NONE)) {
1147+ DMWARN("no type is specified, can't initialize mempool");
1148+ return -EINVAL;
1149+ }
1150+
1151+ if (md->mempool_type == type)
1152+ return 0;
1153+
1154+ if (md->map) {
1155+ /* The md is already in use; can't change the mempool type */
1156+ DMWARN("can't change mempool type after a table is bound");
1157+ return -EINVAL;
1158+ }
1159+
1160+ /* The md isn't in use yet, so we can still change the mempool type */
1161+ if (md->mempool_type != DM_TYPE_NONE) {
1162+ mempool_destroy(md->io_pool);
1163+ md->io_pool = NULL;
1164+ mempool_destroy(md->tio_pool);
1165+ md->tio_pool = NULL;
1166+ bioset_free(md->bs);
1167+ md->bs = NULL;
1168+ md->mempool_type = DM_TYPE_NONE;
1169+ }
1170+
1171+ md->io_pool = (type == DM_TYPE_BIO_BASED) ?
1172+ mempool_create_slab_pool(MIN_IOS, _io_cache) :
1173+ mempool_create_slab_pool(MIN_IOS, _bio_info_cache);
1174+ if (!md->io_pool)
1175+ return -ENOMEM;
1176+
1177+ md->tio_pool = (type == DM_TYPE_BIO_BASED) ?
1178+ mempool_create_slab_pool(MIN_IOS, _tio_cache) :
1179+ mempool_create_slab_pool(MIN_IOS, _rq_tio_cache);
1180+ if (!md->tio_pool)
1181+ goto free_io_pool_and_out;
1182+
1183+ md->bs = (type == DM_TYPE_BIO_BASED) ?
1184+ bioset_create(16, 16) : bioset_create(MIN_IOS, MIN_IOS);
1185+ if (!md->bs)
1186+ goto free_tio_pool_and_out;
1187+
1188+ md->mempool_type = type;
1189+
1190+ return 0;
1191+
1192+free_tio_pool_and_out:
1193+ mempool_destroy(md->tio_pool);
1194+ md->tio_pool = NULL;
1195+
1196+free_io_pool_and_out:
1197+ mempool_destroy(md->io_pool);
1198+ md->io_pool = NULL;
1199+
1200+ return -ENOMEM;
1201+}
1202+
1203 static struct block_device_operations dm_blk_dops = {
1204 .open = dm_blk_open,
1205 .release = dm_blk_close,
1206--- a/drivers/md/dm.h
1207+++ b/drivers/md/dm.h
1208@@ -23,6 +23,13 @@
1209 #define DM_SUSPEND_NOFLUSH_FLAG (1 << 1)
1210
1211 /*
1212+ * Type of table and mapped_device's mempool
1213+ */
1214+#define DM_TYPE_NONE 0
1215+#define DM_TYPE_BIO_BASED 1
1216+#define DM_TYPE_REQUEST_BASED 2
1217+
1218+/*
1219 * List of devices that a metadevice uses and should open/close.
1220 */
1221 struct dm_dev {
1222@@ -49,6 +56,10 @@ void dm_table_presuspend_targets(struct
1223 void dm_table_postsuspend_targets(struct dm_table *t);
1224 int dm_table_resume_targets(struct dm_table *t);
1225 int dm_table_any_congested(struct dm_table *t, int bdi_bits);
1226+int dm_table_any_busy_target(struct dm_table *t);
1227+int dm_table_set_type(struct dm_table *t);
1228+int dm_table_get_type(struct dm_table *t);
1229+int dm_table_request_based(struct dm_table *t);
1230 void dm_table_unplug_all(struct dm_table *t);
1231
1232 /*
1233@@ -97,10 +108,16 @@ void *dm_vcalloc(unsigned long nmemb, un
1234 union map_info *dm_get_mapinfo(struct bio *bio);
1235 int dm_open_count(struct mapped_device *md);
1236 int dm_lock_for_deletion(struct mapped_device *md);
1237+union map_info *dm_get_rq_mapinfo(struct request *rq);
1238
1239 void dm_kobject_uevent(struct mapped_device *md);
1240
1241 int dm_kcopyd_init(void);
1242 void dm_kcopyd_exit(void);
1243
1244+/*
1245+ * Mempool initializer for a mapped_device
1246+ */
1247+int dm_init_md_mempool(struct mapped_device *md, int type);
1248+
1249 #endif
1250--- a/drivers/md/dm-ioctl.c
1251+++ b/drivers/md/dm-ioctl.c
1252@@ -1046,6 +1046,12 @@ static int populate_table(struct dm_tabl
1253 next = spec->next;
1254 }
1255
1256+ r = dm_table_set_type(table);
1257+ if (r) {
1258+ DMWARN("unable to set table type");
1259+ return r;
1260+ }
1261+
1262 return dm_table_complete(table);
1263 }
1264
1265@@ -1070,6 +1076,13 @@ static int table_load(struct dm_ioctl *p
1266 goto out;
1267 }
1268
1269+ r = dm_init_md_mempool(md, dm_table_get_type(t));
1270+ if (r) {
1271+ DMWARN("unable to initialize the md mempools for this table");
1272+ dm_table_put(t);
1273+ goto out;
1274+ }
1275+
1276 down_write(&_hash_lock);
1277 hc = dm_get_mdptr(md);
1278 if (!hc || hc->md != md) {
1279--- a/drivers/md/dm-mpath.c
1280+++ b/drivers/md/dm-mpath.c
1281@@ -7,8 +7,6 @@
1282
1283 #include "dm.h"
1284 #include "dm-path-selector.h"
1285-#include "dm-bio-list.h"
1286-#include "dm-bio-record.h"
1287 #include "dm-uevent.h"
1288
1289 #include <linux/ctype.h>
1290@@ -83,7 +81,7 @@ struct multipath {
1291 unsigned pg_init_count; /* Number of times pg_init called */
1292
1293 struct work_struct process_queued_ios;
1294- struct bio_list queued_ios;
1295+ struct list_head queued_ios;
1296 unsigned queue_size;
1297
1298 struct work_struct trigger_event;
1299@@ -100,7 +98,6 @@ struct multipath {
1300 */
1301 struct dm_mpath_io {
1302 struct pgpath *pgpath;
1303- struct dm_bio_details details;
1304 };
1305
1306 typedef int (*action_fn) (struct pgpath *pgpath);
1307@@ -197,6 +194,7 @@ static struct multipath *alloc_multipath
1308 m = kzalloc(sizeof(*m), GFP_KERNEL);
1309 if (m) {
1310 INIT_LIST_HEAD(&m->priority_groups);
1311+ INIT_LIST_HEAD(&m->queued_ios);
1312 spin_lock_init(&m->lock);
1313 m->queue_io = 1;
1314 INIT_WORK(&m->process_queued_ios, process_queued_ios);
1315@@ -321,12 +319,13 @@ static int __must_push_back(struct multi
1316 dm_noflush_suspending(m->ti));
1317 }
1318
1319-static int map_io(struct multipath *m, struct bio *bio,
1320+static int map_io(struct multipath *m, struct request *clone,
1321 struct dm_mpath_io *mpio, unsigned was_queued)
1322 {
1323 int r = DM_MAPIO_REMAPPED;
1324 unsigned long flags;
1325 struct pgpath *pgpath;
1326+ struct block_device *bdev;
1327
1328 spin_lock_irqsave(&m->lock, flags);
1329
1330@@ -343,16 +342,18 @@ static int map_io(struct multipath *m, s
1331 if ((pgpath && m->queue_io) ||
1332 (!pgpath && m->queue_if_no_path)) {
1333 /* Queue for the daemon to resubmit */
1334- bio_list_add(&m->queued_ios, bio);
1335+ list_add_tail(&clone->queuelist, &m->queued_ios);
1336 m->queue_size++;
1337 if ((m->pg_init_required && !m->pg_init_in_progress) ||
1338 !m->queue_io)
1339 queue_work(kmultipathd, &m->process_queued_ios);
1340 pgpath = NULL;
1341 r = DM_MAPIO_SUBMITTED;
1342- } else if (pgpath)
1343- bio->bi_bdev = pgpath->path.dev->bdev;
1344- else if (__must_push_back(m))
1345+ } else if (pgpath) {
1346+ bdev = pgpath->path.dev->bdev;
1347+ clone->q = bdev_get_queue(bdev);
1348+ clone->rq_disk = bdev->bd_disk;
1349+ } else if (__must_push_back(m))
1350 r = DM_MAPIO_REQUEUE;
1351 else
1352 r = -EIO; /* Failed */
1353@@ -395,30 +396,31 @@ static void dispatch_queued_ios(struct m
1354 {
1355 int r;
1356 unsigned long flags;
1357- struct bio *bio = NULL, *next;
1358 struct dm_mpath_io *mpio;
1359 union map_info *info;
1360+ struct request *clone, *n;
1361+ LIST_HEAD(cl);
1362
1363 spin_lock_irqsave(&m->lock, flags);
1364- bio = bio_list_get(&m->queued_ios);
1365+ list_splice_init(&m->queued_ios, &cl);
1366 spin_unlock_irqrestore(&m->lock, flags);
1367
1368- while (bio) {
1369- next = bio->bi_next;
1370- bio->bi_next = NULL;
1371+ list_for_each_entry_safe(clone, n, &cl, queuelist) {
1372+ list_del_init(&clone->queuelist);
1373
1374- info = dm_get_mapinfo(bio);
1375+ info = dm_get_rq_mapinfo(clone);
1376 mpio = info->ptr;
1377
1378- r = map_io(m, bio, mpio, 1);
1379- if (r < 0)
1380- bio_endio(bio, r);
1381- else if (r == DM_MAPIO_REMAPPED)
1382- generic_make_request(bio);
1383- else if (r == DM_MAPIO_REQUEUE)
1384- bio_endio(bio, -EIO);
1385-
1386- bio = next;
1387+ r = map_io(m, clone, mpio, 1);
1388+ if (r < 0) {
1389+ mempool_free(mpio, m->mpio_pool);
1390+ dm_kill_request(clone, r);
1391+ } else if (r == DM_MAPIO_REMAPPED)
1392+ dm_dispatch_request(clone);
1393+ else if (r == DM_MAPIO_REQUEUE) {
1394+ mempool_free(mpio, m->mpio_pool);
1395+ dm_requeue_request(clone);
1396+ }
1397 }
1398 }
1399
1400@@ -833,21 +835,24 @@ static void multipath_dtr(struct dm_targ
1401 }
1402
1403 /*
1404- * Map bios, recording original fields for later in case we have to resubmit
1405+ * Map cloned requests
1406 */
1407-static int multipath_map(struct dm_target *ti, struct bio *bio,
1408+static int multipath_map(struct dm_target *ti, struct request *clone,
1409 union map_info *map_context)
1410 {
1411 int r;
1412 struct dm_mpath_io *mpio;
1413 struct multipath *m = (struct multipath *) ti->private;
1414
1415- mpio = mempool_alloc(m->mpio_pool, GFP_NOIO);
1416- dm_bio_record(&mpio->details, bio);
1417+ mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
1418+ if (!mpio)
1419+ /* ENOMEM, requeue */
1420+ return DM_MAPIO_REQUEUE;
1421+ memset(mpio, 0, sizeof(*mpio));
1422
1423 map_context->ptr = mpio;
1424- bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
1425- r = map_io(m, bio, mpio, 0);
1426+ clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
1427+ r = map_io(m, clone, mpio, 0);
1428 if (r < 0 || r == DM_MAPIO_REQUEUE)
1429 mempool_free(mpio, m->mpio_pool);
1430
1431@@ -1129,53 +1134,41 @@ static void activate_path(struct work_st
1432 /*
1433 * end_io handling
1434 */
1435-static int do_end_io(struct multipath *m, struct bio *bio,
1436+static int do_end_io(struct multipath *m, struct request *clone,
1437 int error, struct dm_mpath_io *mpio)
1438 {
1439+ /*
1440+ * We don't queue any clone request inside the multipath target
1441+ * during end I/O handling, since those clone requests don't have
1442+ * bio clones. If we queued them inside the multipath target,
1443+ * we would have to make bio clones, which requires memory allocation.
1444+ * (See drivers/md/dm.c:end_clone_bio() for why the clone requests
1445+ * don't have bio clones.)
1446+ * Instead of queueing the clone request here, we queue the original
1447+ * request into dm core, which will remake a clone request with
1448+ * fresh bio clones and resubmit it later.
1449+ */
1450+ int r = DM_ENDIO_REQUEUE;
1451 unsigned long flags;
1452
1453- if (!error)
1454+ if (!error && !clone->errors)
1455 return 0; /* I/O complete */
1456
1457- if ((error == -EWOULDBLOCK) && bio_rw_ahead(bio))
1458- return error;
1459-
1460 if (error == -EOPNOTSUPP)
1461 return error;
1462
1463- spin_lock_irqsave(&m->lock, flags);
1464- if (!m->nr_valid_paths) {
1465- if (__must_push_back(m)) {
1466- spin_unlock_irqrestore(&m->lock, flags);
1467- return DM_ENDIO_REQUEUE;
1468- } else if (!m->queue_if_no_path) {
1469- spin_unlock_irqrestore(&m->lock, flags);
1470- return -EIO;
1471- } else {
1472- spin_unlock_irqrestore(&m->lock, flags);
1473- goto requeue;
1474- }
1475- }
1476- spin_unlock_irqrestore(&m->lock, flags);
1477-
1478 if (mpio->pgpath)
1479 fail_path(mpio->pgpath);
1480
1481- requeue:
1482- dm_bio_restore(&mpio->details, bio);
1483-
1484- /* queue for the daemon to resubmit or fail */
1485 spin_lock_irqsave(&m->lock, flags);
1486- bio_list_add(&m->queued_ios, bio);
1487- m->queue_size++;
1488- if (!m->queue_io)
1489- queue_work(kmultipathd, &m->process_queued_ios);
1490+ if (!m->nr_valid_paths && !m->queue_if_no_path && !__must_push_back(m))
1491+ r = -EIO;
1492 spin_unlock_irqrestore(&m->lock, flags);
1493
1494- return DM_ENDIO_INCOMPLETE; /* io not complete */
1495+ return r;
1496 }
1497
1498-static int multipath_end_io(struct dm_target *ti, struct bio *bio,
1499+static int multipath_end_io(struct dm_target *ti, struct request *clone,
1500 int error, union map_info *map_context)
1501 {
1502 struct multipath *m = ti->private;
1503@@ -1184,14 +1177,13 @@ static int multipath_end_io(struct dm_ta
1504 struct path_selector *ps;
1505 int r;
1506
1507- r = do_end_io(m, bio, error, mpio);
1508+ r = do_end_io(m, clone, error, mpio);
1509 if (pgpath) {
1510 ps = &pgpath->pg->ps;
1511 if (ps->type->end_io)
1512 ps->type->end_io(ps, &pgpath->path);
1513 }
1514- if (r != DM_ENDIO_INCOMPLETE)
1515- mempool_free(mpio, m->mpio_pool);
1516+ mempool_free(mpio, m->mpio_pool);
1517
1518 return r;
1519 }
1520@@ -1427,6 +1419,75 @@ static int multipath_ioctl(struct dm_tar
1521 bdev->bd_disk, cmd, arg);
1522 }
1523
1524+static int __pgpath_busy(struct pgpath *pgpath)
1525+{
1526+ struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
1527+
1528+ return dm_underlying_device_busy(q);
1529+}
1530+
1531+/*
1532+ * We return "busy", only when we can map I/Os but underlying devices
1533+ * are busy (so even if we map I/Os now, the I/Os will wait on
1534+ * the underlying queue).
1535+ * In other words, if we want to kill I/Os or queue them inside us
1536+ * due to map unavailability, we don't return "busy". Otherwise,
1537+ * dm core won't give us the I/Os and we can't do what we want.
1538+ */
1539+static int multipath_busy(struct dm_target *ti)
1540+{
1541+ int busy = 0, has_active = 0;
1542+ struct multipath *m = (struct multipath *) ti->private;
1543+ struct priority_group *pg;
1544+ struct pgpath *pgpath;
1545+ unsigned long flags;
1546+
1547+ spin_lock_irqsave(&m->lock, flags);
1548+
1549+ /* Guess which priority_group will be used at next mapping time */
1550+ if (unlikely(!m->current_pgpath && m->next_pg))
1551+ pg = m->next_pg;
1552+ else if (likely(m->current_pg))
1553+ pg = m->current_pg;
1554+ else
1555+ /*
1556+ * We don't know which pg will be used at the next mapping time.
1557+ * We don't call __choose_pgpath() here, to avoid triggering
1558+ * pg_init just by checking for busyness.
1559+ * So we don't know whether the underlying devices we will be using
1560+ * at the next mapping time are busy or not. Just try mapping.
1561+ */
1562+ goto out;
1563+
1564+ /*
1565+ * If there is at least one non-busy active path, the path selector
1566+ * will be able to select it. So we consider such a pg as not busy.
1567+ */
1568+ busy = 1;
1569+ list_for_each_entry(pgpath, &pg->pgpaths, list)
1570+ if (pgpath->is_active) {
1571+ has_active = 1;
1572+
1573+ if (!__pgpath_busy(pgpath)) {
1574+ busy = 0;
1575+ break;
1576+ }
1577+ }
1578+
1579+ if (!has_active)
1580+ /*
1581+ * No active path in this pg, so this pg won't be used and
1582+ * the current_pg will be changed at next mapping time.
1583+ * We need to try mapping to determine it.
1584+ */
1585+ busy = 0;
1586+
1587+out:
1588+ spin_unlock_irqrestore(&m->lock, flags);
1589+
1590+ return busy;
1591+}
1592+
1593 /*-----------------------------------------------------------------
1594 * Module setup
1595 *---------------------------------------------------------------*/
1596@@ -1436,13 +1497,14 @@ static struct target_type multipath_targ
1597 .module = THIS_MODULE,
1598 .ctr = multipath_ctr,
1599 .dtr = multipath_dtr,
1600- .map = multipath_map,
1601- .end_io = multipath_end_io,
1602+ .map_rq = multipath_map,
1603+ .rq_end_io = multipath_end_io,
1604 .presuspend = multipath_presuspend,
1605 .resume = multipath_resume,
1606 .status = multipath_status,
1607 .message = multipath_message,
1608 .ioctl = multipath_ioctl,
1609+ .busy = multipath_busy,
1610 };
1611
1612 static int __init dm_multipath_init(void)
1613--- a/drivers/md/dm-table.c
1614+++ b/drivers/md/dm-table.c
1615@@ -108,6 +108,8 @@ static void combine_restrictions_low(str
1616 lhs->bounce_pfn = min_not_zero(lhs->bounce_pfn, rhs->bounce_pfn);
1617
1618 lhs->no_cluster |= rhs->no_cluster;
1619+
1620+ lhs->no_request_stacking |= rhs->no_request_stacking;
1621 }
1622
1623 /*
1624@@ -522,6 +524,8 @@ void dm_set_device_limits(struct dm_targ
1625 rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn);
1626
1627 rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
1628+
1629+ rs->no_request_stacking |= !blk_queue_stackable(q);
1630 }
1631 EXPORT_SYMBOL_GPL(dm_set_device_limits);
1632
1633@@ -731,6 +735,66 @@ int dm_table_add_target(struct dm_table
1634 return r;
1635 }
1636
1637+int dm_table_set_type(struct dm_table *t)
1638+{
1639+ int i;
1640+ int bio_based = 0, request_based = 0;
1641+ struct dm_target *tgt;
1642+
1643+ for (i = 0; i < t->num_targets; i++) {
1644+ tgt = t->targets + i;
1645+ if (tgt->type->map_rq)
1646+ request_based = 1;
1647+ else
1648+ bio_based = 1;
1649+
1650+ if (bio_based && request_based) {
1651+ DMWARN("Inconsistent table: different target types"
1652+ " can't be mixed up");
1653+ return -EINVAL;
1654+ }
1655+ }
1656+
1657+ if (bio_based) {
1658+ /* We must use this table as bio-based */
1659+ t->limits.no_request_stacking = 1;
1660+ return 0;
1661+ }
1662+
1663+ BUG_ON(!request_based); /* No targets in this table */
1664+
1665+ /* Non-request-stackable devices can't be used for request-based dm */
1666+ if (t->limits.no_request_stacking) {
1667+ DMWARN("table load rejected: including non-request-stackable"
1668+ " devices");
1669+ return -EINVAL;
1670+ }
1671+
1672+ /*
1673+ * Request-based dm currently supports only tables with a single target.
1674+ * To support multiple targets, request splitting support is needed,
1675+ * and that requires substantial changes in the block layer
1676+ * (e.g. to the request completion process for partial completion).
1677+ */
1678+ if (t->num_targets > 1) {
1679+ DMWARN("Request-based dm doesn't support multiple targets yet");
1680+ return -EINVAL;
1681+ }
1682+
1683+ return 0;
1684+}
1685+
1686+int dm_table_get_type(struct dm_table *t)
1687+{
1688+ return t->limits.no_request_stacking ?
1689+ DM_TYPE_BIO_BASED : DM_TYPE_REQUEST_BASED;
1690+}
1691+
1692+int dm_table_request_based(struct dm_table *t)
1693+{
1694+ return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED;
1695+}
1696+
1697 static int setup_indexes(struct dm_table *t)
1698 {
1699 int i;
1700@@ -861,6 +925,10 @@ void dm_table_set_restrictions(struct dm
1701 else
1702 queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);
1703
1704+ if (t->limits.no_request_stacking)
1705+ queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, q);
1706+ else
1707+ queue_flag_set_unlocked(QUEUE_FLAG_STACKABLE, q);
1708 }
1709
1710 unsigned int dm_table_get_num_targets(struct dm_table *t)
1711@@ -949,6 +1017,20 @@ int dm_table_any_congested(struct dm_tab
1712 return r;
1713 }
1714
1715+int dm_table_any_busy_target(struct dm_table *t)
1716+{
1717+ int i;
1718+ struct dm_target *ti;
1719+
1720+ for (i = 0; i < t->num_targets; i++) {
1721+ ti = t->targets + i;
1722+ if (ti->type->busy && ti->type->busy(ti))
1723+ return 1;
1724+ }
1725+
1726+ return 0;
1727+}
1728+
1729 void dm_table_unplug_all(struct dm_table *t)
1730 {
1731 struct dm_dev *dd;
1732--- a/include/linux/device-mapper.h
1733+++ b/include/linux/device-mapper.h
1734@@ -46,6 +46,8 @@ typedef void (*dm_dtr_fn) (struct dm_tar
1735 */
1736 typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio,
1737 union map_info *map_context);
1738+typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone,
1739+ union map_info *map_context);
1740
1741 /*
1742 * Returns:
1743@@ -58,6 +60,9 @@ typedef int (*dm_map_fn) (struct dm_targ
1744 typedef int (*dm_endio_fn) (struct dm_target *ti,
1745 struct bio *bio, int error,
1746 union map_info *map_context);
1747+typedef int (*dm_request_endio_fn) (struct dm_target *ti,
1748+ struct request *clone, int error,
1749+ union map_info *map_context);
1750
1751 typedef void (*dm_flush_fn) (struct dm_target *ti);
1752 typedef void (*dm_presuspend_fn) (struct dm_target *ti);
1753@@ -77,6 +82,13 @@ typedef int (*dm_ioctl_fn) (struct dm_ta
1754 typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm,
1755 struct bio_vec *biovec, int max_size);
1756
1757+/*
1758+ * Returns:
1759+ * 0: The target can handle the next I/O immediately.
1760+ * 1: The target can't handle the next I/O immediately.
1761+ */
1762+typedef int (*dm_busy_fn) (struct dm_target *ti);
1763+
1764 void dm_error(const char *message);
1765
1766 /*
1767@@ -103,7 +115,9 @@ struct target_type {
1768 dm_ctr_fn ctr;
1769 dm_dtr_fn dtr;
1770 dm_map_fn map;
1771+ dm_map_request_fn map_rq;
1772 dm_endio_fn end_io;
1773+ dm_request_endio_fn rq_end_io;
1774 dm_flush_fn flush;
1775 dm_presuspend_fn presuspend;
1776 dm_postsuspend_fn postsuspend;
1777@@ -113,6 +127,7 @@ struct target_type {
1778 dm_message_fn message;
1779 dm_ioctl_fn ioctl;
1780 dm_merge_fn merge;
1781+ dm_busy_fn busy;
1782 };
1783
1784 struct io_restrictions {
1785@@ -125,6 +140,7 @@ struct io_restrictions {
1786 unsigned short max_hw_segments;
1787 unsigned short max_phys_segments;
1788 unsigned char no_cluster; /* inverted so that 0 is default */
1789+ unsigned char no_request_stacking;
1790 };
1791
1792 struct dm_target {
1793@@ -348,4 +364,12 @@ static inline unsigned long to_bytes(sec
1794 return (n << SECTOR_SHIFT);
1795 }
1796
1797+/*-----------------------------------------------------------------
1798+ * Helper for block layer and dm core operations
1799+ *---------------------------------------------------------------*/
1800+void dm_dispatch_request(struct request *rq);
1801+void dm_requeue_request(struct request *rq);
1802+void dm_kill_request(struct request *rq, int error);
1803+int dm_underlying_device_busy(struct request_queue *q);
1804+
1805 #endif /* _LINUX_DEVICE_MAPPER_H */
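As a usage note on the helpers declared above: a target that keeps hold of a clone (DM_MAPIO_SUBMITTED) later resubmits it through these exports, as dispatch_queued_ios() in dm-mpath.c does. A hedged sketch of that decision step follows; example_resubmit() is a hypothetical helper name, not an interface from the patch.

/*
 * Sketch only, not part of the patch: acting on a mapping decision "r" for a
 * previously held clone, mirroring the tail of dispatch_queued_ios() in
 * dm-mpath.c.
 */
#include <linux/blkdev.h>
#include <linux/device-mapper.h>

static void example_resubmit(struct request *clone, int r)
{
	if (r == DM_MAPIO_REMAPPED)
		dm_dispatch_request(clone);	/* clone->q was set while mapping */
	else if (r == DM_MAPIO_REQUEUE)
		dm_requeue_request(clone);	/* requeue the original request */
	else if (r < 0)
		dm_kill_request(clone, r);	/* fail the original request */
	/* DM_MAPIO_SUBMITTED: keep holding the clone and try again later */
}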