block/blk-flush.c
/*
 * Functions to sequence FLUSH and FUA writes.
 *
 * Copyright (C) 2011		Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * REQ_{FLUSH|FUA} requests are decomposed into sequences consisting of
 * three optional steps - PREFLUSH, DATA and POSTFLUSH - according to the
 * request properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_FLUSH makes sense, which
 * indicates a simple flush request.  If there is data, REQ_FLUSH indicates
 * that the device cache should be flushed before the data is executed, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have a writeback cache, FLUSH and FUA don't make
 * any difference.  The requests are either completed immediately if
 * there's no data or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_FLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA, REQ_FLUSH
 * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
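 * For example (see blk_flush_policy() below): on a queue advertising
 * REQ_FLUSH but not REQ_FUA, a REQ_FLUSH|REQ_FUA write with data is
 * sequenced as PREFLUSH + DATA + POSTFLUSH, while on a queue advertising
 * both flags the same request becomes PREFLUSH + DATA with REQ_FUA
 * passed through to the device.
 *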
 * The actual execution of flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * q->flush_queue[q->flush_pending_idx].  Once certain criteria are met, a
 * flush is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending proceed to the next
 * step.  This allows arbitrary merging of different types of FLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * a flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 *     starvation in the unlikely case where there is a continuous stream
 *     of FUA (without FLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced FLUSH/FUA request with DATA is completed twice.
 * Once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing REQ_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each FLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/blk-mq.h>

#include "blk.h"
#include "blk-mq.h"

/* FLUSH/FUA sequences */
enum {
	REQ_FSEQ_PREFLUSH	= (1 << 0), /* pre-flushing in progress */
	REQ_FSEQ_DATA		= (1 << 1), /* data write in progress */
	REQ_FSEQ_POSTFLUSH	= (1 << 2), /* post-flushing in progress */
	REQ_FSEQ_DONE		= (1 << 3),

	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
				  REQ_FSEQ_POSTFLUSH,

	/*
	 * If flush has been pending longer than the following timeout,
	 * it's issued even if flush_data requests are still in flight.
	 */
	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
};

static bool blk_kick_flush(struct request_queue *q);

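/*
 * Map the queue's flush capability flags (@fflags) and @rq's own
 * REQ_FLUSH/REQ_FUA flags to the mask of REQ_FSEQ_* steps this request
 * has to go through.
 */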
static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
{
	unsigned int policy = 0;

	if (blk_rq_sectors(rq))
		policy |= REQ_FSEQ_DATA;

	if (fflags & REQ_FLUSH) {
		if (rq->cmd_flags & REQ_FLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))
			policy |= REQ_FSEQ_POSTFLUSH;
	}
	return policy;
}

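/*
 * Sequence steps complete in ascending bit order, so the lowest clear
 * bit in @rq->flush.seq identifies the step to execute next.
 */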
static unsigned int blk_flush_cur_seq(struct request *rq)
{
	return 1 << ffz(rq->flush.seq);
}

static void blk_flush_restore_request(struct request *rq)
{
	/*
	 * After flush data completion, @rq->bio is %NULL but we need to
	 * complete the bio again.  @rq->biotail is guaranteed to equal the
	 * original @rq->bio.  Restore it.
	 */
	rq->bio = rq->biotail;

	/* make @rq a normal request */
	rq->cmd_flags &= ~REQ_FLUSH_SEQ;
	rq->end_io = rq->flush.saved_end_io;
}

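/*
 * Dispatch @rq for execution.  On blk-mq it goes through the requeue
 * list; on the legacy path it is put on q->queue_head directly.  Returns
 * %true if the caller should kick the queue to start processing, %false
 * if the blk-mq requeue machinery will run the queue itself.
 */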
static bool blk_flush_queue_rq(struct request *rq, bool add_front)
{
	if (rq->q->mq_ops) {
		struct request_queue *q = rq->q;

		blk_mq_add_to_requeue_list(rq, add_front);
		blk_mq_kick_requeue_list(q);
		return false;
	} else {
		if (add_front)
			list_add(&rq->queuelist, &rq->q->queue_head);
		else
			list_add_tail(&rq->queuelist, &rq->q->queue_head);
		return true;
	}
}

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: FLUSH/FUA request being sequenced
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock or q->mq_flush_lock)
 *
 * RETURNS:
 * %true if requests were added to the dispatch queue, %false otherwise.
 */
static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
				   int error)
{
	struct request_queue *q = rq->q;
	struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
	bool queued = false, kicked;

	BUG_ON(rq->flush.seq & seq);
	rq->flush.seq |= seq;

	if (likely(!error))
		seq = blk_flush_cur_seq(rq);
	else
		seq = REQ_FSEQ_DONE;

	switch (seq) {
	case REQ_FSEQ_PREFLUSH:
	case REQ_FSEQ_POSTFLUSH:
		/* queue for flush */
		if (list_empty(pending))
			q->flush_pending_since = jiffies;
		list_move_tail(&rq->flush.list, pending);
		break;

	case REQ_FSEQ_DATA:
		list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
		queued = blk_flush_queue_rq(rq, true);
		break;

	case REQ_FSEQ_DONE:
		/*
		 * @rq was previously adjusted by blk_flush_issue() for
		 * flush sequencing and may already have gone through the
		 * flush data request completion path.  Restore @rq for
		 * normal completion and end it.
		 */
		BUG_ON(!list_empty(&rq->queuelist));
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
		if (q->mq_ops)
			blk_mq_end_request(rq, error);
		else
			__blk_end_request_all(rq, error);
		break;

	default:
		BUG();
	}

	kicked = blk_kick_flush(q);
	return kicked | queued;
}

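/*
 * Completion handler for the flush request itself.  Retires the
 * just-finished flush by toggling the running index and advances every
 * request that was waiting on it to the next step of its sequence.
 */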
static void flush_end_io(struct request *flush_rq, int error)
{
	struct request_queue *q = flush_rq->q;
	struct list_head *running;
	bool queued = false;
	struct request *rq, *n;
	unsigned long flags = 0;

	if (q->mq_ops) {
		spin_lock_irqsave(&q->mq_flush_lock, flags);
		flush_rq->tag = -1;
	}

	running = &q->flush_queue[q->flush_running_idx];
	BUG_ON(q->flush_pending_idx == q->flush_running_idx);

	/* account completion of the flush request */
	q->flush_running_idx ^= 1;

	if (!q->mq_ops)
		elv_completed_request(q, flush_rq);

	/* and push the waiting requests to the next stage */
	list_for_each_entry_safe(rq, n, running, flush.list) {
		unsigned int seq = blk_flush_cur_seq(rq);

		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
		queued |= blk_flush_complete_seq(rq, seq, error);
	}

	/*
	 * Kick the queue to avoid a stall in two cases:
	 * 1. Moving a request silently to an empty queue_head may stall
	 *    the queue.
	 * 2. When a flush request is running in a non-queueable queue, the
	 *    queue is held.  Restart the queue after the flush request is
	 *    finished to avoid a stall.
	 * This function is called from the request completion path and
	 * calling directly into request_fn may confuse the driver.  Always
	 * use kblockd.
	 */
	if (queued || q->flush_queue_delayed) {
		WARN_ON(q->mq_ops);
		blk_run_queue_async(q);
	}
	q->flush_queue_delayed = 0;
	if (q->mq_ops)
		spin_unlock_irqrestore(&q->mq_flush_lock, flags);
}

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 *
 * Flush related states of @q have changed, consider issuing flush request.
 * Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock or q->mq_flush_lock)
 *
 * RETURNS:
 * %true if flush was issued, %false otherwise.
 */
static bool blk_kick_flush(struct request_queue *q)
{
	struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
	struct request *first_rq =
		list_first_entry(pending, struct request, flush.list);
	struct request *flush_rq = q->flush_rq;

	/* C1 described at the top of this file */
	if (q->flush_pending_idx != q->flush_running_idx || list_empty(pending))
		return false;

	/* C2 and C3 */
	if (!list_empty(&q->flush_data_in_flight) &&
	    time_before(jiffies,
			q->flush_pending_since + FLUSH_PENDING_TIMEOUT))
		return false;

	/*
	 * Issue flush and toggle pending_idx.  This makes pending_idx
	 * different from running_idx, which means flush is in flight.
	 */
	q->flush_pending_idx ^= 1;

	blk_rq_init(q, flush_rq);
	if (q->mq_ops)
		blk_mq_clone_flush_request(flush_rq, first_rq);

	flush_rq->cmd_type = REQ_TYPE_FS;
	flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
	flush_rq->rq_disk = first_rq->rq_disk;
	flush_rq->end_io = flush_end_io;

	return blk_flush_queue_rq(flush_rq, false);
}

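/* Completion handler for the DATA step on the legacy (non-mq) path. */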
static void flush_data_end_io(struct request *rq, int error)
{
	struct request_queue *q = rq->q;

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
		blk_run_queue_async(q);
}

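/*
 * Completion handler for the DATA step on the blk-mq path.  Unlike the
 * legacy path, blk-mq completions are not serialized by q->queue_lock,
 * so the flush state machine is protected by q->mq_flush_lock here.
 */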
static void mq_flush_data_end_io(struct request *rq, int error)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	unsigned long flags;

	ctx = rq->mq_ctx;
	hctx = q->mq_ops->map_queue(q, ctx->cpu);

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	spin_lock_irqsave(&q->mq_flush_lock, flags);
	if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
		blk_mq_run_hw_queue(hctx, true);
	spin_unlock_irqrestore(&q->mq_flush_lock, flags);
}

/**
 * blk_insert_flush - insert a new FLUSH/FUA request
 * @rq: request to insert
 *
 * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH
 * insertions, or from __blk_mq_run_hw_queue() when dispatching a request.
 * @rq is being submitted.  Analyze what needs to be done and put it on the
 * right queue.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock) in !mq case
 */
void blk_insert_flush(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned int fflags = q->flush_flags;	/* may change, cache */
	unsigned int policy = blk_flush_policy(fflags, rq);

	/*
	 * @policy now records what operations need to be done.  Adjust
	 * REQ_FLUSH and FUA for the driver.
	 */
	rq->cmd_flags &= ~REQ_FLUSH;
	if (!(fflags & REQ_FUA))
		rq->cmd_flags &= ~REQ_FUA;

	/*
	 * An empty flush handed down from a stacking driver may
	 * translate into nothing if the underlying device does not
	 * advertise a write-back cache.  In this case, simply
	 * complete the request.
	 */
	if (!policy) {
		if (q->mq_ops)
			blk_mq_end_request(rq, 0);
		else
			__blk_end_bidi_request(rq, 0, 0, 0);
		return;
	}

	BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */

	/*
	 * If there's data but flush is not necessary, the request can be
	 * processed directly without going through flush machinery.  Queue
	 * for normal execution.
	 */
	if ((policy & REQ_FSEQ_DATA) &&
	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
		if (q->mq_ops)
			blk_mq_insert_request(rq, false, false, true);
		else
			list_add_tail(&rq->queuelist, &q->queue_head);
		return;
	}

	/*
	 * @rq should go through flush machinery.  Mark it part of flush
	 * sequence and submit for further processing.
	 */
	memset(&rq->flush, 0, sizeof(rq->flush));
	INIT_LIST_HEAD(&rq->flush.list);
	rq->cmd_flags |= REQ_FLUSH_SEQ;
	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
	if (q->mq_ops) {
		rq->end_io = mq_flush_data_end_io;

		spin_lock_irq(&q->mq_flush_lock);
		blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
		spin_unlock_irq(&q->mq_flush_lock);
		return;
	}
	rq->end_io = flush_data_end_io;

	blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev: blockdev to issue flush for
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @error_sector: error sector (may be %NULL)
 *
 * Description:
 *    Issue a flush for the block device in question.  The caller can
 *    supply room for storing the error offset in case of a flush error,
 *    if they wish to.  The flush is submitted with submit_bio_wait(), so
 *    this function does not return until the flush has completed.
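 *
 *    A typical call flushes the whole device and ignores the error
 *    offset, e.g.:
 *
 *	ret = blkdev_issue_flush(bdev, GFP_KERNEL, NULL);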
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		       sector_t *error_sector)
{
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/*
	 * Some block devices may not have their queue correctly set up here
	 * (e.g. loop device without a backing file) and so issuing a flush
	 * here will panic.  Ensure there is a request function before issuing
	 * the flush.
	 */
	if (!q->make_request_fn)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	bio->bi_bdev = bdev;

	ret = submit_bio_wait(WRITE_FLUSH, bio);

	/*
	 * The driver must store the error location in ->bi_iter.bi_sector,
	 * if it supports it.  For non-stacked drivers, this should be
	 * copied from blk_rq_pos(rq).
	 */
	if (error_sector)
		*error_sector = bio->bi_iter.bi_sector;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);

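/*
 * Allocate the blk-mq flush request.  It is sized to hold the driver's
 * per-request payload (set->cmd_size) behind struct request, rounded up
 * to a whole number of cache lines.
 */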
static int blk_mq_init_flush(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;

	spin_lock_init(&q->mq_flush_lock);

	q->flush_rq = kzalloc(round_up(sizeof(struct request) +
				       set->cmd_size, cache_line_size()),
			      GFP_KERNEL);
	if (!q->flush_rq)
		return -ENOMEM;
	return 0;
}

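/*
 * Set up the per-queue flush machinery: the two pending/running lists
 * used for double buffering, the list of in-flight DATA requests and the
 * preallocated flush request.  blk_exit_flush() below is the teardown
 * counterpart.
 */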
int blk_init_flush(struct request_queue *q)
{
	INIT_LIST_HEAD(&q->flush_queue[0]);
	INIT_LIST_HEAD(&q->flush_queue[1]);
	INIT_LIST_HEAD(&q->flush_data_in_flight);

	if (q->mq_ops)
		return blk_mq_init_flush(q);

	q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
	if (!q->flush_rq)
		return -ENOMEM;

	return 0;
}

void blk_exit_flush(struct request_queue *q)
{
	kfree(q->flush_rq);
}