// SPDX-License-Identifier: GPL-2.0
/*
 * Functions to sequence PREFLUSH and FUA writes.
 *
 * Copyright (C) 2011 Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011 Tejun Heo <tj@kernel.org>
 *
 * REQ_{PREFLUSH|FUA} requests are decomposed to sequences consisting of three
 * optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request
 * properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_PREFLUSH makes sense, which
 * indicates a simple flush request.  If there is data, REQ_PREFLUSH indicates
 * that the device cache should be flushed before the data is executed, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have a writeback cache, PREFLUSH and FUA don't make
 * any difference.  The requests are either completed immediately if there's
 * no data or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_PREFLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA, REQ_PREFLUSH
 * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * fq->flush_queue[fq->flush_pending_idx].  Once certain criteria are met, a
 * REQ_OP_FLUSH is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending proceed to the next
 * step.  This allows arbitrary merging of different types of PREFLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 *     starvation in the unlikely case where there is a continuous stream of
 *     FUA (without PREFLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced PREFLUSH/FUA request with DATA is completed twice.
 * Once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing RQF_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each PREFLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/part_stat.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"
/* PREFLUSH/FUA sequences */
enum {
	REQ_FSEQ_PREFLUSH	= (1 << 0), /* pre-flushing in progress */
	REQ_FSEQ_DATA		= (1 << 1), /* data write in progress */
	REQ_FSEQ_POSTFLUSH	= (1 << 2), /* post-flushing in progress */
	REQ_FSEQ_DONE		= (1 << 3),

	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
				  REQ_FSEQ_POSTFLUSH,

	/*
	 * If flush has been pending longer than the following timeout,
	 * it's issued even if flush_data requests are still in flight.
	 */
	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
};
static void blk_kick_flush(struct request_queue *q,
			   struct blk_flush_queue *fq, blk_opf_t flags);
static inline struct blk_flush_queue *
blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
{
	return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
}
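
/*
 * Map the queue flags and request flags to the set of REQ_FSEQ_* steps
 * this request needs: DATA if it carries payload, and PREFLUSH/POSTFLUSH
 * only if the device advertises a writeback cache (POSTFLUSH only when
 * the device can't do FUA natively).
 */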
static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
{
	unsigned int policy = 0;

	if (blk_rq_sectors(rq))
		policy |= REQ_FSEQ_DATA;

	if (fflags & (1UL << QUEUE_FLAG_WC)) {
		if (rq->cmd_flags & REQ_PREFLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
		    (rq->cmd_flags & REQ_FUA))
			policy |= REQ_FSEQ_POSTFLUSH;
	}
	return policy;
}
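
/*
 * Sequence steps complete in bit order; the lowest clear bit in
 * rq->flush.seq is the step to execute next.
 */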
static unsigned int blk_flush_cur_seq(struct request *rq)
{
	return 1 << ffz(rq->flush.seq);
}
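
/*
 * Turn a sequenced PREFLUSH/FUA request back into a normal request so it
 * can be completed through the regular path once its sequence is done.
 */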
static void blk_flush_restore_request(struct request *rq)
{
	/*
	 * After flush data completion, @rq->bio is %NULL but we need to
	 * complete the bio again.  @rq->biotail is guaranteed to equal the
	 * original @rq->bio.  Restore it.
	 */
	rq->bio = rq->biotail;

	/* make @rq a normal request */
	rq->rq_flags &= ~RQF_FLUSH_SEQ;
	rq->end_io = rq->flush.saved_end_io;
}
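
/* Account the flush against the whole-device (part0) STAT_FLUSH counters. */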
static void blk_account_io_flush(struct request *rq)
{
	struct block_device *part = rq->q->disk->part0;

	part_stat_lock();
	part_stat_inc(part, ios[STAT_FLUSH]);
	part_stat_add(part, nsecs[STAT_FLUSH],
		      ktime_get_ns() - rq->start_time_ns);
	part_stat_unlock();
}
/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: PREFLUSH/FUA request being sequenced
 * @fq: flush queue
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(fq->mq_flush_lock)
 */
static void blk_flush_complete_seq(struct request *rq,
				   struct blk_flush_queue *fq,
				   unsigned int seq, blk_status_t error)
{
	struct request_queue *q = rq->q;
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	blk_opf_t cmd_flags;

	BUG_ON(rq->flush.seq & seq);
	rq->flush.seq |= seq;
	cmd_flags = rq->cmd_flags;

	if (likely(!error))
		seq = blk_flush_cur_seq(rq);
	else
		seq = REQ_FSEQ_DONE;

	switch (seq) {
	case REQ_FSEQ_PREFLUSH:
	case REQ_FSEQ_POSTFLUSH:
		/* queue for flush */
		if (list_empty(pending))
			fq->flush_pending_since = jiffies;
		list_move_tail(&rq->queuelist, pending);
		break;

	case REQ_FSEQ_DATA:
		fq->flush_data_in_flight++;
		spin_lock(&q->requeue_lock);
		list_move(&rq->queuelist, &q->requeue_list);
		spin_unlock(&q->requeue_lock);
		blk_mq_kick_requeue_list(q);
		break;

	case REQ_FSEQ_DONE:
		/*
		 * @rq was previously adjusted by blk_insert_flush() for
		 * flush sequencing and may already have gone through the
		 * flush data request completion path.  Restore @rq for
		 * normal completion and end it.
		 */
		list_del_init(&rq->queuelist);
		blk_flush_restore_request(rq);
		blk_mq_end_request(rq, error);
		break;

	default:
		BUG();
	}

	blk_kick_flush(q, fq, cmd_flags);
}
static enum rq_end_io_ret flush_end_io(struct request *flush_rq,
				       blk_status_t error)
{
	struct request_queue *q = flush_rq->q;
	struct list_head *running;
	struct request *rq, *n;
	unsigned long flags = 0;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);

	/* release the tag's ownership to the req cloned from */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);

	if (!req_ref_put_and_test(flush_rq)) {
		fq->rq_status = error;
		spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
		return RQ_END_IO_NONE;
	}

	blk_account_io_flush(flush_rq);
	/*
	 * Flush request has to be marked as IDLE when it is really ended
	 * because its .end_io() is called from the timeout code path too,
	 * to avoid use-after-free.
	 */
	WRITE_ONCE(flush_rq->state, MQ_RQ_IDLE);
	if (fq->rq_status != BLK_STS_OK) {
		error = fq->rq_status;
		fq->rq_status = BLK_STS_OK;
	}

	if (!q->elevator) {
		flush_rq->tag = BLK_MQ_NO_TAG;
	} else {
		blk_mq_put_driver_tag(flush_rq);
		flush_rq->internal_tag = BLK_MQ_NO_TAG;
	}

	running = &fq->flush_queue[fq->flush_running_idx];
	BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);

	/* account completion of the flush request */
	fq->flush_running_idx ^= 1;

	/* and push the waiting requests to the next stage */
	list_for_each_entry_safe(rq, n, running, queuelist) {
		unsigned int seq = blk_flush_cur_seq(rq);

		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
		blk_flush_complete_seq(rq, fq, seq, error);
	}

	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
	return RQ_END_IO_NONE;
}
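
/*
 * Tell whether @rq is the internal flush request owned by a flush queue,
 * identified by its dedicated ->end_io handler.
 */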
bool is_flush_rq(struct request *rq)
{
	return rq->end_io == flush_end_io;
}
/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 * @fq: flush queue
 * @flags: cmd_flags of the original request
 *
 * Flush related states of @q have changed, consider issuing flush request.
 * Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(fq->mq_flush_lock)
 */
static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
			   blk_opf_t flags)
{
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	struct request *first_rq =
		list_first_entry(pending, struct request, queuelist);
	struct request *flush_rq = fq->flush_rq;

	/* C1 described at the top of this file */
	if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
		return;

	/* C2 and C3 */
	if (fq->flush_data_in_flight &&
	    time_before(jiffies,
			fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
		return;

	/*
	 * Issue flush and toggle pending_idx.  This makes pending_idx
	 * different from running_idx, which means flush is in flight.
	 */
	fq->flush_pending_idx ^= 1;

	blk_rq_init(q, flush_rq);

	/*
	 * With the "none" scheduler, borrow the tag from the first request
	 * since they can't be in flight at the same time, and acquire the
	 * tag's ownership for the flush req.
	 *
	 * With an I/O scheduler, the flush rq needs to borrow the scheduler
	 * tag just so that putting/getting the driver tag works as expected.
	 */
	flush_rq->mq_ctx = first_rq->mq_ctx;
	flush_rq->mq_hctx = first_rq->mq_hctx;

	if (!q->elevator)
		flush_rq->tag = first_rq->tag;
	else
		flush_rq->internal_tag = first_rq->internal_tag;

	flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
	flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
	flush_rq->rq_flags |= RQF_FLUSH_SEQ;
	flush_rq->end_io = flush_end_io;
	/*
	 * Order WRITE ->end_io and WRITE rq->ref, and its pair is the one
	 * implied in refcount_inc_not_zero() called from
	 * blk_mq_find_and_get_req(), which orders WRITE/READ flush_rq->ref
	 * and READ flush_rq->end_io.
	 */
	smp_wmb();
	req_ref_set(flush_rq, 1);

	spin_lock(&q->requeue_lock);
	list_add_tail(&flush_rq->queuelist, &q->flush_list);
	spin_unlock(&q->requeue_lock);

	blk_mq_kick_requeue_list(q);
}
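
/*
 * Completion handler for the DATA step of a sequenced PREFLUSH/FUA
 * request: instead of ending the request, feed the completion back into
 * the flush state machine so that a required POSTFLUSH can be issued.
 */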
static enum rq_end_io_ret mq_flush_data_end_io(struct request *rq,
					       blk_status_t error)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	unsigned long flags;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);

	if (q->elevator) {
		WARN_ON(rq->tag < 0);
		blk_mq_put_driver_tag(rq);
	}

	/*
	 * After populating an empty queue, kick it to avoid a stall.  Read
	 * the comment in flush_end_io().
	 */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);
	fq->flush_data_in_flight--;
	/*
	 * rq->queuelist may have been corrupted by rq->rq_next reuse;
	 * re-initialize it before reusing it here.
	 */
	INIT_LIST_HEAD(&rq->queuelist);
	blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);

	blk_mq_sched_restart(hctx);
	return RQ_END_IO_NONE;
}
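
/*
 * Prepare @rq for flush sequencing: mark it with RQF_FLUSH_SEQ and divert
 * its completion handler so the data step completes into the flush state
 * machine rather than ending the request.
 */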
static void blk_rq_init_flush(struct request *rq)
{
	rq->flush.seq = 0;
	rq->rq_flags |= RQF_FLUSH_SEQ;
	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
	rq->end_io = mq_flush_data_end_io;
}
/*
 * Insert a PREFLUSH/FUA request into the flush state machine.
 * Returns true if the request has been consumed by the flush state machine,
 * or false if the caller should continue to process it.
 */
bool blk_insert_flush(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned long fflags = q->queue_flags;	/* may change, cache */
	unsigned int policy = blk_flush_policy(fflags, rq);
	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);

	/* FLUSH/FUA request must never be merged */
	WARN_ON_ONCE(rq->bio != rq->biotail);

	/*
	 * @policy now records what operations need to be done.  Adjust
	 * REQ_PREFLUSH and FUA for the driver.
	 */
	rq->cmd_flags &= ~REQ_PREFLUSH;
	if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
		rq->cmd_flags &= ~REQ_FUA;

	/*
	 * REQ_PREFLUSH|REQ_FUA implies REQ_SYNC, so if we clear any
	 * of those flags, we have to set REQ_SYNC to avoid skewing
	 * the request accounting.
	 */
	rq->cmd_flags |= REQ_SYNC;

	switch (policy) {
	case 0:
		/*
		 * An empty flush handed down from a stacking driver may
		 * translate into nothing if the underlying device does not
		 * advertise a write-back cache.  In this case, simply
		 * complete the request.
		 */
		blk_mq_end_request(rq, 0);
		return true;
	case REQ_FSEQ_DATA:
		/*
		 * If there's data, but no flush is necessary, the request can
		 * be processed directly without going through flush machinery.
		 * Queue for normal execution.
		 */
		return false;
	case REQ_FSEQ_DATA | REQ_FSEQ_POSTFLUSH:
		/*
		 * Initialize the flush fields and completion handler to
		 * trigger the post flush, and then just pass the command on.
		 */
		blk_rq_init_flush(rq);
		rq->flush.seq |= REQ_FSEQ_PREFLUSH;
		spin_lock_irq(&fq->mq_flush_lock);
		fq->flush_data_in_flight++;
		spin_unlock_irq(&fq->mq_flush_lock);
		return false;
	default:
		/*
		 * Mark the request as part of a flush sequence and submit it
		 * for further processing to the flush state machine.
		 */
		blk_rq_init_flush(rq);
		spin_lock_irq(&fq->mq_flush_lock);
		blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
		spin_unlock_irq(&fq->mq_flush_lock);
		return true;
	}
}
/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 *
 * Description:
 *    Issue a flush for the block device in question.
 */
int blkdev_issue_flush(struct block_device *bdev)
{
	struct bio bio;

	bio_init(&bio, bdev, NULL, 0, REQ_OP_WRITE | REQ_PREFLUSH);
	return submit_bio_wait(&bio);
}
EXPORT_SYMBOL(blkdev_issue_flush);
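
/*
 * Allocate a flush queue along with its preallocated flush request.  The
 * flush request is sized to also hold @cmd_size bytes of driver private
 * data, rounded up to a cache line.
 */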
struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags)
{
	struct blk_flush_queue *fq;
	int rq_sz = sizeof(struct request);

	fq = kzalloc_node(sizeof(*fq), flags, node);
	if (!fq)
		goto fail;

	spin_lock_init(&fq->mq_flush_lock);

	rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
	fq->flush_rq = kzalloc_node(rq_sz, flags, node);
	if (!fq->flush_rq)
		goto fail_rq;

	INIT_LIST_HEAD(&fq->flush_queue[0]);
	INIT_LIST_HEAD(&fq->flush_queue[1]);

	return fq;

 fail_rq:
	kfree(fq);
 fail:
	return NULL;
}
void blk_free_flush_queue(struct blk_flush_queue *fq)
{
	/* a bio based request queue has no flush queue */
	if (!fq)
		return;

	kfree(fq->flush_rq);
	kfree(fq);
}
/*
 * Allow a driver to set its own lock class on fq->mq_flush_lock to
 * avoid a lockdep complaint.
 *
 * flush_end_io() may be called recursively from some drivers, such as
 * nvme-loop, so lockdep may complain about 'possible recursive locking'
 * because all 'struct blk_flush_queue' instances share the same
 * mq_flush_lock lock class key.  We need to assign a different lock class
 * to these drivers' fq->mq_flush_lock to avoid the lockdep warning.
 *
 * Using a dynamically allocated lock class key for each 'blk_flush_queue'
 * instance is overkill, and worse, it introduces a horrible boot delay
 * issue because synchronize_rcu() is implied in lockdep_unregister_key(),
 * which is called for each hctx release.  SCSI probing may synchronously
 * create and destroy lots of MQ request_queues for non-existent devices,
 * and some robot test kernels always enable the lockdep option.  It has
 * been observed that more than half an hour is taken during SCSI MQ probe
 * with a per-fq lock class.
 */
void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
		struct lock_class_key *key)
{
	lockdep_set_class(&hctx->fq->mq_flush_lock, key);
}
EXPORT_SYMBOL_GPL(blk_mq_hctx_set_fq_lock_class);