bfq and mq-deadline treat sync writes as async requests, limiting them
by async_depth and reserving the full tag depth only for sync reads;
kyber, however, does not currently treat sync writes as async requests.

Consider the case where there are lots of dirty pages and the user
calls fsync to flush them. sched_tags can then be exhausted by sync
writes, and sync reads can get stuck waiting for a tag. Hence make
kyber follow what mq-deadline and bfq do, and unify the async request
check across all elevators.
Signed-off-by: Yu Kuai <yukuai@fnnas.com>
Reviewed-by: Nilay Shroff <nilay@linux.ibm.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
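As an illustration of the check being unified, below is a small
standalone C model (not from the patch itself; the opcode and flag
constants are simplified stand-ins for the real definitions in
include/linux/blk_types.h, and the helpers only mirror the spirit of
op_is_sync()/op_is_write()/blk_mq_is_sync_read()):

/*
 * Simplified, standalone model of the unified async-request check.
 * Constants below are illustrative, not the kernel's real values.
 */
#include <stdbool.h>
#include <stdio.h>

#define OP_READ   0u          /* even opcodes: reads  */
#define OP_WRITE  1u          /* odd opcodes: writes  */
#define OP_MASK   0xffu       /* opcode lives in the low bits */
#define F_SYNC    (1u << 8)   /* stand-in for REQ_SYNC */

static bool op_is_write(unsigned int opf)
{
	/* write opcodes have bit 0 set */
	return opf & 1;
}

static bool op_is_sync(unsigned int opf)
{
	/* reads are sync by definition; writes only if marked sync */
	return (opf & OP_MASK) == OP_READ || (opf & F_SYNC);
}

/* the check this patch factors out into a shared helper */
static bool blk_mq_is_sync_read(unsigned int opf)
{
	return op_is_sync(opf) && !op_is_write(opf);
}

int main(void)
{
	unsigned int reqs[] = {
		OP_READ,              /* plain read               */
		OP_WRITE,             /* background writeback     */
		OP_WRITE | F_SYNC,    /* fsync-driven sync write  */
	};

	for (unsigned int i = 0; i < 3; i++) {
		unsigned int opf = reqs[i];

		/* only sync reads keep the full sched_tags depth */
		printf("opf=0x%03x sync=%d write=%d -> %s\n",
		       opf, op_is_sync(opf), op_is_write(opf),
		       blk_mq_is_sync_read(opf) ?
		       "full depth" : "limited by async_depth");
	}
	return 0;
}

Under this rule an fsync-driven sync write is still classified as
throttleable, so it falls under async_depth in bfq, mq-deadline and
now kyber as well, leaving tags available for sync reads.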
 	unsigned int limit, act_idx;
 	/* Sync reads have full depth available */
-	if (op_is_sync(opf) && !op_is_write(opf))
+	if (blk_mq_is_sync_read(opf))
 		limit = data->q->nr_requests;
 	else
 		limit = bfqd->async_depths[!!bfqd->wr_busy_queues][op_is_sync(opf)];
 			depth);
 }
+static inline bool blk_mq_is_sync_read(blk_opf_t opf)
+{
+	return op_is_sync(opf) && !op_is_write(opf);
+}
+
 #endif
 	 * We use the scheduler tags as per-hardware queue queueing tokens.
 	 * Async requests can be limited at this stage.
 	 */
-	if (!op_is_sync(opf)) {
+	if (!blk_mq_is_sync_read(opf)) {
 		struct kyber_queue_data *kqd = data->q->elevator->elevator_data;
 		data->shallow_depth = kqd->async_depth;
 	struct deadline_data *dd = data->q->elevator->elevator_data;
 	/* Do not throttle synchronous reads. */
-	if (op_is_sync(opf) && !op_is_write(opf))
+	if (blk_mq_is_sync_read(opf))
 		return;
 	/*