/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/blk-mq.h>
struct blk_mq_ctxs {
        struct blk_mq_ctx __percpu *queue_ctx;
};
/*
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
        struct {
                struct list_head rq_lists[HCTX_MAX_TYPES];
        } ____cacheline_aligned_in_smp;

        unsigned short index_hw[HCTX_MAX_TYPES];
        struct blk_mq_hw_ctx *hctxs[HCTX_MAX_TYPES];

        struct request_queue *queue;
        struct blk_mq_ctxs *ctxs;
} ____cacheline_aligned_in_smp;
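/*
 * rq_lists[], index_hw[] and hctxs[] above are all indexed by hctx_type
 * (HCTX_TYPE_DEFAULT, HCTX_TYPE_READ, HCTX_TYPE_POLL): each software queue
 * keeps a per-type pending list and a cached mapping to the hardware queue
 * that services it for that type.
 */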
enum {
        BLK_MQ_TAG_MAX = BLK_MQ_NO_TAG - 1,
};
typedef unsigned int __bitwise blk_insert_t;
#define BLK_MQ_INSERT_AT_HEAD ((__force blk_insert_t)0x01)

void blk_mq_submit_bio(struct bio *bio);
int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
                unsigned int flags);
void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
                                        struct blk_mq_ctx *start);
void blk_mq_put_rq_ref(struct request *rq);
/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
                     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
                                             unsigned int hctx_idx, unsigned int depth);
void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
                             struct blk_mq_tags *tags,
                             unsigned int hctx_idx);
/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type, cpu) to a hardware queue
 * @type: the hctx type index
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
                                                          enum hctx_type type,
                                                          unsigned int cpu)
{
        return xa_load(&q->hctx_table, q->tag_set->map[type].mq_map[cpu]);
}
static inline enum hctx_type blk_mq_get_hctx_type(blk_opf_t opf)
{
        enum hctx_type type = HCTX_TYPE_DEFAULT;

        /*
         * The caller ensures that if REQ_POLLED is set, polling is enabled.
         */
        if (opf & REQ_POLLED)
                type = HCTX_TYPE_POLL;
        else if ((opf & REQ_OP_MASK) == REQ_OP_READ)
                type = HCTX_TYPE_READ;
        return type;
}
/*
 * blk_mq_map_queue() - map (cmd_flags, type) to a hardware queue
 * @opf: operation type (REQ_OP_*) and flags (e.g. REQ_POLLED)
 * @ctx: software queue cpu ctx
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
                                                     blk_opf_t opf,
                                                     struct blk_mq_ctx *ctx)
{
        return ctx->hctxs[blk_mq_get_hctx_type(opf)];
}
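/*
 * For illustration: blk_mq_get_hctx_type() above selects HCTX_TYPE_POLL for
 * REQ_POLLED requests, HCTX_TYPE_READ for plain reads and HCTX_TYPE_DEFAULT
 * for everything else, so a submitter doing
 *
 *	hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
 *
 * gets ctx->hctxs[HCTX_TYPE_READ] for a plain read without REQ_POLLED.
 * Devices without dedicated read/poll hardware queues have those slots set
 * up (outside this file) to alias the default hardware queue, so the lookup
 * still resolves to a valid hctx.
 */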
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
int blk_mq_sysfs_register(struct gendisk *disk);
void blk_mq_sysfs_unregister(struct gendisk *disk);
int blk_mq_sysfs_register_hctxs(struct request_queue *q);
void blk_mq_sysfs_unregister_hctxs(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
void blk_mq_free_plug_rqs(struct blk_plug *plug);
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_cancel_work_sync(struct request_queue *q);

void blk_mq_release(struct request_queue *q);
static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
                                                  unsigned int cpu)
{
        return per_cpu_ptr(q->queue_ctx, cpu);
}
/*
 * This assumes per-cpu software queues. They could be per-node as well, for
 * instance; for now this is hardcoded as-is. Note that we don't care about
 * preemption, since we know the ctxs are persistent. This does mean that we
 * can't rely on the ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
        return __blk_mq_get_ctx(q, raw_smp_processor_id());
}
struct blk_mq_alloc_data {
        /* input parameter */
        struct request_queue *q;
        blk_mq_req_flags_t flags;
        unsigned int shallow_depth;
        req_flags_t rq_flags;

        /* allocate multiple requests/tags in one go */
        unsigned int nr_tags;
        struct request **cached_rq;

        /* input & output parameter */
        struct blk_mq_ctx *ctx;
        struct blk_mq_hw_ctx *hctx;
};
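/*
 * A condensed sketch of the usual allocation pattern (illustrative only, not
 * a verbatim copy of any caller): the submitter fills the input fields,
 * resolves ctx/hctx, and then asks the tag code for a tag:
 *
 *	struct blk_mq_alloc_data data = {
 *		.q	 = q,
 *		.flags	 = BLK_MQ_REQ_NOWAIT,
 *		.nr_tags = 1,
 *	};
 *	unsigned int tag;
 *
 *	data.ctx  = blk_mq_get_ctx(q);
 *	data.hctx = blk_mq_map_queue(q, bio->bi_opf, data.ctx);
 *	tag = blk_mq_get_tag(&data);
 *
 * blk_mq_get_tag() returns BLK_MQ_NO_TAG when no tag can be obtained (e.g.
 * with BLK_MQ_REQ_NOWAIT and the tag space exhausted).
 */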
struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags,
                                     unsigned int reserved_tags, int node, int alloc_policy);
void blk_mq_free_tags(struct blk_mq_tags *tags);
int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags,
                        struct sbitmap_queue *breserved_tags, unsigned int queue_depth,
                        unsigned int reserved, int node, int alloc_policy);

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags,
                              unsigned int *offset);
void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
                    unsigned int tag);
void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags);
int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
                            struct blk_mq_tags **tags, unsigned int depth, bool can_grow);
void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set,
                                   unsigned int size);
void blk_mq_tag_update_sched_shared_tags(struct request_queue *q);
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
                                void *priv);
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
                         void *priv);
static inline struct sbq_wait_state *bt_wait_ptr(struct sbitmap_queue *bt,
                                                 struct blk_mq_hw_ctx *hctx)
{
        return sbq_wait_ptr(bt, &hctx->wait_index);
}
void __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);

static inline void blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
        if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
                __blk_mq_tag_busy(hctx);
}

static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
        if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
                __blk_mq_tag_idle(hctx);
}
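/*
 * For tag sets marked BLK_MQ_F_TAG_QUEUE_SHARED, the busy/idle transitions
 * above are what maintain tags->active_queues, the count of queues currently
 * competing for the shared tags; hctx_may_queue() further down reads that
 * count to hand each active queue a fair share of the depth.
 */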
static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags,
                                          unsigned int tag)
{
        return tag < tags->nr_reserved_tags;
}
static inline bool blk_mq_is_shared_tags(unsigned int flags)
{
        return flags & BLK_MQ_F_TAG_HCTX_SHARED;
}
static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
        if (data->rq_flags & RQF_SCHED_TAGS)
                return data->hctx->sched_tags;
        return data->hctx->tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
        return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
        return hctx->nr_ctx && hctx->tags;
}
unsigned int blk_mq_in_flight(struct request_queue *q,
                              struct block_device *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
                         unsigned int inflight[2]);
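/*
 * Dispatch budget helpers: thin wrappers around the driver's optional
 * ->get_budget()/->put_budget() and ->set_rq_budget_token()/
 * ->get_rq_budget_token() mq_ops (used e.g. by SCSI to bound how much may be
 * dispatched at once). A NULL op simply means the queue does not use budgets.
 */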
static inline void blk_mq_put_dispatch_budget(struct request_queue *q,
                                              int budget_token)
{
        if (q->mq_ops->put_budget)
                q->mq_ops->put_budget(q, budget_token);
}
static inline int blk_mq_get_dispatch_budget(struct request_queue *q)
{
        if (q->mq_ops->get_budget)
                return q->mq_ops->get_budget(q);
        return 0;
}
static inline void blk_mq_set_rq_budget_token(struct request *rq, int token)
{
        if (rq->q->mq_ops->set_rq_budget_token)
                rq->q->mq_ops->set_rq_budget_token(rq, token);
}
static inline int blk_mq_get_rq_budget_token(struct request *rq)
{
        if (rq->q->mq_ops->get_rq_budget_token)
                return rq->q->mq_ops->get_rq_budget_token(rq);
        return -1;
}
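/*
 * Active request accounting: when the tag set is shared across hardware
 * queues (BLK_MQ_F_TAG_HCTX_SHARED), the count is kept per request queue in
 * nr_active_requests_shared_tags; otherwise it lives in hctx->nr_active.
 * hctx_may_queue() uses this count to enforce the per-queue fair share of a
 * shared tag space.
 */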
static inline void __blk_mq_add_active_requests(struct blk_mq_hw_ctx *hctx,
                                                int val)
{
        if (blk_mq_is_shared_tags(hctx->flags))
                atomic_add(val, &hctx->queue->nr_active_requests_shared_tags);
        else
                atomic_add(val, &hctx->nr_active);
}
static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
        __blk_mq_add_active_requests(hctx, 1);
}
static inline void __blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
                                                int val)
{
        if (blk_mq_is_shared_tags(hctx->flags))
                atomic_sub(val, &hctx->queue->nr_active_requests_shared_tags);
        else
                atomic_sub(val, &hctx->nr_active);
}
static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{
        __blk_mq_sub_active_requests(hctx, 1);
}
static inline void blk_mq_add_active_requests(struct blk_mq_hw_ctx *hctx,
                                              int val)
{
        if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
                __blk_mq_add_active_requests(hctx, val);
}
static inline void blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
        if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
                __blk_mq_inc_active_requests(hctx);
}
static inline void blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
                                              int val)
{
        if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
                __blk_mq_sub_active_requests(hctx, val);
}
static inline void blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{
        if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
                __blk_mq_dec_active_requests(hctx);
}

static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
{
        if (blk_mq_is_shared_tags(hctx->flags))
                return atomic_read(&hctx->queue->nr_active_requests_shared_tags);
        return atomic_read(&hctx->nr_active);
}
static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
                                           struct request *rq)
{
        blk_mq_dec_active_requests(hctx);
        blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
        rq->tag = BLK_MQ_NO_TAG;
}
static inline void blk_mq_put_driver_tag(struct request *rq)
{
        if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
                return;

        __blk_mq_put_driver_tag(rq->mq_hctx, rq);
}
bool __blk_mq_alloc_driver_tag(struct request *rq);
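/*
 * With an I/O scheduler attached, a request is allocated with a scheduler
 * (internal) tag and only picks up a driver tag when it is actually
 * dispatched; without a scheduler, rq->tag is assigned at allocation time.
 * blk_mq_get_driver_tag() below is the dispatch-time path for the former
 * case.
 */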
static inline bool blk_mq_get_driver_tag(struct request *rq)
{
        if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_alloc_driver_tag(rq))
                return false;

        return true;
}
static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
        unsigned int cpu;

        for_each_possible_cpu(cpu)
                qmap->mq_map[cpu] = 0;
}
/*
 * blk_mq_plug() - Get caller context plug
 * @bio: the bio being submitted by the caller context
 *
 * Plugging, by design, may delay the insertion of BIOs into the elevator in
 * order to increase BIO merging opportunities. This however can cause BIO
 * insertion order to change from the order in which submit_bio() is executed
 * when multiple contexts concurrently issue BIOs to a device, even if these
 * contexts are synchronized to tightly control BIO issuing order. While this
 * is not a problem with regular block devices, this ordering change can cause
 * write BIO failures with zoned block devices as these require sequential
 * write patterns to zones. Prevent this from happening by ignoring the plug
 * state of a BIO issuing context if it is for a zoned block device and the
 * BIO to plug is a write operation.
 *
 * Return current->plug if the bio can be plugged and NULL otherwise.
 */
static inline struct blk_plug *blk_mq_plug(struct bio *bio)
{
        /* Zoned block device write operation case: do not plug the BIO */
        if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
            bdev_op_is_zoned_write(bio->bi_bdev, bio_op(bio)))
                return NULL;

        /*
         * For regular block devices or read operations, use the context plug,
         * which may be NULL if blk_start_plug() was not executed.
         */
        return current->plug;
}
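/*
 * Illustration (not taken from this file): submitters fetch the plug once,
 *
 *	struct blk_plug *plug = blk_mq_plug(bio);
 *
 * and only batch or merge through it when the return value is non-NULL. For
 * a write to a zoned block device the helper returns NULL, so the bio is
 * submitted without plugging and its sequential ordering is preserved.
 */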
/* Free all requests on the list */
static inline void blk_mq_free_requests(struct list_head *list)
{
        while (!list_empty(list)) {
                struct request *rq = list_entry_rq(list->next);

                list_del_init(&rq->queuelist);
                blk_mq_free_request(rq);
        }
}
/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them
 * (see the worked example after the function).
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
                                  struct sbitmap_queue *bt)
{
        unsigned int depth, users;

        if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
                return true;

        /*
         * Don't try dividing an ant: a depth of 1 cannot be shared any further.
         */
        if (bt->sb.depth == 1)
                return true;

        if (blk_mq_is_shared_tags(hctx->flags)) {
                struct request_queue *q = hctx->queue;

                if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
                        return true;
        } else {
                if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                        return true;
        }

        users = READ_ONCE(hctx->tags->active_queues);
        if (!users)
                return true;

        /*
         * Allow at least some tags
         */
        depth = max((bt->sb.depth + users - 1) / users, 4U);
        return __blk_mq_active_requests(hctx) < depth;
}
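/*
 * Worked example for the fair-share limit above: with bt->sb.depth == 256
 * shared by users == 8 active queues, depth = max((256 + 8 - 1) / 8, 4U) = 32,
 * so a queue is allowed to allocate until it has 32 requests in flight. The
 * 4U floor keeps very small tag sets usable even when many queues are active.
 */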
/* run the code block in @dispatch_ops with rcu/srcu read lock held */
#define __blk_mq_run_dispatch_ops(q, check_sleep, dispatch_ops)	\
do {								\
	if ((q)->tag_set->flags & BLK_MQ_F_BLOCKING) {		\
		struct blk_mq_tag_set *__tag_set = (q)->tag_set; \
		int srcu_idx;					\
								\
		might_sleep_if(check_sleep);			\
		srcu_idx = srcu_read_lock(__tag_set->srcu);	\
		(dispatch_ops);					\
		srcu_read_unlock(__tag_set->srcu, srcu_idx);	\
	} else {						\
		rcu_read_lock();				\
		(dispatch_ops);					\
		rcu_read_unlock();				\
	}							\
} while (0)

#define blk_mq_run_dispatch_ops(q, dispatch_ops)		\
	__blk_mq_run_dispatch_ops(q, true, dispatch_ops)	\
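/*
 * Usage sketch (assumed typical caller, not a verbatim copy): the dispatch
 * expression is passed as the second argument and runs under srcu for
 * BLK_MQ_F_BLOCKING tag sets and under rcu_read_lock() otherwise, e.g.
 *
 *	blk_mq_run_dispatch_ops(hctx->queue,
 *				blk_mq_sched_dispatch_requests(hctx));
 */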