/* SPDX-License-Identifier: GPL-2.0 */
#include "blk-mq-tag.h"

	struct blk_mq_ctx __percpu	*queue_ctx;

 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs

		struct list_head	rq_lists[HCTX_MAX_TYPES];
	} ____cacheline_aligned_in_smp;

	unsigned short		index_hw[HCTX_MAX_TYPES];
	struct blk_mq_hw_ctx	*hctxs[HCTX_MAX_TYPES];

	struct request_queue	*queue;
	struct blk_mq_ctxs	*ctxs;
} ____cacheline_aligned_in_smp;

void blk_mq_submit_bio(struct bio *bio);
int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);
void blk_mq_put_rq_ref(struct request *rq);

 * Internal helpers for allocating/freeing the request map

void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
					     unsigned int hctx_idx, unsigned int depth);
void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
			     struct blk_mq_tags *tags,
			     unsigned int hctx_idx);

 * Internal helpers for request insertion into sw queues

void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
			    struct list_head *list);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
				    struct list_head *list);

 * CPU -> queue mappings

extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

 * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
 * @type: the hctx type index

static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
	return xa_load(&q->hctx_table, q->tag_set->map[type].mq_map[cpu]);
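/*
 * Worked example (illustrative numbers, not taken from this header): with two
 * hardware queues and q->tag_set->map[HCTX_TYPE_DEFAULT].mq_map = {0, 0, 1, 1},
 * a lookup for (HCTX_TYPE_DEFAULT, cpu 2) reads mq_map[2] == 1, and xa_load()
 * returns the hctx stored at index 1 of q->hctx_table.
 */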
static inline enum hctx_type blk_mq_get_hctx_type(unsigned int flags)
	enum hctx_type type = HCTX_TYPE_DEFAULT;

	 * The caller ensures that if REQ_POLLED is set, polling is enabled.

	if (flags & REQ_POLLED)
		type = HCTX_TYPE_POLL;
	else if ((flags & REQ_OP_MASK) == REQ_OP_READ)
		type = HCTX_TYPE_READ;
 * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
 * @flags: request command flags
 * @ctx: software queue cpu ctx

static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     struct blk_mq_ctx *ctx)
	return ctx->hctxs[blk_mq_get_hctx_type(flags)];

extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
void blk_mq_free_plug_rqs(struct blk_plug *plug);
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_cancel_work_sync(struct request_queue *q);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
	return per_cpu_ptr(q->queue_ctx, cpu);
 * This assumes per-CPU software queues. They could be per-node as well, for
 * instance. For now this is hardcoded as-is. Note that we don't care about
 * preemption, since we know the ctx's are persistent. This does mean that we
 * can't rely on the ctx always matching the currently running CPU.

static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
	return __blk_mq_get_ctx(q, raw_smp_processor_id());
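/*
 * Illustrative sketch, not part of this header's API: submission-path code
 * pairs blk_mq_get_ctx() with blk_mq_map_queue() to go from "the CPU I am
 * running on" to the hardware queue that should service the request. The
 * helper name and the bare "flags" parameter below are assumptions for the
 * example; the real callers in blk-mq.c carry this state in a
 * struct blk_mq_alloc_data.
 */
static inline struct blk_mq_hw_ctx *
blk_mq_example_current_hctx(struct request_queue *q, unsigned int flags)
{
	/* Software queue for the submitting CPU. */
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);

	/* Hardware queue chosen from the command flags (default/read/poll). */
	return blk_mq_map_queue(q, flags, ctx);
}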
struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;
	unsigned int cmd_flags;
	req_flags_t rq_flags;

	/* allocate multiple requests/tags in one go */
	unsigned int nr_tags;
	struct request **cached_rq;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;

static inline bool blk_mq_is_shared_tags(unsigned int flags)
	return flags & BLK_MQ_F_TAG_HCTX_SHARED;

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
	if (!(data->rq_flags & RQF_ELV))
		return data->hctx->tags;
	return data->hctx->sched_tags;

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
	return hctx->nr_ctx && hctx->tags;

unsigned int blk_mq_in_flight(struct request_queue *q,
			      struct block_device *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
			 unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct request_queue *q,
	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(q, budget_token);

static inline int blk_mq_get_dispatch_budget(struct request_queue *q)
	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(q);

static inline void blk_mq_set_rq_budget_token(struct request *rq, int token)
	if (rq->q->mq_ops->set_rq_budget_token)
		rq->q->mq_ops->set_rq_budget_token(rq, token);

static inline int blk_mq_get_rq_budget_token(struct request *rq)
	if (rq->q->mq_ops->get_rq_budget_token)
		return rq->q->mq_ops->get_rq_budget_token(rq);

static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
	if (blk_mq_is_shared_tags(hctx->flags))
		atomic_inc(&hctx->queue->nr_active_requests_shared_tags);
		atomic_inc(&hctx->nr_active);

static inline void __blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
	if (blk_mq_is_shared_tags(hctx->flags))
		atomic_sub(val, &hctx->queue->nr_active_requests_shared_tags);
		atomic_sub(val, &hctx->nr_active);

static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
	__blk_mq_sub_active_requests(hctx, 1);

static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
	if (blk_mq_is_shared_tags(hctx->flags))
		return atomic_read(&hctx->queue->nr_active_requests_shared_tags);
	return atomic_read(&hctx->nr_active);

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
	blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = BLK_MQ_NO_TAG;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		__blk_mq_dec_active_requests(hctx);

static inline void blk_mq_put_driver_tag(struct request *rq)
	if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
	__blk_mq_put_driver_tag(rq->mq_hctx, rq);

bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq);

static inline bool blk_mq_get_driver_tag(struct request *rq)
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	if (rq->tag != BLK_MQ_NO_TAG &&
	    !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
		hctx->tags->rqs[rq->tag] = rq;

	return __blk_mq_get_driver_tag(hctx, rq);

static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
	for_each_possible_cpu(cpu)
		qmap->mq_map[cpu] = 0;
 * blk_mq_plug() - Get caller context plug
 * @bio : the bio being submitted by the caller context
 * Plugging, by design, may delay the insertion of BIOs into the elevator in
 * order to increase BIO merging opportunities. However, this can change the
 * BIO insertion order relative to the order in which submit_bio() is executed
 * when multiple contexts issue BIOs to a device concurrently, even if these
 * contexts are synchronized to tightly control BIO issuing order. While this
 * is not a problem with regular block devices, this ordering change can cause
 * write BIO failures with zoned block devices, as these require sequential
 * write patterns to zones. Prevent this from happening by ignoring the plug
 * state of a BIO issuing context if the target request queue is for a zoned
 * block device and the BIO to plug is a write operation.
 * Return current->plug if the bio can be plugged and NULL otherwise

static inline struct blk_plug *blk_mq_plug(struct request_queue *q,
	 * For regular block devices or read operations, use the context plug,
	 * which may be NULL if blk_start_plug() was not executed.
	if (!blk_queue_is_zoned(q) || !op_is_write(bio_op(bio)))
		return current->plug;

	/* Zoned block device write operation case: do not plug the BIO */
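/*
 * Illustrative sketch, not part of this header: a submitter consults
 * blk_mq_plug() instead of dereferencing current->plug directly, so that
 * write bios aimed at zoned devices bypass plugging and keep their issuing
 * order. The helper name below is hypothetical.
 */
static inline bool blk_mq_example_bio_may_plug(struct request_queue *q,
					       struct bio *bio)
{
	return blk_mq_plug(q, bio) != NULL;
}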
/* Free all requests on the list */
static inline void blk_mq_free_requests(struct list_head *list)
	while (!list_empty(list)) {
		struct request *rq = list_entry_rq(list->next);

		list_del_init(&rq->queuelist);
		blk_mq_free_request(rq);

 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.

static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))

	 * Don't try dividing an ant
	if (bt->sb.depth == 1)

	if (blk_mq_is_shared_tags(hctx->flags)) {
		struct request_queue *q = hctx->queue;

		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))

	users = atomic_read(&hctx->tags->active_queues);

	 * Allow at least some tags
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return __blk_mq_active_requests(hctx) < depth;
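/*
 * Worked example (illustrative numbers, not taken from this header): with
 * bt->sb.depth == 128 and users == 3, depth = max((128 + 3 - 1) / 3, 4U) =
 * max(43, 4) = 43, so a hardware queue may have up to 43 active requests
 * before hctx_may_queue() asks it to back off.
 */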
/* run the code block in @dispatch_ops with rcu/srcu read lock held */
#define __blk_mq_run_dispatch_ops(q, check_sleep, dispatch_ops)	\
	if (!blk_queue_has_srcu(q)) {					\
		might_sleep_if(check_sleep);				\
		srcu_idx = srcu_read_lock((q)->srcu);			\
		srcu_read_unlock((q)->srcu, srcu_idx);			\

#define blk_mq_run_dispatch_ops(q, dispatch_ops)		\
	__blk_mq_run_dispatch_ops(q, true, dispatch_ops)	\
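/*
 * Illustrative sketch, not part of this header: a dispatch helper is wrapped
 * in blk_mq_run_dispatch_ops() so that the queue's RCU or SRCU read lock is
 * held around it. blk_mq_sched_dispatch_requests() (declared in
 * blk-mq-sched.h) is used here only as an example of a dispatch operation.
 */
static inline void blk_mq_example_run_dispatch(struct blk_mq_hw_ctx *hctx)
{
	blk_mq_run_dispatch_ops(hctx->queue,
				blk_mq_sched_dispatch_requests(hctx));
}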