/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/blkdev.h>
#include <linux/sbitmap.h>
#include <linux/srcu.h>

struct blk_flush_queue;
/**
 * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware block device
 */
struct blk_mq_hw_ctx {
	struct {
		struct list_head	dispatch;
		unsigned long		state;		/* BLK_MQ_S_* flags */
	} ____cacheline_aligned_in_smp;
	struct delayed_work	run_work;
	cpumask_var_t		cpumask;

	unsigned long		flags;		/* BLK_MQ_F_* flags */
	struct request_queue	*queue;
	struct blk_flush_queue	*fq;

	struct sbitmap		ctx_map;

	struct blk_mq_ctx	*dispatch_from;
	unsigned int		dispatch_busy;

	unsigned short		nr_ctx;
	struct blk_mq_ctx	**ctxs;

	spinlock_t		dispatch_wait_lock;
	wait_queue_entry_t	dispatch_wait;

	struct blk_mq_tags	*tags;
	struct blk_mq_tags	*sched_tags;
#define BLK_MQ_MAX_DISPATCH_ORDER	7
	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];

	unsigned int		numa_node;
	unsigned int		queue_num;

	struct hlist_node	cpuhp_dead;

	unsigned long		poll_considered;
	unsigned long		poll_invoked;
	unsigned long		poll_success;
#ifdef CONFIG_BLK_DEBUG_FS
	struct dentry		*debugfs_dir;
	struct dentry		*sched_debugfs_dir;
#endif

	/* Must be the last member - see also blk_mq_hw_ctx_size(). */
	struct srcu_struct	srcu[0];
};
struct blk_mq_queue_map {
	unsigned int		nr_queues;
	unsigned int		queue_offset;
};

enum hctx_type {
	HCTX_TYPE_DEFAULT,	/* all I/O not otherwise accounted for */
	HCTX_TYPE_READ,		/* just for READ I/O */
	HCTX_TYPE_POLL,		/* polled I/O of any kind */

	HCTX_MAX_TYPES,
};
struct blk_mq_tag_set {
	/*
	 * map[] holds ctx -> hctx mappings, one map exists for each type
	 * that the driver wishes to support. There are no restrictions
	 * on maps being of the same size, and it's perfectly legal to
	 * share maps between types.
	 */
	struct blk_mq_queue_map	map[HCTX_MAX_TYPES];
	unsigned int		nr_maps;	/* nr entries in map[] */
	const struct blk_mq_ops	*ops;
	unsigned int		nr_hw_queues;	/* nr hw queues across maps */
	unsigned int		queue_depth;	/* max hw supported */
	unsigned int		reserved_tags;
	unsigned int		cmd_size;	/* per-request extra data */

	unsigned int		timeout;
	unsigned int		flags;		/* BLK_MQ_F_* */

	struct blk_mq_tags	**tags;

	struct mutex		tag_list_lock;
	struct list_head	tag_list;
};
struct blk_mq_queue_data {
	struct request *rq;
	bool last;
};
typedef blk_status_t (queue_rq_fn)(struct blk_mq_hw_ctx *,
		const struct blk_mq_queue_data *);
typedef void (commit_rqs_fn)(struct blk_mq_hw_ctx *);
typedef bool (get_budget_fn)(struct blk_mq_hw_ctx *);
typedef void (put_budget_fn)(struct blk_mq_hw_ctx *);
typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_request_fn)(struct blk_mq_tag_set *set, struct request *,
		unsigned int, unsigned int);
typedef void (exit_request_fn)(struct blk_mq_tag_set *set, struct request *,
		unsigned int);

typedef bool (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
		bool);
typedef bool (busy_tag_iter_fn)(struct request *, void *, bool);
typedef int (poll_fn)(struct blk_mq_hw_ctx *);
typedef int (map_queues_fn)(struct blk_mq_tag_set *set);
typedef bool (busy_fn)(struct request_queue *);
typedef void (complete_fn)(struct request *);
struct blk_mq_ops {
	queue_rq_fn		*queue_rq;

	/*
	 * If a driver uses bd->last to judge when to submit requests to
	 * hardware, it must define this function. In case of errors that
	 * make us stop issuing further requests, this hook serves the
	 * purpose of kicking the hardware (which the last request
	 * otherwise would have done).
	 */
	commit_rqs_fn		*commit_rqs;

	/*
	 * Reserve budget before queueing a request; once .queue_rq has
	 * run, it is the driver's responsibility to release the reserved
	 * budget. The failure case of .get_budget also has to be handled
	 * to avoid I/O deadlock.
	 */
	get_budget_fn		*get_budget;
	put_budget_fn		*put_budget;

	/*
	 * Called on request timeout.
	 */
	timeout_fn		*timeout;

	/*
	 * Called to poll for completion of a specific tag.
	 */
	poll_fn			*poll;

	complete_fn		*complete;

	/*
	 * Called when the block layer side of a hardware queue has been
	 * set up, allowing the driver to allocate/init matching structures.
	 * Ditto for exit/teardown.
	 */
	init_hctx_fn		*init_hctx;
	exit_hctx_fn		*exit_hctx;

	/*
	 * Called for every command allocated by the block layer to allow
	 * the driver to set up driver specific data.
	 *
	 * Tag greater than or equal to queue_depth is for setting up
	 * flush requests.
	 *
	 * Ditto for exit/teardown.
	 */
	init_request_fn		*init_request;
	exit_request_fn		*exit_request;
	/* Called from inside blk_get_request() */
	void (*initialize_rq_fn)(struct request *rq);

	/*
	 * If set, returns whether or not this queue currently is busy.
	 */
	busy_fn			*busy;

	map_queues_fn		*map_queues;

#ifdef CONFIG_BLK_DEBUG_FS
	/*
	 * Used by the debugfs implementation to show driver-specific
	 * information about a request.
	 */
	void (*show_rq)(struct seq_file *m, struct request *rq);
#endif
};
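/*
 * Example (not part of this header): a hedged sketch of a minimal
 * .queue_rq/.commit_rqs pair for a hypothetical driver "foo".
 * foo_hw_submit() and foo_hw_ring_doorbell() are assumptions, and
 * blk_mq_start_request()/BLK_STS_* are declared further down in this
 * header and in blk_types.h. The point is the ordering: start the
 * request, hand it to hardware, and only ring the doorbell for
 * bd->last, relying on .commit_rqs when dispatch stops before a last
 * request was seen.
 */
static bool foo_hw_submit(struct blk_mq_hw_ctx *hctx, struct request *rq);	/* hypothetical */
static void foo_hw_ring_doorbell(struct blk_mq_hw_ctx *hctx);			/* hypothetical */

static blk_status_t foo_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	blk_mq_start_request(rq);		/* must precede hardware submission */

	if (!foo_hw_submit(hctx, rq))
		return BLK_STS_RESOURCE;	/* out of resources: core re-runs the queue */

	if (bd->last)
		foo_hw_ring_doorbell(hctx);	/* batch boundary reached */

	return BLK_STS_OK;
}

static void foo_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	/* kick hardware for requests queued without a bd->last notification */
	foo_hw_ring_doorbell(hctx);
}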
enum {
	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_TAG_SHARED	= 1 << 1,
	BLK_MQ_F_BLOCKING	= 1 << 5,
	BLK_MQ_F_NO_SCHED	= 1 << 6,
	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
	BLK_MQ_F_ALLOC_POLICY_BITS = 1,

	BLK_MQ_S_STOPPED	= 0,
	BLK_MQ_S_TAG_ACTIVE	= 1,
	BLK_MQ_S_SCHED_RESTART	= 2,

	BLK_MQ_MAX_DEPTH	= 10240,

	BLK_MQ_CPU_WORK_BATCH	= 8,
};

#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
		((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)
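/*
 * Example (not part of this header): composing blk_mq_tag_set.flags
 * from a feature flag and a tag allocation policy. BLK_TAG_ALLOC_RR
 * comes from <linux/blkdev.h>; the helper name is hypothetical.
 */
static inline unsigned int foo_tag_set_flags(void)
{
	unsigned int flags = BLK_MQ_F_SHOULD_MERGE |
			     BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_RR);

	/* BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) recovers BLK_TAG_ALLOC_RR */
	return flags;
}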
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
						  struct request_queue *q);
struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
						const struct blk_mq_ops *ops,
						unsigned int queue_depth,
						unsigned int set_flags);
int blk_mq_register_dev(struct device *, struct request_queue *);
void blk_mq_unregister_dev(struct device *, struct request_queue *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
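/*
 * Example (not part of this header): the usual bring-up order for the
 * hypothetical "foo" driver sketched above: allocate the tag set, then
 * create a request queue on top of it; teardown runs in reverse
 * (blk_cleanup_queue() followed by blk_mq_free_tag_set()).
 */
static int foo_create_queue(struct request_queue **qp)
{
	struct request_queue *q;
	int ret;

	ret = blk_mq_alloc_tag_set(&foo_tag_set);	/* foo_tag_set: see sketch above */
	if (ret)
		return ret;

	q = blk_mq_init_queue(&foo_tag_set);
	if (IS_ERR(q)) {
		blk_mq_free_tag_set(&foo_tag_set);
		return PTR_ERR(q);
	}

	*qp = q;
	return 0;
}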
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_free_request(struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);

bool blk_mq_queue_inflight(struct request_queue *q);
enum {
	/* return when out of requests */
	BLK_MQ_REQ_NOWAIT	= (__force blk_mq_req_flags_t)(1 << 0),
	/* allocate from reserved pool */
	BLK_MQ_REQ_RESERVED	= (__force blk_mq_req_flags_t)(1 << 1),
	/* allocate internal/sched tag */
	BLK_MQ_REQ_INTERNAL	= (__force blk_mq_req_flags_t)(1 << 2),
	/* set RQF_PREEMPT */
	BLK_MQ_REQ_PREEMPT	= (__force blk_mq_req_flags_t)(1 << 3),
};
struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
		blk_mq_req_flags_t flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
		unsigned int op, blk_mq_req_flags_t flags,
		unsigned int hctx_idx);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
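/*
 * Example (not part of this header): allocating a driver-internal
 * request without sleeping. BLK_MQ_REQ_NOWAIT makes the allocation
 * fail immediately instead of waiting for a free tag; REQ_OP_DRV_IN is
 * one of the passthrough opcodes from blk_types.h. The helper name is
 * hypothetical.
 */
static int foo_send_internal_cmd(struct request_queue *q)
{
	struct request *rq;

	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* ... fill the PDU via blk_mq_rq_to_pdu(rq) and issue the request ... */

	blk_mq_free_request(rq);
	return 0;
}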
enum {
	BLK_MQ_UNIQUE_TAG_BITS = 16,
	BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};

u32 blk_mq_unique_tag(struct request *rq);

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
	return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}

static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}
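/*
 * Example (not part of this header): deriving a single 32-bit cookie
 * for a request and splitting it back into hardware-queue index and
 * per-queue tag, a pattern commonly used by SCSI LLDs. The helper name
 * is hypothetical.
 */
static inline void foo_save_cookie(struct request *rq, u16 *hwq, u16 *tag)
{
	u32 unique = blk_mq_unique_tag(rq);

	*hwq = blk_mq_unique_tag_to_hwq(unique);	/* upper 16 bits */
	*tag = blk_mq_unique_tag_to_tag(unique);	/* lower 16 bits */
}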
int blk_mq_request_started(struct request *rq);
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
bool blk_mq_complete_request(struct request *rq);
void blk_mq_complete_request_sync(struct request *rq);
bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
			   struct bio *bio);
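/*
 * Example (not part of this header): a hedged sketch of the two-stage
 * completion path. The hardware completion handler only looks up the
 * request and marks it complete; the ->complete callback then ends it.
 * foo_handle_hw_completion() and foo_complete_rq() are hypothetical.
 */
static void foo_handle_hw_completion(struct blk_mq_hw_ctx *hctx, unsigned int hw_tag)
{
	struct request *rq = blk_mq_tag_to_rq(hctx->tags, hw_tag);

	if (rq)
		blk_mq_complete_request(rq);	/* invokes ->complete, possibly via IPI */
}

static void foo_complete_rq(struct request *rq)	/* wired up as .complete */
{
	blk_mq_end_request(rq, BLK_STS_OK);	/* or the error reported by hardware */
}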
bool blk_mq_queue_stopped(struct request_queue *q);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv);
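/*
 * Example (not part of this header): using blk_mq_tagset_busy_iter()
 * to push every in-flight request through the completion path during
 * controller teardown, similar in spirit to what NVMe-style drivers
 * do. foo_cancel_request() is hypothetical; returning true continues
 * the iteration, and the driver's ->complete callback is expected to
 * end each request with an error status.
 */
static bool foo_cancel_request(struct request *rq, void *data, bool reserved)
{
	blk_mq_complete_request(rq);
	return true;
}

static void foo_cancel_all(struct blk_mq_tag_set *set)
{
	blk_mq_tagset_busy_iter(set, foo_cancel_request, NULL);
}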
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout);
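/*
 * Example (not part of this header): freezing a queue around a change
 * that must not race with new or in-flight I/O. blk_mq_freeze_queue()
 * blocks new submissions and waits for outstanding requests to finish;
 * blk_mq_unfreeze_queue() lets I/O resume. foo_update_limits() is a
 * hypothetical placeholder for the actual change.
 */
static void foo_update_limits(struct request_queue *q);	/* hypothetical */

static void foo_apply_new_limits(struct request_queue *q)
{
	blk_mq_freeze_queue(q);
	foo_update_limits(q);
	blk_mq_unfreeze_queue(q);
}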
int blk_mq_map_queues(struct blk_mq_queue_map *qmap);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
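/*
 * Example (not part of this header): a hedged sketch of a .map_queues
 * callback that lets the core spread CPUs over each map with
 * blk_mq_map_queues() above. The split between default and poll queues
 * is a hypothetical layout that assumes the set was sized with one
 * extra hardware queue for polling; foo_map_queues() is not a real
 * driver function.
 */
static int foo_map_queues(struct blk_mq_tag_set *set)
{
	unsigned int type;

	for (type = 0; type < set->nr_maps; type++) {
		struct blk_mq_queue_map *map = &set->map[type];

		if (type == HCTX_TYPE_POLL) {
			/* hypothetical: the last hardware queue is reserved for polling */
			map->nr_queues = 1;
			map->queue_offset = set->nr_hw_queues - 1;
		} else {
			map->nr_queues = set->nr_hw_queues - 1;
			map->queue_offset = 0;
		}
		blk_mq_map_queues(map);
	}
	return 0;
}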
void blk_mq_quiesce_queue_nowait(struct request_queue *q);

unsigned int blk_mq_rq_cpu(struct request *rq);
/*
 * Driver command data is immediately after the request. So subtract request
 * size to get back to the original request, add request size to get the PDU.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return rq + 1;
}
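/*
 * Example (not part of this header): round-tripping between a request
 * and its PDU. The PDU space exists because blk_mq_tag_set.cmd_size was
 * set; struct foo_cmd is the hypothetical PDU type from the tag-set
 * sketch earlier in this file.
 */
static inline void foo_init_pdu(struct request *rq)
{
	struct foo_cmd *cmd = blk_mq_rq_to_pdu(rq);	/* memory right behind *rq */

	cmd->status = 0;
	WARN_ON(blk_mq_rq_from_pdu(cmd) != rq);		/* inverse mapping */
}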
#define queue_for_each_hw_ctx(q, hctx, i)				\
	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
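/*
 * Example (not part of this header): walking all hardware queues of a
 * request queue, here to re-run each one asynchronously. The iteration
 * variable must be an unsigned int in the caller's scope; the helper
 * name is hypothetical.
 */
static inline void foo_kick_all_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_run_hw_queue(hctx, true);
}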
static inline blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx,
		struct request *rq)
{
	if (rq->tag != -1)
		return rq->tag | (hctx->queue_num << BLK_QC_T_SHIFT);

	return rq->internal_tag | (hctx->queue_num << BLK_QC_T_SHIFT) |
			BLK_QC_T_INTERNAL;
}