/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned short		index_hw[HCTX_MAX_TYPES];

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;
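
/*
 * Queue freeze/teardown and dispatch helpers, implemented in blk-mq.c.
 */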
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_get_driver_tag(struct request *rq);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);
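
/*
 * Illustrative pairing (a sketch, not verbatim from blk-mq.c): a tag map
 * is allocated first, then populated with requests, and both steps are
 * undone together on the error path:
 *
 *	tags = blk_mq_alloc_rq_map(set, hctx_idx, set->queue_depth,
 *				   set->reserved_tags);
 *	if (tags && blk_mq_alloc_rqs(set, tags, hctx_idx, set->queue_depth)) {
 *		blk_mq_free_rq_map(tags);
 *		tags = NULL;
 *	}
 */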

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);

/* Used by blk_insert_cloned_request() to issue request directly */
blk_status_t blk_mq_request_issue_directly(struct request *rq);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
				    struct list_head *list);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
 * @q: request queue
 * @hctx_type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
							  unsigned int hctx_type,
							  unsigned int cpu)
{
	struct blk_mq_tag_set *set = q->tag_set;

	return q->queue_hw_ctx[set->map[hctx_type].mq_map[cpu]];
}

/*
 * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
 * @q: request queue
 * @flags: request command flags
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     unsigned int flags,
						     unsigned int cpu)
{
	int hctx_type = 0;

	if (q->mq_ops->rq_flags_to_type)
		hctx_type = q->mq_ops->rq_flags_to_type(q, flags);

	return blk_mq_map_queue_type(q, hctx_type, cpu);
}
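
/*
 * Illustrative use (sketch): the submission path resolves the hardware
 * queue for a request from its command flags and the submitting CPU,
 * e.g. data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx->cpu);
 */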

extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

void blk_mq_release(struct request_queue *q);

/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
{
	return READ_ONCE(rq->state);
}
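
/*
 * Example check (illustrative): a completion or timeout path can test
 * whether a request has been started, e.g.
 * if (blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT) ...
 */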

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queueing queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
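
/*
 * blk_mq_get_ctx() disables preemption via get_cpu(), so every caller
 * must balance it with blk_mq_put_ctx() (sketch):
 *
 *	ctx = blk_mq_get_ctx(q);
 *	... use ctx ...
 *	blk_mq_put_ctx(ctx);
 */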

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;
	unsigned int cmd_flags;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};
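
/*
 * Requests allocated for an I/O scheduler (BLK_MQ_REQ_INTERNAL) come from
 * the hctx's scheduler tags; everything else uses the driver tags.
 */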
static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_INTERNAL)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}
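
/*
 * In-flight accounting used by the partition statistics; the _rw variant
 * splits the count by data direction.
 */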
void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
		      unsigned int inflight[2]);
void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
			 unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(hctx);
}

static inline bool blk_mq_get_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(hctx);

	return true;
}
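
/*
 * Dispatch paths pair the two helpers (sketch): budget is taken before a
 * request is handed to the driver and released again if dispatch does
 * not happen:
 *
 *	if (!blk_mq_get_dispatch_budget(hctx))
 *		break;
 *	if (!blk_mq_get_driver_tag(rq)) {
 *		blk_mq_put_dispatch_budget(hctx);
 *		break;
 *	}
 */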

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = -1;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		atomic_dec(&hctx->nr_active);
	}
}
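
/*
 * The wrappers below are no-ops unless the request holds both tags:
 * rq->tag == -1 means no driver tag was assigned, and
 * rq->internal_tag == -1 means there is no scheduler tag, so the driver
 * tag is freed together with the request instead of being put early.
 */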
static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
					      struct request *rq)
{
	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	__blk_mq_put_driver_tag(hctx, rq);
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
}

static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
	int cpu;

	for_each_possible_cpu(cpu)
		qmap->mq_map[cpu] = 0;
}

#endif