/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	}  ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

/*
 * Bits for request->gstate.  The lower two bits carry MQ_RQ_* state value
 * and the upper bits the generation number.
 */
enum mq_rq_state {
	MQ_RQ_IDLE		= 0,
	MQ_RQ_IN_FLIGHT		= 1,
	MQ_RQ_COMPLETE		= 2,

	MQ_RQ_STATE_BITS	= 2,
	MQ_RQ_STATE_MASK	= (1 << MQ_RQ_STATE_BITS) - 1,
	MQ_RQ_GEN_INC		= 1 << MQ_RQ_STATE_BITS,
};
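
/*
 * Illustrative sketch, not part of the original header: given the encoding
 * above, the generation number is everything above the two state bits.
 * The helper name is made up for illustration.
 */
static inline u64 __mq_rq_gen_example(u64 gstate)
{
	/* state lives in the low MQ_RQ_STATE_BITS; generation sits above it */
	return gstate >> MQ_RQ_STATE_BITS;
}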

void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
				bool wait);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
		int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}
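
/*
 * Illustrative sketch, not part of the original header: look up the hw
 * queue that services the CPU a request was submitted on, mirroring how
 * blk_mq_put_driver_tag() below re-derives its hctx. The helper name is
 * made up for illustration.
 */
static inline struct blk_mq_hw_ctx *__blk_mq_rq_to_hctx_example(struct request *rq)
{
	return blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
}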

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

extern void blk_mq_rq_timed_out(struct request *req, bool reserved);

void blk_mq_release(struct request_queue *q);

/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline int blk_mq_rq_state(struct request *rq)
{
	return READ_ONCE(rq->gstate) & MQ_RQ_STATE_MASK;
}

/**
 * blk_mq_rq_update_state() - set the current MQ_RQ_* state of a request
 * @rq: target request.
 * @state: new state to set.
 *
 * Set @rq's state to @state.  The caller is responsible for ensuring that
 * there are no other updaters.  A request can transition into IN_FLIGHT
 * only from IDLE and doing so increments the generation number.
 */
static inline void blk_mq_rq_update_state(struct request *rq,
					  enum mq_rq_state state)
{
	u64 old_val = READ_ONCE(rq->gstate);
	u64 new_val = (old_val & ~MQ_RQ_STATE_MASK) | state;

	if (state == MQ_RQ_IN_FLIGHT) {
		WARN_ON_ONCE((old_val & MQ_RQ_STATE_MASK) != MQ_RQ_IDLE);
		new_val += MQ_RQ_GEN_INC;
	}

	/* avoid exposing interim values */
	WRITE_ONCE(rq->gstate, new_val);
}

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queueing queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
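
/*
 * Illustrative sketch, not part of the original header: blk_mq_get_ctx()
 * disables preemption via get_cpu(), so callers must always balance it
 * with blk_mq_put_ctx(). The helper name is made up for illustration.
 */
static inline void __blk_mq_ctx_pair_example(struct request_queue *q)
{
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);	/* preemption off */

	/* ... use ctx; the task cannot migrate until the put ... */

	blk_mq_put_ctx(ctx);				/* preemption back on */
}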

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_INTERNAL)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}
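
/*
 * Illustrative sketch, not part of the original header: BLK_MQ_REQ_INTERNAL
 * is what routes an allocation to the scheduler tag set instead of the
 * driver tag set. The helper name is made up for illustration.
 */
static inline bool __blk_mq_uses_sched_tags_example(struct blk_mq_alloc_data *data)
{
	return blk_mq_tags_from_data(data) == data->hctx->sched_tags;
}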

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
		      unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(hctx);
}

static inline bool blk_mq_get_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(hctx);
	return true;
}

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = -1;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		atomic_dec(&hctx->nr_active);
	}
}

static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
				       struct request *rq)
{
	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	__blk_mq_put_driver_tag(hctx, rq);
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx;

	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
	__blk_mq_put_driver_tag(hctx, rq);
}

#endif