// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/backing-dev.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"
#include "host.h"

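/*
 * Segment count advertised to the block layer when the DMA layer can merge
 * segments (see host->can_dma_map_merge and mmc_get_max_segments() below).
 */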
#define MMC_DMA_MAP_MERGE_SEGMENTS	512

static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq)
{
	/* Allow only 1 DCMD at a time */
	return mq->in_flight[MMC_ISSUE_DCMD];
}

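/*
 * Clear the busy reasons that no longer apply: DCMD-busy once no DCMD is in
 * flight, and queue-full unconditionally so that dispatch is retried.
 */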
void mmc_cqe_check_busy(struct mmc_queue *mq)
{
	if ((mq->cqe_busy & MMC_CQE_DCMD_BUSY) && !mmc_cqe_dcmd_busy(mq))
		mq->cqe_busy &= ~MMC_CQE_DCMD_BUSY;

	mq->cqe_busy &= ~MMC_CQE_QUEUE_FULL;
}

static inline bool mmc_cqe_can_dcmd(struct mmc_host *host)
{
	return host->caps2 & MMC_CAP2_CQE_DCMD;
}

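/*
 * Classify a request for the CQE: driver-internal and discard/erase requests
 * are issued synchronously, a flush becomes a DCMD when the host supports it,
 * and everything else, notably reads and writes, is issued asynchronously.
 */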
static enum mmc_issue_type mmc_cqe_issue_type(struct mmc_host *host,
					      struct request *req)
{
	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		return MMC_ISSUE_SYNC;
	case REQ_OP_FLUSH:
		return mmc_cqe_can_dcmd(host) ? MMC_ISSUE_DCMD : MMC_ISSUE_SYNC;
	default:
		return MMC_ISSUE_ASYNC;
	}
}

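/*
 * Decide how a request will be issued. With a CQE (and no host software
 * queue) the decision is delegated to mmc_cqe_issue_type(); otherwise reads
 * and writes are asynchronous and everything else is synchronous.
 */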
enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
{
	struct mmc_host *host = mq->card->host;

	if (mq->use_cqe && !host->hsq_enabled)
		return mmc_cqe_issue_type(host, req);

	if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)
		return MMC_ISSUE_ASYNC;

	return MMC_ISSUE_SYNC;
}

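/*
 * Kick off the recovery work unless it is already pending. Callers are
 * expected to hold mq->lock, which is what serializes recovery_needed
 * against the timeout and issue paths.
 */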
static void __mmc_cqe_recovery_notifier(struct mmc_queue *mq)
{
	if (!mq->recovery_needed) {
		mq->recovery_needed = true;
		schedule_work(&mq->recovery_work);
	}
}

void mmc_cqe_recovery_notifier(struct mmc_request *mrq)
{
	struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
						  brq.mrq);
	struct request *req = mmc_queue_req_to_req(mqrq);
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	unsigned long flags;

	spin_lock_irqsave(&mq->lock, flags);
	__mmc_cqe_recovery_notifier(mq);
	spin_unlock_irqrestore(&mq->lock, flags);
}

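/*
 * Block layer timeout for a CQE request: ask the host controller whether the
 * request is still in flight; if so, optionally trigger recovery and keep
 * waiting, otherwise the request has already completed.
 */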
static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_request *mrq = &mqrq->brq.mrq;
	struct mmc_queue *mq = req->q->queuedata;
	struct mmc_host *host = mq->card->host;
	enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
	bool recovery_needed = false;

	switch (issue_type) {
	case MMC_ISSUE_ASYNC:
	case MMC_ISSUE_DCMD:
		if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) {
			if (recovery_needed)
				mmc_cqe_recovery_notifier(mrq);
			return BLK_EH_RESET_TIMER;
		}
		/* The request has gone already */
		return BLK_EH_DONE;
	default:
		/* Timeout is handled by mmc core */
		return BLK_EH_RESET_TIMER;
	}
}

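/*
 * blk-mq timeout handler. The timeout is ignored (timer reset) while
 * recovery is in progress, when no CQE is in use, or when the host software
 * queue is enabled; otherwise it is handed to mmc_cqe_timed_out().
 */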
static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
						 bool reserved)
{
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;
	unsigned long flags;
	bool ignore_tout;

	spin_lock_irqsave(&mq->lock, flags);
	ignore_tout = mq->recovery_needed || !mq->use_cqe || host->hsq_enabled;
	spin_unlock_irqrestore(&mq->lock, flags);

	return ignore_tout ? BLK_EH_RESET_TIMER : mmc_cqe_timed_out(req);
}

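/*
 * Recovery work: claim the host, run the CQE or plain blk-mq recovery path
 * as appropriate, then release the host and restart the hardware queues.
 */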
static void mmc_mq_recovery_handler(struct work_struct *work)
{
	struct mmc_queue *mq = container_of(work, struct mmc_queue,
					    recovery_work);
	struct request_queue *q = mq->queue;
	struct mmc_host *host = mq->card->host;

	mmc_get_card(mq->card, &mq->ctx);

	mq->in_recovery = true;

	if (mq->use_cqe && !host->hsq_enabled)
		mmc_blk_cqe_recovery(mq);
	else
		mmc_blk_mq_recovery(mq);

	mq->in_recovery = false;

	spin_lock_irq(&mq->lock);
	mq->recovery_needed = false;
	spin_unlock_irq(&mq->lock);

	if (host->hsq_enabled)
		host->cqe_ops->cqe_recovery_finish(host);

	mmc_put_card(mq->card, &mq->ctx);

	blk_mq_run_hw_queues(q, true);
}

static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp)
{
	struct scatterlist *sg;

	sg = kmalloc_array(sg_len, sizeof(*sg), gfp);
	if (sg)
		sg_init_table(sg, sg_len);

	return sg;
}

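/*
 * Advertise the card's discard capabilities to the block layer: maximum
 * discard size, granularity (derived from the preferred erase size), and
 * secure erase support.
 */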
static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
	blk_queue_max_discard_sectors(q, max_discard);
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card))
		blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
}

static unsigned int mmc_get_max_segments(struct mmc_host *host)
{
	return host->can_dma_map_merge ? MMC_DMA_MAP_MERGE_SEGMENTS :
					 host->max_segs;
}

/**
 * __mmc_init_request() - initialize the MMC-specific per-request data
 * @mq: the mmc queue
 * @req: the request
 * @gfp: memory allocation policy
 */
static int __mmc_init_request(struct mmc_queue *mq, struct request *req,
			      gfp_t gfp)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;

	mq_rq->sg = mmc_alloc_sg(mmc_get_max_segments(host), gfp);
	if (!mq_rq->sg)
		return -ENOMEM;

	return 0;
}

static void mmc_exit_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);

	kfree(mq_rq->sg);
	mq_rq->sg = NULL;
}

static int mmc_mq_init_request(struct blk_mq_tag_set *set, struct request *req,
			       unsigned int hctx_idx, unsigned int numa_node)
{
	return __mmc_init_request(set->driver_data, req, GFP_KERNEL);
}

static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
				unsigned int hctx_idx)
{
	struct mmc_queue *mq = set->driver_data;

	mmc_exit_request(mq->queue, req);
}

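/*
 * blk-mq .queue_rq handler: classify the request, enforce the busy and
 * recovery constraints under mq->lock, claim the host for the first
 * in-flight request, issue it, and map the result onto a blk_status_t.
 */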
static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;
	enum mmc_issue_type issue_type;
	enum mmc_issued issued;
	bool get_card, cqe_retune_ok;
	int ret;

	if (mmc_card_removed(mq->card)) {
		req->rq_flags |= RQF_QUIET;
		return BLK_STS_IOERR;
	}

	issue_type = mmc_issue_type(mq, req);

	spin_lock_irq(&mq->lock);

	if (mq->recovery_needed || mq->busy) {
		spin_unlock_irq(&mq->lock);
		return BLK_STS_RESOURCE;
	}

	switch (issue_type) {
	case MMC_ISSUE_DCMD:
		if (mmc_cqe_dcmd_busy(mq)) {
			mq->cqe_busy |= MMC_CQE_DCMD_BUSY;
			spin_unlock_irq(&mq->lock);
			return BLK_STS_RESOURCE;
		}
		break;
	case MMC_ISSUE_ASYNC:
		/*
		 * For the MMC host software queue, we only allow 2 requests
		 * in flight to avoid a long latency.
		 */
		if (host->hsq_enabled && mq->in_flight[issue_type] > 2) {
			spin_unlock_irq(&mq->lock);
			return BLK_STS_RESOURCE;
		}
		break;
	default:
		/*
		 * Timeouts are handled by mmc core, and we don't have a host
		 * API to abort requests, so we can't handle the timeout anyway.
		 * However, when the timeout happens, blk_mq_complete_request()
		 * no longer works (to stop the request disappearing under us).
		 * To avoid racing with that, set a large timeout.
		 */
		req->timeout = 600 * HZ;
		break;
	}

	/* Parallel dispatch of requests is not supported at the moment */
	mq->busy = true;

	mq->in_flight[issue_type] += 1;
	get_card = (mmc_tot_in_flight(mq) == 1);
	cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);

	spin_unlock_irq(&mq->lock);

	if (!(req->rq_flags & RQF_DONTPREP)) {
		req_to_mmc_queue_req(req)->retries = 0;
		req->rq_flags |= RQF_DONTPREP;
	}

	if (get_card)
		mmc_get_card(card, &mq->ctx);

	if (mq->use_cqe) {
		host->retune_now = host->need_retune && cqe_retune_ok &&
				   !host->hold_retune;
	}

	blk_mq_start_request(req);

	issued = mmc_blk_mq_issue_rq(mq, req);

	switch (issued) {
	case MMC_REQ_BUSY:
		ret = BLK_STS_RESOURCE;
		break;
	case MMC_REQ_FAILED_TO_START:
		ret = BLK_STS_IOERR;
		break;
	default:
		ret = BLK_STS_OK;
		break;
	}

	if (issued != MMC_REQ_STARTED) {
		bool put_card = false;

		spin_lock_irq(&mq->lock);
		mq->in_flight[issue_type] -= 1;
		if (mmc_tot_in_flight(mq) == 0)
			put_card = true;
		mq->busy = false;
		spin_unlock_irq(&mq->lock);
		if (put_card)
			mmc_put_card(card, &mq->ctx);
	} else {
		WRITE_ONCE(mq->busy, false);
	}

	return ret;
}

static const struct blk_mq_ops mmc_mq_ops = {
	.queue_rq	= mmc_mq_queue_rq,
	.init_request	= mmc_mq_init_request,
	.exit_request	= mmc_mq_exit_request,
	.complete	= mmc_blk_mq_complete,
	.timeout	= mmc_mq_timed_out,
};

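/*
 * Apply the host and card limits (max sectors, segment count and size,
 * logical block size, discard support) to the request queue, and set up the
 * recovery and completion machinery.
 */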
static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned block_size = 512;

	blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

	if (!mmc_dev(host)->dma_mask || !*mmc_dev(host)->dma_mask)
		blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
	blk_queue_max_hw_sectors(mq->queue,
		min(host->max_blk_count, host->max_req_size / 512));
	if (host->can_dma_map_merge)
		WARN(!blk_queue_can_use_dma_map_merging(mq->queue,
							mmc_dev(host)),
		     "merging was advertised but not possible");
	blk_queue_max_segments(mq->queue, mmc_get_max_segments(host));

	if (mmc_card_mmc(card))
		block_size = card->ext_csd.data_sector_size;

	blk_queue_logical_block_size(mq->queue, block_size);
	/*
	 * When blk_queue_can_use_dma_map_merging() succeeds, it calls
	 * blk_queue_virt_boundary(), in which case we must not also call
	 * blk_queue_max_segment_size().
	 */
	if (!host->can_dma_map_merge)
		blk_queue_max_segment_size(mq->queue,
			round_down(host->max_seg_size, block_size));

	dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));

	INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
	INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);

	mutex_init(&mq->complete_lock);

	init_waitqueue_head(&mq->wait);
}

static inline bool mmc_merge_capable(struct mmc_host *host)
{
	return host->caps2 & MMC_CAP2_MERGE_CAPABLE;
}

/* Set queue depth to get a reasonable value for q->nr_requests */
#define MMC_QUEUE_DEPTH 64

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	int ret;

	mq->card = card;
	mq->use_cqe = host->cqe_enabled;

	spin_lock_init(&mq->lock);

	memset(&mq->tag_set, 0, sizeof(mq->tag_set));
	mq->tag_set.ops = &mmc_mq_ops;
	/*
	 * The queue depth for CQE must match the hardware because the request
	 * tag is used to index the hardware queue.
	 */
	if (mq->use_cqe && !host->hsq_enabled)
		mq->tag_set.queue_depth =
			min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
	else
		mq->tag_set.queue_depth = MMC_QUEUE_DEPTH;
	mq->tag_set.numa_node = NUMA_NO_NODE;
	mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
	mq->tag_set.nr_hw_queues = 1;
	mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
	mq->tag_set.driver_data = mq;

	/*
	 * Since blk_mq_alloc_tag_set() calls .init_request() of mmc_mq_ops,
	 * host->can_dma_map_merge must be set before that call so that
	 * mmc_get_max_segments() returns the correct number of segments.
	 */
	if (mmc_merge_capable(host) &&
	    host->max_segs < MMC_DMA_MAP_MERGE_SEGMENTS &&
	    dma_get_merge_boundary(mmc_dev(host)))
		host->can_dma_map_merge = 1;
	else
		host->can_dma_map_merge = 0;

	ret = blk_mq_alloc_tag_set(&mq->tag_set);
	if (ret)
		return ret;

	mq->queue = blk_mq_init_queue(&mq->tag_set);
	if (IS_ERR(mq->queue)) {
		ret = PTR_ERR(mq->queue);
		goto free_tag_set;
	}

	if (mmc_host_is_spi(host) && host->use_spi_crc)
		mq->queue->backing_dev_info->capabilities |=
			BDI_CAP_STABLE_WRITES;

	mq->queue->queuedata = mq;
	blk_queue_rq_timeout(mq->queue, 60 * HZ);

	mmc_setup_queue(mq, card);
	return 0;

free_tag_set:
	blk_mq_free_tag_set(&mq->tag_set);
	return ret;
}

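/*
 * Stop new requests from being dispatched and wait for outstanding ones to
 * finish.
 */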
void mmc_queue_suspend(struct mmc_queue *mq)
{
	blk_mq_quiesce_queue(mq->queue);

	/*
	 * The host remains claimed while there are outstanding requests, so
	 * simply claiming and releasing here ensures there are none.
	 */
	mmc_claim_host(mq->card->host);
	mmc_release_host(mq->card->host);
}

void mmc_queue_resume(struct mmc_queue *mq)
{
	blk_mq_unquiesce_queue(mq->queue);
}

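/*
 * Tear down the request queue: unquiesce it if it was suspended, release the
 * blk-mq resources, and flush any leftover completion work.
 */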
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;

	/*
	 * The legacy code handled the possibility of being suspended,
	 * so do that here too.
	 */
	if (blk_queue_quiesced(q))
		blk_mq_unquiesce_queue(q);

	blk_cleanup_queue(q);
	blk_mq_free_tag_set(&mq->tag_set);

	/*
	 * A request can be completed before the next request, potentially
	 * leaving a complete_work with nothing to do. Such a work item might
	 * still be queued at this point. Flush it.
	 */
	flush_work(&mq->complete_work);

	mq->card = NULL;
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	struct request *req = mmc_queue_req_to_req(mqrq);

	return blk_rq_map_sg(mq->queue, req, mqrq->sg);
}
98ccf149 | 538 | } |