// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022 Qualcomm Innovation Center. All rights reserved.
 *
 * Authors:
 *	Asutosh Das <quic_asutoshd@quicinc.com>
 *	Can Guo <quic_cang@quicinc.com>
 */

#include <asm/unaligned.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include "ufshcd-priv.h"
#include <linux/delay.h>
#include <scsi/scsi_cmnd.h>
#include <linux/bitfield.h>
#include <linux/iopoll.h>

#define MAX_QUEUE_SUP GENMASK(7, 0)
#define UFS_MCQ_MIN_RW_QUEUES 2
#define UFS_MCQ_MIN_READ_QUEUES 0
#define UFS_MCQ_MIN_POLL_QUEUES 0
#define QUEUE_EN_OFFSET 31
#define QUEUE_ID_OFFSET 16

#define MCQ_CFG_MAC_MASK	GENMASK(16, 8)
#define MCQ_QCFG_SIZE		0x40
#define MCQ_ENTRY_SIZE_IN_DWORD	8
#define CQE_UCD_BA GENMASK_ULL(63, 7)

/* Max mcq register polling time in microseconds */
#define MCQ_POLL_US 500000

static int rw_queue_count_set(const char *val, const struct kernel_param *kp)
{
	return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_RW_QUEUES,
				     num_possible_cpus());
}

static const struct kernel_param_ops rw_queue_count_ops = {
	.set = rw_queue_count_set,
	.get = param_get_uint,
};

static unsigned int rw_queues;
module_param_cb(rw_queues, &rw_queue_count_ops, &rw_queues, 0644);
MODULE_PARM_DESC(rw_queues,
		 "Number of interrupt driven I/O queues used for rw. Default value is nr_cpus");

static int read_queue_count_set(const char *val, const struct kernel_param *kp)
{
	return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_READ_QUEUES,
				     num_possible_cpus());
}

static const struct kernel_param_ops read_queue_count_ops = {
	.set = read_queue_count_set,
	.get = param_get_uint,
};

static unsigned int read_queues;
module_param_cb(read_queues, &read_queue_count_ops, &read_queues, 0644);
MODULE_PARM_DESC(read_queues,
		 "Number of interrupt driven read queues used for read. Default value is 0");

static int poll_queue_count_set(const char *val, const struct kernel_param *kp)
{
	return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_POLL_QUEUES,
				     num_possible_cpus());
}

static const struct kernel_param_ops poll_queue_count_ops = {
	.set = poll_queue_count_set,
	.get = param_get_uint,
};

static unsigned int poll_queues = 1;
module_param_cb(poll_queues, &poll_queue_count_ops, &poll_queues, 0644);
MODULE_PARM_DESC(poll_queues,
		 "Number of poll queues used for r/w. Default value is 1");

/**
 * ufshcd_mcq_config_mac - Set the #Max Active Cmds.
 * @hba: per adapter instance
 * @max_active_cmds: maximum # of active commands to the device at any time.
 *
 * The controller won't send more than the max_active_cmds to the device at
 * any time.
 */
void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds)
{
	u32 val;

	val = ufshcd_readl(hba, REG_UFS_MCQ_CFG);
	val &= ~MCQ_CFG_MAC_MASK;
	val |= FIELD_PREP(MCQ_CFG_MAC_MASK, max_active_cmds);
	ufshcd_writel(hba, val, REG_UFS_MCQ_CFG);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_config_mac);

/**
 * ufshcd_mcq_req_to_hwq - find the hardware queue on which the
 * request would be issued.
 * @hba: per adapter instance
 * @req: pointer to the request to be issued
 *
 * Return: the hardware queue instance on which the request would
 * be queued.
 */
struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
					   struct request *req)
{
	u32 utag = blk_mq_unique_tag(req);
	u32 hwq = blk_mq_unique_tag_to_hwq(utag);

	return &hba->uhq[hwq];
}

/**
 * ufshcd_mcq_decide_queue_depth - decide the queue depth
 * @hba: per adapter instance
 *
 * Return: queue-depth on success, non-zero on error
 *
 * MAC - Max. Active Command of the Host Controller (HC)
 * HC wouldn't send more commands than this to the device.
 * It is mandatory to implement get_hba_mac() to enable MCQ mode.
 * Calculates and adjusts the queue depth based on the depth
 * supported by the HC and UFS device.
 */
int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba)
{
	int mac;

	/* Mandatory to implement get_hba_mac() */
	mac = ufshcd_mcq_vops_get_hba_mac(hba);
	if (mac < 0) {
		dev_err(hba->dev, "Failed to get mac, err=%d\n", mac);
		return mac;
	}

	WARN_ON_ONCE(!hba->dev_info.bqueuedepth);
	/*
	 * max. value of bqueuedepth = 256, mac is host dependent.
	 * It is mandatory for UFS device to define bQueueDepth if
	 * shared queuing architecture is enabled.
	 */
	return min_t(int, mac, hba->dev_info.bqueuedepth);
}

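/*
 * Example of the split computed below: with the defaults (rw_queues == 0,
 * read_queues == 0, poll_queues == 1) on an 8-CPU host whose controller
 * supports 16 queues, rw_queues falls back to num_possible_cpus(), giving
 * HCTX_TYPE_DEFAULT = 8, HCTX_TYPE_READ = 0 and HCTX_TYPE_POLL = 1,
 * i.e. nine hardware queues in total.
 */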
static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
{
	int i;
	u32 hba_maxq, rem, tot_queues;
	struct Scsi_Host *host = hba->host;

	/* maxq is 0 based value */
	hba_maxq = FIELD_GET(MAX_QUEUE_SUP, hba->mcq_capabilities) + 1;

	tot_queues = read_queues + poll_queues + rw_queues;

	if (hba_maxq < tot_queues) {
		dev_err(hba->dev, "Total queues (%d) exceeds HC capacity (%d)\n",
			tot_queues, hba_maxq);
		return -EOPNOTSUPP;
	}

	rem = hba_maxq;

	if (rw_queues) {
		hba->nr_queues[HCTX_TYPE_DEFAULT] = rw_queues;
		rem -= hba->nr_queues[HCTX_TYPE_DEFAULT];
	} else {
		rw_queues = num_possible_cpus();
	}

	if (poll_queues) {
		hba->nr_queues[HCTX_TYPE_POLL] = poll_queues;
		rem -= hba->nr_queues[HCTX_TYPE_POLL];
	}

	if (read_queues) {
		hba->nr_queues[HCTX_TYPE_READ] = read_queues;
		rem -= hba->nr_queues[HCTX_TYPE_READ];
	}

	if (!hba->nr_queues[HCTX_TYPE_DEFAULT])
		hba->nr_queues[HCTX_TYPE_DEFAULT] = min3(rem, rw_queues,
							 num_possible_cpus());

	for (i = 0; i < HCTX_MAX_TYPES; i++)
		host->nr_hw_queues += hba->nr_queues[i];

	hba->nr_hw_queues = host->nr_hw_queues;
	return 0;
}

int ufshcd_mcq_memory_alloc(struct ufs_hba *hba)
{
	struct ufs_hw_queue *hwq;
	size_t utrdl_size, cqe_size;
	int i;

	for (i = 0; i < hba->nr_hw_queues; i++) {
		hwq = &hba->uhq[i];

		utrdl_size = sizeof(struct utp_transfer_req_desc) *
			     hwq->max_entries;
		hwq->sqe_base_addr = dmam_alloc_coherent(hba->dev, utrdl_size,
							 &hwq->sqe_dma_addr,
							 GFP_KERNEL);
		if (!hwq->sqe_dma_addr) {
			dev_err(hba->dev, "SQE allocation failed\n");
			return -ENOMEM;
		}

		cqe_size = sizeof(struct cq_entry) * hwq->max_entries;
		hwq->cqe_base_addr = dmam_alloc_coherent(hba->dev, cqe_size,
							 &hwq->cqe_dma_addr,
							 GFP_KERNEL);
		if (!hwq->cqe_dma_addr) {
			dev_err(hba->dev, "CQE allocation failed\n");
			return -ENOMEM;
		}
	}

	return 0;
}

/* Operation and runtime registers configuration */
#define MCQ_CFG_n(r, i)	((r) + MCQ_QCFG_SIZE * (i))
#define MCQ_OPR_OFFSET_n(p, i) \
	(hba->mcq_opr[(p)].offset + hba->mcq_opr[(p)].stride * (i))

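/*
 * Per-queue config registers are laid out at a fixed stride: queue i's copy
 * of register r lives at r + MCQ_QCFG_SIZE * i, so with MCQ_QCFG_SIZE = 0x40,
 * queue 2's REG_SQLBA is found at REG_SQLBA + 0x80.
 */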
static void __iomem *mcq_opr_base(struct ufs_hba *hba,
				  enum ufshcd_mcq_opr n, int i)
{
	struct ufshcd_mcq_opr_info_t *opr = &hba->mcq_opr[n];

	return opr->base + opr->stride * i;
}

u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i)
{
	return readl(mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIS);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_read_cqis);

void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i)
{
	writel(val, mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIS);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_write_cqis);

/*
 * Current MCQ specification doesn't provide a Task Tag or its equivalent in
 * the Completion Queue Entry. Find the Task Tag using an indirect method.
 */
static int ufshcd_mcq_get_tag(struct ufs_hba *hba,
			      struct ufs_hw_queue *hwq,
			      struct cq_entry *cqe)
{
	u64 addr;

	/* sizeof(struct utp_transfer_cmd_desc) must be a multiple of 128 */
	BUILD_BUG_ON(sizeof(struct utp_transfer_cmd_desc) & GENMASK(6, 0));

	/* Bits 63:7 UCD base address, 6:5 are reserved, 4:0 is SQ ID */
	addr = (le64_to_cpu(cqe->command_desc_base_addr) & CQE_UCD_BA) -
		hba->ucdl_dma_addr;

	return div_u64(addr, ufshcd_get_ucd_size(hba));
}

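/*
 * Example of the arithmetic above: the UCDs live in one contiguous DMA
 * allocation starting at hba->ucdl_dma_addr, each ufshcd_get_ucd_size(hba)
 * bytes long, so a CQE whose UCD base address points at the third
 * descriptor yields addr = 2 * ucd_size and hence task tag 2.
 */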
static void ufshcd_mcq_process_cqe(struct ufs_hba *hba,
				   struct ufs_hw_queue *hwq)
{
	struct cq_entry *cqe = ufshcd_mcq_cur_cqe(hwq);
	int tag = ufshcd_mcq_get_tag(hba, hwq, cqe);

	if (cqe->command_desc_base_addr) {
		ufshcd_compl_one_cqe(hba, tag, cqe);
		/* After processing the cqe, mark it as an empty (invalid) entry */
		cqe->command_desc_base_addr = 0;
	}
}

void ufshcd_mcq_compl_all_cqes_lock(struct ufs_hba *hba,
				    struct ufs_hw_queue *hwq)
{
	unsigned long flags;
	u32 entries = hwq->max_entries;

	spin_lock_irqsave(&hwq->cq_lock, flags);
	while (entries > 0) {
		ufshcd_mcq_process_cqe(hba, hwq);
		ufshcd_mcq_inc_cq_head_slot(hwq);
		entries--;
	}

	ufshcd_mcq_update_cq_tail_slot(hwq);
	hwq->cq_head_slot = hwq->cq_tail_slot;
	spin_unlock_irqrestore(&hwq->cq_lock, flags);
}

unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
				       struct ufs_hw_queue *hwq)
{
	unsigned long completed_reqs = 0;
	unsigned long flags;

	spin_lock_irqsave(&hwq->cq_lock, flags);
	ufshcd_mcq_update_cq_tail_slot(hwq);
	while (!ufshcd_mcq_is_cq_empty(hwq)) {
		ufshcd_mcq_process_cqe(hba, hwq);
		ufshcd_mcq_inc_cq_head_slot(hwq);
		completed_reqs++;
	}

	if (completed_reqs)
		ufshcd_mcq_update_cq_head(hwq);
	spin_unlock_irqrestore(&hwq->cq_lock, flags);

	return completed_reqs;
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_poll_cqe_lock);

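/*
 * Note on the polling protocol above: the CQ tail is sampled once under
 * cq_lock, the entries up to that snapshot are completed, and only then is
 * the new head position published back to the controller via
 * ufshcd_mcq_update_cq_head().
 */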
void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
{
	struct ufs_hw_queue *hwq;
	u16 qsize;
	int i;

	for (i = 0; i < hba->nr_hw_queues; i++) {
		hwq = &hba->uhq[i];
		hwq->id = i;
		qsize = hwq->max_entries * MCQ_ENTRY_SIZE_IN_DWORD - 1;

		/* Submission Queue Lower Base Address */
		ufsmcq_writelx(hba, lower_32_bits(hwq->sqe_dma_addr),
			       MCQ_CFG_n(REG_SQLBA, i));
		/* Submission Queue Upper Base Address */
		ufsmcq_writelx(hba, upper_32_bits(hwq->sqe_dma_addr),
			       MCQ_CFG_n(REG_SQUBA, i));
		/* Submission Queue Doorbell Address Offset */
		ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQD, i),
			       MCQ_CFG_n(REG_SQDAO, i));
		/* Submission Queue Interrupt Status Address Offset */
		ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQIS, i),
			       MCQ_CFG_n(REG_SQISAO, i));

		/* Completion Queue Lower Base Address */
		ufsmcq_writelx(hba, lower_32_bits(hwq->cqe_dma_addr),
			       MCQ_CFG_n(REG_CQLBA, i));
		/* Completion Queue Upper Base Address */
		ufsmcq_writelx(hba, upper_32_bits(hwq->cqe_dma_addr),
			       MCQ_CFG_n(REG_CQUBA, i));
		/* Completion Queue Doorbell Address Offset */
		ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_CQD, i),
			       MCQ_CFG_n(REG_CQDAO, i));
		/* Completion Queue Interrupt Status Address Offset */
		ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_CQIS, i),
			       MCQ_CFG_n(REG_CQISAO, i));

		/* Save the base addresses for quicker access */
		hwq->mcq_sq_head = mcq_opr_base(hba, OPR_SQD, i) + REG_SQHP;
		hwq->mcq_sq_tail = mcq_opr_base(hba, OPR_SQD, i) + REG_SQTP;
		hwq->mcq_cq_head = mcq_opr_base(hba, OPR_CQD, i) + REG_CQHP;
		hwq->mcq_cq_tail = mcq_opr_base(hba, OPR_CQD, i) + REG_CQTP;

		/* Reinitializing is needed upon HC reset */
		hwq->sq_tail_slot = hwq->cq_tail_slot = hwq->cq_head_slot = 0;

		/* Enable Tail Entry Push Status interrupt only for non-poll queues */
		if (i < hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL])
			writel(1, mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIE);

		/* Completion Queue Enable|Size to Completion Queue Attribute */
		ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize,
			      MCQ_CFG_n(REG_CQATTR, i));

		/*
		 * Submission Queue Enable|Size|Completion Queue ID to
		 * Submission Queue Attribute
		 */
		ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize |
			      (i << QUEUE_ID_OFFSET),
			      MCQ_CFG_n(REG_SQATTR, i));
	}
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_make_queues_operational);

void ufshcd_mcq_enable_esi(struct ufs_hba *hba)
{
	ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x2,
		      REG_UFS_MEM_CFG);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_enable_esi);

void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg)
{
	ufshcd_writel(hba, msg->address_lo, REG_UFS_ESILBA);
	ufshcd_writel(hba, msg->address_hi, REG_UFS_ESIUBA);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_config_esi);

int ufshcd_mcq_init(struct ufs_hba *hba)
{
	struct Scsi_Host *host = hba->host;
	struct ufs_hw_queue *hwq;
	int ret, i;

	ret = ufshcd_mcq_config_nr_queues(hba);
	if (ret)
		return ret;

	ret = ufshcd_vops_mcq_config_resource(hba);
	if (ret)
		return ret;

	ret = ufshcd_mcq_vops_op_runtime_config(hba);
	if (ret) {
		dev_err(hba->dev, "Operation runtime config failed, ret=%d\n",
			ret);
		return ret;
	}

	hba->uhq = devm_kzalloc(hba->dev,
				hba->nr_hw_queues * sizeof(struct ufs_hw_queue),
				GFP_KERNEL);
	if (!hba->uhq) {
		dev_err(hba->dev, "ufs hw queue memory allocation failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < hba->nr_hw_queues; i++) {
		hwq = &hba->uhq[i];
		hwq->max_entries = hba->nutrs + 1;
		spin_lock_init(&hwq->sq_lock);
		spin_lock_init(&hwq->cq_lock);
		mutex_init(&hwq->sq_mutex);
	}

	/* The very first HW queue serves device commands */
	hba->dev_cmd_queue = &hba->uhq[0];

	host->host_tagset = 1;
	return 0;
}

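/*
 * SQ run-time control: writing SQ_STOP or SQ_START to SQRTCy requests the
 * state change, and the controller acknowledges it through SQRTSy.STS,
 * which the two helpers below poll for up to MCQ_POLL_US microseconds.
 */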
static int ufshcd_mcq_sq_stop(struct ufs_hba *hba, struct ufs_hw_queue *hwq)
{
	void __iomem *reg;
	u32 id = hwq->id, val;
	int err;

	if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
		return 0;

	writel(SQ_STOP, mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTC);
	reg = mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTS;
	err = read_poll_timeout(readl, val, val & SQ_STS, 20,
				MCQ_POLL_US, false, reg);
	if (err)
		dev_err(hba->dev, "%s: failed. hwq-id=%d, err=%d\n",
			__func__, id, err);
	return err;
}

static int ufshcd_mcq_sq_start(struct ufs_hba *hba, struct ufs_hw_queue *hwq)
{
	void __iomem *reg;
	u32 id = hwq->id, val;
	int err;

	if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
		return 0;

	writel(SQ_START, mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTC);
	reg = mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTS;
	err = read_poll_timeout(readl, val, !(val & SQ_STS), 20,
				MCQ_POLL_US, false, reg);
	if (err)
		dev_err(hba->dev, "%s: failed. hwq-id=%d, err=%d\n",
			__func__, id, err);
	return err;
}

/**
 * ufshcd_mcq_sq_cleanup - Clean up submission queue resources
 * associated with the pending command.
 * @hba: per adapter instance.
 * @task_tag: The command's task tag.
 *
 * Return: 0 for success; error code otherwise.
 */
int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
{
	struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
	struct scsi_cmnd *cmd = lrbp->cmd;
	struct ufs_hw_queue *hwq;
	void __iomem *reg, *opr_sqd_base;
	u32 nexus, id, val;
	int err;

	if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
		return -ETIMEDOUT;

	if (task_tag != hba->nutrs - UFSHCD_NUM_RESERVED) {
		if (!cmd)
			return -EINVAL;
		hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
	} else {
		hwq = hba->dev_cmd_queue;
	}

	id = hwq->id;

	mutex_lock(&hwq->sq_mutex);

	/* stop the SQ fetching before working on it */
	err = ufshcd_mcq_sq_stop(hba, hwq);
	if (err)
		goto unlock;

	/* SQCTI = EXT_IID, IID, LUN, Task Tag */
	nexus = lrbp->lun << 8 | task_tag;
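	/*
	 * Example (illustrative): a command on LUN 2 with task tag 5 yields
	 * nexus = (2 << 8) | 5 = 0x205.
	 */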
	opr_sqd_base = mcq_opr_base(hba, OPR_SQD, id);
	writel(nexus, opr_sqd_base + REG_SQCTI);

	/* SQRTCy.ICU = 1 */
	writel(SQ_ICU, opr_sqd_base + REG_SQRTC);

	/* Poll SQRTSy.CUS = 1. Return result from SQRTSy.RTC */
	reg = opr_sqd_base + REG_SQRTS;
	err = read_poll_timeout(readl, val, val & SQ_CUS, 20,
				MCQ_POLL_US, false, reg);
	if (err)
		dev_err(hba->dev, "%s: failed. hwq=%d, tag=%d err=%ld\n",
			__func__, id, task_tag,
			FIELD_GET(SQ_ICU_ERR_CODE_MASK, readl(reg)));

	if (ufshcd_mcq_sq_start(hba, hwq))
		err = -ETIMEDOUT;

unlock:
	mutex_unlock(&hwq->sq_mutex);
	return err;
}

/**
 * ufshcd_mcq_nullify_sqe - Nullify the submission queue entry.
 * Write the sqe's Command Type to 0xF. The host controller will not
 * fetch any sqe with Command Type = 0xF.
 *
 * @utrd: UTP Transfer Request Descriptor to be nullified.
 */
static void ufshcd_mcq_nullify_sqe(struct utp_transfer_req_desc *utrd)
{
	utrd->header.command_type = 0xf;
}

/**
 * ufshcd_mcq_sqe_search - Search for the command in the submission queue
 * If the command is in the submission queue and not issued to the device yet,
 * nullify the sqe so the host controller will skip fetching the sqe.
 *
 * @hba: per adapter instance.
 * @hwq: Hardware Queue to be searched.
 * @task_tag: The command's task tag.
 *
 * Return: true if the SQE containing the command is present in the SQ
 * (not fetched by the controller); returns false if the SQE is not in the SQ.
 */
static bool ufshcd_mcq_sqe_search(struct ufs_hba *hba,
				  struct ufs_hw_queue *hwq, int task_tag)
{
	struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
	struct utp_transfer_req_desc *utrd;
	__le64 cmd_desc_base_addr;
	bool ret = false;
	u64 addr, match;
	u32 sq_head_slot;

	if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
		return true;

	mutex_lock(&hwq->sq_mutex);

	ufshcd_mcq_sq_stop(hba, hwq);
	sq_head_slot = ufshcd_mcq_get_sq_head_slot(hwq);
	if (sq_head_slot == hwq->sq_tail_slot)
		goto out;

	cmd_desc_base_addr = lrbp->utr_descriptor_ptr->command_desc_base_addr;
	addr = le64_to_cpu(cmd_desc_base_addr) & CQE_UCD_BA;

	while (sq_head_slot != hwq->sq_tail_slot) {
		utrd = hwq->sqe_base_addr +
			sq_head_slot * sizeof(struct utp_transfer_req_desc);
		match = le64_to_cpu(utrd->command_desc_base_addr) & CQE_UCD_BA;
		if (addr == match) {
			ufshcd_mcq_nullify_sqe(utrd);
			ret = true;
			goto out;
		}

		sq_head_slot++;
		if (sq_head_slot == hwq->max_entries)
			sq_head_slot = 0;
	}

out:
	ufshcd_mcq_sq_start(hba, hwq);
	mutex_unlock(&hwq->sq_mutex);
	return ret;
}

/**
 * ufshcd_mcq_abort - Abort the command in MCQ.
 * @cmd: The command to be aborted.
 *
 * Return: SUCCESS or FAILED error codes
 */
int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct ufs_hba *hba = shost_priv(host);
	int tag = scsi_cmd_to_rq(cmd)->tag;
	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
	struct ufs_hw_queue *hwq;
	unsigned long flags;
	int err = FAILED;

	if (!ufshcd_cmd_inflight(lrbp->cmd)) {
		dev_err(hba->dev,
			"%s: skip abort. cmd at tag %d already completed.\n",
			__func__, tag);
		goto out;
	}

	/* Skip task abort in case previous aborts failed and report failure */
	if (lrbp->req_abort_skip) {
		dev_err(hba->dev, "%s: skip abort. tag %d failed earlier\n",
			__func__, tag);
		goto out;
	}

	hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));

	if (ufshcd_mcq_sqe_search(hba, hwq, tag)) {
		/*
		 * Failure. The command should not be "stuck" in SQ for
		 * a long time which resulted in command being aborted.
		 */
		dev_err(hba->dev, "%s: cmd found in sq. hwq=%d, tag=%d\n",
			__func__, hwq->id, tag);
		goto out;
	}

	/*
	 * The command is not in the submission queue, and it is not
	 * in the completion queue either. Query the device to see if
	 * the command is being processed in the device.
	 */
	if (ufshcd_try_to_abort_task(hba, tag)) {
		dev_err(hba->dev, "%s: device abort failed %d\n", __func__, err);
		lrbp->req_abort_skip = true;
		goto out;
	}

	err = SUCCESS;

	spin_lock_irqsave(&hwq->cq_lock, flags);
	if (ufshcd_cmd_inflight(lrbp->cmd))
		ufshcd_release_scsi_cmd(hba, lrbp);
	spin_unlock_irqrestore(&hwq->cq_lock, flags);

out:
	return err;
}