// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Universal Flash Storage Host controller driver Core
 * Copyright (C) 2011-2013 Samsung India Software Operations
 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 */
#include <linux/async.h>
#include <linux/devfreq.h>
#include <linux/nls.h>
#include <linux/bitfield.h>
#include <linux/blk-pm.h>
#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_opp.h>
#include <linux/regulator/consumer.h>
#include <linux/sched/clock.h>
#include <linux/iopoll.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include "ufshcd-priv.h"
#include <ufs/ufs_quirks.h>
#include <ufs/unipro.h>
#include "ufs-sysfs.h"
#include "ufs-debugfs.h"
#include "ufs-fault-injection.h"
#include "ufshcd-crypto.h"
#include <asm/unaligned.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>
#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)

#define UFSHCD_ENABLE_MCQ_INTRS	(UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK |\
				 MCQ_CQ_EVENT_STATUS)

/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT	500

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES    10
/* Timeout after 50 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT    50 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES 3
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */

/* Advanced RPMB request timeout */
#define ADVANCED_RPMB_REQ_TIMEOUT  3000 /* 3 seconds */

/* Task management command timeout */
#define TM_CMD_TIMEOUT	100 /* msecs */

/* maximum number of retries for a general UIC command */
#define UFS_UIC_COMMAND_RETRIES 3

/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3

/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5

/* Maximum number of error handler retries before giving up */
#define MAX_ERR_HANDLER_RETRIES 5

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO	0x02

/* default delay of autosuspend: 2000 ms */
#define RPM_AUTOSUSPEND_DELAY_MS 2000

/* Default delay of RPM device flush delayed work */
#define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS 5000

/* Default value of wait time before gating device ref clock */
#define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */

/* Polling time to wait for fDeviceInit */
#define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */
/* UFSHC 4.0 compliant HC support this mode. */
static bool use_mcq_mode = true;

static bool is_mcq_supported(struct ufs_hba *hba)
{
	return hba->mcq_sup && use_mcq_mode;
}

module_param(use_mcq_mode, bool, 0644);
MODULE_PARM_DESC(use_mcq_mode, "Control MCQ mode for controllers starting from UFSHCI 4.0. 1 - enable MCQ, 0 - disable MCQ. MCQ is enabled by default");
#define ufshcd_toggle_vreg(_dev, _vreg, _on)				\
	({								\
		int _ret;						\
		if (_on)						\
			_ret = ufshcd_enable_vreg(_dev, _vreg);		\
		else							\
			_ret = ufshcd_disable_vreg(_dev, _vreg);	\
		_ret;							\
	})

#define ufshcd_hex_dump(prefix_str, buf, len) do {			\
	size_t __len = (len);						\
	print_hex_dump(KERN_ERR, prefix_str,				\
		       __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
		       16, 4, buf, __len, false);			\
} while (0)
int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
		     const char *prefix)
{
	u32 *regs;
	size_t pos;

	if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
		return -EINVAL;

	regs = kzalloc(len, GFP_ATOMIC);
	if (!regs)
		return -ENOMEM;

	for (pos = 0; pos < len; pos += 4) {
		if (offset == 0 &&
		    pos >= REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER &&
		    pos <= REG_UIC_ERROR_CODE_DME)
			continue;

		regs[pos / 4] = ufshcd_readl(hba, offset + pos);
	}

	ufshcd_hex_dump(prefix, regs, len);
	kfree(regs);

	return 0;
}
EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
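
/*
 * Typical use, as done by ufshcd_print_evt_hist() later in this file:
 * dump the whole standard UFSHCI register space with a "host_regs: "
 * prefix:
 *
 *	ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
 */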
enum {
	UFSHCD_MAX_CHANNEL	= 0,
	UFSHCD_CMD_PER_LUN	= 32 - UFSHCD_NUM_RESERVED,
	UFSHCD_CAN_QUEUE	= 32 - UFSHCD_NUM_RESERVED,
};

static const char *const ufshcd_state_name[] = {
	[UFSHCD_STATE_RESET]			= "reset",
	[UFSHCD_STATE_OPERATIONAL]		= "operational",
	[UFSHCD_STATE_ERROR]			= "error",
	[UFSHCD_STATE_EH_SCHEDULED_FATAL]	= "eh_fatal",
	[UFSHCD_STATE_EH_SCHEDULED_NON_FATAL]	= "eh_non_fatal",
};
/* UFSHCD error handling flags */
enum {
	UFSHCD_EH_IN_PROGRESS = (1 << 0),
};

/* UFSHCD UIC layer error flags */
enum {
	UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
	UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
	UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
	UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
	UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
	UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
	UFSHCD_UIC_PA_GENERIC_ERROR = (1 << 6), /* Generic PA error */
};

#define ufshcd_set_eh_in_progress(h) \
	((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
	((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
	((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
const struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
	[UFS_PM_LVL_0] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	[UFS_PM_LVL_1] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	[UFS_PM_LVL_2] = {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	[UFS_PM_LVL_3] = {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	[UFS_PM_LVL_4] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	[UFS_PM_LVL_5] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
	/*
	 * For DeepSleep, the link is first put in hibern8 and then off.
	 * Leaving the link in hibern8 is not supported.
	 */
	[UFS_PM_LVL_6] = {UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE},
};
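
/*
 * Worked example (illustrative only): for UFS_PM_LVL_3 the table above
 * yields
 *
 *	ufs_get_pm_lvl_to_dev_pwr_mode(UFS_PM_LVL_3)   == UFS_SLEEP_PWR_MODE
 *	ufs_get_pm_lvl_to_link_pwr_state(UFS_PM_LVL_3) == UIC_LINK_HIBERN8_STATE
 *
 * and the reverse lookup below,
 * ufs_get_desired_pm_lvl_for_dev_link_state(UFS_SLEEP_PWR_MODE,
 * UIC_LINK_HIBERN8_STATE), returns UFS_PM_LVL_3 again.
 */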
static inline enum ufs_dev_pwr_mode
ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].dev_state;
}

static inline enum uic_link_state
ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].link_state;
}

static inline enum ufs_pm_level
ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
					  enum uic_link_state link_state)
{
	enum ufs_pm_level lvl;

	for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
		if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
		    (ufs_pm_lvl_states[lvl].link_state == link_state))
			return lvl;
	}

	/* if no match found, return the level 0 */
	return UFS_PM_LVL_0;
}
static const struct ufs_dev_quirk ufs_fixups[] = {
	/* UFS cards deviations table */
	{ .wmanufacturerid = UFS_VENDOR_MICRON,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
	{ .wmanufacturerid = UFS_VENDOR_SAMSUNG,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
		   UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
		   UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS },
	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME },
	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
	  .model = "hB8aL1" /*H28U62301AMR*/,
	  .quirk = UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME },
	{ .wmanufacturerid = UFS_VENDOR_TOSHIBA,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
	{ .wmanufacturerid = UFS_VENDOR_TOSHIBA,
	  .model = "THGLF2G9C8KBADG",
	  .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
	{ .wmanufacturerid = UFS_VENDOR_TOSHIBA,
	  .model = "THGLF2G9D8KBADG",
	  .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
};
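
/*
 * Note (hedged; the matching code lives elsewhere in this driver): each
 * entry above is compared against the probed device's wManufacturerID and
 * model string, with UFS_ANY_MODEL acting as a wildcard, and the .quirk
 * bits of every matching entry are OR-ed into the per-device quirk mask.
 */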
static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
			     bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
			     struct ufs_pa_layer_attr *pwr_mode);
static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
					 struct ufs_vreg *vreg);
static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
						 bool enable);
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
static inline void ufshcd_enable_irq(struct ufs_hba *hba)
{
	if (!hba->is_irq_enabled) {
		enable_irq(hba->irq);
		hba->is_irq_enabled = true;
	}
}

static inline void ufshcd_disable_irq(struct ufs_hba *hba)
{
	if (hba->is_irq_enabled) {
		disable_irq(hba->irq);
		hba->is_irq_enabled = false;
	}
}

static void ufshcd_configure_wb(struct ufs_hba *hba)
{
	if (!ufshcd_is_wb_allowed(hba))
		return;

	ufshcd_wb_toggle(hba, true);

	ufshcd_wb_toggle_buf_flush_during_h8(hba, true);

	if (ufshcd_is_wb_buf_flush_allowed(hba))
		ufshcd_wb_toggle_buf_flush(hba, true);
}

static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
{
	if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
		scsi_unblock_requests(hba->host);
}

static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
{
	if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
		scsi_block_requests(hba->host);
}
static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
				      enum ufs_trace_str_t str_t)
{
	struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
	struct utp_upiu_header *header;

	if (!trace_ufshcd_upiu_enabled())
		return;

	if (str_t == UFS_CMD_SEND)
		header = &rq->header;
	else
		header = &hba->lrb[tag].ucd_rsp_ptr->header;

	trace_ufshcd_upiu(dev_name(hba->dev), str_t, header, &rq->sc.cdb,
			  UFS_TSF_CDB);
}

static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba,
					enum ufs_trace_str_t str_t,
					struct utp_upiu_req *rq_rsp)
{
	if (!trace_ufshcd_upiu_enabled())
		return;

	trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq_rsp->header,
			  &rq_rsp->qr, UFS_TSF_OSF);
}
static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
				     enum ufs_trace_str_t str_t)
{
	struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag];

	if (!trace_ufshcd_upiu_enabled())
		return;

	if (str_t == UFS_TM_SEND)
		trace_ufshcd_upiu(dev_name(hba->dev), str_t,
				  &descp->upiu_req.req_header,
				  &descp->upiu_req.input_param1,
				  UFS_TSF_TM_INPUT);
	else
		trace_ufshcd_upiu(dev_name(hba->dev), str_t,
				  &descp->upiu_rsp.rsp_header,
				  &descp->upiu_rsp.output_param1,
				  UFS_TSF_TM_OUTPUT);
}
static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
					 const struct uic_command *ucmd,
					 enum ufs_trace_str_t str_t)
{
	u32 cmd;

	if (!trace_ufshcd_uic_command_enabled())
		return;

	if (str_t == UFS_CMD_SEND)
		cmd = ucmd->command;
	else
		cmd = ufshcd_readl(hba, REG_UIC_COMMAND);

	trace_ufshcd_uic_command(dev_name(hba->dev), str_t, cmd,
				 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1),
				 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2),
				 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
}
static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
				     enum ufs_trace_str_t str_t)
{
	u64 lba = 0;
	u8 opcode = 0, group_id = 0;
	u32 intr, doorbell = 0;
	int hwq_id = -1;
	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
	struct scsi_cmnd *cmd = lrbp->cmd;
	struct request *rq = scsi_cmd_to_rq(cmd);
	int transfer_len = -1;

	/* trace UPIU also */
	ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
	if (!trace_ufshcd_command_enabled())
		return;

	opcode = cmd->cmnd[0];

	if (opcode == READ_10 || opcode == WRITE_10) {
		/*
		 * Currently we only fully trace read(10) and write(10) commands
		 */
		transfer_len =
			be32_to_cpu(lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
		lba = scsi_get_lba(cmd);
		if (opcode == WRITE_10)
			group_id = lrbp->cmd->cmnd[6];
	} else if (opcode == UNMAP) {
		/*
		 * The number of Bytes to be unmapped beginning with the lba.
		 */
		transfer_len = blk_rq_bytes(rq);
		lba = scsi_get_lba(cmd);
	}

	intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);

	if (is_mcq_enabled(hba)) {
		struct ufs_hw_queue *hwq = ufshcd_mcq_req_to_hwq(hba, rq);

		hwq_id = hwq->id;
	} else {
		doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	}
	trace_ufshcd_command(cmd->device, str_t, tag, doorbell, hwq_id,
			     transfer_len, intr, lba, opcode, group_id);
}
static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
{
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		return;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
		    clki->max_freq)
			dev_err(hba->dev, "clk: %s, rate: %u\n",
				clki->name, clki->curr_freq);
	}
}
static void ufshcd_print_evt(struct ufs_hba *hba, u32 id,
			     const char *err_name)
{
	int i;
	bool found = false;
	const struct ufs_event_hist *e;

	if (id >= UFS_EVT_CNT)
		return;

	e = &hba->ufs_stats.event[id];

	for (i = 0; i < UFS_EVENT_HIST_LENGTH; i++) {
		int p = (i + e->pos) % UFS_EVENT_HIST_LENGTH;

		if (e->tstamp[p] == 0)
			continue;
		dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
			e->val[p], div_u64(e->tstamp[p], 1000));
		found = true;
	}

	if (!found)
		dev_err(hba->dev, "No record of %s\n", err_name);
	else
		dev_err(hba->dev, "%s: total cnt=%llu\n", err_name, e->cnt);
}
static void ufshcd_print_evt_hist(struct ufs_hba *hba)
{
	ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");

	ufshcd_print_evt(hba, UFS_EVT_PA_ERR, "pa_err");
	ufshcd_print_evt(hba, UFS_EVT_DL_ERR, "dl_err");
	ufshcd_print_evt(hba, UFS_EVT_NL_ERR, "nl_err");
	ufshcd_print_evt(hba, UFS_EVT_TL_ERR, "tl_err");
	ufshcd_print_evt(hba, UFS_EVT_DME_ERR, "dme_err");
	ufshcd_print_evt(hba, UFS_EVT_AUTO_HIBERN8_ERR,
			 "auto_hibern8_err");
	ufshcd_print_evt(hba, UFS_EVT_FATAL_ERR, "fatal_err");
	ufshcd_print_evt(hba, UFS_EVT_LINK_STARTUP_FAIL,
			 "link_startup_fail");
	ufshcd_print_evt(hba, UFS_EVT_RESUME_ERR, "resume_fail");
	ufshcd_print_evt(hba, UFS_EVT_SUSPEND_ERR,
			 "suspend_fail");
	ufshcd_print_evt(hba, UFS_EVT_WL_RES_ERR, "wlun resume_fail");
	ufshcd_print_evt(hba, UFS_EVT_WL_SUSP_ERR,
			 "wlun suspend_fail");
	ufshcd_print_evt(hba, UFS_EVT_DEV_RESET, "dev_reset");
	ufshcd_print_evt(hba, UFS_EVT_HOST_RESET, "host_reset");
	ufshcd_print_evt(hba, UFS_EVT_ABORT, "task_abort");

	ufshcd_vops_dbg_register_dump(hba);
}
void ufshcd_print_tr(struct ufs_hba *hba, int tag, bool pr_prdt)
{
	const struct ufshcd_lrb *lrbp;
	int prdt_length;

	lrbp = &hba->lrb[tag];

	dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
			tag, div_u64(lrbp->issue_time_stamp_local_clock, 1000));
	dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
			tag, div_u64(lrbp->compl_time_stamp_local_clock, 1000));
	dev_err(hba->dev,
		"UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
		tag, (u64)lrbp->utrd_dma_addr);

	ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
			sizeof(struct utp_transfer_req_desc));
	dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
		(u64)lrbp->ucd_req_dma_addr);
	ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
			sizeof(struct utp_upiu_req));
	dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
		(u64)lrbp->ucd_rsp_dma_addr);
	ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
			sizeof(struct utp_upiu_rsp));

	prdt_length = le16_to_cpu(
		lrbp->utr_descriptor_ptr->prd_table_length);
	if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
		prdt_length /= ufshcd_sg_entry_size(hba);

	dev_err(hba->dev,
		"UPIU[%d] - PRDT - %d entries  phys@0x%llx\n",
		tag, prdt_length,
		(u64)lrbp->ucd_prdt_dma_addr);

	if (pr_prdt)
		ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
			ufshcd_sg_entry_size(hba) * prdt_length);
}
static bool ufshcd_print_tr_iter(struct request *req, void *priv)
{
	struct scsi_device *sdev = req->q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	struct ufs_hba *hba = shost_priv(shost);

	ufshcd_print_tr(hba, req->tag, *(bool *)priv);

	return true;
}

/**
 * ufshcd_print_trs_all - print trs for all started requests.
 * @hba: per-adapter instance.
 * @pr_prdt: need to print prdt or not.
 */
static void ufshcd_print_trs_all(struct ufs_hba *hba, bool pr_prdt)
{
	blk_mq_tagset_busy_iter(&hba->host->tag_set, ufshcd_print_tr_iter, &pr_prdt);
}
static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
{
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutmrs) {
		struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];

		dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
		ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
	}
}
static void ufshcd_print_host_state(struct ufs_hba *hba)
{
	const struct scsi_device *sdev_ufs = hba->ufs_device_wlun;

	dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
	dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
		hba->outstanding_reqs, hba->outstanding_tasks);
	dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
		hba->saved_err, hba->saved_uic_err);
	dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
		hba->pm_op_in_progress, hba->is_sys_suspended);
	dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
		hba->auto_bkops_enabled, hba->host->host_self_blocked);
	dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
	dev_err(hba->dev,
		"last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n",
		div_u64(hba->ufs_stats.last_hibern8_exit_tstamp, 1000),
		hba->ufs_stats.hibern8_exit_cnt);
	dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n",
		div_u64(hba->ufs_stats.last_intr_ts, 1000),
		hba->ufs_stats.last_intr_status);
	dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
		hba->eh_flags, hba->req_abort_count);
	dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n",
		hba->ufs_version, hba->capabilities, hba->caps);
	dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
		hba->dev_quirks);
	if (sdev_ufs)
		dev_err(hba->dev, "UFS dev info: %.8s %.16s rev %.4s\n",
			sdev_ufs->vendor, sdev_ufs->model, sdev_ufs->rev);

	ufshcd_print_clk_freqs(hba);
}
/**
 * ufshcd_print_pwr_info - print power params as saved in hba
 * power info
 * @hba: per-adapter instance
 */
static void ufshcd_print_pwr_info(struct ufs_hba *hba)
{
	static const char * const names[] = {
		"INVALID MODE",
		"FAST MODE",
		"SLOW_MODE",
		"INVALID MODE",
		"FASTAUTO_MODE",
		"SLOWAUTO_MODE",
		"INVALID MODE",
	};

	/*
	 * Using dev_dbg to avoid messages during runtime PM to avoid
	 * never-ending cycles of messages written back to storage by user space
	 * causing runtime resume, causing more messages and so on.
	 */
	dev_dbg(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
		 __func__,
		 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
		 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
		 names[hba->pwr_info.pwr_rx],
		 names[hba->pwr_info.pwr_tx],
		 hba->pwr_info.hs_rate);
}
static void ufshcd_device_reset(struct ufs_hba *hba)
{
	int err;

	err = ufshcd_vops_device_reset(hba);

	if (!err) {
		ufshcd_set_ufs_dev_active(hba);
		if (ufshcd_is_wb_allowed(hba)) {
			hba->dev_info.wb_enabled = false;
			hba->dev_info.wb_buf_flush_enabled = false;
		}
	}
	if (err != -EOPNOTSUPP)
		ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err);
}
void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
{
	usleep_range(us, us + tolerance);
}
EXPORT_SYMBOL_GPL(ufshcd_delay_us);
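
/*
 * Example (hypothetical caller): honour a vendor-specified 100 us
 * post-reset settle time while giving the scheduler ~10 us of slack:
 *
 *	ufshcd_delay_us(100, 10);
 */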
/**
 * ufshcd_wait_for_register - wait for register value to change
 * @hba: per-adapter interface
 * @reg: mmio register offset
 * @mask: mask to apply to the read register value
 * @val: value to wait for
 * @interval_us: polling interval in microseconds
 * @timeout_ms: timeout in milliseconds
 *
 * Return: -ETIMEDOUT on error, zero on success.
 */
static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
				u32 val, unsigned long interval_us,
				unsigned long timeout_ms)
{
	int err = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	/* ignore bits that we don't intend to wait on */
	val = val & mask;

	while ((ufshcd_readl(hba, reg) & mask) != val) {
		usleep_range(interval_us, interval_us + 50);
		if (time_after(jiffies, timeout)) {
			if ((ufshcd_readl(hba, reg) & mask) != val)
				err = -ETIMEDOUT;
			break;
		}
	}

	return err;
}
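
/*
 * Example (hypothetical caller, not taken from this file): poll the HCE
 * bit in 1 ms steps for up to 100 ms after writing CONTROLLER_ENABLE:
 *
 *	err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
 *				       CONTROLLER_ENABLE, CONTROLLER_ENABLE,
 *				       1000, 100);
 */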
/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 * @hba: Pointer to adapter instance
 *
 * Return: interrupt bit mask per version
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
	if (hba->ufs_version == ufshci_version(1, 0))
		return INTERRUPT_MASK_ALL_VER_10;
	if (hba->ufs_version <= ufshci_version(2, 0))
		return INTERRUPT_MASK_ALL_VER_11;

	return INTERRUPT_MASK_ALL_VER_21;
}
/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba: Pointer to adapter instance
 *
 * Return: UFSHCI version supported by the controller
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	u32 ufshci_ver;

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
		ufshci_ver = ufshcd_vops_get_ufs_hci_version(hba);
	else
		ufshci_ver = ufshcd_readl(hba, REG_UFS_VERSION);

	/*
	 * UFSHCI v1.x uses a different version scheme, in order
	 * to allow the use of comparisons with the ufshci_version
	 * function, we convert it to the same scheme as ufs 2.0+.
	 */
	if (ufshci_ver & 0x00010000)
		return ufshci_version(1, ufshci_ver & 0x00000100);

	return ufshci_ver;
}
/**
 * ufshcd_is_device_present - Check if any device connected to
 *			      the host controller
 * @hba: pointer to adapter instance
 *
 * Return: true if device present, false if no device detected
 */
static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & DEVICE_PRESENT;
}

/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @lrbp: pointer to local command reference block
 * @cqe: pointer to the completion queue entry
 *
 * This function is used to get the OCS field from UTRD
 *
 * Return: the OCS field in the UTRD.
 */
static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp,
				      struct cq_entry *cqe)
{
	if (cqe)
		return le32_to_cpu(cqe->status) & MASK_OCS;

	return lrbp->utr_descriptor_ptr->header.ocs & MASK_OCS;
}
/**
 * ufshcd_utrl_clear() - Clear requests from the controller request list.
 * @hba: per adapter instance
 * @mask: mask with one bit set for each request to be cleared
 */
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 mask)
{
	if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
		mask = ~mask;
	/*
	 * From the UFSHCI specification: "UTP Transfer Request List CLear
	 * Register (UTRLCLR): This field is bit significant. Each bit
	 * corresponds to a slot in the UTP Transfer Request List, where bit 0
	 * corresponds to request slot 0. A bit in this field is set to ‘0’
	 * by host software to indicate to the host controller that a transfer
	 * request slot is cleared. The host controller
	 * shall free up any resources associated to the request slot
	 * immediately, and shall set the associated bit in UTRLDBR to ‘0’. The
	 * host software indicates no change to request slots by setting the
	 * associated bits in this field to ‘1’. Bits in this field shall only
	 * be set ‘1’ or ‘0’ by host software when UTRLRSR is set to ‘1’."
	 */
	ufshcd_writel(hba, ~mask, REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}
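
/*
 * Worked example (from the register semantics quoted above): on a
 * controller without UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR, clearing only
 * request slot 3 means writing a word that is all ones except bit 3:
 *
 *	ufshcd_utrl_clear(hba, BIT(3));		writes 0xFFFFFFF7 to UTRLCLR
 */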
/**
 * ufshcd_utmrl_clear - Clear a bit in UTMRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
{
	if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
		ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
	else
		ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
}
/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Return: 0 on success; a positive value if failed.
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
	return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
}

/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 * @hba: Pointer to adapter instance
 *
 * This function gets the result of UIC command completion
 *
 * Return: 0 on success; non-zero value on error.
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
	       MASK_UIC_COMMAND_RESULT;
}

/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 * @hba: Pointer to adapter instance
 *
 * This function gets UIC command argument3
 *
 * Return: 0 on success; non-zero value on error.
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}

/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 * @ucd_rsp_ptr: pointer to response UPIU
 */
static inline enum upiu_response_transaction
ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return ucd_rsp_ptr->header.transaction_code;
}

/**
 * ufshcd_is_exception_event - Check if the device raised an exception event
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * The function checks if the device raised an exception event indicated in
 * the Device Information field of response UPIU.
 *
 * Return: true if exception is raised, false otherwise.
 */
static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return ucd_rsp_ptr->header.device_information & 1;
}
/**
 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 * @hba: per adapter instance
 */
static inline void
ufshcd_reset_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE |
		      INT_AGGR_COUNTER_AND_TIMER_RESET,
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 * @hba: per adapter instance
 * @cnt: Interrupt aggregation counter threshold
 * @tmout: Interrupt aggregation timeout value
 */
static inline void
ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
		      INT_AGGR_COUNTER_THLD_VAL(cnt) |
		      INT_AGGR_TIMEOUT_VAL(tmout),
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}
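
/*
 * Example (hedged; the actual caller lives later in this driver): when
 * aggregation is enabled on the legacy doorbell path it is typically
 * programmed with the maximum counter threshold and the default timeout
 * of INT_AGGR_DEF_TO (0x02 units of 40 us, i.e. 80 us):
 *
 *	ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
 */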
/**
 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 * @hba: per adapter instance
 */
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers,
 *			When run-stop registers are set to 1, it indicates the
 *			host controller that it can process the requests
 * @hba: per adapter instance
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}
/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	u32 val = CONTROLLER_ENABLE;

	if (ufshcd_crypto_enable(hba))
		val |= CRYPTO_GENERAL_ENABLE;

	ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
}

/**
 * ufshcd_is_hba_active - Get controller state
 * @hba: per adapter instance
 *
 * Return: true if and only if the controller is active.
 */
bool ufshcd_is_hba_active(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE;
}
EXPORT_SYMBOL_GPL(ufshcd_is_hba_active);
u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
{
	/* HCI version 1.0 and 1.1 supports UniPro 1.41 */
	if (hba->ufs_version <= ufshci_version(1, 1))
		return UFS_UNIPRO_VER_1_41;
	else
		return UFS_UNIPRO_VER_1_6;
}
EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);

static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
{
	/*
	 * If both host and device support UniPro ver1.6 or later, PA layer
	 * parameters tuning happens during link startup itself.
	 *
	 * We can manually tune PA layer parameters if either host or device
	 * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
	 * logic simple, we will only do manual tuning if local unipro version
	 * doesn't support ver1.6 or later.
	 */
	return ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6;
}
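
/*
 * Example (derived from the two helpers above): a host whose UFSHCI
 * version is 1.1 reports UFS_UNIPRO_VER_1_41 locally, so
 * ufshcd_is_unipro_pa_params_tuning_req() returns true and the driver
 * falls back to manual PA-layer parameter tuning after link startup.
 */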
/**
 * ufshcd_set_clk_freq - set UFS controller clock frequencies
 * @hba: per adapter instance
 * @scale_up: If True, set max possible frequency othewise set low frequency
 *
 * Return: 0 if successful; < 0 upon failure.
 */
static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		goto out;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (scale_up && clki->max_freq) {
				if (clki->curr_freq == clki->max_freq)
					continue;

				ret = clk_set_rate(clki->clk, clki->max_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->max_freq, ret);
					break;
				}
				trace_ufshcd_clk_scaling(dev_name(hba->dev),
						"scaled up", clki->name,
						clki->curr_freq,
						clki->max_freq);

				clki->curr_freq = clki->max_freq;

			} else if (!scale_up && clki->min_freq) {
				if (clki->curr_freq == clki->min_freq)
					continue;

				ret = clk_set_rate(clki->clk, clki->min_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->min_freq, ret);
					break;
				}
				trace_ufshcd_clk_scaling(dev_name(hba->dev),
						"scaled down", clki->name,
						clki->curr_freq,
						clki->min_freq);
				clki->curr_freq = clki->min_freq;
			}
		}
		dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
				clki->name, clk_get_rate(clki->clk));
	}

out:
	return ret;
}
1066 int ufshcd_opp_config_clks(struct device
*dev
, struct opp_table
*opp_table
,
1067 struct dev_pm_opp
*opp
, void *data
,
1070 struct ufs_hba
*hba
= dev_get_drvdata(dev
);
1071 struct list_head
*head
= &hba
->clk_list_head
;
1072 struct ufs_clk_info
*clki
;
1077 list_for_each_entry(clki
, head
, list
) {
1078 if (!IS_ERR_OR_NULL(clki
->clk
)) {
1079 freq
= dev_pm_opp_get_freq_indexed(opp
, idx
++);
1081 /* Do not set rate for clocks having frequency as 0 */
1085 ret
= clk_set_rate(clki
->clk
, freq
);
1087 dev_err(dev
, "%s: %s clk set rate(%ldHz) failed, %d\n",
1088 __func__
, clki
->name
, freq
, ret
);
1092 trace_ufshcd_clk_scaling(dev_name(dev
),
1093 (scaling_down
? "scaled down" : "scaled up"),
1094 clki
->name
, hba
->clk_scaling
.target_freq
, freq
);
1100 EXPORT_SYMBOL_GPL(ufshcd_opp_config_clks
);
static int ufshcd_opp_set_rate(struct ufs_hba *hba, unsigned long freq)
{
	struct dev_pm_opp *opp;
	int ret;

	opp = dev_pm_opp_find_freq_floor_indexed(hba->dev,
						 &freq, 0);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	ret = dev_pm_opp_set_opp(hba->dev, opp);
	dev_pm_opp_put(opp);

	return ret;
}
/**
 * ufshcd_scale_clks - scale up or scale down UFS controller clocks
 * @hba: per adapter instance
 * @freq: frequency to scale
 * @scale_up: True if scaling up and false if scaling down
 *
 * Return: 0 if successful; < 0 upon failure.
 */
static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
			     bool scale_up)
{
	int ret = 0;
	ktime_t start = ktime_get();

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
	if (ret)
		goto out;

	if (hba->use_pm_opp)
		ret = ufshcd_opp_set_rate(hba, freq);
	else
		ret = ufshcd_set_clk_freq(hba, scale_up);
	if (ret)
		goto out;

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
	if (ret) {
		if (hba->use_pm_opp)
			ufshcd_opp_set_rate(hba,
					    hba->devfreq->previous_freq);
		else
			ufshcd_set_clk_freq(hba, !scale_up);
		goto out;
	}

out:
	trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
			(scale_up ? "up" : "down"),
			ktime_to_us(ktime_sub(ktime_get(), start)), ret);
	return ret;
}
/**
 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
 * @hba: per adapter instance
 * @freq: frequency to scale
 * @scale_up: True if scaling up and false if scaling down
 *
 * Return: true if scaling is required, false otherwise.
 */
static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
					       unsigned long freq, bool scale_up)
{
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		return false;

	if (hba->use_pm_opp)
		return freq != hba->clk_scaling.target_freq;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (scale_up && clki->max_freq) {
				if (clki->curr_freq == clki->max_freq)
					continue;
				return true;
			} else if (!scale_up && clki->min_freq) {
				if (clki->curr_freq == clki->min_freq)
					continue;
				return true;
			}
		}
	}

	return false;
}
/*
 * Determine the number of pending commands by counting the bits in the SCSI
 * device budget maps. This approach has been selected because a bit is set in
 * the budget map before scsi_host_queue_ready() checks the host_self_blocked
 * flag. The host_self_blocked flag can be modified by calling
 * scsi_block_requests() or scsi_unblock_requests().
 */
static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
{
	const struct scsi_device *sdev;
	u32 pending = 0;

	lockdep_assert_held(hba->host->host_lock);
	__shost_for_each_device(sdev, hba->host)
		pending += sbitmap_weight(&sdev->budget_map);

	return pending;
}
1216 * Wait until all pending SCSI commands and TMFs have finished or the timeout
1219 * Return: 0 upon success; -EBUSY upon timeout.
1221 static int ufshcd_wait_for_doorbell_clr(struct ufs_hba
*hba
,
1222 u64 wait_timeout_us
)
1224 unsigned long flags
;
1228 bool timeout
= false, do_last_check
= false;
1232 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1234 * Wait for all the outstanding tasks/transfer requests.
1235 * Verify by checking the doorbell registers are clear.
1237 start
= ktime_get();
1239 if (hba
->ufshcd_state
!= UFSHCD_STATE_OPERATIONAL
) {
1244 tm_doorbell
= ufshcd_readl(hba
, REG_UTP_TASK_REQ_DOOR_BELL
);
1245 tr_pending
= ufshcd_pending_cmds(hba
);
1246 if (!tm_doorbell
&& !tr_pending
) {
1249 } else if (do_last_check
) {
1253 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1254 io_schedule_timeout(msecs_to_jiffies(20));
1255 if (ktime_to_us(ktime_sub(ktime_get(), start
)) >
1259 * We might have scheduled out for long time so make
1260 * sure to check if doorbells are cleared by this time
1263 do_last_check
= true;
1265 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1266 } while (tm_doorbell
|| tr_pending
);
1270 "%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
1271 __func__
, tm_doorbell
, tr_pending
);
1275 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1276 ufshcd_release(hba
);
1281 * ufshcd_scale_gear - scale up/down UFS gear
1282 * @hba: per adapter instance
1283 * @scale_up: True for scaling up gear and false for scaling down
1285 * Return: 0 for success; -EBUSY if scaling can't happen at this time;
1286 * non-zero for any other errors.
1288 static int ufshcd_scale_gear(struct ufs_hba
*hba
, bool scale_up
)
1291 struct ufs_pa_layer_attr new_pwr_info
;
1294 memcpy(&new_pwr_info
, &hba
->clk_scaling
.saved_pwr_info
,
1295 sizeof(struct ufs_pa_layer_attr
));
1297 memcpy(&new_pwr_info
, &hba
->pwr_info
,
1298 sizeof(struct ufs_pa_layer_attr
));
1300 if (hba
->pwr_info
.gear_tx
> hba
->clk_scaling
.min_gear
||
1301 hba
->pwr_info
.gear_rx
> hba
->clk_scaling
.min_gear
) {
1302 /* save the current power mode */
1303 memcpy(&hba
->clk_scaling
.saved_pwr_info
,
1305 sizeof(struct ufs_pa_layer_attr
));
1307 /* scale down gear */
1308 new_pwr_info
.gear_tx
= hba
->clk_scaling
.min_gear
;
1309 new_pwr_info
.gear_rx
= hba
->clk_scaling
.min_gear
;
1313 /* check if the power mode needs to be changed or not? */
1314 ret
= ufshcd_config_pwr_mode(hba
, &new_pwr_info
);
1316 dev_err(hba
->dev
, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
1318 hba
->pwr_info
.gear_tx
, hba
->pwr_info
.gear_rx
,
1319 new_pwr_info
.gear_tx
, new_pwr_info
.gear_rx
);
1325 * Wait until all pending SCSI commands and TMFs have finished or the timeout
1328 * Return: 0 upon success; -EBUSY upon timeout.
1330 static int ufshcd_clock_scaling_prepare(struct ufs_hba
*hba
, u64 timeout_us
)
1334 * make sure that there are no outstanding requests when
1335 * clock scaling is in progress
1337 ufshcd_scsi_block_requests(hba
);
1338 mutex_lock(&hba
->wb_mutex
);
1339 down_write(&hba
->clk_scaling_lock
);
1341 if (!hba
->clk_scaling
.is_allowed
||
1342 ufshcd_wait_for_doorbell_clr(hba
, timeout_us
)) {
1344 up_write(&hba
->clk_scaling_lock
);
1345 mutex_unlock(&hba
->wb_mutex
);
1346 ufshcd_scsi_unblock_requests(hba
);
1350 /* let's not get into low power until clock scaling is completed */
1357 static void ufshcd_clock_scaling_unprepare(struct ufs_hba
*hba
, int err
, bool scale_up
)
1359 up_write(&hba
->clk_scaling_lock
);
1361 /* Enable Write Booster if we have scaled up else disable it */
1362 if (ufshcd_enable_wb_if_scaling_up(hba
) && !err
)
1363 ufshcd_wb_toggle(hba
, scale_up
);
1365 mutex_unlock(&hba
->wb_mutex
);
1367 ufshcd_scsi_unblock_requests(hba
);
1368 ufshcd_release(hba
);
1372 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
1373 * @hba: per adapter instance
1374 * @freq: frequency to scale
1375 * @scale_up: True for scaling up and false for scalin down
1377 * Return: 0 for success; -EBUSY if scaling can't happen at this time; non-zero
1378 * for any other errors.
1380 static int ufshcd_devfreq_scale(struct ufs_hba
*hba
, unsigned long freq
,
1385 ret
= ufshcd_clock_scaling_prepare(hba
, 1 * USEC_PER_SEC
);
1389 /* scale down the gear before scaling down clocks */
1391 ret
= ufshcd_scale_gear(hba
, false);
1396 ret
= ufshcd_scale_clks(hba
, freq
, scale_up
);
1399 ufshcd_scale_gear(hba
, true);
1403 /* scale up the gear after scaling up clocks */
1405 ret
= ufshcd_scale_gear(hba
, true);
1407 ufshcd_scale_clks(hba
, hba
->devfreq
->previous_freq
,
1414 ufshcd_clock_scaling_unprepare(hba
, ret
, scale_up
);
1418 static void ufshcd_clk_scaling_suspend_work(struct work_struct
*work
)
1420 struct ufs_hba
*hba
= container_of(work
, struct ufs_hba
,
1421 clk_scaling
.suspend_work
);
1422 unsigned long irq_flags
;
1424 spin_lock_irqsave(hba
->host
->host_lock
, irq_flags
);
1425 if (hba
->clk_scaling
.active_reqs
|| hba
->clk_scaling
.is_suspended
) {
1426 spin_unlock_irqrestore(hba
->host
->host_lock
, irq_flags
);
1429 hba
->clk_scaling
.is_suspended
= true;
1430 hba
->clk_scaling
.window_start_t
= 0;
1431 spin_unlock_irqrestore(hba
->host
->host_lock
, irq_flags
);
1433 devfreq_suspend_device(hba
->devfreq
);
1436 static void ufshcd_clk_scaling_resume_work(struct work_struct
*work
)
1438 struct ufs_hba
*hba
= container_of(work
, struct ufs_hba
,
1439 clk_scaling
.resume_work
);
1440 unsigned long irq_flags
;
1442 spin_lock_irqsave(hba
->host
->host_lock
, irq_flags
);
1443 if (!hba
->clk_scaling
.is_suspended
) {
1444 spin_unlock_irqrestore(hba
->host
->host_lock
, irq_flags
);
1447 hba
->clk_scaling
.is_suspended
= false;
1448 spin_unlock_irqrestore(hba
->host
->host_lock
, irq_flags
);
1450 devfreq_resume_device(hba
->devfreq
);
1453 static int ufshcd_devfreq_target(struct device
*dev
,
1454 unsigned long *freq
, u32 flags
)
1457 struct ufs_hba
*hba
= dev_get_drvdata(dev
);
1459 bool scale_up
, sched_clk_scaling_suspend_work
= false;
1460 struct list_head
*clk_list
= &hba
->clk_list_head
;
1461 struct ufs_clk_info
*clki
;
1462 unsigned long irq_flags
;
1464 if (!ufshcd_is_clkscaling_supported(hba
))
1467 if (hba
->use_pm_opp
) {
1468 struct dev_pm_opp
*opp
;
1470 /* Get the recommended frequency from OPP framework */
1471 opp
= devfreq_recommended_opp(dev
, freq
, flags
);
1473 return PTR_ERR(opp
);
1475 dev_pm_opp_put(opp
);
1477 /* Override with the closest supported frequency */
1478 clki
= list_first_entry(&hba
->clk_list_head
, struct ufs_clk_info
,
1480 *freq
= (unsigned long) clk_round_rate(clki
->clk
, *freq
);
1483 spin_lock_irqsave(hba
->host
->host_lock
, irq_flags
);
1484 if (ufshcd_eh_in_progress(hba
)) {
1485 spin_unlock_irqrestore(hba
->host
->host_lock
, irq_flags
);
1489 /* Skip scaling clock when clock scaling is suspended */
1490 if (hba
->clk_scaling
.is_suspended
) {
1491 spin_unlock_irqrestore(hba
->host
->host_lock
, irq_flags
);
1492 dev_warn(hba
->dev
, "clock scaling is suspended, skip");
1496 if (!hba
->clk_scaling
.active_reqs
)
1497 sched_clk_scaling_suspend_work
= true;
1499 if (list_empty(clk_list
)) {
1500 spin_unlock_irqrestore(hba
->host
->host_lock
, irq_flags
);
1504 /* Decide based on the target or rounded-off frequency and update */
1505 if (hba
->use_pm_opp
)
1506 scale_up
= *freq
> hba
->clk_scaling
.target_freq
;
1508 scale_up
= *freq
== clki
->max_freq
;
1510 if (!hba
->use_pm_opp
&& !scale_up
)
1511 *freq
= clki
->min_freq
;
1513 /* Update the frequency */
1514 if (!ufshcd_is_devfreq_scaling_required(hba
, *freq
, scale_up
)) {
1515 spin_unlock_irqrestore(hba
->host
->host_lock
, irq_flags
);
1517 goto out
; /* no state change required */
1519 spin_unlock_irqrestore(hba
->host
->host_lock
, irq_flags
);
1521 start
= ktime_get();
1522 ret
= ufshcd_devfreq_scale(hba
, *freq
, scale_up
);
1524 hba
->clk_scaling
.target_freq
= *freq
;
1526 trace_ufshcd_profile_clk_scaling(dev_name(hba
->dev
),
1527 (scale_up
? "up" : "down"),
1528 ktime_to_us(ktime_sub(ktime_get(), start
)), ret
);
1531 if (sched_clk_scaling_suspend_work
&& !scale_up
)
1532 queue_work(hba
->clk_scaling
.workq
,
1533 &hba
->clk_scaling
.suspend_work
);
1538 static int ufshcd_devfreq_get_dev_status(struct device
*dev
,
1539 struct devfreq_dev_status
*stat
)
1541 struct ufs_hba
*hba
= dev_get_drvdata(dev
);
1542 struct ufs_clk_scaling
*scaling
= &hba
->clk_scaling
;
1543 unsigned long flags
;
1546 if (!ufshcd_is_clkscaling_supported(hba
))
1549 memset(stat
, 0, sizeof(*stat
));
1551 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1552 curr_t
= ktime_get();
1553 if (!scaling
->window_start_t
)
1557 * If current frequency is 0, then the ondemand governor considers
1558 * there's no initial frequency set. And it always requests to set
1559 * to max. frequency.
1561 if (hba
->use_pm_opp
) {
1562 stat
->current_frequency
= hba
->clk_scaling
.target_freq
;
1564 struct list_head
*clk_list
= &hba
->clk_list_head
;
1565 struct ufs_clk_info
*clki
;
1567 clki
= list_first_entry(clk_list
, struct ufs_clk_info
, list
);
1568 stat
->current_frequency
= clki
->curr_freq
;
1571 if (scaling
->is_busy_started
)
1572 scaling
->tot_busy_t
+= ktime_us_delta(curr_t
,
1573 scaling
->busy_start_t
);
1574 stat
->total_time
= ktime_us_delta(curr_t
, scaling
->window_start_t
);
1575 stat
->busy_time
= scaling
->tot_busy_t
;
1577 scaling
->window_start_t
= curr_t
;
1578 scaling
->tot_busy_t
= 0;
1580 if (scaling
->active_reqs
) {
1581 scaling
->busy_start_t
= curr_t
;
1582 scaling
->is_busy_started
= true;
1584 scaling
->busy_start_t
= 0;
1585 scaling
->is_busy_started
= false;
1587 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1591 static int ufshcd_devfreq_init(struct ufs_hba
*hba
)
1593 struct list_head
*clk_list
= &hba
->clk_list_head
;
1594 struct ufs_clk_info
*clki
;
1595 struct devfreq
*devfreq
;
1598 /* Skip devfreq if we don't have any clocks in the list */
1599 if (list_empty(clk_list
))
1602 if (!hba
->use_pm_opp
) {
1603 clki
= list_first_entry(clk_list
, struct ufs_clk_info
, list
);
1604 dev_pm_opp_add(hba
->dev
, clki
->min_freq
, 0);
1605 dev_pm_opp_add(hba
->dev
, clki
->max_freq
, 0);
1608 ufshcd_vops_config_scaling_param(hba
, &hba
->vps
->devfreq_profile
,
1609 &hba
->vps
->ondemand_data
);
1610 devfreq
= devfreq_add_device(hba
->dev
,
1611 &hba
->vps
->devfreq_profile
,
1612 DEVFREQ_GOV_SIMPLE_ONDEMAND
,
1613 &hba
->vps
->ondemand_data
);
1614 if (IS_ERR(devfreq
)) {
1615 ret
= PTR_ERR(devfreq
);
1616 dev_err(hba
->dev
, "Unable to register with devfreq %d\n", ret
);
1618 if (!hba
->use_pm_opp
) {
1619 dev_pm_opp_remove(hba
->dev
, clki
->min_freq
);
1620 dev_pm_opp_remove(hba
->dev
, clki
->max_freq
);
1625 hba
->devfreq
= devfreq
;
1630 static void ufshcd_devfreq_remove(struct ufs_hba
*hba
)
1632 struct list_head
*clk_list
= &hba
->clk_list_head
;
1637 devfreq_remove_device(hba
->devfreq
);
1638 hba
->devfreq
= NULL
;
1640 if (!hba
->use_pm_opp
) {
1641 struct ufs_clk_info
*clki
;
1643 clki
= list_first_entry(clk_list
, struct ufs_clk_info
, list
);
1644 dev_pm_opp_remove(hba
->dev
, clki
->min_freq
);
1645 dev_pm_opp_remove(hba
->dev
, clki
->max_freq
);
1649 static void ufshcd_suspend_clkscaling(struct ufs_hba
*hba
)
1651 unsigned long flags
;
1652 bool suspend
= false;
1654 cancel_work_sync(&hba
->clk_scaling
.suspend_work
);
1655 cancel_work_sync(&hba
->clk_scaling
.resume_work
);
1657 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1658 if (!hba
->clk_scaling
.is_suspended
) {
1660 hba
->clk_scaling
.is_suspended
= true;
1661 hba
->clk_scaling
.window_start_t
= 0;
1663 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1666 devfreq_suspend_device(hba
->devfreq
);
1669 static void ufshcd_resume_clkscaling(struct ufs_hba
*hba
)
1671 unsigned long flags
;
1672 bool resume
= false;
1674 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1675 if (hba
->clk_scaling
.is_suspended
) {
1677 hba
->clk_scaling
.is_suspended
= false;
1679 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1682 devfreq_resume_device(hba
->devfreq
);
1685 static ssize_t
ufshcd_clkscale_enable_show(struct device
*dev
,
1686 struct device_attribute
*attr
, char *buf
)
1688 struct ufs_hba
*hba
= dev_get_drvdata(dev
);
1690 return sysfs_emit(buf
, "%d\n", hba
->clk_scaling
.is_enabled
);
1693 static ssize_t
ufshcd_clkscale_enable_store(struct device
*dev
,
1694 struct device_attribute
*attr
, const char *buf
, size_t count
)
1696 struct ufs_hba
*hba
= dev_get_drvdata(dev
);
1700 if (kstrtou32(buf
, 0, &value
))
1703 down(&hba
->host_sem
);
1704 if (!ufshcd_is_user_access_allowed(hba
)) {
1710 if (value
== hba
->clk_scaling
.is_enabled
)
1713 ufshcd_rpm_get_sync(hba
);
1716 hba
->clk_scaling
.is_enabled
= value
;
1719 ufshcd_resume_clkscaling(hba
);
1721 ufshcd_suspend_clkscaling(hba
);
1722 err
= ufshcd_devfreq_scale(hba
, ULONG_MAX
, true);
1724 dev_err(hba
->dev
, "%s: failed to scale clocks up %d\n",
1728 ufshcd_release(hba
);
1729 ufshcd_rpm_put_sync(hba
);
1732 return err
? err
: count
;
1735 static void ufshcd_init_clk_scaling_sysfs(struct ufs_hba
*hba
)
1737 hba
->clk_scaling
.enable_attr
.show
= ufshcd_clkscale_enable_show
;
1738 hba
->clk_scaling
.enable_attr
.store
= ufshcd_clkscale_enable_store
;
1739 sysfs_attr_init(&hba
->clk_scaling
.enable_attr
.attr
);
1740 hba
->clk_scaling
.enable_attr
.attr
.name
= "clkscale_enable";
1741 hba
->clk_scaling
.enable_attr
.attr
.mode
= 0644;
1742 if (device_create_file(hba
->dev
, &hba
->clk_scaling
.enable_attr
))
1743 dev_err(hba
->dev
, "Failed to create sysfs for clkscale_enable\n");
1746 static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba
*hba
)
1748 if (hba
->clk_scaling
.enable_attr
.attr
.name
)
1749 device_remove_file(hba
->dev
, &hba
->clk_scaling
.enable_attr
);
1752 static void ufshcd_init_clk_scaling(struct ufs_hba
*hba
)
1754 char wq_name
[sizeof("ufs_clkscaling_00")];
1756 if (!ufshcd_is_clkscaling_supported(hba
))
1759 if (!hba
->clk_scaling
.min_gear
)
1760 hba
->clk_scaling
.min_gear
= UFS_HS_G1
;
1762 INIT_WORK(&hba
->clk_scaling
.suspend_work
,
1763 ufshcd_clk_scaling_suspend_work
);
1764 INIT_WORK(&hba
->clk_scaling
.resume_work
,
1765 ufshcd_clk_scaling_resume_work
);
1767 snprintf(wq_name
, sizeof(wq_name
), "ufs_clkscaling_%d",
1768 hba
->host
->host_no
);
1769 hba
->clk_scaling
.workq
= create_singlethread_workqueue(wq_name
);
1771 hba
->clk_scaling
.is_initialized
= true;
1774 static void ufshcd_exit_clk_scaling(struct ufs_hba
*hba
)
1776 if (!hba
->clk_scaling
.is_initialized
)
1779 ufshcd_remove_clk_scaling_sysfs(hba
);
1780 destroy_workqueue(hba
->clk_scaling
.workq
);
1781 ufshcd_devfreq_remove(hba
);
1782 hba
->clk_scaling
.is_initialized
= false;
1785 static void ufshcd_ungate_work(struct work_struct
*work
)
1788 unsigned long flags
;
1789 struct ufs_hba
*hba
= container_of(work
, struct ufs_hba
,
1790 clk_gating
.ungate_work
);
1792 cancel_delayed_work_sync(&hba
->clk_gating
.gate_work
);
1794 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1795 if (hba
->clk_gating
.state
== CLKS_ON
) {
1796 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1800 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1801 ufshcd_hba_vreg_set_hpm(hba
);
1802 ufshcd_setup_clocks(hba
, true);
1804 ufshcd_enable_irq(hba
);
1806 /* Exit from hibern8 */
1807 if (ufshcd_can_hibern8_during_gating(hba
)) {
1808 /* Prevent gating in this path */
1809 hba
->clk_gating
.is_suspended
= true;
1810 if (ufshcd_is_link_hibern8(hba
)) {
1811 ret
= ufshcd_uic_hibern8_exit(hba
);
1813 dev_err(hba
->dev
, "%s: hibern8 exit failed %d\n",
1816 ufshcd_set_link_active(hba
);
1818 hba
->clk_gating
.is_suspended
= false;
1823 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
1824 * Also, exit from hibern8 mode and set the link as active.
1825 * @hba: per adapter instance
1827 void ufshcd_hold(struct ufs_hba
*hba
)
1830 unsigned long flags
;
1832 if (!ufshcd_is_clkgating_allowed(hba
) ||
1833 !hba
->clk_gating
.is_initialized
)
1835 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1836 hba
->clk_gating
.active_reqs
++;
1839 switch (hba
->clk_gating
.state
) {
1842 * Wait for the ungate work to complete if in progress.
1843 * Though the clocks may be in ON state, the link could
1844 * still be in hibner8 state if hibern8 is allowed
1845 * during clock gating.
1846 * Make sure we exit hibern8 state also in addition to
1849 if (ufshcd_can_hibern8_during_gating(hba
) &&
1850 ufshcd_is_link_hibern8(hba
)) {
1851 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1852 flush_result
= flush_work(&hba
->clk_gating
.ungate_work
);
1853 if (hba
->clk_gating
.is_suspended
&& !flush_result
)
1855 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1860 if (cancel_delayed_work(&hba
->clk_gating
.gate_work
)) {
1861 hba
->clk_gating
.state
= CLKS_ON
;
1862 trace_ufshcd_clk_gating(dev_name(hba
->dev
),
1863 hba
->clk_gating
.state
);
1867 * If we are here, it means gating work is either done or
1868 * currently running. Hence, fall through to cancel gating
1869 * work and to enable clocks.
1873 hba
->clk_gating
.state
= REQ_CLKS_ON
;
1874 trace_ufshcd_clk_gating(dev_name(hba
->dev
),
1875 hba
->clk_gating
.state
);
1876 queue_work(hba
->clk_gating
.clk_gating_workq
,
1877 &hba
->clk_gating
.ungate_work
);
1879 * fall through to check if we should wait for this
1880 * work to be done or not.
1884 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1885 flush_work(&hba
->clk_gating
.ungate_work
);
1886 /* Make sure state is CLKS_ON before returning */
1887 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1890 dev_err(hba
->dev
, "%s: clk gating is in invalid state %d\n",
1891 __func__
, hba
->clk_gating
.state
);
1894 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1896 EXPORT_SYMBOL_GPL(ufshcd_hold
);
1898 static void ufshcd_gate_work(struct work_struct
*work
)
1900 struct ufs_hba
*hba
= container_of(work
, struct ufs_hba
,
1901 clk_gating
.gate_work
.work
);
1902 unsigned long flags
;
1905 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1907 * In case you are here to cancel this work the gating state
1908 * would be marked as REQ_CLKS_ON. In this case save time by
1909 * skipping the gating work and exit after changing the clock
1912 if (hba
->clk_gating
.is_suspended
||
1913 (hba
->clk_gating
.state
!= REQ_CLKS_OFF
)) {
1914 hba
->clk_gating
.state
= CLKS_ON
;
1915 trace_ufshcd_clk_gating(dev_name(hba
->dev
),
1916 hba
->clk_gating
.state
);
1920 if (hba
->clk_gating
.active_reqs
1921 || hba
->ufshcd_state
!= UFSHCD_STATE_OPERATIONAL
1922 || hba
->outstanding_reqs
|| hba
->outstanding_tasks
1923 || hba
->active_uic_cmd
|| hba
->uic_async_done
)
1926 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1928 /* put the link into hibern8 mode before turning off clocks */
1929 if (ufshcd_can_hibern8_during_gating(hba
)) {
1930 ret
= ufshcd_uic_hibern8_enter(hba
);
1932 hba
->clk_gating
.state
= CLKS_ON
;
1933 dev_err(hba
->dev
, "%s: hibern8 enter failed %d\n",
1935 trace_ufshcd_clk_gating(dev_name(hba
->dev
),
1936 hba
->clk_gating
.state
);
1939 ufshcd_set_link_hibern8(hba
);
1942 ufshcd_disable_irq(hba
);
1944 ufshcd_setup_clocks(hba
, false);
1946 /* Put the host controller in low power mode if possible */
1947 ufshcd_hba_vreg_set_lpm(hba
);
1949 * In case you are here to cancel this work the gating state
1950 * would be marked as REQ_CLKS_ON. In this case keep the state
1951 * as REQ_CLKS_ON which would anyway imply that clocks are off
1952 * and a request to turn them on is pending. By doing this way,
1953 * we keep the state machine in tact and this would ultimately
1954 * prevent from doing cancel work multiple times when there are
1955 * new requests arriving before the current cancel work is done.
1957 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1958 if (hba
->clk_gating
.state
== REQ_CLKS_OFF
) {
1959 hba
->clk_gating
.state
= CLKS_OFF
;
1960 trace_ufshcd_clk_gating(dev_name(hba
->dev
),
1961 hba
->clk_gating
.state
);
1964 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1969 /* host lock must be held before calling this variant */
1970 static void __ufshcd_release(struct ufs_hba
*hba
)
1972 if (!ufshcd_is_clkgating_allowed(hba
))
1975 hba
->clk_gating
.active_reqs
--;
1977 if (hba
->clk_gating
.active_reqs
|| hba
->clk_gating
.is_suspended
||
1978 hba
->ufshcd_state
!= UFSHCD_STATE_OPERATIONAL
||
1979 hba
->outstanding_tasks
|| !hba
->clk_gating
.is_initialized
||
1980 hba
->active_uic_cmd
|| hba
->uic_async_done
||
1981 hba
->clk_gating
.state
== CLKS_OFF
)
1984 hba
->clk_gating
.state
= REQ_CLKS_OFF
;
1985 trace_ufshcd_clk_gating(dev_name(hba
->dev
), hba
->clk_gating
.state
);
1986 queue_delayed_work(hba
->clk_gating
.clk_gating_workq
,
1987 &hba
->clk_gating
.gate_work
,
1988 msecs_to_jiffies(hba
->clk_gating
.delay_ms
));
1991 void ufshcd_release(struct ufs_hba
*hba
)
1993 unsigned long flags
;
1995 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1996 __ufshcd_release(hba
);
1997 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1999 EXPORT_SYMBOL_GPL(ufshcd_release
);
static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", hba->clk_gating.delay_ms);
}

void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.delay_ms = value;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_clkgate_delay_set);

static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long value;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	ufshcd_clkgate_delay_set(dev, value);
	return count;
}

static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", hba->clk_gating.is_enabled);
}

static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags;
	u32 value;

	if (kstrtou32(buf, 0, &value))
		return -EINVAL;

	value = !!value;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (value == hba->clk_gating.is_enabled)
		goto out;

	if (value)
		__ufshcd_release(hba);
	else
		hba->clk_gating.active_reqs++;

	hba->clk_gating.is_enabled = value;
out:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return count;
}
static void ufshcd_init_clk_gating_sysfs(struct ufs_hba *hba)
{
	hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
	hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
	sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
	hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
	hba->clk_gating.delay_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");

	hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
	hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
	sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
	hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
	hba->clk_gating.enable_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
}
static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba)
{
	if (hba->clk_gating.delay_attr.attr.name)
		device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
	if (hba->clk_gating.enable_attr.attr.name)
		device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
}

static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
	char wq_name[sizeof("ufs_clk_gating_00")];

	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.state = CLKS_ON;

	hba->clk_gating.delay_ms = 150;
	INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
	INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);

	snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
		 hba->host->host_no);
	hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
					WQ_MEM_RECLAIM | WQ_HIGHPRI);

	ufshcd_init_clk_gating_sysfs(hba);

	hba->clk_gating.is_enabled = true;
	hba->clk_gating.is_initialized = true;
}

static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
{
	if (!hba->clk_gating.is_initialized)
		return;

	ufshcd_remove_clk_gating_sysfs(hba);

	/* Ungate the clock if necessary. */
	ufshcd_hold(hba);
	hba->clk_gating.is_initialized = false;
	ufshcd_release(hba);

	destroy_workqueue(hba->clk_gating.clk_gating_workq);
}
static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
{
	bool queue_resume_work = false;
	ktime_t curr_t = ktime_get();
	unsigned long flags;

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!hba->clk_scaling.active_reqs++)
		queue_resume_work = true;

	if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		return;
	}

	if (queue_resume_work)
		queue_work(hba->clk_scaling.workq,
			   &hba->clk_scaling.resume_work);

	if (!hba->clk_scaling.window_start_t) {
		hba->clk_scaling.window_start_t = curr_t;
		hba->clk_scaling.tot_busy_t = 0;
		hba->clk_scaling.is_busy_started = false;
	}

	if (!hba->clk_scaling.is_busy_started) {
		hba->clk_scaling.busy_start_t = curr_t;
		hba->clk_scaling.is_busy_started = true;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}

static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
{
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
	unsigned long flags;

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_scaling.active_reqs--;
	if (!scaling->active_reqs && scaling->is_busy_started) {
		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
					scaling->busy_start_t));
		scaling->busy_start_t = 0;
		scaling->is_busy_started = false;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
static inline int ufshcd_monitor_opcode2dir(u8 opcode)
{
	if (opcode == READ_6 || opcode == READ_10 || opcode == READ_16)
		return READ;
	else if (opcode == WRITE_6 || opcode == WRITE_10 || opcode == WRITE_16)
		return WRITE;
	else
		return -EINVAL;
}

static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba,
						struct ufshcd_lrb *lrbp)
{
	const struct ufs_hba_monitor *m = &hba->monitor;

	return (m->enabled && lrbp && lrbp->cmd &&
		(!m->chunk_size || m->chunk_size == lrbp->cmd->sdb.length) &&
		ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp));
}

static void ufshcd_start_monitor(struct ufs_hba *hba,
				 const struct ufshcd_lrb *lrbp)
{
	int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (dir >= 0 && hba->monitor.nr_queued[dir]++ == 0)
		hba->monitor.busy_start_ts[dir] = ktime_get();
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
static void ufshcd_update_monitor(struct ufs_hba *hba, const struct ufshcd_lrb *lrbp)
{
	int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) {
		const struct request *req = scsi_cmd_to_rq(lrbp->cmd);
		struct ufs_hba_monitor *m = &hba->monitor;
		ktime_t now, inc, lat;

		now = lrbp->compl_time_stamp;
		inc = ktime_sub(now, m->busy_start_ts[dir]);
		m->total_busy[dir] = ktime_add(m->total_busy[dir], inc);
		m->nr_sec_rw[dir] += blk_rq_sectors(req);

		/* Update latencies */
		m->nr_req[dir]++;
		lat = ktime_sub(now, lrbp->issue_time_stamp);
		m->lat_sum[dir] += lat;
		if (m->lat_max[dir] < lat || !m->lat_max[dir])
			m->lat_max[dir] = lat;
		if (m->lat_min[dir] > lat || !m->lat_min[dir])
			m->lat_min[dir] = lat;

		m->nr_queued[dir]--;
		/* Push forward the busy start of monitor */
		m->busy_start_ts[dir] = now;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
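/*
 * Taken together, ufshcd_start_monitor() and ufshcd_update_monitor() maintain
 * per-direction (READ/WRITE) statistics in hba->monitor: total busy time,
 * number of sectors transferred, request counts and min/max/summed latencies,
 * each sampled between command issue and completion while monitoring is
 * enabled.
 */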
/**
 * ufshcd_send_command - Send SCSI or device management commands
 * @hba: per adapter instance
 * @task_tag: Task tag of the command
 * @hwq: pointer to hardware queue instance
 */
void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag,
			 struct ufs_hw_queue *hwq)
{
	struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
	unsigned long flags;

	lrbp->issue_time_stamp = ktime_get();
	lrbp->issue_time_stamp_local_clock = local_clock();
	lrbp->compl_time_stamp = ktime_set(0, 0);
	lrbp->compl_time_stamp_local_clock = 0;
	ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);

	ufshcd_clk_scaling_start_busy(hba);
	if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
		ufshcd_start_monitor(hba, lrbp);

	if (is_mcq_enabled(hba)) {
		int utrd_size = sizeof(struct utp_transfer_req_desc);
		struct utp_transfer_req_desc *src = lrbp->utr_descriptor_ptr;
		struct utp_transfer_req_desc *dest =
			hwq->sqe_base_addr + hwq->sq_tail_slot;

		spin_lock(&hwq->sq_lock);
		memcpy(dest, src, utrd_size);
		ufshcd_inc_sq_tail(hwq);
		spin_unlock(&hwq->sq_lock);
	} else {
		spin_lock_irqsave(&hba->outstanding_lock, flags);
		if (hba->vops && hba->vops->setup_xfer_req)
			hba->vops->setup_xfer_req(hba, lrbp->task_tag,
						  !!lrbp->cmd);
		__set_bit(lrbp->task_tag, &hba->outstanding_reqs);
		ufshcd_writel(hba, 1 << lrbp->task_tag,
			      REG_UTP_TRANSFER_REQ_DOOR_BELL);
		spin_unlock_irqrestore(&hba->outstanding_lock, flags);
	}
}
/**
 * ufshcd_copy_sense_data - Copy sense data in case of check condition
 * @lrbp: pointer to local reference block
 */
static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
{
	u8 *const sense_buffer = lrbp->cmd->sense_buffer;
	u16 resp_len;
	int len;

	resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header.data_segment_length);
	if (sense_buffer && resp_len) {
		int len_to_copy;

		len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
		len_to_copy = min_t(int, UFS_SENSE_SIZE, len);

		memcpy(sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
		       len_to_copy);
	}
}

/**
 * ufshcd_copy_query_response() - Copy the Query Response and the data
 * descriptor
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 *
 * Return: 0 upon success; < 0 upon failure.
 */
int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);

	/* Get the descriptor */
	if (hba->dev_cmd.query.descriptor &&
	    lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
		u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
				GENERAL_UPIU_REQUEST_SIZE;
		u16 resp_len;
		u16 buf_len;

		/* data segment length */
		resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header
				       .data_segment_length);
		buf_len = be16_to_cpu(
				hba->dev_cmd.query.request.upiu_req.length);
		if (likely(buf_len >= resp_len)) {
			memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
		} else {
			dev_warn(hba->dev,
				 "%s: rsp size %d is bigger than buffer size %d",
				 __func__, resp_len, buf_len);
			return -EINVAL;
		}
	}

	return 0;
}
/**
 * ufshcd_hba_capabilities - Read controller capabilities
 * @hba: per adapter instance
 *
 * Return: 0 on success, negative on error.
 */
static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
{
	int err;

	hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
	if (hba->quirks & UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS)
		hba->capabilities &= ~MASK_64_ADDRESSING_SUPPORT;

	/* nutrs and nutmrs are 0 based values */
	hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
	hba->nutmrs =
	((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
	hba->reserved_slot = hba->nutrs - 1;

	/* Read crypto capabilities */
	err = ufshcd_hba_init_crypto_capabilities(hba);
	if (err) {
		dev_err(hba->dev, "crypto setup failed\n");
		return err;
	}

	hba->mcq_sup = FIELD_GET(MASK_MCQ_SUPPORT, hba->capabilities);
	if (!hba->mcq_sup)
		return 0;

	hba->mcq_capabilities = ufshcd_readl(hba, REG_MCQCAP);
	hba->ext_iid_sup = FIELD_GET(MASK_EXT_IID_SUPPORT,
				     hba->mcq_capabilities);

	return 0;
}
/**
 * ufshcd_ready_for_uic_cmd - Check if controller is ready
 *                            to accept UIC commands
 * @hba: per adapter instance
 *
 * Return: true on success, else false.
 */
static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
{
	u32 val;
	int ret = read_poll_timeout(ufshcd_readl, val, val & UIC_COMMAND_READY,
				    500, UIC_CMD_TIMEOUT * 1000, false, hba,
				    REG_CONTROLLER_STATUS);
	return ret == 0;
}

/**
 * ufshcd_get_upmcrs - Get the power mode change request status
 * @hba: Pointer to adapter instance
 *
 * This function gets the UPMCRS field of HCS register
 *
 * Return: value of UPMCRS field.
 */
static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
}

/**
 * ufshcd_dispatch_uic_cmd - Dispatch an UIC command to the Unipro layer
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 */
static inline void
ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	lockdep_assert_held(&hba->uic_cmd_mutex);

	WARN_ON(hba->active_uic_cmd);

	hba->active_uic_cmd = uic_cmd;

	/* Write Args */
	ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
	ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
	ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);

	ufshcd_add_uic_command_trace(hba, uic_cmd, UFS_CMD_SEND);

	/* Write UIC Cmd */
	ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
		      REG_UIC_COMMAND);
}
/**
 * ufshcd_wait_for_uic_cmd - Wait for completion of an UIC command
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Return: 0 only if success.
 */
static int
ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	lockdep_assert_held(&hba->uic_cmd_mutex);

	if (wait_for_completion_timeout(&uic_cmd->done,
					msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
		ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
	} else {
		ret = -ETIMEDOUT;
		dev_err(hba->dev,
			"uic cmd 0x%x with arg3 0x%x completion timeout\n",
			uic_cmd->command, uic_cmd->argument3);

		if (!uic_cmd->cmd_active) {
			dev_err(hba->dev, "%s: UIC cmd has been completed, return the result\n",
				__func__);
			ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
		}
	}

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->active_uic_cmd = NULL;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return ret;
}

/**
 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 * @completion: initialize the completion only if this is set to true
 *
 * Return: 0 only if success.
 */
static int
__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
		      bool completion)
{
	lockdep_assert_held(&hba->uic_cmd_mutex);

	if (!ufshcd_ready_for_uic_cmd(hba)) {
		dev_err(hba->dev,
			"Controller not ready to accept UIC commands\n");
		return -EIO;
	}

	if (completion)
		init_completion(&uic_cmd->done);

	uic_cmd->cmd_active = 1;
	ufshcd_dispatch_uic_cmd(hba, uic_cmd);

	return 0;
}
/**
 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Return: 0 only if success.
 */
int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
		return 0;

	ufshcd_hold(hba);
	mutex_lock(&hba->uic_cmd_mutex);
	ufshcd_add_delay_before_dme_cmd(hba);

	ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
	if (!ret)
		ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);

	mutex_unlock(&hba->uic_cmd_mutex);

	ufshcd_release(hba);
	return ret;
}
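/*
 * A minimal sketch of how this path is typically consumed: the DME helpers
 * later in this file (and the ufshcd_dme_set()/ufshcd_dme_get() wrappers
 * declared in the ufshcd header) build a struct uic_command and funnel it
 * through ufshcd_send_uic_cmd(), roughly:
 *
 *	struct uic_command uic_cmd = {
 *		.command = UIC_CMD_DME_GET,
 *		.argument1 = UIC_ARG_MIB(PA_PWRMODE),
 *	};
 *	int err = ufshcd_send_uic_cmd(hba, &uic_cmd);
 */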
/**
 * ufshcd_sgl_to_prdt - SG list to PRTD (Physical Region Description Table, 4DW format)
 * @hba: per-adapter instance
 * @lrbp: pointer to local reference block
 * @sg_entries: The number of sg lists actually used
 * @sg_list: Pointer to SG list
 */
static void ufshcd_sgl_to_prdt(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int sg_entries,
			       struct scatterlist *sg_list)
{
	struct ufshcd_sg_entry *prd;
	struct scatterlist *sg;
	int i;

	if (sg_entries) {

		if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
			lrbp->utr_descriptor_ptr->prd_table_length =
				cpu_to_le16(sg_entries * ufshcd_sg_entry_size(hba));
		else
			lrbp->utr_descriptor_ptr->prd_table_length = cpu_to_le16(sg_entries);

		prd = lrbp->ucd_prdt_ptr;

		for_each_sg(sg_list, sg, sg_entries, i) {
			const unsigned int len = sg_dma_len(sg);

			/*
			 * From the UFSHCI spec: "Data Byte Count (DBC): A '0'
			 * based value that indicates the length, in bytes, of
			 * the data block. A maximum of length of 256KB may
			 * exist for any entry. Bits 1:0 of this field shall be
			 * 11b to indicate Dword granularity. A value of '3'
			 * indicates 4 bytes, '7' indicates 8 bytes, etc."
			 */
			WARN_ONCE(len > SZ_256K, "len = %#x\n", len);
			prd->size = cpu_to_le32(len - 1);
			prd->addr = cpu_to_le64(sg->dma_address);
			prd->reserved = 0;
			prd = (void *)prd + ufshcd_sg_entry_size(hba);
		}
	} else {
		lrbp->utr_descriptor_ptr->prd_table_length = 0;
	}
}
/**
 * ufshcd_map_sg - Map scatter-gather list to prdt
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 *
 * Return: 0 in case of success, non-zero value in case of failure.
 */
static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct scsi_cmnd *cmd = lrbp->cmd;
	int sg_segments = scsi_dma_map(cmd);

	if (sg_segments < 0)
		return sg_segments;

	ufshcd_sgl_to_prdt(hba, lrbp, sg_segments, scsi_sglist(cmd));

	return 0;
}
/**
 * ufshcd_enable_intr - enable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == ufshci_version(1, 0)) {
		u32 rw;
		rw = set & INTERRUPT_MASK_RW_VER_10;
		set = rw | ((set ^ intrs) & intrs);
	} else {
		set |= intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}

/**
 * ufshcd_disable_intr - disable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == ufshci_version(1, 0)) {
		u32 rw;
		rw = (set & INTERRUPT_MASK_RW_VER_10) &
			~(intrs & INTERRUPT_MASK_RW_VER_10);
		set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
	} else {
		set &= ~intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}
/**
 * ufshcd_prepare_req_desc_hdr - Fill UTP Transfer request descriptor header
 * according to the request descriptor content
 * @lrbp: pointer to local reference block
 * @upiu_flags: flags required in the header
 * @cmd_dir: requests data direction
 * @ehs_length: Total EHS Length (in 32-byte units of all Extra Header Segments)
 */
static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, u8 *upiu_flags,
					enum dma_data_direction cmd_dir, int ehs_length)
{
	struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
	struct request_desc_header *h = &req_desc->header;
	enum utp_data_direction data_direction;

	*h = (typeof(*h)){ };

	if (cmd_dir == DMA_FROM_DEVICE) {
		data_direction = UTP_DEVICE_TO_HOST;
		*upiu_flags = UPIU_CMD_FLAGS_READ;
	} else if (cmd_dir == DMA_TO_DEVICE) {
		data_direction = UTP_HOST_TO_DEVICE;
		*upiu_flags = UPIU_CMD_FLAGS_WRITE;
	} else {
		data_direction = UTP_NO_DATA_TRANSFER;
		*upiu_flags = UPIU_CMD_FLAGS_NONE;
	}

	h->command_type = lrbp->command_type;
	h->data_direction = data_direction;
	h->ehs_length = ehs_length;

	if (lrbp->intr_cmd)
		h->interrupt = 1;

	/* Prepare crypto related dwords */
	ufshcd_prepare_req_desc_hdr_crypto(lrbp, h);

	/*
	 * assigning invalid value for command status. Controller
	 * updates OCS on command completion, with the command
	 * status
	 */
	h->ocs = OCS_INVALID_COMMAND_STATUS;

	req_desc->prd_table_length = 0;
}
/**
 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
 * for scsi commands
 * @lrbp: local reference block pointer
 * @upiu_flags: flags
 */
static
void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
{
	struct scsi_cmnd *cmd = lrbp->cmd;
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
	unsigned short cdb_len;

	ucd_req_ptr->header = (struct utp_upiu_header){
		.transaction_code = UPIU_TRANSACTION_COMMAND,
		.flags = upiu_flags,
		.lun = lrbp->lun,
		.task_tag = lrbp->task_tag,
		.command_set_type = UPIU_COMMAND_SET_TYPE_SCSI,
	};

	ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);

	cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
	memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
	memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}

/**
 * ufshcd_prepare_utp_query_req_upiu() - fill the utp_transfer_req_desc for
 * query requests
 * @hba: per adapter instance
 * @lrbp: local reference block pointer
 * @upiu_flags: flags
 */
static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
				struct ufshcd_lrb *lrbp, u8 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
	struct ufs_query *query = &hba->dev_cmd.query;
	u16 len = be16_to_cpu(query->request.upiu_req.length);

	/* Query request header */
	ucd_req_ptr->header = (struct utp_upiu_header){
		.transaction_code = UPIU_TRANSACTION_QUERY_REQ,
		.flags = upiu_flags,
		.lun = lrbp->lun,
		.task_tag = lrbp->task_tag,
		.query_function = query->request.query_func,
		/* Data segment length only need for WRITE_DESC */
		.data_segment_length =
			query->request.upiu_req.opcode ==
					UPIU_QUERY_OPCODE_WRITE_DESC ?
				cpu_to_be16(len) :
				0,
	};

	/* Copy the Query Request buffer as is */
	memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
	       QUERY_OSF_SIZE);

	/* Copy the Descriptor */
	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
		memcpy(ucd_req_ptr + 1, query->descriptor, len);

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}

static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;

	memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));

	ucd_req_ptr->header = (struct utp_upiu_header){
		.transaction_code = UPIU_TRANSACTION_NOP_OUT,
		.task_tag = lrbp->task_tag,
	};

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
/**
 * ufshcd_compose_devman_upiu - UFS Protocol Information Unit(UPIU)
 *			     for Device Management Purposes
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 *
 * Return: 0 upon success; < 0 upon failure.
 */
static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
				      struct ufshcd_lrb *lrbp)
{
	u8 upiu_flags;
	int ret = 0;

	if (hba->ufs_version <= ufshci_version(1, 1))
		lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
	else
		lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;

	ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0);
	if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
		ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
	else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
		ufshcd_prepare_utp_nop_upiu(lrbp);
	else
		ret = -EINVAL;

	return ret;
}

/**
 * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit(UPIU)
 *			   for SCSI Purposes
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 */
static void ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct request *rq = scsi_cmd_to_rq(lrbp->cmd);
	unsigned int ioprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
	u8 upiu_flags;

	if (hba->ufs_version <= ufshci_version(1, 1))
		lrbp->command_type = UTP_CMD_TYPE_SCSI;
	else
		lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;

	ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
				    lrbp->cmd->sc_data_direction, 0);
	if (ioprio_class == IOPRIO_CLASS_RT)
		upiu_flags |= UPIU_CMD_FLAGS_CP;
	ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
}
/**
 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
 * @upiu_wlun_id: UPIU W-LUN id
 *
 * Return: SCSI W-LUN id.
 */
static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
{
	return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
}

static inline bool is_device_wlun(struct scsi_device *sdev)
{
	return sdev->lun ==
		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN);
}
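/*
 * Worked example (the UPIU W-LUN constants live in ufs.h and are quoted here
 * as an assumption): for the UFS Device W-LUN value 0xD0, the W-LUN bit
 * (UFS_UPIU_WLUN_ID) is stripped and SCSI_W_LUN_BASE (0xc100) is ORed in, so
 * the device well-known LU appears on the SCSI bus as W-LUN 0xc150.
 */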
/*
 * Associate the UFS controller queue with the default and poll HCTX types.
 * Initialize the mq_map[] arrays.
 */
static void ufshcd_map_queues(struct Scsi_Host *shost)
{
	struct ufs_hba *hba = shost_priv(shost);
	int i, queue_offset = 0;

	if (!is_mcq_supported(hba)) {
		hba->nr_queues[HCTX_TYPE_DEFAULT] = 1;
		hba->nr_queues[HCTX_TYPE_READ] = 0;
		hba->nr_queues[HCTX_TYPE_POLL] = 1;
		hba->nr_hw_queues = 1;
	}

	for (i = 0; i < shost->nr_maps; i++) {
		struct blk_mq_queue_map *map = &shost->tag_set.map[i];

		map->nr_queues = hba->nr_queues[i];
		if (!map->nr_queues)
			continue;
		map->queue_offset = queue_offset;
		if (i == HCTX_TYPE_POLL && !is_mcq_supported(hba))
			map->queue_offset = 0;

		blk_mq_map_queues(map);
		queue_offset += map->nr_queues;
	}
}
static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
{
	struct utp_transfer_cmd_desc *cmd_descp = (void *)hba->ucdl_base_addr +
		i * ufshcd_get_ucd_size(hba);
	struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
	dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
		i * ufshcd_get_ucd_size(hba);
	u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
				       response_upiu);
	u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);

	lrb->utr_descriptor_ptr = utrdlp + i;
	lrb->utrd_dma_addr = hba->utrdl_dma_addr +
		i * sizeof(struct utp_transfer_req_desc);
	lrb->ucd_req_ptr = (struct utp_upiu_req *)cmd_descp->command_upiu;
	lrb->ucd_req_dma_addr = cmd_desc_element_addr;
	lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp->response_upiu;
	lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
	lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp->prd_table;
	lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
}
/**
 * ufshcd_queuecommand - main entry point for SCSI requests
 * @host: SCSI host pointer
 * @cmd: command from SCSI Midlayer
 *
 * Return: 0 for success, non-zero in case of failure.
 */
static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	struct ufs_hba *hba = shost_priv(host);
	int tag = scsi_cmd_to_rq(cmd)->tag;
	struct ufshcd_lrb *lrbp;
	int err = 0;
	struct ufs_hw_queue *hwq = NULL;

	switch (hba->ufshcd_state) {
	case UFSHCD_STATE_OPERATIONAL:
		break;
	case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
		/*
		 * SCSI error handler can call ->queuecommand() while UFS error
		 * handler is in progress. Error interrupts could change the
		 * state from UFSHCD_STATE_RESET to
		 * UFSHCD_STATE_EH_SCHEDULED_NON_FATAL. Prevent requests
		 * being issued in that case.
		 */
		if (ufshcd_eh_in_progress(hba)) {
			err = SCSI_MLQUEUE_HOST_BUSY;
			goto out;
		}
		break;
	case UFSHCD_STATE_EH_SCHEDULED_FATAL:
		/*
		 * pm_runtime_get_sync() is used at error handling preparation
		 * stage. If a scsi cmd, e.g. the SSU cmd, is sent from hba's
		 * PM ops, it can never be finished if we let SCSI layer keep
		 * retrying it, which gets err handler stuck forever. Neither
		 * can we let the scsi cmd pass through, because UFS is in bad
		 * state, the scsi cmd may eventually time out, which will get
		 * err handler blocked for too long. So, just fail the scsi cmd
		 * sent from PM ops, err handler can recover PM error anyways.
		 */
		if (hba->pm_op_in_progress) {
			hba->force_reset = true;
			set_host_byte(cmd, DID_BAD_TARGET);
			scsi_done(cmd);
			goto out;
		}
		fallthrough;
	case UFSHCD_STATE_RESET:
		err = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	case UFSHCD_STATE_ERROR:
		set_host_byte(cmd, DID_ERROR);
		scsi_done(cmd);
		goto out;
	}

	hba->req_abort_count = 0;

	ufshcd_hold(hba);

	lrbp = &hba->lrb[tag];
	lrbp->cmd = cmd;
	lrbp->task_tag = tag;
	lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
	lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba);

	ufshcd_prepare_lrbp_crypto(scsi_cmd_to_rq(cmd), lrbp);

	lrbp->req_abort_skip = false;

	ufshcd_comp_scsi_upiu(hba, lrbp);

	err = ufshcd_map_sg(hba, lrbp);
	if (err) {
		ufshcd_release(hba);
		goto out;
	}

	if (is_mcq_enabled(hba))
		hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));

	ufshcd_send_command(hba, tag, hwq);

out:
	if (ufs_trigger_eh()) {
		unsigned long flags;

		spin_lock_irqsave(hba->host->host_lock, flags);
		ufshcd_schedule_eh_work(hba);
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	}

	return err;
}
static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
{
	lrbp->cmd = NULL;
	lrbp->task_tag = tag;
	lrbp->lun = 0; /* device management cmd is not specific to any LUN */
	lrbp->intr_cmd = true; /* No interrupt aggregation */
	ufshcd_prepare_lrbp_crypto(NULL, lrbp);
	hba->dev_cmd.type = cmd_type;

	return ufshcd_compose_devman_upiu(hba, lrbp);
}

/*
 * Check with the block layer if the command is inflight
 * @cmd: command to check.
 *
 * Return: true if command is inflight; false if not.
 */
bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd)
{
	struct request *rq;

	if (!cmd)
		return false;

	rq = scsi_cmd_to_rq(cmd);
	if (!blk_mq_request_started(rq))
		return false;

	return true;
}
/*
 * Clear the pending command in the controller and wait until
 * the controller confirms that the command has been cleared.
 * @hba: per adapter instance
 * @task_tag: The tag number of the command to be cleared.
 */
static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag)
{
	u32 mask = 1U << task_tag;
	unsigned long flags;
	int err;

	if (is_mcq_enabled(hba)) {
		/*
		 * MCQ mode. Clean up the MCQ resources similar to
		 * what the ufshcd_utrl_clear() does for SDB mode.
		 */
		err = ufshcd_mcq_sq_cleanup(hba, task_tag);
		if (err) {
			dev_err(hba->dev, "%s: failed tag=%d. err=%d\n",
				__func__, task_tag, err);
			return err;
		}
		return 0;
	}

	/* clear outstanding transaction before retry */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_utrl_clear(hba, mask);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/*
	 * wait for h/w to clear corresponding bit in door-bell.
	 * max. wait is 1 sec.
	 */
	return ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
					mask, ~mask, 1000, 1000);
}
/**
 * ufshcd_dev_cmd_completion() - handles device management command responses
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 *
 * Return: 0 upon success; < 0 upon failure.
 */
static int
ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	enum upiu_response_transaction resp;
	int err = 0;

	hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
	resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);

	switch (resp) {
	case UPIU_TRANSACTION_NOP_IN:
		if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
			err = -EINVAL;
			dev_err(hba->dev, "%s: unexpected response %x\n",
					__func__, resp);
		}
		break;
	case UPIU_TRANSACTION_QUERY_RSP: {
		u8 response = lrbp->ucd_rsp_ptr->header.response;

		if (response == 0)
			err = ufshcd_copy_query_response(hba, lrbp);
		break;
	}
	case UPIU_TRANSACTION_REJECT_UPIU:
		/* TODO: handle Reject UPIU Response */
		err = -EPERM;
		dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
				__func__);
		break;
	case UPIU_TRANSACTION_RESPONSE:
		if (hba->dev_cmd.type != DEV_CMD_TYPE_RPMB) {
			err = -EINVAL;
			dev_err(hba->dev, "%s: unexpected response %x\n", __func__, resp);
		}
		break;
	default:
		err = -EINVAL;
		dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
				__func__, resp);
		break;
	}

	return err;
}
*hba
,
3132 struct ufshcd_lrb
*lrbp
, int max_timeout
)
3134 unsigned long time_left
= msecs_to_jiffies(max_timeout
);
3135 unsigned long flags
;
3140 time_left
= wait_for_completion_timeout(hba
->dev_cmd
.complete
,
3143 if (likely(time_left
)) {
3145 * The completion handler called complete() and the caller of
3146 * this function still owns the @lrbp tag so the code below does
3147 * not trigger any race conditions.
3149 hba
->dev_cmd
.complete
= NULL
;
3150 err
= ufshcd_get_tr_ocs(lrbp
, NULL
);
3152 err
= ufshcd_dev_cmd_completion(hba
, lrbp
);
3155 dev_dbg(hba
->dev
, "%s: dev_cmd request timedout, tag %d\n",
3156 __func__
, lrbp
->task_tag
);
3159 if (is_mcq_enabled(hba
)) {
3160 err
= ufshcd_clear_cmd(hba
, lrbp
->task_tag
);
3161 hba
->dev_cmd
.complete
= NULL
;
3166 if (ufshcd_clear_cmd(hba
, lrbp
->task_tag
) == 0) {
3167 /* successfully cleared the command, retry if needed */
3170 * Since clearing the command succeeded we also need to
3171 * clear the task tag bit from the outstanding_reqs
3174 spin_lock_irqsave(&hba
->outstanding_lock
, flags
);
3175 pending
= test_bit(lrbp
->task_tag
,
3176 &hba
->outstanding_reqs
);
3178 hba
->dev_cmd
.complete
= NULL
;
3179 __clear_bit(lrbp
->task_tag
,
3180 &hba
->outstanding_reqs
);
3182 spin_unlock_irqrestore(&hba
->outstanding_lock
, flags
);
3186 * The completion handler ran while we tried to
3187 * clear the command.
3193 dev_err(hba
->dev
, "%s: failed to clear tag %d\n",
3194 __func__
, lrbp
->task_tag
);
3196 spin_lock_irqsave(&hba
->outstanding_lock
, flags
);
3197 pending
= test_bit(lrbp
->task_tag
,
3198 &hba
->outstanding_reqs
);
3200 hba
->dev_cmd
.complete
= NULL
;
3201 spin_unlock_irqrestore(&hba
->outstanding_lock
, flags
);
3205 * The completion handler ran while we tried to
3206 * clear the command.
/**
 * ufshcd_exec_dev_cmd - API for sending device management requests
 * @hba: UFS hba
 * @cmd_type: specifies the type (NOP, Query...)
 * @timeout: timeout in milliseconds
 *
 * Return: 0 upon success; < 0 upon failure.
 *
 * NOTE: Since there is only one available tag for device management commands,
 * it is expected you hold the hba->dev_cmd.lock mutex.
 */
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
		enum dev_cmd_type cmd_type, int timeout)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	const u32 tag = hba->reserved_slot;
	struct ufshcd_lrb *lrbp;
	int err;

	/* Protects use of hba->reserved_slot. */
	lockdep_assert_held(&hba->dev_cmd.lock);

	down_read(&hba->clk_scaling_lock);

	lrbp = &hba->lrb[tag];
	lrbp->cmd = NULL;
	err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
	if (unlikely(err))
		goto out;

	hba->dev_cmd.complete = &wait;

	ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);

	ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
	err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
	ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
				    (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);

out:
	up_read(&hba->clk_scaling_lock);
	return err;
}
/**
 * ufshcd_init_query() - init the query response and request parameters
 * @hba: per-adapter instance
 * @request: address of the request pointer to be initialized
 * @response: address of the response pointer to be initialized
 * @opcode: operation to perform
 * @idn: flag idn to access
 * @index: LU number to access
 * @selector: query/flag/descriptor further identification
 */
static inline void ufshcd_init_query(struct ufs_hba *hba,
		struct ufs_query_req **request, struct ufs_query_res **response,
		enum query_opcode opcode, u8 idn, u8 index, u8 selector)
{
	*request = &hba->dev_cmd.query.request;
	*response = &hba->dev_cmd.query.response;
	memset(*request, 0, sizeof(struct ufs_query_req));
	memset(*response, 0, sizeof(struct ufs_query_res));
	(*request)->upiu_req.opcode = opcode;
	(*request)->upiu_req.idn = idn;
	(*request)->upiu_req.index = index;
	(*request)->upiu_req.selector = selector;
}

static int ufshcd_query_flag_retry(struct ufs_hba *hba,
	enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
{
	int ret;
	int retries;

	for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
		ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res);
		if (ret)
			dev_dbg(hba->dev,
				"%s: failed with error %d, retries %d\n",
				__func__, ret, retries);
		else
			break;
	}

	if (ret)
		dev_err(hba->dev,
			"%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
			__func__, opcode, idn, ret, retries);
	return ret;
}
/**
 * ufshcd_query_flag() - API function for sending flag query requests
 * @hba: per-adapter instance
 * @opcode: flag query to perform
 * @idn: flag idn to access
 * @index: flag index to access
 * @flag_res: the flag value after the query request completes
 *
 * Return: 0 for success, non-zero in case of failure.
 */
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
			enum flag_idn idn, u8 index, bool *flag_res)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err, selector = 0;
	int timeout = QUERY_REQ_TIMEOUT;

	ufshcd_hold(hba);
	mutex_lock(&hba->dev_cmd.lock);
	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			selector);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_SET_FLAG:
	case UPIU_QUERY_OPCODE_CLEAR_FLAG:
	case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		if (!flag_res) {
			/* No dummy reads */
			dev_err(hba->dev, "%s: Invalid argument for read request\n",
					__func__);
			err = -EINVAL;
			goto out_unlock;
		}
		break;
	default:
		dev_err(hba->dev,
			"%s: Expected query flag opcode but got = %d\n",
			__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);

	if (err) {
		dev_err(hba->dev,
			"%s: Sending flag query for idn %d failed, err = %d\n",
			__func__, idn, err);
		goto out_unlock;
	}

	if (flag_res)
		*flag_res = (be32_to_cpu(response->upiu_res.value) &
				MASK_QUERY_UPIU_FLAG_LOC) & 0x1;

out_unlock:
	mutex_unlock(&hba->dev_cmd.lock);
	ufshcd_release(hba);
	return err;
}
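/*
 * A minimal usage sketch, mirroring how the driver itself kicks off device
 * initialization elsewhere in this file (flag IDN names taken from ufs.h):
 *
 *	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
 *				      QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
 *	...
 *	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *				      QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
 */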
/**
 * ufshcd_query_attr - API function for sending attribute requests
 * @hba: per-adapter instance
 * @opcode: attribute opcode
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @attr_val: the attribute value after the query request completes
 *
 * Return: 0 for success, non-zero in case of failure.
 */
int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
		      enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err;

	if (!attr_val) {
		dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
				__func__, opcode);
		return -EINVAL;
	}

	ufshcd_hold(hba);

	mutex_lock(&hba->dev_cmd.lock);
	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			selector);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_WRITE_ATTR:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		request->upiu_req.value = cpu_to_be32(*attr_val);
		break;
	case UPIU_QUERY_OPCODE_READ_ATTR:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		break;
	default:
		dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);

	if (err) {
		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
				__func__, opcode, idn, index, err);
		goto out_unlock;
	}

	*attr_val = be32_to_cpu(response->upiu_res.value);

out_unlock:
	mutex_unlock(&hba->dev_cmd.lock);
	ufshcd_release(hba);
	return err;
}
/**
 * ufshcd_query_attr_retry() - API function for sending query
 * attribute with retries
 * @hba: per-adapter instance
 * @opcode: attribute opcode
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @attr_val: the attribute value after the query request
 * completes
 *
 * Return: 0 for success, non-zero in case of failure.
 */
int ufshcd_query_attr_retry(struct ufs_hba *hba,
	enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
	u32 *attr_val)
{
	int ret = 0;
	u32 retries;

	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
		ret = ufshcd_query_attr(hba, opcode, idn, index,
						selector, attr_val);
		if (ret)
			dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
				__func__, ret, retries);
		else
			break;
	}

	if (ret)
		dev_err(hba->dev,
			"%s: query attribute, idn %d, failed with error %d after %d retries\n",
			__func__, idn, ret, QUERY_REQ_RETRIES);
	return ret;
}
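/*
 * A minimal usage sketch, assuming the attribute IDN names from ufs.h: reading
 * the device's active ICC level attribute could look like
 *
 *	u32 icc_level;
 *	int err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
 *					  QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
 *					  &icc_level);
 */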
static int __ufshcd_query_descriptor(struct ufs_hba *hba,
			enum query_opcode opcode, enum desc_idn idn, u8 index,
			u8 selector, u8 *desc_buf, int *buf_len)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err;

	if (!desc_buf) {
		dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
				__func__, opcode);
		return -EINVAL;
	}

	if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
		dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
				__func__, *buf_len);
		return -EINVAL;
	}

	ufshcd_hold(hba);

	mutex_lock(&hba->dev_cmd.lock);
	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			selector);
	hba->dev_cmd.query.descriptor = desc_buf;
	request->upiu_req.length = cpu_to_be16(*buf_len);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_WRITE_DESC:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_DESC:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		break;
	default:
		dev_err(hba->dev,
				"%s: Expected query descriptor opcode but got = 0x%.2x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);

	if (err) {
		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
				__func__, opcode, idn, index, err);
		goto out_unlock;
	}

	*buf_len = be16_to_cpu(response->upiu_res.length);

out_unlock:
	hba->dev_cmd.query.descriptor = NULL;
	mutex_unlock(&hba->dev_cmd.lock);
	ufshcd_release(hba);
	return err;
}
/**
 * ufshcd_query_descriptor_retry - API function for sending descriptor requests
 * @hba: per-adapter instance
 * @opcode: attribute opcode
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @desc_buf: the buffer that contains the descriptor
 * @buf_len: length parameter passed to the device
 *
 * The buf_len parameter will contain, on return, the length parameter
 * received on the response.
 *
 * Return: 0 for success, non-zero in case of failure.
 */
int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
				  enum query_opcode opcode,
				  enum desc_idn idn, u8 index,
				  u8 selector,
				  u8 *desc_buf, int *buf_len)
{
	int err;
	int retries;

	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
		err = __ufshcd_query_descriptor(hba, opcode, idn, index,
						selector, desc_buf, buf_len);
		if (!err || err == -EINVAL)
			break;
	}

	return err;
}
/**
 * ufshcd_read_desc_param - read the specified descriptor parameter
 * @hba: Pointer to adapter instance
 * @desc_id: descriptor idn value
 * @desc_index: descriptor index
 * @param_offset: offset of the parameter to read
 * @param_read_buf: pointer to buffer where parameter would be read
 * @param_size: sizeof(param_read_buf)
 *
 * Return: 0 in case of success, non-zero otherwise.
 */
int ufshcd_read_desc_param(struct ufs_hba *hba,
			   enum desc_idn desc_id,
			   int desc_index,
			   u8 param_offset,
			   u8 *param_read_buf,
			   u8 param_size)
{
	int ret;
	u8 *desc_buf;
	int buff_len = QUERY_DESC_MAX_SIZE;
	bool is_kmalloc = true;

	/* Safety check */
	if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
		return -EINVAL;

	/* Check whether we need temp memory */
	if (param_offset != 0 || param_size < buff_len) {
		desc_buf = kzalloc(buff_len, GFP_KERNEL);
		if (!desc_buf)
			return -ENOMEM;
	} else {
		desc_buf = param_read_buf;
		is_kmalloc = false;
	}

	/* Request for full descriptor */
	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
					    desc_id, desc_index, 0,
					    desc_buf, &buff_len);
	if (ret) {
		dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n",
			__func__, desc_id, desc_index, param_offset, ret);
		goto out;
	}

	/* Update descriptor length */
	buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET];

	if (param_offset >= buff_len) {
		dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n",
			__func__, param_offset, desc_id, buff_len);
		ret = -EINVAL;
		goto out;
	}

	/* Sanity check */
	if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
		dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n",
			__func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
		ret = -EINVAL;
		goto out;
	}

	if (is_kmalloc) {
		/* Make sure we don't copy more data than available */
		if (param_offset >= buff_len)
			ret = -EINVAL;
		else
			memcpy(param_read_buf, &desc_buf[param_offset],
			       min_t(u32, param_size, buff_len - param_offset));
	}
out:
	if (is_kmalloc)
		kfree(desc_buf);
	return ret;
}
/**
 * struct uc_string_id - unicode string
 *
 * @len: size of this descriptor inclusive
 * @type: descriptor type
 * @uc: unicode string character
 */
struct uc_string_id {
	u8 len;
	u8 type;
	wchar_t uc[];
} __packed;

/* replace non-printable or non-ASCII characters with spaces */
static inline char ufshcd_remove_non_printable(u8 ch)
{
	return (ch >= 0x20 && ch <= 0x7e) ? ch : ' ';
}
/**
 * ufshcd_read_string_desc - read string descriptor
 * @hba: pointer to adapter instance
 * @desc_index: descriptor index
 * @buf: pointer to buffer where descriptor would be read,
 *       the caller should free the memory.
 * @ascii: if true convert from unicode to ascii characters
 *         null terminated string.
 *
 * Return:
 * *      string size on success.
 * *      -ENOMEM: on allocation failure
 * *      -EINVAL: on a wrong parameter
 */
int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
			    u8 **buf, bool ascii)
{
	struct uc_string_id *uc_str;
	u8 *str;
	int ret;

	if (!buf)
		return -EINVAL;

	uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
	if (!uc_str)
		return -ENOMEM;

	ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_STRING, desc_index, 0,
				     (u8 *)uc_str, QUERY_DESC_MAX_SIZE);
	if (ret < 0) {
		dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n",
			QUERY_REQ_RETRIES, ret);
		str = NULL;
		goto out;
	}

	if (uc_str->len <= QUERY_DESC_HDR_SIZE) {
		dev_dbg(hba->dev, "String Desc is of zero length\n");
		str = NULL;
		ret = 0;
		goto out;
	}

	if (ascii) {
		ssize_t ascii_len;
		int i;
		/* remove header and divide by 2 to move from UTF16 to UTF8 */
		ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1;
		str = kzalloc(ascii_len, GFP_KERNEL);
		if (!str) {
			ret = -ENOMEM;
			goto out;
		}

		/*
		 * the descriptor contains string in UTF16 format
		 * we need to convert to utf-8 so it can be displayed
		 */
		ret = utf16s_to_utf8s(uc_str->uc,
				      uc_str->len - QUERY_DESC_HDR_SIZE,
				      UTF16_BIG_ENDIAN, str, ascii_len - 1);

		/* replace non-printable or non-ASCII characters with spaces */
		for (i = 0; i < ret; i++)
			str[i] = ufshcd_remove_non_printable(str[i]);

		/* Null terminate the string */
		str[ret++] = '\0';

	} else {
		str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
		if (!str) {
			ret = -ENOMEM;
			goto out;
		}
		ret = uc_str->len;
	}
out:
	*buf = str;
	kfree(uc_str);
	return ret;
}
/**
 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
 * @hba: Pointer to adapter instance
 * @lun: lun id
 * @param_offset: offset of the parameter to read
 * @param_read_buf: pointer to buffer where parameter would be read
 * @param_size: sizeof(param_read_buf)
 *
 * Return: 0 in case of success, non-zero otherwise.
 */
static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
					      int lun,
					      enum unit_desc_param param_offset,
					      u8 *param_read_buf,
					      u32 param_size)
{
	/*
	 * Unit descriptors are only available for general purpose LUs (LUN id
	 * from 0 to 7) and RPMB Well known LU.
	 */
	if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun))
		return -EOPNOTSUPP;

	return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
				      param_offset, param_read_buf, param_size);
}
static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba)
{
	int err = 0;
	u32 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;

	if (hba->dev_info.wspecversion >= 0x300) {
		err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME, 0, 0,
				&gating_wait);
		if (err)
			dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n",
					 err, gating_wait);

		if (gating_wait == 0) {
			gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
			dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n",
					 gating_wait);
		}

		hba->dev_info.clk_gating_wait_us = gating_wait;
	}

	return err;
}
/**
 * ufshcd_memory_alloc - allocate memory for host memory space data structures
 * @hba: per adapter instance
 *
 * 1. Allocate DMA memory for Command Descriptor array
 *	Each command descriptor consist of Command UPIU, Response UPIU and PRDT
 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
 *	(UTMRDL)
 * 4. Allocate memory for local reference block(lrb).
 *
 * Return: 0 for success, non-zero in case of failure.
 */
static int ufshcd_memory_alloc(struct ufs_hba *hba)
{
	size_t utmrdl_size, utrdl_size, ucdl_size;

	/* Allocate memory for UTP command descriptors */
	ucdl_size = ufshcd_get_ucd_size(hba) * hba->nutrs;
	hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
						  ucdl_size,
						  &hba->ucdl_dma_addr,
						  GFP_KERNEL);

	/*
	 * UFSHCI requires UTP command descriptor to be 128 byte aligned.
	 */
	if (!hba->ucdl_base_addr ||
	    WARN_ON(hba->ucdl_dma_addr & (128 - 1))) {
		dev_err(hba->dev,
			"Command Descriptor Memory allocation failed\n");
		goto out;
	}

	/*
	 * Allocate memory for UTP Transfer descriptors
	 * UFSHCI requires 1KB alignment of UTRD
	 */
	utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
	hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
						   utrdl_size,
						   &hba->utrdl_dma_addr,
						   GFP_KERNEL);
	if (!hba->utrdl_base_addr ||
	    WARN_ON(hba->utrdl_dma_addr & (SZ_1K - 1))) {
		dev_err(hba->dev,
			"Transfer Descriptor Memory allocation failed\n");
		goto out;
	}

	/*
	 * Skip utmrdl allocation; it may have been
	 * allocated during first pass and not released during
	 * MCQ memory allocation.
	 * See ufshcd_release_sdb_queue() and ufshcd_config_mcq()
	 */
	if (hba->utmrdl_base_addr)
		goto skip_utmrdl;
	/*
	 * Allocate memory for UTP Task Management descriptors
	 * UFSHCI requires 1KB alignment of UTMRD
	 */
	utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
	hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
						    utmrdl_size,
						    &hba->utmrdl_dma_addr,
						    GFP_KERNEL);
	if (!hba->utmrdl_base_addr ||
	    WARN_ON(hba->utmrdl_dma_addr & (SZ_1K - 1))) {
		dev_err(hba->dev,
			"Task Management Descriptor Memory allocation failed\n");
		goto out;
	}

skip_utmrdl:
	/* Allocate memory for local reference block */
	hba->lrb = devm_kcalloc(hba->dev,
				hba->nutrs, sizeof(struct ufshcd_lrb),
				GFP_KERNEL);
	if (!hba->lrb) {
		dev_err(hba->dev, "LRB Memory allocation failed\n");
		goto out;
	}
	return 0;
out:
	return -ENOMEM;
}
/**
 * ufshcd_host_memory_configure - configure local reference block with
 *				memory offsets
 * @hba: per adapter instance
 *
 * Configure Host memory space
 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
 *    address.
 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
 *    and PRDT offset.
 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
 *    into local reference block.
 */
static void ufshcd_host_memory_configure(struct ufs_hba *hba)
{
	struct utp_transfer_req_desc *utrdlp;
	dma_addr_t cmd_desc_dma_addr;
	dma_addr_t cmd_desc_element_addr;
	u16 response_offset;
	u16 prdt_offset;
	int cmd_desc_size;
	int i;

	utrdlp = hba->utrdl_base_addr;

	response_offset =
		offsetof(struct utp_transfer_cmd_desc, response_upiu);
	prdt_offset =
		offsetof(struct utp_transfer_cmd_desc, prd_table);

	cmd_desc_size = ufshcd_get_ucd_size(hba);
	cmd_desc_dma_addr = hba->ucdl_dma_addr;

	for (i = 0; i < hba->nutrs; i++) {
		/* Configure UTRD with command descriptor base address */
		cmd_desc_element_addr =
				(cmd_desc_dma_addr + (cmd_desc_size * i));
		utrdlp[i].command_desc_base_addr =
				cpu_to_le64(cmd_desc_element_addr);

		/* Response upiu and prdt offset should be in double words */
		if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
			utrdlp[i].response_upiu_offset =
				cpu_to_le16(response_offset);
			utrdlp[i].prd_table_offset =
				cpu_to_le16(prdt_offset);
			utrdlp[i].response_upiu_length =
				cpu_to_le16(ALIGNED_UPIU_SIZE);
		} else {
			utrdlp[i].response_upiu_offset =
				cpu_to_le16(response_offset >> 2);
			utrdlp[i].prd_table_offset =
				cpu_to_le16(prdt_offset >> 2);
			utrdlp[i].response_upiu_length =
				cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
		}

		ufshcd_init_lrb(hba, &hba->lrb[i], i);
	}
}
/**
 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
 * @hba: per adapter instance
 *
 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
 * in order to initialize the Unipro link startup procedure.
 * Once the Unipro links are up, the device connected to the controller
 * is detected.
 *
 * Return: 0 on success, non-zero value on failure.
 */
static int ufshcd_dme_link_startup(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_dbg(hba->dev,
			"dme-link-startup: error code %d\n", ret);
	return ret;
}

/**
 * ufshcd_dme_reset - UIC command for DME_RESET
 * @hba: per adapter instance
 *
 * DME_RESET command is issued in order to reset UniPro stack.
 * This function now deals with cold reset.
 *
 * Return: 0 on success, non-zero value on failure.
 */
static int ufshcd_dme_reset(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_RESET;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_err(hba->dev,
			"dme-reset: error code %d\n", ret);

	return ret;
}

int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
			       int agreed_gear,
			       int adapt_val)
{
	int ret;

	if (agreed_gear < UFS_HS_G4)
		adapt_val = PA_NO_ADAPT;

	ret = ufshcd_dme_set(hba,
			     UIC_ARG_MIB(PA_TXHSADAPTTYPE),
			     adapt_val);
	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt);

/**
 * ufshcd_dme_enable - UIC command for DME_ENABLE
 * @hba: per adapter instance
 *
 * DME_ENABLE command is issued in order to enable UniPro stack.
 *
 * Return: 0 on success, non-zero value on failure.
 */
static int ufshcd_dme_enable(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_ENABLE;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_err(hba->dev,
			"dme-enable: error code %d\n", ret);

	return ret;
}
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
{
	#define MIN_DELAY_BEFORE_DME_CMDS_US	1000
	unsigned long min_sleep_time_us;

	if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
		return;

	/*
	 * last_dme_cmd_tstamp will be 0 only for 1st call to
	 * this function
	 */
	if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
		min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
	} else {
		unsigned long delta =
			(unsigned long) ktime_to_us(
				ktime_sub(ktime_get(),
				hba->last_dme_cmd_tstamp));

		if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
			min_sleep_time_us =
				MIN_DELAY_BEFORE_DME_CMDS_US - delta;
		else
			return; /* no more delay required */
	}

	/* allow sleep for extra 50us if needed */
	usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
}
/**
 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
 * @hba: per adapter instance
 * @attr_sel: uic command argument1
 * @attr_set: attribute set type as uic command argument2
 * @mib_val: setting value as uic command argument3
 * @peer: indicate whether peer or local
 *
 * Return: 0 on success, non-zero value on failure.
 */
int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
			u8 attr_set, u32 mib_val, u8 peer)
{
	struct uic_command uic_cmd = {0};
	static const char *const action[] = {
		"dme-set",
		"dme-peer-set"
	};
	const char *set = action[!!peer];
	int ret;
	int retries = UFS_UIC_COMMAND_RETRIES;

	uic_cmd.command = peer ?
		UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
	uic_cmd.argument1 = attr_sel;
	uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
	uic_cmd.argument3 = mib_val;

	do {
		/* for peer attributes we retry upon failure */
		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
		if (ret)
			dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
				set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
	} while (ret && peer && --retries);

	if (ret)
		dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
			set, UIC_GET_ATTR_ID(attr_sel), mib_val,
			UFS_UIC_COMMAND_RETRIES - retries);

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
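/*
 * Callers normally go through the convenience wrappers declared in the ufshcd
 * header rather than calling ufshcd_dme_set_attr() directly; a sketch, with
 * the wrapper names assumed from that header:
 *
 *	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
 *	ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
 */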
/**
 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
 * @hba: per adapter instance
 * @attr_sel: uic command argument1
 * @mib_val: the value of the attribute as returned by the UIC command
 * @peer: indicate whether peer or local
 *
 * Return: 0 on success, non-zero value on failure.
 */
int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
			u32 *mib_val, u8 peer)
{
	struct uic_command uic_cmd = {0};
	static const char *const action[] = {
		"dme-get",
		"dme-peer-get"
	};
	const char *get = action[!!peer];
	int ret;
	int retries = UFS_UIC_COMMAND_RETRIES;
	struct ufs_pa_layer_attr orig_pwr_info;
	struct ufs_pa_layer_attr temp_pwr_info;
	bool pwr_mode_change = false;

	if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
		orig_pwr_info = hba->pwr_info;
		temp_pwr_info = orig_pwr_info;

		if (orig_pwr_info.pwr_tx == FAST_MODE ||
		    orig_pwr_info.pwr_rx == FAST_MODE) {
			temp_pwr_info.pwr_tx = FASTAUTO_MODE;
			temp_pwr_info.pwr_rx = FASTAUTO_MODE;
			pwr_mode_change = true;
		} else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
		    orig_pwr_info.pwr_rx == SLOW_MODE) {
			temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
			temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
			pwr_mode_change = true;
		}
		if (pwr_mode_change) {
			ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
			if (ret)
				goto out;
		}
	}

	uic_cmd.command = peer ?
		UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
	uic_cmd.argument1 = attr_sel;

	do {
		/* for peer attributes we retry upon failure */
		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
		if (ret)
			dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
				get, UIC_GET_ATTR_ID(attr_sel), ret);
	} while (ret && peer && --retries);

	if (ret)
		dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
			get, UIC_GET_ATTR_ID(attr_sel),
			UFS_UIC_COMMAND_RETRIES - retries);

	if (mib_val && !ret)
		*mib_val = uic_cmd.argument3;

	if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
	    && pwr_mode_change)
		ufshcd_change_power_mode(hba, &orig_pwr_info);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
/**
 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
 * state) and waits for it to take effect.
 *
 * @hba: per adapter instance
 * @cmd: UIC command to execute
 *
 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
 * DME_HIBERNATE_EXIT commands take some time to take its effect on both host
 * and device UniPro link and hence it's final completion would be indicated by
 * dedicated status bits in Interrupt Status register (UPMS, UHES, UHXS) in
 * addition to normal UIC command completion Status (UCCS). This function only
 * returns after the relevant status bits indicate the completion.
 *
 * Return: 0 on success, non-zero value on failure.
 */
static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
{
	DECLARE_COMPLETION_ONSTACK(uic_async_done);
	unsigned long flags;
	u8 status;
	int ret;
	bool reenable_intr = false;

	mutex_lock(&hba->uic_cmd_mutex);
	ufshcd_add_delay_before_dme_cmd(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (ufshcd_is_link_broken(hba)) {
		ret = -ENOLINK;
		goto out_unlock;
	}
	hba->uic_async_done = &uic_async_done;
	if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
		ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
		/*
		 * Make sure UIC command completion interrupt is disabled before
		 * issuing UIC command.
		 */
		reenable_intr = true;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ret = __ufshcd_send_uic_cmd(hba, cmd, false);
	if (ret) {
		dev_err(hba->dev,
			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
			cmd->command, cmd->argument3, ret);
		goto out;
	}

	if (!wait_for_completion_timeout(hba->uic_async_done,
					 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
		dev_err(hba->dev,
			"pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
			cmd->command, cmd->argument3);

		if (!cmd->cmd_active) {
			dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n",
				__func__);
			goto check_upmcrs;
		}

		ret = -ETIMEDOUT;
		goto out;
	}

check_upmcrs:
	status = ufshcd_get_upmcrs(hba);
	if (status != PWR_LOCAL) {
		dev_err(hba->dev,
			"pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
			cmd->command, status);
		ret = (status != PWR_OK) ? status : -1;
	}
out:
	if (ret) {
		ufshcd_print_host_state(hba);
		ufshcd_print_pwr_info(hba);
		ufshcd_print_evt_hist(hba);
	}

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->active_uic_cmd = NULL;
	hba->uic_async_done = NULL;
	if (reenable_intr)
		ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
	if (ret) {
		ufshcd_set_link_broken(hba);
		ufshcd_schedule_eh_work(hba);
	}
out_unlock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	mutex_unlock(&hba->uic_cmd_mutex);

	return ret;
}

/**
 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
 * using DME_SET primitives.
 * @hba: per adapter instance
 * @mode: power mode value
 *
 * Return: 0 on success, non-zero value on failure.
 */
int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
{
	struct uic_command uic_cmd = {0};
	int ret;

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
		ret = ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
		if (ret) {
			dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
						__func__, ret);
			goto out;
		}
	}

	uic_cmd.command = UIC_CMD_DME_SET;
	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
	uic_cmd.argument3 = mode;
	ufshcd_hold(hba);
	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
	ufshcd_release(hba);

out:
	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_uic_change_pwr_mode);
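
/*
 * Recover a link that has been marked broken: reset the attached device,
 * then reset and restore the host controller.
 */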

int ufshcd_link_recovery(struct ufs_hba *hba)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->ufshcd_state = UFSHCD_STATE_RESET;
	ufshcd_set_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* Reset the attached device */
	ufshcd_device_reset(hba);

	ret = ufshcd_host_reset_and_restore(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (ret)
		hba->ufshcd_state = UFSHCD_STATE_ERROR;
	ufshcd_clear_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (ret)
		dev_err(hba->dev, "%s: link recovery failed, err %d",
			__func__, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
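
/*
 * Enter link hibernation (HIBERN8) through the DME_HIBERNATE_ENTER UIC
 * command, with vendor notifications before and after the transition.
 */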

int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
{
	int ret;
	struct uic_command uic_cmd = {0};
	ktime_t start = ktime_get();

	ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);

	uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
	trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
			     ktime_to_us(ktime_sub(ktime_get(), start)), ret);

	if (ret)
		dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
			__func__, ret);
	else
		ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
								POST_CHANGE);

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_enter);

int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;
	ktime_t start = ktime_get();

	ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);

	uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
	trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
			     ktime_to_us(ktime_sub(ktime_get(), start)), ret);

	if (ret) {
		dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
			__func__, ret);
	} else {
		ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
								POST_CHANGE);
		hba->ufs_stats.last_hibern8_exit_tstamp = local_clock();
		hba->ufs_stats.hibern8_exit_cnt++;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);
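
/*
 * Update the Auto-Hibernate idle timer value and, if it changed while the
 * device WLUN is runtime-resumed, reprogram the AHIT register right away.
 */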

void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
{
	unsigned long flags;
	bool update = false;

	if (!ufshcd_is_auto_hibern8_supported(hba))
		return;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->ahit != ahit) {
		hba->ahit = ahit;
		update = true;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (update &&
	    !pm_runtime_suspended(&hba->ufs_device_wlun->sdev_gendev)) {
		ufshcd_rpm_get_sync(hba);
		ufshcd_hold(hba);
		ufshcd_auto_hibern8_enable(hba);
		ufshcd_release(hba);
		ufshcd_rpm_put_sync(hba);
	}
}
EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);

void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
{
	if (!ufshcd_is_auto_hibern8_supported(hba))
		return;

	ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
}

/**
 * ufshcd_init_pwr_info - setting the POR (power on reset)
 * values in hba power info
 * @hba: per-adapter instance
 */
static void ufshcd_init_pwr_info(struct ufs_hba *hba)
{
	hba->pwr_info.gear_rx = UFS_PWM_G1;
	hba->pwr_info.gear_tx = UFS_PWM_G1;
	hba->pwr_info.lane_rx = UFS_LANE_1;
	hba->pwr_info.lane_tx = UFS_LANE_1;
	hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
	hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
	hba->pwr_info.hs_rate = 0;
}

/**
 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
 * @hba: per-adapter instance
 *
 * Return: 0 upon success; < 0 upon failure.
 */
static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
{
	struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;

	if (hba->max_pwr_info.is_valid)
		return 0;

	if (hba->quirks & UFSHCD_QUIRK_HIBERN_FASTAUTO) {
		pwr_info->pwr_tx = FASTAUTO_MODE;
		pwr_info->pwr_rx = FASTAUTO_MODE;
	} else {
		pwr_info->pwr_tx = FAST_MODE;
		pwr_info->pwr_rx = FAST_MODE;
	}
	pwr_info->hs_rate = PA_HS_MODE_B;

	/* Get the connected lane count */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
			&pwr_info->lane_rx);
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
			&pwr_info->lane_tx);

	if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
		dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
				__func__,
				pwr_info->lane_rx,
				pwr_info->lane_tx);
		return -EINVAL;
	}

	/*
	 * First, get the maximum gears of HS speed.
	 * If a zero value, it means there is no HSGEAR capability.
	 * Then, get the maximum gears of PWM speed.
	 */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
	if (!pwr_info->gear_rx) {
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
				&pwr_info->gear_rx);
		if (!pwr_info->gear_rx) {
			dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
				__func__, pwr_info->gear_rx);
			return -EINVAL;
		}
		pwr_info->pwr_rx = SLOW_MODE;
	}

	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
			&pwr_info->gear_tx);
	if (!pwr_info->gear_tx) {
		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
				&pwr_info->gear_tx);
		if (!pwr_info->gear_tx) {
			dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
				__func__, pwr_info->gear_tx);
			return -EINVAL;
		}
		pwr_info->pwr_tx = SLOW_MODE;
	}

	hba->max_pwr_info.is_valid = true;
	return 0;
}
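
/*
 * Program the PA layer attributes (gears, lanes, termination, HS series and
 * the default UniPro data-link timeout values) and issue DME_SET(PA_PWRMODE)
 * to switch both directions of the link to the requested power mode.
 */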

static int ufshcd_change_power_mode(struct ufs_hba *hba,
			     struct ufs_pa_layer_attr *pwr_mode)
{
	int ret;

	/* if already configured to the requested pwr_mode */
	if (!hba->force_pmc &&
	    pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
	    pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
	    pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
	    pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
	    pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
	    pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
	    pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
		dev_dbg(hba->dev, "%s: power already configured\n", __func__);
		return 0;
	}

	/*
	 * Configure attributes for power mode change with below.
	 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
	 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
	 * - PA_HSSERIES
	 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
			pwr_mode->lane_rx);
	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
			pwr_mode->pwr_rx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
	else
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), false);

	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
			pwr_mode->lane_tx);
	if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
			pwr_mode->pwr_tx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
	else
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), false);

	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
	    pwr_mode->pwr_tx == FASTAUTO_MODE ||
	    pwr_mode->pwr_rx == FAST_MODE ||
	    pwr_mode->pwr_tx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
						pwr_mode->hs_rate);

	if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) {
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
				DL_FC0ProtectionTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
				DL_TC0ReplayTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
				DL_AFC0ReqTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
				DL_FC1ProtectionTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
				DL_TC1ReplayTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
				DL_AFC1ReqTimeOutVal_Default);

		ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
				DL_FC0ProtectionTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
				DL_TC0ReplayTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
				DL_AFC0ReqTimeOutVal_Default);
	}

	ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
			| pwr_mode->pwr_tx);

	if (ret) {
		dev_err(hba->dev,
			"%s: power mode change failed %d\n", __func__, ret);
	} else {
		ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
								pwr_mode);

		memcpy(&hba->pwr_info, pwr_mode,
			sizeof(struct ufs_pa_layer_attr));
	}

	return ret;
}

/**
 * ufshcd_config_pwr_mode - configure a new power mode
 * @hba: per-adapter instance
 * @desired_pwr_mode: desired power configuration
 *
 * Return: 0 upon success; < 0 upon failure.
 */
int ufshcd_config_pwr_mode(struct ufs_hba *hba,
		struct ufs_pa_layer_attr *desired_pwr_mode)
{
	struct ufs_pa_layer_attr final_params = { 0 };
	int ret;

	ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
					desired_pwr_mode, &final_params);

	if (ret)
		memcpy(&final_params, desired_pwr_mode, sizeof(final_params));

	ret = ufshcd_change_power_mode(hba, &final_params);

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);

/**
 * ufshcd_complete_dev_init() - checks device readiness
 * @hba: per-adapter instance
 *
 * Set fDeviceInit flag and poll until device toggles it.
 *
 * Return: 0 upon success; < 0 upon failure.
 */
static int ufshcd_complete_dev_init(struct ufs_hba *hba)
{
	int err;
	bool flag_res = true;
	ktime_t timeout;

	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
		QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
	if (err) {
		dev_err(hba->dev,
			"%s: setting fDeviceInit flag failed with error %d\n",
			__func__, err);
		goto out;
	}

	/* Poll fDeviceInit flag to be cleared */
	timeout = ktime_add_ms(ktime_get(), FDEVICEINIT_COMPL_TIMEOUT);
	do {
		err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
					QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
		if (!flag_res)
			break;
		usleep_range(500, 1000);
	} while (ktime_before(ktime_get(), timeout));

	if (err) {
		dev_err(hba->dev,
			"%s: reading fDeviceInit flag failed with error %d\n",
			__func__, err);
	} else if (flag_res) {
		dev_err(hba->dev,
			"%s: fDeviceInit was not cleared by the device\n",
			__func__);
		err = -EBUSY;
	}
out:
	return err;
}

/**
 * ufshcd_make_hba_operational - Make UFS controller operational
 * @hba: per adapter instance
 *
 * To bring UFS host controller to operational state,
 * 1. Enable required interrupts
 * 2. Configure interrupt aggregation
 * 3. Program UTRL and UTMRL base address
 * 4. Configure run-stop-registers
 *
 * Return: 0 on success, non-zero value on failure.
 */
int ufshcd_make_hba_operational(struct ufs_hba *hba)
{
	int err = 0;
	u32 reg;

	/* Enable required interrupts */
	ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);

	/* Configure interrupt aggregation */
	if (ufshcd_is_intr_aggr_allowed(hba))
		ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
	else
		ufshcd_disable_intr_aggr(hba);

	/* Configure UTRL and UTMRL base address registers */
	ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
			REG_UTP_TRANSFER_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
			REG_UTP_TRANSFER_REQ_LIST_BASE_H);
	ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
			REG_UTP_TASK_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
			REG_UTP_TASK_REQ_LIST_BASE_H);

	/*
	 * Make sure base address and interrupt setup are updated before
	 * enabling the run/stop registers below.
	 */
	wmb();

	/*
	 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
	 */
	reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
	if (!(ufshcd_get_lists_status(reg))) {
		ufshcd_enable_run_stop_reg(hba);
	} else {
		dev_err(hba->dev,
			"Host controller not ready to process requests");
		err = -EIO;
	}

	return err;
}
EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);

/**
 * ufshcd_hba_stop - Send controller to reset state
 * @hba: per adapter instance
 */
void ufshcd_hba_stop(struct ufs_hba *hba)
{
	unsigned long flags;
	int err;

	/*
	 * Obtain the host lock to prevent that the controller is disabled
	 * while the UFS interrupt handler is active on another CPU.
	 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
					CONTROLLER_ENABLE, CONTROLLER_DISABLE,
					10, 1);
	if (err)
		dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
}
EXPORT_SYMBOL_GPL(ufshcd_hba_stop);

/**
 * ufshcd_hba_execute_hce - initialize the controller
 * @hba: per adapter instance
 *
 * The controller resets itself and controller firmware initialization
 * sequence kicks off. When controller is ready it will set
 * the Host Controller Enable bit to 1.
 *
 * Return: 0 on success, non-zero value on failure.
 */
static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
{
	int retry_outer = 3;
	int retry_inner;

start:
	if (ufshcd_is_hba_active(hba))
		/* change controller state to "reset state" */
		ufshcd_hba_stop(hba);

	/* UniPro link is disabled at this point */
	ufshcd_set_link_off(hba);

	ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);

	/* start controller initialization sequence */
	ufshcd_hba_start(hba);

	/*
	 * To initialize a UFS host controller HCE bit must be set to 1.
	 * During initialization the HCE bit value changes from 1->0->1.
	 * When the host controller completes initialization sequence
	 * it sets the value of HCE bit to 1. The same HCE bit is read back
	 * to check if the controller has completed initialization sequence.
	 * So without this delay the value HCE = 1, set in the previous
	 * instruction might be read back.
	 * This delay can be changed based on the controller.
	 */
	ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);

	/* wait for the host controller to complete initialization */
	retry_inner = 50;
	while (!ufshcd_is_hba_active(hba)) {
		if (retry_inner) {
			retry_inner--;
		} else {
			dev_err(hba->dev,
				"Controller enable failed\n");
			if (retry_outer) {
				retry_outer--;
				goto start;
			}
			return -EIO;
		}
		usleep_range(1000, 1100);
	}

	/* enable UIC related interrupts */
	ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);

	ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);

	return 0;
}
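
/*
 * Enable the host controller. Controllers with the broken-HCE quirk are
 * brought up through DME_RESET/DME_ENABLE instead of toggling the HCE bit.
 */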

int ufshcd_hba_enable(struct ufs_hba *hba)
{
	int ret;

	if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
		ufshcd_set_link_off(hba);
		ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);

		/* enable UIC related interrupts */
		ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
		ret = ufshcd_dme_reset(hba);
		if (ret) {
			dev_err(hba->dev, "DME_RESET failed\n");
			return ret;
		}

		ret = ufshcd_dme_enable(hba);
		if (ret) {
			dev_err(hba->dev, "Enabling DME failed\n");
			return ret;
		}

		ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
	} else {
		ret = ufshcd_hba_execute_hce(hba);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
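
/*
 * Disable TX_LCC_ENABLE on every connected TX data lane, either on the local
 * host or on the peer device.
 */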

static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
{
	int tx_lanes = 0, i, err = 0;

	if (!peer)
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
			       &tx_lanes);
	else
		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
				    &tx_lanes);
	for (i = 0; i < tx_lanes; i++) {
		if (!peer)
			err = ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
					0);
		else
			err = ufshcd_dme_peer_set(hba,
				UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
					0);
		if (err) {
			dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
				__func__, peer, i, err);
			break;
		}
	}

	return err;
}

static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
{
	return ufshcd_disable_tx_lcc(hba, true);
}

void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val)
{
	struct ufs_event_hist *e;

	if (id >= UFS_EVT_CNT)
		return;

	e = &hba->ufs_stats.event[id];
	e->val[e->pos] = val;
	e->tstamp[e->pos] = local_clock();
	e->cnt += 1;
	e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH;

	ufshcd_vops_event_notify(hba, id, &val);
}
EXPORT_SYMBOL_GPL(ufshcd_update_evt_hist);

/**
 * ufshcd_link_startup - Initialize unipro link startup
 * @hba: per adapter instance
 *
 * Return: 0 for success, non-zero in case of failure.
 */
static int ufshcd_link_startup(struct ufs_hba *hba)
{
	int ret;
	int retries = DME_LINKSTARTUP_RETRIES;
	bool link_startup_again = false;

	/*
	 * If UFS device isn't active then we will have to issue link startup
	 * 2 times to make sure the device state moves to active.
	 */
	if (!ufshcd_is_ufs_dev_active(hba))
		link_startup_again = true;

link_startup:
	do {
		ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);

		ret = ufshcd_dme_link_startup(hba);

		/* check if device is detected by inter-connect layer */
		if (!ret && !ufshcd_is_device_present(hba)) {
			ufshcd_update_evt_hist(hba,
					       UFS_EVT_LINK_STARTUP_FAIL,
					       0);
			dev_err(hba->dev, "%s: Device not present\n", __func__);
			ret = -ENXIO;
			goto out;
		}

		/*
		 * DME link lost indication is only received when link is up,
		 * but we can't be sure if the link is up until link startup
		 * succeeds. So reset the local Uni-Pro and try again.
		 */
		if (ret && retries && ufshcd_hba_enable(hba)) {
			ufshcd_update_evt_hist(hba,
					       UFS_EVT_LINK_STARTUP_FAIL,
					       (u32)ret);
			goto out;
		}
	} while (ret && retries--);

	if (ret) {
		/* failed to get the link up... retire */
		ufshcd_update_evt_hist(hba,
				       UFS_EVT_LINK_STARTUP_FAIL,
				       (u32)ret);
		goto out;
	}

	if (link_startup_again) {
		link_startup_again = false;
		retries = DME_LINKSTARTUP_RETRIES;
		goto link_startup;
	}

	/* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
	ufshcd_init_pwr_info(hba);
	ufshcd_print_pwr_info(hba);

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
		ret = ufshcd_disable_device_tx_lcc(hba);
		if (ret)
			goto out;
	}

	/* Include any host controller configuration via UIC commands */
	ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
	if (ret)
		goto out;

	/* Clear UECPA once due to LINERESET has happened during LINK_STARTUP */
	ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
	ret = ufshcd_make_hba_operational(hba);
out:
	if (ret) {
		dev_err(hba->dev, "link startup failed %d\n", ret);
		ufshcd_print_host_state(hba);
		ufshcd_print_pwr_info(hba);
		ufshcd_print_evt_hist(hba);
	}
	return ret;
}

/**
 * ufshcd_verify_dev_init() - Verify device initialization
 * @hba: per-adapter instance
 *
 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
 * device Transport Protocol (UTP) layer is ready after a reset.
 * If the UTP layer at the device side is not initialized, it may
 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
 *
 * Return: 0 upon success; < 0 upon failure.
 */
static int ufshcd_verify_dev_init(struct ufs_hba *hba)
{
	int err = 0;
	int retries;

	ufshcd_hold(hba);
	mutex_lock(&hba->dev_cmd.lock);
	for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
		err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
					  hba->nop_out_timeout);

		if (!err || err == -ETIMEDOUT)
			break;

		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
	}
	mutex_unlock(&hba->dev_cmd.lock);
	ufshcd_release(hba);

	if (err)
		dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
	return err;
}

/**
 * ufshcd_setup_links - associate link b/w device wlun and other luns
 * @sdev: pointer to SCSI device
 * @hba: pointer to ufs hba
 */
static void ufshcd_setup_links(struct ufs_hba *hba, struct scsi_device *sdev)
{
	struct device_link *link;

	/*
	 * Device wlun is the supplier & rest of the luns are consumers.
	 * This ensures that device wlun suspends after all other luns.
	 */
	if (hba->ufs_device_wlun) {
		link = device_link_add(&sdev->sdev_gendev,
				       &hba->ufs_device_wlun->sdev_gendev,
				       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
		if (!link) {
			dev_err(&sdev->sdev_gendev, "Failed establishing link - %s\n",
				dev_name(&hba->ufs_device_wlun->sdev_gendev));
			return;
		}
		hba->luns_avail--;
		/* Ignore REPORT_LUN wlun probing */
		if (hba->luns_avail == 1) {
			ufshcd_rpm_put(hba);
			return;
		}
	} else {
		/*
		 * Device wlun is probed. The assumption is that WLUNs are
		 * scanned before other LUNs.
		 */
		hba->luns_avail--;
	}
}

/**
 * ufshcd_lu_init - Initialize the relevant parameters of the LU
 * @hba: per-adapter instance
 * @sdev: pointer to SCSI device
 */
static void ufshcd_lu_init(struct ufs_hba *hba, struct scsi_device *sdev)
{
	int len = QUERY_DESC_MAX_SIZE;
	u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);
	u8 lun_qdepth = hba->nutrs;
	u8 *desc_buf;
	int ret;

	desc_buf = kzalloc(len, GFP_KERNEL);
	if (!desc_buf)
		goto set_qdepth;

	ret = ufshcd_read_unit_desc_param(hba, lun, 0, desc_buf, len);
	if (ret < 0) {
		if (ret == -EOPNOTSUPP)
			/* If LU doesn't support unit descriptor, its queue depth is set to 1 */
			lun_qdepth = 1;
		kfree(desc_buf);
		goto set_qdepth;
	}

	if (desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH]) {
		/*
		 * In per-LU queueing architecture, bLUQueueDepth will not be 0, then we will
		 * use the smaller between UFSHCI CAP.NUTRS and UFS LU bLUQueueDepth
		 */
		lun_qdepth = min_t(int, desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH], hba->nutrs);
	}
	/*
	 * According to UFS device specification, the write protection mode is only supported by
	 * normal LU, not supported by WLUN.
	 */
	if (hba->dev_info.f_power_on_wp_en && lun < hba->dev_info.max_lu_supported &&
	    !hba->dev_info.is_lu_power_on_wp &&
	    desc_buf[UNIT_DESC_PARAM_LU_WR_PROTECT] == UFS_LU_POWER_ON_WP)
		hba->dev_info.is_lu_power_on_wp = true;

	/* In case of RPMB LU, check if advanced RPMB mode is enabled */
	if (desc_buf[UNIT_DESC_PARAM_UNIT_INDEX] == UFS_UPIU_RPMB_WLUN &&
	    desc_buf[RPMB_UNIT_DESC_PARAM_REGION_EN] & BIT(4))
		hba->dev_info.b_advanced_rpmb_en = true;

	kfree(desc_buf);
set_qdepth:
	/*
	 * For WLUNs that don't support unit descriptor, queue depth is set to 1. For LUs whose
	 * bLUQueueDepth == 0, the queue depth is set to a maximum value that host can queue.
	 */
	dev_dbg(hba->dev, "Set LU %x queue depth %d\n", lun, lun_qdepth);
	scsi_change_queue_depth(sdev, lun_qdepth);
}

/**
 * ufshcd_slave_alloc - handle initial SCSI device configurations
 * @sdev: pointer to SCSI device
 *
 * Return: success.
 */
static int ufshcd_slave_alloc(struct scsi_device *sdev)
{
	struct ufs_hba *hba;

	hba = shost_priv(sdev->host);

	/* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
	sdev->use_10_for_ms = 1;

	/* DBD field should be set to 1 in mode sense(10) */
	sdev->set_dbd_for_ms = 1;

	/* allow SCSI layer to restart the device in case of errors */
	sdev->allow_restart = 1;

	/* REPORT SUPPORTED OPERATION CODES is not supported */
	sdev->no_report_opcodes = 1;

	/* WRITE_SAME command is not supported */
	sdev->no_write_same = 1;

	ufshcd_lu_init(hba, sdev);

	ufshcd_setup_links(hba, sdev);

	return 0;
}

/**
 * ufshcd_change_queue_depth - change queue depth
 * @sdev: pointer to SCSI device
 * @depth: required depth to set
 *
 * Change queue depth and make sure the max. limits are not crossed.
 *
 * Return: new queue depth.
 */
static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
{
	return scsi_change_queue_depth(sdev, min(depth, sdev->host->can_queue));
}

/**
 * ufshcd_slave_configure - adjust SCSI device configurations
 * @sdev: pointer to SCSI device
 *
 * Return: 0 (success).
 */
static int ufshcd_slave_configure(struct scsi_device *sdev)
{
	struct ufs_hba *hba = shost_priv(sdev->host);
	struct request_queue *q = sdev->request_queue;

	blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);

	/*
	 * Block runtime-pm until all consumers are added.
	 * Refer ufshcd_setup_links().
	 */
	if (is_device_wlun(sdev))
		pm_runtime_get_noresume(&sdev->sdev_gendev);
	else if (ufshcd_is_rpm_autosuspend_allowed(hba))
		sdev->rpm_autosuspend = 1;
	/*
	 * Do not print messages during runtime PM to avoid never-ending cycles
	 * of messages written back to storage by user space causing runtime
	 * resume, causing more messages and so on.
	 */
	sdev->silence_suspend = 1;

	if (hba->vops && hba->vops->config_scsi_dev)
		hba->vops->config_scsi_dev(sdev);

	ufshcd_crypto_register(hba, q);

	return 0;
}

/**
 * ufshcd_slave_destroy - remove SCSI device configurations
 * @sdev: pointer to SCSI device
 */
static void ufshcd_slave_destroy(struct scsi_device *sdev)
{
	struct ufs_hba *hba;
	unsigned long flags;

	hba = shost_priv(sdev->host);

	/* Drop the reference as it won't be needed anymore */
	if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
		spin_lock_irqsave(hba->host->host_lock, flags);
		hba->ufs_device_wlun = NULL;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	} else if (hba->ufs_device_wlun) {
		struct device *supplier = NULL;

		/* Ensure UFS Device WLUN exists and does not disappear */
		spin_lock_irqsave(hba->host->host_lock, flags);
		if (hba->ufs_device_wlun) {
			supplier = &hba->ufs_device_wlun->sdev_gendev;
			get_device(supplier);
		}
		spin_unlock_irqrestore(hba->host->host_lock, flags);

		if (supplier) {
			/*
			 * If a LUN fails to probe (e.g. absent BOOT WLUN), the
			 * device will not have been registered but can still
			 * have a device link holding a reference to the device.
			 */
			device_link_remove(&sdev->sdev_gendev, supplier);
			put_device(supplier);
		}
	}
}

/**
 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
 * @lrbp: pointer to local reference block of completed command
 * @scsi_status: SCSI command status
 *
 * Return: value based on SCSI command status.
 */
static inline int
ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
{
	int result = 0;

	switch (scsi_status) {
	case SAM_STAT_CHECK_CONDITION:
		ufshcd_copy_sense_data(lrbp);
		fallthrough;
	case SAM_STAT_GOOD:
		result |= DID_OK << 16 | scsi_status;
		break;
	case SAM_STAT_TASK_SET_FULL:
	case SAM_STAT_BUSY:
	case SAM_STAT_TASK_ABORTED:
		ufshcd_copy_sense_data(lrbp);
		result |= scsi_status;
		break;
	default:
		result |= DID_ERROR << 16;
		break;
	} /* end of switch */

	return result;
}

/**
 * ufshcd_transfer_rsp_status - Get overall status of the response
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block of completed command
 * @cqe: pointer to the completion queue entry
 *
 * Return: result of the command to notify SCSI midlayer.
 */
static inline int
ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
			   struct cq_entry *cqe)
{
	int result = 0;
	int scsi_status;
	enum utp_ocs ocs;
	u8 upiu_flags;
	u32 resid;

	upiu_flags = lrbp->ucd_rsp_ptr->header.flags;
	resid = be32_to_cpu(lrbp->ucd_rsp_ptr->sr.residual_transfer_count);
	/*
	 * Test !overflow instead of underflow to support UFS devices that do
	 * not set either flag.
	 */
	if (resid && !(upiu_flags & UPIU_RSP_FLAG_OVERFLOW))
		scsi_set_resid(lrbp->cmd, resid);

	/* overall command status of utrd */
	ocs = ufshcd_get_tr_ocs(lrbp, cqe);

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) {
		if (lrbp->ucd_rsp_ptr->header.response ||
		    lrbp->ucd_rsp_ptr->header.status)
			ocs = OCS_SUCCESS;
	}

	switch (ocs) {
	case OCS_SUCCESS:
		hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
		switch (ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr)) {
		case UPIU_TRANSACTION_RESPONSE:
			/*
			 * get the result based on SCSI status response
			 * to notify the SCSI midlayer of the command status
			 */
			scsi_status = lrbp->ucd_rsp_ptr->header.status;
			result = ufshcd_scsi_cmd_status(lrbp, scsi_status);

			/*
			 * Currently we are only supporting BKOPs exception
			 * events hence we can ignore BKOPs exception event
			 * during power management callbacks. BKOPs exception
			 * event is not expected to be raised in runtime suspend
			 * callback as it allows the urgent bkops.
			 * During system suspend, we are anyway forcefully
			 * disabling the bkops and if urgent bkops is needed
			 * it will be enabled on system resume. Long term
			 * solution could be to abort the system suspend if
			 * UFS device needs urgent BKOPs.
			 */
			if (!hba->pm_op_in_progress &&
			    !ufshcd_eh_in_progress(hba) &&
			    ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
				/* Flushed in suspend */
				schedule_work(&hba->eeh_work);
			break;
		case UPIU_TRANSACTION_REJECT_UPIU:
			/* TODO: handle Reject UPIU Response */
			result = DID_ERROR << 16;
			dev_err(hba->dev,
				"Reject UPIU not fully implemented\n");
			break;
		default:
			dev_err(hba->dev,
				"Unexpected request response code = %x\n",
				result);
			result = DID_ERROR << 16;
			break;
		}
		break;
	case OCS_ABORTED:
		result |= DID_ABORT << 16;
		break;
	case OCS_INVALID_COMMAND_STATUS:
		result |= DID_REQUEUE << 16;
		break;
	case OCS_INVALID_CMD_TABLE_ATTR:
	case OCS_INVALID_PRDT_ATTR:
	case OCS_MISMATCH_DATA_BUF_SIZE:
	case OCS_MISMATCH_RESP_UPIU_SIZE:
	case OCS_PEER_COMM_FAILURE:
	case OCS_FATAL_ERROR:
	case OCS_DEVICE_FATAL_ERROR:
	case OCS_INVALID_CRYPTO_CONFIG:
	case OCS_GENERAL_CRYPTO_ERROR:
	default:
		result |= DID_ERROR << 16;
		dev_err(hba->dev,
				"OCS error from controller = %x for tag %d\n",
				ocs, lrbp->task_tag);
		ufshcd_print_evt_hist(hba);
		ufshcd_print_host_state(hba);
		break;
	} /* end of switch */

	if ((host_byte(result) != DID_OK) &&
	    (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs)
		ufshcd_print_tr(hba, lrbp->task_tag, true);

	return result;
}
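
/*
 * Treat a hibern8 enter/exit status interrupt as an Auto-Hibernate error only
 * when Auto-Hibernate is enabled and no manual hibern8 UIC command is active.
 */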

static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
					 u32 intr_mask)
{
	if (!ufshcd_is_auto_hibern8_supported(hba) ||
	    !ufshcd_is_auto_hibern8_enabled(hba))
		return false;

	if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
		return false;

	if (hba->active_uic_cmd &&
	    (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
	    hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
		return false;

	return true;
}

/**
 * ufshcd_uic_cmd_compl - handle completion of uic command
 * @hba: per adapter instance
 * @intr_status: interrupt status generated by the controller
 *
 * Return:
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
{
	irqreturn_t retval = IRQ_NONE;

	spin_lock(hba->host->host_lock);
	if (ufshcd_is_auto_hibern8_error(hba, intr_status))
		hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);

	if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
		hba->active_uic_cmd->argument2 |=
			ufshcd_get_uic_cmd_result(hba);
		hba->active_uic_cmd->argument3 =
			ufshcd_get_dme_attr_val(hba);
		if (!hba->uic_async_done)
			hba->active_uic_cmd->cmd_active = 0;
		complete(&hba->active_uic_cmd->done);
		retval = IRQ_HANDLED;
	}

	if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
		hba->active_uic_cmd->cmd_active = 0;
		complete(hba->uic_async_done);
		retval = IRQ_HANDLED;
	}

	if (retval == IRQ_HANDLED)
		ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd,
					     UFS_CMD_COMP);
	spin_unlock(hba->host->host_lock);

	return retval;
}

/* Release the resources allocated for processing a SCSI command. */
void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
			     struct ufshcd_lrb *lrbp)
{
	struct scsi_cmnd *cmd = lrbp->cmd;

	scsi_dma_unmap(cmd);
	ufshcd_release(hba);
	ufshcd_clk_scaling_update_busy(hba);
}

/**
 * ufshcd_compl_one_cqe - handle a completion queue entry
 * @hba: per adapter instance
 * @task_tag: the task tag of the request to be completed
 * @cqe: pointer to the completion queue entry
 */
void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
			  struct cq_entry *cqe)
{
	struct ufshcd_lrb *lrbp;
	struct scsi_cmnd *cmd;
	enum utp_ocs ocs;

	lrbp = &hba->lrb[task_tag];
	lrbp->compl_time_stamp = ktime_get();
	cmd = lrbp->cmd;
	if (cmd) {
		if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
			ufshcd_update_monitor(hba, lrbp);
		ufshcd_add_command_trace(hba, task_tag, UFS_CMD_COMP);
		cmd->result = ufshcd_transfer_rsp_status(hba, lrbp, cqe);
		ufshcd_release_scsi_cmd(hba, lrbp);
		/* Do not touch lrbp after scsi done */
		scsi_done(cmd);
	} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
		   lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
		if (hba->dev_cmd.complete) {
			if (cqe) {
				ocs = le32_to_cpu(cqe->status) & MASK_OCS;
				lrbp->utr_descriptor_ptr->header.ocs = ocs;
			}
			complete(hba->dev_cmd.complete);
		}
	}
}

/**
 * __ufshcd_transfer_req_compl - handle SCSI and query command completion
 * @hba: per adapter instance
 * @completed_reqs: bitmask that indicates which requests to complete
 */
static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
					unsigned long completed_reqs)
{
	int tag;

	for_each_set_bit(tag, &completed_reqs, hba->nutrs)
		ufshcd_compl_one_cqe(hba, tag, NULL);
}

/* Any value that is not an existing queue number is fine for this constant. */
enum {
	UFSHCD_POLL_FROM_INTERRUPT_CONTEXT = -1
};

static void ufshcd_clear_polled(struct ufs_hba *hba,
				unsigned long *completed_reqs)
{
	int tag;

	for_each_set_bit(tag, completed_reqs, hba->nutrs) {
		struct scsi_cmnd *cmd = hba->lrb[tag].cmd;

		if (!cmd)
			continue;

		if (scsi_cmd_to_rq(cmd)->cmd_flags & REQ_POLLED)
			__clear_bit(tag, completed_reqs);
	}
}

/*
 * Return: > 0 if one or more commands have been completed or 0 if no
 * requests have been completed.
 */
static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
{
	struct ufs_hba *hba = shost_priv(shost);
	unsigned long completed_reqs, flags;
	u32 tr_doorbell;
	struct ufs_hw_queue *hwq;

	if (is_mcq_enabled(hba)) {
		hwq = &hba->uhq[queue_num];

		return ufshcd_mcq_poll_cqe_lock(hba, hwq);
	}

	spin_lock_irqsave(&hba->outstanding_lock, flags);
	tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	completed_reqs = ~tr_doorbell & hba->outstanding_reqs;
	WARN_ONCE(completed_reqs & ~hba->outstanding_reqs,
		  "completed: %#lx; outstanding: %#lx\n", completed_reqs,
		  hba->outstanding_reqs);
	if (queue_num == UFSHCD_POLL_FROM_INTERRUPT_CONTEXT) {
		/* Do not complete polled requests from interrupt context. */
		ufshcd_clear_polled(hba, &completed_reqs);
	}
	hba->outstanding_reqs &= ~completed_reqs;
	spin_unlock_irqrestore(&hba->outstanding_lock, flags);

	if (completed_reqs)
		__ufshcd_transfer_req_compl(hba, completed_reqs);

	return completed_reqs != 0;
}

/**
 * ufshcd_mcq_compl_pending_transfer - MCQ mode function. It is
 * invoked from the error handler context or ufshcd_host_reset_and_restore()
 * to complete the pending transfers and free the resources associated with
 * the scsi commands.
 *
 * @hba: per adapter instance
 * @force_compl: This flag is set to true when invoked
 * from ufshcd_host_reset_and_restore() in which case it requires special
 * handling because the host controller has been reset by ufshcd_hba_stop().
 */
static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba,
					      bool force_compl)
{
	struct ufs_hw_queue *hwq;
	struct ufshcd_lrb *lrbp;
	struct scsi_cmnd *cmd;
	unsigned long flags;
	u32 hwq_num, utag;
	int tag;

	for (tag = 0; tag < hba->nutrs; tag++) {
		lrbp = &hba->lrb[tag];
		cmd = lrbp->cmd;
		if (!ufshcd_cmd_inflight(cmd) ||
		    test_bit(SCMD_STATE_COMPLETE, &cmd->state))
			continue;

		utag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd));
		hwq_num = blk_mq_unique_tag_to_hwq(utag);
		hwq = &hba->uhq[hwq_num];

		if (force_compl) {
			ufshcd_mcq_compl_all_cqes_lock(hba, hwq);
			/*
			 * For those cmds of which the cqes are not present
			 * in the cq, complete them explicitly.
			 */
			spin_lock_irqsave(&hwq->cq_lock, flags);
			if (cmd && !test_bit(SCMD_STATE_COMPLETE, &cmd->state)) {
				set_host_byte(cmd, DID_REQUEUE);
				ufshcd_release_scsi_cmd(hba, lrbp);
				scsi_done(cmd);
			}
			spin_unlock_irqrestore(&hwq->cq_lock, flags);
		} else {
			ufshcd_mcq_poll_cqe_lock(hba, hwq);
		}
	}
}

/**
 * ufshcd_transfer_req_compl - handle SCSI and query command completion
 * @hba: per adapter instance
 *
 * Return:
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
{
	/* Resetting interrupt aggregation counters first and reading the
	 * DOOR_BELL afterward allows us to handle all the completed requests.
	 * In order to prevent other interrupts starvation the DB is read once
	 * after reset. The down side of this solution is the possibility of
	 * false interrupt if device completes another request after resetting
	 * aggregation and before reading the DB.
	 */
	if (ufshcd_is_intr_aggr_allowed(hba) &&
	    !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
		ufshcd_reset_intr_aggr(hba);

	if (ufs_fail_completion())
		return IRQ_HANDLED;

	/*
	 * Ignore the ufshcd_poll() return value and return IRQ_HANDLED since we
	 * do not want polling to trigger spurious interrupt complaints.
	 */
	ufshcd_poll(hba->host, UFSHCD_POLL_FROM_INTERRUPT_CONTEXT);

	return IRQ_HANDLED;
}
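
/* Write the exception event control attribute (wExceptionEventControl). */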

int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask)
{
	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
				       QUERY_ATTR_IDN_EE_CONTROL, 0, 0,
				       &ee_ctrl_mask);
}

int ufshcd_write_ee_control(struct ufs_hba *hba)
{
	int err;

	mutex_lock(&hba->ee_ctrl_mutex);
	err = __ufshcd_write_ee_control(hba, hba->ee_ctrl_mask);
	mutex_unlock(&hba->ee_ctrl_mutex);
	if (err)
		dev_err(hba->dev, "%s: failed to write ee control %d\n",
			__func__, err);
	return err;
}

int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask,
			     const u16 *other_mask, u16 set, u16 clr)
{
	u16 new_mask, ee_ctrl_mask;
	int err = 0;

	mutex_lock(&hba->ee_ctrl_mutex);
	new_mask = (*mask & ~clr) | set;
	ee_ctrl_mask = new_mask | *other_mask;
	if (ee_ctrl_mask != hba->ee_ctrl_mask)
		err = __ufshcd_write_ee_control(hba, ee_ctrl_mask);
	/* Still need to update 'mask' even if 'ee_ctrl_mask' was unchanged */
	if (!err) {
		hba->ee_ctrl_mask = ee_ctrl_mask;
		*mask = new_mask;
	}
	mutex_unlock(&hba->ee_ctrl_mutex);
	return err;
}

/**
 * ufshcd_disable_ee - disable exception event
 * @hba: per-adapter instance
 * @mask: exception event to disable
 *
 * Disables exception event in the device so that the EVENT_ALERT
 * bit is not set.
 *
 * Return: zero on success, non-zero error value on failure.
 */
static inline int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
{
	return ufshcd_update_ee_drv_mask(hba, 0, mask);
}

/**
 * ufshcd_enable_ee - enable exception event
 * @hba: per-adapter instance
 * @mask: exception event to enable
 *
 * Enable corresponding exception event in the device to allow
 * device to alert host in critical scenarios.
 *
 * Return: zero on success, non-zero error value on failure.
 */
static inline int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
{
	return ufshcd_update_ee_drv_mask(hba, mask, 0);
}

/**
 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
 * @hba: per-adapter instance
 *
 * Allow device to manage background operations on its own. Enabling
 * this might lead to inconsistent latencies during normal data transfers
 * as the device is allowed to manage its own way of handling background
 * operations.
 *
 * Return: zero on success, non-zero on failure.
 */
static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
{
	int err = 0;

	if (hba->auto_bkops_enabled)
		goto out;

	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
			QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
	if (err) {
		dev_err(hba->dev, "%s: failed to enable bkops %d\n",
				__func__, err);
		goto out;
	}

	hba->auto_bkops_enabled = true;
	trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");

	/* No need of URGENT_BKOPS exception from the device */
	err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
	if (err)
		dev_err(hba->dev, "%s: failed to disable exception event %d\n",
				__func__, err);
out:
	return err;
}

/**
 * ufshcd_disable_auto_bkops - block device in doing background operations
 * @hba: per-adapter instance
 *
 * Disabling background operations improves command response latency but
 * has drawback of device moving into critical state where the device is
 * not-operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
 * host is idle so that BKOPS are managed effectively without any negative
 * impacts.
 *
 * Return: zero on success, non-zero on failure.
 */
static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
{
	int err = 0;

	if (!hba->auto_bkops_enabled)
		goto out;

	/*
	 * If host assisted BKOPs is to be enabled, make sure
	 * urgent bkops exception is allowed.
	 */
	err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
	if (err) {
		dev_err(hba->dev, "%s: failed to enable exception event %d\n",
				__func__, err);
		goto out;
	}

	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
			QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
	if (err) {
		dev_err(hba->dev, "%s: failed to disable bkops %d\n",
				__func__, err);
		ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
		goto out;
	}

	hba->auto_bkops_enabled = false;
	trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
	hba->is_urgent_bkops_lvl_checked = false;
out:
	return err;
}

/**
 * ufshcd_force_reset_auto_bkops - force reset auto bkops state
 * @hba: per adapter instance
 *
 * After a device reset the device may toggle the BKOPS_EN flag
 * to default value. The s/w tracking variables should be updated
 * as well. This function would change the auto-bkops state based on
 * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
 */
static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
{
	if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
		hba->auto_bkops_enabled = false;
		hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
		ufshcd_enable_auto_bkops(hba);
	} else {
		hba->auto_bkops_enabled = true;
		hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
		ufshcd_disable_auto_bkops(hba);
	}
	hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
	hba->is_urgent_bkops_lvl_checked = false;
}

static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
{
	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
}

/**
 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
 * @hba: per-adapter instance
 * @status: bkops_status value
 *
 * Read the bkops_status from the UFS device and Enable fBackgroundOpsEn
 * flag in the device to permit background operations if the device
 * bkops_status is greater than or equal to "status" argument passed to
 * this function, disable otherwise.
 *
 * Return: 0 for success, non-zero in case of failure.
 *
 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
 * to know whether auto bkops is enabled or disabled after this function
 * returns control to it.
 */
static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
			     enum bkops_status status)
{
	int err;
	u32 curr_status = 0;

	err = ufshcd_get_bkops_status(hba, &curr_status);
	if (err) {
		dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
				__func__, err);
		goto out;
	} else if (curr_status > BKOPS_STATUS_MAX) {
		dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
				__func__, curr_status);
		err = -EINVAL;
		goto out;
	}

	if (curr_status >= status)
		err = ufshcd_enable_auto_bkops(hba);
	else
		err = ufshcd_disable_auto_bkops(hba);
out:
	return err;
}

/**
 * ufshcd_urgent_bkops - handle urgent bkops exception event
 * @hba: per-adapter instance
 *
 * Enable fBackgroundOpsEn flag in the device to permit background
 * operations.
 *
 * If BKOPs is enabled, this function returns 0, 1 if the bkops is not enabled
 * and negative error value for any other failure.
 *
 * Return: 0 upon success; < 0 upon failure.
 */
static int ufshcd_urgent_bkops(struct ufs_hba *hba)
{
	return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
}
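
/* Read the exception event status attribute (wExceptionEventStatus). */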

static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
{
	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
}

static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
{
	int err;
	u32 curr_status = 0;

	if (hba->is_urgent_bkops_lvl_checked)
		goto enable_auto_bkops;

	err = ufshcd_get_bkops_status(hba, &curr_status);
	if (err) {
		dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
				__func__, err);
		goto out;
	}

	/*
	 * We are seeing that some devices are raising the urgent bkops
	 * exception events even when BKOPS status doesn't indicate performance
	 * impacted or critical. Handle these devices by determining their urgent
	 * bkops status at runtime.
	 */
	if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
		dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
				__func__, curr_status);
		/* update the current status as the urgent bkops level */
		hba->urgent_bkops_lvl = curr_status;
		hba->is_urgent_bkops_lvl_checked = true;
	}

enable_auto_bkops:
	err = ufshcd_enable_auto_bkops(hba);
out:
	if (err < 0)
		dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
				__func__, err);
}

static void ufshcd_temp_exception_event_handler(struct ufs_hba *hba, u16 status)
{
	u32 value;

	if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				QUERY_ATTR_IDN_CASE_ROUGH_TEMP, 0, 0, &value))
		return;

	dev_info(hba->dev, "exception Tcase %d\n", value - 80);

	ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP);
	/*
	 * A placeholder for the platform vendors to add whatever additional
	 * steps required
	 */
}

static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn)
{
	u8 index;
	enum query_opcode opcode = set ? UPIU_QUERY_OPCODE_SET_FLAG :
				   UPIU_QUERY_OPCODE_CLEAR_FLAG;

	index = ufshcd_wb_get_query_index(hba);
	return ufshcd_query_flag_retry(hba, opcode, idn, index, NULL);
}

int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable)
{
	int ret;

	if (!ufshcd_is_wb_allowed(hba) ||
	    hba->dev_info.wb_enabled == enable)
		return 0;

	ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_EN);
	if (ret) {
		dev_err(hba->dev, "%s: Write Booster %s failed %d\n",
			__func__, enable ? "enabling" : "disabling", ret);
		return ret;
	}

	hba->dev_info.wb_enabled = enable;
	dev_dbg(hba->dev, "%s: Write Booster %s\n",
			__func__, enable ? "enabled" : "disabled");

	return ret;
}

static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
						 bool enable)
{
	int ret;

	ret = __ufshcd_wb_toggle(hba, enable,
			QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8);
	if (ret) {
		dev_err(hba->dev, "%s: WB-Buf Flush during H8 %s failed %d\n",
			__func__, enable ? "enabling" : "disabling", ret);
		return;
	}
	dev_dbg(hba->dev, "%s: WB-Buf Flush during H8 %s\n",
			__func__, enable ? "enabled" : "disabled");
}

int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable)
{
	int ret;

	if (!ufshcd_is_wb_allowed(hba) ||
	    hba->dev_info.wb_buf_flush_enabled == enable)
		return 0;

	ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN);
	if (ret) {
		dev_err(hba->dev, "%s: WB-Buf Flush %s failed %d\n",
			__func__, enable ? "enabling" : "disabling", ret);
		return ret;
	}

	hba->dev_info.wb_buf_flush_enabled = enable;
	dev_dbg(hba->dev, "%s: WB-Buf Flush %s\n",
			__func__, enable ? "enabled" : "disabled");

	return ret;
}
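
/*
 * In preserve-user-space WriteBooster mode, keep VCC on only while the
 * current buffer is non-empty and the available buffer is below the flush
 * threshold.
 */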

static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
						u32 avail_buf)
{
	u32 cur_buf;
	int ret;
	u8 index;

	index = ufshcd_wb_get_query_index(hba);
	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				      QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE,
				      index, 0, &cur_buf);
	if (ret) {
		dev_err(hba->dev, "%s: dCurWriteBoosterBufferSize read failed %d\n",
			__func__, ret);
		return false;
	}

	if (!cur_buf) {
		dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n",
			 cur_buf);
		return false;
	}
	/* Let it continue to flush when available buffer exceeds threshold */
	return avail_buf < hba->vps->wb_flush_threshold;
}

static void ufshcd_wb_force_disable(struct ufs_hba *hba)
{
	if (ufshcd_is_wb_buf_flush_allowed(hba))
		ufshcd_wb_toggle_buf_flush(hba, false);

	ufshcd_wb_toggle_buf_flush_during_h8(hba, false);
	ufshcd_wb_toggle(hba, false);
	hba->caps &= ~UFSHCD_CAP_WB_EN;

	dev_info(hba->dev, "%s: WB force disabled\n", __func__);
}

static bool ufshcd_is_wb_buf_lifetime_available(struct ufs_hba *hba)
{
	u32 lifetime;
	int ret;
	u8 index;

	index = ufshcd_wb_get_query_index(hba);
	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				      QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST,
				      index, 0, &lifetime);
	if (ret) {
		dev_err(hba->dev,
			"%s: bWriteBoosterBufferLifeTimeEst read failed %d\n",
			__func__, ret);
		return false;
	}

	if (lifetime == UFS_WB_EXCEED_LIFETIME) {
		dev_err(hba->dev, "%s: WB buf lifetime is exhausted 0x%02X\n",
			__func__, lifetime);
		return false;
	}

	dev_dbg(hba->dev, "%s: WB buf lifetime is 0x%02X\n",
		__func__, lifetime);

	return true;
}

static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
{
	int ret;
	u32 avail_buf;
	u8 index;

	if (!ufshcd_is_wb_allowed(hba))
		return false;

	if (!ufshcd_is_wb_buf_lifetime_available(hba)) {
		ufshcd_wb_force_disable(hba);
		return false;
	}

	/*
	 * The ufs device needs the vcc to be ON to flush.
	 * With user-space reduction enabled, it's enough to enable flush
	 * by checking only the available buffer. The threshold
	 * defined here is > 90% full.
	 * With user-space preserved enabled, the current-buffer
	 * should be checked too because the wb buffer size can reduce
	 * when disk tends to be full. This info is provided by current
	 * buffer (dCurrentWriteBoosterBufferSize). There's no point in
	 * keeping vcc on when current buffer is empty.
	 */
	index = ufshcd_wb_get_query_index(hba);
	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				      QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE,
				      index, 0, &avail_buf);
	if (ret) {
		dev_warn(hba->dev, "%s: dAvailableWriteBoosterBufferSize read failed %d\n",
			 __func__, ret);
		return false;
	}

	if (!hba->dev_info.b_presrv_uspc_en)
		return avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10);

	return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
}
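
/*
 * Delayed work that briefly runtime-resumes the device so that the
 * WriteBooster flush and BKOPS thresholds are re-evaluated on the next
 * runtime suspend.
 */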

static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(to_delayed_work(work),
					   struct ufs_hba,
					   rpm_dev_flush_recheck_work);
	/*
	 * To prevent unnecessary VCC power drain after device finishes
	 * WriteBooster buffer flush or Auto BKOPs, force runtime resume
	 * after a certain delay to recheck the threshold by next runtime
	 * suspend.
	 */
	ufshcd_rpm_get_sync(hba);
	ufshcd_rpm_put_sync(hba);
}

/**
 * ufshcd_exception_event_handler - handle exceptions raised by device
 * @work: pointer to work data
 *
 * Read bExceptionEventStatus attribute from the device and handle the
 * exception event accordingly.
 */
static void ufshcd_exception_event_handler(struct work_struct *work)
{
	struct ufs_hba *hba;
	int err;
	u32 status = 0;

	hba = container_of(work, struct ufs_hba, eeh_work);

	ufshcd_scsi_block_requests(hba);
	err = ufshcd_get_ee_status(hba, &status);
	if (err) {
		dev_err(hba->dev, "%s: failed to get exception status %d\n",
				__func__, err);
		goto out;
	}

	trace_ufshcd_exception_event(dev_name(hba->dev), status);

	if (status & hba->ee_drv_mask & MASK_EE_URGENT_BKOPS)
		ufshcd_bkops_exception_event_handler(hba);

	if (status & hba->ee_drv_mask & MASK_EE_URGENT_TEMP)
		ufshcd_temp_exception_event_handler(hba, status);

	ufs_debugfs_exception_event(hba, status);
out:
	ufshcd_scsi_unblock_requests(hba);
}

/* Complete requests that have door-bell cleared */
static void ufshcd_complete_requests(struct ufs_hba *hba, bool force_compl)
{
	if (is_mcq_enabled(hba))
		ufshcd_mcq_compl_pending_transfer(hba, force_compl);
	else
		ufshcd_transfer_req_compl(hba);

	ufshcd_tmc_handler(hba);
}

/**
 * ufshcd_quirk_dl_nac_errors - This function checks if error handling is
 *				to recover from the DL NAC errors or not.
 * @hba: per-adapter instance
 *
 * Return: true if error handling is required, false otherwise.
 */
static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
{
	unsigned long flags;
	bool err_handling = true;

	spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only workaround the
	 * device fatal error and/or DL NAC & REPLAY timeout errors.
	 */
	if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
		goto out;

	if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
	    ((hba->saved_err & UIC_ERROR) &&
	    (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
		goto out;

	if ((hba->saved_err & UIC_ERROR) &&
	    (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
		int err;
		/*
		 * wait for 50ms to see if we can get any other errors or not.
		 */
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		msleep(50);
		spin_lock_irqsave(hba->host->host_lock, flags);

		/*
		 * now check if we have got any other severe errors other than
		 * DL NAC error?
		 */
		if ((hba->saved_err & INT_FATAL_ERRORS) ||
		    ((hba->saved_err & UIC_ERROR) &&
		    (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
			goto out;

		/*
		 * As DL NAC is the only error received so far, send out NOP
		 * command to confirm if link is still active or not.
		 *   - If we don't get any response then do error recovery.
		 *   - If we get response then clear the DL NAC error bit.
		 */

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		err = ufshcd_verify_dev_init(hba);
		spin_lock_irqsave(hba->host->host_lock, flags);

		if (err)
			goto out;

		/* Link seems to be alive hence ignore the DL NAC errors */
		if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
			hba->saved_err &= ~UIC_ERROR;
		/* clear NAC error */
		hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
		if (!hba->saved_uic_err)
			err_handling = false;
	}
out:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return err_handling;
}

/* host lock must be held before calling this func */
static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
{
	return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
	       (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
}

void ufshcd_schedule_eh_work(struct ufs_hba *hba)
{
	lockdep_assert_held(hba->host->host_lock);

	/* handle fatal errors only when link is not in error state */
	if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
		if (hba->force_reset || ufshcd_is_link_broken(hba) ||
		    ufshcd_is_saved_err_fatal(hba))
			hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
		else
			hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
		queue_work(hba->eh_wq, &hba->eh_work);
	}
}

static void ufshcd_force_error_recovery(struct ufs_hba *hba)
{
	spin_lock_irq(hba->host->host_lock);
	hba->force_reset = true;
	ufshcd_schedule_eh_work(hba);
	spin_unlock_irq(hba->host->host_lock);
}

static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
{
	mutex_lock(&hba->wb_mutex);
	down_write(&hba->clk_scaling_lock);
	hba->clk_scaling.is_allowed = allow;
	up_write(&hba->clk_scaling_lock);
	mutex_unlock(&hba->wb_mutex);
}

static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
{
	if (suspend) {
		if (hba->clk_scaling.is_enabled)
			ufshcd_suspend_clkscaling(hba);
		ufshcd_clk_scaling_allow(hba, false);
	} else {
		ufshcd_clk_scaling_allow(hba, true);
		if (hba->clk_scaling.is_enabled)
			ufshcd_resume_clkscaling(hba);
	}
}
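
/*
 * Bring the device out of any PM state, block SCSI requests and quiesce
 * clock scaling before the error handler starts recovery.
 */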
static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
{
	ufshcd_rpm_get_sync(hba);
	if (pm_runtime_status_suspended(&hba->ufs_device_wlun->sdev_gendev) ||
	    hba->is_sys_suspended) {
		enum ufs_pm_op pm_op;

		/*
		 * Don't assume anything of resume, if
		 * resume fails, irq and clocks can be OFF, and powers
		 * can be OFF or in LPM.
		 */
		ufshcd_setup_hba_vreg(hba, true);
		ufshcd_enable_irq(hba);
		ufshcd_setup_vreg(hba, true);
		ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
		ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
		ufshcd_hold(hba);
		if (!ufshcd_is_clkgating_allowed(hba))
			ufshcd_setup_clocks(hba, true);
		ufshcd_release(hba);
		pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM;
		ufshcd_vops_resume(hba, pm_op);
	} else {
		ufshcd_hold(hba);
		if (ufshcd_is_clkscaling_supported(hba) &&
		    hba->clk_scaling.is_enabled)
			ufshcd_suspend_clkscaling(hba);
		ufshcd_clk_scaling_allow(hba, false);
	}
	ufshcd_scsi_block_requests(hba);
	/* Wait for ongoing ufshcd_queuecommand() calls to finish. */
	blk_mq_wait_quiesce_done(&hba->host->tag_set);
	cancel_work_sync(&hba->eeh_work);
}

static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
{
	ufshcd_scsi_unblock_requests(hba);
	ufshcd_release(hba);
	if (ufshcd_is_clkscaling_supported(hba))
		ufshcd_clk_scaling_suspend(hba, false);
	ufshcd_rpm_put(hba);
}

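/*
 * Returns true when there is nothing left for the error handler to do:
 * the host is powered down, shutting down, already dead, or no error is
 * pending and no reset has been requested.
 */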
static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
{
	return (!hba->is_powered || hba->shutting_down ||
		!hba->ufs_device_wlun ||
		hba->ufshcd_state == UFSHCD_STATE_ERROR ||
		(!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
		   ufshcd_is_link_broken(hba))));
}

#ifdef CONFIG_PM
static void ufshcd_recover_pm_error(struct ufs_hba *hba)
{
	struct Scsi_Host *shost = hba->host;
	struct scsi_device *sdev;
	struct request_queue *q;
	int ret;

	hba->is_sys_suspended = false;
	/*
	 * Set RPM status of wlun device to RPM_ACTIVE,
	 * this also clears its runtime error.
	 */
	ret = pm_runtime_set_active(&hba->ufs_device_wlun->sdev_gendev);

	/* hba device might have a runtime error otherwise */
	if (!ret)
		ret = pm_runtime_set_active(hba->dev);
	/*
	 * If wlun device had runtime error, we also need to resume those
	 * consumer scsi devices in case any of them has failed to be
	 * resumed due to supplier runtime resume failure. This is to unblock
	 * blk_queue_enter in case there are bios waiting inside it.
	 */
	if (!ret) {
		shost_for_each_device(sdev, shost) {
			q = sdev->request_queue;
			if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
				       q->rpm_status == RPM_SUSPENDING))
				pm_request_resume(q->dev);
		}
	}
}
#else
static inline void ufshcd_recover_pm_error(struct ufs_hba *hba)
{
}
#endif

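/*
 * A LINERESET may have dropped the link to PWM mode. Compare the current
 * PA_PWRMODE against the cached power info to decide whether a power mode
 * restore is required.
 */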
static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba)
{
	struct ufs_pa_layer_attr *pwr_info = &hba->pwr_info;
	u32 mode;

	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);

	if (pwr_info->pwr_rx != ((mode >> PWRMODE_RX_OFFSET) & PWRMODE_MASK))
		return true;

	if (pwr_info->pwr_tx != (mode & PWRMODE_MASK))
		return true;

	return false;
}

static bool ufshcd_abort_one(struct request *rq, void *priv)
{
	int *ret = priv;
	u32 tag = rq->tag;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
	struct scsi_device *sdev = cmd->device;
	struct Scsi_Host *shost = sdev->host;
	struct ufs_hba *hba = shost_priv(shost);

	*ret = ufshcd_try_to_abort_task(hba, tag);
	dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
		hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
		*ret ? "failed" : "succeeded");

	return *ret == 0;
}

/**
 * ufshcd_abort_all - Abort all pending commands.
 * @hba: Host bus adapter pointer.
 *
 * Return: true if and only if the host controller needs to be reset.
 */
static bool ufshcd_abort_all(struct ufs_hba *hba)
{
	int tag, ret = 0;

	blk_mq_tagset_busy_iter(&hba->host->tag_set, ufshcd_abort_one, &ret);
	if (ret)
		goto out;

	/* Clear pending task management requests */
	for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
		ret = ufshcd_clear_tm_cmd(hba, tag);
		if (ret)
			goto out;
	}

out:
	/* Complete the requests that are cleared by s/w */
	ufshcd_complete_requests(hba, false);

	return ret != 0;
}

6484 * ufshcd_err_handler - handle UFS errors that require s/w attention
6485 * @work: pointer to work structure
6487 static void ufshcd_err_handler(struct work_struct
*work
)
6489 int retries
= MAX_ERR_HANDLER_RETRIES
;
6490 struct ufs_hba
*hba
;
6491 unsigned long flags
;
6496 hba
= container_of(work
, struct ufs_hba
, eh_work
);
6499 "%s started; HBA state %s; powered %d; shutting down %d; saved_err = %d; saved_uic_err = %d; force_reset = %d%s\n",
6500 __func__
, ufshcd_state_name
[hba
->ufshcd_state
],
6501 hba
->is_powered
, hba
->shutting_down
, hba
->saved_err
,
6502 hba
->saved_uic_err
, hba
->force_reset
,
6503 ufshcd_is_link_broken(hba
) ? "; link is broken" : "");
6505 down(&hba
->host_sem
);
6506 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
6507 if (ufshcd_err_handling_should_stop(hba
)) {
6508 if (hba
->ufshcd_state
!= UFSHCD_STATE_ERROR
)
6509 hba
->ufshcd_state
= UFSHCD_STATE_OPERATIONAL
;
6510 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
6514 ufshcd_set_eh_in_progress(hba
);
6515 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
6516 ufshcd_err_handling_prepare(hba
);
6517 /* Complete requests that have door-bell cleared by h/w */
6518 ufshcd_complete_requests(hba
, false);
6519 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
6521 needs_restore
= false;
6522 needs_reset
= false;
6524 if (hba
->ufshcd_state
!= UFSHCD_STATE_ERROR
)
6525 hba
->ufshcd_state
= UFSHCD_STATE_RESET
;
6527 * A full reset and restore might have happened after preparation
6528 * is finished, double check whether we should stop.
6530 if (ufshcd_err_handling_should_stop(hba
))
6531 goto skip_err_handling
;
6533 if (hba
->dev_quirks
& UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS
) {
6536 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
6537 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
6538 ret
= ufshcd_quirk_dl_nac_errors(hba
);
6539 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
6540 if (!ret
&& ufshcd_err_handling_should_stop(hba
))
6541 goto skip_err_handling
;
6544 if ((hba
->saved_err
& (INT_FATAL_ERRORS
| UFSHCD_UIC_HIBERN8_MASK
)) ||
6545 (hba
->saved_uic_err
&&
6546 (hba
->saved_uic_err
!= UFSHCD_UIC_PA_GENERIC_ERROR
))) {
6547 bool pr_prdt
= !!(hba
->saved_err
& SYSTEM_BUS_FATAL_ERROR
);
6549 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
6550 ufshcd_print_host_state(hba
);
6551 ufshcd_print_pwr_info(hba
);
6552 ufshcd_print_evt_hist(hba
);
6553 ufshcd_print_tmrs(hba
, hba
->outstanding_tasks
);
6554 ufshcd_print_trs_all(hba
, pr_prdt
);
6555 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
6559 * if host reset is required then skip clearing the pending
6560 * transfers forcefully because they will get cleared during
6561 * host reset and restore
6563 if (hba
->force_reset
|| ufshcd_is_link_broken(hba
) ||
6564 ufshcd_is_saved_err_fatal(hba
) ||
6565 ((hba
->saved_err
& UIC_ERROR
) &&
6566 (hba
->saved_uic_err
& (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR
|
6567 UFSHCD_UIC_DL_TCx_REPLAY_ERROR
)))) {
6573 * If LINERESET was caught, UFS might have been put to PWM mode,
6574 * check if power mode restore is needed.
6576 if (hba
->saved_uic_err
& UFSHCD_UIC_PA_GENERIC_ERROR
) {
6577 hba
->saved_uic_err
&= ~UFSHCD_UIC_PA_GENERIC_ERROR
;
6578 if (!hba
->saved_uic_err
)
6579 hba
->saved_err
&= ~UIC_ERROR
;
6580 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
6581 if (ufshcd_is_pwr_mode_restore_needed(hba
))
6582 needs_restore
= true;
6583 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
6584 if (!hba
->saved_err
&& !needs_restore
)
6585 goto skip_err_handling
;
6588 hba
->silence_err_logs
= true;
6589 /* release lock as clear command might sleep */
6590 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
6592 needs_reset
= ufshcd_abort_all(hba
);
6594 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
6595 hba
->silence_err_logs
= false;
6600 * After all reqs and tasks are cleared from doorbell,
6601 * now it is safe to retore power mode.
6603 if (needs_restore
) {
6604 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
6606 * Hold the scaling lock just in case dev cmds
6607 * are sent via bsg and/or sysfs.
6609 down_write(&hba
->clk_scaling_lock
);
6610 hba
->force_pmc
= true;
6611 pmc_err
= ufshcd_config_pwr_mode(hba
, &(hba
->pwr_info
));
6614 dev_err(hba
->dev
, "%s: Failed to restore power mode, err = %d\n",
6617 hba
->force_pmc
= false;
6618 ufshcd_print_pwr_info(hba
);
6619 up_write(&hba
->clk_scaling_lock
);
6620 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
6624 /* Fatal errors need reset */
6628 hba
->force_reset
= false;
6629 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
6630 err
= ufshcd_reset_and_restore(hba
);
6632 dev_err(hba
->dev
, "%s: reset and restore failed with err %d\n",
6635 ufshcd_recover_pm_error(hba
);
6636 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
6641 if (hba
->ufshcd_state
== UFSHCD_STATE_RESET
)
6642 hba
->ufshcd_state
= UFSHCD_STATE_OPERATIONAL
;
6643 if (hba
->saved_err
|| hba
->saved_uic_err
)
6644 dev_err_ratelimited(hba
->dev
, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
6645 __func__
, hba
->saved_err
, hba
->saved_uic_err
);
6647 /* Exit in an operational state or dead */
6648 if (hba
->ufshcd_state
!= UFSHCD_STATE_OPERATIONAL
&&
6649 hba
->ufshcd_state
!= UFSHCD_STATE_ERROR
) {
6652 hba
->ufshcd_state
= UFSHCD_STATE_ERROR
;
6654 ufshcd_clear_eh_in_progress(hba
);
6655 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
6656 ufshcd_err_handling_unprepare(hba
);
6659 dev_info(hba
->dev
, "%s finished; HBA state %s\n", __func__
,
6660 ufshcd_state_name
[hba
->ufshcd_state
]);
6664 * ufshcd_update_uic_error - check and set fatal UIC error flags.
6665 * @hba: per-adapter instance
6668 * IRQ_HANDLED - If interrupt is valid
6669 * IRQ_NONE - If invalid interrupt
6671 static irqreturn_t
ufshcd_update_uic_error(struct ufs_hba
*hba
)
6674 irqreturn_t retval
= IRQ_NONE
;
6676 /* PHY layer error */
6677 reg
= ufshcd_readl(hba
, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER
);
6678 if ((reg
& UIC_PHY_ADAPTER_LAYER_ERROR
) &&
6679 (reg
& UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK
)) {
6680 ufshcd_update_evt_hist(hba
, UFS_EVT_PA_ERR
, reg
);
6682 * To know whether this error is fatal or not, DB timeout
6683 * must be checked but this error is handled separately.
6685 if (reg
& UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK
)
6686 dev_dbg(hba
->dev
, "%s: UIC Lane error reported\n",
6689 /* Got a LINERESET indication. */
6690 if (reg
& UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR
) {
6691 struct uic_command
*cmd
= NULL
;
6693 hba
->uic_error
|= UFSHCD_UIC_PA_GENERIC_ERROR
;
6694 if (hba
->uic_async_done
&& hba
->active_uic_cmd
)
6695 cmd
= hba
->active_uic_cmd
;
6697 * Ignore the LINERESET during power mode change
6698 * operation via DME_SET command.
6700 if (cmd
&& (cmd
->command
== UIC_CMD_DME_SET
))
6701 hba
->uic_error
&= ~UFSHCD_UIC_PA_GENERIC_ERROR
;
6703 retval
|= IRQ_HANDLED
;
6706 /* PA_INIT_ERROR is fatal and needs UIC reset */
6707 reg
= ufshcd_readl(hba
, REG_UIC_ERROR_CODE_DATA_LINK_LAYER
);
6708 if ((reg
& UIC_DATA_LINK_LAYER_ERROR
) &&
6709 (reg
& UIC_DATA_LINK_LAYER_ERROR_CODE_MASK
)) {
6710 ufshcd_update_evt_hist(hba
, UFS_EVT_DL_ERR
, reg
);
6712 if (reg
& UIC_DATA_LINK_LAYER_ERROR_PA_INIT
)
6713 hba
->uic_error
|= UFSHCD_UIC_DL_PA_INIT_ERROR
;
6714 else if (hba
->dev_quirks
&
6715 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS
) {
6716 if (reg
& UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED
)
6718 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR
;
6719 else if (reg
& UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT
)
6720 hba
->uic_error
|= UFSHCD_UIC_DL_TCx_REPLAY_ERROR
;
6722 retval
|= IRQ_HANDLED
;
6725 /* UIC NL/TL/DME errors needs software retry */
6726 reg
= ufshcd_readl(hba
, REG_UIC_ERROR_CODE_NETWORK_LAYER
);
6727 if ((reg
& UIC_NETWORK_LAYER_ERROR
) &&
6728 (reg
& UIC_NETWORK_LAYER_ERROR_CODE_MASK
)) {
6729 ufshcd_update_evt_hist(hba
, UFS_EVT_NL_ERR
, reg
);
6730 hba
->uic_error
|= UFSHCD_UIC_NL_ERROR
;
6731 retval
|= IRQ_HANDLED
;
6734 reg
= ufshcd_readl(hba
, REG_UIC_ERROR_CODE_TRANSPORT_LAYER
);
6735 if ((reg
& UIC_TRANSPORT_LAYER_ERROR
) &&
6736 (reg
& UIC_TRANSPORT_LAYER_ERROR_CODE_MASK
)) {
6737 ufshcd_update_evt_hist(hba
, UFS_EVT_TL_ERR
, reg
);
6738 hba
->uic_error
|= UFSHCD_UIC_TL_ERROR
;
6739 retval
|= IRQ_HANDLED
;
6742 reg
= ufshcd_readl(hba
, REG_UIC_ERROR_CODE_DME
);
6743 if ((reg
& UIC_DME_ERROR
) &&
6744 (reg
& UIC_DME_ERROR_CODE_MASK
)) {
6745 ufshcd_update_evt_hist(hba
, UFS_EVT_DME_ERR
, reg
);
6746 hba
->uic_error
|= UFSHCD_UIC_DME_ERROR
;
6747 retval
|= IRQ_HANDLED
;
6750 dev_dbg(hba
->dev
, "%s: UIC error flags = 0x%08x\n",
6751 __func__
, hba
->uic_error
);
6756 * ufshcd_check_errors - Check for errors that need s/w attention
6757 * @hba: per-adapter instance
6758 * @intr_status: interrupt status generated by the controller
6761 * IRQ_HANDLED - If interrupt is valid
6762 * IRQ_NONE - If invalid interrupt
6764 static irqreturn_t
ufshcd_check_errors(struct ufs_hba
*hba
, u32 intr_status
)
6766 bool queue_eh_work
= false;
6767 irqreturn_t retval
= IRQ_NONE
;
6769 spin_lock(hba
->host
->host_lock
);
6770 hba
->errors
|= UFSHCD_ERROR_MASK
& intr_status
;
6772 if (hba
->errors
& INT_FATAL_ERRORS
) {
6773 ufshcd_update_evt_hist(hba
, UFS_EVT_FATAL_ERR
,
6775 queue_eh_work
= true;
6778 if (hba
->errors
& UIC_ERROR
) {
6780 retval
= ufshcd_update_uic_error(hba
);
6782 queue_eh_work
= true;
6785 if (hba
->errors
& UFSHCD_UIC_HIBERN8_MASK
) {
6787 "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
6788 __func__
, (hba
->errors
& UIC_HIBERNATE_ENTER
) ?
6790 hba
->errors
, ufshcd_get_upmcrs(hba
));
6791 ufshcd_update_evt_hist(hba
, UFS_EVT_AUTO_HIBERN8_ERR
,
6793 ufshcd_set_link_broken(hba
);
6794 queue_eh_work
= true;
6797 if (queue_eh_work
) {
6799 * update the transfer error masks to sticky bits, let's do this
6800 * irrespective of current ufshcd_state.
6802 hba
->saved_err
|= hba
->errors
;
6803 hba
->saved_uic_err
|= hba
->uic_error
;
6805 /* dump controller state before resetting */
6806 if ((hba
->saved_err
&
6807 (INT_FATAL_ERRORS
| UFSHCD_UIC_HIBERN8_MASK
)) ||
6808 (hba
->saved_uic_err
&&
6809 (hba
->saved_uic_err
!= UFSHCD_UIC_PA_GENERIC_ERROR
))) {
6810 dev_err(hba
->dev
, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
6811 __func__
, hba
->saved_err
,
6812 hba
->saved_uic_err
);
6813 ufshcd_dump_regs(hba
, 0, UFSHCI_REG_SPACE_SIZE
,
6815 ufshcd_print_pwr_info(hba
);
6817 ufshcd_schedule_eh_work(hba
);
6818 retval
|= IRQ_HANDLED
;
6821 * if (!queue_eh_work) -
6822 * Other errors are either non-fatal where host recovers
6823 * itself without s/w intervention or errors that will be
6824 * handled by the SCSI core layer.
6828 spin_unlock(hba
->host
->host_lock
);
/**
 * ufshcd_tmc_handler - handle task management function completion
 * @hba: per adapter instance
 *
 * Return:
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
{
	unsigned long flags, pending, issued;
	irqreturn_t ret = IRQ_NONE;
	int tag;

	spin_lock_irqsave(hba->host->host_lock, flags);
	pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
	issued = hba->outstanding_tasks & ~pending;
	for_each_set_bit(tag, &issued, hba->nutmrs) {
		struct request *req = hba->tmf_rqs[tag];
		struct completion *c = req->end_io_data;

		complete(c);
		ret = IRQ_HANDLED;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return ret;
}

/**
 * ufshcd_handle_mcq_cq_events - handle MCQ completion queue events
 * @hba: per adapter instance
 *
 * Return: IRQ_HANDLED if interrupt is handled.
 */
static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba)
{
	struct ufs_hw_queue *hwq;
	unsigned long outstanding_cqs;
	unsigned int nr_queues;
	int i, ret;
	u32 events;

	ret = ufshcd_vops_get_outstanding_cqs(hba, &outstanding_cqs);
	if (ret)
		outstanding_cqs = (1U << hba->nr_hw_queues) - 1;

	/* Exclude the poll queues */
	nr_queues = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
	for_each_set_bit(i, &outstanding_cqs, nr_queues) {
		hwq = &hba->uhq[i];

		events = ufshcd_mcq_read_cqis(hba, i);
		if (events)
			ufshcd_mcq_write_cqis(hba, events, i);

		if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
			ufshcd_mcq_poll_cqe_lock(hba, hwq);
	}

	return IRQ_HANDLED;
}

/**
 * ufshcd_sl_intr - Interrupt service routine
 * @hba: per adapter instance
 * @intr_status: contains interrupts generated by the controller
 *
 * Return:
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
{
	irqreturn_t retval = IRQ_NONE;

	if (intr_status & UFSHCD_UIC_MASK)
		retval |= ufshcd_uic_cmd_compl(hba, intr_status);

	if (intr_status & UFSHCD_ERROR_MASK || hba->errors)
		retval |= ufshcd_check_errors(hba, intr_status);

	if (intr_status & UTP_TASK_REQ_COMPL)
		retval |= ufshcd_tmc_handler(hba);

	if (intr_status & UTP_TRANSFER_REQ_COMPL)
		retval |= ufshcd_transfer_req_compl(hba);

	if (intr_status & MCQ_CQ_EVENT_STATUS)
		retval |= ufshcd_handle_mcq_cq_events(hba);

	return retval;
}

6927 * ufshcd_intr - Main interrupt service routine
6929 * @__hba: pointer to adapter instance
6932 * IRQ_HANDLED - If interrupt is valid
6933 * IRQ_NONE - If invalid interrupt
6935 static irqreturn_t
ufshcd_intr(int irq
, void *__hba
)
6937 u32 intr_status
, enabled_intr_status
= 0;
6938 irqreturn_t retval
= IRQ_NONE
;
6939 struct ufs_hba
*hba
= __hba
;
6940 int retries
= hba
->nutrs
;
6942 intr_status
= ufshcd_readl(hba
, REG_INTERRUPT_STATUS
);
6943 hba
->ufs_stats
.last_intr_status
= intr_status
;
6944 hba
->ufs_stats
.last_intr_ts
= local_clock();
6947 * There could be max of hba->nutrs reqs in flight and in worst case
6948 * if the reqs get finished 1 by 1 after the interrupt status is
6949 * read, make sure we handle them by checking the interrupt status
6950 * again in a loop until we process all of the reqs before returning.
6952 while (intr_status
&& retries
--) {
6953 enabled_intr_status
=
6954 intr_status
& ufshcd_readl(hba
, REG_INTERRUPT_ENABLE
);
6955 ufshcd_writel(hba
, intr_status
, REG_INTERRUPT_STATUS
);
6956 if (enabled_intr_status
)
6957 retval
|= ufshcd_sl_intr(hba
, enabled_intr_status
);
6959 intr_status
= ufshcd_readl(hba
, REG_INTERRUPT_STATUS
);
6962 if (enabled_intr_status
&& retval
== IRQ_NONE
&&
6963 (!(enabled_intr_status
& UTP_TRANSFER_REQ_COMPL
) ||
6964 hba
->outstanding_reqs
) && !ufshcd_eh_in_progress(hba
)) {
6965 dev_err(hba
->dev
, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
6968 hba
->ufs_stats
.last_intr_status
,
6969 enabled_intr_status
);
6970 ufshcd_dump_regs(hba
, 0, UFSHCI_REG_SPACE_SIZE
, "host_regs: ");
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
{
	int err = 0;
	u32 mask = 1 << tag;
	unsigned long flags;

	if (!test_bit(tag, &hba->outstanding_tasks))
		goto out;

	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_utmrl_clear(hba, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* poll for max. 1 sec to clear door bell register by h/w */
	err = ufshcd_wait_for_register(hba,
			REG_UTP_TASK_REQ_DOOR_BELL,
			mask, 0, 1000, 1000);

	dev_err(hba->dev, "Clearing task management function with tag %d %s\n",
		tag, err < 0 ? "failed" : "succeeded");

out:
	return err;
}

7001 static int __ufshcd_issue_tm_cmd(struct ufs_hba
*hba
,
7002 struct utp_task_req_desc
*treq
, u8 tm_function
)
7004 struct request_queue
*q
= hba
->tmf_queue
;
7005 struct Scsi_Host
*host
= hba
->host
;
7006 DECLARE_COMPLETION_ONSTACK(wait
);
7007 struct request
*req
;
7008 unsigned long flags
;
7012 * blk_mq_alloc_request() is used here only to get a free tag.
7014 req
= blk_mq_alloc_request(q
, REQ_OP_DRV_OUT
, 0);
7016 return PTR_ERR(req
);
7018 req
->end_io_data
= &wait
;
7021 spin_lock_irqsave(host
->host_lock
, flags
);
7023 task_tag
= req
->tag
;
7024 hba
->tmf_rqs
[req
->tag
] = req
;
7025 treq
->upiu_req
.req_header
.task_tag
= task_tag
;
7027 memcpy(hba
->utmrdl_base_addr
+ task_tag
, treq
, sizeof(*treq
));
7028 ufshcd_vops_setup_task_mgmt(hba
, task_tag
, tm_function
);
7030 /* send command to the controller */
7031 __set_bit(task_tag
, &hba
->outstanding_tasks
);
7033 ufshcd_writel(hba
, 1 << task_tag
, REG_UTP_TASK_REQ_DOOR_BELL
);
7034 /* Make sure that doorbell is committed immediately */
7037 spin_unlock_irqrestore(host
->host_lock
, flags
);
7039 ufshcd_add_tm_upiu_trace(hba
, task_tag
, UFS_TM_SEND
);
7041 /* wait until the task management command is completed */
7042 err
= wait_for_completion_io_timeout(&wait
,
7043 msecs_to_jiffies(TM_CMD_TIMEOUT
));
7045 ufshcd_add_tm_upiu_trace(hba
, task_tag
, UFS_TM_ERR
);
7046 dev_err(hba
->dev
, "%s: task management cmd 0x%.2x timed-out\n",
7047 __func__
, tm_function
);
7048 if (ufshcd_clear_tm_cmd(hba
, task_tag
))
7049 dev_WARN(hba
->dev
, "%s: unable to clear tm cmd (slot %d) after timeout\n",
7050 __func__
, task_tag
);
7054 memcpy(treq
, hba
->utmrdl_base_addr
+ task_tag
, sizeof(*treq
));
7056 ufshcd_add_tm_upiu_trace(hba
, task_tag
, UFS_TM_COMP
);
7059 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
7060 hba
->tmf_rqs
[req
->tag
] = NULL
;
7061 __clear_bit(task_tag
, &hba
->outstanding_tasks
);
7062 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
7064 ufshcd_release(hba
);
7065 blk_mq_free_request(req
);
7071 * ufshcd_issue_tm_cmd - issues task management commands to controller
7072 * @hba: per adapter instance
7073 * @lun_id: LUN ID to which TM command is sent
7074 * @task_id: task ID to which the TM command is applicable
7075 * @tm_function: task management function opcode
7076 * @tm_response: task management service response return value
7078 * Return: non-zero value on error, zero on success.
7080 static int ufshcd_issue_tm_cmd(struct ufs_hba
*hba
, int lun_id
, int task_id
,
7081 u8 tm_function
, u8
*tm_response
)
7083 struct utp_task_req_desc treq
= { };
7084 enum utp_ocs ocs_value
;
7087 /* Configure task request descriptor */
7088 treq
.header
.interrupt
= 1;
7089 treq
.header
.ocs
= OCS_INVALID_COMMAND_STATUS
;
7091 /* Configure task request UPIU */
7092 treq
.upiu_req
.req_header
.transaction_code
= UPIU_TRANSACTION_TASK_REQ
;
7093 treq
.upiu_req
.req_header
.lun
= lun_id
;
7094 treq
.upiu_req
.req_header
.tm_function
= tm_function
;
7097 * The host shall provide the same value for LUN field in the basic
7098 * header and for Input Parameter.
7100 treq
.upiu_req
.input_param1
= cpu_to_be32(lun_id
);
7101 treq
.upiu_req
.input_param2
= cpu_to_be32(task_id
);
7103 err
= __ufshcd_issue_tm_cmd(hba
, &treq
, tm_function
);
7104 if (err
== -ETIMEDOUT
)
7107 ocs_value
= treq
.header
.ocs
& MASK_OCS
;
7108 if (ocs_value
!= OCS_SUCCESS
)
7109 dev_err(hba
->dev
, "%s: failed, ocs = 0x%x\n",
7110 __func__
, ocs_value
);
7111 else if (tm_response
)
7112 *tm_response
= be32_to_cpu(treq
.upiu_rsp
.output_param1
) &
7113 MASK_TM_SERVICE_RESP
;
7118 * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
7119 * @hba: per-adapter instance
7120 * @req_upiu: upiu request
7121 * @rsp_upiu: upiu reply
7122 * @desc_buff: pointer to descriptor buffer, NULL if NA
7123 * @buff_len: descriptor size, 0 if NA
7124 * @cmd_type: specifies the type (NOP, Query...)
7125 * @desc_op: descriptor operation
7127 * Those type of requests uses UTP Transfer Request Descriptor - utrd.
7128 * Therefore, it "rides" the device management infrastructure: uses its tag and
7129 * tasks work queues.
7131 * Since there is only one available tag for device management commands,
7132 * the caller is expected to hold the hba->dev_cmd.lock mutex.
7134 * Return: 0 upon success; < 0 upon failure.
7136 static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba
*hba
,
7137 struct utp_upiu_req
*req_upiu
,
7138 struct utp_upiu_req
*rsp_upiu
,
7139 u8
*desc_buff
, int *buff_len
,
7140 enum dev_cmd_type cmd_type
,
7141 enum query_opcode desc_op
)
7143 DECLARE_COMPLETION_ONSTACK(wait
);
7144 const u32 tag
= hba
->reserved_slot
;
7145 struct ufshcd_lrb
*lrbp
;
7149 /* Protects use of hba->reserved_slot. */
7150 lockdep_assert_held(&hba
->dev_cmd
.lock
);
7152 down_read(&hba
->clk_scaling_lock
);
7154 lrbp
= &hba
->lrb
[tag
];
7156 lrbp
->task_tag
= tag
;
7158 lrbp
->intr_cmd
= true;
7159 ufshcd_prepare_lrbp_crypto(NULL
, lrbp
);
7160 hba
->dev_cmd
.type
= cmd_type
;
7162 if (hba
->ufs_version
<= ufshci_version(1, 1))
7163 lrbp
->command_type
= UTP_CMD_TYPE_DEV_MANAGE
;
7165 lrbp
->command_type
= UTP_CMD_TYPE_UFS_STORAGE
;
7167 /* update the task tag in the request upiu */
7168 req_upiu
->header
.task_tag
= tag
;
7170 ufshcd_prepare_req_desc_hdr(lrbp
, &upiu_flags
, DMA_NONE
, 0);
7172 /* just copy the upiu request as it is */
7173 memcpy(lrbp
->ucd_req_ptr
, req_upiu
, sizeof(*lrbp
->ucd_req_ptr
));
7174 if (desc_buff
&& desc_op
== UPIU_QUERY_OPCODE_WRITE_DESC
) {
7175 /* The Data Segment Area is optional depending upon the query
7176 * function value. for WRITE DESCRIPTOR, the data segment
7177 * follows right after the tsf.
7179 memcpy(lrbp
->ucd_req_ptr
+ 1, desc_buff
, *buff_len
);
7183 memset(lrbp
->ucd_rsp_ptr
, 0, sizeof(struct utp_upiu_rsp
));
7185 hba
->dev_cmd
.complete
= &wait
;
7187 ufshcd_add_query_upiu_trace(hba
, UFS_QUERY_SEND
, lrbp
->ucd_req_ptr
);
7189 ufshcd_send_command(hba
, tag
, hba
->dev_cmd_queue
);
7191 * ignore the returning value here - ufshcd_check_query_response is
7192 * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
7193 * read the response directly ignoring all errors.
7195 ufshcd_wait_for_dev_cmd(hba
, lrbp
, QUERY_REQ_TIMEOUT
);
7197 /* just copy the upiu response as it is */
7198 memcpy(rsp_upiu
, lrbp
->ucd_rsp_ptr
, sizeof(*rsp_upiu
));
7199 if (desc_buff
&& desc_op
== UPIU_QUERY_OPCODE_READ_DESC
) {
7200 u8
*descp
= (u8
*)lrbp
->ucd_rsp_ptr
+ sizeof(*rsp_upiu
);
7201 u16 resp_len
= be16_to_cpu(lrbp
->ucd_rsp_ptr
->header
7202 .data_segment_length
);
7204 if (*buff_len
>= resp_len
) {
7205 memcpy(desc_buff
, descp
, resp_len
);
7206 *buff_len
= resp_len
;
7209 "%s: rsp size %d is bigger than buffer size %d",
7210 __func__
, resp_len
, *buff_len
);
7215 ufshcd_add_query_upiu_trace(hba
, err
? UFS_QUERY_ERR
: UFS_QUERY_COMP
,
7216 (struct utp_upiu_req
*)lrbp
->ucd_rsp_ptr
);
7218 up_read(&hba
->clk_scaling_lock
);
7223 * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
7224 * @hba: per-adapter instance
7225 * @req_upiu: upiu request
7226 * @rsp_upiu: upiu reply - only 8 DW as we do not support scsi commands
7227 * @msgcode: message code, one of UPIU Transaction Codes Initiator to Target
7228 * @desc_buff: pointer to descriptor buffer, NULL if NA
7229 * @buff_len: descriptor size, 0 if NA
7230 * @desc_op: descriptor operation
7232 * Supports UTP Transfer requests (nop and query), and UTP Task
7233 * Management requests.
7234 * It is up to the caller to fill the upiu conent properly, as it will
7235 * be copied without any further input validations.
7237 * Return: 0 upon success; < 0 upon failure.
7239 int ufshcd_exec_raw_upiu_cmd(struct ufs_hba
*hba
,
7240 struct utp_upiu_req
*req_upiu
,
7241 struct utp_upiu_req
*rsp_upiu
,
7242 enum upiu_request_transaction msgcode
,
7243 u8
*desc_buff
, int *buff_len
,
7244 enum query_opcode desc_op
)
7247 enum dev_cmd_type cmd_type
= DEV_CMD_TYPE_QUERY
;
7248 struct utp_task_req_desc treq
= { };
7249 enum utp_ocs ocs_value
;
7250 u8 tm_f
= req_upiu
->header
.tm_function
;
7253 case UPIU_TRANSACTION_NOP_OUT
:
7254 cmd_type
= DEV_CMD_TYPE_NOP
;
7256 case UPIU_TRANSACTION_QUERY_REQ
:
7258 mutex_lock(&hba
->dev_cmd
.lock
);
7259 err
= ufshcd_issue_devman_upiu_cmd(hba
, req_upiu
, rsp_upiu
,
7260 desc_buff
, buff_len
,
7262 mutex_unlock(&hba
->dev_cmd
.lock
);
7263 ufshcd_release(hba
);
7266 case UPIU_TRANSACTION_TASK_REQ
:
7267 treq
.header
.interrupt
= 1;
7268 treq
.header
.ocs
= OCS_INVALID_COMMAND_STATUS
;
7270 memcpy(&treq
.upiu_req
, req_upiu
, sizeof(*req_upiu
));
7272 err
= __ufshcd_issue_tm_cmd(hba
, &treq
, tm_f
);
7273 if (err
== -ETIMEDOUT
)
7276 ocs_value
= treq
.header
.ocs
& MASK_OCS
;
7277 if (ocs_value
!= OCS_SUCCESS
) {
7278 dev_err(hba
->dev
, "%s: failed, ocs = 0x%x\n", __func__
,
7283 memcpy(rsp_upiu
, &treq
.upiu_rsp
, sizeof(*rsp_upiu
));
7296 * ufshcd_advanced_rpmb_req_handler - handle advanced RPMB request
7297 * @hba: per adapter instance
7298 * @req_upiu: upiu request
7299 * @rsp_upiu: upiu reply
7300 * @req_ehs: EHS field which contains Advanced RPMB Request Message
7301 * @rsp_ehs: EHS field which returns Advanced RPMB Response Message
7302 * @sg_cnt: The number of sg lists actually used
7303 * @sg_list: Pointer to SG list when DATA IN/OUT UPIU is required in ARPMB operation
7304 * @dir: DMA direction
7306 * Return: zero on success, non-zero on failure.
7308 int ufshcd_advanced_rpmb_req_handler(struct ufs_hba
*hba
, struct utp_upiu_req
*req_upiu
,
7309 struct utp_upiu_req
*rsp_upiu
, struct ufs_ehs
*req_ehs
,
7310 struct ufs_ehs
*rsp_ehs
, int sg_cnt
, struct scatterlist
*sg_list
,
7311 enum dma_data_direction dir
)
7313 DECLARE_COMPLETION_ONSTACK(wait
);
7314 const u32 tag
= hba
->reserved_slot
;
7315 struct ufshcd_lrb
*lrbp
;
7322 /* Protects use of hba->reserved_slot. */
7324 mutex_lock(&hba
->dev_cmd
.lock
);
7325 down_read(&hba
->clk_scaling_lock
);
7327 lrbp
= &hba
->lrb
[tag
];
7329 lrbp
->task_tag
= tag
;
7330 lrbp
->lun
= UFS_UPIU_RPMB_WLUN
;
7332 lrbp
->intr_cmd
= true;
7333 ufshcd_prepare_lrbp_crypto(NULL
, lrbp
);
7334 hba
->dev_cmd
.type
= DEV_CMD_TYPE_RPMB
;
7336 /* Advanced RPMB starts from UFS 4.0, so its command type is UTP_CMD_TYPE_UFS_STORAGE */
7337 lrbp
->command_type
= UTP_CMD_TYPE_UFS_STORAGE
;
7340 * According to UFSHCI 4.0 specification page 24, if EHSLUTRDS is 0, host controller takes
7341 * EHS length from CMD UPIU, and SW driver use EHS Length field in CMD UPIU. if it is 1,
7342 * HW controller takes EHS length from UTRD.
7344 if (hba
->capabilities
& MASK_EHSLUTRD_SUPPORTED
)
7345 ufshcd_prepare_req_desc_hdr(lrbp
, &upiu_flags
, dir
, 2);
7347 ufshcd_prepare_req_desc_hdr(lrbp
, &upiu_flags
, dir
, 0);
7349 /* update the task tag */
7350 req_upiu
->header
.task_tag
= tag
;
7352 /* copy the UPIU(contains CDB) request as it is */
7353 memcpy(lrbp
->ucd_req_ptr
, req_upiu
, sizeof(*lrbp
->ucd_req_ptr
));
7354 /* Copy EHS, starting with byte32, immediately after the CDB package */
7355 memcpy(lrbp
->ucd_req_ptr
+ 1, req_ehs
, sizeof(*req_ehs
));
7357 if (dir
!= DMA_NONE
&& sg_list
)
7358 ufshcd_sgl_to_prdt(hba
, lrbp
, sg_cnt
, sg_list
);
7360 memset(lrbp
->ucd_rsp_ptr
, 0, sizeof(struct utp_upiu_rsp
));
7362 hba
->dev_cmd
.complete
= &wait
;
7364 ufshcd_send_command(hba
, tag
, hba
->dev_cmd_queue
);
7366 err
= ufshcd_wait_for_dev_cmd(hba
, lrbp
, ADVANCED_RPMB_REQ_TIMEOUT
);
7369 /* Just copy the upiu response as it is */
7370 memcpy(rsp_upiu
, lrbp
->ucd_rsp_ptr
, sizeof(*rsp_upiu
));
7371 /* Get the response UPIU result */
7372 result
= (lrbp
->ucd_rsp_ptr
->header
.response
<< 8) |
7373 lrbp
->ucd_rsp_ptr
->header
.status
;
7375 ehs_len
= lrbp
->ucd_rsp_ptr
->header
.ehs_length
;
7377 * Since the bLength in EHS indicates the total size of the EHS Header and EHS Data
7378 * in 32 Byte units, the value of the bLength Request/Response for Advanced RPMB
7381 if (ehs_len
== 2 && rsp_ehs
) {
7383 * ucd_rsp_ptr points to a buffer with a length of 512 bytes
7384 * (ALIGNED_UPIU_SIZE = 512), and the EHS data just starts from byte32
7386 ehs_data
= (u8
*)lrbp
->ucd_rsp_ptr
+ EHS_OFFSET_IN_RESPONSE
;
7387 memcpy(rsp_ehs
, ehs_data
, ehs_len
* 32);
7391 up_read(&hba
->clk_scaling_lock
);
7392 mutex_unlock(&hba
->dev_cmd
.lock
);
7393 ufshcd_release(hba
);
7394 return err
? : result
;
7398 * ufshcd_eh_device_reset_handler() - Reset a single logical unit.
7399 * @cmd: SCSI command pointer
7401 * Return: SUCCESS or FAILED.
7403 static int ufshcd_eh_device_reset_handler(struct scsi_cmnd
*cmd
)
7405 unsigned long flags
, pending_reqs
= 0, not_cleared
= 0;
7406 struct Scsi_Host
*host
;
7407 struct ufs_hba
*hba
;
7408 struct ufs_hw_queue
*hwq
;
7409 struct ufshcd_lrb
*lrbp
;
7410 u32 pos
, not_cleared_mask
= 0;
7414 host
= cmd
->device
->host
;
7415 hba
= shost_priv(host
);
7417 lun
= ufshcd_scsi_to_upiu_lun(cmd
->device
->lun
);
7418 err
= ufshcd_issue_tm_cmd(hba
, lun
, 0, UFS_LOGICAL_RESET
, &resp
);
7419 if (err
|| resp
!= UPIU_TASK_MANAGEMENT_FUNC_COMPL
) {
7425 if (is_mcq_enabled(hba
)) {
7426 for (pos
= 0; pos
< hba
->nutrs
; pos
++) {
7427 lrbp
= &hba
->lrb
[pos
];
7428 if (ufshcd_cmd_inflight(lrbp
->cmd
) &&
7430 ufshcd_clear_cmd(hba
, pos
);
7431 hwq
= ufshcd_mcq_req_to_hwq(hba
, scsi_cmd_to_rq(lrbp
->cmd
));
7432 ufshcd_mcq_poll_cqe_lock(hba
, hwq
);
7439 /* clear the commands that were pending for corresponding LUN */
7440 spin_lock_irqsave(&hba
->outstanding_lock
, flags
);
7441 for_each_set_bit(pos
, &hba
->outstanding_reqs
, hba
->nutrs
)
7442 if (hba
->lrb
[pos
].lun
== lun
)
7443 __set_bit(pos
, &pending_reqs
);
7444 hba
->outstanding_reqs
&= ~pending_reqs
;
7445 spin_unlock_irqrestore(&hba
->outstanding_lock
, flags
);
7447 for_each_set_bit(pos
, &pending_reqs
, hba
->nutrs
) {
7448 if (ufshcd_clear_cmd(hba
, pos
) < 0) {
7449 spin_lock_irqsave(&hba
->outstanding_lock
, flags
);
7450 not_cleared
= 1U << pos
&
7451 ufshcd_readl(hba
, REG_UTP_TRANSFER_REQ_DOOR_BELL
);
7452 hba
->outstanding_reqs
|= not_cleared
;
7453 not_cleared_mask
|= not_cleared
;
7454 spin_unlock_irqrestore(&hba
->outstanding_lock
, flags
);
7456 dev_err(hba
->dev
, "%s: failed to clear request %d\n",
7460 __ufshcd_transfer_req_compl(hba
, pending_reqs
& ~not_cleared_mask
);
7463 hba
->req_abort_count
= 0;
7464 ufshcd_update_evt_hist(hba
, UFS_EVT_DEV_RESET
, (u32
)err
);
7468 dev_err(hba
->dev
, "%s: failed with err %d\n", __func__
, err
);
static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
{
	struct ufshcd_lrb *lrbp;
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutrs) {
		lrbp = &hba->lrb[tag];
		lrbp->req_abort_skip = true;
	}
}

7486 * ufshcd_try_to_abort_task - abort a specific task
7487 * @hba: Pointer to adapter instance
7488 * @tag: Task tag/index to be aborted
7490 * Abort the pending command in device by sending UFS_ABORT_TASK task management
7491 * command, and in host controller by clearing the door-bell register. There can
7492 * be race between controller sending the command to the device while abort is
7493 * issued. To avoid that, first issue UFS_QUERY_TASK to check if the command is
7494 * really issued and then try to abort it.
7496 * Return: zero on success, non-zero on failure.
7498 int ufshcd_try_to_abort_task(struct ufs_hba
*hba
, int tag
)
7500 struct ufshcd_lrb
*lrbp
= &hba
->lrb
[tag
];
7506 for (poll_cnt
= 100; poll_cnt
; poll_cnt
--) {
7507 err
= ufshcd_issue_tm_cmd(hba
, lrbp
->lun
, lrbp
->task_tag
,
7508 UFS_QUERY_TASK
, &resp
);
7509 if (!err
&& resp
== UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED
) {
7510 /* cmd pending in the device */
7511 dev_err(hba
->dev
, "%s: cmd pending in the device. tag = %d\n",
7514 } else if (!err
&& resp
== UPIU_TASK_MANAGEMENT_FUNC_COMPL
) {
7516 * cmd not pending in the device, check if it is
7519 dev_err(hba
->dev
, "%s: cmd at tag %d not pending in the device.\n",
7521 if (is_mcq_enabled(hba
)) {
7523 if (ufshcd_cmd_inflight(lrbp
->cmd
)) {
7524 /* sleep for max. 200us same delay as in SDB mode */
7525 usleep_range(100, 200);
7528 /* command completed already */
7529 dev_err(hba
->dev
, "%s: cmd at tag=%d is cleared.\n",
7534 /* Single Doorbell Mode */
7535 reg
= ufshcd_readl(hba
, REG_UTP_TRANSFER_REQ_DOOR_BELL
);
7536 if (reg
& (1 << tag
)) {
7537 /* sleep for max. 200us to stabilize */
7538 usleep_range(100, 200);
7541 /* command completed already */
7542 dev_err(hba
->dev
, "%s: cmd at tag %d successfully cleared from DB.\n",
7547 "%s: no response from device. tag = %d, err %d\n",
7548 __func__
, tag
, err
);
7550 err
= resp
; /* service response error */
7560 err
= ufshcd_issue_tm_cmd(hba
, lrbp
->lun
, lrbp
->task_tag
,
7561 UFS_ABORT_TASK
, &resp
);
7562 if (err
|| resp
!= UPIU_TASK_MANAGEMENT_FUNC_COMPL
) {
7564 err
= resp
; /* service response error */
7565 dev_err(hba
->dev
, "%s: issued. tag = %d, err %d\n",
7566 __func__
, tag
, err
);
7571 err
= ufshcd_clear_cmd(hba
, tag
);
7573 dev_err(hba
->dev
, "%s: Failed clearing cmd at tag %d, err %d\n",
7574 __func__
, tag
, err
);
7581 * ufshcd_abort - scsi host template eh_abort_handler callback
7582 * @cmd: SCSI command pointer
7584 * Return: SUCCESS or FAILED.
7586 static int ufshcd_abort(struct scsi_cmnd
*cmd
)
7588 struct Scsi_Host
*host
= cmd
->device
->host
;
7589 struct ufs_hba
*hba
= shost_priv(host
);
7590 int tag
= scsi_cmd_to_rq(cmd
)->tag
;
7591 struct ufshcd_lrb
*lrbp
= &hba
->lrb
[tag
];
7592 unsigned long flags
;
7599 if (!is_mcq_enabled(hba
)) {
7600 reg
= ufshcd_readl(hba
, REG_UTP_TRANSFER_REQ_DOOR_BELL
);
7601 if (!test_bit(tag
, &hba
->outstanding_reqs
)) {
7602 /* If command is already aborted/completed, return FAILED. */
7604 "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
7605 __func__
, tag
, hba
->outstanding_reqs
, reg
);
7610 /* Print Transfer Request of aborted task */
7611 dev_info(hba
->dev
, "%s: Device abort task at tag %d\n", __func__
, tag
);
7614 * Print detailed info about aborted request.
7615 * As more than one request might get aborted at the same time,
7616 * print full information only for the first aborted request in order
7617 * to reduce repeated printouts. For other aborted requests only print
7620 scsi_print_command(cmd
);
7621 if (!hba
->req_abort_count
) {
7622 ufshcd_update_evt_hist(hba
, UFS_EVT_ABORT
, tag
);
7623 ufshcd_print_evt_hist(hba
);
7624 ufshcd_print_host_state(hba
);
7625 ufshcd_print_pwr_info(hba
);
7626 ufshcd_print_tr(hba
, tag
, true);
7628 ufshcd_print_tr(hba
, tag
, false);
7630 hba
->req_abort_count
++;
7632 if (!is_mcq_enabled(hba
) && !(reg
& (1 << tag
))) {
7633 /* only execute this code in single doorbell mode */
7635 "%s: cmd was completed, but without a notifying intr, tag = %d",
7637 __ufshcd_transfer_req_compl(hba
, 1UL << tag
);
7642 * Task abort to the device W-LUN is illegal. When this command
7643 * will fail, due to spec violation, scsi err handling next step
7644 * will be to send LU reset which, again, is a spec violation.
7645 * To avoid these unnecessary/illegal steps, first we clean up
7646 * the lrb taken by this cmd and re-set it in outstanding_reqs,
7647 * then queue the eh_work and bail.
7649 if (lrbp
->lun
== UFS_UPIU_UFS_DEVICE_WLUN
) {
7650 ufshcd_update_evt_hist(hba
, UFS_EVT_ABORT
, lrbp
->lun
);
7652 spin_lock_irqsave(host
->host_lock
, flags
);
7653 hba
->force_reset
= true;
7654 ufshcd_schedule_eh_work(hba
);
7655 spin_unlock_irqrestore(host
->host_lock
, flags
);
7659 if (is_mcq_enabled(hba
)) {
7660 /* MCQ mode. Branch off to handle abort for mcq mode */
7661 err
= ufshcd_mcq_abort(cmd
);
7665 /* Skip task abort in case previous aborts failed and report failure */
7666 if (lrbp
->req_abort_skip
) {
7667 dev_err(hba
->dev
, "%s: skipping abort\n", __func__
);
7668 ufshcd_set_req_abort_skip(hba
, hba
->outstanding_reqs
);
7672 err
= ufshcd_try_to_abort_task(hba
, tag
);
7674 dev_err(hba
->dev
, "%s: failed with err %d\n", __func__
, err
);
7675 ufshcd_set_req_abort_skip(hba
, hba
->outstanding_reqs
);
7681 * Clear the corresponding bit from outstanding_reqs since the command
7682 * has been aborted successfully.
7684 spin_lock_irqsave(&hba
->outstanding_lock
, flags
);
7685 outstanding
= __test_and_clear_bit(tag
, &hba
->outstanding_reqs
);
7686 spin_unlock_irqrestore(&hba
->outstanding_lock
, flags
);
7689 ufshcd_release_scsi_cmd(hba
, lrbp
);
7694 /* Matches the ufshcd_hold() call at the start of this function. */
7695 ufshcd_release(hba
);
/**
 * ufshcd_host_reset_and_restore - reset and restore host controller
 * @hba: per-adapter instance
 *
 * Note that host controller reset may issue DME_RESET to
 * local and remote (device) Uni-Pro stack and the attributes
 * are reset to default state.
 *
 * Return: zero on success, non-zero on failure.
 */
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
{
	int err;

	/*
	 * Stop the host controller and complete the requests
	 * cleared by h/w
	 */
	ufshcd_hba_stop(hba);
	hba->silence_err_logs = true;
	ufshcd_complete_requests(hba, true);
	hba->silence_err_logs = false;

	/* scale up clocks to max frequency before full reinitialization */
	ufshcd_scale_clks(hba, ULONG_MAX, true);

	err = ufshcd_hba_enable(hba);

	/* Establish the link again and restore the device */
	if (!err)
		err = ufshcd_probe_hba(hba, false);

	if (err)
		dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
	ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err);
	return err;
}

7738 * ufshcd_reset_and_restore - reset and re-initialize host/device
7739 * @hba: per-adapter instance
7741 * Reset and recover device, host and re-establish link. This
7742 * is helpful to recover the communication in fatal error conditions.
7744 * Return: zero on success, non-zero on failure.
7746 static int ufshcd_reset_and_restore(struct ufs_hba
*hba
)
7749 u32 saved_uic_err
= 0;
7751 unsigned long flags
;
7752 int retries
= MAX_HOST_RESET_RETRIES
;
7754 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
7757 * This is a fresh start, cache and clear saved error first,
7758 * in case new error generated during reset and restore.
7760 saved_err
|= hba
->saved_err
;
7761 saved_uic_err
|= hba
->saved_uic_err
;
7763 hba
->saved_uic_err
= 0;
7764 hba
->force_reset
= false;
7765 hba
->ufshcd_state
= UFSHCD_STATE_RESET
;
7766 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
7768 /* Reset the attached device */
7769 ufshcd_device_reset(hba
);
7771 err
= ufshcd_host_reset_and_restore(hba
);
7773 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
7776 /* Do not exit unless operational or dead */
7777 if (hba
->ufshcd_state
!= UFSHCD_STATE_OPERATIONAL
&&
7778 hba
->ufshcd_state
!= UFSHCD_STATE_ERROR
&&
7779 hba
->ufshcd_state
!= UFSHCD_STATE_EH_SCHEDULED_NON_FATAL
)
7781 } while (err
&& --retries
);
7784 * Inform scsi mid-layer that we did reset and allow to handle
7785 * Unit Attention properly.
7787 scsi_report_bus_reset(hba
->host
, 0);
7789 hba
->ufshcd_state
= UFSHCD_STATE_ERROR
;
7790 hba
->saved_err
|= saved_err
;
7791 hba
->saved_uic_err
|= saved_uic_err
;
7793 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
7799 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
7800 * @cmd: SCSI command pointer
7802 * Return: SUCCESS or FAILED.
7804 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd
*cmd
)
7807 unsigned long flags
;
7808 struct ufs_hba
*hba
;
7810 hba
= shost_priv(cmd
->device
->host
);
7813 * If runtime PM sent SSU and got a timeout, scsi_error_handler is
7814 * stuck in this function waiting for flush_work(&hba->eh_work). And
7815 * ufshcd_err_handler(eh_work) is stuck waiting for runtime PM. Do
7816 * ufshcd_link_recovery instead of eh_work to prevent deadlock.
7818 if (hba
->pm_op_in_progress
) {
7819 if (ufshcd_link_recovery(hba
))
7825 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
7826 hba
->force_reset
= true;
7827 ufshcd_schedule_eh_work(hba
);
7828 dev_err(hba
->dev
, "%s: reset in progress - 1\n", __func__
);
7829 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
7831 flush_work(&hba
->eh_work
);
7833 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
7834 if (hba
->ufshcd_state
== UFSHCD_STATE_ERROR
)
7836 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
7842 * ufshcd_get_max_icc_level - calculate the ICC level
7843 * @sup_curr_uA: max. current supported by the regulator
7844 * @start_scan: row at the desc table to start scan from
7845 * @buff: power descriptor buffer
7847 * Return: calculated max ICC level for specific regulator.
7849 static u32
ufshcd_get_max_icc_level(int sup_curr_uA
, u32 start_scan
,
7857 for (i
= start_scan
; i
>= 0; i
--) {
7858 data
= get_unaligned_be16(&buff
[2 * i
]);
7859 unit
= (data
& ATTR_ICC_LVL_UNIT_MASK
) >>
7860 ATTR_ICC_LVL_UNIT_OFFSET
;
7861 curr_uA
= data
& ATTR_ICC_LVL_VALUE_MASK
;
7863 case UFSHCD_NANO_AMP
:
7864 curr_uA
= curr_uA
/ 1000;
7866 case UFSHCD_MILI_AMP
:
7867 curr_uA
= curr_uA
* 1000;
7870 curr_uA
= curr_uA
* 1000 * 1000;
7872 case UFSHCD_MICRO_AMP
:
7876 if (sup_curr_uA
>= curr_uA
)
7881 pr_err("%s: Couldn't find valid icc_level = %d", __func__
, i
);
7888 * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level
7889 * In case regulators are not initialized we'll return 0
7890 * @hba: per-adapter instance
7891 * @desc_buf: power descriptor buffer to extract ICC levels from.
7893 * Return: calculated ICC level.
7895 static u32
ufshcd_find_max_sup_active_icc_level(struct ufs_hba
*hba
,
7900 if (!hba
->vreg_info
.vcc
|| !hba
->vreg_info
.vccq
||
7901 !hba
->vreg_info
.vccq2
) {
7903 * Using dev_dbg to avoid messages during runtime PM to avoid
7904 * never-ending cycles of messages written back to storage by
7905 * user space causing runtime resume, causing more messages and
7909 "%s: Regulator capability was not set, actvIccLevel=%d",
7910 __func__
, icc_level
);
7914 if (hba
->vreg_info
.vcc
->max_uA
)
7915 icc_level
= ufshcd_get_max_icc_level(
7916 hba
->vreg_info
.vcc
->max_uA
,
7917 POWER_DESC_MAX_ACTV_ICC_LVLS
- 1,
7918 &desc_buf
[PWR_DESC_ACTIVE_LVLS_VCC_0
]);
7920 if (hba
->vreg_info
.vccq
->max_uA
)
7921 icc_level
= ufshcd_get_max_icc_level(
7922 hba
->vreg_info
.vccq
->max_uA
,
7924 &desc_buf
[PWR_DESC_ACTIVE_LVLS_VCCQ_0
]);
7926 if (hba
->vreg_info
.vccq2
->max_uA
)
7927 icc_level
= ufshcd_get_max_icc_level(
7928 hba
->vreg_info
.vccq2
->max_uA
,
7930 &desc_buf
[PWR_DESC_ACTIVE_LVLS_VCCQ2_0
]);
7935 static void ufshcd_set_active_icc_lvl(struct ufs_hba
*hba
)
7941 desc_buf
= kzalloc(QUERY_DESC_MAX_SIZE
, GFP_KERNEL
);
7945 ret
= ufshcd_read_desc_param(hba
, QUERY_DESC_IDN_POWER
, 0, 0,
7946 desc_buf
, QUERY_DESC_MAX_SIZE
);
7949 "%s: Failed reading power descriptor ret = %d",
7954 icc_level
= ufshcd_find_max_sup_active_icc_level(hba
, desc_buf
);
7955 dev_dbg(hba
->dev
, "%s: setting icc_level 0x%x", __func__
, icc_level
);
7957 ret
= ufshcd_query_attr_retry(hba
, UPIU_QUERY_OPCODE_WRITE_ATTR
,
7958 QUERY_ATTR_IDN_ACTIVE_ICC_LVL
, 0, 0, &icc_level
);
7962 "%s: Failed configuring bActiveICCLevel = %d ret = %d",
7963 __func__
, icc_level
, ret
);
static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev)
{
	scsi_autopm_get_device(sdev);
	blk_pm_runtime_init(sdev->request_queue, &sdev->sdev_gendev);
	if (sdev->rpm_autosuspend)
		pm_runtime_set_autosuspend_delay(&sdev->sdev_gendev,
						 RPM_AUTOSUSPEND_DELAY_MS);
	scsi_autopm_put_device(sdev);
}

7980 * ufshcd_scsi_add_wlus - Adds required W-LUs
7981 * @hba: per-adapter instance
7983 * UFS device specification requires the UFS devices to support 4 well known
7985 * "REPORT_LUNS" (address: 01h)
7986 * "UFS Device" (address: 50h)
7987 * "RPMB" (address: 44h)
7988 * "BOOT" (address: 30h)
7989 * UFS device's power management needs to be controlled by "POWER CONDITION"
7990 * field of SSU (START STOP UNIT) command. But this "power condition" field
7991 * will take effect only when its sent to "UFS device" well known logical unit
7992 * hence we require the scsi_device instance to represent this logical unit in
7993 * order for the UFS host driver to send the SSU command for power management.
7995 * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
7996 * Block) LU so user space process can control this LU. User space may also
7997 * want to have access to BOOT LU.
7999 * This function adds scsi device instances for each of all well known LUs
8000 * (except "REPORT LUNS" LU).
8002 * Return: zero on success (all required W-LUs are added successfully),
8003 * non-zero error value on failure (if failed to add any of the required W-LU).
8005 static int ufshcd_scsi_add_wlus(struct ufs_hba
*hba
)
8008 struct scsi_device
*sdev_boot
, *sdev_rpmb
;
8010 hba
->ufs_device_wlun
= __scsi_add_device(hba
->host
, 0, 0,
8011 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN
), NULL
);
8012 if (IS_ERR(hba
->ufs_device_wlun
)) {
8013 ret
= PTR_ERR(hba
->ufs_device_wlun
);
8014 hba
->ufs_device_wlun
= NULL
;
8017 scsi_device_put(hba
->ufs_device_wlun
);
8019 sdev_rpmb
= __scsi_add_device(hba
->host
, 0, 0,
8020 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN
), NULL
);
8021 if (IS_ERR(sdev_rpmb
)) {
8022 ret
= PTR_ERR(sdev_rpmb
);
8023 goto remove_ufs_device_wlun
;
8025 ufshcd_blk_pm_runtime_init(sdev_rpmb
);
8026 scsi_device_put(sdev_rpmb
);
8028 sdev_boot
= __scsi_add_device(hba
->host
, 0, 0,
8029 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN
), NULL
);
8030 if (IS_ERR(sdev_boot
)) {
8031 dev_err(hba
->dev
, "%s: BOOT WLUN not found\n", __func__
);
8033 ufshcd_blk_pm_runtime_init(sdev_boot
);
8034 scsi_device_put(sdev_boot
);
8038 remove_ufs_device_wlun
:
8039 scsi_remove_device(hba
->ufs_device_wlun
);
8044 static void ufshcd_wb_probe(struct ufs_hba
*hba
, const u8
*desc_buf
)
8046 struct ufs_dev_info
*dev_info
= &hba
->dev_info
;
8048 u32 d_lu_wb_buf_alloc
;
8049 u32 ext_ufs_feature
;
8051 if (!ufshcd_is_wb_allowed(hba
))
8055 * Probe WB only for UFS-2.2 and UFS-3.1 (and later) devices or
8056 * UFS devices with quirk UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES
8059 if (!(dev_info
->wspecversion
>= 0x310 ||
8060 dev_info
->wspecversion
== 0x220 ||
8061 (hba
->dev_quirks
& UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES
)))
8064 ext_ufs_feature
= get_unaligned_be32(desc_buf
+
8065 DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP
);
8067 if (!(ext_ufs_feature
& UFS_DEV_WRITE_BOOSTER_SUP
))
8071 * WB may be supported but not configured while provisioning. The spec
8072 * says, in dedicated wb buffer mode, a max of 1 lun would have wb
8073 * buffer configured.
8075 dev_info
->wb_buffer_type
= desc_buf
[DEVICE_DESC_PARAM_WB_TYPE
];
8077 dev_info
->b_presrv_uspc_en
=
8078 desc_buf
[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN
];
8080 if (dev_info
->wb_buffer_type
== WB_BUF_MODE_SHARED
) {
8081 if (!get_unaligned_be32(desc_buf
+
8082 DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS
))
8085 for (lun
= 0; lun
< UFS_UPIU_MAX_WB_LUN_ID
; lun
++) {
8086 d_lu_wb_buf_alloc
= 0;
8087 ufshcd_read_unit_desc_param(hba
,
8089 UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS
,
8090 (u8
*)&d_lu_wb_buf_alloc
,
8091 sizeof(d_lu_wb_buf_alloc
));
8092 if (d_lu_wb_buf_alloc
) {
8093 dev_info
->wb_dedicated_lu
= lun
;
8098 if (!d_lu_wb_buf_alloc
)
8102 if (!ufshcd_is_wb_buf_lifetime_available(hba
))
8108 hba
->caps
&= ~UFSHCD_CAP_WB_EN
;
static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf)
{
	struct ufs_dev_info *dev_info = &hba->dev_info;
	u32 ext_ufs_feature;
	u8 mask = 0;

	if (!(hba->caps & UFSHCD_CAP_TEMP_NOTIF) || dev_info->wspecversion < 0x300)
		return;

	ext_ufs_feature = get_unaligned_be32(desc_buf + DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);

	if (ext_ufs_feature & UFS_DEV_LOW_TEMP_NOTIF)
		mask |= MASK_EE_TOO_LOW_TEMP;

	if (ext_ufs_feature & UFS_DEV_HIGH_TEMP_NOTIF)
		mask |= MASK_EE_TOO_HIGH_TEMP;

	if (mask) {
		ufshcd_enable_ee(hba, mask);
		ufs_hwmon_probe(hba, mask);
	}
}

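/*
 * EXT_IID is optional and only defined for UFS 4.0 and later devices; read
 * the extended feature bit and cache bEXTIIDEn so the initiator ID
 * extension can be used when the device supports it.
 */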
8134 static void ufshcd_ext_iid_probe(struct ufs_hba
*hba
, u8
*desc_buf
)
8136 struct ufs_dev_info
*dev_info
= &hba
->dev_info
;
8137 u32 ext_ufs_feature
;
8141 /* Only UFS-4.0 and above may support EXT_IID */
8142 if (dev_info
->wspecversion
< 0x400)
8145 ext_ufs_feature
= get_unaligned_be32(desc_buf
+
8146 DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP
);
8147 if (!(ext_ufs_feature
& UFS_DEV_EXT_IID_SUP
))
8150 err
= ufshcd_query_attr_retry(hba
, UPIU_QUERY_OPCODE_READ_ATTR
,
8151 QUERY_ATTR_IDN_EXT_IID_EN
, 0, 0, &ext_iid_en
);
8153 dev_err(hba
->dev
, "failed reading bEXTIIDEn. err = %d\n", err
);
8156 dev_info
->b_ext_iid_en
= ext_iid_en
;
void ufshcd_fixup_dev_quirks(struct ufs_hba *hba,
			     const struct ufs_dev_quirk *fixups)
{
	const struct ufs_dev_quirk *f;
	struct ufs_dev_info *dev_info = &hba->dev_info;

	if (!fixups)
		return;

	for (f = fixups; f->quirk; f++) {
		if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
		     f->wmanufacturerid == UFS_ANY_VENDOR) &&
		     ((dev_info->model &&
		       STR_PRFX_EQUAL(f->model, dev_info->model)) ||
		      !strcmp(f->model, UFS_ANY_MODEL)))
			hba->dev_quirks |= f->quirk;
	}
}
EXPORT_SYMBOL_GPL(ufshcd_fixup_dev_quirks);

static void ufs_fixup_device_setup(struct ufs_hba *hba)
{
	/* fix by general quirk table */
	ufshcd_fixup_dev_quirks(hba, ufs_fixups);

	/* allow vendors to fix quirks */
	ufshcd_vops_fixup_dev_quirks(hba);
}

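/*
 * ufs_get_device_desc() below reads the device descriptor and caches the
 * manufacturer ID, specification version, queue depth and model string
 * before applying device quirks and probing optional features.
 */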
8188 static int ufs_get_device_desc(struct ufs_hba
*hba
)
8193 struct ufs_dev_info
*dev_info
= &hba
->dev_info
;
8195 desc_buf
= kzalloc(QUERY_DESC_MAX_SIZE
, GFP_KERNEL
);
8201 err
= ufshcd_read_desc_param(hba
, QUERY_DESC_IDN_DEVICE
, 0, 0, desc_buf
,
8202 QUERY_DESC_MAX_SIZE
);
8204 dev_err(hba
->dev
, "%s: Failed reading Device Desc. err = %d\n",
8210 * getting vendor (manufacturerID) and Bank Index in big endian
8213 dev_info
->wmanufacturerid
= desc_buf
[DEVICE_DESC_PARAM_MANF_ID
] << 8 |
8214 desc_buf
[DEVICE_DESC_PARAM_MANF_ID
+ 1];
8216 /* getting Specification Version in big endian format */
8217 dev_info
->wspecversion
= desc_buf
[DEVICE_DESC_PARAM_SPEC_VER
] << 8 |
8218 desc_buf
[DEVICE_DESC_PARAM_SPEC_VER
+ 1];
8219 dev_info
->bqueuedepth
= desc_buf
[DEVICE_DESC_PARAM_Q_DPTH
];
8221 model_index
= desc_buf
[DEVICE_DESC_PARAM_PRDCT_NAME
];
8223 err
= ufshcd_read_string_desc(hba
, model_index
,
8224 &dev_info
->model
, SD_ASCII_STD
);
8226 dev_err(hba
->dev
, "%s: Failed reading Product Name. err = %d\n",
8231 hba
->luns_avail
= desc_buf
[DEVICE_DESC_PARAM_NUM_LU
] +
8232 desc_buf
[DEVICE_DESC_PARAM_NUM_WLU
];
8234 ufs_fixup_device_setup(hba
);
8236 ufshcd_wb_probe(hba
, desc_buf
);
8238 ufshcd_temp_notif_probe(hba
, desc_buf
);
8240 if (hba
->ext_iid_sup
)
8241 ufshcd_ext_iid_probe(hba
, desc_buf
);
8244 * ufshcd_read_string_desc returns size of the string
8245 * reset the error value
static void ufs_put_device_desc(struct ufs_hba *hba)
{
	struct ufs_dev_info *dev_info = &hba->dev_info;

	kfree(dev_info->model);
	dev_info->model = NULL;
}

8263 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
8264 * @hba: per-adapter instance
8266 * PA_TActivate parameter can be tuned manually if UniPro version is less than
8267 * 1.61. PA_TActivate needs to be greater than or equal to peerM-PHY's
8268 * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
8269 * the hibern8 exit latency.
8271 * Return: zero on success, non-zero error value on failure.
8273 static int ufshcd_tune_pa_tactivate(struct ufs_hba
*hba
)
8276 u32 peer_rx_min_activatetime
= 0, tuned_pa_tactivate
;
8278 ret
= ufshcd_dme_peer_get(hba
,
8280 RX_MIN_ACTIVATETIME_CAPABILITY
,
8281 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
8282 &peer_rx_min_activatetime
);
8286 /* make sure proper unit conversion is applied */
8287 tuned_pa_tactivate
=
8288 ((peer_rx_min_activatetime
* RX_MIN_ACTIVATETIME_UNIT_US
)
8289 / PA_TACTIVATE_TIME_UNIT_US
);
8290 ret
= ufshcd_dme_set(hba
, UIC_ARG_MIB(PA_TACTIVATE
),
8291 tuned_pa_tactivate
);
8298 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
8299 * @hba: per-adapter instance
8301 * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
8302 * 1.61. PA_Hibern8Time needs to be maximum of local M-PHY's
8303 * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
8304 * This optimal value can help reduce the hibern8 exit latency.
8306 * Return: zero on success, non-zero error value on failure.
8308 static int ufshcd_tune_pa_hibern8time(struct ufs_hba
*hba
)
8311 u32 local_tx_hibern8_time_cap
= 0, peer_rx_hibern8_time_cap
= 0;
8312 u32 max_hibern8_time
, tuned_pa_hibern8time
;
8314 ret
= ufshcd_dme_get(hba
,
8315 UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY
,
8316 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
8317 &local_tx_hibern8_time_cap
);
8321 ret
= ufshcd_dme_peer_get(hba
,
8322 UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY
,
8323 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
8324 &peer_rx_hibern8_time_cap
);
8328 max_hibern8_time
= max(local_tx_hibern8_time_cap
,
8329 peer_rx_hibern8_time_cap
);
8330 /* make sure proper unit conversion is applied */
8331 tuned_pa_hibern8time
= ((max_hibern8_time
* HIBERN8TIME_UNIT_US
)
8332 / PA_HIBERN8_TIME_UNIT_US
);
8333 ret
= ufshcd_dme_set(hba
, UIC_ARG_MIB(PA_HIBERN8TIME
),
8334 tuned_pa_hibern8time
);
/**
 * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
 * less than device PA_TACTIVATE time.
 * @hba: per-adapter instance
 *
 * Some UFS devices require host PA_TACTIVATE to be lower than device
 * PA_TACTIVATE, we need to enable UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
 * for such devices.
 *
 * Return: zero on success, non-zero error value on failure.
 */
static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
{
	int ret = 0;
	u32 granularity, peer_granularity;
	u32 pa_tactivate, peer_pa_tactivate;
	u32 pa_tactivate_us, peer_pa_tactivate_us;
	static const u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
			     &granularity);
	if (ret)
		goto out;

	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
				  &peer_granularity);
	if (ret)
		goto out;

	if ((granularity < PA_GRANULARITY_MIN_VAL) ||
	    (granularity > PA_GRANULARITY_MAX_VAL)) {
		dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
			__func__, granularity);
		return -EINVAL;
	}

	if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
	    (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
		dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
			__func__, peer_granularity);
		return -EINVAL;
	}

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
	if (ret)
		goto out;

	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
				  &peer_pa_tactivate);
	if (ret)
		goto out;

	pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
	peer_pa_tactivate_us = peer_pa_tactivate *
			       gran_to_us_table[peer_granularity - 1];

	if (pa_tactivate_us >= peer_pa_tactivate_us) {
		u32 new_peer_pa_tactivate;

		new_peer_pa_tactivate = pa_tactivate_us /
					gran_to_us_table[peer_granularity - 1];
		new_peer_pa_tactivate++;
		ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
					  new_peer_pa_tactivate);
	}

out:
	return ret;
}
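
/*
 * Worked example of the quirk above (illustrative values): with host
 * PA_GRANULARITY = 3 (8 us steps per gran_to_us_table[]) and host
 * PA_TACTIVATE = 4, the host activate time is 4 * 8 = 32 us. If the device
 * uses PA_GRANULARITY = 2 (4 us steps) and its own activate time is not
 * already larger, the device PA_TACTIVATE is raised to 32 / 4 + 1 = 9 steps
 * (36 us), keeping the device value strictly above the host value.
 */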
static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
{
	if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
		ufshcd_tune_pa_tactivate(hba);
		ufshcd_tune_pa_hibern8time(hba);
	}

	ufshcd_vops_apply_dev_quirks(hba);

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
		/* set 1ms timeout for PA_TACTIVATE */
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
		ufshcd_quirk_tune_host_pa_tactivate(hba);
}

static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
{
	hba->ufs_stats.hibern8_exit_cnt = 0;
	hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
	hba->req_abort_count = 0;
}
static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
{
	int err;
	u8 *desc_buf;

	desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
	if (!desc_buf)
		return -ENOMEM;

	err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0,
				     desc_buf, QUERY_DESC_MAX_SIZE);
	if (err) {
		dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
			__func__, err);
		goto out;
	}

	if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1)
		hba->dev_info.max_lu_supported = 32;
	else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
		hba->dev_info.max_lu_supported = 8;

out:
	kfree(desc_buf);
	return err;
}
struct ufs_ref_clk {
	unsigned long freq_hz;
	enum ufs_ref_clk_freq val;
};

static const struct ufs_ref_clk ufs_ref_clk_freqs[] = {
	{19200000, REF_CLK_FREQ_19_2_MHZ},
	{26000000, REF_CLK_FREQ_26_MHZ},
	{38400000, REF_CLK_FREQ_38_4_MHZ},
	{52000000, REF_CLK_FREQ_52_MHZ},
	{0, REF_CLK_FREQ_INVAL},
};

static enum ufs_ref_clk_freq
ufs_get_bref_clk_from_hz(unsigned long freq)
{
	int i;

	for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++)
		if (ufs_ref_clk_freqs[i].freq_hz == freq)
			return ufs_ref_clk_freqs[i].val;

	return REF_CLK_FREQ_INVAL;
}
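
/*
 * Example of the lookup above: a "ref_clk" clock running at 26000000 Hz maps
 * to REF_CLK_FREQ_26_MHZ, while a rate that is not in ufs_ref_clk_freqs[]
 * (say 24000000 Hz) yields REF_CLK_FREQ_INVAL, in which case the bRefClkFreq
 * attribute on the device is left untouched.
 */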
void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
{
	unsigned long freq;

	freq = clk_get_rate(refclk);

	hba->dev_ref_clk_freq =
		ufs_get_bref_clk_from_hz(freq);

	if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
		dev_err(hba->dev,
			"invalid ref_clk setting = %ld\n", freq);
}

static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
{
	int err;
	u32 ref_clk;
	u32 freq = hba->dev_ref_clk_freq;

	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);
	if (err) {
		dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
			err);
		goto out;
	}

	if (ref_clk == freq)
		goto out; /* nothing to update */

	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
			QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq);
	if (err) {
		dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
			ufs_ref_clk_freqs[freq].freq_hz);
		goto out;
	}

	dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n",
			ufs_ref_clk_freqs[freq].freq_hz);

out:
	return err;
}
static int ufshcd_device_params_init(struct ufs_hba *hba)
{
	bool flag;
	int ret;

	/* Init UFS geometry descriptor related parameters */
	ret = ufshcd_device_geo_params_init(hba);
	if (ret)
		goto out;

	/* Check and apply UFS device quirks */
	ret = ufs_get_device_desc(hba);
	if (ret) {
		dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
			__func__, ret);
		goto out;
	}

	ufshcd_get_ref_clk_gating_wait(hba);

	if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
			QUERY_FLAG_IDN_PWR_ON_WPE, 0, &flag))
		hba->dev_info.f_power_on_wp_en = flag;

	/* Probe maximum power mode co-supported by both UFS host and device */
	if (ufshcd_get_max_pwr_mode(hba))
		dev_err(hba->dev,
			"%s: Failed getting max supported power mode\n",
			__func__);
out:
	return ret;
}
static void ufshcd_set_timestamp_attr(struct ufs_hba *hba)
{
	int err;
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	struct ufs_dev_info *dev_info = &hba->dev_info;
	struct utp_upiu_query_v4_0 *upiu_data;

	if (dev_info->wspecversion < 0x400)
		return;

	ufshcd_hold(hba);

	mutex_lock(&hba->dev_cmd.lock);

	ufshcd_init_query(hba, &request, &response,
			  UPIU_QUERY_OPCODE_WRITE_ATTR,
			  QUERY_ATTR_IDN_TIMESTAMP, 0, 0);

	request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;

	upiu_data = (struct utp_upiu_query_v4_0 *)&request->upiu_req;

	put_unaligned_be64(ktime_get_real_ns(), &upiu_data->osf3);

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);

	if (err)
		dev_err(hba->dev, "%s: failed to set timestamp %d\n",
			__func__, err);

	mutex_unlock(&hba->dev_cmd.lock);
	ufshcd_release(hba);
}
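
/*
 * Example of what the attribute write above carries (illustrative): for a
 * UFS 4.0 device the wTimestamp payload is the wall-clock time returned by
 * ktime_get_real_ns(), stored big-endian as a 64-bit nanosecond count
 * starting at the osf3 field of the WRITE_ATTR UPIU, e.g. a value around
 * 1700000000000000000 ns for a date in late 2023.
 */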
/**
 * ufshcd_add_lus - probe and add UFS logical units
 * @hba: per-adapter instance
 *
 * Return: 0 upon success; < 0 upon failure.
 */
static int ufshcd_add_lus(struct ufs_hba *hba)
{
	int ret;

	/* Add required well known logical units to scsi mid layer */
	ret = ufshcd_scsi_add_wlus(hba);
	if (ret)
		goto out;

	/* Initialize devfreq after UFS device is detected */
	if (ufshcd_is_clkscaling_supported(hba)) {
		memcpy(&hba->clk_scaling.saved_pwr_info,
			&hba->pwr_info,
			sizeof(struct ufs_pa_layer_attr));
		hba->clk_scaling.is_allowed = true;

		ret = ufshcd_devfreq_init(hba);
		if (ret)
			goto out;

		hba->clk_scaling.is_enabled = true;
		ufshcd_init_clk_scaling_sysfs(hba);
	}

	scsi_scan_host(hba->host);
	pm_runtime_put_sync(hba->dev);

out:
	return ret;
}
/* SDB - Single Doorbell */
static void ufshcd_release_sdb_queue(struct ufs_hba *hba, int nutrs)
{
	size_t ucdl_size, utrdl_size;

	ucdl_size = ufshcd_get_ucd_size(hba) * nutrs;
	dmam_free_coherent(hba->dev, ucdl_size, hba->ucdl_base_addr,
			   hba->ucdl_dma_addr);

	utrdl_size = sizeof(struct utp_transfer_req_desc) * nutrs;
	dmam_free_coherent(hba->dev, utrdl_size, hba->utrdl_base_addr,
			   hba->utrdl_dma_addr);

	devm_kfree(hba->dev, hba->lrb);
}

static int ufshcd_alloc_mcq(struct ufs_hba *hba)
{
	int ret;
	int old_nutrs = hba->nutrs;

	ret = ufshcd_mcq_decide_queue_depth(hba);
	if (ret < 0)
		return ret;

	hba->nutrs = ret;
	ret = ufshcd_mcq_init(hba);
	if (ret)
		goto err;

	/*
	 * Previously allocated memory for nutrs may not be enough in MCQ mode.
	 * Number of supported tags in MCQ mode may be larger than SDB mode.
	 */
	if (hba->nutrs != old_nutrs) {
		ufshcd_release_sdb_queue(hba, old_nutrs);
		ret = ufshcd_memory_alloc(hba);
		if (ret)
			goto err;
		ufshcd_host_memory_configure(hba);
	}

	ret = ufshcd_mcq_memory_alloc(hba);
	if (ret)
		goto err;

	return 0;
err:
	hba->nutrs = old_nutrs;
	return ret;
}
static void ufshcd_config_mcq(struct ufs_hba *hba)
{
	int ret;
	u32 intrs;

	ret = ufshcd_mcq_vops_config_esi(hba);
	dev_info(hba->dev, "ESI %sconfigured\n", ret ? "is not " : "");

	intrs = UFSHCD_ENABLE_MCQ_INTRS;
	if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_INTR)
		intrs &= ~MCQ_CQ_EVENT_STATUS;
	ufshcd_enable_intr(hba, intrs);
	ufshcd_mcq_make_queues_operational(hba);
	ufshcd_mcq_config_mac(hba, hba->nutrs);

	hba->host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
	hba->reserved_slot = hba->nutrs - UFSHCD_NUM_RESERVED;

	/* Select MCQ mode */
	ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x1,
		      REG_UFS_MEM_CFG);
	hba->mcq_enabled = true;

	dev_info(hba->dev, "MCQ configured, nr_queues=%d, io_queues=%d, read_queue=%d, poll_queues=%d, queue_depth=%d\n",
		 hba->nr_hw_queues, hba->nr_queues[HCTX_TYPE_DEFAULT],
		 hba->nr_queues[HCTX_TYPE_READ], hba->nr_queues[HCTX_TYPE_POLL],
		 hba->nutrs);
}
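
/*
 * Example of the tag accounting above (illustrative numbers): with an MCQ
 * queue depth of hba->nutrs = 64 and one reserved tag (UFSHCD_NUM_RESERVED),
 * the SCSI midlayer is told can_queue = 63, while the last tag
 * (reserved_slot = 63) is kept back for internal device management commands.
 */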
static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
{
	int ret;
	struct Scsi_Host *host = hba->host;

	hba->ufshcd_state = UFSHCD_STATE_RESET;

	ret = ufshcd_link_startup(hba);
	if (ret)
		return ret;

	if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION)
		return ret;

	/* Debug counters initialization */
	ufshcd_clear_dbg_ufs_stats(hba);

	/* UniPro link is active now */
	ufshcd_set_link_active(hba);

	/* Reconfigure MCQ upon reset */
	if (is_mcq_enabled(hba) && !init_dev_params)
		ufshcd_config_mcq(hba);

	/* Verify device initialization by sending NOP OUT UPIU */
	ret = ufshcd_verify_dev_init(hba);
	if (ret)
		return ret;

	/* Initiate UFS initialization, and waiting until completion */
	ret = ufshcd_complete_dev_init(hba);
	if (ret)
		return ret;

	/*
	 * Initialize UFS device parameters used by driver, these
	 * parameters are associated with UFS descriptors.
	 */
	if (init_dev_params) {
		ret = ufshcd_device_params_init(hba);
		if (ret)
			return ret;
		if (is_mcq_supported(hba) && !hba->scsi_host_added) {
			ret = ufshcd_alloc_mcq(hba);
			if (!ret) {
				ufshcd_config_mcq(hba);
			} else {
				/* Continue with SDB mode */
				use_mcq_mode = false;
				dev_err(hba->dev, "MCQ mode is disabled, err=%d\n",
					ret);
			}
			ret = scsi_add_host(host, hba->dev);
			if (ret) {
				dev_err(hba->dev, "scsi_add_host failed\n");
				return ret;
			}
			hba->scsi_host_added = true;
		} else if (is_mcq_supported(hba)) {
			/* UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH is set */
			ufshcd_config_mcq(hba);
		}
	}

	ufshcd_tune_unipro_params(hba);

	/* UFS device is also active now */
	ufshcd_set_ufs_dev_active(hba);
	ufshcd_force_reset_auto_bkops(hba);

	ufshcd_set_timestamp_attr(hba);

	/* Gear up to HS gear if supported */
	if (hba->max_pwr_info.is_valid) {
		/*
		 * Set the right value to bRefClkFreq before attempting to
		 * switch to HS gears.
		 */
		if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
			ufshcd_set_dev_ref_clk(hba);
		ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
		if (ret) {
			dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
				__func__, ret);
			return ret;
		}
	}

	return 0;
}
/**
 * ufshcd_probe_hba - probe hba to detect device and initialize it
 * @hba: per-adapter instance
 * @init_dev_params: whether or not to call ufshcd_device_params_init().
 *
 * Execute link-startup and verify device initialization.
 *
 * Return: 0 upon success; < 0 upon failure.
 */
static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
{
	ktime_t start = ktime_get();
	unsigned long flags;
	int ret;

	ret = ufshcd_device_init(hba, init_dev_params);
	if (ret)
		goto out;

	if (!hba->pm_op_in_progress &&
	    (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH)) {
		/* Reset the device and controller before doing reinit */
		ufshcd_device_reset(hba);
		ufshcd_hba_stop(hba);
		ufshcd_vops_reinit_notify(hba);
		ret = ufshcd_hba_enable(hba);
		if (ret) {
			dev_err(hba->dev, "Host controller enable failed\n");
			ufshcd_print_evt_hist(hba);
			ufshcd_print_host_state(hba);
			goto out;
		}

		/* Reinit the device */
		ret = ufshcd_device_init(hba, init_dev_params);
		if (ret)
			goto out;
	}

	ufshcd_print_pwr_info(hba);

	/*
	 * bActiveICCLevel is volatile for UFS device (as per latest v2.1 spec)
	 * and for removable UFS card as well, hence always set the parameter.
	 * Note: Error handler may issue the device reset hence resetting
	 * bActiveICCLevel as well so it is always safe to set this here.
	 */
	ufshcd_set_active_icc_lvl(hba);

	/* Enable UFS Write Booster if supported */
	ufshcd_configure_wb(hba);

	if (hba->ee_usr_mask)
		ufshcd_write_ee_control(hba);
	/* Enable Auto-Hibernate if configured */
	ufshcd_auto_hibern8_enable(hba);

out:
	spin_lock_irqsave(hba->host->host_lock, flags);
	if (ret)
		hba->ufshcd_state = UFSHCD_STATE_ERROR;
	else if (hba->ufshcd_state == UFSHCD_STATE_RESET)
		hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	trace_ufshcd_init(dev_name(hba->dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	return ret;
}
/**
 * ufshcd_async_scan - asynchronous execution for probing hba
 * @data: data pointer to pass to this function
 * @cookie: cookie data
 */
static void ufshcd_async_scan(void *data, async_cookie_t cookie)
{
	struct ufs_hba *hba = (struct ufs_hba *)data;
	int ret;

	down(&hba->host_sem);
	/* Initialize hba, detect and initialize UFS device */
	ret = ufshcd_probe_hba(hba, true);
	up(&hba->host_sem);
	if (ret)
		goto out;

	/* Probe and add UFS logical units */
	ret = ufshcd_add_lus(hba);

out:
	/*
	 * If we failed to initialize the device or the device is not
	 * present, turn off the power/clocks etc.
	 */
	if (ret) {
		pm_runtime_put_sync(hba->dev);
		ufshcd_hba_exit(hba);
	}
}

static enum scsi_timeout_action ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
{
	struct ufs_hba *hba = shost_priv(scmd->device->host);

	if (!hba->system_suspending) {
		/* Activate the error handler in the SCSI core. */
		return SCSI_EH_NOT_HANDLED;
	}

	/*
	 * If we get here we know that no TMFs are outstanding and also that
	 * the only pending command is a START STOP UNIT command. Handle the
	 * timeout of that command directly to prevent a deadlock between
	 * ufshcd_set_dev_pwr_mode() and ufshcd_err_handler().
	 */
	ufshcd_link_recovery(hba);
	dev_info(hba->dev, "%s() finished; outstanding_tasks = %#lx.\n",
		 __func__, hba->outstanding_tasks);

	return hba->outstanding_reqs ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE;
}
static const struct attribute_group *ufshcd_driver_groups[] = {
	&ufs_sysfs_unit_descriptor_group,
	&ufs_sysfs_lun_attributes_group,
	NULL,
};

static struct ufs_hba_variant_params ufs_hba_vps = {
	.hba_enable_delay_us		= 1000,
	.wb_flush_threshold		= UFS_WB_BUF_REMAIN_PERCENT(40),
	.devfreq_profile.polling_ms	= 100,
	.devfreq_profile.target		= ufshcd_devfreq_target,
	.devfreq_profile.get_dev_status	= ufshcd_devfreq_get_dev_status,
	.ondemand_data.upthreshold	= 70,
	.ondemand_data.downdifferential	= 5,
};

static const struct scsi_host_template ufshcd_driver_template = {
	.module			= THIS_MODULE,
	.name			= UFSHCD,
	.proc_name		= UFSHCD,
	.map_queues		= ufshcd_map_queues,
	.queuecommand		= ufshcd_queuecommand,
	.mq_poll		= ufshcd_poll,
	.slave_alloc		= ufshcd_slave_alloc,
	.slave_configure	= ufshcd_slave_configure,
	.slave_destroy		= ufshcd_slave_destroy,
	.change_queue_depth	= ufshcd_change_queue_depth,
	.eh_abort_handler	= ufshcd_abort,
	.eh_device_reset_handler = ufshcd_eh_device_reset_handler,
	.eh_host_reset_handler	= ufshcd_eh_host_reset_handler,
	.eh_timed_out		= ufshcd_eh_timed_out,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.cmd_per_lun		= UFSHCD_CMD_PER_LUN,
	.can_queue		= UFSHCD_CAN_QUEUE,
	.max_segment_size	= PRDT_DATA_BYTE_COUNT_MAX,
	.max_sectors		= SZ_1M / SECTOR_SIZE,
	.max_host_blocked	= 1,
	.track_queue_depth	= 1,
	.skip_settle_delay	= 1,
	.sdev_groups		= ufshcd_driver_groups,
	.rpm_autosuspend_delay	= RPM_AUTOSUSPEND_DELAY_MS,
};
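
/*
 * Sizing note for the template above (derived from the constants it uses):
 * with the standard 512-byte SECTOR_SIZE, .max_sectors = SZ_1M / SECTOR_SIZE
 * = 2048, so a single SCSI command is capped at 1 MiB of data, and each
 * scatter-gather segment is further bounded by PRDT_DATA_BYTE_COUNT_MAX.
 */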
static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
				   int ua)
{
	int ret;

	if (!vreg)
		return 0;

	/*
	 * "set_load" operation shall be required on those regulators
	 * which specifically configured current limitation. Otherwise
	 * zero max_uA may cause unexpected behavior when regulator is
	 * enabled or set as high power mode.
	 */
	if (!vreg->max_uA)
		return 0;

	ret = regulator_set_load(vreg->reg, ua);
	if (ret < 0) {
		dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
				__func__, vreg->name, ua, ret);
	}

	return ret;
}

static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
					 struct ufs_vreg *vreg)
{
	return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
}

static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
					 struct ufs_vreg *vreg)
{
	if (!vreg)
		return 0;

	return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
}

static int ufshcd_config_vreg(struct device *dev,
			      struct ufs_vreg *vreg, bool on)
{
	if (regulator_count_voltages(vreg->reg) <= 0)
		return 0;

	return ufshcd_config_vreg_load(dev, vreg, on ? vreg->max_uA : 0);
}
static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (!vreg || vreg->enabled)
		goto out;

	ret = ufshcd_config_vreg(dev, vreg, true);
	if (!ret)
		ret = regulator_enable(vreg->reg);

	if (!ret)
		vreg->enabled = true;
	else
		dev_err(dev, "%s: %s enable failed, err=%d\n",
				__func__, vreg->name, ret);
out:
	return ret;
}

static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (!vreg || !vreg->enabled || vreg->always_on)
		goto out;

	ret = regulator_disable(vreg->reg);

	if (!ret) {
		/* ignore errors on applying disable config */
		ufshcd_config_vreg(dev, vreg, false);
		vreg->enabled = false;
	} else {
		dev_err(dev, "%s: %s disable failed, err=%d\n",
				__func__, vreg->name, ret);
	}
out:
	return ret;
}
static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
{
	int ret = 0;
	struct device *dev = hba->dev;
	struct ufs_vreg_info *info = &hba->vreg_info;

	ret = ufshcd_toggle_vreg(dev, info->vcc, on);
	if (ret)
		goto out;

	ret = ufshcd_toggle_vreg(dev, info->vccq, on);
	if (ret)
		goto out;

	ret = ufshcd_toggle_vreg(dev, info->vccq2, on);

out:
	if (ret) {
		ufshcd_toggle_vreg(dev, info->vccq2, false);
		ufshcd_toggle_vreg(dev, info->vccq, false);
		ufshcd_toggle_vreg(dev, info->vcc, false);
	}
	return ret;
}

static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
{
	struct ufs_vreg_info *info = &hba->vreg_info;

	return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
}
int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (!vreg)
		goto out;

	vreg->reg = devm_regulator_get(dev, vreg->name);
	if (IS_ERR(vreg->reg)) {
		ret = PTR_ERR(vreg->reg);
		dev_err(dev, "%s: %s get failed, err=%d\n",
				__func__, vreg->name, ret);
	}
out:
	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_get_vreg);

static int ufshcd_init_vreg(struct ufs_hba *hba)
{
	int ret = 0;
	struct device *dev = hba->dev;
	struct ufs_vreg_info *info = &hba->vreg_info;

	ret = ufshcd_get_vreg(dev, info->vcc);
	if (ret)
		goto out;

	ret = ufshcd_get_vreg(dev, info->vccq);
	if (!ret)
		ret = ufshcd_get_vreg(dev, info->vccq2);
out:
	return ret;
}

static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
{
	struct ufs_vreg_info *info = &hba->vreg_info;

	return ufshcd_get_vreg(hba->dev, info->vdd_hba);
}
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;
	unsigned long flags;
	ktime_t start = ktime_get();
	bool clk_state_changed = false;

	if (list_empty(head))
		goto out;

	ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
	if (ret)
		return ret;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			/*
			 * Don't disable clocks which are needed
			 * to keep the link active.
			 */
			if (ufshcd_is_link_active(hba) &&
			    clki->keep_link_active)
				continue;

			clk_state_changed = on ^ clki->enabled;
			if (on && !clki->enabled) {
				ret = clk_prepare_enable(clki->clk);
				if (ret) {
					dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
						__func__, clki->name, ret);
					goto out;
				}
			} else if (!on && clki->enabled) {
				clk_disable_unprepare(clki->clk);
			}
			clki->enabled = on;
			dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
					clki->name, on ? "en" : "dis");
		}
	}

	ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
	if (ret)
		return ret;

out:
	if (ret) {
		list_for_each_entry(clki, head, list) {
			if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
				clk_disable_unprepare(clki->clk);
		}
	} else if (!ret && on) {
		spin_lock_irqsave(hba->host->host_lock, flags);
		hba->clk_gating.state = CLKS_ON;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	}

	if (clk_state_changed)
		trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
			(on ? "on" : "off"),
			ktime_to_us(ktime_sub(ktime_get(), start)), ret);
	return ret;
}
static enum ufs_ref_clk_freq ufshcd_parse_ref_clk_property(struct ufs_hba *hba)
{
	u32 freq;
	int ret = device_property_read_u32(hba->dev, "ref-clk-freq", &freq);

	if (ret) {
		dev_dbg(hba->dev, "Cannot query 'ref-clk-freq' property = %d", ret);
		return REF_CLK_FREQ_INVAL;
	}

	return ufs_get_bref_clk_from_hz(freq);
}

static int ufshcd_init_clocks(struct ufs_hba *hba)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct device *dev = hba->dev;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		goto out;

	list_for_each_entry(clki, head, list) {
		if (!clki->name)
			continue;

		clki->clk = devm_clk_get(dev, clki->name);
		if (IS_ERR(clki->clk)) {
			ret = PTR_ERR(clki->clk);
			dev_err(dev, "%s: %s clk get failed, %d\n",
					__func__, clki->name, ret);
			goto out;
		}

		/*
		 * Parse device ref clk freq as per device tree "ref_clk".
		 * Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL
		 * in ufshcd_alloc_host().
		 */
		if (!strcmp(clki->name, "ref_clk"))
			ufshcd_parse_dev_ref_clk_freq(hba, clki->clk);

		if (clki->max_freq) {
			ret = clk_set_rate(clki->clk, clki->max_freq);
			if (ret) {
				dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
					__func__, clki->name,
					clki->max_freq, ret);
				goto out;
			}
			clki->curr_freq = clki->max_freq;
		}
		dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
				clki->name, clk_get_rate(clki->clk));
	}

	/* Set Max. frequency for all clocks */
	if (hba->use_pm_opp) {
		ret = ufshcd_opp_set_rate(hba, ULONG_MAX);
		if (ret) {
			dev_err(hba->dev, "%s: failed to set OPP: %d", __func__,
				ret);
			goto out;
		}
	}

out:
	return ret;
}
static int ufshcd_variant_hba_init(struct ufs_hba *hba)
{
	int err = 0;

	if (!hba->vops)
		goto out;

	err = ufshcd_vops_init(hba);
	if (err)
		dev_err_probe(hba->dev, err,
			      "%s: variant %s init failed with err %d\n",
			      __func__, ufshcd_get_var_name(hba), err);
out:
	return err;
}

static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
{
	if (!hba->vops)
		return;

	ufshcd_vops_exit(hba);
}

static int ufshcd_hba_init(struct ufs_hba *hba)
{
	int err;

	/*
	 * Handle host controller power separately from the UFS device power
	 * rails as it will help controlling the UFS host controller power
	 * collapse easily which is different than UFS device power collapse.
	 * Also, enable the host controller power before we go ahead with rest
	 * of the initialization here.
	 */
	err = ufshcd_init_hba_vreg(hba);
	if (err)
		goto out;

	err = ufshcd_setup_hba_vreg(hba, true);
	if (err)
		goto out;

	err = ufshcd_init_clocks(hba);
	if (err)
		goto out_disable_hba_vreg;

	if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
		hba->dev_ref_clk_freq = ufshcd_parse_ref_clk_property(hba);

	err = ufshcd_setup_clocks(hba, true);
	if (err)
		goto out_disable_hba_vreg;

	err = ufshcd_init_vreg(hba);
	if (err)
		goto out_disable_clks;

	err = ufshcd_setup_vreg(hba, true);
	if (err)
		goto out_disable_clks;

	err = ufshcd_variant_hba_init(hba);
	if (err)
		goto out_disable_vreg;

	ufs_debugfs_hba_init(hba);

	hba->is_powered = true;
	goto out;

out_disable_vreg:
	ufshcd_setup_vreg(hba, false);
out_disable_clks:
	ufshcd_setup_clocks(hba, false);
out_disable_hba_vreg:
	ufshcd_setup_hba_vreg(hba, false);
out:
	return err;
}

static void ufshcd_hba_exit(struct ufs_hba *hba)
{
	if (hba->is_powered) {
		ufshcd_exit_clk_scaling(hba);
		ufshcd_exit_clk_gating(hba);
		destroy_workqueue(hba->eh_wq);
		ufs_debugfs_hba_exit(hba);
		ufshcd_variant_hba_exit(hba);
		ufshcd_setup_vreg(hba, false);
		ufshcd_setup_clocks(hba, false);
		ufshcd_setup_hba_vreg(hba, false);
		hba->is_powered = false;
		ufs_put_device_desc(hba);
	}
}
static int ufshcd_execute_start_stop(struct scsi_device *sdev,
				     enum ufs_dev_pwr_mode pwr_mode,
				     struct scsi_sense_hdr *sshdr)
{
	const unsigned char cdb[6] = { START_STOP, 0, 0, 0, pwr_mode << 4, 0 };
	const struct scsi_exec_args args = {
		.sshdr = sshdr,
		.req_flags = BLK_MQ_REQ_PM,
		.scmd_flags = SCMD_FAIL_IF_RECOVERING,
	};

	return scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, /*buffer=*/NULL,
			/*bufflen=*/0, /*timeout=*/10 * HZ, /*retries=*/0,
			&args);
}
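
/*
 * Example of the CDB built above (illustrative): requesting
 * UFS_SLEEP_PWR_MODE (value 2) yields a START STOP UNIT CDB of
 * { 0x1b, 0x00, 0x00, 0x00, 0x20, 0x00 }; the requested power condition is
 * carried in the upper nibble of byte 4, which is how the device is moved
 * between Active, Sleep and PowerDown states.
 */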
/**
 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
 *			     power mode
 * @hba: per adapter instance
 * @pwr_mode: device power mode to set
 *
 * Return: 0 if requested power mode is set successfully;
 *         < 0 if failed to set the requested power mode.
 */
static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
				   enum ufs_dev_pwr_mode pwr_mode)
{
	struct scsi_sense_hdr sshdr;
	struct scsi_device *sdp;
	unsigned long flags;
	int ret, retries;

	spin_lock_irqsave(hba->host->host_lock, flags);
	sdp = hba->ufs_device_wlun;
	if (sdp && scsi_device_online(sdp))
		ret = scsi_device_get(sdp);
	else
		ret = -ENODEV;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (ret)
		return ret;

	/*
	 * If scsi commands fail, the scsi mid-layer schedules scsi error-
	 * handling, which would wait for host to be resumed. Since we know
	 * we are functional while we are here, skip host resume in error
	 * handling context.
	 */
	hba->host->eh_noresume = 1;

	/*
	 * Current function would be generally called from the power management
	 * callbacks hence set the RQF_PM flag so that it doesn't resume the
	 * already suspended children.
	 */
	for (retries = 3; retries > 0; --retries) {
		ret = ufshcd_execute_start_stop(sdp, pwr_mode, &sshdr);
		/*
		 * scsi_execute() only returns a negative value if the request
		 * queue is dying.
		 */
		if (ret <= 0)
			break;
	}
	if (ret) {
		sdev_printk(KERN_WARNING, sdp,
			    "START_STOP failed for power mode: %d, result %x\n",
			    pwr_mode, ret);
		if (ret > 0) {
			if (scsi_sense_valid(&sshdr))
				scsi_print_sense_hdr(sdp, NULL, &sshdr);
			ret = -EIO;
		}
	} else {
		hba->curr_dev_pwr_mode = pwr_mode;
	}

	scsi_device_put(sdp);
	hba->host->eh_noresume = 0;
	return ret;
}
static int ufshcd_link_state_transition(struct ufs_hba *hba,
					enum uic_link_state req_link_state,
					bool check_for_bkops)
{
	int ret = 0;

	if (req_link_state == hba->uic_link_state)
		return 0;

	if (req_link_state == UIC_LINK_HIBERN8_STATE) {
		ret = ufshcd_uic_hibern8_enter(hba);
		if (!ret) {
			ufshcd_set_link_hibern8(hba);
		} else {
			dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
					__func__, ret);
			goto out;
		}
	}
	/*
	 * If autobkops is enabled, link can't be turned off because
	 * turning off the link would also turn off the device, except in the
	 * case of DeepSleep where the device is expected to remain powered.
	 */
	else if ((req_link_state == UIC_LINK_OFF_STATE) &&
		 (!check_for_bkops || !hba->auto_bkops_enabled)) {
		/*
		 * Let's make sure that link is in low power mode, we are doing
		 * this currently by putting the link in Hibern8. Otherway to
		 * put the link in low power mode is to send the DME end point
		 * to device and then send the DME reset command to local
		 * unipro. But putting the link in hibern8 is much faster.
		 *
		 * Note also that putting the link in Hibern8 is a requirement
		 * for entering DeepSleep.
		 */
		ret = ufshcd_uic_hibern8_enter(hba);
		if (ret) {
			dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
					__func__, ret);
			goto out;
		}
		/*
		 * Change controller state to "reset state" which
		 * should also put the link in off/reset state
		 */
		ufshcd_hba_stop(hba);
		/*
		 * TODO: Check if we need any delay to make sure that
		 * controller is reset
		 */
		ufshcd_set_link_off(hba);
	}

out:
	return ret;
}
static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
{
	bool vcc_off = false;

	/*
	 * It seems some UFS devices may keep drawing more than sleep current
	 * (atleast for 500us) from UFS rails (especially from VCCQ rail).
	 * To avoid this situation, add 2ms delay before putting these UFS
	 * rails in LPM mode.
	 */
	if (!ufshcd_is_link_active(hba) &&
	    hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
		usleep_range(2000, 2100);

	/*
	 * If UFS device is either in UFS_Sleep turn off VCC rail to save some
	 * power.
	 *
	 * If UFS device and link is in OFF state, all power supplies (VCC,
	 * VCCQ, VCCQ2) can be turned off if power on write protect is not
	 * required. If UFS link is inactive (Hibern8 or OFF state) and device
	 * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
	 *
	 * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
	 * in low power state which would save some power.
	 *
	 * If Write Booster is enabled and the device needs to flush the WB
	 * buffer OR if bkops status is urgent for WB, keep Vcc on.
	 */
	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
	    !hba->dev_info.is_lu_power_on_wp) {
		ufshcd_setup_vreg(hba, false);
		vcc_off = true;
	} else if (!ufshcd_is_ufs_dev_active(hba)) {
		ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
		vcc_off = true;
		if (ufshcd_is_link_hibern8(hba) || ufshcd_is_link_off(hba)) {
			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
		}
	}

	/*
	 * Some UFS devices require delay after VCC power rail is turned-off.
	 */
	if (vcc_off && hba->vreg_info.vcc &&
		hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)
		usleep_range(5000, 5100);
}

#ifdef CONFIG_PM
static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
{
	int ret = 0;

	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
	    !hba->dev_info.is_lu_power_on_wp) {
		ret = ufshcd_setup_vreg(hba, true);
	} else if (!ufshcd_is_ufs_dev_active(hba)) {
		if (!ufshcd_is_link_active(hba)) {
			ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
			if (ret)
				goto vcc_disable;
			ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
			if (ret)
				goto vccq_lpm;
		}
		ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
	}
	goto out;

vccq_lpm:
	ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
vcc_disable:
	ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
out:
	return ret;
}
#endif /* CONFIG_PM */

static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
{
	if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
		ufshcd_setup_hba_vreg(hba, false);
}

static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
{
	if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
		ufshcd_setup_hba_vreg(hba, true);
}
static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int ret = 0;
	bool check_for_bkops;
	enum ufs_pm_level pm_lvl;
	enum ufs_dev_pwr_mode req_dev_pwr_mode;
	enum uic_link_state req_link_state;

	hba->pm_op_in_progress = true;
	if (pm_op != UFS_SHUTDOWN_PM) {
		pm_lvl = pm_op == UFS_RUNTIME_PM ?
			 hba->rpm_lvl : hba->spm_lvl;
		req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
		req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
	} else {
		req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
		req_link_state = UIC_LINK_OFF_STATE;
	}

	/*
	 * If we can't transition into any of the low power modes
	 * just gate the clocks.
	 */
	ufshcd_hold(hba);
	hba->clk_gating.is_suspended = true;

	if (ufshcd_is_clkscaling_supported(hba))
		ufshcd_clk_scaling_suspend(hba, true);

	if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
	    req_link_state == UIC_LINK_ACTIVE_STATE) {
		goto vops_suspend;
	}

	if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
	    (req_link_state == hba->uic_link_state))
		goto enable_scaling;

	/* UFS device & link must be active before we enter in this function */
	if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
		ret = -EINVAL;
		goto enable_scaling;
	}

	if (pm_op == UFS_RUNTIME_PM) {
		if (ufshcd_can_autobkops_during_suspend(hba)) {
			/*
			 * The device is idle with no requests in the queue,
			 * allow background operations if bkops status shows
			 * that performance might be impacted.
			 */
			ret = ufshcd_urgent_bkops(hba);
			if (ret) {
				/*
				 * If return err in suspend flow, IO will hang.
				 * Trigger error handler and break suspend for
				 * error recovery.
				 */
				ufshcd_force_error_recovery(hba);
				ret = -EBUSY;
				goto enable_scaling;
			}
		} else {
			/* make sure that auto bkops is disabled */
			ufshcd_disable_auto_bkops(hba);
		}
		/*
		 * If device needs to do BKOP or WB buffer flush during
		 * Hibern8, keep device power mode as "active power mode"
		 * and VCC supply.
		 */
		hba->dev_info.b_rpm_dev_flush_capable =
			hba->auto_bkops_enabled ||
			(((req_link_state == UIC_LINK_HIBERN8_STATE) ||
			((req_link_state == UIC_LINK_ACTIVE_STATE) &&
			ufshcd_is_auto_hibern8_enabled(hba))) &&
			ufshcd_wb_need_flush(hba));
	}

	flush_work(&hba->eeh_work);

	ret = ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
	if (ret)
		goto enable_scaling;

	if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
		if (pm_op != UFS_RUNTIME_PM)
			/* ensure that bkops is disabled */
			ufshcd_disable_auto_bkops(hba);

		if (!hba->dev_info.b_rpm_dev_flush_capable) {
			ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
			if (ret && pm_op != UFS_SHUTDOWN_PM) {
				/*
				 * If return err in suspend flow, IO will hang.
				 * Trigger error handler and break suspend for
				 * error recovery.
				 */
				ufshcd_force_error_recovery(hba);
				ret = -EBUSY;
			}
			if (ret)
				goto enable_scaling;
		}
	}

	/*
	 * In the case of DeepSleep, the device is expected to remain powered
	 * with the link off, so do not check for bkops.
	 */
	check_for_bkops = !ufshcd_is_ufs_dev_deepsleep(hba);
	ret = ufshcd_link_state_transition(hba, req_link_state, check_for_bkops);
	if (ret && pm_op != UFS_SHUTDOWN_PM) {
		/*
		 * If return err in suspend flow, IO will hang.
		 * Trigger error handler and break suspend for
		 * error recovery.
		 */
		ufshcd_force_error_recovery(hba);
		ret = -EBUSY;
	}
	if (ret)
		goto set_dev_active;

vops_suspend:
	/*
	 * Call vendor specific suspend callback. As these callbacks may access
	 * vendor specific host controller register space call them before the
	 * host clocks are ON.
	 */
	ret = ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
	if (ret)
		goto set_link_active;
	goto out;

set_link_active:
	/*
	 * Device hardware reset is required to exit DeepSleep. Also, for
	 * DeepSleep, the link is off so host reset and restore will be done
	 * further below.
	 */
	if (ufshcd_is_ufs_dev_deepsleep(hba)) {
		ufshcd_device_reset(hba);
		WARN_ON(!ufshcd_is_link_off(hba));
	}
	if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
		ufshcd_set_link_active(hba);
	else if (ufshcd_is_link_off(hba))
		ufshcd_host_reset_and_restore(hba);
set_dev_active:
	/* Can also get here needing to exit DeepSleep */
	if (ufshcd_is_ufs_dev_deepsleep(hba)) {
		ufshcd_device_reset(hba);
		ufshcd_host_reset_and_restore(hba);
	}
	if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
		ufshcd_disable_auto_bkops(hba);
enable_scaling:
	if (ufshcd_is_clkscaling_supported(hba))
		ufshcd_clk_scaling_suspend(hba, false);

	hba->dev_info.b_rpm_dev_flush_capable = false;
out:
	if (hba->dev_info.b_rpm_dev_flush_capable) {
		schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
			msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS));
	}

	if (ret) {
		ufshcd_update_evt_hist(hba, UFS_EVT_WL_SUSP_ERR, (u32)ret);
		hba->clk_gating.is_suspended = false;
		ufshcd_release(hba);
	}
	hba->pm_op_in_progress = false;
	return ret;
}
static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int ret;
	enum uic_link_state old_link_state = hba->uic_link_state;

	hba->pm_op_in_progress = true;

	/*
	 * Call vendor specific resume callback. As these callbacks may access
	 * vendor specific host controller register space call them when the
	 * host clocks are ON.
	 */
	ret = ufshcd_vops_resume(hba, pm_op);
	if (ret)
		goto out;

	/* For DeepSleep, the only supported option is to have the link off */
	WARN_ON(ufshcd_is_ufs_dev_deepsleep(hba) && !ufshcd_is_link_off(hba));

	if (ufshcd_is_link_hibern8(hba)) {
		ret = ufshcd_uic_hibern8_exit(hba);
		if (!ret) {
			ufshcd_set_link_active(hba);
		} else {
			dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
					__func__, ret);
			goto vendor_suspend;
		}
	} else if (ufshcd_is_link_off(hba)) {
		/*
		 * A full initialization of the host and the device is
		 * required since the link was put to off during suspend.
		 * Note, in the case of DeepSleep, the device will exit
		 * DeepSleep due to device reset.
		 */
		ret = ufshcd_reset_and_restore(hba);
		/*
		 * ufshcd_reset_and_restore() should have already
		 * set the link state as active
		 */
		if (ret || !ufshcd_is_link_active(hba))
			goto vendor_suspend;
	}

	if (!ufshcd_is_ufs_dev_active(hba)) {
		ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
		if (ret)
			goto set_old_link_state;
		ufshcd_set_timestamp_attr(hba);
	}

	if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
		ufshcd_enable_auto_bkops(hba);
	else
		/*
		 * If BKOPs operations are urgently needed at this moment then
		 * keep auto-bkops enabled or else disable it.
		 */
		ufshcd_urgent_bkops(hba);

	if (hba->ee_usr_mask)
		ufshcd_write_ee_control(hba);

	if (ufshcd_is_clkscaling_supported(hba))
		ufshcd_clk_scaling_suspend(hba, false);

	if (hba->dev_info.b_rpm_dev_flush_capable) {
		hba->dev_info.b_rpm_dev_flush_capable = false;
		cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
	}

	/* Enable Auto-Hibernate if configured */
	ufshcd_auto_hibern8_enable(hba);

	goto out;

set_old_link_state:
	ufshcd_link_state_transition(hba, old_link_state, 0);
vendor_suspend:
	ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
	ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
out:
	if (ret)
		ufshcd_update_evt_hist(hba, UFS_EVT_WL_RES_ERR, (u32)ret);
	hba->clk_gating.is_suspended = false;
	ufshcd_release(hba);
	hba->pm_op_in_progress = false;
	return ret;
}
static int ufshcd_wl_runtime_suspend(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufs_hba *hba;
	int ret;
	ktime_t start = ktime_get();

	hba = shost_priv(sdev->host);

	ret = __ufshcd_wl_suspend(hba, UFS_RUNTIME_PM);
	if (ret)
		dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);

	trace_ufshcd_wl_runtime_suspend(dev_name(dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);

	return ret;
}

static int ufshcd_wl_runtime_resume(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufs_hba *hba;
	int ret = 0;
	ktime_t start = ktime_get();

	hba = shost_priv(sdev->host);

	ret = __ufshcd_wl_resume(hba, UFS_RUNTIME_PM);
	if (ret)
		dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);

	trace_ufshcd_wl_runtime_resume(dev_name(dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);

	return ret;
}
#ifdef CONFIG_PM_SLEEP
static int ufshcd_wl_suspend(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufs_hba *hba;
	int ret = 0;
	ktime_t start = ktime_get();

	hba = shost_priv(sdev->host);
	down(&hba->host_sem);
	hba->system_suspending = true;

	if (pm_runtime_suspended(dev))
		goto out;

	ret = __ufshcd_wl_suspend(hba, UFS_SYSTEM_PM);
	if (ret) {
		dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
		up(&hba->host_sem);
	}

out:
	if (!ret)
		hba->is_sys_suspended = true;
	trace_ufshcd_wl_suspend(dev_name(dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);

	return ret;
}

static int ufshcd_wl_resume(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufs_hba *hba;
	int ret = 0;
	ktime_t start = ktime_get();

	hba = shost_priv(sdev->host);

	if (pm_runtime_suspended(dev))
		goto out;

	ret = __ufshcd_wl_resume(hba, UFS_SYSTEM_PM);
	if (ret)
		dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
out:
	trace_ufshcd_wl_resume(dev_name(dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	if (!ret)
		hba->is_sys_suspended = false;
	hba->system_suspending = false;
	up(&hba->host_sem);

	return ret;
}
#endif
/**
 * ufshcd_suspend - helper function for suspend operations
 * @hba: per adapter instance
 *
 * This function will disable irqs, turn off clocks
 * and set vreg and hba-vreg in lpm mode.
 *
 * Return: 0 upon success; < 0 upon failure.
 */
static int ufshcd_suspend(struct ufs_hba *hba)
{
	int ret;

	if (!hba->is_powered)
		return 0;
	/*
	 * Disable the host irq as host controller as there won't be any
	 * host controller transaction expected till resume.
	 */
	ufshcd_disable_irq(hba);
	ret = ufshcd_setup_clocks(hba, false);
	if (ret) {
		ufshcd_enable_irq(hba);
		return ret;
	}
	if (ufshcd_is_clkgating_allowed(hba)) {
		hba->clk_gating.state = CLKS_OFF;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
	}

	ufshcd_vreg_set_lpm(hba);
	/* Put the host controller in low power mode if possible */
	ufshcd_hba_vreg_set_lpm(hba);
	return ret;
}

#ifdef CONFIG_PM
/**
 * ufshcd_resume - helper function for resume operations
 * @hba: per adapter instance
 *
 * This function basically turns on the regulators, clocks and
 * irqs of the hba.
 *
 * Return: 0 for success and non-zero for failure.
 */
static int ufshcd_resume(struct ufs_hba *hba)
{
	int ret;

	if (!hba->is_powered)
		return 0;

	ufshcd_hba_vreg_set_hpm(hba);
	ret = ufshcd_vreg_set_hpm(hba);
	if (ret)
		goto out;

	/* Make sure clocks are enabled before accessing controller */
	ret = ufshcd_setup_clocks(hba, true);
	if (ret)
		goto disable_vreg;

	/* enable the host irq as host controller would be active soon */
	ufshcd_enable_irq(hba);
	goto out;

disable_vreg:
	ufshcd_vreg_set_lpm(hba);
out:
	if (ret)
		ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret);
	return ret;
}
#endif /* CONFIG_PM */
#ifdef CONFIG_PM_SLEEP
/**
 * ufshcd_system_suspend - system suspend callback
 * @dev: Device associated with the UFS controller.
 *
 * Executed before putting the system into a sleep state in which the contents
 * of main memory are preserved.
 *
 * Return: 0 for success and non-zero for failure.
 */
int ufshcd_system_suspend(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret = 0;
	ktime_t start = ktime_get();

	if (pm_runtime_suspended(hba->dev))
		goto out;

	ret = ufshcd_suspend(hba);
out:
	trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	return ret;
}
EXPORT_SYMBOL(ufshcd_system_suspend);

/**
 * ufshcd_system_resume - system resume callback
 * @dev: Device associated with the UFS controller.
 *
 * Executed after waking the system up from a sleep state in which the contents
 * of main memory were preserved.
 *
 * Return: 0 for success and non-zero for failure.
 */
int ufshcd_system_resume(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	ktime_t start = ktime_get();
	int ret = 0;

	if (pm_runtime_suspended(hba->dev))
		goto out;

	ret = ufshcd_resume(hba);

out:
	trace_ufshcd_system_resume(dev_name(hba->dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);

	return ret;
}
EXPORT_SYMBOL(ufshcd_system_resume);
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
/**
 * ufshcd_runtime_suspend - runtime suspend callback
 * @dev: Device associated with the UFS controller.
 *
 * Check the description of ufshcd_suspend() function for more details.
 *
 * Return: 0 for success and non-zero for failure.
 */
int ufshcd_runtime_suspend(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret;
	ktime_t start = ktime_get();

	ret = ufshcd_suspend(hba);

	trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	return ret;
}
EXPORT_SYMBOL(ufshcd_runtime_suspend);

/**
 * ufshcd_runtime_resume - runtime resume routine
 * @dev: Device associated with the UFS controller.
 *
 * This function basically brings the controller
 * to active state. Following operations are done in this function:
 *
 * 1. Turn on all the controller related clocks
 * 2. Turn ON VCC rail
 *
 * Return: 0 upon success; < 0 upon failure.
 */
int ufshcd_runtime_resume(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret;
	ktime_t start = ktime_get();

	ret = ufshcd_resume(hba);

	trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	return ret;
}
EXPORT_SYMBOL(ufshcd_runtime_resume);
#endif /* CONFIG_PM */
static void ufshcd_wl_shutdown(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufs_hba *hba = shost_priv(sdev->host);

	down(&hba->host_sem);
	hba->shutting_down = true;
	up(&hba->host_sem);

	/* Turn on everything while shutting down */
	ufshcd_rpm_get_sync(hba);
	scsi_device_quiesce(sdev);
	shost_for_each_device(sdev, hba->host) {
		if (sdev == hba->ufs_device_wlun)
			continue;
		scsi_device_quiesce(sdev);
	}
	__ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);

	/*
	 * Next, turn off the UFS controller and the UFS regulators. Disable
	 * clocks.
	 */
	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
		ufshcd_suspend(hba);

	hba->is_powered = false;
}

/**
 * ufshcd_remove - de-allocate SCSI host and host memory space
 *		data structure memory
 * @hba: per adapter instance
 */
void ufshcd_remove(struct ufs_hba *hba)
{
	if (hba->ufs_device_wlun)
		ufshcd_rpm_get_sync(hba);
	ufs_hwmon_remove(hba);
	ufs_bsg_remove(hba);
	ufs_sysfs_remove_nodes(hba->dev);
	blk_mq_destroy_queue(hba->tmf_queue);
	blk_put_queue(hba->tmf_queue);
	blk_mq_free_tag_set(&hba->tmf_tag_set);
	scsi_remove_host(hba->host);
	/* disable interrupts */
	ufshcd_disable_intr(hba, hba->intr_mask);
	ufshcd_hba_stop(hba);
	ufshcd_hba_exit(hba);
}
EXPORT_SYMBOL_GPL(ufshcd_remove);
#ifdef CONFIG_PM_SLEEP
int ufshcd_system_freeze(struct device *dev)
{
	return ufshcd_system_suspend(dev);
}
EXPORT_SYMBOL_GPL(ufshcd_system_freeze);

int ufshcd_system_restore(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret;

	ret = ufshcd_system_resume(dev);
	if (ret)
		return ret;

	/* Configure UTRL and UTMRL base address registers */
	ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
			REG_UTP_TRANSFER_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
			REG_UTP_TRANSFER_REQ_LIST_BASE_H);
	ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
			REG_UTP_TASK_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
			REG_UTP_TASK_REQ_LIST_BASE_H);
	/*
	 * Make sure that UTRL and UTMRL base address registers
	 * are updated with the latest queue addresses. Only after
	 * updating these addresses, we can queue the new commands.
	 */
	mb();

	/* Resuming from hibernate, assume that link was OFF */
	ufshcd_set_link_off(hba);

	return 0;
}
EXPORT_SYMBOL_GPL(ufshcd_system_restore);

int ufshcd_system_thaw(struct device *dev)
{
	return ufshcd_system_resume(dev);
}
EXPORT_SYMBOL_GPL(ufshcd_system_thaw);
#endif /* CONFIG_PM_SLEEP */

/**
 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
 * @hba: pointer to Host Bus Adapter (HBA)
 */
void ufshcd_dealloc_host(struct ufs_hba *hba)
{
	scsi_host_put(hba->host);
}
EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);

/**
 * ufshcd_set_dma_mask - Set dma mask based on the controller
 *			 addressing capability
 * @hba: per adapter instance
 *
 * Return: 0 for success, non-zero for failure.
 */
static int ufshcd_set_dma_mask(struct ufs_hba *hba)
{
	if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
		if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
			return 0;
	}
	return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
}
/**
 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
 * @dev: pointer to device handle
 * @hba_handle: driver private handle
 *
 * Return: 0 on success, non-zero value on failure.
 */
int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	int err = 0;

	if (!dev) {
		dev_err(dev,
		"Invalid memory reference for dev is NULL\n");
		err = -ENODEV;
		goto out_error;
	}

	host = scsi_host_alloc(&ufshcd_driver_template,
				sizeof(struct ufs_hba));
	if (!host) {
		dev_err(dev, "scsi_host_alloc failed\n");
		err = -ENOMEM;
		goto out_error;
	}
	host->nr_maps = HCTX_TYPE_POLL + 1;
	hba = shost_priv(host);
	hba->host = host;
	hba->dev = dev;
	hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
	hba->nop_out_timeout = NOP_OUT_TIMEOUT;
	ufshcd_set_sg_entry_size(hba, sizeof(struct ufshcd_sg_entry));
	INIT_LIST_HEAD(&hba->clk_list_head);
	spin_lock_init(&hba->outstanding_lock);

	*hba_handle = hba;

out_error:
	return err;
}
EXPORT_SYMBOL(ufshcd_alloc_host);

/* This function exists because blk_mq_alloc_tag_set() requires this. */
static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
				     const struct blk_mq_queue_data *qd)
{
	WARN_ON_ONCE(true);
	return BLK_STS_NOTSUPP;
}

static const struct blk_mq_ops ufshcd_tmf_ops = {
	.queue_rq = ufshcd_queue_tmf,
};
/**
 * ufshcd_init - Driver initialization routine
 * @hba: per-adapter instance
 * @mmio_base: base register address
 * @irq: Interrupt line of device
 *
 * Return: 0 on success, non-zero value on failure.
 */
int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
{
	int err;
	struct Scsi_Host *host = hba->host;
	struct device *dev = hba->dev;
	char eh_wq_name[sizeof("ufs_eh_wq_00")];

	/*
	 * dev_set_drvdata() must be called before any callbacks are registered
	 * that use dev_get_drvdata() (frequency scaling, clock scaling, hwmon,
	 * sysfs).
	 */
	dev_set_drvdata(dev, hba);

	if (!mmio_base) {
		dev_err(hba->dev,
		"Invalid memory reference for mmio_base is NULL\n");
		err = -ENODEV;
		goto out_error;
	}

	hba->mmio_base = mmio_base;
	hba->irq = irq;
	hba->vps = &ufs_hba_vps;

	err = ufshcd_hba_init(hba);
	if (err)
		goto out_error;

	/* Read capabilities registers */
	err = ufshcd_hba_capabilities(hba);
	if (err)
		goto out_disable;

	/* Get UFS version supported by the controller */
	hba->ufs_version = ufshcd_get_ufs_version(hba);

	/* Get Interrupt bit mask per version */
	hba->intr_mask = ufshcd_get_intr_mask(hba);

	err = ufshcd_set_dma_mask(hba);
	if (err) {
		dev_err(hba->dev, "set dma mask failed\n");
		goto out_disable;
	}

	/* Allocate memory for host memory space */
	err = ufshcd_memory_alloc(hba);
	if (err) {
		dev_err(hba->dev, "Memory allocation failed\n");
		goto out_disable;
	}

	/* Configure LRB */
	ufshcd_host_memory_configure(hba);

	host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
	host->cmd_per_lun = hba->nutrs - UFSHCD_NUM_RESERVED;
	host->max_id = UFSHCD_MAX_ID;
	host->max_lun = UFS_MAX_LUNS;
	host->max_channel = UFSHCD_MAX_CHANNEL;
	host->unique_id = host->host_no;
	host->max_cmd_len = UFS_CDB_SIZE;
	host->queuecommand_may_block = !!(hba->caps & UFSHCD_CAP_CLK_GATING);

	hba->max_pwr_info.is_valid = false;

	/* Initialize work queues */
	snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
		 hba->host->host_no);
	hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
	if (!hba->eh_wq) {
		dev_err(hba->dev, "%s: failed to create eh workqueue\n",
			__func__);
		err = -ENOMEM;
		goto out_disable;
	}
	INIT_WORK(&hba->eh_work, ufshcd_err_handler);
	INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);

	sema_init(&hba->host_sem, 1);

	/* Initialize UIC command mutex */
	mutex_init(&hba->uic_cmd_mutex);

	/* Initialize mutex for device management commands */
	mutex_init(&hba->dev_cmd.lock);

	/* Initialize mutex for exception event control */
	mutex_init(&hba->ee_ctrl_mutex);

	mutex_init(&hba->wb_mutex);
	init_rwsem(&hba->clk_scaling_lock);

	ufshcd_init_clk_gating(hba);

	ufshcd_init_clk_scaling(hba);

	/*
	 * In order to avoid any spurious interrupt immediately after
	 * registering UFS controller interrupt handler, clear any pending UFS
	 * interrupt status and disable all the UFS interrupts.
	 */
	ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
		      REG_INTERRUPT_STATUS);
	ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
	/*
	 * Make sure that UFS interrupts are disabled and any pending interrupt
	 * status is cleared before registering UFS interrupt handler.
	 */
	mb();

	/* IRQ registration */
	err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
	if (err) {
		dev_err(hba->dev, "request irq failed\n");
		goto out_disable;
	} else {
		hba->is_irq_enabled = true;
	}

	if (!is_mcq_supported(hba)) {
		err = scsi_add_host(host, hba->dev);
		if (err) {
			dev_err(hba->dev, "scsi_add_host failed\n");
			goto out_disable;
		}
		hba->scsi_host_added = true;
	}

	hba->tmf_tag_set = (struct blk_mq_tag_set) {
		.nr_hw_queues	= 1,
		.queue_depth	= hba->nutmrs,
		.ops		= &ufshcd_tmf_ops,
		.flags		= BLK_MQ_F_NO_SCHED,
	};
	err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
	if (err < 0)
		goto out_remove_scsi_host;
	hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
	if (IS_ERR(hba->tmf_queue)) {
		err = PTR_ERR(hba->tmf_queue);
		goto free_tmf_tag_set;
	}
	hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs,
				    sizeof(*hba->tmf_rqs), GFP_KERNEL);
	if (!hba->tmf_rqs) {
		err = -ENOMEM;
		goto free_tmf_queue;
	}

	/* Reset the attached device */
	ufshcd_device_reset(hba);

	ufshcd_init_crypto(hba);

	/* Host controller enable */
	err = ufshcd_hba_enable(hba);
	if (err) {
		dev_err(hba->dev, "Host controller enable failed\n");
		ufshcd_print_evt_hist(hba);
		ufshcd_print_host_state(hba);
		goto free_tmf_queue;
	}

	/*
	 * Set the default power management level for runtime and system PM.
	 * Default power saving mode is to keep UFS link in Hibern8 state
	 * and UFS device in sleep state.
	 */
	hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
						UFS_SLEEP_PWR_MODE,
						UIC_LINK_HIBERN8_STATE);
	hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
						UFS_SLEEP_PWR_MODE,
						UIC_LINK_HIBERN8_STATE);

	INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work,
			  ufshcd_rpm_dev_flush_recheck_work);

	/* Set the default auto-hibernate idle timer value to 150 ms */
	if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
		hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
			    FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
	}

	/* Hold auto suspend until async scan completes */
	pm_runtime_get_sync(dev);
	atomic_set(&hba->scsi_block_reqs_cnt, 0);
	/*
	 * We are assuming that device wasn't put in sleep/power-down
	 * state exclusively during the boot stage before kernel.
	 * This assumption helps avoid doing link startup twice during
	 * ufshcd_probe_hba().
	 */
	ufshcd_set_ufs_dev_active(hba);

	async_schedule(ufshcd_async_scan, hba);
	ufs_sysfs_add_nodes(hba->dev);

	device_enable_async_suspend(dev);
	return 0;

free_tmf_queue:
	blk_mq_destroy_queue(hba->tmf_queue);
	blk_put_queue(hba->tmf_queue);
free_tmf_tag_set:
	blk_mq_free_tag_set(&hba->tmf_tag_set);
out_remove_scsi_host:
	scsi_remove_host(hba->host);
out_disable:
	hba->is_irq_enabled = false;
	ufshcd_hba_exit(hba);
out_error:
	return err;
}
EXPORT_SYMBOL_GPL(ufshcd_init);
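
/*
 * Typical usage of the two exported entry points above from a host LLD's
 * probe routine (sketch only; the "foo" names are illustrative and error
 * handling is abbreviated):
 *
 *	static int foo_ufs_probe(struct platform_device *pdev)
 *	{
 *		struct ufs_hba *hba;
 *		void __iomem *mmio;
 *		int irq, err;
 *
 *		err = ufshcd_alloc_host(&pdev->dev, &hba);
 *		if (err)
 *			return err;
 *		mmio = devm_platform_ioremap_resource(pdev, 0);
 *		irq = platform_get_irq(pdev, 0);
 *		err = ufshcd_init(hba, mmio, irq);
 *		if (err)
 *			ufshcd_dealloc_host(hba);
 *		return err;
 *	}
 *
 * In-tree platform glue drivers normally go through ufshcd_pltfrm_init(),
 * which wraps this sequence together with clock/regulator parsing.
 */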
void ufshcd_resume_complete(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	if (hba->complete_put) {
		ufshcd_rpm_put(hba);
		hba->complete_put = false;
	}
}
EXPORT_SYMBOL_GPL(ufshcd_resume_complete);

static bool ufshcd_rpm_ok_for_spm(struct ufs_hba *hba)
{
	struct device *dev = &hba->ufs_device_wlun->sdev_gendev;
	enum ufs_dev_pwr_mode dev_pwr_mode;
	enum uic_link_state link_state;
	unsigned long flags;
	bool res;

	spin_lock_irqsave(&dev->power.lock, flags);
	dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl);
	link_state = ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl);
	res = pm_runtime_suspended(dev) &&
	      hba->curr_dev_pwr_mode == dev_pwr_mode &&
	      hba->uic_link_state == link_state &&
	      !hba->dev_info.b_rpm_dev_flush_capable;
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return res;
}

int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret;

	/*
	 * SCSI assumes that runtime-pm and system-pm for scsi drivers
	 * are same. And it doesn't wake up the device for system-suspend
	 * if it's runtime suspended. But ufs doesn't follow that.
	 * Refer ufshcd_resume_complete()
	 */
	if (hba->ufs_device_wlun) {
		/* Prevent runtime suspend */
		ufshcd_rpm_get_noresume(hba);
		/*
		 * Check if already runtime suspended in same state as system
		 * suspend would be.
		 */
		if (!rpm_ok_for_spm || !ufshcd_rpm_ok_for_spm(hba)) {
			/* RPM state is not ok for SPM, so runtime resume */
			ret = ufshcd_rpm_resume(hba);
			if (ret < 0 && ret != -EACCES) {
				ufshcd_rpm_put(hba);
				return ret;
			}
		}
		hba->complete_put = true;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__ufshcd_suspend_prepare);

int ufshcd_suspend_prepare(struct device *dev)
{
	return __ufshcd_suspend_prepare(dev, true);
}
EXPORT_SYMBOL_GPL(ufshcd_suspend_prepare);
#ifdef CONFIG_PM_SLEEP
static int ufshcd_wl_poweroff(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufs_hba *hba = shost_priv(sdev->host);

	__ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
	return 0;
}
#endif

static int ufshcd_wl_probe(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	if (!is_device_wlun(sdev))
		return -ENODEV;

	blk_pm_runtime_init(sdev->request_queue, dev);
	pm_runtime_set_autosuspend_delay(dev, 0);
	pm_runtime_allow(dev);

	return  0;
}

static int ufshcd_wl_remove(struct device *dev)
{
	pm_runtime_forbid(dev);
	return 0;
}

static const struct dev_pm_ops ufshcd_wl_pm_ops = {
#ifdef CONFIG_PM_SLEEP
	.suspend = ufshcd_wl_suspend,
	.resume = ufshcd_wl_resume,
	.freeze = ufshcd_wl_suspend,
	.thaw = ufshcd_wl_resume,
	.poweroff = ufshcd_wl_poweroff,
	.restore = ufshcd_wl_resume,
#endif
	SET_RUNTIME_PM_OPS(ufshcd_wl_runtime_suspend, ufshcd_wl_runtime_resume, NULL)
};
static void ufshcd_check_header_layout(void)
{
	/*
	 * gcc compilers before version 10 cannot do constant-folding for
	 * sub-byte bitfields. Hence skip the layout checks for gcc 9 and
	 * earlier.
	 */
	if (IS_ENABLED(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 100000)
		return;

	BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
				.cci = 3})[0] != 3);

	BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
				.ehs_length = 2})[1] != 2);

	BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
				.enable_crypto = 1})[2]
		     != 0x80);

	BUILD_BUG_ON((((u8 *)&(struct request_desc_header){
					.command_type = 5,
					.data_direction = 3,
					.interrupt = 1,
				})[3]) != ((5 << 4) | (3 << 1) | 1));

	BUILD_BUG_ON(((__le32 *)&(struct request_desc_header){
				.dunl = cpu_to_le32(0xdeadbeef)})[1] !=
		cpu_to_le32(0xdeadbeef));

	BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
				.ocs = 4})[8] != 4);

	BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
				.cds = 5})[9] != 5);

	BUILD_BUG_ON(((__le32 *)&(struct request_desc_header){
				.dunu = cpu_to_le32(0xbadcafe)})[3] !=
		cpu_to_le32(0xbadcafe));

	BUILD_BUG_ON(((u8 *)&(struct utp_upiu_header){
			       .iid = 0xf })[4] != 0xf0);

	BUILD_BUG_ON(((u8 *)&(struct utp_upiu_header){
			       .command_set_type = 0xf })[4] != 0xf);
}
/*
 * ufs_dev_wlun_template - describes ufs device wlun
 * ufs-device wlun - used to send pm commands
 * All luns are consumers of ufs-device wlun.
 *
 * Currently, no sd driver is present for wluns.
 * Hence no specific pm operations are performed.
 * With ufs design, SSU should be sent to ufs-device wlun.
 * Hence register a scsi driver for ufs wluns only.
 */
static struct scsi_driver ufs_dev_wlun_template = {
	.gendrv = {
		.name = "ufs_device_wlun",
		.owner = THIS_MODULE,
		.probe = ufshcd_wl_probe,
		.remove = ufshcd_wl_remove,
		.pm = &ufshcd_wl_pm_ops,
		.shutdown = ufshcd_wl_shutdown,
	},
};

static int __init ufshcd_core_init(void)
{
	int ret;

	ufshcd_check_header_layout();

	ufs_debugfs_init();

	ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv);
	if (ret)
		ufs_debugfs_exit();
	return ret;
}

static void __exit ufshcd_core_exit(void)
{
	ufs_debugfs_exit();
	scsi_unregister_driver(&ufs_dev_wlun_template.gendrv);
}

module_init(ufshcd_core_init);
module_exit(ufshcd_core_exit);

MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("Generic UFS host controller driver Core");
MODULE_SOFTDEP("pre: governor_simpleondemand");
MODULE_LICENSE("GPL");