// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Universal Flash Storage Host controller driver Core
 * Copyright (C) 2011-2013 Samsung India Software Operations
 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 */
#include <linux/async.h>
#include <linux/devfreq.h>
#include <linux/nls.h>
#include <linux/of.h>
#include <linux/bitfield.h>
#include <linux/blk-pm.h>
#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
#include <linux/sched/clock.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include "ufshcd-priv.h"
#include <ufs/ufs_quirks.h>
#include <ufs/unipro.h>
#include "ufs-sysfs.h"
#include "ufs-debugfs.h"
#include "ufs-fault-injection.h"
#include "ufshcd-crypto.h"
#include <asm/unaligned.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>
#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)

#define UFSHCD_ENABLE_MCQ_INTRS	(UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK |\
				 MCQ_CQ_EVENT_STATUS)

/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT	500

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES    10
/* Timeout after 50 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT    50 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES 3
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */

/* Advanced RPMB request timeout */
#define ADVANCED_RPMB_REQ_TIMEOUT  3000 /* 3 seconds */

/* Task management command timeout */
#define TM_CMD_TIMEOUT	100 /* msecs */

/* maximum number of retries for a general UIC command */
#define UFS_UIC_COMMAND_RETRIES 3

/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3

/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5

/* Maximum number of error handler retries before giving up */
#define MAX_ERR_HANDLER_RETRIES 5

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO	0x02

/* default delay of autosuspend: 2000 ms */
#define RPM_AUTOSUSPEND_DELAY_MS 2000

/* Default delay of RPM device flush delayed work */
#define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS 5000

/* Default value of wait time before gating device ref clock */
#define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */

/* Polling time to wait for fDeviceInit */
#define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */
/* UFSHC 4.0 compliant HCs support this mode; refer to param_set_mcq_mode(). */
static bool use_mcq_mode = true;

static bool is_mcq_supported(struct ufs_hba *hba)
{
	return hba->mcq_sup && use_mcq_mode;
}

static int param_set_mcq_mode(const char *val, const struct kernel_param *kp)
{
	int ret;

	ret = param_set_bool(val, kp);
	if (ret)
		return ret;

	return 0;
}

static const struct kernel_param_ops mcq_mode_ops = {
	.set = param_set_mcq_mode,
	.get = param_get_bool,
};

module_param_cb(use_mcq_mode, &mcq_mode_ops, &use_mcq_mode, 0644);
MODULE_PARM_DESC(use_mcq_mode, "Control MCQ mode for controllers starting from UFSHCI 4.0. 1 - enable MCQ, 0 - disable MCQ. MCQ is enabled by default");
#define ufshcd_toggle_vreg(_dev, _vreg, _on)				\
	({                                                              \
		int _ret;                                               \
		if (_on)                                                \
			_ret = ufshcd_enable_vreg(_dev, _vreg);         \
		else                                                    \
			_ret = ufshcd_disable_vreg(_dev, _vreg);        \
		_ret;                                                   \
	})

#define ufshcd_hex_dump(prefix_str, buf, len) do {                       \
	size_t __len = (len);                                            \
	print_hex_dump(KERN_ERR, prefix_str,                             \
		       __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
		       16, 4, buf, __len, false);                        \
} while (0)
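/*
 * Usage sketch for ufshcd_hex_dump() (illustrative only): dumping a response
 * UPIU with an offset-prefixed hex dump, 16 bytes per line in 4-byte groups:
 *
 *	ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
 *			sizeof(struct utp_upiu_rsp));
 */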
int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
		     const char *prefix)
{
	u32 *regs;
	size_t pos;

	if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
		return -EINVAL;

	regs = kzalloc(len, GFP_ATOMIC);
	if (!regs)
		return -ENOMEM;

	for (pos = 0; pos < len; pos += 4) {
		if (offset == 0 &&
		    pos >= REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER &&
		    pos <= REG_UIC_ERROR_CODE_DME)
			continue;
		regs[pos / 4] = ufshcd_readl(hba, offset + pos);
	}

	ufshcd_hex_dump(prefix, regs, len);
	kfree(regs);

	return 0;
}
EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
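/*
 * Example call (a sketch; ufshcd_print_evt_hist() below uses the same
 * pattern): dump the whole UFSHCI register space. Both offset and length
 * must be 4-byte aligned or the function returns -EINVAL:
 *
 *	ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
 */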
enum {
	UFSHCD_MAX_CHANNEL	= 0,
	UFSHCD_MAX_ID		= 1,
	UFSHCD_NUM_RESERVED	= 1,
	UFSHCD_CMD_PER_LUN	= 32 - UFSHCD_NUM_RESERVED,
	UFSHCD_CAN_QUEUE	= 32 - UFSHCD_NUM_RESERVED,
};

static const char *const ufshcd_state_name[] = {
	[UFSHCD_STATE_RESET]			= "reset",
	[UFSHCD_STATE_OPERATIONAL]		= "operational",
	[UFSHCD_STATE_ERROR]			= "error",
	[UFSHCD_STATE_EH_SCHEDULED_FATAL]	= "eh_fatal",
	[UFSHCD_STATE_EH_SCHEDULED_NON_FATAL]	= "eh_non_fatal",
};
/* UFSHCD error handling flags */
enum {
	UFSHCD_EH_IN_PROGRESS = (1 << 0),
};

/* UFSHCD UIC layer error flags */
enum {
	UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
	UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
	UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
	UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
	UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
	UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
	UFSHCD_UIC_PA_GENERIC_ERROR = (1 << 6), /* Generic PA error */
};

#define ufshcd_set_eh_in_progress(h) \
	((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
	((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
	((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
const struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
	[UFS_PM_LVL_0] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	[UFS_PM_LVL_1] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	[UFS_PM_LVL_2] = {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	[UFS_PM_LVL_3] = {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	[UFS_PM_LVL_4] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	[UFS_PM_LVL_5] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
	/*
	 * For DeepSleep, the link is first put in hibern8 and then off.
	 * Leaving the link in hibern8 is not supported.
	 */
	[UFS_PM_LVL_6] = {UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE},
};
static inline enum ufs_dev_pwr_mode
ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].dev_state;
}

static inline enum uic_link_state
ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].link_state;
}

static inline enum ufs_pm_level
ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
					enum uic_link_state link_state)
{
	enum ufs_pm_level lvl;

	for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
		if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
			(ufs_pm_lvl_states[lvl].link_state == link_state))
			return lvl;
	}

	/* if no match found, return the level 0 */
	return UFS_PM_LVL_0;
}
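/*
 * Worked example (informative only): looking up the level that pairs a
 * sleeping device with an active link walks the table above and yields
 * UFS_PM_LVL_2:
 *
 *	enum ufs_pm_level lvl =
 *		ufs_get_desired_pm_lvl_for_dev_link_state(UFS_SLEEP_PWR_MODE,
 *							  UIC_LINK_ACTIVE_STATE);
 */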
static const struct ufs_dev_quirk ufs_fixups[] = {
	/* UFS cards deviations table */
	{ .wmanufacturerid = UFS_VENDOR_MICRON,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
		   UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ },
	{ .wmanufacturerid = UFS_VENDOR_SAMSUNG,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
		   UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
		   UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS },
	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME },
	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
	  .model = "hB8aL1" /*H28U62301AMR*/,
	  .quirk = UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME },
	{ .wmanufacturerid = UFS_VENDOR_TOSHIBA,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
	{ .wmanufacturerid = UFS_VENDOR_TOSHIBA,
	  .model = "THGLF2G9C8KBADG",
	  .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
	{ .wmanufacturerid = UFS_VENDOR_TOSHIBA,
	  .model = "THGLF2G9D8KBADG",
	  .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
	{}
};
static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
			     struct ufs_pa_layer_attr *pwr_mode);
static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
					 struct ufs_vreg *vreg);
static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
						 bool enable);
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
static inline void ufshcd_enable_irq(struct ufs_hba *hba)
{
	if (!hba->is_irq_enabled) {
		enable_irq(hba->irq);
		hba->is_irq_enabled = true;
	}
}

static inline void ufshcd_disable_irq(struct ufs_hba *hba)
{
	if (hba->is_irq_enabled) {
		disable_irq(hba->irq);
		hba->is_irq_enabled = false;
	}
}
static void ufshcd_configure_wb(struct ufs_hba *hba)
{
	if (!ufshcd_is_wb_allowed(hba))
		return;

	ufshcd_wb_toggle(hba, true);

	ufshcd_wb_toggle_buf_flush_during_h8(hba, true);

	if (ufshcd_is_wb_buf_flush_allowed(hba))
		ufshcd_wb_toggle_buf_flush(hba, true);
}
static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
{
	if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
		scsi_unblock_requests(hba->host);
}

static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
{
	if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
		scsi_block_requests(hba->host);
}
static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
				      enum ufs_trace_str_t str_t)
{
	struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
	struct utp_upiu_header *header;

	if (!trace_ufshcd_upiu_enabled())
		return;

	if (str_t == UFS_CMD_SEND)
		header = &rq->header;
	else
		header = &hba->lrb[tag].ucd_rsp_ptr->header;

	trace_ufshcd_upiu(dev_name(hba->dev), str_t, header, &rq->sc.cdb,
			  UFS_TSF_CDB);
}
static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba,
					enum ufs_trace_str_t str_t,
					struct utp_upiu_req *rq_rsp)
{
	if (!trace_ufshcd_upiu_enabled())
		return;

	trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq_rsp->header,
			  &rq_rsp->qr, UFS_TSF_OSF);
}
static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
				     enum ufs_trace_str_t str_t)
{
	struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag];

	if (!trace_ufshcd_upiu_enabled())
		return;

	if (str_t == UFS_TM_SEND)
		trace_ufshcd_upiu(dev_name(hba->dev), str_t,
				  &descp->upiu_req.req_header,
				  &descp->upiu_req.input_param1,
				  UFS_TSF_TM_INPUT);
	else
		trace_ufshcd_upiu(dev_name(hba->dev), str_t,
				  &descp->upiu_rsp.rsp_header,
				  &descp->upiu_rsp.output_param1,
				  UFS_TSF_TM_OUTPUT);
}
static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
					 const struct uic_command *ucmd,
					 enum ufs_trace_str_t str_t)
{
	u32 cmd;

	if (!trace_ufshcd_uic_command_enabled())
		return;

	if (str_t == UFS_CMD_SEND)
		cmd = ucmd->command;
	else
		cmd = ufshcd_readl(hba, REG_UIC_COMMAND);

	trace_ufshcd_uic_command(dev_name(hba->dev), str_t, cmd,
				 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1),
				 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2),
				 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
}
static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
				     enum ufs_trace_str_t str_t)
{
	u64 lba = 0;
	u8 opcode = 0, group_id = 0;
	u32 intr, doorbell;
	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
	struct scsi_cmnd *cmd = lrbp->cmd;
	struct request *rq = scsi_cmd_to_rq(cmd);
	int transfer_len = -1;

	if (!cmd)
		return;

	/* trace UPIU also */
	ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
	if (!trace_ufshcd_command_enabled())
		return;

	opcode = cmd->cmnd[0];

	if (opcode == READ_10 || opcode == WRITE_10) {
		/*
		 * Currently we only fully trace read(10) and write(10) commands
		 */
		transfer_len =
		       be32_to_cpu(lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
		lba = scsi_get_lba(cmd);
		if (opcode == WRITE_10)
			group_id = lrbp->cmd->cmnd[6];
	} else if (opcode == UNMAP) {
		/*
		 * The number of Bytes to be unmapped beginning with the lba.
		 */
		transfer_len = blk_rq_bytes(rq);
		lba = scsi_get_lba(cmd);
	}

	intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
	doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	trace_ufshcd_command(dev_name(hba->dev), str_t, tag,
			doorbell, transfer_len, intr, lba, opcode, group_id);
}
static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
{
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		return;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
				clki->max_freq)
			dev_err(hba->dev, "clk: %s, rate: %u\n",
					clki->name, clki->curr_freq);
	}
}
static void ufshcd_print_evt(struct ufs_hba *hba, u32 id,
			     const char *err_name)
{
	int i;
	bool found = false;
	const struct ufs_event_hist *e;

	if (id >= UFS_EVT_CNT)
		return;

	e = &hba->ufs_stats.event[id];

	for (i = 0; i < UFS_EVENT_HIST_LENGTH; i++) {
		int p = (i + e->pos) % UFS_EVENT_HIST_LENGTH;

		if (e->tstamp[p] == 0)
			continue;
		dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
			e->val[p], div_u64(e->tstamp[p], 1000));
		found = true;
	}

	if (!found)
		dev_err(hba->dev, "No record of %s\n", err_name);
	else
		dev_err(hba->dev, "%s: total cnt=%llu\n", err_name, e->cnt);
}
static void ufshcd_print_evt_hist(struct ufs_hba *hba)
{
	ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");

	ufshcd_print_evt(hba, UFS_EVT_PA_ERR, "pa_err");
	ufshcd_print_evt(hba, UFS_EVT_DL_ERR, "dl_err");
	ufshcd_print_evt(hba, UFS_EVT_NL_ERR, "nl_err");
	ufshcd_print_evt(hba, UFS_EVT_TL_ERR, "tl_err");
	ufshcd_print_evt(hba, UFS_EVT_DME_ERR, "dme_err");
	ufshcd_print_evt(hba, UFS_EVT_AUTO_HIBERN8_ERR,
			 "auto_hibern8_err");
	ufshcd_print_evt(hba, UFS_EVT_FATAL_ERR, "fatal_err");
	ufshcd_print_evt(hba, UFS_EVT_LINK_STARTUP_FAIL,
			 "link_startup_fail");
	ufshcd_print_evt(hba, UFS_EVT_RESUME_ERR, "resume_fail");
	ufshcd_print_evt(hba, UFS_EVT_SUSPEND_ERR,
			 "suspend_fail");
	ufshcd_print_evt(hba, UFS_EVT_WL_RES_ERR, "wlun resume_fail");
	ufshcd_print_evt(hba, UFS_EVT_WL_SUSP_ERR,
			 "wlun suspend_fail");
	ufshcd_print_evt(hba, UFS_EVT_DEV_RESET, "dev_reset");
	ufshcd_print_evt(hba, UFS_EVT_HOST_RESET, "host_reset");
	ufshcd_print_evt(hba, UFS_EVT_ABORT, "task_abort");

	ufshcd_vops_dbg_register_dump(hba);
}
void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
{
	const struct ufshcd_lrb *lrbp;
	int prdt_length;
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutrs) {
		lrbp = &hba->lrb[tag];

		dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
				tag, div_u64(lrbp->issue_time_stamp_local_clock, 1000));
		dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
				tag, div_u64(lrbp->compl_time_stamp_local_clock, 1000));
		dev_err(hba->dev,
			"UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
			tag, (u64)lrbp->utrd_dma_addr);

		ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
				sizeof(struct utp_transfer_req_desc));
		dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
			(u64)lrbp->ucd_req_dma_addr);
		ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
				sizeof(struct utp_upiu_req));
		dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
			(u64)lrbp->ucd_rsp_dma_addr);
		ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
				sizeof(struct utp_upiu_rsp));

		prdt_length = le16_to_cpu(
			lrbp->utr_descriptor_ptr->prd_table_length);
		if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
			prdt_length /= ufshcd_sg_entry_size(hba);

		dev_err(hba->dev,
			"UPIU[%d] - PRDT - %d entries  phys@0x%llx\n",
			tag, prdt_length,
			(u64)lrbp->ucd_prdt_dma_addr);

		if (pr_prdt)
			ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
				ufshcd_sg_entry_size(hba) * prdt_length);
	}
}
static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
{
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutmrs) {
		struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];

		dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
		ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
	}
}
static void ufshcd_print_host_state(struct ufs_hba *hba)
{
	const struct scsi_device *sdev_ufs = hba->ufs_device_wlun;

	dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
	dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
		hba->outstanding_reqs, hba->outstanding_tasks);
	dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
		hba->saved_err, hba->saved_uic_err);
	dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
		hba->pm_op_in_progress, hba->is_sys_suspended);
	dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
		hba->auto_bkops_enabled, hba->host->host_self_blocked);
	dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
	dev_err(hba->dev,
		"last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n",
		div_u64(hba->ufs_stats.last_hibern8_exit_tstamp, 1000),
		hba->ufs_stats.hibern8_exit_cnt);
	dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n",
		div_u64(hba->ufs_stats.last_intr_ts, 1000),
		hba->ufs_stats.last_intr_status);
	dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
		hba->eh_flags, hba->req_abort_count);
	dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n",
		hba->ufs_version, hba->capabilities, hba->caps);
	dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
		hba->dev_quirks);
	if (sdev_ufs)
		dev_err(hba->dev, "UFS dev info: %.8s %.16s rev %.4s\n",
			sdev_ufs->vendor, sdev_ufs->model, sdev_ufs->rev);

	ufshcd_print_clk_freqs(hba);
}
/**
 * ufshcd_print_pwr_info - print power params as saved in hba
 * power info
 * @hba: per-adapter instance
 */
static void ufshcd_print_pwr_info(struct ufs_hba *hba)
{
	static const char * const names[] = {
		"INVALID MODE",
		"FAST MODE",
		"SLOW_MODE",
		"INVALID MODE",
		"FASTAUTO_MODE",
		"SLOWAUTO_MODE",
		"INVALID MODE",
	};

	/*
	 * Using dev_dbg to avoid messages during runtime PM to avoid
	 * never-ending cycles of messages written back to storage by user space
	 * causing runtime resume, causing more messages and so on.
	 */
	dev_dbg(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
		 __func__,
		 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
		 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
		 names[hba->pwr_info.pwr_rx],
		 names[hba->pwr_info.pwr_tx],
		 hba->pwr_info.hs_rate);
}
static void ufshcd_device_reset(struct ufs_hba *hba)
{
	int err;

	err = ufshcd_vops_device_reset(hba);

	if (!err) {
		ufshcd_set_ufs_dev_active(hba);
		if (ufshcd_is_wb_allowed(hba)) {
			hba->dev_info.wb_enabled = false;
			hba->dev_info.wb_buf_flush_enabled = false;
		}
	}
	if (err != -EOPNOTSUPP)
		ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err);
}
void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
{
	if (!us)
		return;

	if (us < 10)
		udelay(us);
	else
		usleep_range(us, us + tolerance);
}
EXPORT_SYMBOL_GPL(ufshcd_delay_us);
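/*
 * Usage sketch (illustrative): waiting out the device reference-clock gating
 * time with a 10 us tolerance; short delays busy-wait, longer ones sleep:
 *
 *	ufshcd_delay_us(hba->dev_info.clk_gating_wait_us, 10);
 */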
/**
 * ufshcd_wait_for_register - wait for register value to change
 * @hba: per-adapter interface
 * @reg: mmio register offset
 * @mask: mask to apply to the read register value
 * @val: value to wait for
 * @interval_us: polling interval in microseconds
 * @timeout_ms: timeout in milliseconds
 *
 * Return:
 * -ETIMEDOUT on error, zero on success.
 */
static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
				u32 val, unsigned long interval_us,
				unsigned long timeout_ms)
{
	int err = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	/* ignore bits that we don't intend to wait on */
	val = val & mask;

	while ((ufshcd_readl(hba, reg) & mask) != val) {
		usleep_range(interval_us, interval_us + 50);
		if (time_after(jiffies, timeout)) {
			if ((ufshcd_readl(hba, reg) & mask) != val)
				err = -ETIMEDOUT;
			break;
		}
	}

	return err;
}
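/*
 * Usage sketch (values illustrative): polling a task-management doorbell bit
 * until it clears, sampling every 1000 us against a TM_CMD_TIMEOUT deadline:
 *
 *	err = ufshcd_wait_for_register(hba, REG_UTP_TASK_REQ_DOOR_BELL,
 *				       1 << tag, 0, 1000, TM_CMD_TIMEOUT);
 */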
/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 * @hba: Pointer to adapter instance
 *
 * Returns interrupt bit mask per version
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
	if (hba->ufs_version == ufshci_version(1, 0))
		return INTERRUPT_MASK_ALL_VER_10;
	if (hba->ufs_version <= ufshci_version(2, 0))
		return INTERRUPT_MASK_ALL_VER_11;

	return INTERRUPT_MASK_ALL_VER_21;
}
/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba: Pointer to adapter instance
 *
 * Returns UFSHCI version supported by the controller
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	u32 ufshci_ver;

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
		ufshci_ver = ufshcd_vops_get_ufs_hci_version(hba);
	else
		ufshci_ver = ufshcd_readl(hba, REG_UFS_VERSION);

	/*
	 * UFSHCI v1.x uses a different version scheme, in order
	 * to allow the use of comparisons with the ufshci_version
	 * function, we convert it to the same scheme as ufs 2.0+.
	 */
	if (ufshci_ver & 0x00010000)
		return ufshci_version(1, ufshci_ver & 0x00000100);

	return ufshci_ver;
}
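/*
 * Informative sketch: once normalized, callers can use plain ordered
 * comparisons regardless of the original register layout, e.g.:
 *
 *	if (hba->ufs_version <= ufshci_version(2, 0))
 *		intr_mask = INTERRUPT_MASK_ALL_VER_11;
 */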
/**
 * ufshcd_is_device_present - Check if any device connected to
 *			      the host controller
 * @hba: pointer to adapter instance
 *
 * Returns true if device present, false if no device detected
 */
static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & DEVICE_PRESENT;
}
/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @lrbp: pointer to local command reference block
 * @cqe: pointer to the completion queue entry
 *
 * This function is used to get the OCS field from UTRD
 * Returns the OCS field in the UTRD
 */
static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp,
				      struct cq_entry *cqe)
{
	if (cqe)
		return le32_to_cpu(cqe->status) & MASK_OCS;

	return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
}
/**
 * ufshcd_utrl_clear() - Clear requests from the controller request list.
 * @hba: per adapter instance
 * @mask: mask with one bit set for each request to be cleared
 */
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 mask)
{
	if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
		mask = ~mask;
	/*
	 * From the UFSHCI specification: "UTP Transfer Request List CLear
	 * Register (UTRLCLR): This field is bit significant. Each bit
	 * corresponds to a slot in the UTP Transfer Request List, where bit 0
	 * corresponds to request slot 0. A bit in this field is set to '0'
	 * by host software to indicate to the host controller that a transfer
	 * request slot is cleared. The host controller
	 * shall free up any resources associated to the request slot
	 * immediately, and shall set the associated bit in UTRLDBR to '0'. The
	 * host software indicates no change to request slots by setting the
	 * associated bits in this field to '1'. Bits in this field shall only
	 * be set '1' or '0' by host software when UTRLRSR is set to '1'."
	 */
	ufshcd_writel(hba, ~mask, REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}
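/*
 * Example (illustrative): clearing request slot 5 writes a word with only
 * bit 5 low; on hosts with UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR the polarity is
 * flipped first, as above:
 *
 *	ufshcd_utrl_clear(hba, 1U << 5);
 */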
/**
 * ufshcd_utmrl_clear - Clear a bit in UTMRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
{
	if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
		ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
	else
		ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
}
/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Returns integer, 0 on Success and positive value if failed
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
	return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
}
/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 * @hba: Pointer to adapter instance
 *
 * This function gets the result of UIC command completion
 * Returns 0 on success, non zero value on error
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
	       MASK_UIC_COMMAND_RESULT;
}

/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 * @hba: Pointer to adapter instance
 *
 * This function gets UIC command argument3
 * Returns 0 on success, non zero value on error
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}
/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 * @ucd_rsp_ptr: pointer to response UPIU
 */
static inline int
ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}

/**
 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * This function gets the response status and scsi_status from response UPIU
 * Returns the response result code.
 */
static inline int
ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}

/*
 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
 *				from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * Return the data segment length.
 */
static inline unsigned int
ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
		MASK_RSP_UPIU_DATA_SEG_LEN;
}

/**
 * ufshcd_is_exception_event - Check if the device raised an exception event
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * The function checks if the device raised an exception event indicated in
 * the Device Information field of response UPIU.
 *
 * Returns true if exception is raised, false otherwise.
 */
static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
			MASK_RSP_EXCEPTION_EVENT;
}
/**
 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 * @hba: per adapter instance
 */
static inline void
ufshcd_reset_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE |
		      INT_AGGR_COUNTER_AND_TIMER_RESET,
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 * @hba: per adapter instance
 * @cnt: Interrupt aggregation counter threshold
 * @tmout: Interrupt aggregation timeout value
 */
static inline void
ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
		      INT_AGGR_COUNTER_THLD_VAL(cnt) |
		      INT_AGGR_TIMEOUT_VAL(tmout),
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 * @hba: per adapter instance
 */
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}
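/*
 * Example (a sketch of typical usage): programming the default aggregation
 * parameters, i.e. a counter threshold of one less than the queue depth and
 * INT_AGGR_DEF_TO (0x02 units of 40 us = 80 us):
 *
 *	ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
 */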
/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers,
 *			When run-stop registers are set to 1, it indicates the
 *			host controller that it can process the requests
 * @hba: per adapter instance
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}
/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	u32 val = CONTROLLER_ENABLE;

	if (ufshcd_crypto_enable(hba))
		val |= CRYPTO_GENERAL_ENABLE;

	ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
}

/**
 * ufshcd_is_hba_active - Get controller state
 * @hba: per adapter instance
 *
 * Returns true if and only if the controller is active.
 */
static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE;
}
u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
{
	/* HCI version 1.0 and 1.1 supports UniPro 1.41 */
	if (hba->ufs_version <= ufshci_version(1, 1))
		return UFS_UNIPRO_VER_1_41;

	return UFS_UNIPRO_VER_1_6;
}
EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);

static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
{
	/*
	 * If both host and device support UniPro ver1.6 or later, PA layer
	 * parameters tuning happens during link startup itself.
	 *
	 * We can manually tune PA layer parameters if either host or device
	 * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
	 * logic simple, we will only do manual tuning if local unipro version
	 * doesn't support ver1.6 or later.
	 */
	return ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6;
}
/**
 * ufshcd_set_clk_freq - set UFS controller clock frequencies
 * @hba: per adapter instance
 * @scale_up: If True, set max possible frequency otherwise set low frequency
 *
 * Returns 0 if successful
 * Returns < 0 for any other errors
 */
static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		goto out;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (scale_up && clki->max_freq) {
				if (clki->curr_freq == clki->max_freq)
					continue;

				ret = clk_set_rate(clki->clk, clki->max_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->max_freq, ret);
					break;
				}
				trace_ufshcd_clk_scaling(dev_name(hba->dev),
						"scaled up", clki->name,
						clki->curr_freq,
						clki->max_freq);

				clki->curr_freq = clki->max_freq;

			} else if (!scale_up && clki->min_freq) {
				if (clki->curr_freq == clki->min_freq)
					continue;

				ret = clk_set_rate(clki->clk, clki->min_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->min_freq, ret);
					break;
				}
				trace_ufshcd_clk_scaling(dev_name(hba->dev),
						"scaled down", clki->name,
						clki->curr_freq,
						clki->min_freq);
				clki->curr_freq = clki->min_freq;
			}
		}
		dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
				clki->name, clk_get_rate(clki->clk));
	}

out:
	return ret;
}
/**
 * ufshcd_scale_clks - scale up or scale down UFS controller clocks
 * @hba: per adapter instance
 * @scale_up: True if scaling up and false if scaling down
 *
 * Returns 0 if successful
 * Returns < 0 for any other errors
 */
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;
	ktime_t start = ktime_get();

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
	if (ret)
		goto out;

	ret = ufshcd_set_clk_freq(hba, scale_up);
	if (ret)
		goto out;

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
	if (ret)
		ufshcd_set_clk_freq(hba, !scale_up);

out:
	trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
			(scale_up ? "up" : "down"),
			ktime_to_us(ktime_sub(ktime_get(), start)), ret);
	return ret;
}
/**
 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
 * @hba: per adapter instance
 * @scale_up: True if scaling up and false if scaling down
 *
 * Returns true if scaling is required, false otherwise.
 */
static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
					       bool scale_up)
{
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		return false;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (scale_up && clki->max_freq) {
				if (clki->curr_freq == clki->max_freq)
					continue;
				return true;
			} else if (!scale_up && clki->min_freq) {
				if (clki->curr_freq == clki->min_freq)
					continue;
				return true;
			}
		}
	}

	return false;
}
/*
 * Determine the number of pending commands by counting the bits in the SCSI
 * device budget maps. This approach has been selected because a bit is set in
 * the budget map before scsi_host_queue_ready() checks the host_self_blocked
 * flag. The host_self_blocked flag can be modified by calling
 * scsi_block_requests() or scsi_unblock_requests().
 */
static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
{
	const struct scsi_device *sdev;
	u32 pending = 0;

	lockdep_assert_held(hba->host->host_lock);
	__shost_for_each_device(sdev, hba->host)
		pending += sbitmap_weight(&sdev->budget_map);

	return pending;
}
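/*
 * Caller pattern (a sketch; the lockdep assertion above enforces it):
 *
 *	spin_lock_irqsave(hba->host->host_lock, flags);
 *	tr_pending = ufshcd_pending_cmds(hba);
 *	spin_unlock_irqrestore(hba->host->host_lock, flags);
 */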
/*
 * Wait until all pending SCSI commands and TMFs have finished or the timeout
 * has expired.
 *
 * Return: 0 upon success; -EBUSY upon timeout.
 */
static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
					u64 wait_timeout_us)
{
	unsigned long flags;
	int ret = 0;
	u32 tm_doorbell;
	u32 tr_pending;
	bool timeout = false, do_last_check = false;
	ktime_t start;

	ufshcd_hold(hba, false);
	spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * Wait for all the outstanding tasks/transfer requests.
	 * Verify by checking the doorbell registers are clear.
	 */
	start = ktime_get();
	do {
		if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
			ret = -EBUSY;
			goto out;
		}

		tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
		tr_pending = ufshcd_pending_cmds(hba);
		if (!tm_doorbell && !tr_pending) {
			timeout = false;
			break;
		} else if (do_last_check) {
			break;
		}

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		io_schedule_timeout(msecs_to_jiffies(20));
		if (ktime_to_us(ktime_sub(ktime_get(), start)) >
		    wait_timeout_us) {
			timeout = true;
			/*
			 * We might have scheduled out for long time so make
			 * sure to check if doorbells are cleared by this time
			 * or not.
			 */
			do_last_check = true;
		}
		spin_lock_irqsave(hba->host->host_lock, flags);
	} while (tm_doorbell || tr_pending);

	if (timeout) {
		dev_err(hba->dev,
			"%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
			__func__, tm_doorbell, tr_pending);
		ret = -EBUSY;
	}
out:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_release(hba);
	return ret;
}
/**
 * ufshcd_scale_gear - scale up/down UFS gear
 * @hba: per adapter instance
 * @scale_up: True for scaling up gear and false for scaling down
 *
 * Returns 0 for success,
 * Returns -EBUSY if scaling can't happen at this time
 * Returns non-zero for any other errors
 */
static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;
	struct ufs_pa_layer_attr new_pwr_info;

	if (scale_up) {
		memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
		       sizeof(struct ufs_pa_layer_attr));
	} else {
		memcpy(&new_pwr_info, &hba->pwr_info,
		       sizeof(struct ufs_pa_layer_attr));

		if (hba->pwr_info.gear_tx > hba->clk_scaling.min_gear ||
		    hba->pwr_info.gear_rx > hba->clk_scaling.min_gear) {
			/* save the current power mode */
			memcpy(&hba->clk_scaling.saved_pwr_info.info,
				&hba->pwr_info,
				sizeof(struct ufs_pa_layer_attr));

			/* scale down gear */
			new_pwr_info.gear_tx = hba->clk_scaling.min_gear;
			new_pwr_info.gear_rx = hba->clk_scaling.min_gear;
		}
	}

	/* check if the power mode needs to be changed or not? */
	ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
	if (ret)
		dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
			__func__, ret,
			hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
			new_pwr_info.gear_tx, new_pwr_info.gear_rx);

	return ret;
}
/*
 * Wait until all pending SCSI commands and TMFs have finished or the timeout
 * has expired.
 *
 * Return: 0 upon success; -EBUSY upon timeout.
 */
static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
{
	int ret = 0;
	/*
	 * make sure that there are no outstanding requests when
	 * clock scaling is in progress
	 */
	ufshcd_scsi_block_requests(hba);
	mutex_lock(&hba->wb_mutex);
	down_write(&hba->clk_scaling_lock);

	if (!hba->clk_scaling.is_allowed ||
	    ufshcd_wait_for_doorbell_clr(hba, timeout_us)) {
		ret = -EBUSY;
		up_write(&hba->clk_scaling_lock);
		mutex_unlock(&hba->wb_mutex);
		ufshcd_scsi_unblock_requests(hba);
		goto out;
	}

	/* let's not get into low power until clock scaling is completed */
	ufshcd_hold(hba, false);

out:
	return ret;
}
static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool scale_up)
{
	up_write(&hba->clk_scaling_lock);

	/* Enable Write Booster if we have scaled up else disable it */
	if (ufshcd_enable_wb_if_scaling_up(hba) && !err)
		ufshcd_wb_toggle(hba, scale_up);

	mutex_unlock(&hba->wb_mutex);

	ufshcd_scsi_unblock_requests(hba);
	ufshcd_release(hba);
}
/**
 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
 * @hba: per adapter instance
 * @scale_up: True for scaling up and false for scaling down
 *
 * Returns 0 for success,
 * Returns -EBUSY if scaling can't happen at this time
 * Returns non-zero for any other errors
 */
static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;

	ret = ufshcd_clock_scaling_prepare(hba, 1 * USEC_PER_SEC);
	if (ret)
		return ret;

	/* scale down the gear before scaling down clocks */
	if (!scale_up) {
		ret = ufshcd_scale_gear(hba, false);
		if (ret)
			goto out_unprepare;
	}

	ret = ufshcd_scale_clks(hba, scale_up);
	if (ret) {
		if (!scale_up)
			ufshcd_scale_gear(hba, true);
		goto out_unprepare;
	}

	/* scale up the gear after scaling up clocks */
	if (scale_up) {
		ret = ufshcd_scale_gear(hba, true);
		if (ret) {
			ufshcd_scale_clks(hba, false);
			goto out_unprepare;
		}
	}

out_unprepare:
	ufshcd_clock_scaling_unprepare(hba, ret, scale_up);
	return ret;
}
static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
					   clk_scaling.suspend_work);
	unsigned long irq_flags;

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return;
	}
	hba->clk_scaling.is_suspended = true;
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	__ufshcd_suspend_clkscaling(hba);
}
static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
					   clk_scaling.resume_work);
	unsigned long irq_flags;

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (!hba->clk_scaling.is_suspended) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return;
	}
	hba->clk_scaling.is_suspended = false;
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	devfreq_resume_device(hba->devfreq);
}
static int ufshcd_devfreq_target(struct device *dev,
				unsigned long *freq, u32 flags)
{
	int ret = 0;
	struct ufs_hba *hba = dev_get_drvdata(dev);
	ktime_t start;
	bool scale_up, sched_clk_scaling_suspend_work = false;
	struct list_head *clk_list = &hba->clk_list_head;
	struct ufs_clk_info *clki;
	unsigned long irq_flags;

	if (!ufshcd_is_clkscaling_supported(hba))
		return -EINVAL;

	clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
	/* Override with the closest supported frequency */
	*freq = (unsigned long) clk_round_rate(clki->clk, *freq);
	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (ufshcd_eh_in_progress(hba)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return 0;
	}

	if (!hba->clk_scaling.active_reqs)
		sched_clk_scaling_suspend_work = true;

	if (list_empty(clk_list)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		goto out;
	}

	/* Decide based on the rounded-off frequency and update */
	scale_up = *freq == clki->max_freq;
	if (!scale_up)
		*freq = clki->min_freq;
	/* Update the frequency */
	if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		ret = 0;
		goto out; /* no state change required */
	}
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	start = ktime_get();
	ret = ufshcd_devfreq_scale(hba, scale_up);

	trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
		(scale_up ? "up" : "down"),
		ktime_to_us(ktime_sub(ktime_get(), start)), ret);

out:
	if (sched_clk_scaling_suspend_work)
		queue_work(hba->clk_scaling.workq,
			   &hba->clk_scaling.suspend_work);

	return ret;
}
static int ufshcd_devfreq_get_dev_status(struct device *dev,
		struct devfreq_dev_status *stat)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
	unsigned long flags;
	struct list_head *clk_list = &hba->clk_list_head;
	struct ufs_clk_info *clki;
	ktime_t curr_t;

	if (!ufshcd_is_clkscaling_supported(hba))
		return -EINVAL;

	memset(stat, 0, sizeof(*stat));

	spin_lock_irqsave(hba->host->host_lock, flags);
	curr_t = ktime_get();
	if (!scaling->window_start_t)
		goto start_window;

	clki = list_first_entry(clk_list, struct ufs_clk_info, list);
	/*
	 * If current frequency is 0, then the ondemand governor considers
	 * there's no initial frequency set. And it always requests to set
	 * to max. frequency.
	 */
	stat->current_frequency = clki->curr_freq;
	if (scaling->is_busy_started)
		scaling->tot_busy_t += ktime_us_delta(curr_t,
				scaling->busy_start_t);

	stat->total_time = ktime_us_delta(curr_t, scaling->window_start_t);
	stat->busy_time = scaling->tot_busy_t;
start_window:
	scaling->window_start_t = curr_t;
	scaling->tot_busy_t = 0;

	if (hba->outstanding_reqs) {
		scaling->busy_start_t = curr_t;
		scaling->is_busy_started = true;
	} else {
		scaling->busy_start_t = 0;
		scaling->is_busy_started = false;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return 0;
}
static int ufshcd_devfreq_init(struct ufs_hba *hba)
{
	struct list_head *clk_list = &hba->clk_list_head;
	struct ufs_clk_info *clki;
	struct devfreq *devfreq;
	int ret;

	/* Skip devfreq if we don't have any clocks in the list */
	if (list_empty(clk_list))
		return 0;

	clki = list_first_entry(clk_list, struct ufs_clk_info, list);
	dev_pm_opp_add(hba->dev, clki->min_freq, 0);
	dev_pm_opp_add(hba->dev, clki->max_freq, 0);

	ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile,
					 &hba->vps->ondemand_data);
	devfreq = devfreq_add_device(hba->dev,
			&hba->vps->devfreq_profile,
			DEVFREQ_GOV_SIMPLE_ONDEMAND,
			&hba->vps->ondemand_data);
	if (IS_ERR(devfreq)) {
		ret = PTR_ERR(devfreq);
		dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);

		dev_pm_opp_remove(hba->dev, clki->min_freq);
		dev_pm_opp_remove(hba->dev, clki->max_freq);
		return ret;
	}

	hba->devfreq = devfreq;

	return 0;
}
static void ufshcd_devfreq_remove(struct ufs_hba *hba)
{
	struct list_head *clk_list = &hba->clk_list_head;
	struct ufs_clk_info *clki;

	if (!hba->devfreq)
		return;

	devfreq_remove_device(hba->devfreq);
	hba->devfreq = NULL;

	clki = list_first_entry(clk_list, struct ufs_clk_info, list);
	dev_pm_opp_remove(hba->dev, clki->min_freq);
	dev_pm_opp_remove(hba->dev, clki->max_freq);
}
static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
	unsigned long flags;

	devfreq_suspend_device(hba->devfreq);
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_scaling.window_start_t = 0;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
	unsigned long flags;
	bool suspend = false;

	cancel_work_sync(&hba->clk_scaling.suspend_work);
	cancel_work_sync(&hba->clk_scaling.resume_work);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!hba->clk_scaling.is_suspended) {
		suspend = true;
		hba->clk_scaling.is_suspended = true;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (suspend)
		__ufshcd_suspend_clkscaling(hba);
}
static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
{
	unsigned long flags;
	bool resume = false;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_scaling.is_suspended) {
		resume = true;
		hba->clk_scaling.is_suspended = false;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (resume)
		devfreq_resume_device(hba->devfreq);
}
static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", hba->clk_scaling.is_enabled);
}
static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	u32 value;
	int err = 0;

	if (kstrtou32(buf, 0, &value))
		return -EINVAL;

	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		err = -EBUSY;
		goto out;
	}

	value = !!value;
	if (value == hba->clk_scaling.is_enabled)
		goto out;

	ufshcd_rpm_get_sync(hba);
	ufshcd_hold(hba, false);

	hba->clk_scaling.is_enabled = value;

	if (value) {
		ufshcd_resume_clkscaling(hba);
	} else {
		ufshcd_suspend_clkscaling(hba);
		err = ufshcd_devfreq_scale(hba, true);
		if (err)
			dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
					__func__, err);
	}

	ufshcd_release(hba);
	ufshcd_rpm_put_sync(hba);
out:
	up(&hba->host_sem);
	return err ? err : count;
}
static void ufshcd_init_clk_scaling_sysfs(struct ufs_hba *hba)
{
	hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
	hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
	sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
	hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
	hba->clk_scaling.enable_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
}
static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba)
{
	if (hba->clk_scaling.enable_attr.attr.name)
		device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
}
static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
{
	char wq_name[sizeof("ufs_clkscaling_00")];

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	if (!hba->clk_scaling.min_gear)
		hba->clk_scaling.min_gear = UFS_HS_G1;

	INIT_WORK(&hba->clk_scaling.suspend_work,
		  ufshcd_clk_scaling_suspend_work);
	INIT_WORK(&hba->clk_scaling.resume_work,
		  ufshcd_clk_scaling_resume_work);

	snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
		 hba->host->host_no);
	hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);

	hba->clk_scaling.is_initialized = true;
}
static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
{
	if (!hba->clk_scaling.is_initialized)
		return;

	ufshcd_remove_clk_scaling_sysfs(hba);
	destroy_workqueue(hba->clk_scaling.workq);
	ufshcd_devfreq_remove(hba);
	hba->clk_scaling.is_initialized = false;
}
static void ufshcd_ungate_work(struct work_struct *work)
{
	int ret;
	unsigned long flags;
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.ungate_work);

	cancel_delayed_work_sync(&hba->clk_gating.gate_work);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == CLKS_ON) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		return;
	}

	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_hba_vreg_set_hpm(hba);
	ufshcd_setup_clocks(hba, true);

	ufshcd_enable_irq(hba);

	/* Exit from hibern8 */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		/* Prevent gating in this path */
		hba->clk_gating.is_suspended = true;
		if (ufshcd_is_link_hibern8(hba)) {
			ret = ufshcd_uic_hibern8_exit(hba);
			if (ret)
				dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
					__func__, ret);
			else
				ufshcd_set_link_active(hba);
		}
		hba->clk_gating.is_suspended = false;
	}

	ufshcd_scsi_unblock_requests(hba);
}
/**
 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
 * Also, exit from hibern8 mode and set the link as active.
 * @hba: per adapter instance
 * @async: This indicates whether caller should ungate clocks asynchronously.
 */
int ufshcd_hold(struct ufs_hba *hba, bool async)
{
	int rc = 0;
	bool flush_result;
	unsigned long flags;

	if (!ufshcd_is_clkgating_allowed(hba) ||
	    !hba->clk_gating.is_initialized)
		goto out;
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.active_reqs++;

start:
	switch (hba->clk_gating.state) {
	case CLKS_ON:
		/*
		 * Wait for the ungate work to complete if in progress.
		 * Though the clocks may be in ON state, the link could
		 * still be in hibner8 state if hibern8 is allowed
		 * during clock gating.
		 * Make sure we exit hibern8 state also in addition to
		 * clocks being ON.
		 */
		if (ufshcd_can_hibern8_during_gating(hba) &&
		    ufshcd_is_link_hibern8(hba)) {
			if (async) {
				rc = -EAGAIN;
				hba->clk_gating.active_reqs--;
				break;
			}
			spin_unlock_irqrestore(hba->host->host_lock, flags);
			flush_result = flush_work(&hba->clk_gating.ungate_work);
			if (hba->clk_gating.is_suspended && !flush_result)
				goto out;
			spin_lock_irqsave(hba->host->host_lock, flags);
			goto start;
		}
		break;
	case REQ_CLKS_OFF:
		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
			hba->clk_gating.state = CLKS_ON;
			trace_ufshcd_clk_gating(dev_name(hba->dev),
						hba->clk_gating.state);
			break;
		}
		/*
		 * If we are here, it means gating work is either done or
		 * currently running. Hence, fall through to cancel gating
		 * work and to enable clocks.
		 */
		fallthrough;
	case CLKS_OFF:
		hba->clk_gating.state = REQ_CLKS_ON;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
		if (queue_work(hba->clk_gating.clk_gating_workq,
			       &hba->clk_gating.ungate_work))
			ufshcd_scsi_block_requests(hba);
		/*
		 * fall through to check if we should wait for this
		 * work to be done or not.
		 */
		fallthrough;
	case REQ_CLKS_ON:
		if (async) {
			rc = -EAGAIN;
			hba->clk_gating.active_reqs--;
			break;
		}

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		flush_work(&hba->clk_gating.ungate_work);
		/* Make sure state is CLKS_ON before returning */
		spin_lock_irqsave(hba->host->host_lock, flags);
		goto start;
	default:
		dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
				__func__, hba->clk_gating.state);
		break;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return rc;
}
EXPORT_SYMBOL_GPL(ufshcd_hold);
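/*
 * Typical pairing (illustrative): each successful ufshcd_hold() must be
 * balanced by ufshcd_release() so that gate_work can eventually run:
 *
 *	ufshcd_hold(hba, false);
 *	... issue work that requires the clocks to stay on ...
 *	ufshcd_release(hba);
 */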
static void ufshcd_gate_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.gate_work.work);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * In case you are here to cancel this work the gating state
	 * would be marked as REQ_CLKS_ON. In this case save time by
	 * skipping the gating work and exit after changing the clock
	 * state to CLKS_ON.
	 */
	if (hba->clk_gating.is_suspended ||
		(hba->clk_gating.state != REQ_CLKS_OFF)) {
		hba->clk_gating.state = CLKS_ON;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
		goto rel_lock;
	}

	if (hba->clk_gating.active_reqs
		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
		|| hba->outstanding_reqs || hba->outstanding_tasks
		|| hba->active_uic_cmd || hba->uic_async_done)
		goto rel_lock;

	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* put the link into hibern8 mode before turning off clocks */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		ret = ufshcd_uic_hibern8_enter(hba);
		if (ret) {
			hba->clk_gating.state = CLKS_ON;
			dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
					__func__, ret);
			trace_ufshcd_clk_gating(dev_name(hba->dev),
						hba->clk_gating.state);
			goto out;
		}
		ufshcd_set_link_hibern8(hba);
	}

	ufshcd_disable_irq(hba);

	ufshcd_setup_clocks(hba, false);

	/* Put the host controller in low power mode if possible */
	ufshcd_hba_vreg_set_lpm(hba);
	/*
	 * In case you are here to cancel this work the gating state
	 * would be marked as REQ_CLKS_ON. In this case keep the state
	 * as REQ_CLKS_ON which would anyway imply that clocks are off
	 * and a request to turn them on is pending. By doing this way,
	 * we keep the state machine in tact and this would ultimately
	 * prevent from doing cancel work multiple times when there are
	 * new requests arriving before the current cancel work is done.
	 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == REQ_CLKS_OFF) {
		hba->clk_gating.state = CLKS_OFF;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
	}
rel_lock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return;
}
/* host lock must be held before calling this variant */
static void __ufshcd_release(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.active_reqs--;

	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
	    hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
	    hba->outstanding_tasks || !hba->clk_gating.is_initialized ||
	    hba->active_uic_cmd || hba->uic_async_done ||
	    hba->clk_gating.state == CLKS_OFF)
		return;

	hba->clk_gating.state = REQ_CLKS_OFF;
	trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
	queue_delayed_work(hba->clk_gating.clk_gating_workq,
			   &hba->clk_gating.gate_work,
			   msecs_to_jiffies(hba->clk_gating.delay_ms));
}
void ufshcd_release(struct ufs_hba *hba)
{
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	__ufshcd_release(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_release);
static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", hba->clk_gating.delay_ms);
}
void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.delay_ms = value;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_clkgate_delay_set);
static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long value;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	ufshcd_clkgate_delay_set(dev, value);
	return count;
}
static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", hba->clk_gating.is_enabled);
}
static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags;
	u32 value;

	if (kstrtou32(buf, 0, &value))
		return -EINVAL;

	value = !!value;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (value == hba->clk_gating.is_enabled)
		goto out;

	if (value)
		__ufshcd_release(hba);
	else
		hba->clk_gating.active_reqs++;

	hba->clk_gating.is_enabled = value;
out:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return count;
}
static void ufshcd_init_clk_gating_sysfs(struct ufs_hba *hba)
{
	hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
	hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
	sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
	hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
	hba->clk_gating.delay_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");

	hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
	hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
	sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
	hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
	hba->clk_gating.enable_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
}
static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba)
{
	if (hba->clk_gating.delay_attr.attr.name)
		device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
	if (hba->clk_gating.enable_attr.attr.name)
		device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
}
static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
	char wq_name[sizeof("ufs_clk_gating_00")];

	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.state = CLKS_ON;

	hba->clk_gating.delay_ms = 150;
	INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
	INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);

	snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
		 hba->host->host_no);
	hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
					WQ_MEM_RECLAIM | WQ_HIGHPRI);

	ufshcd_init_clk_gating_sysfs(hba);

	hba->clk_gating.is_enabled = true;
	hba->clk_gating.is_initialized = true;
}

static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
{
	if (!hba->clk_gating.is_initialized)
		return;

	ufshcd_remove_clk_gating_sysfs(hba);

	/* Ungate the clock if necessary. */
	ufshcd_hold(hba, false);
	hba->clk_gating.is_initialized = false;
	ufshcd_release(hba);

	destroy_workqueue(hba->clk_gating.clk_gating_workq);
}
static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
{
	bool queue_resume_work = false;
	ktime_t curr_t = ktime_get();
	unsigned long flags;

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!hba->clk_scaling.active_reqs++)
		queue_resume_work = true;

	if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		return;
	}

	if (queue_resume_work)
		queue_work(hba->clk_scaling.workq,
			   &hba->clk_scaling.resume_work);

	if (!hba->clk_scaling.window_start_t) {
		hba->clk_scaling.window_start_t = curr_t;
		hba->clk_scaling.tot_busy_t = 0;
		hba->clk_scaling.is_busy_started = false;
	}

	if (!hba->clk_scaling.is_busy_started) {
		hba->clk_scaling.busy_start_t = curr_t;
		hba->clk_scaling.is_busy_started = true;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}

static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
{
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
	unsigned long flags;

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_scaling.active_reqs--;
	if (!hba->outstanding_reqs && scaling->is_busy_started) {
		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
					scaling->busy_start_t));
		scaling->busy_start_t = 0;
		scaling->is_busy_started = false;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
static inline int ufshcd_monitor_opcode2dir(u8 opcode)
{
	if (opcode == READ_6 || opcode == READ_10 || opcode == READ_16)
		return READ;
	else if (opcode == WRITE_6 || opcode == WRITE_10 || opcode == WRITE_16)
		return WRITE;
	else
		return -EINVAL;
}

static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba,
						struct ufshcd_lrb *lrbp)
{
	const struct ufs_hba_monitor *m = &hba->monitor;

	return (m->enabled && lrbp && lrbp->cmd &&
		(!m->chunk_size || m->chunk_size == lrbp->cmd->sdb.length) &&
		ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp));
}

static void ufshcd_start_monitor(struct ufs_hba *hba,
				 const struct ufshcd_lrb *lrbp)
{
	int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (dir >= 0 && hba->monitor.nr_queued[dir]++ == 0)
		hba->monitor.busy_start_ts[dir] = ktime_get();
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
static void ufshcd_update_monitor(struct ufs_hba *hba, const struct ufshcd_lrb *lrbp)
{
	int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) {
		const struct request *req = scsi_cmd_to_rq(lrbp->cmd);
		struct ufs_hba_monitor *m = &hba->monitor;
		ktime_t now, inc, lat;

		now = lrbp->compl_time_stamp;
		inc = ktime_sub(now, m->busy_start_ts[dir]);
		m->total_busy[dir] = ktime_add(m->total_busy[dir], inc);
		m->nr_sec_rw[dir] += blk_rq_sectors(req);

		/* Update latencies */
		m->nr_req[dir]++;
		lat = ktime_sub(now, lrbp->issue_time_stamp);
		m->lat_sum[dir] += lat;
		if (m->lat_max[dir] < lat || !m->lat_max[dir])
			m->lat_max[dir] = lat;
		if (m->lat_min[dir] > lat || !m->lat_min[dir])
			m->lat_min[dir] = lat;

		m->nr_queued[dir]--;
		/* Push forward the busy start of monitor */
		m->busy_start_ts[dir] = now;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
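/*
 * Worked example for the bookkeeping above (illustrative, values assumed):
 * if a READ was issued at t = 100us and completed at t = 350us, lat = 250us
 * is added to lat_sum[READ], and busy_start_ts[READ] is pushed forward to
 * 350us so the busy time of a later, overlapping command in the same
 * direction is not counted twice.
 */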
/**
 * ufshcd_send_command - Send SCSI or device management commands
 * @hba: per adapter instance
 * @task_tag: Task tag of the command
 * @hwq: pointer to hardware queue instance
 */
void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag,
			 struct ufs_hw_queue *hwq)
{
	struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
	unsigned long flags;

	lrbp->issue_time_stamp = ktime_get();
	lrbp->issue_time_stamp_local_clock = local_clock();
	lrbp->compl_time_stamp = ktime_set(0, 0);
	lrbp->compl_time_stamp_local_clock = 0;
	ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
	ufshcd_clk_scaling_start_busy(hba);
	if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
		ufshcd_start_monitor(hba, lrbp);

	if (is_mcq_enabled(hba)) {
		int utrd_size = sizeof(struct utp_transfer_req_desc);

		spin_lock(&hwq->sq_lock);
		memcpy(hwq->sqe_base_addr + (hwq->sq_tail_slot * utrd_size),
		       lrbp->utr_descriptor_ptr, utrd_size);
		ufshcd_inc_sq_tail(hwq);
		spin_unlock(&hwq->sq_lock);
	} else {
		spin_lock_irqsave(&hba->outstanding_lock, flags);
		if (hba->vops && hba->vops->setup_xfer_req)
			hba->vops->setup_xfer_req(hba, lrbp->task_tag,
						  !!lrbp->cmd);
		__set_bit(lrbp->task_tag, &hba->outstanding_reqs);
		ufshcd_writel(hba, 1 << lrbp->task_tag,
			      REG_UTP_TRANSFER_REQ_DOOR_BELL);
		spin_unlock_irqrestore(&hba->outstanding_lock, flags);
	}
}
/**
 * ufshcd_copy_sense_data - Copy sense data in case of check condition
 * @lrbp: pointer to local reference block
 */
static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
{
	u8 *const sense_buffer = lrbp->cmd->sense_buffer;
	int len;

	if (sense_buffer &&
	    ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
		int len_to_copy;

		len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
		len_to_copy = min_t(int, UFS_SENSE_SIZE, len);

		memcpy(sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
		       len_to_copy);
	}
}
/**
 * ufshcd_copy_query_response() - Copy the Query Response and the data
 * descriptor
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 */
int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);

	/* Get the descriptor */
	if (hba->dev_cmd.query.descriptor &&
	    lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
		u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
				GENERAL_UPIU_REQUEST_SIZE;
		u16 resp_len;
		u16 buf_len;

		/* data segment length */
		resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
			   MASK_QUERY_DATA_SEG_LEN;
		buf_len = be16_to_cpu(
				hba->dev_cmd.query.request.upiu_req.length);
		if (likely(buf_len >= resp_len)) {
			memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
		} else {
			dev_warn(hba->dev,
				 "%s: rsp size %d is bigger than buffer size %d",
				 __func__, resp_len, buf_len);
			return -EINVAL;
		}
	}

	return 0;
}
/**
 * ufshcd_hba_capabilities - Read controller capabilities
 * @hba: per adapter instance
 *
 * Return: 0 on success, negative on error.
 */
static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
{
	int err;

	hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
	if (hba->quirks & UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS)
		hba->capabilities &= ~MASK_64_ADDRESSING_SUPPORT;

	/* nutrs and nutmrs are 0 based values */
	hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
	hba->nutmrs =
	((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
	hba->reserved_slot = hba->nutrs - 1;

	/* Read crypto capabilities */
	err = ufshcd_hba_init_crypto_capabilities(hba);
	if (err) {
		dev_err(hba->dev, "crypto setup failed\n");
		return err;
	}

	hba->mcq_sup = FIELD_GET(MASK_MCQ_SUPPORT, hba->capabilities);
	if (!hba->mcq_sup)
		return 0;

	hba->mcq_capabilities = ufshcd_readl(hba, REG_MCQCAP);
	hba->ext_iid_sup = FIELD_GET(MASK_EXT_IID_SUPPORT,
				     hba->mcq_capabilities);

	return 0;
}
/**
 * ufshcd_ready_for_uic_cmd - Check if controller is ready
 *                            to accept UIC commands
 * @hba: per adapter instance
 * Return true on success, else false
 */
static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY;
}

/**
 * ufshcd_get_upmcrs - Get the power mode change request status
 * @hba: Pointer to adapter instance
 *
 * This function gets the UPMCRS field of HCS register
 * Returns value of UPMCRS field
 */
static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
}
/**
 * ufshcd_dispatch_uic_cmd - Dispatch a UIC command to the Unipro layer
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 */
static inline void
ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	lockdep_assert_held(&hba->uic_cmd_mutex);

	WARN_ON(hba->active_uic_cmd);

	hba->active_uic_cmd = uic_cmd;

	/* Write Args */
	ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
	ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
	ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);

	ufshcd_add_uic_command_trace(hba, uic_cmd, UFS_CMD_SEND);

	/* Write UIC Cmd */
	ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
		      REG_UIC_COMMAND);
}
/**
 * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Returns 0 only on success.
 */
static int
ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	lockdep_assert_held(&hba->uic_cmd_mutex);

	if (wait_for_completion_timeout(&uic_cmd->done,
					msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
		ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
	} else {
		ret = -ETIMEDOUT;
		dev_err(hba->dev,
			"uic cmd 0x%x with arg3 0x%x completion timeout\n",
			uic_cmd->command, uic_cmd->argument3);

		if (!uic_cmd->cmd_active) {
			dev_err(hba->dev, "%s: UIC cmd has been completed, return the result\n",
				__func__);
			ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
		}
	}

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->active_uic_cmd = NULL;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return ret;
}
/**
 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 * @completion: initialize the completion only if this is set to true
 *
 * Returns 0 only on success.
 */
static int
__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
		      bool completion)
{
	lockdep_assert_held(&hba->uic_cmd_mutex);
	lockdep_assert_held(hba->host->host_lock);

	if (!ufshcd_ready_for_uic_cmd(hba)) {
		dev_err(hba->dev,
			"Controller not ready to accept UIC commands\n");
		return -EIO;
	}

	if (completion)
		init_completion(&uic_cmd->done);

	uic_cmd->cmd_active = 1;
	ufshcd_dispatch_uic_cmd(hba, uic_cmd);

	return 0;
}
/**
 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Returns 0 only on success.
 */
int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
		return 0;

	ufshcd_hold(hba, false);
	mutex_lock(&hba->uic_cmd_mutex);
	ufshcd_add_delay_before_dme_cmd(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	if (!ret)
		ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);

	mutex_unlock(&hba->uic_cmd_mutex);

	ufshcd_release(hba);
	return ret;
}
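/*
 * Usage sketch (illustrative only): issuing a raw DME_GET through
 * ufshcd_send_uic_cmd(). Callers in this file normally use the
 * ufshcd_dme_get()/ufshcd_dme_set() wrappers instead; the variables below
 * are assumed to exist in the caller's context.
 *
 *	struct uic_command uic_cmd = {0};
 *	u32 value;
 *	int ret;
 *
 *	uic_cmd.command = UIC_CMD_DME_GET;
 *	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
 *	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
 *	if (!ret)
 *		value = uic_cmd.argument3;
 */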
/**
 * ufshcd_sgl_to_prdt - SG list to PRDT (Physical Region Description Table, 4DW format)
 * @hba: per-adapter instance
 * @lrbp: pointer to local reference block
 * @sg_entries: The number of sg lists actually used
 * @sg_list: Pointer to SG list
 */
static void ufshcd_sgl_to_prdt(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int sg_entries,
			       struct scatterlist *sg_list)
{
	struct ufshcd_sg_entry *prd;
	struct scatterlist *sg;
	int i;

	if (sg_entries) {

		if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
			lrbp->utr_descriptor_ptr->prd_table_length =
				cpu_to_le16(sg_entries * ufshcd_sg_entry_size(hba));
		else
			lrbp->utr_descriptor_ptr->prd_table_length = cpu_to_le16(sg_entries);

		prd = lrbp->ucd_prdt_ptr;

		for_each_sg(sg_list, sg, sg_entries, i) {
			const unsigned int len = sg_dma_len(sg);

			/*
			 * From the UFSHCI spec: "Data Byte Count (DBC): A '0'
			 * based value that indicates the length, in bytes, of
			 * the data block. A maximum of length of 256KB may
			 * exist for any entry. Bits 1:0 of this field shall be
			 * 11b to indicate Dword granularity. A value of '3'
			 * indicates 4 bytes, '7' indicates 8 bytes, etc."
			 */
			WARN_ONCE(len > 256 * 1024, "len = %#x\n", len);
			prd->size = cpu_to_le32(len - 1);
			prd->addr = cpu_to_le64(sg->dma_address);
			prd->reserved = 0;
			prd = (void *)prd + ufshcd_sg_entry_size(hba);
		}
	} else {
		lrbp->utr_descriptor_ptr->prd_table_length = 0;
	}
}
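/*
 * Worked example of the DBC encoding above (values assumed): a 4 KiB DMA
 * segment is stored as prd->size = cpu_to_le32(4096 - 1) = 0xFFF, whose low
 * two bits are 11b, which is exactly the Dword granularity the spec quote
 * requires.
 */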
/**
 * ufshcd_map_sg - Map scatter-gather list to prdt
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 *
 * Returns 0 in case of success, non-zero value in case of failure
 */
static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct scsi_cmnd *cmd = lrbp->cmd;
	int sg_segments = scsi_dma_map(cmd);

	if (sg_segments < 0)
		return sg_segments;

	ufshcd_sgl_to_prdt(hba, lrbp, sg_segments, scsi_sglist(cmd));

	return 0;
}
/**
 * ufshcd_enable_intr - enable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == ufshci_version(1, 0)) {
		u32 rw;
		rw = set & INTERRUPT_MASK_RW_VER_10;
		set = rw | ((set ^ intrs) & intrs);
	} else {
		set |= intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}

/**
 * ufshcd_disable_intr - disable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == ufshci_version(1, 0)) {
		u32 rw;
		rw = (set & INTERRUPT_MASK_RW_VER_10) &
			~(intrs & INTERRUPT_MASK_RW_VER_10);
		set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
	} else {
		set &= ~intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}
/**
 * ufshcd_prepare_req_desc_hdr - Fill the UTP transfer request descriptor
 * header according to the request
 * @lrbp: pointer to local reference block
 * @upiu_flags: flags required in the header
 * @cmd_dir: requests data direction
 * @ehs_length: Total EHS Length (in 32-byte units of all Extra Header Segments)
 */
static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, u8 *upiu_flags,
					enum dma_data_direction cmd_dir, int ehs_length)
{
	struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
	u32 data_direction;
	u32 dword_0;
	u32 dword_1 = 0;
	u32 dword_3 = 0;

	if (cmd_dir == DMA_FROM_DEVICE) {
		data_direction = UTP_DEVICE_TO_HOST;
		*upiu_flags = UPIU_CMD_FLAGS_READ;
	} else if (cmd_dir == DMA_TO_DEVICE) {
		data_direction = UTP_HOST_TO_DEVICE;
		*upiu_flags = UPIU_CMD_FLAGS_WRITE;
	} else {
		data_direction = UTP_NO_DATA_TRANSFER;
		*upiu_flags = UPIU_CMD_FLAGS_NONE;
	}

	dword_0 = data_direction | (lrbp->command_type << UPIU_COMMAND_TYPE_OFFSET) |
		ehs_length << 8;
	if (lrbp->intr_cmd)
		dword_0 |= UTP_REQ_DESC_INT_CMD;

	/* Prepare crypto related dwords */
	ufshcd_prepare_req_desc_hdr_crypto(lrbp, &dword_0, &dword_1, &dword_3);

	/* Transfer request descriptor header fields */
	req_desc->header.dword_0 = cpu_to_le32(dword_0);
	req_desc->header.dword_1 = cpu_to_le32(dword_1);
	/*
	 * assigning invalid value for command status. Controller
	 * updates OCS on command completion, with the command
	 * status
	 */
	req_desc->header.dword_2 =
		cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
	req_desc->header.dword_3 = cpu_to_le32(dword_3);

	req_desc->prd_table_length = 0;
}
/**
 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
 * for scsi commands
 * @lrbp: local reference block pointer
 * @upiu_flags: flags
 */
void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
{
	struct scsi_cmnd *cmd = lrbp->cmd;
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
	unsigned short cdb_len;

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
				UPIU_TRANSACTION_COMMAND, upiu_flags,
				lrbp->lun, lrbp->task_tag);
	ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
				UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);

	/* Total EHS length and Data segment length will be zero */
	ucd_req_ptr->header.dword_2 = 0;

	ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);

	cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
	memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
	memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
/**
 * ufshcd_prepare_utp_query_req_upiu() - fill the utp_transfer_req_desc for query request
 * @hba: UFS hba
 * @lrbp: local reference block pointer
 * @upiu_flags: flags
 */
static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
				struct ufshcd_lrb *lrbp, u8 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
	struct ufs_query *query = &hba->dev_cmd.query;
	u16 len = be16_to_cpu(query->request.upiu_req.length);

	/* Query request header */
	ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
			UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
			lrbp->lun, lrbp->task_tag);
	ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
			0, query->request.query_func, 0, 0);

	/* Data segment length only needed for WRITE_DESC */
	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
		ucd_req_ptr->header.dword_2 =
			UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
	else
		ucd_req_ptr->header.dword_2 = 0;

	/* Copy the Query Request buffer as is */
	memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
			QUERY_OSF_SIZE);

	/* Copy the Descriptor */
	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
		memcpy(ucd_req_ptr + 1, query->descriptor, len);

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;

	memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 =
		UPIU_HEADER_DWORD(
			UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
	/* clear rest of the fields of basic header */
	ucd_req_ptr->header.dword_1 = 0;
	ucd_req_ptr->header.dword_2 = 0;

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
/**
 * ufshcd_compose_devman_upiu - UFS Protocol Information Unit(UPIU)
 *			      for Device Management Purposes
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 */
static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
				      struct ufshcd_lrb *lrbp)
{
	u8 upiu_flags;
	int ret = 0;

	if (hba->ufs_version <= ufshci_version(1, 1))
		lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
	else
		lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;

	ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0);
	if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
		ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
	else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
		ufshcd_prepare_utp_nop_upiu(lrbp);
	else
		ret = -EINVAL;

	return ret;
}
/**
 * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit(UPIU)
 *			   for SCSI Purposes
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 */
static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	u8 upiu_flags;
	int ret = 0;

	if (hba->ufs_version <= ufshci_version(1, 1))
		lrbp->command_type = UTP_CMD_TYPE_SCSI;
	else
		lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;

	if (likely(lrbp->cmd)) {
		ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, lrbp->cmd->sc_data_direction, 0);
		ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
	} else {
		ret = -EINVAL;
	}

	return ret;
}

/**
 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
 * @upiu_wlun_id: UPIU W-LUN id
 *
 * Returns SCSI W-LUN id
 */
static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
{
	return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
}

static inline bool is_device_wlun(struct scsi_device *sdev)
{
	return sdev->lun ==
		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN);
}
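/*
 * Worked example for the mapping above (illustrative; macro values quoted as
 * assumptions from ufs.h/scsi.h): with UFS_UPIU_UFS_DEVICE_WLUN = 0xD0,
 * UFS_UPIU_WLUN_ID = 0x80 and SCSI_W_LUN_BASE = 0xb000, the device well-known
 * LU maps to (0xD0 & ~0x80) | 0xb000 = 0xb050 on the SCSI side.
 */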
/*
 * Associate the UFS controller queue with the default and poll HCTX types.
 * Initialize the mq_map[] arrays.
 */
static void ufshcd_map_queues(struct Scsi_Host *shost)
{
	struct ufs_hba *hba = shost_priv(shost);
	int i, queue_offset = 0;

	if (!is_mcq_supported(hba)) {
		hba->nr_queues[HCTX_TYPE_DEFAULT] = 1;
		hba->nr_queues[HCTX_TYPE_READ] = 0;
		hba->nr_queues[HCTX_TYPE_POLL] = 1;
		hba->nr_hw_queues = 1;
	}

	for (i = 0; i < shost->nr_maps; i++) {
		struct blk_mq_queue_map *map = &shost->tag_set.map[i];

		map->nr_queues = hba->nr_queues[i];
		if (!map->nr_queues)
			continue;
		map->queue_offset = queue_offset;
		if (i == HCTX_TYPE_POLL && !is_mcq_supported(hba))
			map->queue_offset = 0;

		blk_mq_map_queues(map);
		queue_offset += map->nr_queues;
	}
}
static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
{
	struct utp_transfer_cmd_desc *cmd_descp = (void *)hba->ucdl_base_addr +
		i * sizeof_utp_transfer_cmd_desc(hba);
	struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
	dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
		i * sizeof_utp_transfer_cmd_desc(hba);
	u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
				       response_upiu);
	u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);

	lrb->utr_descriptor_ptr = utrdlp + i;
	lrb->utrd_dma_addr = hba->utrdl_dma_addr +
		i * sizeof(struct utp_transfer_req_desc);
	lrb->ucd_req_ptr = (struct utp_upiu_req *)cmd_descp->command_upiu;
	lrb->ucd_req_dma_addr = cmd_desc_element_addr;
	lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp->response_upiu;
	lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
	lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp->prd_table;
	lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
}
/**
 * ufshcd_queuecommand - main entry point for SCSI requests
 * @host: SCSI host pointer
 * @cmd: command from SCSI Midlayer
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	struct ufs_hba *hba = shost_priv(host);
	int tag = scsi_cmd_to_rq(cmd)->tag;
	struct ufshcd_lrb *lrbp;
	int err = 0;
	struct ufs_hw_queue *hwq = NULL;

	WARN_ONCE(tag < 0 || tag >= hba->nutrs, "Invalid tag %d\n", tag);

	/*
	 * Allows the UFS error handler to wait for prior ufshcd_queuecommand()
	 * calls.
	 */
	rcu_read_lock();

	switch (hba->ufshcd_state) {
	case UFSHCD_STATE_OPERATIONAL:
		break;
	case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
		/*
		 * SCSI error handler can call ->queuecommand() while UFS error
		 * handler is in progress. Error interrupts could change the
		 * state from UFSHCD_STATE_RESET to
		 * UFSHCD_STATE_EH_SCHEDULED_NON_FATAL. Prevent requests
		 * being issued in that case.
		 */
		if (ufshcd_eh_in_progress(hba)) {
			err = SCSI_MLQUEUE_HOST_BUSY;
			goto out;
		}
		break;
	case UFSHCD_STATE_EH_SCHEDULED_FATAL:
		/*
		 * pm_runtime_get_sync() is used at error handling preparation
		 * stage. If a scsi cmd, e.g. the SSU cmd, is sent from hba's
		 * PM ops, it can never be finished if we let SCSI layer keep
		 * retrying it, which gets err handler stuck forever. Neither
		 * can we let the scsi cmd pass through, because UFS is in bad
		 * state, the scsi cmd may eventually time out, which will get
		 * err handler blocked for too long. So, just fail the scsi cmd
		 * sent from PM ops, err handler can recover PM error anyways.
		 */
		if (hba->pm_op_in_progress) {
			hba->force_reset = true;
			set_host_byte(cmd, DID_BAD_TARGET);
			scsi_done(cmd);
			goto out;
		}
		fallthrough;
	case UFSHCD_STATE_RESET:
		err = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	case UFSHCD_STATE_ERROR:
		set_host_byte(cmd, DID_ERROR);
		scsi_done(cmd);
		goto out;
	}

	hba->req_abort_count = 0;

	err = ufshcd_hold(hba, true);
	if (err) {
		err = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	WARN_ON(ufshcd_is_clkgating_allowed(hba) &&
		(hba->clk_gating.state != CLKS_ON));

	lrbp = &hba->lrb[tag];
	WARN_ON(lrbp->cmd);
	lrbp->cmd = cmd;
	lrbp->task_tag = tag;
	lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
	lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba);

	ufshcd_prepare_lrbp_crypto(scsi_cmd_to_rq(cmd), lrbp);

	lrbp->req_abort_skip = false;

	ufshpb_prep(hba, lrbp);

	ufshcd_comp_scsi_upiu(hba, lrbp);

	err = ufshcd_map_sg(hba, lrbp);
	if (err) {
		lrbp->cmd = NULL;
		ufshcd_release(hba);
		goto out;
	}

	if (is_mcq_enabled(hba))
		hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));

	ufshcd_send_command(hba, tag, hwq);

out:
	rcu_read_unlock();

	if (ufs_trigger_eh()) {
		unsigned long flags;

		spin_lock_irqsave(hba->host->host_lock, flags);
		ufshcd_schedule_eh_work(hba);
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	}

	return err;
}
static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
{
	lrbp->cmd = NULL;
	lrbp->task_tag = tag;
	lrbp->lun = 0; /* device management cmd is not specific to any LUN */
	lrbp->intr_cmd = true; /* No interrupt aggregation */
	ufshcd_prepare_lrbp_crypto(NULL, lrbp);
	hba->dev_cmd.type = cmd_type;

	return ufshcd_compose_devman_upiu(hba, lrbp);
}
/*
 * Clear all the requests from the controller for which a bit has been set in
 * @mask and wait until the controller confirms that these requests have been
 * cleared.
 */
static int ufshcd_clear_cmds(struct ufs_hba *hba, u32 mask)
{
	unsigned long flags;

	/* clear outstanding transaction before retry */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_utrl_clear(hba, mask);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/*
	 * wait for h/w to clear corresponding bit in door-bell.
	 * max. wait is 1 sec.
	 */
	return ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
					mask, ~mask, 1000, 1000);
}
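/*
 * Usage sketch for the mask semantics above (illustrative, values assumed):
 * clearing a single timed-out transfer request whose tag is 5 would pass
 * mask = 1U << 5, after which the doorbell register is polled until that bit
 * reads back as zero (up to the 1 second bound).
 */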
static int
ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	/* Get the UPIU response */
	query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
				UPIU_RSP_CODE_OFFSET;
	return query_res->response;
}
/**
 * ufshcd_dev_cmd_completion() - handles device management command responses
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 */
static int
ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	int resp;
	int err = 0;

	hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
	resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);

	switch (resp) {
	case UPIU_TRANSACTION_NOP_IN:
		if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
			err = -EINVAL;
			dev_err(hba->dev, "%s: unexpected response %x\n",
					__func__, resp);
		}
		break;
	case UPIU_TRANSACTION_QUERY_RSP:
		err = ufshcd_check_query_response(hba, lrbp);
		if (!err)
			err = ufshcd_copy_query_response(hba, lrbp);
		break;
	case UPIU_TRANSACTION_REJECT_UPIU:
		/* TODO: handle Reject UPIU Response */
		err = -EPERM;
		dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
				__func__);
		break;
	case UPIU_TRANSACTION_RESPONSE:
		if (hba->dev_cmd.type != DEV_CMD_TYPE_RPMB) {
			err = -EINVAL;
			dev_err(hba->dev, "%s: unexpected response %x\n", __func__, resp);
		}
		break;
	default:
		err = -EINVAL;
		dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
				__func__, resp);
		break;
	}

	return err;
}
static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp, int max_timeout)
{
	unsigned long time_left = msecs_to_jiffies(max_timeout);
	unsigned long flags;
	bool pending;
	int err;

retry:
	time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
						time_left);

	if (likely(time_left)) {
		/*
		 * The completion handler called complete() and the caller of
		 * this function still owns the @lrbp tag so the code below does
		 * not trigger any race conditions.
		 */
		hba->dev_cmd.complete = NULL;
		err = ufshcd_get_tr_ocs(lrbp, hba->dev_cmd.cqe);
		if (!err)
			err = ufshcd_dev_cmd_completion(hba, lrbp);
	} else {
		err = -ETIMEDOUT;
		dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
			__func__, lrbp->task_tag);
		if (ufshcd_clear_cmds(hba, 1U << lrbp->task_tag) == 0) {
			/* successfully cleared the command, retry if needed */
			err = -EAGAIN;
			/*
			 * Since clearing the command succeeded we also need to
			 * clear the task tag bit from the outstanding_reqs
			 * variable.
			 */
			spin_lock_irqsave(&hba->outstanding_lock, flags);
			pending = test_bit(lrbp->task_tag,
					   &hba->outstanding_reqs);
			if (pending) {
				hba->dev_cmd.complete = NULL;
				__clear_bit(lrbp->task_tag,
					    &hba->outstanding_reqs);
			}
			spin_unlock_irqrestore(&hba->outstanding_lock, flags);

			if (!pending) {
				/*
				 * The completion handler ran while we tried to
				 * clear the command.
				 */
				time_left = 1;
				goto retry;
			}
		} else {
			dev_err(hba->dev, "%s: failed to clear tag %d\n",
				__func__, lrbp->task_tag);

			spin_lock_irqsave(&hba->outstanding_lock, flags);
			pending = test_bit(lrbp->task_tag,
					   &hba->outstanding_reqs);
			if (pending)
				hba->dev_cmd.complete = NULL;
			spin_unlock_irqrestore(&hba->outstanding_lock, flags);

			if (!pending) {
				/*
				 * The completion handler ran while we tried to
				 * clear the command.
				 */
				time_left = 1;
				goto retry;
			}
		}
	}

	return err;
}
/**
 * ufshcd_exec_dev_cmd - API for sending device management requests
 * @hba: UFS hba
 * @cmd_type: specifies the type (NOP, Query...)
 * @timeout: timeout in milliseconds
 *
 * NOTE: Since there is only one available tag for device management commands,
 * it is expected you hold the hba->dev_cmd.lock mutex.
 */
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
		enum dev_cmd_type cmd_type, int timeout)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	const u32 tag = hba->reserved_slot;
	struct ufshcd_lrb *lrbp;
	int err;

	/* Protects use of hba->reserved_slot. */
	lockdep_assert_held(&hba->dev_cmd.lock);

	down_read(&hba->clk_scaling_lock);

	lrbp = &hba->lrb[tag];
	err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
	if (unlikely(err))
		goto out;

	hba->dev_cmd.complete = &wait;
	hba->dev_cmd.cqe = NULL;

	ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);

	ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
	err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
	ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
				    (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);

out:
	up_read(&hba->clk_scaling_lock);
	return err;
}
/**
 * ufshcd_init_query() - init the query response and request parameters
 * @hba: per-adapter instance
 * @request: address of the request pointer to be initialized
 * @response: address of the response pointer to be initialized
 * @opcode: operation to perform
 * @idn: flag idn to access
 * @index: LU number to access
 * @selector: query/flag/descriptor further identification
 */
static inline void ufshcd_init_query(struct ufs_hba *hba,
		struct ufs_query_req **request, struct ufs_query_res **response,
		enum query_opcode opcode, u8 idn, u8 index, u8 selector)
{
	*request = &hba->dev_cmd.query.request;
	*response = &hba->dev_cmd.query.response;
	memset(*request, 0, sizeof(struct ufs_query_req));
	memset(*response, 0, sizeof(struct ufs_query_res));
	(*request)->upiu_req.opcode = opcode;
	(*request)->upiu_req.idn = idn;
	(*request)->upiu_req.index = index;
	(*request)->upiu_req.selector = selector;
}
static int ufshcd_query_flag_retry(struct ufs_hba *hba,
	enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
{
	int ret;
	int retries;

	for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
		ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res);
		if (ret)
			dev_dbg(hba->dev,
				"%s: failed with error %d, retries %d\n",
				__func__, ret, retries);
		else
			break;
	}

	if (ret)
		dev_err(hba->dev,
			"%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
			__func__, opcode, idn, ret, retries);
	return ret;
}
/**
 * ufshcd_query_flag() - API function for sending flag query requests
 * @hba: per-adapter instance
 * @opcode: flag query to perform
 * @idn: flag idn to access
 * @index: flag index to access
 * @flag_res: the flag value after the query request completes
 *
 * Returns 0 for success, non-zero in case of failure
 */
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
			enum flag_idn idn, u8 index, bool *flag_res)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err, selector = 0;
	int timeout = QUERY_REQ_TIMEOUT;

	BUG_ON(!hba);

	ufshcd_hold(hba, false);
	mutex_lock(&hba->dev_cmd.lock);
	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			selector);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_SET_FLAG:
	case UPIU_QUERY_OPCODE_CLEAR_FLAG:
	case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		if (!flag_res) {
			/* No dummy reads */
			dev_err(hba->dev, "%s: Invalid argument for read request\n",
					__func__);
			err = -EINVAL;
			goto out_unlock;
		}
		break;
	default:
		dev_err(hba->dev,
			"%s: Expected query flag opcode but got = %d\n",
			__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);

	if (err) {
		dev_err(hba->dev,
			"%s: Sending flag query for idn %d failed, err = %d\n",
			__func__, idn, err);
		goto out_unlock;
	}

	if (flag_res)
		*flag_res = (be32_to_cpu(response->upiu_res.value) &
				MASK_QUERY_UPIU_FLAG_LOC) & 0x1;

out_unlock:
	mutex_unlock(&hba->dev_cmd.lock);
	ufshcd_release(hba);
	return err;
}
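/*
 * Usage sketch (illustrative only): reading the fDeviceInit flag through the
 * retrying wrapper, similar to what device initialization elsewhere in this
 * file does.
 *
 *	bool flag_res;
 *	int err;
 *
 *	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *				      QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
 */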
/**
 * ufshcd_query_attr - API function for sending attribute requests
 * @hba: per-adapter instance
 * @opcode: attribute opcode
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @attr_val: the attribute value after the query request completes
 *
 * Returns 0 for success, non-zero in case of failure
 */
int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
		      enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err;

	BUG_ON(!hba);

	if (!attr_val) {
		dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
				__func__, opcode);
		return -EINVAL;
	}

	ufshcd_hold(hba, false);

	mutex_lock(&hba->dev_cmd.lock);
	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			selector);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_WRITE_ATTR:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		request->upiu_req.value = cpu_to_be32(*attr_val);
		break;
	case UPIU_QUERY_OPCODE_READ_ATTR:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		break;
	default:
		dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);

	if (err) {
		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
				__func__, opcode, idn, index, err);
		goto out_unlock;
	}

	*attr_val = be32_to_cpu(response->upiu_res.value);

out_unlock:
	mutex_unlock(&hba->dev_cmd.lock);
	ufshcd_release(hba);
	return err;
}
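/*
 * Usage sketch (illustrative only): reading the bBootLunEn attribute with the
 * helper above; index and selector are 0 for this device-level attribute.
 *
 *	u32 boot_lun_en;
 *	int err;
 *
 *	err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
 *				QUERY_ATTR_IDN_BOOT_LU_EN, 0, 0, &boot_lun_en);
 */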
/**
 * ufshcd_query_attr_retry() - API function for sending query
 * attribute with retries
 * @hba: per-adapter instance
 * @opcode: attribute opcode
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @attr_val: the attribute value after the query request
 * completes
 *
 * Returns 0 for success, non-zero in case of failure
 */
int ufshcd_query_attr_retry(struct ufs_hba *hba,
	enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
	u32 *attr_val)
{
	int ret = 0;
	u32 retries;

	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
		ret = ufshcd_query_attr(hba, opcode, idn, index,
						selector, attr_val);
		if (ret)
			dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
				__func__, ret, retries);
		else
			break;
	}

	if (ret)
		dev_err(hba->dev,
			"%s: query attribute, idn %d, failed with error %d after %d retries\n",
			__func__, idn, ret, QUERY_REQ_RETRIES);
	return ret;
}
static int __ufshcd_query_descriptor(struct ufs_hba *hba,
			enum query_opcode opcode, enum desc_idn idn, u8 index,
			u8 selector, u8 *desc_buf, int *buf_len)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err;

	BUG_ON(!hba);

	if (!desc_buf) {
		dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
				__func__, opcode);
		return -EINVAL;
	}

	if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
		dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
				__func__, *buf_len);
		return -EINVAL;
	}

	ufshcd_hold(hba, false);

	mutex_lock(&hba->dev_cmd.lock);
	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			selector);
	hba->dev_cmd.query.descriptor = desc_buf;
	request->upiu_req.length = cpu_to_be16(*buf_len);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_WRITE_DESC:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_DESC:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		break;
	default:
		dev_err(hba->dev,
				"%s: Expected query descriptor opcode but got = 0x%.2x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);

	if (err) {
		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
				__func__, opcode, idn, index, err);
		goto out_unlock;
	}

	*buf_len = be16_to_cpu(response->upiu_res.length);

out_unlock:
	hba->dev_cmd.query.descriptor = NULL;
	mutex_unlock(&hba->dev_cmd.lock);
	ufshcd_release(hba);
	return err;
}
/**
 * ufshcd_query_descriptor_retry - API function for sending descriptor requests
 * @hba: per-adapter instance
 * @opcode: attribute opcode
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @desc_buf: the buffer that contains the descriptor
 * @buf_len: length parameter passed to the device
 *
 * Returns 0 for success, non-zero in case of failure.
 * The buf_len parameter will contain, on return, the length parameter
 * received on the response.
 */
int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
				  enum query_opcode opcode,
				  enum desc_idn idn, u8 index,
				  u8 selector,
				  u8 *desc_buf, int *buf_len)
{
	int err;
	int retries;

	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
		err = __ufshcd_query_descriptor(hba, opcode, idn, index,
						selector, desc_buf, buf_len);
		if (!err || err == -EINVAL)
			break;
	}

	return err;
}
/**
 * ufshcd_read_desc_param - read the specified descriptor parameter
 * @hba: Pointer to adapter instance
 * @desc_id: descriptor idn value
 * @desc_index: descriptor index
 * @param_offset: offset of the parameter to read
 * @param_read_buf: pointer to buffer where parameter would be read
 * @param_size: sizeof(param_read_buf)
 *
 * Return 0 in case of success, non-zero otherwise
 */
int ufshcd_read_desc_param(struct ufs_hba *hba,
			   enum desc_idn desc_id,
			   int desc_index,
			   u8 param_offset,
			   u8 *param_read_buf,
			   u8 param_size)
{
	int ret;
	u8 *desc_buf;
	int buff_len = QUERY_DESC_MAX_SIZE;
	bool is_kmalloc = true;

	/* Safety check */
	if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
		return -EINVAL;

	/* Check whether we need temp memory */
	if (param_offset != 0 || param_size < buff_len) {
		desc_buf = kzalloc(buff_len, GFP_KERNEL);
		if (!desc_buf)
			return -ENOMEM;
	} else {
		desc_buf = param_read_buf;
		is_kmalloc = false;
	}

	/* Request for full descriptor */
	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
					    desc_id, desc_index, 0,
					    desc_buf, &buff_len);
	if (ret) {
		dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n",
			__func__, desc_id, desc_index, param_offset, ret);
		goto out;
	}

	/* Update descriptor length */
	buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET];

	if (param_offset >= buff_len) {
		dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n",
			__func__, param_offset, desc_id, buff_len);
		ret = -EINVAL;
		goto out;
	}

	/* Sanity check */
	if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
		dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n",
			__func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
		ret = -EINVAL;
		goto out;
	}

	if (is_kmalloc) {
		/* Make sure we don't copy more data than available */
		if (param_offset >= buff_len)
			ret = -EINVAL;
		else
			memcpy(param_read_buf, &desc_buf[param_offset],
			       min_t(u32, param_size, buff_len - param_offset));
	}
out:
	if (is_kmalloc)
		kfree(desc_buf);
	return ret;
}
/**
 * struct uc_string_id - unicode string
 *
 * @len: size of this descriptor inclusive
 * @type: descriptor type
 * @uc: unicode string character
 */
struct uc_string_id {
	u8 len;
	u8 type;
	wchar_t uc[];
} __packed;

/* replace non-printable or non-ASCII characters with spaces */
static inline char ufshcd_remove_non_printable(u8 ch)
{
	return (ch >= 0x20 && ch <= 0x7e) ? ch : ' ';
}
/**
 * ufshcd_read_string_desc - read string descriptor
 * @hba: pointer to adapter instance
 * @desc_index: descriptor index
 * @buf: pointer to buffer where descriptor would be read,
 *       the caller should free the memory.
 * @ascii: if true convert from unicode to ascii characters
 *         null terminated string.
 *
 * Return:
 * *      string size on success.
 * *      -ENOMEM: on allocation failure
 * *      -EINVAL: on a wrong parameter
 */
int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
			    u8 **buf, bool ascii)
{
	struct uc_string_id *uc_str;
	u8 *str;
	int ret;

	if (!buf)
		return -EINVAL;

	uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
	if (!uc_str)
		return -ENOMEM;

	ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_STRING, desc_index, 0,
				     (u8 *)uc_str, QUERY_DESC_MAX_SIZE);
	if (ret < 0) {
		dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n",
			QUERY_REQ_RETRIES, ret);
		str = NULL;
		goto out;
	}

	if (uc_str->len <= QUERY_DESC_HDR_SIZE) {
		dev_dbg(hba->dev, "String Desc is of zero length\n");
		str = NULL;
		ret = 0;
		goto out;
	}

	if (ascii) {
		ssize_t ascii_len;
		int i;
		/* remove header and divide by 2 to move from UTF16 to UTF8 */
		ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1;
		str = kzalloc(ascii_len, GFP_KERNEL);
		if (!str) {
			ret = -ENOMEM;
			goto out;
		}

		/*
		 * the descriptor contains string in UTF16 format
		 * we need to convert to utf-8 so it can be displayed
		 */
		ret = utf16s_to_utf8s(uc_str->uc,
				      uc_str->len - QUERY_DESC_HDR_SIZE,
				      UTF16_BIG_ENDIAN, str, ascii_len);

		/* replace non-printable or non-ASCII characters with spaces */
		for (i = 0; i < ret; i++)
			str[i] = ufshcd_remove_non_printable(str[i]);

		str[ret++] = '\0';

	} else {
		str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
		if (!str) {
			ret = -ENOMEM;
			goto out;
		}
		ret = uc_str->len;
	}
out:
	*buf = str;
	kfree(uc_str);
	return ret;
}
/**
 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
 * @hba: Pointer to adapter instance
 * @lun: lun id
 * @param_offset: offset of the parameter to read
 * @param_read_buf: pointer to buffer where parameter would be read
 * @param_size: sizeof(param_read_buf)
 *
 * Return 0 in case of success, non-zero otherwise
 */
static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
					      int lun,
					      enum unit_desc_param param_offset,
					      u8 *param_read_buf,
					      u32 param_size)
{
	/*
	 * Unit descriptors are only available for general purpose LUs (LUN id
	 * from 0 to 7) and RPMB Well known LU.
	 */
	if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun))
		return -EOPNOTSUPP;

	return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
				      param_offset, param_read_buf, param_size);
}
static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba)
{
	int err = 0;
	u32 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;

	if (hba->dev_info.wspecversion >= 0x300) {
		err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME, 0, 0,
				&gating_wait);
		if (err)
			dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n",
					 err, gating_wait);

		if (gating_wait == 0) {
			gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
			dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n",
					 gating_wait);
		}

		hba->dev_info.clk_gating_wait_us = gating_wait;
	}

	return err;
}
/**
 * ufshcd_memory_alloc - allocate memory for host memory space data structures
 * @hba: per adapter instance
 *
 * 1. Allocate DMA memory for Command Descriptor array
 *	Each command descriptor consist of Command UPIU, Response UPIU and PRDT
 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
 *	(UTMRDL)
 * 4. Allocate memory for local reference block(lrb).
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_memory_alloc(struct ufs_hba *hba)
{
	size_t utmrdl_size, utrdl_size, ucdl_size;

	/* Allocate memory for UTP command descriptors */
	ucdl_size = sizeof_utp_transfer_cmd_desc(hba) * hba->nutrs;
	hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
						  ucdl_size,
						  &hba->ucdl_dma_addr,
						  GFP_KERNEL);

	/*
	 * UFSHCI requires UTP command descriptor to be 128 byte aligned.
	 */
	if (!hba->ucdl_base_addr ||
	    WARN_ON(hba->ucdl_dma_addr & (128 - 1))) {
		dev_err(hba->dev,
			"Command Descriptor Memory allocation failed\n");
		goto out;
	}

	/*
	 * Allocate memory for UTP Transfer descriptors
	 * UFSHCI requires 1024 byte alignment of UTRD
	 */
	utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
	hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
						   utrdl_size,
						   &hba->utrdl_dma_addr,
						   GFP_KERNEL);
	if (!hba->utrdl_base_addr ||
	    WARN_ON(hba->utrdl_dma_addr & (1024 - 1))) {
		dev_err(hba->dev,
			"Transfer Descriptor Memory allocation failed\n");
		goto out;
	}

	/*
	 * Skip utmrdl allocation; it may have been
	 * allocated during first pass and not released during
	 * MCQ memory allocation.
	 * See ufshcd_release_sdb_queue() and ufshcd_config_mcq()
	 */
	if (hba->utmrdl_base_addr)
		goto skip_utmrdl;
	/*
	 * Allocate memory for UTP Task Management descriptors
	 * UFSHCI requires 1024 byte alignment of UTMRD
	 */
	utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
	hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
						    utmrdl_size,
						    &hba->utmrdl_dma_addr,
						    GFP_KERNEL);
	if (!hba->utmrdl_base_addr ||
	    WARN_ON(hba->utmrdl_dma_addr & (1024 - 1))) {
		dev_err(hba->dev,
			"Task Management Descriptor Memory allocation failed\n");
		goto out;
	}

skip_utmrdl:
	/* Allocate memory for local reference block */
	hba->lrb = devm_kcalloc(hba->dev,
				hba->nutrs, sizeof(struct ufshcd_lrb),
				GFP_KERNEL);
	if (!hba->lrb) {
		dev_err(hba->dev, "LRB Memory allocation failed\n");
		goto out;
	}
	return 0;
out:
	return -ENOMEM;
}
/**
 * ufshcd_host_memory_configure - configure local reference block with
 *				memory offsets
 * @hba: per adapter instance
 *
 * Configure Host memory space
 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
 *    address.
 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
 *    and PRDT offset.
 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
 *    into local reference block.
 */
static void ufshcd_host_memory_configure(struct ufs_hba *hba)
{
	struct utp_transfer_req_desc *utrdlp;
	dma_addr_t cmd_desc_dma_addr;
	dma_addr_t cmd_desc_element_addr;
	u16 response_offset;
	u16 prdt_offset;
	int cmd_desc_size;
	int i;

	utrdlp = hba->utrdl_base_addr;

	response_offset =
		offsetof(struct utp_transfer_cmd_desc, response_upiu);
	prdt_offset =
		offsetof(struct utp_transfer_cmd_desc, prd_table);

	cmd_desc_size = sizeof_utp_transfer_cmd_desc(hba);
	cmd_desc_dma_addr = hba->ucdl_dma_addr;

	for (i = 0; i < hba->nutrs; i++) {
		/* Configure UTRD with command descriptor base address */
		cmd_desc_element_addr =
				(cmd_desc_dma_addr + (cmd_desc_size * i));
		utrdlp[i].command_desc_base_addr_lo =
				cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
		utrdlp[i].command_desc_base_addr_hi =
				cpu_to_le32(upper_32_bits(cmd_desc_element_addr));

		/* Response upiu and prdt offset should be in double words */
		if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
			utrdlp[i].response_upiu_offset =
				cpu_to_le16(response_offset);
			utrdlp[i].prd_table_offset =
				cpu_to_le16(prdt_offset);
			utrdlp[i].response_upiu_length =
				cpu_to_le16(ALIGNED_UPIU_SIZE);
		} else {
			utrdlp[i].response_upiu_offset =
				cpu_to_le16(response_offset >> 2);
			utrdlp[i].prd_table_offset =
				cpu_to_le16(prdt_offset >> 2);
			utrdlp[i].response_upiu_length =
				cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
		}

		ufshcd_init_lrb(hba, &hba->lrb[i], i);
	}
}
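/*
 * Worked example for the ">> 2" conversion above (illustrative, layout
 * assumed): without the byte-granularity quirk, a response UPIU that starts
 * 512 bytes into the command descriptor is stored as 512 >> 2 = 128, i.e. in
 * the double-word units UFSHCI expects.
 */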
/**
 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
 * @hba: per adapter instance
 *
 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
 * in order to initialize the Unipro link startup procedure.
 * Once the Unipro links are up, the device connected to the controller
 * is detected.
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_dme_link_startup(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_dbg(hba->dev,
			"dme-link-startup: error code %d\n", ret);
	return ret;
}

/**
 * ufshcd_dme_reset - UIC command for DME_RESET
 * @hba: per adapter instance
 *
 * DME_RESET command is issued in order to reset UniPro stack.
 * This function now deals with cold reset.
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_dme_reset(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_RESET;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_err(hba->dev,
			"dme-reset: error code %d\n", ret);

	return ret;
}
int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
			       int agreed_gear,
			       int adapt_val)
{
	int ret;

	if (agreed_gear < UFS_HS_G4)
		adapt_val = PA_NO_ADAPT;

	ret = ufshcd_dme_set(hba,
			     UIC_ARG_MIB(PA_TXHSADAPTTYPE),
			     adapt_val);
	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt);

/**
 * ufshcd_dme_enable - UIC command for DME_ENABLE
 * @hba: per adapter instance
 *
 * DME_ENABLE command is issued in order to enable UniPro stack.
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_dme_enable(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_ENABLE;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_err(hba->dev,
			"dme-enable: error code %d\n", ret);

	return ret;
}
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
{
	#define MIN_DELAY_BEFORE_DME_CMDS_US	1000
	unsigned long min_sleep_time_us;

	if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
		return;

	/*
	 * last_dme_cmd_tstamp will be 0 only for 1st call to
	 * this function
	 */
	if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
		min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
	} else {
		unsigned long delta =
			(unsigned long) ktime_to_us(
				ktime_sub(ktime_get(),
				hba->last_dme_cmd_tstamp));

		if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
			min_sleep_time_us =
				MIN_DELAY_BEFORE_DME_CMDS_US - delta;
		else
			return; /* no more delay required */
	}

	/* allow sleep for extra 50us if needed */
	usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
}
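/*
 * Worked example for the delay above (illustrative, values assumed): with the
 * quirk set and the previous DME command issued 600us ago, delta = 600 is
 * below the 1000us minimum, so the function sleeps for the remaining 400us,
 * i.e. usleep_range(400, 450).
 */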
/**
 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
 * @hba: per adapter instance
 * @attr_sel: uic command argument1
 * @attr_set: attribute set type as uic command argument2
 * @mib_val: setting value as uic command argument3
 * @peer: indicate whether peer or local
 *
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
			u8 attr_set, u32 mib_val, u8 peer)
{
	struct uic_command uic_cmd = {0};
	static const char *const action[] = {
		"dme-set",
		"dme-peer-set"
	};
	const char *set = action[!!peer];
	int ret;
	int retries = UFS_UIC_COMMAND_RETRIES;

	uic_cmd.command = peer ?
		UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
	uic_cmd.argument1 = attr_sel;
	uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
	uic_cmd.argument3 = mib_val;

	do {
		/* for peer attributes we retry upon failure */
		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
		if (ret)
			dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
				set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
	} while (ret && peer && --retries);

	if (ret)
		dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
			set, UIC_GET_ATTR_ID(attr_sel), mib_val,
			UFS_UIC_COMMAND_RETRIES - retries);

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
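/*
 * Usage sketch (illustrative only): callers normally reach this helper
 * through the ufshcd_dme_set()/ufshcd_dme_peer_set() inline wrappers declared
 * in ufshcd.h, e.g. when configuring link termination during a power mode
 * change:
 *
 *	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
 *	ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
 */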
/**
 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
 * @hba: per adapter instance
 * @attr_sel: uic command argument1
 * @mib_val: the value of the attribute as returned by the UIC command
 * @peer: indicate whether peer or local
 *
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
			u32 *mib_val, u8 peer)
{
	struct uic_command uic_cmd = {0};
	static const char *const action[] = {
		"dme-get",
		"dme-peer-get"
	};
	const char *get = action[!!peer];
	int ret;
	int retries = UFS_UIC_COMMAND_RETRIES;
	struct ufs_pa_layer_attr orig_pwr_info;
	struct ufs_pa_layer_attr temp_pwr_info;
	bool pwr_mode_change = false;

	if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
		orig_pwr_info = hba->pwr_info;
		temp_pwr_info = orig_pwr_info;

		if (orig_pwr_info.pwr_tx == FAST_MODE ||
		    orig_pwr_info.pwr_rx == FAST_MODE) {
			temp_pwr_info.pwr_tx = FASTAUTO_MODE;
			temp_pwr_info.pwr_rx = FASTAUTO_MODE;
			pwr_mode_change = true;
		} else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
		    orig_pwr_info.pwr_rx == SLOW_MODE) {
			temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
			temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
			pwr_mode_change = true;
		}

		if (pwr_mode_change) {
			ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
			if (ret)
				goto out;
		}
	}

	uic_cmd.command = peer ?
		UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
	uic_cmd.argument1 = attr_sel;

	do {
		/* for peer attributes we retry upon failure */
		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
		if (ret)
			dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
				get, UIC_GET_ATTR_ID(attr_sel), ret);
	} while (ret && peer && --retries);

	if (ret)
		dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
			get, UIC_GET_ATTR_ID(attr_sel),
			UFS_UIC_COMMAND_RETRIES - retries);

	if (mib_val && !ret)
		*mib_val = uic_cmd.argument3;

	if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
	    && pwr_mode_change)
		ufshcd_change_power_mode(hba, &orig_pwr_info);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
/**
 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affect the link power
 * state) and waits for them to take effect.
 *
 * @hba: per adapter instance
 * @cmd: UIC command to execute
 *
 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
 * DME_HIBERNATE_EXIT commands take some time to take effect on both the host
 * and device UniPro link, hence their final completion is indicated by
 * dedicated status bits in the Interrupt Status register (UPMS, UHES, UHXS)
 * in addition to the normal UIC command completion Status (UCCS). This
 * function only returns after the relevant status bits indicate completion.
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
{
	DECLARE_COMPLETION_ONSTACK(uic_async_done);
	unsigned long flags;
	u8 status;
	int ret;
	bool reenable_intr = false;

	mutex_lock(&hba->uic_cmd_mutex);
	ufshcd_add_delay_before_dme_cmd(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (ufshcd_is_link_broken(hba)) {
		ret = -ENOLINK;
		goto out_unlock;
	}
	hba->uic_async_done = &uic_async_done;
	if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
		ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
		/*
		 * Make sure UIC command completion interrupt is disabled before
		 * issuing UIC command.
		 */
		wmb();
		reenable_intr = true;
	}
	ret = __ufshcd_send_uic_cmd(hba, cmd, false);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	if (ret) {
		dev_err(hba->dev,
			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
			cmd->command, cmd->argument3, ret);
		goto out;
	}

	if (!wait_for_completion_timeout(hba->uic_async_done,
					 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
		dev_err(hba->dev,
			"pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
			cmd->command, cmd->argument3);

		if (!cmd->cmd_active) {
			dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n",
				__func__);
			goto check_upmcrs;
		}

		ret = -ETIMEDOUT;
		goto out;
	}

check_upmcrs:
	status = ufshcd_get_upmcrs(hba);
	if (status != PWR_LOCAL) {
		dev_err(hba->dev,
			"pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
			cmd->command, status);
		ret = (status != PWR_OK) ? status : -1;
	}
out:
	if (ret) {
		ufshcd_print_host_state(hba);
		ufshcd_print_pwr_info(hba);
		ufshcd_print_evt_hist(hba);
	}

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->active_uic_cmd = NULL;
	hba->uic_async_done = NULL;
	if (reenable_intr)
		ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
	if (ret) {
		ufshcd_set_link_broken(hba);
		ufshcd_schedule_eh_work(hba);
	}
out_unlock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	mutex_unlock(&hba->uic_cmd_mutex);

	return ret;
}
/**
 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
 *				using DME_SET primitives.
 * @hba: per adapter instance
 * @mode: power mode value
 *
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
{
	struct uic_command uic_cmd = {0};
	int ret;

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
		ret = ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
		if (ret) {
			dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
						__func__, ret);
			goto out;
		}
	}

	uic_cmd.command = UIC_CMD_DME_SET;
	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
	uic_cmd.argument3 = mode;
	ufshcd_hold(hba, false);
	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
	ufshcd_release(hba);

out:
	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_uic_change_pwr_mode);

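/*
 * Note: the @mode byte passed to ufshcd_uic_change_pwr_mode() packs the RX
 * power mode in the upper nibble and the TX power mode in the lower nibble,
 * e.g. (FAST_MODE << 4) | FAST_MODE; see the PA_PWRMODE write built in
 * ufshcd_change_power_mode() below.
 */
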
int ufshcd_link_recovery(struct ufs_hba *hba)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->ufshcd_state = UFSHCD_STATE_RESET;
	ufshcd_set_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* Reset the attached device */
	ufshcd_device_reset(hba);

	ret = ufshcd_host_reset_and_restore(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (ret)
		hba->ufshcd_state = UFSHCD_STATE_ERROR;
	ufshcd_clear_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (ret)
		dev_err(hba->dev, "%s: link recovery failed, err %d",
			__func__, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_link_recovery);

int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
{
	int ret;
	struct uic_command uic_cmd = {0};
	ktime_t start = ktime_get();

	ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);

	uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
	trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
			     ktime_to_us(ktime_sub(ktime_get(), start)), ret);

	if (ret)
		dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
			__func__, ret);
	else
		ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
								POST_CHANGE);

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_enter);

int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;
	ktime_t start = ktime_get();

	ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);

	uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
	trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
			     ktime_to_us(ktime_sub(ktime_get(), start)), ret);

	if (ret) {
		dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
			__func__, ret);
	} else {
		ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
								POST_CHANGE);
		hba->ufs_stats.last_hibern8_exit_tstamp = local_clock();
		hba->ufs_stats.hibern8_exit_cnt++;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);

void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
{
	unsigned long flags;
	bool update = false;

	if (!ufshcd_is_auto_hibern8_supported(hba))
		return;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->ahit != ahit) {
		hba->ahit = ahit;
		update = true;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (update &&
	    !pm_runtime_suspended(&hba->ufs_device_wlun->sdev_gendev)) {
		ufshcd_rpm_get_sync(hba);
		ufshcd_hold(hba, false);
		ufshcd_auto_hibern8_enable(hba);
		ufshcd_release(hba);
		ufshcd_rpm_put_sync(hba);
	}
}
EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);

void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
{
	if (!ufshcd_is_auto_hibern8_supported(hba))
		return;

	ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
}

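/*
 * For reference: hba->ahit holds the raw AHIT register image. Elsewhere in
 * this driver it is typically built as, e.g.,
 *	FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
 *	FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3)
 * i.e. a 150 ms idle timer; a zero timer value disables Auto-Hibern8.
 */
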
/**
 * ufshcd_init_pwr_info - setting the POR (power on reset)
 * values in hba power info
 * @hba: per-adapter instance
 */
static void ufshcd_init_pwr_info(struct ufs_hba *hba)
{
	hba->pwr_info.gear_rx = UFS_PWM_G1;
	hba->pwr_info.gear_tx = UFS_PWM_G1;
	hba->pwr_info.lane_rx = 1;
	hba->pwr_info.lane_tx = 1;
	hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
	hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
	hba->pwr_info.hs_rate = 0;
}

/**
 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
 * @hba: per-adapter instance
 */
static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
{
	struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;

	if (hba->max_pwr_info.is_valid)
		return 0;

	if (hba->quirks & UFSHCD_QUIRK_HIBERN_FASTAUTO) {
		pwr_info->pwr_tx = FASTAUTO_MODE;
		pwr_info->pwr_rx = FASTAUTO_MODE;
	} else {
		pwr_info->pwr_tx = FAST_MODE;
		pwr_info->pwr_rx = FAST_MODE;
	}
	pwr_info->hs_rate = PA_HS_MODE_B;

	/* Get the connected lane count */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
			&pwr_info->lane_rx);
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
			&pwr_info->lane_tx);

	if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
		dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
				__func__,
				pwr_info->lane_rx,
				pwr_info->lane_tx);
		return -EINVAL;
	}

	/*
	 * First, get the maximum gears of HS speed.
	 * If a zero value, it means there is no HSGEAR capability.
	 * Then, get the maximum gears of PWM speed.
	 */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
	if (!pwr_info->gear_rx) {
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
				&pwr_info->gear_rx);
		if (!pwr_info->gear_rx) {
			dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
				__func__, pwr_info->gear_rx);
			return -EINVAL;
		}
		pwr_info->pwr_rx = SLOW_MODE;
	}

	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
			&pwr_info->gear_tx);
	if (!pwr_info->gear_tx) {
		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
				&pwr_info->gear_tx);
		if (!pwr_info->gear_tx) {
			dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
				__func__, pwr_info->gear_tx);
			return -EINVAL;
		}
		pwr_info->pwr_tx = SLOW_MODE;
	}

	hba->max_pwr_info.is_valid = true;
	return 0;
}

static int ufshcd_change_power_mode(struct ufs_hba *hba,
			     struct ufs_pa_layer_attr *pwr_mode)
{
	int ret;

	/* if already configured to the requested pwr_mode */
	if (!hba->force_pmc &&
	    pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
	    pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
	    pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
	    pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
	    pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
	    pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
	    pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
		dev_dbg(hba->dev, "%s: power already configured\n", __func__);
		return 0;
	}

	/*
	 * Configure attributes for power mode change with below.
	 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
	 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
	 * - PA_HSSERIES
	 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
			pwr_mode->lane_rx);
	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
			pwr_mode->pwr_rx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
	else
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), false);

	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
			pwr_mode->lane_tx);
	if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
			pwr_mode->pwr_tx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
	else
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), false);

	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
	    pwr_mode->pwr_tx == FASTAUTO_MODE ||
	    pwr_mode->pwr_rx == FAST_MODE ||
	    pwr_mode->pwr_tx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
						pwr_mode->hs_rate);

	if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) {
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
				DL_FC0ProtectionTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
				DL_TC0ReplayTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
				DL_AFC0ReqTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
				DL_FC1ProtectionTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
				DL_TC1ReplayTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
				DL_AFC1ReqTimeOutVal_Default);

		ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
				DL_FC0ProtectionTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
				DL_TC0ReplayTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
				DL_AFC0ReqTimeOutVal_Default);
	}

	ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
			| pwr_mode->pwr_tx);

	if (ret) {
		dev_err(hba->dev,
			"%s: power mode change failed %d\n", __func__, ret);
	} else {
		ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
								pwr_mode);

		memcpy(&hba->pwr_info, pwr_mode,
			sizeof(struct ufs_pa_layer_attr));
	}

	return ret;
}

/**
 * ufshcd_config_pwr_mode - configure a new power mode
 * @hba: per-adapter instance
 * @desired_pwr_mode: desired power configuration
 */
int ufshcd_config_pwr_mode(struct ufs_hba *hba,
		struct ufs_pa_layer_attr *desired_pwr_mode)
{
	struct ufs_pa_layer_attr final_params = { 0 };
	int ret;

	ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
					desired_pwr_mode, &final_params);

	if (ret)
		memcpy(&final_params, desired_pwr_mode, sizeof(final_params));

	ret = ufshcd_change_power_mode(hba, &final_params);

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);

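/*
 * Illustrative example (not part of the driver flow): a host driver asking
 * for HS-Gear 3, Rate B, two lanes in both directions could do:
 *
 *	struct ufs_pa_layer_attr pwr = {
 *		.gear_rx = UFS_HS_G3, .gear_tx = UFS_HS_G3,
 *		.lane_rx = 2, .lane_tx = 2,
 *		.pwr_rx = FAST_MODE, .pwr_tx = FAST_MODE,
 *		.hs_rate = PA_HS_MODE_B,
 *	};
 *
 *	ufshcd_config_pwr_mode(hba, &pwr);
 */
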
/**
 * ufshcd_complete_dev_init() - checks device readiness
 * @hba: per-adapter instance
 *
 * Set fDeviceInit flag and poll until device toggles it.
 */
static int ufshcd_complete_dev_init(struct ufs_hba *hba)
{
	int err;
	bool flag_res = true;
	ktime_t timeout;

	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
		QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
	if (err) {
		dev_err(hba->dev,
			"%s: setting fDeviceInit flag failed with error %d\n",
			__func__, err);
		goto out;
	}

	/* Poll fDeviceInit flag to be cleared */
	timeout = ktime_add_ms(ktime_get(), FDEVICEINIT_COMPL_TIMEOUT);
	do {
		err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
					QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
		if (!flag_res)
			break;
		usleep_range(500, 1000);
	} while (ktime_before(ktime_get(), timeout));

	if (err) {
		dev_err(hba->dev,
			"%s: reading fDeviceInit flag failed with error %d\n",
			__func__, err);
	} else if (flag_res) {
		dev_err(hba->dev,
			"%s: fDeviceInit was not cleared by the device\n",
			__func__);
		err = -EBUSY;
	}
out:
	return err;
}

/**
 * ufshcd_make_hba_operational - Make UFS controller operational
 * @hba: per adapter instance
 *
 * To bring UFS host controller to operational state,
 * 1. Enable required interrupts
 * 2. Configure interrupt aggregation
 * 3. Program UTRL and UTMRL base address
 * 4. Configure run-stop-registers
 *
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_make_hba_operational(struct ufs_hba *hba)
{
	int err = 0;
	u32 reg;

	/* Enable required interrupts */
	ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);

	/* Configure interrupt aggregation */
	if (ufshcd_is_intr_aggr_allowed(hba))
		ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
	else
		ufshcd_disable_intr_aggr(hba);

	/* Configure UTRL and UTMRL base address registers */
	ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
			REG_UTP_TRANSFER_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
			REG_UTP_TRANSFER_REQ_LIST_BASE_H);
	ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
			REG_UTP_TASK_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
			REG_UTP_TASK_REQ_LIST_BASE_H);

	/*
	 * Make sure base address and interrupt setup are updated before
	 * enabling the run/stop registers below.
	 */
	wmb();

	/*
	 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
	 */
	reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
	if (!(ufshcd_get_lists_status(reg))) {
		ufshcd_enable_run_stop_reg(hba);
	} else {
		dev_err(hba->dev,
			"Host controller not ready to process requests");
		err = -EIO;
	}

	return err;
}
EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);

/**
 * ufshcd_hba_stop - Send controller to reset state
 * @hba: per adapter instance
 */
void ufshcd_hba_stop(struct ufs_hba *hba)
{
	unsigned long flags;
	int err;

	/*
	 * Obtain the host lock to prevent that the controller is disabled
	 * while the UFS interrupt handler is active on another CPU.
	 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
					CONTROLLER_ENABLE, CONTROLLER_DISABLE,
					10, 1);
	if (err)
		dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
}
EXPORT_SYMBOL_GPL(ufshcd_hba_stop);

/**
 * ufshcd_hba_execute_hce - initialize the controller
 * @hba: per adapter instance
 *
 * The controller resets itself and controller firmware initialization
 * sequence kicks off. When controller is ready it will set
 * the Host Controller Enable bit to 1.
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
{
	int retry_outer = 3;
	int retry_inner;

start:
	if (ufshcd_is_hba_active(hba))
		/* change controller state to "reset state" */
		ufshcd_hba_stop(hba);

	/* UniPro link is disabled at this point */
	ufshcd_set_link_off(hba);

	ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);

	/* start controller initialization sequence */
	ufshcd_hba_start(hba);

	/*
	 * To initialize a UFS host controller HCE bit must be set to 1.
	 * During initialization the HCE bit value changes from 1->0->1.
	 * When the host controller completes initialization sequence
	 * it sets the value of HCE bit to 1. The same HCE bit is read back
	 * to check if the controller has completed initialization sequence.
	 * So without this delay the value HCE = 1, set in the previous
	 * instruction might be read back.
	 * This delay can be changed based on the controller.
	 */
	ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);

	/* wait for the host controller to complete initialization */
	retry_inner = 50;
	while (!ufshcd_is_hba_active(hba)) {
		if (retry_inner) {
			retry_inner--;
		} else {
			dev_err(hba->dev,
				"Controller enable failed\n");
			if (retry_outer) {
				retry_outer--;
				goto start;
			}
			return -EIO;
		}
		usleep_range(1000, 1100);
	}

	/* enable UIC related interrupts */
	ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);

	ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);

	return 0;
}

int ufshcd_hba_enable(struct ufs_hba *hba)
{
	int ret;

	if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
		ufshcd_set_link_off(hba);
		ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);

		/* enable UIC related interrupts */
		ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
		ret = ufshcd_dme_reset(hba);
		if (ret) {
			dev_err(hba->dev, "DME_RESET failed\n");
			return ret;
		}

		ret = ufshcd_dme_enable(hba);
		if (ret) {
			dev_err(hba->dev, "Enabling DME failed\n");
			return ret;
		}

		ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
	} else {
		ret = ufshcd_hba_execute_hce(hba);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_hba_enable);

static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
{
	int tx_lanes = 0, i, err = 0;

	if (!peer)
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
			       &tx_lanes);
	else
		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
				    &tx_lanes);
	for (i = 0; i < tx_lanes; i++) {
		if (!peer)
			err = ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
					0);
		else
			err = ufshcd_dme_peer_set(hba,
				UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
					0);
		if (err) {
			dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
				__func__, peer, i, err);
			break;
		}
	}

	return err;
}

static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
{
	return ufshcd_disable_tx_lcc(hba, true);
}

void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val)
{
	struct ufs_event_hist *e;

	if (id >= UFS_EVT_CNT)
		return;

	e = &hba->ufs_stats.event[id];
	e->val[e->pos] = val;
	e->tstamp[e->pos] = local_clock();
	e->cnt += 1;
	e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH;

	ufshcd_vops_event_notify(hba, id, &val);
}
EXPORT_SYMBOL_GPL(ufshcd_update_evt_hist);

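/*
 * Example (as used in ufshcd_link_startup() below): record a link startup
 * failure together with its error code in the per-event history ring:
 *
 *	ufshcd_update_evt_hist(hba, UFS_EVT_LINK_STARTUP_FAIL, (u32)ret);
 */
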
/**
 * ufshcd_link_startup - Initialize unipro link startup
 * @hba: per adapter instance
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_link_startup(struct ufs_hba *hba)
{
	int ret;
	int retries = DME_LINKSTARTUP_RETRIES;
	bool link_startup_again = false;

	/*
	 * If UFS device isn't active then we will have to issue link startup
	 * 2 times to make sure the device state move to active.
	 */
	if (!ufshcd_is_ufs_dev_active(hba))
		link_startup_again = true;

link_startup:
	do {
		ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);

		ret = ufshcd_dme_link_startup(hba);

		/* check if device is detected by inter-connect layer */
		if (!ret && !ufshcd_is_device_present(hba)) {
			ufshcd_update_evt_hist(hba,
					       UFS_EVT_LINK_STARTUP_FAIL,
					       0);
			dev_err(hba->dev, "%s: Device not present\n", __func__);
			ret = -ENXIO;
			goto out;
		}

		/*
		 * DME link lost indication is only received when link is up,
		 * but we can't be sure if the link is up until link startup
		 * succeeds. So reset the local Uni-Pro and try again.
		 */
		if (ret && retries && ufshcd_hba_enable(hba)) {
			ufshcd_update_evt_hist(hba,
					       UFS_EVT_LINK_STARTUP_FAIL,
					       (u32)ret);
			goto out;
		}
	} while (ret && retries--);

	if (ret) {
		/* failed to get the link up... retire */
		ufshcd_update_evt_hist(hba,
				       UFS_EVT_LINK_STARTUP_FAIL,
				       (u32)ret);
		goto out;
	}

	if (link_startup_again) {
		link_startup_again = false;
		retries = DME_LINKSTARTUP_RETRIES;
		goto link_startup;
	}

	/* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
	ufshcd_init_pwr_info(hba);
	ufshcd_print_pwr_info(hba);

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
		ret = ufshcd_disable_device_tx_lcc(hba);
		if (ret)
			goto out;
	}

	/* Include any host controller configuration via UIC commands */
	ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
	if (ret)
		goto out;

	/* Clear UECPA once due to LINERESET has happened during LINK_STARTUP */
	ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
	ret = ufshcd_make_hba_operational(hba);
out:
	if (ret) {
		dev_err(hba->dev, "link startup failed %d\n", ret);
		ufshcd_print_host_state(hba);
		ufshcd_print_pwr_info(hba);
		ufshcd_print_evt_hist(hba);
	}
	return ret;
}

/**
 * ufshcd_verify_dev_init() - Verify device initialization
 * @hba: per-adapter instance
 *
 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
 * device Transport Protocol (UTP) layer is ready after a reset.
 * If the UTP layer at the device side is not initialized, it may
 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
 */
static int ufshcd_verify_dev_init(struct ufs_hba *hba)
{
	int err = 0;
	int retries;

	ufshcd_hold(hba, false);
	mutex_lock(&hba->dev_cmd.lock);
	for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
		err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
					  hba->nop_out_timeout);

		if (!err || err == -ETIMEDOUT)
			break;

		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
	}
	mutex_unlock(&hba->dev_cmd.lock);
	ufshcd_release(hba);

	if (err)
		dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
	return err;
}

/**
 * ufshcd_setup_links - associate link b/w device wlun and other luns
 * @sdev: pointer to SCSI device
 * @hba: pointer to ufs hba
 */
static void ufshcd_setup_links(struct ufs_hba *hba, struct scsi_device *sdev)
{
	struct device_link *link;

	/*
	 * Device wlun is the supplier & rest of the luns are consumers.
	 * This ensures that device wlun suspends after all other luns.
	 */
	if (hba->ufs_device_wlun) {
		link = device_link_add(&sdev->sdev_gendev,
				       &hba->ufs_device_wlun->sdev_gendev,
				       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
		if (!link) {
			dev_err(&sdev->sdev_gendev, "Failed establishing link - %s\n",
				dev_name(&hba->ufs_device_wlun->sdev_gendev));
			return;
		}
		hba->luns_avail--;
		/* Ignore REPORT_LUN wlun probing */
		if (hba->luns_avail == 1) {
			ufshcd_rpm_put(hba);
			return;
		}
	} else {
		/*
		 * Device wlun is probed. The assumption is that WLUNs are
		 * scanned before other LUNs.
		 */
		hba->luns_avail--;
	}
}

/**
 * ufshcd_lu_init - Initialize the relevant parameters of the LU
 * @hba: per-adapter instance
 * @sdev: pointer to SCSI device
 */
static void ufshcd_lu_init(struct ufs_hba *hba, struct scsi_device *sdev)
{
	int len = QUERY_DESC_MAX_SIZE;
	u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);
	u8 lun_qdepth = hba->nutrs;
	u8 *desc_buf;
	int ret;

	desc_buf = kzalloc(len, GFP_KERNEL);
	if (!desc_buf)
		goto set_qdepth;

	ret = ufshcd_read_unit_desc_param(hba, lun, 0, desc_buf, len);
	if (ret < 0) {
		if (ret == -EOPNOTSUPP)
			/* If LU doesn't support unit descriptor, its queue depth is set to 1 */
			lun_qdepth = 1;
		kfree(desc_buf);
		goto set_qdepth;
	}

	if (desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH]) {
		/*
		 * In per-LU queueing architecture, bLUQueueDepth will not be 0, then we will
		 * use the smaller between UFSHCI CAP.NUTRS and UFS LU bLUQueueDepth
		 */
		lun_qdepth = min_t(int, desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH], hba->nutrs);
	}
	/*
	 * According to UFS device specification, the write protection mode is only supported by
	 * normal LU, not supported by WLUN.
	 */
	if (hba->dev_info.f_power_on_wp_en && lun < hba->dev_info.max_lu_supported &&
	    !hba->dev_info.is_lu_power_on_wp &&
	    desc_buf[UNIT_DESC_PARAM_LU_WR_PROTECT] == UFS_LU_POWER_ON_WP)
		hba->dev_info.is_lu_power_on_wp = true;

	/* In case of RPMB LU, check if advanced RPMB mode is enabled */
	if (desc_buf[UNIT_DESC_PARAM_UNIT_INDEX] == UFS_UPIU_RPMB_WLUN &&
	    desc_buf[RPMB_UNIT_DESC_PARAM_REGION_EN] & BIT(4))
		hba->dev_info.b_advanced_rpmb_en = true;

	kfree(desc_buf);
set_qdepth:
	/*
	 * For WLUNs that don't support unit descriptor, queue depth is set to 1. For LUs whose
	 * bLUQueueDepth == 0, the queue depth is set to a maximum value that host can queue.
	 */
	dev_dbg(hba->dev, "Set LU %x queue depth %d\n", lun, lun_qdepth);
	scsi_change_queue_depth(sdev, lun_qdepth);
}

/**
 * ufshcd_slave_alloc - handle initial SCSI device configurations
 * @sdev: pointer to SCSI device
 *
 * Returns success
 */
static int ufshcd_slave_alloc(struct scsi_device *sdev)
{
	struct ufs_hba *hba;

	hba = shost_priv(sdev->host);

	/* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
	sdev->use_10_for_ms = 1;

	/* DBD field should be set to 1 in mode sense(10) */
	sdev->set_dbd_for_ms = 1;

	/* allow SCSI layer to restart the device in case of errors */
	sdev->allow_restart = 1;

	/* REPORT SUPPORTED OPERATION CODES is not supported */
	sdev->no_report_opcodes = 1;

	/* WRITE_SAME command is not supported */
	sdev->no_write_same = 1;

	ufshcd_lu_init(hba, sdev);

	ufshcd_setup_links(hba, sdev);

	return 0;
}

/**
 * ufshcd_change_queue_depth - change queue depth
 * @sdev: pointer to SCSI device
 * @depth: required depth to set
 *
 * Change queue depth and make sure the max. limits are not crossed.
 */
static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
{
	return scsi_change_queue_depth(sdev, min(depth, sdev->host->can_queue));
}

static void ufshcd_hpb_destroy(struct ufs_hba *hba, struct scsi_device *sdev)
{
	/* skip well-known LU */
	if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) ||
	    !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba))
		return;

	ufshpb_destroy_lu(hba, sdev);
}

static void ufshcd_hpb_configure(struct ufs_hba *hba, struct scsi_device *sdev)
{
	/* skip well-known LU */
	if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) ||
	    !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba))
		return;

	ufshpb_init_hpb_lu(hba, sdev);
}

/**
 * ufshcd_slave_configure - adjust SCSI device configurations
 * @sdev: pointer to SCSI device
 */
static int ufshcd_slave_configure(struct scsi_device *sdev)
{
	struct ufs_hba *hba = shost_priv(sdev->host);
	struct request_queue *q = sdev->request_queue;

	ufshcd_hpb_configure(hba, sdev);

	blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
	if (hba->quirks & UFSHCD_QUIRK_4KB_DMA_ALIGNMENT)
		blk_queue_update_dma_alignment(q, 4096 - 1);
	/*
	 * Block runtime-pm until all consumers are added.
	 * Refer ufshcd_setup_links().
	 */
	if (is_device_wlun(sdev))
		pm_runtime_get_noresume(&sdev->sdev_gendev);
	else if (ufshcd_is_rpm_autosuspend_allowed(hba))
		sdev->rpm_autosuspend = 1;
	/*
	 * Do not print messages during runtime PM to avoid never-ending cycles
	 * of messages written back to storage by user space causing runtime
	 * resume, causing more messages and so on.
	 */
	sdev->silence_suspend = 1;

	ufshcd_crypto_register(hba, q);

	return 0;
}

/**
 * ufshcd_slave_destroy - remove SCSI device configurations
 * @sdev: pointer to SCSI device
 */
static void ufshcd_slave_destroy(struct scsi_device *sdev)
{
	struct ufs_hba *hba;
	unsigned long flags;

	hba = shost_priv(sdev->host);

	ufshcd_hpb_destroy(hba, sdev);

	/* Drop the reference as it won't be needed anymore */
	if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
		spin_lock_irqsave(hba->host->host_lock, flags);
		hba->ufs_device_wlun = NULL;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	} else if (hba->ufs_device_wlun) {
		struct device *supplier = NULL;

		/* Ensure UFS Device WLUN exists and does not disappear */
		spin_lock_irqsave(hba->host->host_lock, flags);
		if (hba->ufs_device_wlun) {
			supplier = &hba->ufs_device_wlun->sdev_gendev;
			get_device(supplier);
		}
		spin_unlock_irqrestore(hba->host->host_lock, flags);

		if (supplier) {
			/*
			 * If a LUN fails to probe (e.g. absent BOOT WLUN), the
			 * device will not have been registered but can still
			 * have a device link holding a reference to the device.
			 */
			device_link_remove(&sdev->sdev_gendev, supplier);
			put_device(supplier);
		}
	}
}

/**
 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
 * @lrbp: pointer to local reference block of completed command
 * @scsi_status: SCSI command status
 *
 * Returns value based on SCSI command status
 */
static inline int
ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
{
	int result = 0;

	switch (scsi_status) {
	case SAM_STAT_CHECK_CONDITION:
		ufshcd_copy_sense_data(lrbp);
		fallthrough;
	case SAM_STAT_GOOD:
		result |= DID_OK << 16 | scsi_status;
		break;
	case SAM_STAT_TASK_SET_FULL:
	case SAM_STAT_BUSY:
	case SAM_STAT_TASK_ABORTED:
		ufshcd_copy_sense_data(lrbp);
		result |= scsi_status;
		break;
	default:
		result |= DID_ERROR << 16;
		break;
	} /* end of switch */

	return result;
}

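/*
 * Note: the value returned above follows the SCSI midlayer convention of
 * packing the host byte into bits 23:16 (e.g. DID_OK << 16) and the SCSI
 * status byte into bits 7:0, which is what host_byte() expects.
 */
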
/**
 * ufshcd_transfer_rsp_status - Get overall status of the response
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block of completed command
 * @cqe: pointer to the completion queue entry
 *
 * Returns result of the command to notify SCSI midlayer
 */
static inline int
ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
			   struct cq_entry *cqe)
{
	int result = 0;
	int scsi_status;
	enum utp_ocs ocs;

	/* overall command status of utrd */
	ocs = ufshcd_get_tr_ocs(lrbp, cqe);

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) {
		if (be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_1) &
					MASK_RSP_UPIU_RESULT)
			ocs = OCS_SUCCESS;
	}

	switch (ocs) {
	case OCS_SUCCESS:
		result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
		hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
		switch (result) {
		case UPIU_TRANSACTION_RESPONSE:
			/*
			 * get the response UPIU result to extract
			 * the SCSI command status
			 */
			result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);

			/*
			 * get the result based on SCSI status response
			 * to notify the SCSI midlayer of the command status
			 */
			scsi_status = result & MASK_SCSI_STATUS;
			result = ufshcd_scsi_cmd_status(lrbp, scsi_status);

			/*
			 * Currently we are only supporting BKOPs exception
			 * events hence we can ignore BKOPs exception event
			 * during power management callbacks. BKOPs exception
			 * event is not expected to be raised in runtime suspend
			 * callback as it allows the urgent bkops.
			 * During system suspend, we are anyway forcefully
			 * disabling the bkops and if urgent bkops is needed
			 * it will be enabled on system resume. Long term
			 * solution could be to abort the system suspend if
			 * UFS device needs urgent BKOPs.
			 */
			if (!hba->pm_op_in_progress &&
			    !ufshcd_eh_in_progress(hba) &&
			    ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
				/* Flushed in suspend */
				schedule_work(&hba->eeh_work);

			if (scsi_status == SAM_STAT_GOOD)
				ufshpb_rsp_upiu(hba, lrbp);
			break;
		case UPIU_TRANSACTION_REJECT_UPIU:
			/* TODO: handle Reject UPIU Response */
			result = DID_ERROR << 16;
			dev_err(hba->dev,
				"Reject UPIU not fully implemented\n");
			break;
		default:
			dev_err(hba->dev,
				"Unexpected request response code = %x\n",
				result);
			result = DID_ERROR << 16;
			break;
		}
		break;
	case OCS_ABORTED:
		result |= DID_ABORT << 16;
		break;
	case OCS_INVALID_COMMAND_STATUS:
		result |= DID_REQUEUE << 16;
		break;
	case OCS_INVALID_CMD_TABLE_ATTR:
	case OCS_INVALID_PRDT_ATTR:
	case OCS_MISMATCH_DATA_BUF_SIZE:
	case OCS_MISMATCH_RESP_UPIU_SIZE:
	case OCS_PEER_COMM_FAILURE:
	case OCS_FATAL_ERROR:
	case OCS_DEVICE_FATAL_ERROR:
	case OCS_INVALID_CRYPTO_CONFIG:
	case OCS_GENERAL_CRYPTO_ERROR:
	default:
		result |= DID_ERROR << 16;
		dev_err(hba->dev,
				"OCS error from controller = %x for tag %d\n",
				ocs, lrbp->task_tag);
		ufshcd_print_evt_hist(hba);
		ufshcd_print_host_state(hba);
		break;
	} /* end of switch */

	if ((host_byte(result) != DID_OK) &&
	    (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs)
		ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
	return result;
}

static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
					 u32 intr_mask)
{
	if (!ufshcd_is_auto_hibern8_supported(hba) ||
	    !ufshcd_is_auto_hibern8_enabled(hba))
		return false;

	if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
		return false;

	if (hba->active_uic_cmd &&
	    (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
	    hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
		return false;

	return true;
}

/**
 * ufshcd_uic_cmd_compl - handle completion of uic command
 * @hba: per adapter instance
 * @intr_status: interrupt status generated by the controller
 *
 * Returns
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
{
	irqreturn_t retval = IRQ_NONE;

	spin_lock(hba->host->host_lock);
	if (ufshcd_is_auto_hibern8_error(hba, intr_status))
		hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);

	if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
		hba->active_uic_cmd->argument2 |=
			ufshcd_get_uic_cmd_result(hba);
		hba->active_uic_cmd->argument3 =
			ufshcd_get_dme_attr_val(hba);
		if (!hba->uic_async_done)
			hba->active_uic_cmd->cmd_active = 0;
		complete(&hba->active_uic_cmd->done);
		retval = IRQ_HANDLED;
	}

	if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
		hba->active_uic_cmd->cmd_active = 0;
		complete(hba->uic_async_done);
		retval = IRQ_HANDLED;
	}

	if (retval == IRQ_HANDLED)
		ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd,
					     UFS_CMD_COMP);
	spin_unlock(hba->host->host_lock);
	return retval;
}

/* Release the resources allocated for processing a SCSI command. */
static void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
				    struct ufshcd_lrb *lrbp)
{
	struct scsi_cmnd *cmd = lrbp->cmd;

	scsi_dma_unmap(cmd);
	lrbp->cmd = NULL;	/* Mark the command as completed. */
	ufshcd_release(hba);
	ufshcd_clk_scaling_update_busy(hba);
}

/**
 * ufshcd_compl_one_cqe - handle a completion queue entry
 * @hba: per adapter instance
 * @task_tag: the task tag of the request to be completed
 * @cqe: pointer to the completion queue entry
 */
void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
			  struct cq_entry *cqe)
{
	struct ufshcd_lrb *lrbp;
	struct scsi_cmnd *cmd;

	lrbp = &hba->lrb[task_tag];
	lrbp->compl_time_stamp = ktime_get();
	cmd = lrbp->cmd;
	if (cmd) {
		if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
			ufshcd_update_monitor(hba, lrbp);
		ufshcd_add_command_trace(hba, task_tag, UFS_CMD_COMP);
		cmd->result = ufshcd_transfer_rsp_status(hba, lrbp, cqe);
		ufshcd_release_scsi_cmd(hba, lrbp);
		/* Do not touch lrbp after scsi done */
		scsi_done(cmd);
	} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
		   lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
		if (hba->dev_cmd.complete) {
			hba->dev_cmd.cqe = cqe;
			ufshcd_add_command_trace(hba, task_tag, UFS_DEV_COMP);
			complete(hba->dev_cmd.complete);
			ufshcd_clk_scaling_update_busy(hba);
		}
	}
}

/**
 * __ufshcd_transfer_req_compl - handle SCSI and query command completion
 * @hba: per adapter instance
 * @completed_reqs: bitmask that indicates which requests to complete
 */
static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
					unsigned long completed_reqs)
{
	int tag;

	for_each_set_bit(tag, &completed_reqs, hba->nutrs)
		ufshcd_compl_one_cqe(hba, tag, NULL);
}

/* Any value that is not an existing queue number is fine for this constant. */
enum {
	UFSHCD_POLL_FROM_INTERRUPT_CONTEXT = -1
};

static void ufshcd_clear_polled(struct ufs_hba *hba,
				unsigned long *completed_reqs)
{
	int tag;

	for_each_set_bit(tag, completed_reqs, hba->nutrs) {
		struct scsi_cmnd *cmd = hba->lrb[tag].cmd;

		if (!cmd)
			continue;
		if (scsi_cmd_to_rq(cmd)->cmd_flags & REQ_POLLED)
			__clear_bit(tag, completed_reqs);
	}
}

/*
 * Returns > 0 if one or more commands have been completed or 0 if no
 * requests have been completed.
 */
static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
{
	struct ufs_hba *hba = shost_priv(shost);
	unsigned long completed_reqs, flags;
	u32 tr_doorbell;
	struct ufs_hw_queue *hwq;

	if (is_mcq_enabled(hba)) {
		hwq = &hba->uhq[queue_num + UFSHCD_MCQ_IO_QUEUE_OFFSET];

		return ufshcd_mcq_poll_cqe_lock(hba, hwq);
	}

	spin_lock_irqsave(&hba->outstanding_lock, flags);
	tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	completed_reqs = ~tr_doorbell & hba->outstanding_reqs;
	WARN_ONCE(completed_reqs & ~hba->outstanding_reqs,
		  "completed: %#lx; outstanding: %#lx\n", completed_reqs,
		  hba->outstanding_reqs);
	if (queue_num == UFSHCD_POLL_FROM_INTERRUPT_CONTEXT) {
		/* Do not complete polled requests from interrupt context. */
		ufshcd_clear_polled(hba, &completed_reqs);
	}
	hba->outstanding_reqs &= ~completed_reqs;
	spin_unlock_irqrestore(&hba->outstanding_lock, flags);

	if (completed_reqs)
		__ufshcd_transfer_req_compl(hba, completed_reqs);

	return completed_reqs != 0;
}

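/*
 * Note: ufshcd_poll() serves both as the blk-mq ->mq_poll() callback and,
 * when called with queue_num == UFSHCD_POLL_FROM_INTERRUPT_CONTEXT, as the
 * completion path of the interrupt handler; in the latter case REQ_POLLED
 * requests are skipped and left for the polling context to complete.
 */
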
/**
 * ufshcd_transfer_req_compl - handle SCSI and query command completion
 * @hba: per adapter instance
 *
 * Returns
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
{
	/* Resetting interrupt aggregation counters first and reading the
	 * DOOR_BELL afterward allows us to handle all the completed requests.
	 * In order to prevent other interrupts starvation the DB is read once
	 * after reset. The down side of this solution is the possibility of
	 * false interrupt if device completes another request after resetting
	 * aggregation and before reading the DB.
	 */
	if (ufshcd_is_intr_aggr_allowed(hba) &&
	    !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
		ufshcd_reset_intr_aggr(hba);

	if (ufs_fail_completion())
		return IRQ_HANDLED;

	/*
	 * Ignore the ufshcd_poll() return value and return IRQ_HANDLED since we
	 * do not want polling to trigger spurious interrupt complaints.
	 */
	ufshcd_poll(hba->host, UFSHCD_POLL_FROM_INTERRUPT_CONTEXT);

	return IRQ_HANDLED;
}

int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask)
{
	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
				       QUERY_ATTR_IDN_EE_CONTROL, 0, 0,
				       &ee_ctrl_mask);
}

int ufshcd_write_ee_control(struct ufs_hba *hba)
{
	int err;

	mutex_lock(&hba->ee_ctrl_mutex);
	err = __ufshcd_write_ee_control(hba, hba->ee_ctrl_mask);
	mutex_unlock(&hba->ee_ctrl_mutex);
	if (err)
		dev_err(hba->dev, "%s: failed to write ee control %d\n",
			__func__, err);
	return err;
}

int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask,
			     const u16 *other_mask, u16 set, u16 clr)
{
	u16 new_mask, ee_ctrl_mask;
	int err = 0;

	mutex_lock(&hba->ee_ctrl_mutex);
	new_mask = (*mask & ~clr) | set;
	ee_ctrl_mask = new_mask | *other_mask;
	if (ee_ctrl_mask != hba->ee_ctrl_mask)
		err = __ufshcd_write_ee_control(hba, ee_ctrl_mask);
	/* Still need to update 'mask' even if 'ee_ctrl_mask' was unchanged */
	if (!err) {
		hba->ee_ctrl_mask = ee_ctrl_mask;
		*mask = new_mask;
	}
	mutex_unlock(&hba->ee_ctrl_mutex);
	return err;
}

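/*
 * For reference: the ufshcd_update_ee_drv_mask() and
 * ufshcd_update_ee_usr_mask() helpers in ufshcd-priv.h wrap this function
 * with the driver and user-space masks respectively, so the value written
 * to wExceptionEventControl always stays the union of both masks.
 */
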
/**
 * ufshcd_disable_ee - disable exception event
 * @hba: per-adapter instance
 * @mask: exception event to disable
 *
 * Disables exception event in the device so that the EVENT_ALERT
 * bit is not set.
 *
 * Returns zero on success, non-zero error value on failure.
 */
static inline int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
{
	return ufshcd_update_ee_drv_mask(hba, 0, mask);
}

/**
 * ufshcd_enable_ee - enable exception event
 * @hba: per-adapter instance
 * @mask: exception event to enable
 *
 * Enable corresponding exception event in the device to allow
 * device to alert host in critical scenarios.
 *
 * Returns zero on success, non-zero error value on failure.
 */
static inline int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
{
	return ufshcd_update_ee_drv_mask(hba, mask, 0);
}

/**
 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
 * @hba: per-adapter instance
 *
 * Allow device to manage background operations on its own. Enabling
 * this might lead to inconsistent latencies during normal data transfers
 * as the device is allowed to manage its own way of handling background
 * operations.
 *
 * Returns zero on success, non-zero on failure.
 */
static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
{
	int err = 0;

	if (hba->auto_bkops_enabled)
		goto out;

	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
			QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
	if (err) {
		dev_err(hba->dev, "%s: failed to enable bkops %d\n",
				__func__, err);
		goto out;
	}

	hba->auto_bkops_enabled = true;
	trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");

	/* No need of URGENT_BKOPS exception from the device */
	err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
	if (err)
		dev_err(hba->dev, "%s: failed to disable exception event %d\n",
				__func__, err);
out:
	return err;
}

/**
 * ufshcd_disable_auto_bkops - block device in doing background operations
 * @hba: per-adapter instance
 *
 * Disabling background operations improves command response latency but
 * has drawback of device moving into critical state where the device is
 * not-operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
 * host is idle so that BKOPS are managed effectively without any negative
 * impacts.
 *
 * Returns zero on success, non-zero on failure.
 */
static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
{
	int err = 0;

	if (!hba->auto_bkops_enabled)
		goto out;

	/*
	 * If host assisted BKOPs is to be enabled, make sure
	 * urgent bkops exception is allowed.
	 */
	err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
	if (err) {
		dev_err(hba->dev, "%s: failed to enable exception event %d\n",
				__func__, err);
		goto out;
	}

	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
			QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
	if (err) {
		dev_err(hba->dev, "%s: failed to disable bkops %d\n",
				__func__, err);
		ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
		goto out;
	}

	hba->auto_bkops_enabled = false;
	trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
	hba->is_urgent_bkops_lvl_checked = false;
out:
	return err;
}

/**
 * ufshcd_force_reset_auto_bkops - force reset auto bkops state
 * @hba: per adapter instance
 *
 * After a device reset the device may toggle the BKOPS_EN flag
 * to default value. The s/w tracking variables should be updated
 * as well. This function would change the auto-bkops state based on
 * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
 */
static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
{
	if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
		hba->auto_bkops_enabled = false;
		hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
		ufshcd_enable_auto_bkops(hba);
	} else {
		hba->auto_bkops_enabled = true;
		hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
		ufshcd_disable_auto_bkops(hba);
	}
	hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
	hba->is_urgent_bkops_lvl_checked = false;
}

static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
{
	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
}

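/*
 * For reference, the bBackgroundOpStatus levels compared against below
 * (enum bkops_status): 0 - no operations required, 1 - non-critical,
 * 2 - performance impacted, 3 - critical.
 */
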
/**
 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
 * @hba: per-adapter instance
 * @status: bkops_status value
 *
 * Read the bkops_status from the UFS device and Enable fBackgroundOpsEn
 * flag in the device to permit background operations if the device
 * bkops_status is greater than or equal to "status" argument passed to
 * this function, disable otherwise.
 *
 * Returns 0 for success, non-zero in case of failure.
 *
 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
 * to know whether auto bkops is enabled or disabled after this function
 * returns control to it.
 */
static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
			     enum bkops_status status)
{
	int err;
	u32 curr_status = 0;

	err = ufshcd_get_bkops_status(hba, &curr_status);
	if (err) {
		dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
				__func__, err);
		goto out;
	} else if (curr_status > BKOPS_STATUS_MAX) {
		dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
				__func__, curr_status);
		err = -EINVAL;
		goto out;
	}

	if (curr_status >= status)
		err = ufshcd_enable_auto_bkops(hba);
	else
		err = ufshcd_disable_auto_bkops(hba);
out:
	return err;
}

/**
 * ufshcd_urgent_bkops - handle urgent bkops exception event
 * @hba: per-adapter instance
 *
 * Enable fBackgroundOpsEn flag in the device to permit background
 * operations.
 *
 * If BKOPs is enabled, this function returns 0; it returns 1 if bkops is not
 * enabled, and a negative error value for any other failure.
 */
static int ufshcd_urgent_bkops(struct ufs_hba *hba)
{
	return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
}

static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
{
	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
}

static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
{
	int err;
	u32 curr_status = 0;

	if (hba->is_urgent_bkops_lvl_checked)
		goto enable_auto_bkops;

	err = ufshcd_get_bkops_status(hba, &curr_status);
	if (err) {
		dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
				__func__, err);
		goto out;
	}

	/*
	 * We are seeing that some devices are raising the urgent bkops
	 * exception events even when BKOPS status doesn't indicate performance
	 * impacted or critical. Handle such devices by determining their
	 * urgent bkops status at runtime.
	 */
	if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
		dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
				__func__, curr_status);
		/* update the current status as the urgent bkops level */
		hba->urgent_bkops_lvl = curr_status;
		hba->is_urgent_bkops_lvl_checked = true;
	}

enable_auto_bkops:
	err = ufshcd_enable_auto_bkops(hba);
out:
	if (err < 0)
		dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
				__func__, err);
}

static void ufshcd_temp_exception_event_handler(struct ufs_hba *hba, u16 status)
{
	u32 value;

	if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				QUERY_ATTR_IDN_CASE_ROUGH_TEMP, 0, 0, &value))
		return;

	dev_info(hba->dev, "exception Tcase %d\n", value - 80);

	ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP);

	/*
	 * A placeholder for the platform vendors to add whatever additional
	 * steps need to be taken here.
	 */
}

static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn)
{
	u8 index;
	enum query_opcode opcode = set ? UPIU_QUERY_OPCODE_SET_FLAG :
				   UPIU_QUERY_OPCODE_CLEAR_FLAG;

	index = ufshcd_wb_get_query_index(hba);
	return ufshcd_query_flag_retry(hba, opcode, idn, index, NULL);
}

int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable)
{
	int ret;

	if (!ufshcd_is_wb_allowed(hba) ||
	    hba->dev_info.wb_enabled == enable)
		return 0;

	ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_EN);
	if (ret) {
		dev_err(hba->dev, "%s: Write Booster %s failed %d\n",
			__func__, enable ? "enabling" : "disabling", ret);
		return ret;
	}

	hba->dev_info.wb_enabled = enable;
	dev_dbg(hba->dev, "%s: Write Booster %s\n",
			__func__, enable ? "enabled" : "disabled");

	return ret;
}

static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
						 bool enable)
{
	int ret;

	ret = __ufshcd_wb_toggle(hba, enable,
			QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8);
	if (ret) {
		dev_err(hba->dev, "%s: WB-Buf Flush during H8 %s failed %d\n",
			__func__, enable ? "enabling" : "disabling", ret);
		return;
	}
	dev_dbg(hba->dev, "%s: WB-Buf Flush during H8 %s\n",
			__func__, enable ? "enabled" : "disabled");
}

int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable)
{
	int ret;

	if (!ufshcd_is_wb_allowed(hba) ||
	    hba->dev_info.wb_buf_flush_enabled == enable)
		return 0;

	ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN);
	if (ret) {
		dev_err(hba->dev, "%s: WB-Buf Flush %s failed %d\n",
			__func__, enable ? "enabling" : "disabling", ret);
		return ret;
	}

	hba->dev_info.wb_buf_flush_enabled = enable;
	dev_dbg(hba->dev, "%s: WB-Buf Flush %s\n",
			__func__, enable ? "enabled" : "disabled");

	return ret;
}

static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
						u32 avail_buf)
{
	u32 cur_buf;
	int ret;
	u8 index;

	index = ufshcd_wb_get_query_index(hba);
	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				      QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE,
				      index, 0, &cur_buf);
	if (ret) {
		dev_err(hba->dev, "%s: dCurWriteBoosterBufferSize read failed %d\n",
			__func__, ret);
		return false;
	}

	if (!cur_buf) {
		dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n",
			 cur_buf);
		return false;
	}
	/* Let it continue to flush when available buffer exceeds threshold */
	return avail_buf < hba->vps->wb_flush_threshold;
}

static void ufshcd_wb_force_disable(struct ufs_hba *hba)
{
	if (ufshcd_is_wb_buf_flush_allowed(hba))
		ufshcd_wb_toggle_buf_flush(hba, false);

	ufshcd_wb_toggle_buf_flush_during_h8(hba, false);
	ufshcd_wb_toggle(hba, false);
	hba->caps &= ~UFSHCD_CAP_WB_EN;

	dev_info(hba->dev, "%s: WB force disabled\n", __func__);
}

static bool ufshcd_is_wb_buf_lifetime_available(struct ufs_hba *hba)
{
	u32 lifetime;
	int ret;
	u8 index;

	index = ufshcd_wb_get_query_index(hba);
	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				      QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST,
				      index, 0, &lifetime);
	if (ret) {
		dev_err(hba->dev,
			"%s: bWriteBoosterBufferLifeTimeEst read failed %d\n",
			__func__, ret);
		return false;
	}

	if (lifetime == UFS_WB_EXCEED_LIFETIME) {
		dev_err(hba->dev, "%s: WB buf lifetime is exhausted 0x%02X\n",
			__func__, lifetime);
		return false;
	}

	dev_dbg(hba->dev, "%s: WB buf lifetime is 0x%02X\n",
		__func__, lifetime);

	return true;
}

static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
{
	int ret;
	u32 avail_buf;
	u8 index;

	if (!ufshcd_is_wb_allowed(hba))
		return false;

	if (!ufshcd_is_wb_buf_lifetime_available(hba)) {
		ufshcd_wb_force_disable(hba);
		return false;
	}

	/*
	 * The ufs device needs the vcc to be ON to flush.
	 * With user-space reduction enabled, it's enough to enable flush
	 * by checking only the available buffer. The threshold
	 * defined here is > 90% full.
	 * With user-space preserved enabled, the current-buffer
	 * should be checked too because the wb buffer size can reduce
	 * when disk tends to be full. This info is provided by current
	 * buffer (dCurrentWriteBoosterBufferSize). There's no point in
	 * keeping vcc on when current buffer is empty.
	 */
	index = ufshcd_wb_get_query_index(hba);
	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				      QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE,
				      index, 0, &avail_buf);
	if (ret) {
		dev_warn(hba->dev, "%s: dAvailableWriteBoosterBufferSize read failed %d\n",
			 __func__, ret);
		return false;
	}

	if (!hba->dev_info.b_presrv_uspc_en)
		return avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10);

	return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
}

static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(to_delayed_work(work),
					   struct ufs_hba,
					   rpm_dev_flush_recheck_work);
	/*
	 * To prevent unnecessary VCC power drain after device finishes
	 * WriteBooster buffer flush or Auto BKOPs, force runtime resume
	 * after a certain delay to recheck the threshold by next runtime
	 * suspend.
	 */
	ufshcd_rpm_get_sync(hba);
	ufshcd_rpm_put_sync(hba);
}

/**
 * ufshcd_exception_event_handler - handle exceptions raised by device
 * @work: pointer to work data
 *
 * Read bExceptionEventStatus attribute from the device and handle the
 * exception event accordingly.
 */
static void ufshcd_exception_event_handler(struct work_struct *work)
{
	struct ufs_hba *hba;
	int err;
	u32 status = 0;

	hba = container_of(work, struct ufs_hba, eeh_work);

	ufshcd_scsi_block_requests(hba);
	err = ufshcd_get_ee_status(hba, &status);
	if (err) {
		dev_err(hba->dev, "%s: failed to get exception status %d\n",
				__func__, err);
		goto out;
	}

	trace_ufshcd_exception_event(dev_name(hba->dev), status);

	if (status & hba->ee_drv_mask & MASK_EE_URGENT_BKOPS)
		ufshcd_bkops_exception_event_handler(hba);

	if (status & hba->ee_drv_mask & MASK_EE_URGENT_TEMP)
		ufshcd_temp_exception_event_handler(hba, status);

	ufs_debugfs_exception_event(hba, status);
out:
	ufshcd_scsi_unblock_requests(hba);
}

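/*
 * Note: only events armed by the driver itself (hba->ee_drv_mask) are
 * handled above; events armed on behalf of user space (hba->ee_usr_mask)
 * are delivered through ufs_debugfs_exception_event().
 */
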
/* Complete requests that have door-bell cleared */
static void ufshcd_complete_requests(struct ufs_hba *hba)
{
	ufshcd_transfer_req_compl(hba);
	ufshcd_tmc_handler(hba);
}

/**
 * ufshcd_quirk_dl_nac_errors - This function checks whether error handling
 *				is required to recover from the DL NAC errors
 *				or not.
 * @hba: per-adapter instance
 *
 * Returns true if error handling is required, false otherwise
 */
static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
{
	unsigned long flags;
	bool err_handling = true;

	spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only workaround the
	 * device fatal error and/or DL NAC & REPLAY timeout errors.
	 */
	if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
		goto out;

	if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
	    ((hba->saved_err & UIC_ERROR) &&
	     (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
		goto out;

	if ((hba->saved_err & UIC_ERROR) &&
	    (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
		int err;
		/*
		 * wait for 50ms to see if we can get any other errors or not.
		 */
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		msleep(50);
		spin_lock_irqsave(hba->host->host_lock, flags);

		/*
		 * now check if we have got any other severe errors other than
		 * DL NAC error?
		 */
		if ((hba->saved_err & INT_FATAL_ERRORS) ||
		    ((hba->saved_err & UIC_ERROR) &&
		    (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
			goto out;

		/*
		 * As DL NAC is the only error received so far, send out NOP
		 * command to confirm if link is still active or not.
		 *   - If we don't get any response then do error recovery.
		 *   - If we get response then clear the DL NAC error bit.
		 */

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		err = ufshcd_verify_dev_init(hba);
		spin_lock_irqsave(hba->host->host_lock, flags);

		if (err)
			goto out;

		/* Link seems to be alive hence ignore the DL NAC errors */
		if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
			hba->saved_err &= ~UIC_ERROR;
		/* clear NAC error */
		hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
		if (!hba->saved_uic_err)
			err_handling = false;
	}
out:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return err_handling;
}

/* host lock must be held before calling this func */
static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
{
	return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
	       (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
}

void ufshcd_schedule_eh_work(struct ufs_hba *hba)
{
	lockdep_assert_held(hba->host->host_lock);

	/* handle fatal errors only when link is not in error state */
	if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
		if (hba->force_reset || ufshcd_is_link_broken(hba) ||
		    ufshcd_is_saved_err_fatal(hba))
			hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
		else
			hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
		queue_work(hba->eh_wq, &hba->eh_work);
	}
}

static void ufshcd_force_error_recovery(struct ufs_hba *hba)
{
	spin_lock_irq(hba->host->host_lock);
	hba->force_reset = true;
	ufshcd_schedule_eh_work(hba);
	spin_unlock_irq(hba->host->host_lock);
}

static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
{
	mutex_lock(&hba->wb_mutex);
	down_write(&hba->clk_scaling_lock);
	hba->clk_scaling.is_allowed = allow;
	up_write(&hba->clk_scaling_lock);
	mutex_unlock(&hba->wb_mutex);
}

static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
{
	if (suspend) {
		if (hba->clk_scaling.is_enabled)
			ufshcd_suspend_clkscaling(hba);
		ufshcd_clk_scaling_allow(hba, false);
	} else {
		ufshcd_clk_scaling_allow(hba, true);
		if (hba->clk_scaling.is_enabled)
			ufshcd_resume_clkscaling(hba);
	}
}

static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
{
	ufshcd_rpm_get_sync(hba);
	if (pm_runtime_status_suspended(&hba->ufs_device_wlun->sdev_gendev) ||
	    hba->is_sys_suspended) {
		enum ufs_pm_op pm_op;

		/*
		 * Don't assume anything of resume, if
		 * resume fails, irq and clocks can be OFF, and powers
		 * can be OFF or in LPM.
		 */
		ufshcd_setup_hba_vreg(hba, true);
		ufshcd_enable_irq(hba);
		ufshcd_setup_vreg(hba, true);
		ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
		ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
		ufshcd_hold(hba, false);
		if (!ufshcd_is_clkgating_allowed(hba))
			ufshcd_setup_clocks(hba, true);
		ufshcd_release(hba);
		pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM;
		ufshcd_vops_resume(hba, pm_op);
	} else {
		ufshcd_hold(hba, false);
		if (ufshcd_is_clkscaling_supported(hba) &&
		    hba->clk_scaling.is_enabled)
			ufshcd_suspend_clkscaling(hba);
		ufshcd_clk_scaling_allow(hba, false);
	}
	ufshcd_scsi_block_requests(hba);
	/* Drain ufshcd_queuecommand() */
	synchronize_rcu();
	cancel_work_sync(&hba->eeh_work);
}

static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
{
	ufshcd_scsi_unblock_requests(hba);
	ufshcd_release(hba);
	if (ufshcd_is_clkscaling_supported(hba))
		ufshcd_clk_scaling_suspend(hba, false);
	ufshcd_rpm_put(hba);
}

static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
{
	return (!hba->is_powered || hba->shutting_down ||
		!hba->ufs_device_wlun ||
		hba->ufshcd_state == UFSHCD_STATE_ERROR ||
		(!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
		   ufshcd_is_link_broken(hba))));
}

#ifdef CONFIG_PM
static void ufshcd_recover_pm_error(struct ufs_hba *hba)
{
	struct Scsi_Host *shost = hba->host;
	struct scsi_device *sdev;
	struct request_queue *q;
	int ret;

	hba->is_sys_suspended = false;
	/*
	 * Set RPM status of wlun device to RPM_ACTIVE,
	 * this also clears its runtime error.
	 */
	ret = pm_runtime_set_active(&hba->ufs_device_wlun->sdev_gendev);

	/* hba device might have a runtime error otherwise */
	if (!ret)
		ret = pm_runtime_set_active(hba->dev);
	/*
	 * If wlun device had runtime error, we also need to resume those
	 * consumer scsi devices in case any of them has failed to be
	 * resumed due to supplier runtime resume failure. This is to unblock
	 * blk_queue_enter in case there are bios waiting inside it.
	 */
	if (!ret) {
		shost_for_each_device(sdev, shost) {
			q = sdev->request_queue;
			if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
				       q->rpm_status == RPM_SUSPENDING))
				pm_request_resume(q->dev);
		}
	}
}
#else
static inline void ufshcd_recover_pm_error(struct ufs_hba *hba)
{
}
#endif
static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba)
{
	struct ufs_pa_layer_attr *pwr_info = &hba->pwr_info;
	u32 mode;

	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);

	if (pwr_info->pwr_rx != ((mode >> PWRMODE_RX_OFFSET) & PWRMODE_MASK))
		return true;

	if (pwr_info->pwr_tx != (mode & PWRMODE_MASK))
		return true;

	return false;
}
static bool ufshcd_abort_all(struct ufs_hba *hba)
{
	bool needs_reset = false;
	int tag, ret;

	/* Clear pending transfer requests */
	for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
		ret = ufshcd_try_to_abort_task(hba, tag);
		dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
			hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
			ret ? "failed" : "succeeded");
		if (ret) {
			needs_reset = true;
			goto out;
		}
	}

	/* Clear pending task management requests */
	for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
		if (ufshcd_clear_tm_cmd(hba, tag)) {
			needs_reset = true;
			goto out;
		}
	}

out:
	/* Complete the requests that are cleared by s/w */
	ufshcd_complete_requests(hba);

	return needs_reset;
}
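/*
 * The boolean returned by ufshcd_abort_all() feeds needs_reset in
 * ufshcd_err_handler() below: any transfer or task-management slot that
 * software could not clear escalates to a full host reset and restore.
 */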
/**
 * ufshcd_err_handler - handle UFS errors that require s/w attention
 * @work: pointer to work structure
 */
static void ufshcd_err_handler(struct work_struct *work)
{
	int retries = MAX_ERR_HANDLER_RETRIES;
	struct ufs_hba *hba;
	unsigned long flags;
	bool needs_restore;
	bool needs_reset;
	int pmc_err;

	hba = container_of(work, struct ufs_hba, eh_work);

	dev_info(hba->dev,
		 "%s started; HBA state %s; powered %d; shutting down %d; saved_err = %d; saved_uic_err = %d; force_reset = %d%s\n",
		 __func__, ufshcd_state_name[hba->ufshcd_state],
		 hba->is_powered, hba->shutting_down, hba->saved_err,
		 hba->saved_uic_err, hba->force_reset,
		 ufshcd_is_link_broken(hba) ? "; link is broken" : "");

	down(&hba->host_sem);
	spin_lock_irqsave(hba->host->host_lock, flags);
	if (ufshcd_err_handling_should_stop(hba)) {
		if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
			hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		up(&hba->host_sem);
		return;
	}
	ufshcd_set_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_err_handling_prepare(hba);
	/* Complete requests that have door-bell cleared by h/w */
	ufshcd_complete_requests(hba);
	spin_lock_irqsave(hba->host->host_lock, flags);
again:
	needs_restore = false;
	needs_reset = false;

	if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
		hba->ufshcd_state = UFSHCD_STATE_RESET;
	/*
	 * A full reset and restore might have happened after preparation
	 * is finished, double check whether we should stop.
	 */
	if (ufshcd_err_handling_should_stop(hba))
		goto skip_err_handling;

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
		int ret;

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		/* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
		ret = ufshcd_quirk_dl_nac_errors(hba);
		spin_lock_irqsave(hba->host->host_lock, flags);
		if (!ret && ufshcd_err_handling_should_stop(hba))
			goto skip_err_handling;
	}

	if ((hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
	    (hba->saved_uic_err &&
	     (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
		bool pr_prdt = !!(hba->saved_err & SYSTEM_BUS_FATAL_ERROR);

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		ufshcd_print_host_state(hba);
		ufshcd_print_pwr_info(hba);
		ufshcd_print_evt_hist(hba);
		ufshcd_print_tmrs(hba, hba->outstanding_tasks);
		ufshcd_print_trs(hba, hba->outstanding_reqs, pr_prdt);
		spin_lock_irqsave(hba->host->host_lock, flags);
	}

	/*
	 * if host reset is required then skip clearing the pending
	 * transfers forcefully because they will get cleared during
	 * host reset and restore
	 */
	if (hba->force_reset || ufshcd_is_link_broken(hba) ||
	    ufshcd_is_saved_err_fatal(hba) ||
	    ((hba->saved_err & UIC_ERROR) &&
	     (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
				    UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))) {
		needs_reset = true;
		goto do_reset;
	}

	/*
	 * If LINERESET was caught, UFS might have been put to PWM mode,
	 * check if power mode restore is needed.
	 */
	if (hba->saved_uic_err & UFSHCD_UIC_PA_GENERIC_ERROR) {
		hba->saved_uic_err &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
		if (!hba->saved_uic_err)
			hba->saved_err &= ~UIC_ERROR;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		if (ufshcd_is_pwr_mode_restore_needed(hba))
			needs_restore = true;
		spin_lock_irqsave(hba->host->host_lock, flags);
		if (!hba->saved_err && !needs_restore)
			goto skip_err_handling;
	}

	hba->silence_err_logs = true;
	/* release lock as clear command might sleep */
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	needs_reset = ufshcd_abort_all(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->silence_err_logs = false;
	if (needs_reset)
		goto do_reset;

	/*
	 * After all reqs and tasks are cleared from doorbell,
	 * now it is safe to restore power mode.
	 */
	if (needs_restore) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		/*
		 * Hold the scaling lock just in case dev cmds
		 * are sent via bsg and/or sysfs.
		 */
		down_write(&hba->clk_scaling_lock);
		hba->force_pmc = true;
		pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
		if (pmc_err) {
			needs_reset = true;
			dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n",
					__func__, pmc_err);
		}
		hba->force_pmc = false;
		ufshcd_print_pwr_info(hba);
		up_write(&hba->clk_scaling_lock);
		spin_lock_irqsave(hba->host->host_lock, flags);
	}

do_reset:
	/* Fatal errors need reset */
	if (needs_reset) {
		int err;

		hba->force_reset = false;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		err = ufshcd_reset_and_restore(hba);
		if (err)
			dev_err(hba->dev, "%s: reset and restore failed with err %d\n",
					__func__, err);
		else
			ufshcd_recover_pm_error(hba);
		spin_lock_irqsave(hba->host->host_lock, flags);
	}

skip_err_handling:
	if (!needs_reset) {
		if (hba->ufshcd_state == UFSHCD_STATE_RESET)
			hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
		if (hba->saved_err || hba->saved_uic_err)
			dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
			    __func__, hba->saved_err, hba->saved_uic_err);
	}
	/* Exit in an operational state or dead */
	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL &&
	    hba->ufshcd_state != UFSHCD_STATE_ERROR) {
		if (--retries)
			goto again;
		hba->ufshcd_state = UFSHCD_STATE_ERROR;
	}
	ufshcd_clear_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_err_handling_unprepare(hba);
	up(&hba->host_sem);

	dev_info(hba->dev, "%s finished; HBA state %s\n", __func__,
		 ufshcd_state_name[hba->ufshcd_state]);
}
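/*
 * Summary of the flow above: the handler retries up to
 * MAX_ERR_HANDLER_RETRIES times (the "again" loop) and only exits once the
 * HBA is back to UFSHCD_STATE_OPERATIONAL or has been declared dead
 * (UFSHCD_STATE_ERROR). A pure LINERESET indication may be downgraded to a
 * power-mode restore instead of a full reset.
 */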
/**
 * ufshcd_update_uic_error - check and set fatal UIC error flags.
 * @hba: per-adapter instance
 *
 * Returns
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
{
	u32 reg;
	irqreturn_t retval = IRQ_NONE;

	/* PHY layer error */
	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
	if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
	    (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
		ufshcd_update_evt_hist(hba, UFS_EVT_PA_ERR, reg);
		/*
		 * To know whether this error is fatal or not, DB timeout
		 * must be checked but this error is handled separately.
		 */
		if (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)
			dev_dbg(hba->dev, "%s: UIC Lane error reported\n",
					__func__);

		/* Got a LINERESET indication. */
		if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) {
			struct uic_command *cmd = NULL;

			hba->uic_error |= UFSHCD_UIC_PA_GENERIC_ERROR;
			if (hba->uic_async_done && hba->active_uic_cmd)
				cmd = hba->active_uic_cmd;
			/*
			 * Ignore the LINERESET during power mode change
			 * operation via DME_SET command.
			 */
			if (cmd && (cmd->command == UIC_CMD_DME_SET))
				hba->uic_error &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
		}
		retval |= IRQ_HANDLED;
	}

	/* PA_INIT_ERROR is fatal and needs UIC reset */
	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
	if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
	    (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
		ufshcd_update_evt_hist(hba, UFS_EVT_DL_ERR, reg);

		if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
			hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
		else if (hba->dev_quirks &
				UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
			if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
				hba->uic_error |=
					UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
			else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
				hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
		}
		retval |= IRQ_HANDLED;
	}

	/* UIC NL/TL/DME errors need software retry */
	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
	if ((reg & UIC_NETWORK_LAYER_ERROR) &&
	    (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
		ufshcd_update_evt_hist(hba, UFS_EVT_NL_ERR, reg);
		hba->uic_error |= UFSHCD_UIC_NL_ERROR;
		retval |= IRQ_HANDLED;
	}

	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
	if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
	    (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
		ufshcd_update_evt_hist(hba, UFS_EVT_TL_ERR, reg);
		hba->uic_error |= UFSHCD_UIC_TL_ERROR;
		retval |= IRQ_HANDLED;
	}

	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
	if ((reg & UIC_DME_ERROR) &&
	    (reg & UIC_DME_ERROR_CODE_MASK)) {
		ufshcd_update_evt_hist(hba, UFS_EVT_DME_ERR, reg);
		hba->uic_error |= UFSHCD_UIC_DME_ERROR;
		retval |= IRQ_HANDLED;
	}

	dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
			__func__, hba->uic_error);
	return retval;
}
/**
 * ufshcd_check_errors - Check for errors that need s/w attention
 * @hba: per-adapter instance
 * @intr_status: interrupt status generated by the controller
 *
 * Returns
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
{
	bool queue_eh_work = false;
	irqreturn_t retval = IRQ_NONE;

	spin_lock(hba->host->host_lock);
	hba->errors |= UFSHCD_ERROR_MASK & intr_status;

	if (hba->errors & INT_FATAL_ERRORS) {
		ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR,
				       hba->errors);
		queue_eh_work = true;
	}

	if (hba->errors & UIC_ERROR) {
		hba->uic_error = 0;
		retval = ufshcd_update_uic_error(hba);
		if (hba->uic_error)
			queue_eh_work = true;
	}

	if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) {
		dev_err(hba->dev,
			"%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
			__func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
			"Enter" : "Exit",
			hba->errors, ufshcd_get_upmcrs(hba));
		ufshcd_update_evt_hist(hba, UFS_EVT_AUTO_HIBERN8_ERR,
				       hba->errors);
		ufshcd_set_link_broken(hba);
		queue_eh_work = true;
	}

	if (queue_eh_work) {
		/*
		 * update the transfer error masks to sticky bits, let's do this
		 * irrespective of current ufshcd_state.
		 */
		hba->saved_err |= hba->errors;
		hba->saved_uic_err |= hba->uic_error;

		/* dump controller state before resetting */
		if ((hba->saved_err &
		     (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
		    (hba->saved_uic_err &&
		     (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
			dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
					__func__, hba->saved_err,
					hba->saved_uic_err);
			ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE,
					 "host_regs: ");
			ufshcd_print_pwr_info(hba);
		}
		ufshcd_schedule_eh_work(hba);
		retval |= IRQ_HANDLED;
	}
	/*
	 * if (!queue_eh_work) -
	 * Other errors are either non-fatal where host recovers
	 * itself without s/w intervention or errors that will be
	 * handled by the SCSI core layer.
	 */
	hba->errors = 0;
	hba->uic_error = 0;
	spin_unlock(hba->host->host_lock);
	return retval;
}
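/*
 * hba->errors is accumulated above under host_lock and cleared before
 * returning; ufshcd_sl_intr() below also calls into this function whenever
 * hba->errors is still non-zero, even if the current interrupt status
 * carries no error bits, so deferred errors are not lost.
 */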
/**
 * ufshcd_tmc_handler - handle task management function completion
 * @hba: per adapter instance
 *
 * Returns
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
{
	unsigned long flags, pending, issued;
	irqreturn_t ret = IRQ_NONE;
	int tag;

	spin_lock_irqsave(hba->host->host_lock, flags);
	pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
	issued = hba->outstanding_tasks & ~pending;
	for_each_set_bit(tag, &issued, hba->nutmrs) {
		struct request *req = hba->tmf_rqs[tag];
		struct completion *c = req->end_io_data;

		complete(c);
		ret = IRQ_HANDLED;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return ret;
}
/**
 * ufshcd_handle_mcq_cq_events - handle MCQ completion queue events
 * @hba: per adapter instance
 *
 * Returns IRQ_HANDLED if interrupt is handled
 */
static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba)
{
	struct ufs_hw_queue *hwq;
	unsigned long outstanding_cqs;
	unsigned int nr_queues;
	int i, ret;
	u32 events;

	ret = ufshcd_vops_get_outstanding_cqs(hba, &outstanding_cqs);
	if (ret)
		outstanding_cqs = (1U << hba->nr_hw_queues) - 1;

	/* Exclude the poll queues */
	nr_queues = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
	for_each_set_bit(i, &outstanding_cqs, nr_queues) {
		hwq = &hba->uhq[i];

		events = ufshcd_mcq_read_cqis(hba, i);
		if (events)
			ufshcd_mcq_write_cqis(hba, events, i);

		if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
			ufshcd_mcq_poll_cqe_nolock(hba, hwq);
	}

	return IRQ_HANDLED;
}
/**
 * ufshcd_sl_intr - Interrupt service routine
 * @hba: per adapter instance
 * @intr_status: contains interrupts generated by the controller
 *
 * Returns
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
{
	irqreturn_t retval = IRQ_NONE;

	if (intr_status & UFSHCD_UIC_MASK)
		retval |= ufshcd_uic_cmd_compl(hba, intr_status);

	if (intr_status & UFSHCD_ERROR_MASK || hba->errors)
		retval |= ufshcd_check_errors(hba, intr_status);

	if (intr_status & UTP_TASK_REQ_COMPL)
		retval |= ufshcd_tmc_handler(hba);

	if (intr_status & UTP_TRANSFER_REQ_COMPL)
		retval |= ufshcd_transfer_req_compl(hba);

	if (intr_status & MCQ_CQ_EVENT_STATUS)
		retval |= ufshcd_handle_mcq_cq_events(hba);

	return retval;
}
/**
 * ufshcd_intr - Main interrupt service routine
 * @irq: irq number
 * @__hba: pointer to adapter instance
 *
 * Returns
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_intr(int irq, void *__hba)
{
	u32 intr_status, enabled_intr_status = 0;
	irqreturn_t retval = IRQ_NONE;
	struct ufs_hba *hba = __hba;
	int retries = hba->nutrs;

	intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
	hba->ufs_stats.last_intr_status = intr_status;
	hba->ufs_stats.last_intr_ts = local_clock();

	/*
	 * There could be max of hba->nutrs reqs in flight and in worst case
	 * if the reqs get finished 1 by 1 after the interrupt status is
	 * read, make sure we handle them by checking the interrupt status
	 * again in a loop until we process all of the reqs before returning.
	 */
	while (intr_status && retries--) {
		enabled_intr_status =
			intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
		ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
		if (enabled_intr_status)
			retval |= ufshcd_sl_intr(hba, enabled_intr_status);

		intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
	}

	if (enabled_intr_status && retval == IRQ_NONE &&
	    (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL) ||
	     hba->outstanding_reqs) && !ufshcd_eh_in_progress(hba)) {
		dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
					__func__,
					intr_status,
					hba->ufs_stats.last_intr_status,
					enabled_intr_status);
		ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
	}

	return retval;
}
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
{
	int err = 0;
	u32 mask = 1 << tag;
	unsigned long flags;

	if (!test_bit(tag, &hba->outstanding_tasks))
		goto out;

	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_utmrl_clear(hba, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* poll for max. 1 sec to clear door bell register by h/w */
	err = ufshcd_wait_for_register(hba,
			REG_UTP_TASK_REQ_DOOR_BELL,
			mask, 0, 1000, 1000);

	dev_err(hba->dev, "Clearing task management function with tag %d %s\n",
		tag, err ? "failed" : "succeeded");

out:
	return err;
}
static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
		struct utp_task_req_desc *treq, u8 tm_function)
{
	struct request_queue *q = hba->tmf_queue;
	struct Scsi_Host *host = hba->host;
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request *req;
	unsigned long flags;
	int task_tag, err;

	/*
	 * blk_mq_alloc_request() is used here only to get a free tag.
	 */
	req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->end_io_data = &wait;
	ufshcd_hold(hba, false);

	spin_lock_irqsave(host->host_lock, flags);

	task_tag = req->tag;
	WARN_ONCE(task_tag < 0 || task_tag >= hba->nutmrs, "Invalid tag %d\n",
		  task_tag);
	hba->tmf_rqs[req->tag] = req;
	treq->upiu_req.req_header.dword_0 |= cpu_to_be32(task_tag);

	memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
	ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function);

	/* send command to the controller */
	__set_bit(task_tag, &hba->outstanding_tasks);

	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
	/* Make sure that doorbell is committed immediately */
	wmb();

	spin_unlock_irqrestore(host->host_lock, flags);

	ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_SEND);

	/* wait until the task management command is completed */
	err = wait_for_completion_io_timeout(&wait,
			msecs_to_jiffies(TM_CMD_TIMEOUT));
	if (!err) {
		ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR);
		dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
				__func__, tm_function);
		if (ufshcd_clear_tm_cmd(hba, task_tag))
			dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
					__func__, task_tag);
		err = -ETIMEDOUT;
	} else {
		err = 0;
		memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq));

		ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_COMP);
	}

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->tmf_rqs[req->tag] = NULL;
	__clear_bit(task_tag, &hba->outstanding_tasks);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	ufshcd_release(hba);
	blk_mq_free_request(req);

	return err;
}
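/*
 * Design note: __ufshcd_issue_tm_cmd() allocates a request from
 * hba->tmf_queue purely to reserve a free UTMR slot number; the block layer
 * acts as the tag allocator while the actual descriptor is written to
 * utmrdl_base_addr and rung via the TM doorbell register.
 */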
/**
 * ufshcd_issue_tm_cmd - issues task management commands to controller
 * @hba: per adapter instance
 * @lun_id: LUN ID to which TM command is sent
 * @task_id: task ID to which the TM command is applicable
 * @tm_function: task management function opcode
 * @tm_response: task management service response return value
 *
 * Returns non-zero value on error, zero on success.
 */
static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
		u8 tm_function, u8 *tm_response)
{
	struct utp_task_req_desc treq = { { 0 }, };
	enum utp_ocs ocs_value;
	int err;

	/* Configure task request descriptor */
	treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
	treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);

	/* Configure task request UPIU */
	treq.upiu_req.req_header.dword_0 = cpu_to_be32(lun_id << 8) |
				  cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24);
	treq.upiu_req.req_header.dword_1 = cpu_to_be32(tm_function << 16);

	/*
	 * The host shall provide the same value for LUN field in the basic
	 * header and for Input Parameter.
	 */
	treq.upiu_req.input_param1 = cpu_to_be32(lun_id);
	treq.upiu_req.input_param2 = cpu_to_be32(task_id);

	err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
	if (err == -ETIMEDOUT)
		return err;

	ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
	if (ocs_value != OCS_SUCCESS)
		dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
				__func__, ocs_value);
	else if (tm_response)
		*tm_response = be32_to_cpu(treq.upiu_rsp.output_param1) &
				MASK_TM_SERVICE_RESP;
	return err;
}
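/*
 * Illustrative use (mirrors ufshcd_eh_device_reset_handler() below):
 *
 *	u8 resp = 0xF;
 *	int err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
 *
 *	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL)
 *		... the TM request failed or the device rejected it ...
 *
 * A non-zero err covers transport problems (e.g. -ETIMEDOUT), while resp
 * carries the device's TM service response.
 */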
/**
 * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
 * @hba: per-adapter instance
 * @req_upiu: upiu request
 * @rsp_upiu: upiu reply
 * @desc_buff: pointer to descriptor buffer, NULL if NA
 * @buff_len: descriptor size, 0 if NA
 * @cmd_type: specifies the type (NOP, Query...)
 * @desc_op: descriptor operation
 *
 * These types of requests use the UTP Transfer Request Descriptor - utrd.
 * Therefore, they "ride" the device management infrastructure: they use its
 * tag and tasks work queues.
 *
 * Since there is only one available tag for device management commands,
 * the caller is expected to hold the hba->dev_cmd.lock mutex.
 */
static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
					struct utp_upiu_req *req_upiu,
					struct utp_upiu_req *rsp_upiu,
					u8 *desc_buff, int *buff_len,
					enum dev_cmd_type cmd_type,
					enum query_opcode desc_op)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	const u32 tag = hba->reserved_slot;
	struct ufshcd_lrb *lrbp;
	int err = 0;
	u8 upiu_flags;

	/* Protects use of hba->reserved_slot. */
	lockdep_assert_held(&hba->dev_cmd.lock);

	down_read(&hba->clk_scaling_lock);

	lrbp = &hba->lrb[tag];
	lrbp->cmd = NULL;
	lrbp->task_tag = tag;
	lrbp->lun = 0;
	lrbp->intr_cmd = true;
	ufshcd_prepare_lrbp_crypto(NULL, lrbp);
	hba->dev_cmd.type = cmd_type;

	if (hba->ufs_version <= ufshci_version(1, 1))
		lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
	else
		lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;

	/* update the task tag in the request upiu */
	req_upiu->header.dword_0 |= cpu_to_be32(tag);

	ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0);

	/* just copy the upiu request as it is */
	memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
	if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
		/* The Data Segment Area is optional depending upon the query
		 * function value. for WRITE DESCRIPTOR, the data segment
		 * follows right after the tsf.
		 */
		memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
		*buff_len = 0;
	}

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));

	hba->dev_cmd.complete = &wait;

	ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);

	ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
	/*
	 * ignore the returning value here - ufshcd_check_query_response is
	 * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
	 * read the response directly ignoring all errors.
	 */
	ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);

	/* just copy the upiu response as it is */
	memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
	if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
		u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu);
		u16 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
			       MASK_QUERY_DATA_SEG_LEN;

		if (*buff_len >= resp_len) {
			memcpy(desc_buff, descp, resp_len);
			*buff_len = resp_len;
		} else {
			dev_warn(hba->dev,
				 "%s: rsp size %d is bigger than buffer size %d",
				 __func__, resp_len, *buff_len);
			*buff_len = 0;
			err = -EINVAL;
		}
	}
	ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
				    (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);

	up_read(&hba->clk_scaling_lock);
	return err;
}
/**
 * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
 * @hba: per-adapter instance
 * @req_upiu: upiu request
 * @rsp_upiu: upiu reply - only 8 DW as we do not support scsi commands
 * @msgcode: message code, one of UPIU Transaction Codes Initiator to Target
 * @desc_buff: pointer to descriptor buffer, NULL if NA
 * @buff_len: descriptor size, 0 if NA
 * @desc_op: descriptor operation
 *
 * Supports UTP Transfer requests (nop and query), and UTP Task
 * Management requests.
 * It is up to the caller to fill the upiu content properly, as it will
 * be copied without any further input validations.
 */
int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
			     struct utp_upiu_req *req_upiu,
			     struct utp_upiu_req *rsp_upiu,
			     enum upiu_request_transaction msgcode,
			     u8 *desc_buff, int *buff_len,
			     enum query_opcode desc_op)
{
	int err;
	enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
	struct utp_task_req_desc treq = { { 0 }, };
	enum utp_ocs ocs_value;
	u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;

	switch (msgcode) {
	case UPIU_TRANSACTION_NOP_OUT:
		cmd_type = DEV_CMD_TYPE_NOP;
		fallthrough;
	case UPIU_TRANSACTION_QUERY_REQ:
		ufshcd_hold(hba, false);
		mutex_lock(&hba->dev_cmd.lock);
		err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
						   desc_buff, buff_len,
						   cmd_type, desc_op);
		mutex_unlock(&hba->dev_cmd.lock);
		ufshcd_release(hba);

		break;
	case UPIU_TRANSACTION_TASK_REQ:
		treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
		treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);

		memcpy(&treq.upiu_req, req_upiu, sizeof(*req_upiu));

		err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
		if (err == -ETIMEDOUT)
			break;

		ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
		if (ocs_value != OCS_SUCCESS) {
			dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
				ocs_value);
			break;
		}

		memcpy(rsp_upiu, &treq.upiu_rsp, sizeof(*rsp_upiu));

		break;
	default:
		err = -EINVAL;

		break;
	}

	return err;
}
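/*
 * ufshcd_exec_raw_upiu_cmd() is the backend for the UFS BSG interface
 * (ufs_bsg), which is why the UPIU content is taken as-is: validation is
 * deliberately left to the (privileged) caller.
 */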
/**
 * ufshcd_advanced_rpmb_req_handler - handle advanced RPMB request
 * @hba: per adapter instance
 * @req_upiu: upiu request
 * @rsp_upiu: upiu reply
 * @req_ehs: EHS field which contains Advanced RPMB Request Message
 * @rsp_ehs: EHS field which returns Advanced RPMB Response Message
 * @sg_cnt: The number of sg lists actually used
 * @sg_list: Pointer to SG list when DATA IN/OUT UPIU is required in ARPMB operation
 * @dir: DMA direction
 *
 * Returns zero on success, non-zero on failure
 */
int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu,
			 struct utp_upiu_req *rsp_upiu, struct ufs_ehs *req_ehs,
			 struct ufs_ehs *rsp_ehs, int sg_cnt, struct scatterlist *sg_list,
			 enum dma_data_direction dir)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	const u32 tag = hba->reserved_slot;
	struct ufshcd_lrb *lrbp;
	int err = 0;
	int result;
	u8 upiu_flags;
	u8 *ehs_data;
	u16 ehs_len;

	/* Protects use of hba->reserved_slot. */
	ufshcd_hold(hba, false);
	mutex_lock(&hba->dev_cmd.lock);
	down_read(&hba->clk_scaling_lock);

	lrbp = &hba->lrb[tag];
	lrbp->cmd = NULL;
	lrbp->task_tag = tag;
	lrbp->lun = UFS_UPIU_RPMB_WLUN;

	lrbp->intr_cmd = true;
	ufshcd_prepare_lrbp_crypto(NULL, lrbp);
	hba->dev_cmd.type = DEV_CMD_TYPE_RPMB;

	/* Advanced RPMB starts from UFS 4.0, so its command type is UTP_CMD_TYPE_UFS_STORAGE */
	lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;

	ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 2);

	/* update the task tag and LUN in the request upiu */
	req_upiu->header.dword_0 |= cpu_to_be32(upiu_flags << 16 | UFS_UPIU_RPMB_WLUN << 8 | tag);

	/* copy the UPIU(contains CDB) request as it is */
	memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
	/* Copy EHS, starting with byte32, immediately after the CDB package */
	memcpy(lrbp->ucd_req_ptr + 1, req_ehs, sizeof(*req_ehs));

	if (dir != DMA_NONE && sg_list)
		ufshcd_sgl_to_prdt(hba, lrbp, sg_cnt, sg_list);

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));

	hba->dev_cmd.complete = &wait;

	ufshcd_send_command(hba, tag, hba->dev_cmd_queue);

	err = ufshcd_wait_for_dev_cmd(hba, lrbp, ADVANCED_RPMB_REQ_TIMEOUT);

	if (!err) {
		/* Just copy the upiu response as it is */
		memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
		/* Get the response UPIU result */
		result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);

		ehs_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) >> 24;
		/*
		 * Since the bLength in EHS indicates the total size of the EHS Header and EHS Data
		 * in 32 Byte units, the value of the bLength Request/Response for Advanced RPMB
		 * Message is 02h.
		 */
		if (ehs_len == 2 && rsp_ehs) {
			/*
			 * ucd_rsp_ptr points to a buffer with a length of 512 bytes
			 * (ALIGNED_UPIU_SIZE = 512), and the EHS data just starts from byte32
			 */
			ehs_data = (u8 *)lrbp->ucd_rsp_ptr + EHS_OFFSET_IN_RESPONSE;
			memcpy(rsp_ehs, ehs_data, ehs_len * 32);
		}
	}

	up_read(&hba->clk_scaling_lock);
	mutex_unlock(&hba->dev_cmd.lock);
	ufshcd_release(hba);
	return err ? : result;
}
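/*
 * Worked example for the EHS copy above: bLength is expressed in 32-byte
 * units, so a conforming Advanced RPMB response with bLength == 2 yields
 * ehs_len * 32 = 64 bytes copied into rsp_ehs, starting at byte 32
 * (EHS_OFFSET_IN_RESPONSE) of the 512-byte response buffer.
 */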
/**
 * ufshcd_eh_device_reset_handler() - Reset a single logical unit.
 * @cmd: SCSI command pointer
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
	unsigned long flags, pending_reqs = 0, not_cleared = 0;
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	u32 pos;
	u8 resp = 0xF;
	int err = 0, lun;

	host = cmd->device->host;
	hba = shost_priv(host);

	lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
	err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
		if (!err)
			err = resp;
		goto out;
	}

	/* clear the commands that were pending for corresponding LUN */
	spin_lock_irqsave(&hba->outstanding_lock, flags);
	for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs)
		if (hba->lrb[pos].lun == lun)
			__set_bit(pos, &pending_reqs);
	hba->outstanding_reqs &= ~pending_reqs;
	spin_unlock_irqrestore(&hba->outstanding_lock, flags);

	if (ufshcd_clear_cmds(hba, pending_reqs) < 0) {
		spin_lock_irqsave(&hba->outstanding_lock, flags);
		not_cleared = pending_reqs &
			ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
		hba->outstanding_reqs |= not_cleared;
		spin_unlock_irqrestore(&hba->outstanding_lock, flags);

		dev_err(hba->dev, "%s: failed to clear requests %#lx\n",
			__func__, not_cleared);
	}
	__ufshcd_transfer_req_compl(hba, pending_reqs & ~not_cleared);

out:
	hba->req_abort_count = 0;
	ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, (u32)err);
	if (!err) {
		err = SUCCESS;
	} else {
		dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
		err = FAILED;
	}
	return err;
}
static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
{
	struct ufshcd_lrb *lrbp;
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutrs) {
		lrbp = &hba->lrb[tag];
		lrbp->req_abort_skip = true;
	}
}
/**
 * ufshcd_try_to_abort_task - abort a specific task
 * @hba: Pointer to adapter instance
 * @tag: Task tag/index to be aborted
 *
 * Abort the pending command in device by sending UFS_ABORT_TASK task management
 * command, and in host controller by clearing the door-bell register. There can
 * be race between controller sending the command to the device while abort is
 * issued. To avoid that, first issue UFS_QUERY_TASK to check if the command is
 * really issued and then try to abort it.
 *
 * Returns zero on success, non-zero on failure
 */
static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
{
	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
	int err = 0;
	int poll_cnt;
	u8 resp = 0xF;
	u32 reg;

	for (poll_cnt = 100; poll_cnt; poll_cnt--) {
		err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
				UFS_QUERY_TASK, &resp);
		if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
			/* cmd pending in the device */
			dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
				__func__, tag);
			break;
		} else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
			/*
			 * cmd not pending in the device, check if it is
			 * in transition.
			 */
			dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
				__func__, tag);
			reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
			if (reg & (1 << tag)) {
				/* sleep for max. 200us to stabilize */
				usleep_range(100, 200);
				continue;
			}
			/* command completed already */
			dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
				__func__, tag);
			goto out;
		} else {
			dev_err(hba->dev,
				"%s: no response from device. tag = %d, err %d\n",
				__func__, tag, err);
			if (!err)
				err = resp; /* service response error */
			goto out;
		}
	}

	if (!poll_cnt) {
		err = -EBUSY;
		goto out;
	}

	err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
			UFS_ABORT_TASK, &resp);
	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
		if (!err) {
			err = resp; /* service response error */
			dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
				__func__, tag, err);
		}
		goto out;
	}

	err = ufshcd_clear_cmds(hba, 1U << tag);
	if (err)
		dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
			__func__, tag, err);

out:
	return err;
}
/**
 * ufshcd_abort - scsi host template eh_abort_handler callback
 * @cmd: SCSI command pointer
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_abort(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct ufs_hba *hba = shost_priv(host);
	int tag = scsi_cmd_to_rq(cmd)->tag;
	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
	unsigned long flags;
	int err = FAILED;
	bool outstanding;
	u32 reg;

	WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);

	ufshcd_hold(hba, false);
	reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	/* If command is already aborted/completed, return FAILED. */
	if (!(test_bit(tag, &hba->outstanding_reqs))) {
		dev_err(hba->dev,
			"%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
			__func__, tag, hba->outstanding_reqs, reg);
		goto release;
	}

	/* Print Transfer Request of aborted task */
	dev_info(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);

	/*
	 * Print detailed info about aborted request.
	 * As more than one request might get aborted at the same time,
	 * print full information only for the first aborted request in order
	 * to reduce repeated printouts. For other aborted requests only print
	 * basic details.
	 */
	scsi_print_command(cmd);
	if (!hba->req_abort_count) {
		ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, tag);
		ufshcd_print_evt_hist(hba);
		ufshcd_print_host_state(hba);
		ufshcd_print_pwr_info(hba);
		ufshcd_print_trs(hba, 1 << tag, true);
	} else {
		ufshcd_print_trs(hba, 1 << tag, false);
	}
	hba->req_abort_count++;

	if (!(reg & (1 << tag))) {
		dev_err(hba->dev,
		"%s: cmd was completed, but without a notifying intr, tag = %d",
		__func__, tag);
		__ufshcd_transfer_req_compl(hba, 1UL << tag);
		goto release;
	}

	/*
	 * Task abort to the device W-LUN is illegal. When this command
	 * will fail, due to spec violation, scsi err handling next step
	 * will be to send LU reset which, again, is a spec violation.
	 * To avoid these unnecessary/illegal steps, first we clean up
	 * the lrb taken by this cmd and re-set it in outstanding_reqs,
	 * then queue the eh_work and bail.
	 */
	if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
		ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);

		spin_lock_irqsave(host->host_lock, flags);
		hba->force_reset = true;
		ufshcd_schedule_eh_work(hba);
		spin_unlock_irqrestore(host->host_lock, flags);
		goto release;
	}

	/* Skip task abort in case previous aborts failed and report failure */
	if (lrbp->req_abort_skip) {
		dev_err(hba->dev, "%s: skipping abort\n", __func__);
		ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
		goto release;
	}

	err = ufshcd_try_to_abort_task(hba, tag);
	if (err) {
		dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
		ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
		err = FAILED;
		goto release;
	}

	/*
	 * Clear the corresponding bit from outstanding_reqs since the command
	 * has been aborted successfully.
	 */
	spin_lock_irqsave(&hba->outstanding_lock, flags);
	outstanding = __test_and_clear_bit(tag, &hba->outstanding_reqs);
	spin_unlock_irqrestore(&hba->outstanding_lock, flags);

	if (outstanding)
		ufshcd_release_scsi_cmd(hba, lrbp);

	err = SUCCESS;

release:
	/* Matches the ufshcd_hold() call at the start of this function. */
	ufshcd_release(hba);
	return err;
}
/**
 * ufshcd_host_reset_and_restore - reset and restore host controller
 * @hba: per-adapter instance
 *
 * Note that host controller reset may issue DME_RESET to
 * local and remote (device) Uni-Pro stack and the attributes
 * are reset to default state.
 *
 * Returns zero on success, non-zero on failure
 */
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
{
	int err;

	/*
	 * Stop the host controller and complete the requests
	 * cleared by h/w
	 */
	ufshpb_toggle_state(hba, HPB_PRESENT, HPB_RESET);
	ufshcd_hba_stop(hba);
	hba->silence_err_logs = true;
	ufshcd_complete_requests(hba);
	hba->silence_err_logs = false;

	/* scale up clocks to max frequency before full reinitialization */
	ufshcd_scale_clks(hba, true);

	err = ufshcd_hba_enable(hba);

	/* Establish the link again and restore the device */
	if (!err)
		err = ufshcd_probe_hba(hba, false);

	if (err)
		dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
	ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err);
	return err;
}
/**
 * ufshcd_reset_and_restore - reset and re-initialize host/device
 * @hba: per-adapter instance
 *
 * Reset and recover device, host and re-establish link. This
 * is helpful to recover the communication in fatal error conditions.
 *
 * Returns zero on success, non-zero on failure
 */
static int ufshcd_reset_and_restore(struct ufs_hba *hba)
{
	u32 saved_err = 0;
	u32 saved_uic_err = 0;
	int err = 0;
	unsigned long flags;
	int retries = MAX_HOST_RESET_RETRIES;

	spin_lock_irqsave(hba->host->host_lock, flags);
	do {
		/*
		 * This is a fresh start, cache and clear saved error first,
		 * in case new error generated during reset and restore.
		 */
		saved_err |= hba->saved_err;
		saved_uic_err |= hba->saved_uic_err;
		hba->saved_err = 0;
		hba->saved_uic_err = 0;
		hba->force_reset = false;
		hba->ufshcd_state = UFSHCD_STATE_RESET;
		spin_unlock_irqrestore(hba->host->host_lock, flags);

		/* Reset the attached device */
		ufshcd_device_reset(hba);

		err = ufshcd_host_reset_and_restore(hba);

		spin_lock_irqsave(hba->host->host_lock, flags);
		if (err)
			continue;
		/* Do not exit unless operational or dead */
		if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL &&
		    hba->ufshcd_state != UFSHCD_STATE_ERROR &&
		    hba->ufshcd_state != UFSHCD_STATE_EH_SCHEDULED_NON_FATAL)
			err = -EAGAIN;
	} while (err && --retries);

	/*
	 * Inform scsi mid-layer that we did reset and allow to handle
	 * Unit Attention properly.
	 */
	scsi_report_bus_reset(hba->host, 0);
	if (err) {
		hba->ufshcd_state = UFSHCD_STATE_ERROR;
		hba->saved_err |= saved_err;
		hba->saved_uic_err |= saved_uic_err;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return err;
}
/**
 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
 * @cmd: SCSI command pointer
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	int err = SUCCESS;
	unsigned long flags;
	struct ufs_hba *hba;

	hba = shost_priv(cmd->device->host);

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->force_reset = true;
	ufshcd_schedule_eh_work(hba);
	dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	flush_work(&hba->eh_work);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->ufshcd_state == UFSHCD_STATE_ERROR)
		err = FAILED;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return err;
}
/**
 * ufshcd_get_max_icc_level - calculate the ICC level
 * @sup_curr_uA: max. current supported by the regulator
 * @start_scan: row at the desc table to start scan from
 * @buff: power descriptor buffer
 *
 * Returns calculated max ICC level for specific regulator
 */
static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan,
				    const char *buff)
{
	int i;
	int curr_uA;
	u16 data;
	u16 unit;

	for (i = start_scan; i >= 0; i--) {
		data = get_unaligned_be16(&buff[2 * i]);
		unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
						ATTR_ICC_LVL_UNIT_OFFSET;
		curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
		switch (unit) {
		case UFSHCD_NANO_AMP:
			curr_uA = curr_uA / 1000;
			break;
		case UFSHCD_MILI_AMP:
			curr_uA = curr_uA * 1000;
			break;
		case UFSHCD_AMP:
			curr_uA = curr_uA * 1000 * 1000;
			break;
		case UFSHCD_MICRO_AMP:
		default:
			break;
		}
		if (sup_curr_uA >= curr_uA)
			break;
	}
	if (i < 0) {
		i = 0;
		pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
	}

	return (u32)i;
}
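/*
 * Example of the unit normalization above: a descriptor entry whose value
 * field is 100 with a UFSHCD_MILI_AMP unit is compared as 100 * 1000 uA,
 * while the same value with a UFSHCD_NANO_AMP unit becomes 100 / 1000 uA;
 * everything is brought to microamps before testing against sup_curr_uA.
 */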
/**
 * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level
 * In case regulators are not initialized we'll return 0
 * @hba: per-adapter instance
 * @desc_buf: power descriptor buffer to extract ICC levels from.
 *
 * Returns calculated ICC level
 */
static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
						const u8 *desc_buf)
{
	u32 icc_level = 0;

	if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
	    !hba->vreg_info.vccq2) {
		/*
		 * Using dev_dbg to avoid messages during runtime PM to avoid
		 * never-ending cycles of messages written back to storage by
		 * user space causing runtime resume, causing more messages and
		 * so on.
		 */
		dev_dbg(hba->dev,
			"%s: Regulator capability was not set, actvIccLevel=%d",
			__func__, icc_level);
		goto out;
	}

	if (hba->vreg_info.vcc->max_uA)
		icc_level = ufshcd_get_max_icc_level(
				hba->vreg_info.vcc->max_uA,
				POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);

	if (hba->vreg_info.vccq->max_uA)
		icc_level = ufshcd_get_max_icc_level(
				hba->vreg_info.vccq->max_uA,
				icc_level,
				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);

	if (hba->vreg_info.vccq2->max_uA)
		icc_level = ufshcd_get_max_icc_level(
				hba->vreg_info.vccq2->max_uA,
				icc_level,
				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
out:
	return icc_level;
}
static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
{
	int ret;
	u8 *desc_buf;
	u32 icc_level;

	desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
	if (!desc_buf)
		return;

	ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0,
				     desc_buf, QUERY_DESC_MAX_SIZE);
	if (ret) {
		dev_err(hba->dev,
			"%s: Failed reading power descriptor ret = %d",
			__func__, ret);
		goto out;
	}

	icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf);
	dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level);

	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
		QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);

	if (ret)
		dev_err(hba->dev,
			"%s: Failed configuring bActiveICCLevel = %d ret = %d",
			__func__, icc_level, ret);

out:
	kfree(desc_buf);
}
static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev)
{
	scsi_autopm_get_device(sdev);
	blk_pm_runtime_init(sdev->request_queue, &sdev->sdev_gendev);
	if (sdev->rpm_autosuspend)
		pm_runtime_set_autosuspend_delay(&sdev->sdev_gendev,
						 RPM_AUTOSUSPEND_DELAY_MS);
	scsi_autopm_put_device(sdev);
}
/**
 * ufshcd_scsi_add_wlus - Adds required W-LUs
 * @hba: per-adapter instance
 *
 * UFS device specification requires the UFS devices to support 4 well known
 * logical units:
 *	"REPORT_LUNS" (address: 01h)
 *	"UFS Device" (address: 50h)
 *	"RPMB" (address: 44h)
 *	"BOOT" (address: 30h)
 *
 * UFS device's power management needs to be controlled by "POWER CONDITION"
 * field of SSU (START STOP UNIT) command. But this "power condition" field
 * will take effect only when it's sent to "UFS device" well known logical unit
 * hence we require the scsi_device instance to represent this logical unit in
 * order for the UFS host driver to send the SSU command for power management.
 *
 * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
 * Block) LU so user space process can control this LU. User space may also
 * want to have access to BOOT LU.
 *
 * This function adds scsi device instances for each of all well known LUs
 * (except "REPORT LUNS" LU).
 *
 * Returns zero on success (all required W-LUs are added successfully),
 * non-zero error value on failure (if failed to add any of the required W-LU).
 */
static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
{
	int ret = 0;
	struct scsi_device *sdev_boot, *sdev_rpmb;

	hba->ufs_device_wlun = __scsi_add_device(hba->host, 0, 0,
		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
	if (IS_ERR(hba->ufs_device_wlun)) {
		ret = PTR_ERR(hba->ufs_device_wlun);
		hba->ufs_device_wlun = NULL;
		goto out;
	}
	scsi_device_put(hba->ufs_device_wlun);

	sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
	if (IS_ERR(sdev_rpmb)) {
		ret = PTR_ERR(sdev_rpmb);
		goto remove_ufs_device_wlun;
	}
	ufshcd_blk_pm_runtime_init(sdev_rpmb);
	scsi_device_put(sdev_rpmb);

	sdev_boot = __scsi_add_device(hba->host, 0, 0,
		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
	if (IS_ERR(sdev_boot)) {
		dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
	} else {
		ufshcd_blk_pm_runtime_init(sdev_boot);
		scsi_device_put(sdev_boot);
	}
	goto out;

remove_ufs_device_wlun:
	scsi_remove_device(hba->ufs_device_wlun);
out:
	return ret;
}
static void ufshcd_wb_probe(struct ufs_hba *hba, const u8 *desc_buf)
{
	struct ufs_dev_info *dev_info = &hba->dev_info;
	u8 lun;
	u32 d_lu_wb_buf_alloc;
	u32 ext_ufs_feature;

	if (!ufshcd_is_wb_allowed(hba))
		return;

	/*
	 * Probe WB only for UFS-2.2 and UFS-3.1 (and later) devices or
	 * UFS devices with quirk UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES
	 * enabled
	 */
	if (!(dev_info->wspecversion >= 0x310 ||
	      dev_info->wspecversion == 0x220 ||
	     (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES)))
		goto wb_disabled;

	ext_ufs_feature = get_unaligned_be32(desc_buf +
					DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);

	if (!(ext_ufs_feature & UFS_DEV_WRITE_BOOSTER_SUP))
		goto wb_disabled;

	/*
	 * WB may be supported but not configured while provisioning. The spec
	 * says, in dedicated wb buffer mode, a max of 1 lun would have wb
	 * buffer configured.
	 */
	dev_info->wb_buffer_type = desc_buf[DEVICE_DESC_PARAM_WB_TYPE];

	dev_info->b_presrv_uspc_en =
		desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];

	if (dev_info->wb_buffer_type == WB_BUF_MODE_SHARED) {
		if (!get_unaligned_be32(desc_buf +
				   DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS))
			goto wb_disabled;
	} else {
		for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) {
			d_lu_wb_buf_alloc = 0;
			ufshcd_read_unit_desc_param(hba,
					lun,
					UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS,
					(u8 *)&d_lu_wb_buf_alloc,
					sizeof(d_lu_wb_buf_alloc));
			if (d_lu_wb_buf_alloc) {
				dev_info->wb_dedicated_lu = lun;
				break;
			}
		}

		if (!d_lu_wb_buf_alloc)
			goto wb_disabled;
	}

	if (!ufshcd_is_wb_buf_lifetime_available(hba))
		goto wb_disabled;

	return;

wb_disabled:
	hba->caps &= ~UFSHCD_CAP_WB_EN;
}
static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf)
{
	struct ufs_dev_info *dev_info = &hba->dev_info;
	u32 ext_ufs_feature;
	u8 mask = 0;

	if (!(hba->caps & UFSHCD_CAP_TEMP_NOTIF) || dev_info->wspecversion < 0x300)
		return;

	ext_ufs_feature = get_unaligned_be32(desc_buf + DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);

	if (ext_ufs_feature & UFS_DEV_LOW_TEMP_NOTIF)
		mask |= MASK_EE_TOO_LOW_TEMP;

	if (ext_ufs_feature & UFS_DEV_HIGH_TEMP_NOTIF)
		mask |= MASK_EE_TOO_HIGH_TEMP;

	if (mask) {
		ufshcd_enable_ee(hba, mask);
		ufs_hwmon_probe(hba, mask);
	}
}
static void ufshcd_ext_iid_probe(struct ufs_hba *hba, u8 *desc_buf)
{
	struct ufs_dev_info *dev_info = &hba->dev_info;
	u32 ext_ufs_feature;
	u32 ext_iid_en = 0;
	int err;

	/* Only UFS-4.0 and above may support EXT_IID */
	if (dev_info->wspecversion < 0x400)
		goto out;

	ext_ufs_feature = get_unaligned_be32(desc_buf +
				     DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
	if (!(ext_ufs_feature & UFS_DEV_EXT_IID_SUP))
		goto out;

	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				      QUERY_ATTR_IDN_EXT_IID_EN, 0, 0, &ext_iid_en);
	if (err)
		dev_err(hba->dev, "failed reading bEXTIIDEn. err = %d\n", err);

out:
	dev_info->b_ext_iid_en = ext_iid_en;
}
void ufshcd_fixup_dev_quirks(struct ufs_hba *hba,
			     const struct ufs_dev_quirk *fixups)
{
	const struct ufs_dev_quirk *f;
	struct ufs_dev_info *dev_info = &hba->dev_info;

	if (!fixups)
		return;

	for (f = fixups; f->quirk; f++) {
		if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
		     f->wmanufacturerid == UFS_ANY_VENDOR) &&
		     ((dev_info->model &&
		       STR_PRFX_EQUAL(f->model, dev_info->model)) ||
		      !strcmp(f->model, UFS_ANY_MODEL)))
			hba->dev_quirks |= f->quirk;
	}
}
EXPORT_SYMBOL_GPL(ufshcd_fixup_dev_quirks);
static void ufs_fixup_device_setup(struct ufs_hba *hba)
{
	/* fix by general quirk table */
	ufshcd_fixup_dev_quirks(hba, ufs_fixups);

	/* allow vendors to fix quirks */
	ufshcd_vops_fixup_dev_quirks(hba);
}
static int ufs_get_device_desc(struct ufs_hba *hba)
{
	int err;
	u8 model_index;
	u8 b_ufs_feature_sup;
	u8 *desc_buf;
	struct ufs_dev_info *dev_info = &hba->dev_info;

	desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
	if (!desc_buf) {
		err = -ENOMEM;
		goto out;
	}

	err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf,
				     QUERY_DESC_MAX_SIZE);
	if (err) {
		dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
			__func__, err);
		goto out;
	}

	/*
	 * getting vendor (manufacturerID) and Bank Index in big endian
	 * format
	 */
	dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
				     desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];

	/* getting Specification Version in big endian format */
	dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
				      desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
	dev_info->bqueuedepth = desc_buf[DEVICE_DESC_PARAM_Q_DPTH];
	b_ufs_feature_sup = desc_buf[DEVICE_DESC_PARAM_UFS_FEAT];

	model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];

	if (dev_info->wspecversion >= UFS_DEV_HPB_SUPPORT_VERSION &&
	    (b_ufs_feature_sup & UFS_DEV_HPB_SUPPORT)) {
		bool hpb_en = false;

		ufshpb_get_dev_info(hba, desc_buf);

		if (!ufshpb_is_legacy(hba))
			err = ufshcd_query_flag_retry(hba,
						      UPIU_QUERY_OPCODE_READ_FLAG,
						      QUERY_FLAG_IDN_HPB_EN, 0,
						      &hpb_en);

		if (ufshpb_is_legacy(hba) || (!err && hpb_en))
			dev_info->hpb_enabled = true;
	}

	err = ufshcd_read_string_desc(hba, model_index,
				      &dev_info->model, SD_ASCII_STD);
	if (err < 0) {
		dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
			__func__, err);
		goto out;
	}

	hba->luns_avail = desc_buf[DEVICE_DESC_PARAM_NUM_LU] +
		desc_buf[DEVICE_DESC_PARAM_NUM_WLU];

	ufs_fixup_device_setup(hba);

	ufshcd_wb_probe(hba, desc_buf);

	ufshcd_temp_notif_probe(hba, desc_buf);

	if (hba->ext_iid_sup)
		ufshcd_ext_iid_probe(hba, desc_buf);

	/*
	 * ufshcd_read_string_desc returns size of the string
	 * reset the error value
	 */
	err = 0;

out:
	kfree(desc_buf);
	return err;
}

static void ufs_put_device_desc(struct ufs_hba *hba)
{
	struct ufs_dev_info *dev_info = &hba->dev_info;

	kfree(dev_info->model);
	dev_info->model = NULL;
}
/**
 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
 * @hba: per-adapter instance
 *
 * PA_TActivate parameter can be tuned manually if UniPro version is less than
 * 1.61. PA_TActivate needs to be greater than or equal to peerM-PHY's
 * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
 * the hibern8 exit latency.
 *
 * Returns zero on success, non-zero error value on failure.
 */
static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
{
	int ret = 0;
	u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;

	ret = ufshcd_dme_peer_get(hba,
				  UIC_ARG_MIB_SEL(
					RX_MIN_ACTIVATETIME_CAPABILITY,
					UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
				  &peer_rx_min_activatetime);
	if (ret)
		goto out;

	/* make sure proper unit conversion is applied */
	tuned_pa_tactivate =
		((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
		 / PA_TACTIVATE_TIME_UNIT_US);
	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
			     tuned_pa_tactivate);

out:
	return ret;
}
/**
 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
 * @hba: per-adapter instance
 *
 * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
 * 1.61. PA_Hibern8Time needs to be the maximum of local M-PHY's
 * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
 * This optimal value can help reduce the hibern8 exit latency.
 *
 * Returns zero on success, non-zero error value on failure.
 */
static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
{
	int ret = 0;
	u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
	u32 max_hibern8_time, tuned_pa_hibern8time;

	ret = ufshcd_dme_get(hba,
			     UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
				  &local_tx_hibern8_time_cap);
	if (ret)
		goto out;

	ret = ufshcd_dme_peer_get(hba,
				  UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
					UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
				  &peer_rx_hibern8_time_cap);
	if (ret)
		goto out;

	max_hibern8_time = max(local_tx_hibern8_time_cap,
			       peer_rx_hibern8_time_cap);
	/* make sure proper unit conversion is applied */
	tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
				/ PA_HIBERN8_TIME_UNIT_US);
	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
			     tuned_pa_hibern8time);
out:
	return ret;
}
/**
 * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
 * less than device PA_TACTIVATE time.
 * @hba: per-adapter instance
 *
 * Some UFS devices require host PA_TACTIVATE to be lower than device
 * PA_TACTIVATE, we need to enable UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
 * for such devices.
 *
 * Returns zero on success, non-zero error value on failure.
 */
static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
{
	int ret = 0;
	u32 granularity, peer_granularity;
	u32 pa_tactivate, peer_pa_tactivate;
	u32 pa_tactivate_us, peer_pa_tactivate_us;
	static const u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
				  &granularity);
	if (ret)
		goto out;

	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
				  &peer_granularity);
	if (ret)
		goto out;

	if ((granularity < PA_GRANULARITY_MIN_VAL) ||
	    (granularity > PA_GRANULARITY_MAX_VAL)) {
		dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
			__func__, granularity);
		return -EINVAL;
	}

	if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
	    (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
		dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
			__func__, peer_granularity);
		return -EINVAL;
	}

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
	if (ret)
		goto out;

	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
				  &peer_pa_tactivate);
	if (ret)
		goto out;

	pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
	peer_pa_tactivate_us = peer_pa_tactivate *
			     gran_to_us_table[peer_granularity - 1];

	if (pa_tactivate_us >= peer_pa_tactivate_us) {
		u32 new_peer_pa_tactivate;

		new_peer_pa_tactivate = pa_tactivate_us /
				      gran_to_us_table[peer_granularity - 1];
		new_peer_pa_tactivate++;
		ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
					  new_peer_pa_tactivate);
	}

out:
	return ret;
}
static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
{
	if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
		ufshcd_tune_pa_tactivate(hba);
		ufshcd_tune_pa_hibern8time(hba);
	}

	ufshcd_vops_apply_dev_quirks(hba);

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
		/* set 1ms timeout for PA_TACTIVATE */
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
		ufshcd_quirk_tune_host_pa_tactivate(hba);
}
static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
{
	hba->ufs_stats.hibern8_exit_cnt = 0;
	hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
	hba->req_abort_count = 0;
}
static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
{
	int err;
	u8 *desc_buf;

	desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
	if (!desc_buf) {
		err = -ENOMEM;
		goto out;
	}

	err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0,
				     desc_buf, QUERY_DESC_MAX_SIZE);
	if (err) {
		dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
			__func__, err);
		goto out;
	}

	if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1)
		hba->dev_info.max_lu_supported = 32;
	else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
		hba->dev_info.max_lu_supported = 8;

	if (desc_buf[QUERY_DESC_LENGTH_OFFSET] >=
		GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS)
		ufshpb_get_geo_info(hba, desc_buf);

out:
	kfree(desc_buf);
	return err;
}
struct ufs_ref_clk {
	unsigned long freq_hz;
	enum ufs_ref_clk_freq val;
};

static const struct ufs_ref_clk ufs_ref_clk_freqs[] = {
	{19200000, REF_CLK_FREQ_19_2_MHZ},
	{26000000, REF_CLK_FREQ_26_MHZ},
	{38400000, REF_CLK_FREQ_38_4_MHZ},
	{52000000, REF_CLK_FREQ_52_MHZ},
	{0, REF_CLK_FREQ_INVAL},
};

static enum ufs_ref_clk_freq
ufs_get_bref_clk_from_hz(unsigned long freq)
{
	int i;

	for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++)
		if (ufs_ref_clk_freqs[i].freq_hz == freq)
			return ufs_ref_clk_freqs[i].val;

	return REF_CLK_FREQ_INVAL;
}

void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
{
	unsigned long freq;

	freq = clk_get_rate(refclk);

	hba->dev_ref_clk_freq =
		ufs_get_bref_clk_from_hz(freq);

	if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
		dev_err(hba->dev,
		"invalid ref_clk setting = %ld\n", freq);
}
static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
{
	int err;
	u32 ref_clk;
	u32 freq = hba->dev_ref_clk_freq;

	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);
	if (err) {
		dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
			err);
		goto out;
	}

	if (ref_clk == freq)
		goto out; /* nothing to update */

	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
			QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq);
	if (err) {
		dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
			ufs_ref_clk_freqs[freq].freq_hz);
		goto out;
	}

	dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n",
			ufs_ref_clk_freqs[freq].freq_hz);

out:
	return err;
}
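/*
 * Note that the bRefClkFreq attribute holds the enum ufs_ref_clk_freq
 * value, not a rate in Hz; this is why @freq doubles as an index into
 * ufs_ref_clk_freqs[] when the corresponding rate is logged above.
 */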
static int ufshcd_device_params_init(struct ufs_hba *hba)
{
	bool flag;
	int ret;

	/* Init UFS geometry descriptor related parameters */
	ret = ufshcd_device_geo_params_init(hba);
	if (ret)
		goto out;

	/* Check and apply UFS device quirks */
	ret = ufs_get_device_desc(hba);
	if (ret) {
		dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
			__func__, ret);
		goto out;
	}

	ufshcd_get_ref_clk_gating_wait(hba);

	if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
			QUERY_FLAG_IDN_PWR_ON_WPE, 0, &flag))
		hba->dev_info.f_power_on_wp_en = flag;

	/* Probe maximum power mode co-supported by both UFS host and device */
	if (ufshcd_get_max_pwr_mode(hba))
		dev_err(hba->dev,
			"%s: Failed getting max supported power mode\n",
			__func__);
out:
	return ret;
}
/**
 * ufshcd_add_lus - probe and add UFS logical units
 * @hba: per-adapter instance
 */
static int ufshcd_add_lus(struct ufs_hba *hba)
{
	int ret;

	/* Add required well known logical units to scsi mid layer */
	ret = ufshcd_scsi_add_wlus(hba);
	if (ret)
		goto out;

	/* Initialize devfreq after UFS device is detected */
	if (ufshcd_is_clkscaling_supported(hba)) {
		memcpy(&hba->clk_scaling.saved_pwr_info.info,
			&hba->pwr_info,
			sizeof(struct ufs_pa_layer_attr));
		hba->clk_scaling.saved_pwr_info.is_valid = true;
		hba->clk_scaling.is_allowed = true;

		ret = ufshcd_devfreq_init(hba);
		if (ret)
			goto out;

		hba->clk_scaling.is_enabled = true;
		ufshcd_init_clk_scaling_sysfs(hba);
	}

	ufs_bsg_probe(hba);
	ufshpb_init(hba);
	scsi_scan_host(hba->host);
	pm_runtime_put_sync(hba->dev);

out:
	return ret;
}
/* SDB - Single Doorbell */
static void ufshcd_release_sdb_queue(struct ufs_hba *hba, int nutrs)
{
	size_t ucdl_size, utrdl_size;

	ucdl_size = sizeof(struct utp_transfer_cmd_desc) * nutrs;
	dmam_free_coherent(hba->dev, ucdl_size, hba->ucdl_base_addr,
			   hba->ucdl_dma_addr);

	utrdl_size = sizeof(struct utp_transfer_req_desc) * nutrs;
	dmam_free_coherent(hba->dev, utrdl_size, hba->utrdl_base_addr,
			   hba->utrdl_dma_addr);

	devm_kfree(hba->dev, hba->lrb);
}
static int ufshcd_alloc_mcq(struct ufs_hba *hba)
{
	int ret;
	int old_nutrs = hba->nutrs;

	ret = ufshcd_mcq_decide_queue_depth(hba);
	if (ret < 0)
		return ret;

	hba->nutrs = ret;
	ret = ufshcd_mcq_init(hba);
	if (ret)
		goto err;

	/*
	 * Previously allocated memory for nutrs may not be enough in MCQ mode.
	 * Number of supported tags in MCQ mode may be larger than SDB mode.
	 */
	if (hba->nutrs != old_nutrs) {
		ufshcd_release_sdb_queue(hba, old_nutrs);
		ret = ufshcd_memory_alloc(hba);
		if (ret)
			goto err;
		ufshcd_host_memory_configure(hba);
	}

	ret = ufshcd_mcq_memory_alloc(hba);
	if (ret)
		goto err;

	return 0;
err:
	hba->nutrs = old_nutrs;
	return ret;
}
static void ufshcd_config_mcq(struct ufs_hba *hba)
{
	int ret;

	ret = ufshcd_mcq_vops_config_esi(hba);
	dev_info(hba->dev, "ESI %sconfigured\n", ret ? "is not " : "");

	ufshcd_enable_intr(hba, UFSHCD_ENABLE_MCQ_INTRS);
	ufshcd_mcq_make_queues_operational(hba);
	ufshcd_mcq_config_mac(hba, hba->nutrs);

	hba->host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
	hba->reserved_slot = hba->nutrs - UFSHCD_NUM_RESERVED;

	/* Select MCQ mode */
	ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x1,
		      REG_UFS_MEM_CFG);
	hba->mcq_enabled = true;

	dev_info(hba->dev, "MCQ configured, nr_queues=%d, io_queues=%d, read_queue=%d, poll_queues=%d, queue_depth=%d\n",
		 hba->nr_hw_queues, hba->nr_queues[HCTX_TYPE_DEFAULT],
		 hba->nr_queues[HCTX_TYPE_READ], hba->nr_queues[HCTX_TYPE_POLL],
		 hba->nutrs);
}
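/*
 * Queue depth sketch (illustrative numbers, assuming UFSHCD_NUM_RESERVED
 * == 1): with hba->nutrs == 64, the SCSI midlayer sees can_queue == 63
 * while tag 63 is kept as reserved_slot for device management commands,
 * so regular I/O and internal commands never collide on a tag.
 */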
static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
{
	int ret;
	struct Scsi_Host *host = hba->host;

	hba->ufshcd_state = UFSHCD_STATE_RESET;

	ret = ufshcd_link_startup(hba);
	if (ret)
		return ret;

	if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION)
		return ret;

	/* Debug counters initialization */
	ufshcd_clear_dbg_ufs_stats(hba);

	/* UniPro link is active now */
	ufshcd_set_link_active(hba);

	/* Reconfigure MCQ upon reset */
	if (is_mcq_enabled(hba) && !init_dev_params)
		ufshcd_config_mcq(hba);

	/* Verify device initialization by sending NOP OUT UPIU */
	ret = ufshcd_verify_dev_init(hba);
	if (ret)
		return ret;

	/* Initiate UFS initialization, and wait until completion */
	ret = ufshcd_complete_dev_init(hba);
	if (ret)
		return ret;

	/*
	 * Initialize UFS device parameters used by driver, these
	 * parameters are associated with UFS descriptors.
	 */
	if (init_dev_params) {
		ret = ufshcd_device_params_init(hba);
		if (ret)
			return ret;
		if (is_mcq_supported(hba) && !hba->scsi_host_added) {
			ret = ufshcd_alloc_mcq(hba);
			if (ret) {
				/* Continue with SDB mode */
				use_mcq_mode = false;
				dev_err(hba->dev, "MCQ mode is disabled, err=%d\n",
					ret);
			}
			ret = scsi_add_host(host, hba->dev);
			if (ret) {
				dev_err(hba->dev, "scsi_add_host failed\n");
				return ret;
			}
			hba->scsi_host_added = true;
		}
		/* MCQ may be disabled if ufshcd_alloc_mcq() fails */
		if (is_mcq_supported(hba) && use_mcq_mode)
			ufshcd_config_mcq(hba);
	}

	ufshcd_tune_unipro_params(hba);

	/* UFS device is also active now */
	ufshcd_set_ufs_dev_active(hba);
	ufshcd_force_reset_auto_bkops(hba);

	/* Gear up to HS gear if supported */
	if (hba->max_pwr_info.is_valid) {
		/*
		 * Set the right value to bRefClkFreq before attempting to
		 * switch to HS gears.
		 */
		if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
			ufshcd_set_dev_ref_clk(hba);
		ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
		if (ret) {
			dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
				__func__, ret);
			return ret;
		}
	}

	return 0;
}
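/*
 * Call-flow note: ufshcd_device_init() runs with @init_dev_params == true
 * only on the first probe (see ufshcd_async_scan() below), which also
 * registers the SCSI host and, where supported, switches to MCQ mode.
 * Re-initialization from error handling passes false and only
 * reconfigures the already-allocated MCQ resources.
 */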
/**
 * ufshcd_probe_hba - probe hba to detect device and initialize it
 * @hba: per-adapter instance
 * @init_dev_params: whether or not to call ufshcd_device_params_init().
 *
 * Execute link-startup and verify device initialization
 */
static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
{
	ktime_t start = ktime_get();
	unsigned long flags;
	int ret;

	ret = ufshcd_device_init(hba, init_dev_params);
	if (ret)
		goto out;

	if (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH) {
		/* Reset the device and controller before doing reinit */
		ufshcd_device_reset(hba);
		ufshcd_hba_stop(hba);
		ufshcd_vops_reinit_notify(hba);
		ret = ufshcd_hba_enable(hba);
		if (ret) {
			dev_err(hba->dev, "Host controller enable failed\n");
			ufshcd_print_evt_hist(hba);
			ufshcd_print_host_state(hba);
			goto out;
		}

		/* Reinit the device */
		ret = ufshcd_device_init(hba, init_dev_params);
		if (ret)
			goto out;
	}

	ufshcd_print_pwr_info(hba);

	/*
	 * bActiveICCLevel is volatile for UFS device (as per latest v2.1 spec)
	 * and for removable UFS card as well, hence always set the parameter.
	 * Note: Error handler may issue the device reset hence resetting
	 * bActiveICCLevel as well so it is always safe to set this here.
	 */
	ufshcd_set_active_icc_lvl(hba);

	/* Enable UFS Write Booster if supported */
	ufshcd_configure_wb(hba);

	if (hba->ee_usr_mask)
		ufshcd_write_ee_control(hba);
	/* Enable Auto-Hibernate if configured */
	ufshcd_auto_hibern8_enable(hba);

	ufshpb_toggle_state(hba, HPB_RESET, HPB_PRESENT);
out:
	spin_lock_irqsave(hba->host->host_lock, flags);
	if (ret)
		hba->ufshcd_state = UFSHCD_STATE_ERROR;
	else if (hba->ufshcd_state == UFSHCD_STATE_RESET)
		hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	trace_ufshcd_init(dev_name(hba->dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	return ret;
}
/**
 * ufshcd_async_scan - asynchronous execution for probing hba
 * @data: data pointer to pass to this function
 * @cookie: cookie data
 */
static void ufshcd_async_scan(void *data, async_cookie_t cookie)
{
	struct ufs_hba *hba = (struct ufs_hba *)data;
	int ret;

	down(&hba->host_sem);
	/* Initialize hba, detect and initialize UFS device */
	ret = ufshcd_probe_hba(hba, true);
	up(&hba->host_sem);
	if (ret)
		goto out;

	/* Probe and add UFS logical units */
	ret = ufshcd_add_lus(hba);

out:
	/*
	 * If we failed to initialize the device or the device is not
	 * present, turn off the power/clocks etc.
	 */
	if (ret) {
		pm_runtime_put_sync(hba->dev);
		ufshcd_hba_exit(hba);
	}
}
static enum scsi_timeout_action ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
{
	struct ufs_hba *hba = shost_priv(scmd->device->host);

	if (!hba->system_suspending) {
		/* Activate the error handler in the SCSI core. */
		return SCSI_EH_NOT_HANDLED;
	}

	/*
	 * If we get here we know that no TMFs are outstanding and also that
	 * the only pending command is a START STOP UNIT command. Handle the
	 * timeout of that command directly to prevent a deadlock between
	 * ufshcd_set_dev_pwr_mode() and ufshcd_err_handler().
	 */
	ufshcd_link_recovery(hba);
	dev_info(hba->dev, "%s() finished; outstanding_tasks = %#lx.\n",
		 __func__, hba->outstanding_tasks);

	return hba->outstanding_reqs ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE;
}
static const struct attribute_group *ufshcd_driver_groups[] = {
	&ufs_sysfs_unit_descriptor_group,
	&ufs_sysfs_lun_attributes_group,
#ifdef CONFIG_SCSI_UFS_HPB
	&ufs_sysfs_hpb_stat_group,
	&ufs_sysfs_hpb_param_group,
#endif
	NULL,
};
static struct ufs_hba_variant_params ufs_hba_vps = {
	.hba_enable_delay_us		= 1000,
	.wb_flush_threshold		= UFS_WB_BUF_REMAIN_PERCENT(40),
	.devfreq_profile.polling_ms	= 100,
	.devfreq_profile.target		= ufshcd_devfreq_target,
	.devfreq_profile.get_dev_status	= ufshcd_devfreq_get_dev_status,
	.ondemand_data.upthreshold	= 70,
	.ondemand_data.downdifferential	= 5,
};
static struct scsi_host_template ufshcd_driver_template = {
	.module			= THIS_MODULE,
	.name			= UFSHCD,
	.proc_name		= UFSHCD,
	.map_queues		= ufshcd_map_queues,
	.queuecommand		= ufshcd_queuecommand,
	.mq_poll		= ufshcd_poll,
	.slave_alloc		= ufshcd_slave_alloc,
	.slave_configure	= ufshcd_slave_configure,
	.slave_destroy		= ufshcd_slave_destroy,
	.change_queue_depth	= ufshcd_change_queue_depth,
	.eh_abort_handler	= ufshcd_abort,
	.eh_device_reset_handler = ufshcd_eh_device_reset_handler,
	.eh_host_reset_handler   = ufshcd_eh_host_reset_handler,
	.eh_timed_out		= ufshcd_eh_timed_out,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.cmd_per_lun		= UFSHCD_CMD_PER_LUN,
	.can_queue		= UFSHCD_CAN_QUEUE,
	.max_segment_size	= PRDT_DATA_BYTE_COUNT_MAX,
	.max_sectors		= (1 << 20) / SECTOR_SIZE, /* 1 MiB */
	.max_host_blocked	= 1,
	.track_queue_depth	= 1,
	.sdev_groups		= ufshcd_driver_groups,
	.rpm_autosuspend_delay	= RPM_AUTOSUSPEND_DELAY_MS,
};
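/*
 * The max_sectors arithmetic above caps a single request at 1 MiB:
 * (1 << 20) bytes / SECTOR_SIZE (512) = 2048 sectors.
 */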
static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
				   int ua)
{
	int ret;

	if (!vreg)
		return 0;

	/*
	 * "set_load" operation shall be required on those regulators
	 * which specifically configured current limitation. Otherwise
	 * zero max_uA may cause unexpected behavior when regulator is
	 * enabled or set as high power mode.
	 */
	if (!vreg->max_uA)
		return 0;

	ret = regulator_set_load(vreg->reg, ua);
	if (ret < 0) {
		dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
				__func__, vreg->name, ua, ret);
	}

	return ret;
}
static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
					 struct ufs_vreg *vreg)
{
	return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
}

static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
					 struct ufs_vreg *vreg)
{
	if (!vreg)
		return 0;

	return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
}
static int ufshcd_config_vreg(struct device *dev,
		struct ufs_vreg *vreg, bool on)
{
	if (regulator_count_voltages(vreg->reg) <= 0)
		return 0;

	return ufshcd_config_vreg_load(dev, vreg, on ? vreg->max_uA : 0);
}
static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (!vreg || vreg->enabled)
		goto out;

	ret = ufshcd_config_vreg(dev, vreg, true);
	if (!ret)
		ret = regulator_enable(vreg->reg);

	if (!ret)
		vreg->enabled = true;
	else
		dev_err(dev, "%s: %s enable failed, err=%d\n",
				__func__, vreg->name, ret);
out:
	return ret;
}
static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (!vreg || !vreg->enabled || vreg->always_on)
		goto out;

	ret = regulator_disable(vreg->reg);

	if (!ret) {
		/* ignore errors on applying disable config */
		ufshcd_config_vreg(dev, vreg, false);
		vreg->enabled = false;
	} else {
		dev_err(dev, "%s: %s disable failed, err=%d\n",
				__func__, vreg->name, ret);
	}
out:
	return ret;
}
static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
{
	int ret = 0;
	struct device *dev = hba->dev;
	struct ufs_vreg_info *info = &hba->vreg_info;

	ret = ufshcd_toggle_vreg(dev, info->vcc, on);
	if (ret)
		goto out;

	ret = ufshcd_toggle_vreg(dev, info->vccq, on);
	if (ret)
		goto out;

	ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
out:
	if (ret) {
		ufshcd_toggle_vreg(dev, info->vccq2, false);
		ufshcd_toggle_vreg(dev, info->vccq, false);
		ufshcd_toggle_vreg(dev, info->vcc, false);
	}
	return ret;
}
static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
{
	struct ufs_vreg_info *info = &hba->vreg_info;

	return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
}
int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (!vreg)
		goto out;

	vreg->reg = devm_regulator_get(dev, vreg->name);
	if (IS_ERR(vreg->reg)) {
		ret = PTR_ERR(vreg->reg);
		dev_err(dev, "%s: %s get failed, err=%d\n",
				__func__, vreg->name, ret);
	}
out:
	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_get_vreg);
static int ufshcd_init_vreg(struct ufs_hba *hba)
{
	int ret = 0;
	struct device *dev = hba->dev;
	struct ufs_vreg_info *info = &hba->vreg_info;

	ret = ufshcd_get_vreg(dev, info->vcc);
	if (ret)
		goto out;

	ret = ufshcd_get_vreg(dev, info->vccq);
	if (!ret)
		ret = ufshcd_get_vreg(dev, info->vccq2);
out:
	return ret;
}
static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
{
	struct ufs_vreg_info *info = &hba->vreg_info;

	return ufshcd_get_vreg(hba->dev, info->vdd_hba);
}
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;
	unsigned long flags;
	ktime_t start = ktime_get();
	bool clk_state_changed = false;

	if (list_empty(head))
		goto out;

	ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
	if (ret)
		return ret;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			/*
			 * Don't disable clocks which are needed
			 * to keep the link active.
			 */
			if (ufshcd_is_link_active(hba) &&
			    clki->keep_link_active)
				continue;

			clk_state_changed = on ^ clki->enabled;
			if (on && !clki->enabled) {
				ret = clk_prepare_enable(clki->clk);
				if (ret) {
					dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
						__func__, clki->name, ret);
					goto out;
				}
			} else if (!on && clki->enabled) {
				clk_disable_unprepare(clki->clk);
			}
			clki->enabled = on;
			dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
					clki->name, on ? "en" : "dis");
		}
	}

	ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
	if (ret)
		return ret;

out:
	if (ret) {
		list_for_each_entry(clki, head, list) {
			if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
				clk_disable_unprepare(clki->clk);
		}
	} else if (!ret && on) {
		spin_lock_irqsave(hba->host->host_lock, flags);
		hba->clk_gating.state = CLKS_ON;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	}

	if (clk_state_changed)
		trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
			(on ? "on" : "off"),
			ktime_to_us(ktime_sub(ktime_get(), start)), ret);
	return ret;
}
static enum ufs_ref_clk_freq
ufshcd_parse_ref_clk_property(struct ufs_hba *hba)
{
	u32 freq;
	int ret = device_property_read_u32(hba->dev, "ref-clk-freq", &freq);

	if (ret) {
		dev_dbg(hba->dev, "Cannot query 'ref-clk-freq' property = %d", ret);
		return REF_CLK_FREQ_INVAL;
	}

	return ufs_get_bref_clk_from_hz(freq);
}
static int ufshcd_init_clocks(struct ufs_hba *hba)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct device *dev = hba->dev;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		goto out;

	list_for_each_entry(clki, head, list) {
		if (!clki->name)
			continue;

		clki->clk = devm_clk_get(dev, clki->name);
		if (IS_ERR(clki->clk)) {
			ret = PTR_ERR(clki->clk);
			dev_err(dev, "%s: %s clk get failed, %d\n",
					__func__, clki->name, ret);
			goto out;
		}

		/*
		 * Parse device ref clk freq as per device tree "ref_clk".
		 * Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL
		 * in ufshcd_alloc_host().
		 */
		if (!strcmp(clki->name, "ref_clk"))
			ufshcd_parse_dev_ref_clk_freq(hba, clki->clk);

		if (clki->max_freq) {
			ret = clk_set_rate(clki->clk, clki->max_freq);
			if (ret) {
				dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
					__func__, clki->name,
					clki->max_freq, ret);
				goto out;
			}
			clki->curr_freq = clki->max_freq;
		}
		dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
				clki->name, clk_get_rate(clki->clk));
	}
out:
	return ret;
}
static int ufshcd_variant_hba_init(struct ufs_hba *hba)
{
	int err = 0;

	if (!hba->vops)
		goto out;

	err = ufshcd_vops_init(hba);
	if (err)
		dev_err(hba->dev, "%s: variant %s init failed err %d\n",
			__func__, ufshcd_get_var_name(hba), err);
out:
	return err;
}

static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
{
	if (!hba->vops)
		return;

	ufshcd_vops_exit(hba);
}
static int ufshcd_hba_init(struct ufs_hba *hba)
{
	int err;

	/*
	 * Handle host controller power separately from the UFS device power
	 * rails as it will help controlling the UFS host controller power
	 * collapse easily which is different than UFS device power collapse.
	 * Also, enable the host controller power before we go ahead with rest
	 * of the initialization here.
	 */
	err = ufshcd_init_hba_vreg(hba);
	if (err)
		goto out;

	err = ufshcd_setup_hba_vreg(hba, true);
	if (err)
		goto out;

	err = ufshcd_init_clocks(hba);
	if (err)
		goto out_disable_hba_vreg;

	if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
		hba->dev_ref_clk_freq = ufshcd_parse_ref_clk_property(hba);

	err = ufshcd_setup_clocks(hba, true);
	if (err)
		goto out_disable_hba_vreg;

	err = ufshcd_init_vreg(hba);
	if (err)
		goto out_disable_clks;

	err = ufshcd_setup_vreg(hba, true);
	if (err)
		goto out_disable_clks;

	err = ufshcd_variant_hba_init(hba);
	if (err)
		goto out_disable_vreg;

	ufs_debugfs_hba_init(hba);

	hba->is_powered = true;
	goto out;

out_disable_vreg:
	ufshcd_setup_vreg(hba, false);
out_disable_clks:
	ufshcd_setup_clocks(hba, false);
out_disable_hba_vreg:
	ufshcd_setup_hba_vreg(hba, false);
out:
	return err;
}
static void ufshcd_hba_exit(struct ufs_hba *hba)
{
	if (hba->is_powered) {
		ufshcd_exit_clk_scaling(hba);
		ufshcd_exit_clk_gating(hba);
		if (hba->eh_wq)
			destroy_workqueue(hba->eh_wq);
		ufs_debugfs_hba_exit(hba);
		ufshcd_variant_hba_exit(hba);
		ufshcd_setup_vreg(hba, false);
		ufshcd_setup_clocks(hba, false);
		ufshcd_setup_hba_vreg(hba, false);
		hba->is_powered = false;
		ufs_put_device_desc(hba);
	}
}
static int ufshcd_execute_start_stop(struct scsi_device *sdev,
				     enum ufs_dev_pwr_mode pwr_mode,
				     struct scsi_sense_hdr *sshdr)
{
	unsigned char cdb[6] = { START_STOP, 0, 0, 0, pwr_mode << 4, 0 };
	struct request *req;
	struct scsi_cmnd *scmd;
	int ret;

	req = scsi_alloc_request(sdev->request_queue, REQ_OP_DRV_IN,
				 BLK_MQ_REQ_PM);
	if (IS_ERR(req))
		return PTR_ERR(req);

	scmd = blk_mq_rq_to_pdu(req);
	scmd->cmd_len = COMMAND_SIZE(cdb[0]);
	memcpy(scmd->cmnd, cdb, scmd->cmd_len);
	scmd->allowed = 0/*retries*/;
	scmd->flags |= SCMD_FAIL_IF_RECOVERING;
	req->timeout = 1 * HZ;
	req->rq_flags |= RQF_PM | RQF_QUIET;

	blk_execute_rq(req, /*at_head=*/true);

	if (sshdr)
		scsi_normalize_sense(scmd->sense_buffer, scmd->sense_len,
				     sshdr);
	ret = scmd->result;

	blk_mq_free_request(req);

	return ret;
}
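/*
 * CDB layout sketch for the START STOP UNIT command built above: byte 0 is
 * the opcode and byte 4 carries the UFS power condition in its upper
 * nibble, so e.g. a pwr_mode of 2 (assuming UFS_SLEEP_PWR_MODE == 2 in
 * enum ufs_dev_pwr_mode) yields byte 4 == 0x20. The request is tagged
 * RQF_PM so it is still processed while the device is runtime suspended.
 */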
/**
 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
 *			     power mode
 * @hba: per adapter instance
 * @pwr_mode: device power mode to set
 *
 * Returns 0 if requested power mode is set successfully
 * Returns < 0 if failed to set the requested power mode
 */
static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
				   enum ufs_dev_pwr_mode pwr_mode)
{
	struct scsi_sense_hdr sshdr;
	struct scsi_device *sdp;
	unsigned long flags;
	int ret, retries;

	spin_lock_irqsave(hba->host->host_lock, flags);
	sdp = hba->ufs_device_wlun;
	if (sdp && scsi_device_online(sdp))
		ret = scsi_device_get(sdp);
	else
		ret = -ENODEV;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (ret)
		return ret;

	/*
	 * If scsi commands fail, the scsi mid-layer schedules scsi error-
	 * handling, which would wait for host to be resumed. Since we know
	 * we are functional while we are here, skip host resume in error
	 * handling context.
	 */
	hba->host->eh_noresume = 1;

	/*
	 * Current function would be generally called from the power management
	 * callbacks hence set the RQF_PM flag so that it doesn't resume the
	 * already suspended children.
	 */
	for (retries = 3; retries > 0; --retries) {
		ret = ufshcd_execute_start_stop(sdp, pwr_mode, &sshdr);
		/*
		 * scsi_execute() only returns a negative value if the request
		 * queue is dying.
		 */
		if (ret <= 0)
			break;
	}
	if (ret) {
		sdev_printk(KERN_WARNING, sdp,
			    "START_STOP failed for power mode: %d, result %x\n",
			    pwr_mode, ret);
		if (ret > 0) {
			if (scsi_sense_valid(&sshdr))
				scsi_print_sense_hdr(sdp, NULL, &sshdr);
			ret = -EIO;
		}
	} else {
		hba->curr_dev_pwr_mode = pwr_mode;
	}

	scsi_device_put(sdp);
	hba->host->eh_noresume = 0;
	return ret;
}
static int ufshcd_link_state_transition(struct ufs_hba *hba,
					enum uic_link_state req_link_state,
					bool check_for_bkops)
{
	int ret = 0;

	if (req_link_state == hba->uic_link_state)
		return 0;

	if (req_link_state == UIC_LINK_HIBERN8_STATE) {
		ret = ufshcd_uic_hibern8_enter(hba);
		if (!ret) {
			ufshcd_set_link_hibern8(hba);
		} else {
			dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
					__func__, ret);
			goto out;
		}
	}
	/*
	 * If autobkops is enabled, link can't be turned off because
	 * turning off the link would also turn off the device, except in the
	 * case of DeepSleep where the device is expected to remain powered.
	 */
	else if ((req_link_state == UIC_LINK_OFF_STATE) &&
		 (!check_for_bkops || !hba->auto_bkops_enabled)) {
		/*
		 * Let's make sure that link is in low power mode, we are doing
		 * this currently by putting the link in Hibern8. Another way
		 * to put the link in low power mode is to send the DME end
		 * point to device and then send the DME reset command to local
		 * unipro. But putting the link in hibern8 is much faster.
		 *
		 * Note also that putting the link in Hibern8 is a requirement
		 * for entering DeepSleep.
		 */
		ret = ufshcd_uic_hibern8_enter(hba);
		if (ret) {
			dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
					__func__, ret);
			goto out;
		}
		/*
		 * Change controller state to "reset state" which
		 * should also put the link in off/reset state
		 */
		ufshcd_hba_stop(hba);
		/*
		 * TODO: Check if we need any delay to make sure that
		 * controller is reset
		 */
		ufshcd_set_link_off(hba);
	}

out:
	return ret;
}
static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
{
	bool vcc_off = false;

	/*
	 * It seems some UFS devices may keep drawing more than sleep current
	 * (at least for 500us) from UFS rails (especially from VCCQ rail).
	 * To avoid this situation, add 2ms delay before putting these UFS
	 * rails in LPM mode.
	 */
	if (!ufshcd_is_link_active(hba) &&
	    hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
		usleep_range(2000, 2100);

	/*
	 * If UFS device is either in UFS_Sleep turn off VCC rail to save some
	 * power.
	 *
	 * If UFS device and link is in OFF state, all power supplies (VCC,
	 * VCCQ, VCCQ2) can be turned off if power on write protect is not
	 * required. If UFS link is inactive (Hibern8 or OFF state) and device
	 * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
	 *
	 * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
	 * in low power state which would save some power.
	 *
	 * If Write Booster is enabled and the device needs to flush the WB
	 * buffer OR if bkops status is urgent for WB, keep Vcc on.
	 */
	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
	    !hba->dev_info.is_lu_power_on_wp) {
		ufshcd_setup_vreg(hba, false);
		vcc_off = true;
	} else if (!ufshcd_is_ufs_dev_active(hba)) {
		ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
		vcc_off = true;
		if (ufshcd_is_link_hibern8(hba) || ufshcd_is_link_off(hba)) {
			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
		}
	}

	/*
	 * Some UFS devices require delay after VCC power rail is turned-off.
	 */
	if (vcc_off && hba->vreg_info.vcc &&
		hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)
		usleep_range(5000, 5100);
}
#ifdef CONFIG_PM
static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
{
	int ret = 0;

	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
	    !hba->dev_info.is_lu_power_on_wp) {
		ret = ufshcd_setup_vreg(hba, true);
	} else if (!ufshcd_is_ufs_dev_active(hba)) {
		if (!ufshcd_is_link_active(hba)) {
			ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
			if (ret)
				goto vcc_disable;
			ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
			if (ret)
				goto vccq_lpm;
		}
		ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
	}
	goto out;

vccq_lpm:
	ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
vcc_disable:
	ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
out:
	return ret;
}
#endif /* CONFIG_PM */
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
{
	if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
		ufshcd_setup_hba_vreg(hba, false);
}

static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
{
	if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
		ufshcd_setup_hba_vreg(hba, true);
}
static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int ret = 0;
	bool check_for_bkops;
	enum ufs_pm_level pm_lvl;
	enum ufs_dev_pwr_mode req_dev_pwr_mode;
	enum uic_link_state req_link_state;

	hba->pm_op_in_progress = true;
	if (pm_op != UFS_SHUTDOWN_PM) {
		pm_lvl = pm_op == UFS_RUNTIME_PM ?
			 hba->rpm_lvl : hba->spm_lvl;
		req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
		req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
	} else {
		req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
		req_link_state = UIC_LINK_OFF_STATE;
	}

	ufshpb_suspend(hba);

	/*
	 * If we can't transition into any of the low power modes
	 * just gate the clocks.
	 */
	ufshcd_hold(hba, false);
	hba->clk_gating.is_suspended = true;

	if (ufshcd_is_clkscaling_supported(hba))
		ufshcd_clk_scaling_suspend(hba, true);

	if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
			req_link_state == UIC_LINK_ACTIVE_STATE) {
		goto vops_suspend;
	}

	if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
	    (req_link_state == hba->uic_link_state))
		goto enable_scaling;

	/* UFS device & link must be active before we enter in this function */
	if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
		ret = -EINVAL;
		goto enable_scaling;
	}

	if (pm_op == UFS_RUNTIME_PM) {
		if (ufshcd_can_autobkops_during_suspend(hba)) {
			/*
			 * The device is idle with no requests in the queue,
			 * allow background operations if bkops status shows
			 * that performance might be impacted.
			 */
			ret = ufshcd_urgent_bkops(hba);
			if (ret)
				goto enable_scaling;
		} else {
			/* make sure that auto bkops is disabled */
			ufshcd_disable_auto_bkops(hba);
		}
		/*
		 * If device needs to do BKOP or WB buffer flush during
		 * Hibern8, keep device power mode as "active power mode"
		 * and VCC supply.
		 */
		hba->dev_info.b_rpm_dev_flush_capable =
			hba->auto_bkops_enabled ||
			(((req_link_state == UIC_LINK_HIBERN8_STATE) ||
			((req_link_state == UIC_LINK_ACTIVE_STATE) &&
			ufshcd_is_auto_hibern8_enabled(hba))) &&
			ufshcd_wb_need_flush(hba));
	}

	flush_work(&hba->eeh_work);

	ret = ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
	if (ret)
		goto enable_scaling;

	if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
		if (pm_op != UFS_RUNTIME_PM)
			/* ensure that bkops is disabled */
			ufshcd_disable_auto_bkops(hba);

		if (!hba->dev_info.b_rpm_dev_flush_capable) {
			ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
			if (ret && pm_op != UFS_SHUTDOWN_PM) {
				/*
				 * If return err in suspend flow, IO will hang.
				 * Trigger error handler and break suspend for
				 * error recovery.
				 */
				ufshcd_force_error_recovery(hba);
				ret = -EBUSY;
			}
			if (ret)
				goto enable_scaling;
		}
	}

	/*
	 * In the case of DeepSleep, the device is expected to remain powered
	 * with the link off, so do not check for bkops.
	 */
	check_for_bkops = !ufshcd_is_ufs_dev_deepsleep(hba);
	ret = ufshcd_link_state_transition(hba, req_link_state, check_for_bkops);
	if (ret && pm_op != UFS_SHUTDOWN_PM) {
		/*
		 * If return err in suspend flow, IO will hang.
		 * Trigger error handler and break suspend for
		 * error recovery.
		 */
		ufshcd_force_error_recovery(hba);
		ret = -EBUSY;
	}
	if (ret)
		goto set_dev_active;

vops_suspend:
	/*
	 * Call vendor specific suspend callback. As these callbacks may access
	 * vendor specific host controller register space call them before the
	 * host clocks are ON.
	 */
	ret = ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
	if (ret)
		goto set_link_active;
	goto out;

set_link_active:
	/*
	 * Device hardware reset is required to exit DeepSleep. Also, for
	 * DeepSleep, the link is off so host reset and restore will be done
	 * further below.
	 */
	if (ufshcd_is_ufs_dev_deepsleep(hba)) {
		ufshcd_device_reset(hba);
		WARN_ON(!ufshcd_is_link_off(hba));
	}
	if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
		ufshcd_set_link_active(hba);
	else if (ufshcd_is_link_off(hba))
		ufshcd_host_reset_and_restore(hba);
set_dev_active:
	/* Can also get here needing to exit DeepSleep */
	if (ufshcd_is_ufs_dev_deepsleep(hba)) {
		ufshcd_device_reset(hba);
		ufshcd_host_reset_and_restore(hba);
	}
	if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
		ufshcd_disable_auto_bkops(hba);
enable_scaling:
	if (ufshcd_is_clkscaling_supported(hba))
		ufshcd_clk_scaling_suspend(hba, false);

	hba->dev_info.b_rpm_dev_flush_capable = false;
out:
	if (hba->dev_info.b_rpm_dev_flush_capable) {
		schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
			msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS));
	}

	if (ret) {
		ufshcd_update_evt_hist(hba, UFS_EVT_WL_SUSP_ERR, (u32)ret);
		hba->clk_gating.is_suspended = false;
		ufshcd_release(hba);
		ufshpb_resume(hba);
	}
	hba->pm_op_in_progress = false;
	return ret;
}
static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int ret;
	enum uic_link_state old_link_state = hba->uic_link_state;

	hba->pm_op_in_progress = true;

	/*
	 * Call vendor specific resume callback. As these callbacks may access
	 * vendor specific host controller register space call them when the
	 * host clocks are ON.
	 */
	ret = ufshcd_vops_resume(hba, pm_op);
	if (ret)
		goto out;

	/* For DeepSleep, the only supported option is to have the link off */
	WARN_ON(ufshcd_is_ufs_dev_deepsleep(hba) && !ufshcd_is_link_off(hba));

	if (ufshcd_is_link_hibern8(hba)) {
		ret = ufshcd_uic_hibern8_exit(hba);
		if (!ret) {
			ufshcd_set_link_active(hba);
		} else {
			dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
					__func__, ret);
			goto vendor_suspend;
		}
	} else if (ufshcd_is_link_off(hba)) {
		/*
		 * A full initialization of the host and the device is
		 * required since the link was put to off during suspend.
		 * Note, in the case of DeepSleep, the device will exit
		 * DeepSleep due to device reset.
		 */
		ret = ufshcd_reset_and_restore(hba);
		/*
		 * ufshcd_reset_and_restore() should have already
		 * set the link state as active
		 */
		if (ret || !ufshcd_is_link_active(hba))
			goto vendor_suspend;
	}

	if (!ufshcd_is_ufs_dev_active(hba)) {
		ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
		if (ret)
			goto set_old_link_state;
	}

	if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
		ufshcd_enable_auto_bkops(hba);
	else
		/*
		 * If BKOPs operations are urgently needed at this moment then
		 * keep auto-bkops enabled or else disable it.
		 */
		ufshcd_urgent_bkops(hba);

	if (hba->ee_usr_mask)
		ufshcd_write_ee_control(hba);

	if (ufshcd_is_clkscaling_supported(hba))
		ufshcd_clk_scaling_suspend(hba, false);

	if (hba->dev_info.b_rpm_dev_flush_capable) {
		hba->dev_info.b_rpm_dev_flush_capable = false;
		cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
	}

	/* Enable Auto-Hibernate if configured */
	ufshcd_auto_hibern8_enable(hba);

	ufshpb_resume(hba);
	goto out;

set_old_link_state:
	ufshcd_link_state_transition(hba, old_link_state, 0);
vendor_suspend:
	ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
	ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
out:
	if (ret)
		ufshcd_update_evt_hist(hba, UFS_EVT_WL_RES_ERR, (u32)ret);
	hba->clk_gating.is_suspended = false;
	ufshcd_release(hba);
	hba->pm_op_in_progress = false;
	return ret;
}
static int ufshcd_wl_runtime_suspend(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufs_hba *hba;
	int ret;
	ktime_t start = ktime_get();

	hba = shost_priv(sdev->host);

	ret = __ufshcd_wl_suspend(hba, UFS_RUNTIME_PM);
	if (ret)
		dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);

	trace_ufshcd_wl_runtime_suspend(dev_name(dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);

	return ret;
}
static int ufshcd_wl_runtime_resume(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufs_hba *hba;
	int ret;
	ktime_t start = ktime_get();

	hba = shost_priv(sdev->host);

	ret = __ufshcd_wl_resume(hba, UFS_RUNTIME_PM);
	if (ret)
		dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);

	trace_ufshcd_wl_runtime_resume(dev_name(dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);

	return ret;
}
#ifdef CONFIG_PM_SLEEP
static int ufshcd_wl_suspend(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufs_hba *hba;
	int ret = 0;
	ktime_t start = ktime_get();

	hba = shost_priv(sdev->host);
	down(&hba->host_sem);
	hba->system_suspending = true;

	if (pm_runtime_suspended(dev))
		goto out;

	ret = __ufshcd_wl_suspend(hba, UFS_SYSTEM_PM);
	if (ret) {
		dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
		up(&hba->host_sem);
	}

out:
	if (!ret)
		hba->is_sys_suspended = true;
	trace_ufshcd_wl_suspend(dev_name(dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);

	return ret;
}
static int ufshcd_wl_resume(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufs_hba *hba;
	int ret = 0;
	ktime_t start = ktime_get();

	hba = shost_priv(sdev->host);

	if (pm_runtime_suspended(dev))
		goto out;

	ret = __ufshcd_wl_resume(hba, UFS_SYSTEM_PM);
	if (ret)
		dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
out:
	trace_ufshcd_wl_resume(dev_name(dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	if (!ret)
		hba->is_sys_suspended = false;
	hba->system_suspending = false;
	up(&hba->host_sem);
	return ret;
}
#endif
static void ufshcd_wl_shutdown(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufs_hba *hba;

	hba = shost_priv(sdev->host);

	down(&hba->host_sem);
	hba->shutting_down = true;
	up(&hba->host_sem);

	/* Turn on everything while shutting down */
	ufshcd_rpm_get_sync(hba);
	scsi_device_quiesce(sdev);
	shost_for_each_device(sdev, hba->host) {
		if (sdev == hba->ufs_device_wlun)
			continue;
		scsi_device_quiesce(sdev);
	}
	__ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
}
/**
 * ufshcd_suspend - helper function for suspend operations
 * @hba: per adapter instance
 *
 * This function will disable irqs, turn off clocks
 * and set vreg and hba-vreg in lpm mode.
 */
static int ufshcd_suspend(struct ufs_hba *hba)
{
	int ret;

	if (!hba->is_powered)
		return 0;
	/*
	 * Disable the host irq as host controller as there won't be any
	 * host controller transaction expected till resume.
	 */
	ufshcd_disable_irq(hba);
	ret = ufshcd_setup_clocks(hba, false);
	if (ret) {
		ufshcd_enable_irq(hba);
		return ret;
	}
	if (ufshcd_is_clkgating_allowed(hba)) {
		hba->clk_gating.state = CLKS_OFF;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
	}

	ufshcd_vreg_set_lpm(hba);
	/* Put the host controller in low power mode if possible */
	ufshcd_hba_vreg_set_lpm(hba);
	return ret;
}
#ifdef CONFIG_PM
/**
 * ufshcd_resume - helper function for resume operations
 * @hba: per adapter instance
 *
 * This function basically turns on the regulators, clocks and
 * irqs of the hba.
 *
 * Returns 0 for success and non-zero for failure
 */
static int ufshcd_resume(struct ufs_hba *hba)
{
	int ret;

	if (!hba->is_powered)
		return 0;

	ufshcd_hba_vreg_set_hpm(hba);
	ret = ufshcd_vreg_set_hpm(hba);
	if (ret)
		goto out;

	/* Make sure clocks are enabled before accessing controller */
	ret = ufshcd_setup_clocks(hba, true);
	if (ret)
		goto disable_vreg;

	/* enable the host irq as host controller would be active soon */
	ufshcd_enable_irq(hba);

	goto out;

disable_vreg:
	ufshcd_vreg_set_lpm(hba);
out:
	if (ret)
		ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret);
	return ret;
}
#endif /* CONFIG_PM */
#ifdef CONFIG_PM_SLEEP
/**
 * ufshcd_system_suspend - system suspend callback
 * @dev: Device associated with the UFS controller.
 *
 * Executed before putting the system into a sleep state in which the contents
 * of main memory are preserved.
 *
 * Returns 0 for success and non-zero for failure
 */
int ufshcd_system_suspend(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret = 0;
	ktime_t start = ktime_get();

	if (pm_runtime_suspended(hba->dev))
		goto out;

	ret = ufshcd_suspend(hba);
out:
	trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	return ret;
}
EXPORT_SYMBOL(ufshcd_system_suspend);
/**
 * ufshcd_system_resume - system resume callback
 * @dev: Device associated with the UFS controller.
 *
 * Executed after waking the system up from a sleep state in which the contents
 * of main memory were preserved.
 *
 * Returns 0 for success and non-zero for failure
 */
int ufshcd_system_resume(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	ktime_t start = ktime_get();
	int ret = 0;

	if (pm_runtime_suspended(hba->dev))
		goto out;

	ret = ufshcd_resume(hba);

out:
	trace_ufshcd_system_resume(dev_name(hba->dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);

	return ret;
}
EXPORT_SYMBOL(ufshcd_system_resume);
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
/**
 * ufshcd_runtime_suspend - runtime suspend callback
 * @dev: Device associated with the UFS controller.
 *
 * Check the description of ufshcd_suspend() function for more details.
 *
 * Returns 0 for success and non-zero for failure
 */
int ufshcd_runtime_suspend(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret;
	ktime_t start = ktime_get();

	ret = ufshcd_suspend(hba);

	trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	return ret;
}
EXPORT_SYMBOL(ufshcd_runtime_suspend);
/**
 * ufshcd_runtime_resume - runtime resume routine
 * @dev: Device associated with the UFS controller.
 *
 * This function basically brings controller
 * to active state. Following operations are done in this function:
 *
 * 1. Turn on all the controller related clocks
 * 2. Turn ON VCC rail
 */
int ufshcd_runtime_resume(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret;
	ktime_t start = ktime_get();

	ret = ufshcd_resume(hba);

	trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	return ret;
}
EXPORT_SYMBOL(ufshcd_runtime_resume);
#endif /* CONFIG_PM */
/**
 * ufshcd_shutdown - shutdown routine
 * @hba: per adapter instance
 *
 * This function would turn off both UFS device and UFS hba
 * regulators. It would also disable clocks.
 *
 * Returns 0 always to allow force shutdown even in case of errors.
 */
int ufshcd_shutdown(struct ufs_hba *hba)
{
	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
		ufshcd_suspend(hba);

	hba->is_powered = false;
	/* allow force shutdown even in case of errors */
	return 0;
}
EXPORT_SYMBOL(ufshcd_shutdown);
/**
 * ufshcd_remove - de-allocate SCSI host and host memory space
 *		data structure memory
 * @hba: per adapter instance
 */
void ufshcd_remove(struct ufs_hba *hba)
{
	if (hba->ufs_device_wlun)
		ufshcd_rpm_get_sync(hba);
	ufs_hwmon_remove(hba);
	ufs_bsg_remove(hba);
	ufshpb_remove(hba);
	ufs_sysfs_remove_nodes(hba->dev);
	blk_mq_destroy_queue(hba->tmf_queue);
	blk_put_queue(hba->tmf_queue);
	blk_mq_free_tag_set(&hba->tmf_tag_set);
	scsi_remove_host(hba->host);
	/* disable interrupts */
	ufshcd_disable_intr(hba, hba->intr_mask);
	ufshcd_hba_stop(hba);
	ufshcd_hba_exit(hba);
}
EXPORT_SYMBOL_GPL(ufshcd_remove);
#ifdef CONFIG_PM_SLEEP
int ufshcd_system_freeze(struct device *dev)
{
	return ufshcd_system_suspend(dev);
}
EXPORT_SYMBOL_GPL(ufshcd_system_freeze);

int ufshcd_system_restore(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret;

	ret = ufshcd_system_resume(dev);
	if (ret)
		return ret;

	/* Configure UTRL and UTMRL base address registers */
	ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
		      REG_UTP_TRANSFER_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
		      REG_UTP_TRANSFER_REQ_LIST_BASE_H);
	ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
		      REG_UTP_TASK_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
		      REG_UTP_TASK_REQ_LIST_BASE_H);
	/*
	 * Make sure that UTRL and UTMRL base address registers
	 * are updated with the latest queue addresses. Only after
	 * updating these addresses, we can queue the new commands.
	 */
	mb();

	/* Resuming from hibernate, assume that link was OFF */
	ufshcd_set_link_off(hba);

	return 0;
}
EXPORT_SYMBOL_GPL(ufshcd_system_restore);

int ufshcd_system_thaw(struct device *dev)
{
	return ufshcd_system_resume(dev);
}
EXPORT_SYMBOL_GPL(ufshcd_system_thaw);
#endif /* CONFIG_PM_SLEEP */
/**
 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
 * @hba: pointer to Host Bus Adapter (HBA)
 */
void ufshcd_dealloc_host(struct ufs_hba *hba)
{
	scsi_host_put(hba->host);
}
EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
/**
 * ufshcd_set_dma_mask - Set dma mask based on the controller
 *			 addressing capability
 * @hba: per adapter instance
 *
 * Returns 0 for success, non-zero for failure
 */
static int ufshcd_set_dma_mask(struct ufs_hba *hba)
{
	if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
		if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
			return 0;
	}
	return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
}
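/*
 * Fallback note: a controller that advertises 64-bit addressing first
 * tries DMA_BIT_MASK(64); if the platform rejects that, or the capability
 * bit is clear, the function falls back to a 32-bit DMA mask.
 */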
/**
 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
 * @dev: pointer to device handle
 * @hba_handle: driver private handle
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	int err = 0;

	if (!dev) {
		dev_err(dev,
		"Invalid memory reference for dev is NULL\n");
		err = -ENODEV;
		goto out_error;
	}

	host = scsi_host_alloc(&ufshcd_driver_template,
				sizeof(struct ufs_hba));
	if (!host) {
		dev_err(dev, "scsi_host_alloc failed\n");
		err = -ENOMEM;
		goto out_error;
	}
	host->nr_maps = HCTX_TYPE_POLL + 1;
	hba = shost_priv(host);
	hba->host = host;
	hba->dev = dev;
	hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
	hba->nop_out_timeout = NOP_OUT_TIMEOUT;
	ufshcd_set_sg_entry_size(hba, sizeof(struct ufshcd_sg_entry));
	INIT_LIST_HEAD(&hba->clk_list_head);
	spin_lock_init(&hba->outstanding_lock);

	*hba_handle = hba;

out_error:
	return err;
}
EXPORT_SYMBOL(ufshcd_alloc_host);
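/*
 * Typical glue-driver usage (sketch; the platform and PCI glue drivers are
 * the real call sites):
 *
 *	struct ufs_hba *hba;
 *	int err = ufshcd_alloc_host(dev, &hba);
 *
 *	if (!err)
 *		err = ufshcd_init(hba, mmio_base, irq);
 *	...
 *	ufshcd_remove(hba);
 *	ufshcd_dealloc_host(hba);
 */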
/* This function exists because blk_mq_alloc_tag_set() requires this. */
static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
				     const struct blk_mq_queue_data *qd)
{
	WARN_ON_ONCE(true);
	return BLK_STS_NOTSUPP;
}

static const struct blk_mq_ops ufshcd_tmf_ops = {
	.queue_rq = ufshcd_queue_tmf,
};
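/*
 * The TMF tag set is used purely as a tag allocator: task management slots
 * are obtained by allocating requests from hba->tmf_queue, so
 * ufshcd_queue_tmf() above must never actually be called.
 */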
/**
 * ufshcd_init - Driver initialization routine
 * @hba: per-adapter instance
 * @mmio_base: base register address
 * @irq: Interrupt line of device
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
{
	int err;
	struct Scsi_Host *host = hba->host;
	struct device *dev = hba->dev;
	char eh_wq_name[sizeof("ufs_eh_wq_00")];

	/*
	 * dev_set_drvdata() must be called before any callbacks are registered
	 * that use dev_get_drvdata() (frequency scaling, clock scaling, hwmon,
	 * sysfs).
	 */
	dev_set_drvdata(dev, hba);

	if (!mmio_base) {
		dev_err(hba->dev,
		"Invalid memory reference for mmio_base is NULL\n");
		err = -ENODEV;
		goto out_error;
	}

	hba->mmio_base = mmio_base;
	hba->irq = irq;
	hba->vps = &ufs_hba_vps;

	err = ufshcd_hba_init(hba);
	if (err)
		goto out_error;

	/* Read capabilities registers */
	err = ufshcd_hba_capabilities(hba);
	if (err)
		goto out_disable;

	/* Get UFS version supported by the controller */
	hba->ufs_version = ufshcd_get_ufs_version(hba);

	/* Get Interrupt bit mask per version */
	hba->intr_mask = ufshcd_get_intr_mask(hba);

	err = ufshcd_set_dma_mask(hba);
	if (err) {
		dev_err(hba->dev, "set dma mask failed\n");
		goto out_disable;
	}

	/* Allocate memory for host memory space */
	err = ufshcd_memory_alloc(hba);
	if (err) {
		dev_err(hba->dev, "Memory allocation failed\n");
		goto out_disable;
	}

	/* Configure LRB */
	ufshcd_host_memory_configure(hba);

	host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
	host->cmd_per_lun = hba->nutrs - UFSHCD_NUM_RESERVED;
	host->max_id = UFSHCD_MAX_ID;
	host->max_lun = UFS_MAX_LUNS;
	host->max_channel = UFSHCD_MAX_CHANNEL;
	host->unique_id = host->host_no;
	host->max_cmd_len = UFS_CDB_SIZE;

	hba->max_pwr_info.is_valid = false;

	/* Initialize work queues */
	snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
		 hba->host->host_no);
	hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
	if (!hba->eh_wq) {
		dev_err(hba->dev, "%s: failed to create eh workqueue\n",
			__func__);
		err = -ENOMEM;
		goto out_disable;
	}
	INIT_WORK(&hba->eh_work, ufshcd_err_handler);
	INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);

	sema_init(&hba->host_sem, 1);

	/* Initialize UIC command mutex */
	mutex_init(&hba->uic_cmd_mutex);

	/* Initialize mutex for device management commands */
	mutex_init(&hba->dev_cmd.lock);

	/* Initialize mutex for exception event control */
	mutex_init(&hba->ee_ctrl_mutex);

	mutex_init(&hba->wb_mutex);
	init_rwsem(&hba->clk_scaling_lock);

	ufshcd_init_clk_gating(hba);

	ufshcd_init_clk_scaling(hba);

	/*
	 * In order to avoid any spurious interrupt immediately after
	 * registering UFS controller interrupt handler, clear any pending UFS
	 * interrupt status and disable all the UFS interrupts.
	 */
	ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
		      REG_INTERRUPT_STATUS);
	ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
	/*
	 * Make sure that UFS interrupts are disabled and any pending interrupt
	 * status is cleared before registering UFS interrupt handler.
	 */
	mb();

	/* IRQ registration */
	err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
	if (err) {
		dev_err(hba->dev, "request irq failed\n");
		goto out_disable;
	} else {
		hba->is_irq_enabled = true;
	}

	if (!is_mcq_supported(hba)) {
		err = scsi_add_host(host, hba->dev);
		if (err) {
			dev_err(hba->dev, "scsi_add_host failed\n");
			goto out_disable;
		}
		hba->scsi_host_added = true;
	}

	hba->tmf_tag_set = (struct blk_mq_tag_set) {
		.nr_hw_queues	= 1,
		.queue_depth	= hba->nutmrs,
		.ops		= &ufshcd_tmf_ops,
		.flags		= BLK_MQ_F_NO_SCHED,
	};
	err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
	if (err < 0)
		goto out_remove_scsi_host;
	hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
	if (IS_ERR(hba->tmf_queue)) {
		err = PTR_ERR(hba->tmf_queue);
		goto free_tmf_tag_set;
	}
	hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs,
				    sizeof(*hba->tmf_rqs), GFP_KERNEL);
	if (!hba->tmf_rqs) {
		err = -ENOMEM;
		goto free_tmf_queue;
	}

	/* Reset the attached device */
	ufshcd_device_reset(hba);

	ufshcd_init_crypto(hba);

	/* Host controller enable */
	err = ufshcd_hba_enable(hba);
	if (err) {
		dev_err(hba->dev, "Host controller enable failed\n");
		ufshcd_print_evt_hist(hba);
		ufshcd_print_host_state(hba);
		goto free_tmf_queue;
	}

	/*
	 * Set the default power management level for runtime and system PM.
	 * Default power saving mode is to keep UFS link in Hibern8 state
	 * and UFS device in sleep state.
	 */
	hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
						UFS_SLEEP_PWR_MODE,
						UIC_LINK_HIBERN8_STATE);
	hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
						UFS_SLEEP_PWR_MODE,
						UIC_LINK_HIBERN8_STATE);

	INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work,
			  ufshcd_rpm_dev_flush_recheck_work);

	/* Set the default auto-hibernate idle timer value to 150 ms */
	if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
		hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
			    FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
	}

	/* Hold auto suspend until async scan completes */
	pm_runtime_get_sync(dev);
	atomic_set(&hba->scsi_block_reqs_cnt, 0);
	/*
	 * We are assuming that device wasn't put in sleep/power-down
	 * state exclusively during the boot stage before kernel.
	 * This assumption helps avoid doing link startup twice during
	 * ufshcd_probe_hba().
	 */
	ufshcd_set_ufs_dev_active(hba);

	async_schedule(ufshcd_async_scan, hba);
	ufs_sysfs_add_nodes(hba->dev);

	device_enable_async_suspend(dev);
	return 0;

free_tmf_queue:
	blk_mq_destroy_queue(hba->tmf_queue);
	blk_put_queue(hba->tmf_queue);
free_tmf_tag_set:
	blk_mq_free_tag_set(&hba->tmf_tag_set);
out_remove_scsi_host:
	scsi_remove_host(hba->host);
out_disable:
	hba->is_irq_enabled = false;
	ufshcd_hba_exit(hba);
out_error:
	return err;
}
EXPORT_SYMBOL_GPL(ufshcd_init);
void ufshcd_resume_complete(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	if (hba->complete_put) {
		ufshcd_rpm_put(hba);
		hba->complete_put = false;
	}
}
EXPORT_SYMBOL_GPL(ufshcd_resume_complete);
static bool ufshcd_rpm_ok_for_spm(struct ufs_hba *hba)
{
	struct device *dev = &hba->ufs_device_wlun->sdev_gendev;
	enum ufs_dev_pwr_mode dev_pwr_mode;
	enum uic_link_state link_state;
	unsigned long flags;
	bool res;

	spin_lock_irqsave(&dev->power.lock, flags);
	dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl);
	link_state = ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl);
	res = pm_runtime_suspended(dev) &&
	      hba->curr_dev_pwr_mode == dev_pwr_mode &&
	      hba->uic_link_state == link_state &&
	      !hba->dev_info.b_rpm_dev_flush_capable;
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return res;
}
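/*
 * Locking note (an interpretation, not spelled out in the original):
 * dev->power.lock is held above so that the runtime-PM status and the
 * recorded device/link state are sampled atomically; a concurrent runtime
 * PM transition cannot slip in between the checks and invalidate the
 * answer.
 */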
int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret;

	/*
	 * SCSI assumes that runtime-pm and system-pm for scsi drivers
	 * are same. And it doesn't wake up the device for system-suspend
	 * if it's runtime suspended. But ufs doesn't follow that.
	 * Refer ufshcd_resume_complete()
	 */
	if (hba->ufs_device_wlun) {
		/* Prevent runtime suspend */
		ufshcd_rpm_get_noresume(hba);
		/*
		 * Check if already runtime suspended in same state as system
		 * suspend would be.
		 */
		if (!rpm_ok_for_spm || !ufshcd_rpm_ok_for_spm(hba)) {
			/* RPM state is not ok for SPM, so runtime resume */
			ret = ufshcd_rpm_resume(hba);
			if (ret < 0 && ret != -EACCES) {
				ufshcd_rpm_put(hba);
				return ret;
			}
		}
		hba->complete_put = true;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__ufshcd_suspend_prepare);

int ufshcd_suspend_prepare(struct device *dev)
{
	return __ufshcd_suspend_prepare(dev, true);
}
EXPORT_SYMBOL_GPL(ufshcd_suspend_prepare);
#ifdef CONFIG_PM_SLEEP
static int ufshcd_wl_poweroff(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufs_hba *hba = shost_priv(sdev->host);

	__ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
	return 0;
}
#endif

static int ufshcd_wl_probe(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	if (!is_device_wlun(sdev))
		return -ENODEV;

	blk_pm_runtime_init(sdev->request_queue, dev);
	pm_runtime_set_autosuspend_delay(dev, 0);
	pm_runtime_allow(dev);

	return 0;
}

static int ufshcd_wl_remove(struct device *dev)
{
	pm_runtime_forbid(dev);
	return 0;
}
static const struct dev_pm_ops ufshcd_wl_pm_ops = {
#ifdef CONFIG_PM_SLEEP
	.suspend = ufshcd_wl_suspend,
	.resume = ufshcd_wl_resume,
	.freeze = ufshcd_wl_suspend,
	.thaw = ufshcd_wl_resume,
	.poweroff = ufshcd_wl_poweroff,
	.restore = ufshcd_wl_resume,
#endif
	SET_RUNTIME_PM_OPS(ufshcd_wl_runtime_suspend, ufshcd_wl_runtime_resume, NULL)
};
/*
 * ufs_dev_wlun_template - describes ufs device wlun
 * ufs-device wlun - used to send pm commands
 * All luns are consumers of ufs-device wlun.
 *
 * Currently, no sd driver is present for wluns.
 * Hence no specific pm operations are performed.
 * With ufs design, SSU should be sent to ufs-device wlun.
 * Hence register a scsi driver for ufs wluns only.
 */
static struct scsi_driver ufs_dev_wlun_template = {
	.gendrv = {
		.name = "ufs_device_wlun",
		.owner = THIS_MODULE,
		.probe = ufshcd_wl_probe,
		.remove = ufshcd_wl_remove,
		.pm = &ufshcd_wl_pm_ops,
		.shutdown = ufshcd_wl_shutdown,
	},
};
static int __init ufshcd_core_init(void)
{
	int ret;

	ufs_debugfs_init();

	ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv);
	if (ret)
		ufs_debugfs_exit();
	return ret;
}

static void __exit ufshcd_core_exit(void)
{
	ufs_debugfs_exit();
	scsi_unregister_driver(&ufs_dev_wlun_template.gendrv);
}

module_init(ufshcd_core_init);
module_exit(ufshcd_core_exit);
MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("Generic UFS host controller driver Core");
MODULE_LICENSE("GPL");