// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Universal Flash Storage Host controller driver Core
 * Copyright (C) 2011-2013 Samsung India Software Operations
 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 */

#include <linux/async.h>
#include <linux/devfreq.h>
#include <linux/nls.h>
#include <linux/bitfield.h>
#include <linux/blk-pm.h>
#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
#include <linux/sched/clock.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include "ufshcd-priv.h"
#include <ufs/ufs_quirks.h>
#include <ufs/unipro.h>
#include "ufs-sysfs.h"
#include "ufs-debugfs.h"
#include "ufs-fault-injection.h"
#include "ufshcd-crypto.h"
#include <asm/unaligned.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>
#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)

#define UFSHCD_ENABLE_MCQ_INTRS	(UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK |\
				 MCQ_CQ_EVENT_STATUS)

/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT	500

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES    10
/* Timeout after 50 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT    50 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES 3
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */

/* Advanced RPMB request timeout */
#define ADVANCED_RPMB_REQ_TIMEOUT  3000 /* 3 seconds */

/* Task management command timeout */
#define TM_CMD_TIMEOUT	100 /* msecs */

/* maximum number of retries for a general UIC command  */
#define UFS_UIC_COMMAND_RETRIES 3

/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3

/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5

/* Maximum number of error handler retries before giving up */
#define MAX_ERR_HANDLER_RETRIES 5

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO	0x02

/* default delay of autosuspend: 2000 ms */
#define RPM_AUTOSUSPEND_DELAY_MS 2000

/* Default delay of RPM device flush delayed work */
#define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS 5000

/* Default value of wait time before gating device ref clock */
#define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */

/* Polling time to wait for fDeviceInit */
#define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */
/* UFSHC 4.0 compliant HC support this mode, refer param_set_mcq_mode() */
static bool use_mcq_mode = true;

static bool is_mcq_supported(struct ufs_hba *hba)
{
	return hba->mcq_sup && use_mcq_mode;
}

static int param_set_mcq_mode(const char *val, const struct kernel_param *kp)
{
	int ret;

	ret = param_set_bool(val, kp);

	return ret;
}

static const struct kernel_param_ops mcq_mode_ops = {
	.set = param_set_mcq_mode,
	.get = param_get_bool,
};

module_param_cb(use_mcq_mode, &mcq_mode_ops, &use_mcq_mode, 0644);
MODULE_PARM_DESC(use_mcq_mode, "Control MCQ mode for controllers starting from UFSHCI 4.0. 1 - enable MCQ, 0 - disable MCQ. MCQ is enabled by default");
#define ufshcd_toggle_vreg(_dev, _vreg, _on)				\
	({                                                              \
		int _ret;                                               \
		if (_on)                                                \
			_ret = ufshcd_enable_vreg(_dev, _vreg);         \
		else                                                    \
			_ret = ufshcd_disable_vreg(_dev, _vreg);        \
		_ret;                                                   \
	})

#define ufshcd_hex_dump(prefix_str, buf, len) do {                       \
	size_t __len = (len);                                            \
	print_hex_dump(KERN_ERR, prefix_str,                             \
		       __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
		       16, 4, buf, __len, false);                        \
} while (0)

int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
		     const char *prefix)
{
	u32 *regs;
	size_t pos;

	if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
		return -EINVAL;

	regs = kzalloc(len, GFP_ATOMIC);
	if (!regs)
		return -ENOMEM;

	for (pos = 0; pos < len; pos += 4) {
		if (offset == 0 &&
		    pos >= REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER &&
		    pos <= REG_UIC_ERROR_CODE_DME)
			continue;
		regs[pos / 4] = ufshcd_readl(hba, offset + pos);
	}

	ufshcd_hex_dump(prefix, regs, len);
	kfree(regs);

	return 0;
}
EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
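/*
 * Example (illustrative only, not taken from this file): a vendor driver's
 * ->dbg_register_dump() hook would typically hand a 4-byte aligned window of
 * its register space to ufshcd_dump_regs(), e.g.
 *
 *	static void example_dbg_register_dump(struct ufs_hba *hba)
 *	{
 *		ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
 *	}
 *
 * Both offset and len must be multiples of 4, as checked above.
 */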
enum {
	UFSHCD_MAX_CHANNEL	= 0,
	UFSHCD_NUM_RESERVED	= 1,
	UFSHCD_CMD_PER_LUN	= 32 - UFSHCD_NUM_RESERVED,
	UFSHCD_CAN_QUEUE	= 32 - UFSHCD_NUM_RESERVED,
};

static const char *const ufshcd_state_name[] = {
	[UFSHCD_STATE_RESET]			= "reset",
	[UFSHCD_STATE_OPERATIONAL]		= "operational",
	[UFSHCD_STATE_ERROR]			= "error",
	[UFSHCD_STATE_EH_SCHEDULED_FATAL]	= "eh_fatal",
	[UFSHCD_STATE_EH_SCHEDULED_NON_FATAL]	= "eh_non_fatal",
};

/* UFSHCD error handling flags */
enum {
	UFSHCD_EH_IN_PROGRESS = (1 << 0),
};

/* UFSHCD UIC layer error flags */
enum {
	UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
	UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
	UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
	UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
	UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
	UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
	UFSHCD_UIC_PA_GENERIC_ERROR = (1 << 6), /* Generic PA error */
};

#define ufshcd_set_eh_in_progress(h) \
	((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
	((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
	((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
const struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
	[UFS_PM_LVL_0] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	[UFS_PM_LVL_1] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	[UFS_PM_LVL_2] = {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	[UFS_PM_LVL_3] = {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	[UFS_PM_LVL_4] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	[UFS_PM_LVL_5] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
	/*
	 * For DeepSleep, the link is first put in hibern8 and then off.
	 * Leaving the link in hibern8 is not supported.
	 */
	[UFS_PM_LVL_6] = {UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE},
};

static inline enum ufs_dev_pwr_mode
ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].dev_state;
}

static inline enum uic_link_state
ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].link_state;
}

static inline enum ufs_pm_level
ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
					  enum uic_link_state link_state)
{
	enum ufs_pm_level lvl;

	for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
		if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
		    (ufs_pm_lvl_states[lvl].link_state == link_state))
			return lvl;
	}

	/* if no match found, return the level 0 */
	return UFS_PM_LVL_0;
}
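/*
 * Worked example of the lookup above, reading straight from
 * ufs_pm_lvl_states: asking for (UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE)
 * yields UFS_PM_LVL_3, while a combination with no matching entry falls back
 * to UFS_PM_LVL_0 (device active, link active).
 */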
static const struct ufs_dev_quirk ufs_fixups[] = {
	/* UFS cards deviations table */
	{ .wmanufacturerid = UFS_VENDOR_MICRON,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
		   UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ },
	{ .wmanufacturerid = UFS_VENDOR_SAMSUNG,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
		   UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
		   UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS },
	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME },
	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
	  .model = "hB8aL1" /*H28U62301AMR*/,
	  .quirk = UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME },
	{ .wmanufacturerid = UFS_VENDOR_TOSHIBA,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
	{ .wmanufacturerid = UFS_VENDOR_TOSHIBA,
	  .model = "THGLF2G9C8KBADG",
	  .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
	{ .wmanufacturerid = UFS_VENDOR_TOSHIBA,
	  .model = "THGLF2G9D8KBADG",
	  .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
	{}
};
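/*
 * Note: the quirk entries above are matched against the wManufacturerID and
 * model string reported by the attached device during probe; the empty
 * sentinel entry terminates the table.
 */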
static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
			     struct ufs_pa_layer_attr *pwr_mode);
static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
					 struct ufs_vreg *vreg);
static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
						 bool enable);
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
static inline void ufshcd_enable_irq(struct ufs_hba *hba)
{
	if (!hba->is_irq_enabled) {
		enable_irq(hba->irq);
		hba->is_irq_enabled = true;
	}
}

static inline void ufshcd_disable_irq(struct ufs_hba *hba)
{
	if (hba->is_irq_enabled) {
		disable_irq(hba->irq);
		hba->is_irq_enabled = false;
	}
}

static void ufshcd_configure_wb(struct ufs_hba *hba)
{
	if (!ufshcd_is_wb_allowed(hba))
		return;

	ufshcd_wb_toggle(hba, true);

	ufshcd_wb_toggle_buf_flush_during_h8(hba, true);

	if (ufshcd_is_wb_buf_flush_allowed(hba))
		ufshcd_wb_toggle_buf_flush(hba, true);
}

static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
{
	if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
		scsi_unblock_requests(hba->host);
}

static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
{
	if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
		scsi_block_requests(hba->host);
}
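/*
 * Note: scsi_block_reqs_cnt acts as a reference count so that nested
 * block/unblock calls pair up: the SCSI midlayer is blocked on the first
 * ufshcd_scsi_block_requests() call and unblocked again only when the last
 * ufshcd_scsi_unblock_requests() drops the count back to zero.
 */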
static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
				      enum ufs_trace_str_t str_t)
{
	struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
	struct utp_upiu_header *header;

	if (!trace_ufshcd_upiu_enabled())
		return;

	if (str_t == UFS_CMD_SEND)
		header = &rq->header;
	else
		header = &hba->lrb[tag].ucd_rsp_ptr->header;

	trace_ufshcd_upiu(dev_name(hba->dev), str_t, header, &rq->sc.cdb,
			  UFS_TSF_CDB);
}

static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba,
					enum ufs_trace_str_t str_t,
					struct utp_upiu_req *rq_rsp)
{
	if (!trace_ufshcd_upiu_enabled())
		return;

	trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq_rsp->header,
			  &rq_rsp->qr, UFS_TSF_OSF);
}
static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
				     enum ufs_trace_str_t str_t)
{
	struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag];

	if (!trace_ufshcd_upiu_enabled())
		return;

	if (str_t == UFS_TM_SEND)
		trace_ufshcd_upiu(dev_name(hba->dev), str_t,
				  &descp->upiu_req.req_header,
				  &descp->upiu_req.input_param1,
				  UFS_TSF_TM_INPUT);
	else
		trace_ufshcd_upiu(dev_name(hba->dev), str_t,
				  &descp->upiu_rsp.rsp_header,
				  &descp->upiu_rsp.output_param1,
				  UFS_TSF_TM_OUTPUT);
}

static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
					 const struct uic_command *ucmd,
					 enum ufs_trace_str_t str_t)
{
	u32 cmd;

	if (!trace_ufshcd_uic_command_enabled())
		return;

	if (str_t == UFS_CMD_SEND)
		cmd = ucmd->command;
	else
		cmd = ufshcd_readl(hba, REG_UIC_COMMAND);

	trace_ufshcd_uic_command(dev_name(hba->dev), str_t, cmd,
				 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1),
				 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2),
				 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
}
static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
				     enum ufs_trace_str_t str_t)
{
	u64 lba = 0;
	u8 opcode = 0, group_id = 0;
	u32 intr, doorbell;
	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
	struct scsi_cmnd *cmd = lrbp->cmd;
	struct request *rq = scsi_cmd_to_rq(cmd);
	int transfer_len = -1;

	if (!cmd)
		return;

	/* trace UPIU also */
	ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
	if (!trace_ufshcd_command_enabled())
		return;

	opcode = cmd->cmnd[0];

	if (opcode == READ_10 || opcode == WRITE_10) {
		/*
		 * Currently we only fully trace read(10) and write(10) commands
		 */
		transfer_len =
			be32_to_cpu(lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
		lba = scsi_get_lba(cmd);
		if (opcode == WRITE_10)
			group_id = lrbp->cmd->cmnd[6];
	} else if (opcode == UNMAP) {
		/*
		 * The number of Bytes to be unmapped beginning with the lba.
		 */
		transfer_len = blk_rq_bytes(rq);
		lba = scsi_get_lba(cmd);
	}

	intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
	doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	trace_ufshcd_command(dev_name(hba->dev), str_t, tag,
			doorbell, transfer_len, intr, lba, opcode, group_id);
}
static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
{
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		return;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
				clki->max_freq)
			dev_err(hba->dev, "clk: %s, rate: %u\n",
					clki->name, clki->curr_freq);
	}
}
static void ufshcd_print_evt(struct ufs_hba *hba, u32 id,
			     const char *err_name)
{
	int i;
	bool found = false;
	const struct ufs_event_hist *e;

	if (id >= UFS_EVT_CNT)
		return;

	e = &hba->ufs_stats.event[id];

	for (i = 0; i < UFS_EVENT_HIST_LENGTH; i++) {
		int p = (i + e->pos) % UFS_EVENT_HIST_LENGTH;

		if (e->tstamp[p] == 0)
			continue;
		dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
			e->val[p], div_u64(e->tstamp[p], 1000));
		found = true;
	}

	if (!found)
		dev_err(hba->dev, "No record of %s\n", err_name);
	else
		dev_err(hba->dev, "%s: total cnt=%llu\n", err_name, e->cnt);
}
static void ufshcd_print_evt_hist(struct ufs_hba *hba)
{
	ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");

	ufshcd_print_evt(hba, UFS_EVT_PA_ERR, "pa_err");
	ufshcd_print_evt(hba, UFS_EVT_DL_ERR, "dl_err");
	ufshcd_print_evt(hba, UFS_EVT_NL_ERR, "nl_err");
	ufshcd_print_evt(hba, UFS_EVT_TL_ERR, "tl_err");
	ufshcd_print_evt(hba, UFS_EVT_DME_ERR, "dme_err");
	ufshcd_print_evt(hba, UFS_EVT_AUTO_HIBERN8_ERR,
			 "auto_hibern8_err");
	ufshcd_print_evt(hba, UFS_EVT_FATAL_ERR, "fatal_err");
	ufshcd_print_evt(hba, UFS_EVT_LINK_STARTUP_FAIL,
			 "link_startup_fail");
	ufshcd_print_evt(hba, UFS_EVT_RESUME_ERR, "resume_fail");
	ufshcd_print_evt(hba, UFS_EVT_SUSPEND_ERR,
			 "suspend_fail");
	ufshcd_print_evt(hba, UFS_EVT_WL_RES_ERR, "wlun resume_fail");
	ufshcd_print_evt(hba, UFS_EVT_WL_SUSP_ERR,
			 "wlun suspend_fail");
	ufshcd_print_evt(hba, UFS_EVT_DEV_RESET, "dev_reset");
	ufshcd_print_evt(hba, UFS_EVT_HOST_RESET, "host_reset");
	ufshcd_print_evt(hba, UFS_EVT_ABORT, "task_abort");

	ufshcd_vops_dbg_register_dump(hba);
}
void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
{
	const struct ufshcd_lrb *lrbp;
	int prdt_length;
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutrs) {
		lrbp = &hba->lrb[tag];

		dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
				tag, div_u64(lrbp->issue_time_stamp_local_clock, 1000));
		dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
				tag, div_u64(lrbp->compl_time_stamp_local_clock, 1000));
		dev_err(hba->dev,
			"UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
			tag, (u64)lrbp->utrd_dma_addr);

		ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
				sizeof(struct utp_transfer_req_desc));
		dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
			(u64)lrbp->ucd_req_dma_addr);
		ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
				sizeof(struct utp_upiu_req));
		dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
			(u64)lrbp->ucd_rsp_dma_addr);
		ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
				sizeof(struct utp_upiu_rsp));

		prdt_length = le16_to_cpu(
			lrbp->utr_descriptor_ptr->prd_table_length);
		if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
			prdt_length /= ufshcd_sg_entry_size(hba);

		dev_err(hba->dev,
			"UPIU[%d] - PRDT - %d entries  phys@0x%llx\n",
			tag, prdt_length,
			(u64)lrbp->ucd_prdt_dma_addr);

		if (pr_prdt)
			ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
				ufshcd_sg_entry_size(hba) * prdt_length);
	}
}

static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
{
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutmrs) {
		struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];

		dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
		ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
	}
}
static void ufshcd_print_host_state(struct ufs_hba *hba)
{
	const struct scsi_device *sdev_ufs = hba->ufs_device_wlun;

	dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
	dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
		hba->outstanding_reqs, hba->outstanding_tasks);
	dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
		hba->saved_err, hba->saved_uic_err);
	dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
		hba->pm_op_in_progress, hba->is_sys_suspended);
	dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
		hba->auto_bkops_enabled, hba->host->host_self_blocked);
	dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
	dev_err(hba->dev,
		"last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n",
		div_u64(hba->ufs_stats.last_hibern8_exit_tstamp, 1000),
		hba->ufs_stats.hibern8_exit_cnt);
	dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n",
		div_u64(hba->ufs_stats.last_intr_ts, 1000),
		hba->ufs_stats.last_intr_status);
	dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
		hba->eh_flags, hba->req_abort_count);
	dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n",
		hba->ufs_version, hba->capabilities, hba->caps);
	dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
		hba->dev_quirks);
	if (sdev_ufs)
		dev_err(hba->dev, "UFS dev info: %.8s %.16s rev %.4s\n",
			sdev_ufs->vendor, sdev_ufs->model, sdev_ufs->rev);

	ufshcd_print_clk_freqs(hba);
}
629 * ufshcd_print_pwr_info - print power params as saved in hba
631 * @hba: per-adapter instance
633 static void ufshcd_print_pwr_info(struct ufs_hba
*hba
)
635 static const char * const names
[] = {
646 * Using dev_dbg to avoid messages during runtime PM to avoid
647 * never-ending cycles of messages written back to storage by user space
648 * causing runtime resume, causing more messages and so on.
650 dev_dbg(hba
->dev
, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
652 hba
->pwr_info
.gear_rx
, hba
->pwr_info
.gear_tx
,
653 hba
->pwr_info
.lane_rx
, hba
->pwr_info
.lane_tx
,
654 names
[hba
->pwr_info
.pwr_rx
],
655 names
[hba
->pwr_info
.pwr_tx
],
656 hba
->pwr_info
.hs_rate
);
659 static void ufshcd_device_reset(struct ufs_hba
*hba
)
663 err
= ufshcd_vops_device_reset(hba
);
666 ufshcd_set_ufs_dev_active(hba
);
667 if (ufshcd_is_wb_allowed(hba
)) {
668 hba
->dev_info
.wb_enabled
= false;
669 hba
->dev_info
.wb_buf_flush_enabled
= false;
672 if (err
!= -EOPNOTSUPP
)
673 ufshcd_update_evt_hist(hba
, UFS_EVT_DEV_RESET
, err
);
676 void ufshcd_delay_us(unsigned long us
, unsigned long tolerance
)
684 usleep_range(us
, us
+ tolerance
);
686 EXPORT_SYMBOL_GPL(ufshcd_delay_us
);
689 * ufshcd_wait_for_register - wait for register value to change
690 * @hba: per-adapter interface
691 * @reg: mmio register offset
692 * @mask: mask to apply to the read register value
693 * @val: value to wait for
694 * @interval_us: polling interval in microseconds
695 * @timeout_ms: timeout in milliseconds
698 * -ETIMEDOUT on error, zero on success.
700 static int ufshcd_wait_for_register(struct ufs_hba
*hba
, u32 reg
, u32 mask
,
701 u32 val
, unsigned long interval_us
,
702 unsigned long timeout_ms
)
705 unsigned long timeout
= jiffies
+ msecs_to_jiffies(timeout_ms
);
707 /* ignore bits that we don't intend to wait on */
710 while ((ufshcd_readl(hba
, reg
) & mask
) != val
) {
711 usleep_range(interval_us
, interval_us
+ 50);
712 if (time_after(jiffies
, timeout
)) {
713 if ((ufshcd_readl(hba
, reg
) & mask
) != val
)
723 * ufshcd_get_intr_mask - Get the interrupt bit mask
724 * @hba: Pointer to adapter instance
726 * Returns interrupt bit mask per version
728 static inline u32
ufshcd_get_intr_mask(struct ufs_hba
*hba
)
730 if (hba
->ufs_version
== ufshci_version(1, 0))
731 return INTERRUPT_MASK_ALL_VER_10
;
732 if (hba
->ufs_version
<= ufshci_version(2, 0))
733 return INTERRUPT_MASK_ALL_VER_11
;
735 return INTERRUPT_MASK_ALL_VER_21
;
739 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
740 * @hba: Pointer to adapter instance
742 * Returns UFSHCI version supported by the controller
744 static inline u32
ufshcd_get_ufs_version(struct ufs_hba
*hba
)
748 if (hba
->quirks
& UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION
)
749 ufshci_ver
= ufshcd_vops_get_ufs_hci_version(hba
);
751 ufshci_ver
= ufshcd_readl(hba
, REG_UFS_VERSION
);
754 * UFSHCI v1.x uses a different version scheme, in order
755 * to allow the use of comparisons with the ufshci_version
756 * function, we convert it to the same scheme as ufs 2.0+.
758 if (ufshci_ver
& 0x00010000)
759 return ufshci_version(1, ufshci_ver
& 0x00000100);
765 * ufshcd_is_device_present - Check if any device connected to
766 * the host controller
767 * @hba: pointer to adapter instance
769 * Returns true if device present, false if no device detected
771 static inline bool ufshcd_is_device_present(struct ufs_hba
*hba
)
773 return ufshcd_readl(hba
, REG_CONTROLLER_STATUS
) & DEVICE_PRESENT
;
777 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
778 * @lrbp: pointer to local command reference block
779 * @cqe: pointer to the completion queue entry
781 * This function is used to get the OCS field from UTRD
782 * Returns the OCS field in the UTRD
784 static enum utp_ocs
ufshcd_get_tr_ocs(struct ufshcd_lrb
*lrbp
,
785 struct cq_entry
*cqe
)
788 return le32_to_cpu(cqe
->status
) & MASK_OCS
;
790 return le32_to_cpu(lrbp
->utr_descriptor_ptr
->header
.dword_2
) & MASK_OCS
;
794 * ufshcd_utrl_clear() - Clear requests from the controller request list.
795 * @hba: per adapter instance
796 * @mask: mask with one bit set for each request to be cleared
798 static inline void ufshcd_utrl_clear(struct ufs_hba
*hba
, u32 mask
)
800 if (hba
->quirks
& UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR
)
803 * From the UFSHCI specification: "UTP Transfer Request List CLear
804 * Register (UTRLCLR): This field is bit significant. Each bit
805 * corresponds to a slot in the UTP Transfer Request List, where bit 0
806 * corresponds to request slot 0. A bit in this field is set to ‘0’
807 * by host software to indicate to the host controller that a transfer
808 * request slot is cleared. The host controller
809 * shall free up any resources associated to the request slot
810 * immediately, and shall set the associated bit in UTRLDBR to ‘0’. The
811 * host software indicates no change to request slots by setting the
812 * associated bits in this field to ‘1’. Bits in this field shall only
813 * be set ‘1’ or ‘0’ by host software when UTRLRSR is set to ‘1’."
815 ufshcd_writel(hba
, ~mask
, REG_UTP_TRANSFER_REQ_LIST_CLEAR
);
819 * ufshcd_utmrl_clear - Clear a bit in UTMRLCLR register
820 * @hba: per adapter instance
821 * @pos: position of the bit to be cleared
823 static inline void ufshcd_utmrl_clear(struct ufs_hba
*hba
, u32 pos
)
825 if (hba
->quirks
& UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR
)
826 ufshcd_writel(hba
, (1 << pos
), REG_UTP_TASK_REQ_LIST_CLEAR
);
828 ufshcd_writel(hba
, ~(1 << pos
), REG_UTP_TASK_REQ_LIST_CLEAR
);
832 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
833 * @reg: Register value of host controller status
835 * Returns integer, 0 on Success and positive value if failed
837 static inline int ufshcd_get_lists_status(u32 reg
)
839 return !((reg
& UFSHCD_STATUS_READY
) == UFSHCD_STATUS_READY
);
843 * ufshcd_get_uic_cmd_result - Get the UIC command result
844 * @hba: Pointer to adapter instance
846 * This function gets the result of UIC command completion
847 * Returns 0 on success, non zero value on error
849 static inline int ufshcd_get_uic_cmd_result(struct ufs_hba
*hba
)
851 return ufshcd_readl(hba
, REG_UIC_COMMAND_ARG_2
) &
852 MASK_UIC_COMMAND_RESULT
;
856 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
857 * @hba: Pointer to adapter instance
859 * This function gets UIC command argument3
860 * Returns 0 on success, non zero value on error
862 static inline u32
ufshcd_get_dme_attr_val(struct ufs_hba
*hba
)
864 return ufshcd_readl(hba
, REG_UIC_COMMAND_ARG_3
);
868 * ufshcd_get_req_rsp - returns the TR response transaction type
869 * @ucd_rsp_ptr: pointer to response UPIU
872 ufshcd_get_req_rsp(struct utp_upiu_rsp
*ucd_rsp_ptr
)
874 return be32_to_cpu(ucd_rsp_ptr
->header
.dword_0
) >> 24;
878 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
879 * @ucd_rsp_ptr: pointer to response UPIU
881 * This function gets the response status and scsi_status from response UPIU
882 * Returns the response result code.
885 ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp
*ucd_rsp_ptr
)
887 return be32_to_cpu(ucd_rsp_ptr
->header
.dword_1
) & MASK_RSP_UPIU_RESULT
;
891 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
893 * @ucd_rsp_ptr: pointer to response UPIU
895 * Return the data segment length.
897 static inline unsigned int
898 ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp
*ucd_rsp_ptr
)
900 return be32_to_cpu(ucd_rsp_ptr
->header
.dword_2
) &
901 MASK_RSP_UPIU_DATA_SEG_LEN
;
905 * ufshcd_is_exception_event - Check if the device raised an exception event
906 * @ucd_rsp_ptr: pointer to response UPIU
908 * The function checks if the device raised an exception event indicated in
909 * the Device Information field of response UPIU.
911 * Returns true if exception is raised, false otherwise.
913 static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp
*ucd_rsp_ptr
)
915 return be32_to_cpu(ucd_rsp_ptr
->header
.dword_2
) &
916 MASK_RSP_EXCEPTION_EVENT
;
920 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
921 * @hba: per adapter instance
924 ufshcd_reset_intr_aggr(struct ufs_hba
*hba
)
926 ufshcd_writel(hba
, INT_AGGR_ENABLE
|
927 INT_AGGR_COUNTER_AND_TIMER_RESET
,
928 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL
);
932 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
933 * @hba: per adapter instance
934 * @cnt: Interrupt aggregation counter threshold
935 * @tmout: Interrupt aggregation timeout value
938 ufshcd_config_intr_aggr(struct ufs_hba
*hba
, u8 cnt
, u8 tmout
)
940 ufshcd_writel(hba
, INT_AGGR_ENABLE
| INT_AGGR_PARAM_WRITE
|
941 INT_AGGR_COUNTER_THLD_VAL(cnt
) |
942 INT_AGGR_TIMEOUT_VAL(tmout
),
943 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL
);
947 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
948 * @hba: per adapter instance
950 static inline void ufshcd_disable_intr_aggr(struct ufs_hba
*hba
)
952 ufshcd_writel(hba
, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL
);
956 * ufshcd_enable_run_stop_reg - Enable run-stop registers,
957 * When run-stop registers are set to 1, it indicates the
958 * host controller that it can process the requests
959 * @hba: per adapter instance
961 static void ufshcd_enable_run_stop_reg(struct ufs_hba
*hba
)
963 ufshcd_writel(hba
, UTP_TASK_REQ_LIST_RUN_STOP_BIT
,
964 REG_UTP_TASK_REQ_LIST_RUN_STOP
);
965 ufshcd_writel(hba
, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT
,
966 REG_UTP_TRANSFER_REQ_LIST_RUN_STOP
);
970 * ufshcd_hba_start - Start controller initialization sequence
971 * @hba: per adapter instance
973 static inline void ufshcd_hba_start(struct ufs_hba
*hba
)
975 u32 val
= CONTROLLER_ENABLE
;
977 if (ufshcd_crypto_enable(hba
))
978 val
|= CRYPTO_GENERAL_ENABLE
;
980 ufshcd_writel(hba
, val
, REG_CONTROLLER_ENABLE
);
984 * ufshcd_is_hba_active - Get controller state
985 * @hba: per adapter instance
987 * Returns true if and only if the controller is active.
989 static inline bool ufshcd_is_hba_active(struct ufs_hba
*hba
)
991 return ufshcd_readl(hba
, REG_CONTROLLER_ENABLE
) & CONTROLLER_ENABLE
;
994 u32
ufshcd_get_local_unipro_ver(struct ufs_hba
*hba
)
996 /* HCI version 1.0 and 1.1 supports UniPro 1.41 */
997 if (hba
->ufs_version
<= ufshci_version(1, 1))
998 return UFS_UNIPRO_VER_1_41
;
1000 return UFS_UNIPRO_VER_1_6
;
1002 EXPORT_SYMBOL(ufshcd_get_local_unipro_ver
);
1004 static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba
*hba
)
1007 * If both host and device support UniPro ver1.6 or later, PA layer
1008 * parameters tuning happens during link startup itself.
1010 * We can manually tune PA layer parameters if either host or device
1011 * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
1012 * logic simple, we will only do manual tuning if local unipro version
1013 * doesn't support ver1.6 or later.
1015 return ufshcd_get_local_unipro_ver(hba
) < UFS_UNIPRO_VER_1_6
;
1019 * ufshcd_set_clk_freq - set UFS controller clock frequencies
1020 * @hba: per adapter instance
1021 * @scale_up: If True, set max possible frequency othewise set low frequency
1023 * Returns 0 if successful
1024 * Returns < 0 for any other errors
1026 static int ufshcd_set_clk_freq(struct ufs_hba
*hba
, bool scale_up
)
1029 struct ufs_clk_info
*clki
;
1030 struct list_head
*head
= &hba
->clk_list_head
;
1032 if (list_empty(head
))
1035 list_for_each_entry(clki
, head
, list
) {
1036 if (!IS_ERR_OR_NULL(clki
->clk
)) {
1037 if (scale_up
&& clki
->max_freq
) {
1038 if (clki
->curr_freq
== clki
->max_freq
)
1041 ret
= clk_set_rate(clki
->clk
, clki
->max_freq
);
1043 dev_err(hba
->dev
, "%s: %s clk set rate(%dHz) failed, %d\n",
1044 __func__
, clki
->name
,
1045 clki
->max_freq
, ret
);
1048 trace_ufshcd_clk_scaling(dev_name(hba
->dev
),
1049 "scaled up", clki
->name
,
1053 clki
->curr_freq
= clki
->max_freq
;
1055 } else if (!scale_up
&& clki
->min_freq
) {
1056 if (clki
->curr_freq
== clki
->min_freq
)
1059 ret
= clk_set_rate(clki
->clk
, clki
->min_freq
);
1061 dev_err(hba
->dev
, "%s: %s clk set rate(%dHz) failed, %d\n",
1062 __func__
, clki
->name
,
1063 clki
->min_freq
, ret
);
1066 trace_ufshcd_clk_scaling(dev_name(hba
->dev
),
1067 "scaled down", clki
->name
,
1070 clki
->curr_freq
= clki
->min_freq
;
1073 dev_dbg(hba
->dev
, "%s: clk: %s, rate: %lu\n", __func__
,
1074 clki
->name
, clk_get_rate(clki
->clk
));
1082 * ufshcd_scale_clks - scale up or scale down UFS controller clocks
1083 * @hba: per adapter instance
1084 * @scale_up: True if scaling up and false if scaling down
1086 * Returns 0 if successful
1087 * Returns < 0 for any other errors
1089 static int ufshcd_scale_clks(struct ufs_hba
*hba
, bool scale_up
)
1092 ktime_t start
= ktime_get();
1094 ret
= ufshcd_vops_clk_scale_notify(hba
, scale_up
, PRE_CHANGE
);
1098 ret
= ufshcd_set_clk_freq(hba
, scale_up
);
1102 ret
= ufshcd_vops_clk_scale_notify(hba
, scale_up
, POST_CHANGE
);
1104 ufshcd_set_clk_freq(hba
, !scale_up
);
1107 trace_ufshcd_profile_clk_scaling(dev_name(hba
->dev
),
1108 (scale_up
? "up" : "down"),
1109 ktime_to_us(ktime_sub(ktime_get(), start
)), ret
);
1114 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
1115 * @hba: per adapter instance
1116 * @scale_up: True if scaling up and false if scaling down
1118 * Returns true if scaling is required, false otherwise.
1120 static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba
*hba
,
1123 struct ufs_clk_info
*clki
;
1124 struct list_head
*head
= &hba
->clk_list_head
;
1126 if (list_empty(head
))
1129 list_for_each_entry(clki
, head
, list
) {
1130 if (!IS_ERR_OR_NULL(clki
->clk
)) {
1131 if (scale_up
&& clki
->max_freq
) {
1132 if (clki
->curr_freq
== clki
->max_freq
)
1135 } else if (!scale_up
&& clki
->min_freq
) {
1136 if (clki
->curr_freq
== clki
->min_freq
)
1147 * Determine the number of pending commands by counting the bits in the SCSI
1148 * device budget maps. This approach has been selected because a bit is set in
1149 * the budget map before scsi_host_queue_ready() checks the host_self_blocked
1150 * flag. The host_self_blocked flag can be modified by calling
1151 * scsi_block_requests() or scsi_unblock_requests().
1153 static u32
ufshcd_pending_cmds(struct ufs_hba
*hba
)
1155 const struct scsi_device
*sdev
;
1158 lockdep_assert_held(hba
->host
->host_lock
);
1159 __shost_for_each_device(sdev
, hba
->host
)
1160 pending
+= sbitmap_weight(&sdev
->budget_map
);
1166 * Wait until all pending SCSI commands and TMFs have finished or the timeout
1169 * Return: 0 upon success; -EBUSY upon timeout.
1171 static int ufshcd_wait_for_doorbell_clr(struct ufs_hba
*hba
,
1172 u64 wait_timeout_us
)
1174 unsigned long flags
;
1178 bool timeout
= false, do_last_check
= false;
1181 ufshcd_hold(hba
, false);
1182 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1184 * Wait for all the outstanding tasks/transfer requests.
1185 * Verify by checking the doorbell registers are clear.
1187 start
= ktime_get();
1189 if (hba
->ufshcd_state
!= UFSHCD_STATE_OPERATIONAL
) {
1194 tm_doorbell
= ufshcd_readl(hba
, REG_UTP_TASK_REQ_DOOR_BELL
);
1195 tr_pending
= ufshcd_pending_cmds(hba
);
1196 if (!tm_doorbell
&& !tr_pending
) {
1199 } else if (do_last_check
) {
1203 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1204 io_schedule_timeout(msecs_to_jiffies(20));
1205 if (ktime_to_us(ktime_sub(ktime_get(), start
)) >
1209 * We might have scheduled out for long time so make
1210 * sure to check if doorbells are cleared by this time
1213 do_last_check
= true;
1215 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1216 } while (tm_doorbell
|| tr_pending
);
1220 "%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
1221 __func__
, tm_doorbell
, tr_pending
);
1225 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1226 ufshcd_release(hba
);
1231 * ufshcd_scale_gear - scale up/down UFS gear
1232 * @hba: per adapter instance
1233 * @scale_up: True for scaling up gear and false for scaling down
1235 * Returns 0 for success,
1236 * Returns -EBUSY if scaling can't happen at this time
1237 * Returns non-zero for any other errors
1239 static int ufshcd_scale_gear(struct ufs_hba
*hba
, bool scale_up
)
1242 struct ufs_pa_layer_attr new_pwr_info
;
1245 memcpy(&new_pwr_info
, &hba
->clk_scaling
.saved_pwr_info
.info
,
1246 sizeof(struct ufs_pa_layer_attr
));
1248 memcpy(&new_pwr_info
, &hba
->pwr_info
,
1249 sizeof(struct ufs_pa_layer_attr
));
1251 if (hba
->pwr_info
.gear_tx
> hba
->clk_scaling
.min_gear
||
1252 hba
->pwr_info
.gear_rx
> hba
->clk_scaling
.min_gear
) {
1253 /* save the current power mode */
1254 memcpy(&hba
->clk_scaling
.saved_pwr_info
.info
,
1256 sizeof(struct ufs_pa_layer_attr
));
1258 /* scale down gear */
1259 new_pwr_info
.gear_tx
= hba
->clk_scaling
.min_gear
;
1260 new_pwr_info
.gear_rx
= hba
->clk_scaling
.min_gear
;
1264 /* check if the power mode needs to be changed or not? */
1265 ret
= ufshcd_config_pwr_mode(hba
, &new_pwr_info
);
1267 dev_err(hba
->dev
, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
1269 hba
->pwr_info
.gear_tx
, hba
->pwr_info
.gear_rx
,
1270 new_pwr_info
.gear_tx
, new_pwr_info
.gear_rx
);
1276 * Wait until all pending SCSI commands and TMFs have finished or the timeout
1279 * Return: 0 upon success; -EBUSY upon timeout.
1281 static int ufshcd_clock_scaling_prepare(struct ufs_hba
*hba
, u64 timeout_us
)
1285 * make sure that there are no outstanding requests when
1286 * clock scaling is in progress
1288 ufshcd_scsi_block_requests(hba
);
1289 down_write(&hba
->clk_scaling_lock
);
1291 if (!hba
->clk_scaling
.is_allowed
||
1292 ufshcd_wait_for_doorbell_clr(hba
, timeout_us
)) {
1294 up_write(&hba
->clk_scaling_lock
);
1295 ufshcd_scsi_unblock_requests(hba
);
1299 /* let's not get into low power until clock scaling is completed */
1300 ufshcd_hold(hba
, false);
1306 static void ufshcd_clock_scaling_unprepare(struct ufs_hba
*hba
, bool writelock
)
1309 up_write(&hba
->clk_scaling_lock
);
1311 up_read(&hba
->clk_scaling_lock
);
1312 ufshcd_scsi_unblock_requests(hba
);
1313 ufshcd_release(hba
);
1317 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
1318 * @hba: per adapter instance
1319 * @scale_up: True for scaling up and false for scalin down
1321 * Returns 0 for success,
1322 * Returns -EBUSY if scaling can't happen at this time
1323 * Returns non-zero for any other errors
1325 static int ufshcd_devfreq_scale(struct ufs_hba
*hba
, bool scale_up
)
1328 bool is_writelock
= true;
1330 ret
= ufshcd_clock_scaling_prepare(hba
, 1 * USEC_PER_SEC
);
1334 /* scale down the gear before scaling down clocks */
1336 ret
= ufshcd_scale_gear(hba
, false);
1341 ret
= ufshcd_scale_clks(hba
, scale_up
);
1344 ufshcd_scale_gear(hba
, true);
1348 /* scale up the gear after scaling up clocks */
1350 ret
= ufshcd_scale_gear(hba
, true);
1352 ufshcd_scale_clks(hba
, false);
1357 /* Enable Write Booster if we have scaled up else disable it */
1358 if (ufshcd_enable_wb_if_scaling_up(hba
)) {
1359 downgrade_write(&hba
->clk_scaling_lock
);
1360 is_writelock
= false;
1361 ufshcd_wb_toggle(hba
, scale_up
);
1365 ufshcd_clock_scaling_unprepare(hba
, is_writelock
);
1369 static void ufshcd_clk_scaling_suspend_work(struct work_struct
*work
)
1371 struct ufs_hba
*hba
= container_of(work
, struct ufs_hba
,
1372 clk_scaling
.suspend_work
);
1373 unsigned long irq_flags
;
1375 spin_lock_irqsave(hba
->host
->host_lock
, irq_flags
);
1376 if (hba
->clk_scaling
.active_reqs
|| hba
->clk_scaling
.is_suspended
) {
1377 spin_unlock_irqrestore(hba
->host
->host_lock
, irq_flags
);
1380 hba
->clk_scaling
.is_suspended
= true;
1381 spin_unlock_irqrestore(hba
->host
->host_lock
, irq_flags
);
1383 __ufshcd_suspend_clkscaling(hba
);
1386 static void ufshcd_clk_scaling_resume_work(struct work_struct
*work
)
1388 struct ufs_hba
*hba
= container_of(work
, struct ufs_hba
,
1389 clk_scaling
.resume_work
);
1390 unsigned long irq_flags
;
1392 spin_lock_irqsave(hba
->host
->host_lock
, irq_flags
);
1393 if (!hba
->clk_scaling
.is_suspended
) {
1394 spin_unlock_irqrestore(hba
->host
->host_lock
, irq_flags
);
1397 hba
->clk_scaling
.is_suspended
= false;
1398 spin_unlock_irqrestore(hba
->host
->host_lock
, irq_flags
);
1400 devfreq_resume_device(hba
->devfreq
);
1403 static int ufshcd_devfreq_target(struct device
*dev
,
1404 unsigned long *freq
, u32 flags
)
1407 struct ufs_hba
*hba
= dev_get_drvdata(dev
);
1409 bool scale_up
, sched_clk_scaling_suspend_work
= false;
1410 struct list_head
*clk_list
= &hba
->clk_list_head
;
1411 struct ufs_clk_info
*clki
;
1412 unsigned long irq_flags
;
1414 if (!ufshcd_is_clkscaling_supported(hba
))
1417 clki
= list_first_entry(&hba
->clk_list_head
, struct ufs_clk_info
, list
);
1418 /* Override with the closest supported frequency */
1419 *freq
= (unsigned long) clk_round_rate(clki
->clk
, *freq
);
1420 spin_lock_irqsave(hba
->host
->host_lock
, irq_flags
);
1421 if (ufshcd_eh_in_progress(hba
)) {
1422 spin_unlock_irqrestore(hba
->host
->host_lock
, irq_flags
);
1426 if (!hba
->clk_scaling
.active_reqs
)
1427 sched_clk_scaling_suspend_work
= true;
1429 if (list_empty(clk_list
)) {
1430 spin_unlock_irqrestore(hba
->host
->host_lock
, irq_flags
);
1434 /* Decide based on the rounded-off frequency and update */
1435 scale_up
= *freq
== clki
->max_freq
;
1437 *freq
= clki
->min_freq
;
1438 /* Update the frequency */
1439 if (!ufshcd_is_devfreq_scaling_required(hba
, scale_up
)) {
1440 spin_unlock_irqrestore(hba
->host
->host_lock
, irq_flags
);
1442 goto out
; /* no state change required */
1444 spin_unlock_irqrestore(hba
->host
->host_lock
, irq_flags
);
1446 start
= ktime_get();
1447 ret
= ufshcd_devfreq_scale(hba
, scale_up
);
1449 trace_ufshcd_profile_clk_scaling(dev_name(hba
->dev
),
1450 (scale_up
? "up" : "down"),
1451 ktime_to_us(ktime_sub(ktime_get(), start
)), ret
);
1454 if (sched_clk_scaling_suspend_work
)
1455 queue_work(hba
->clk_scaling
.workq
,
1456 &hba
->clk_scaling
.suspend_work
);
1461 static int ufshcd_devfreq_get_dev_status(struct device
*dev
,
1462 struct devfreq_dev_status
*stat
)
1464 struct ufs_hba
*hba
= dev_get_drvdata(dev
);
1465 struct ufs_clk_scaling
*scaling
= &hba
->clk_scaling
;
1466 unsigned long flags
;
1467 struct list_head
*clk_list
= &hba
->clk_list_head
;
1468 struct ufs_clk_info
*clki
;
1471 if (!ufshcd_is_clkscaling_supported(hba
))
1474 memset(stat
, 0, sizeof(*stat
));
1476 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1477 curr_t
= ktime_get();
1478 if (!scaling
->window_start_t
)
1481 clki
= list_first_entry(clk_list
, struct ufs_clk_info
, list
);
1483 * If current frequency is 0, then the ondemand governor considers
1484 * there's no initial frequency set. And it always requests to set
1485 * to max. frequency.
1487 stat
->current_frequency
= clki
->curr_freq
;
1488 if (scaling
->is_busy_started
)
1489 scaling
->tot_busy_t
+= ktime_us_delta(curr_t
,
1490 scaling
->busy_start_t
);
1492 stat
->total_time
= ktime_us_delta(curr_t
, scaling
->window_start_t
);
1493 stat
->busy_time
= scaling
->tot_busy_t
;
1495 scaling
->window_start_t
= curr_t
;
1496 scaling
->tot_busy_t
= 0;
1498 if (hba
->outstanding_reqs
) {
1499 scaling
->busy_start_t
= curr_t
;
1500 scaling
->is_busy_started
= true;
1502 scaling
->busy_start_t
= 0;
1503 scaling
->is_busy_started
= false;
1505 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1509 static int ufshcd_devfreq_init(struct ufs_hba
*hba
)
1511 struct list_head
*clk_list
= &hba
->clk_list_head
;
1512 struct ufs_clk_info
*clki
;
1513 struct devfreq
*devfreq
;
1516 /* Skip devfreq if we don't have any clocks in the list */
1517 if (list_empty(clk_list
))
1520 clki
= list_first_entry(clk_list
, struct ufs_clk_info
, list
);
1521 dev_pm_opp_add(hba
->dev
, clki
->min_freq
, 0);
1522 dev_pm_opp_add(hba
->dev
, clki
->max_freq
, 0);
1524 ufshcd_vops_config_scaling_param(hba
, &hba
->vps
->devfreq_profile
,
1525 &hba
->vps
->ondemand_data
);
1526 devfreq
= devfreq_add_device(hba
->dev
,
1527 &hba
->vps
->devfreq_profile
,
1528 DEVFREQ_GOV_SIMPLE_ONDEMAND
,
1529 &hba
->vps
->ondemand_data
);
1530 if (IS_ERR(devfreq
)) {
1531 ret
= PTR_ERR(devfreq
);
1532 dev_err(hba
->dev
, "Unable to register with devfreq %d\n", ret
);
1534 dev_pm_opp_remove(hba
->dev
, clki
->min_freq
);
1535 dev_pm_opp_remove(hba
->dev
, clki
->max_freq
);
1539 hba
->devfreq
= devfreq
;
1544 static void ufshcd_devfreq_remove(struct ufs_hba
*hba
)
1546 struct list_head
*clk_list
= &hba
->clk_list_head
;
1547 struct ufs_clk_info
*clki
;
1552 devfreq_remove_device(hba
->devfreq
);
1553 hba
->devfreq
= NULL
;
1555 clki
= list_first_entry(clk_list
, struct ufs_clk_info
, list
);
1556 dev_pm_opp_remove(hba
->dev
, clki
->min_freq
);
1557 dev_pm_opp_remove(hba
->dev
, clki
->max_freq
);
1560 static void __ufshcd_suspend_clkscaling(struct ufs_hba
*hba
)
1562 unsigned long flags
;
1564 devfreq_suspend_device(hba
->devfreq
);
1565 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1566 hba
->clk_scaling
.window_start_t
= 0;
1567 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1570 static void ufshcd_suspend_clkscaling(struct ufs_hba
*hba
)
1572 unsigned long flags
;
1573 bool suspend
= false;
1575 cancel_work_sync(&hba
->clk_scaling
.suspend_work
);
1576 cancel_work_sync(&hba
->clk_scaling
.resume_work
);
1578 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1579 if (!hba
->clk_scaling
.is_suspended
) {
1581 hba
->clk_scaling
.is_suspended
= true;
1583 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1586 __ufshcd_suspend_clkscaling(hba
);
1589 static void ufshcd_resume_clkscaling(struct ufs_hba
*hba
)
1591 unsigned long flags
;
1592 bool resume
= false;
1594 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1595 if (hba
->clk_scaling
.is_suspended
) {
1597 hba
->clk_scaling
.is_suspended
= false;
1599 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1602 devfreq_resume_device(hba
->devfreq
);
1605 static ssize_t
ufshcd_clkscale_enable_show(struct device
*dev
,
1606 struct device_attribute
*attr
, char *buf
)
1608 struct ufs_hba
*hba
= dev_get_drvdata(dev
);
1610 return sysfs_emit(buf
, "%d\n", hba
->clk_scaling
.is_enabled
);
1613 static ssize_t
ufshcd_clkscale_enable_store(struct device
*dev
,
1614 struct device_attribute
*attr
, const char *buf
, size_t count
)
1616 struct ufs_hba
*hba
= dev_get_drvdata(dev
);
1620 if (kstrtou32(buf
, 0, &value
))
1623 down(&hba
->host_sem
);
1624 if (!ufshcd_is_user_access_allowed(hba
)) {
1630 if (value
== hba
->clk_scaling
.is_enabled
)
1633 ufshcd_rpm_get_sync(hba
);
1634 ufshcd_hold(hba
, false);
1636 hba
->clk_scaling
.is_enabled
= value
;
1639 ufshcd_resume_clkscaling(hba
);
1641 ufshcd_suspend_clkscaling(hba
);
1642 err
= ufshcd_devfreq_scale(hba
, true);
1644 dev_err(hba
->dev
, "%s: failed to scale clocks up %d\n",
1648 ufshcd_release(hba
);
1649 ufshcd_rpm_put_sync(hba
);
1652 return err
? err
: count
;
1655 static void ufshcd_init_clk_scaling_sysfs(struct ufs_hba
*hba
)
1657 hba
->clk_scaling
.enable_attr
.show
= ufshcd_clkscale_enable_show
;
1658 hba
->clk_scaling
.enable_attr
.store
= ufshcd_clkscale_enable_store
;
1659 sysfs_attr_init(&hba
->clk_scaling
.enable_attr
.attr
);
1660 hba
->clk_scaling
.enable_attr
.attr
.name
= "clkscale_enable";
1661 hba
->clk_scaling
.enable_attr
.attr
.mode
= 0644;
1662 if (device_create_file(hba
->dev
, &hba
->clk_scaling
.enable_attr
))
1663 dev_err(hba
->dev
, "Failed to create sysfs for clkscale_enable\n");
1666 static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba
*hba
)
1668 if (hba
->clk_scaling
.enable_attr
.attr
.name
)
1669 device_remove_file(hba
->dev
, &hba
->clk_scaling
.enable_attr
);
1672 static void ufshcd_init_clk_scaling(struct ufs_hba
*hba
)
1674 char wq_name
[sizeof("ufs_clkscaling_00")];
1676 if (!ufshcd_is_clkscaling_supported(hba
))
1679 if (!hba
->clk_scaling
.min_gear
)
1680 hba
->clk_scaling
.min_gear
= UFS_HS_G1
;
1682 INIT_WORK(&hba
->clk_scaling
.suspend_work
,
1683 ufshcd_clk_scaling_suspend_work
);
1684 INIT_WORK(&hba
->clk_scaling
.resume_work
,
1685 ufshcd_clk_scaling_resume_work
);
1687 snprintf(wq_name
, sizeof(wq_name
), "ufs_clkscaling_%d",
1688 hba
->host
->host_no
);
1689 hba
->clk_scaling
.workq
= create_singlethread_workqueue(wq_name
);
1691 hba
->clk_scaling
.is_initialized
= true;
1694 static void ufshcd_exit_clk_scaling(struct ufs_hba
*hba
)
1696 if (!hba
->clk_scaling
.is_initialized
)
1699 ufshcd_remove_clk_scaling_sysfs(hba
);
1700 destroy_workqueue(hba
->clk_scaling
.workq
);
1701 ufshcd_devfreq_remove(hba
);
1702 hba
->clk_scaling
.is_initialized
= false;
1705 static void ufshcd_ungate_work(struct work_struct
*work
)
1708 unsigned long flags
;
1709 struct ufs_hba
*hba
= container_of(work
, struct ufs_hba
,
1710 clk_gating
.ungate_work
);
1712 cancel_delayed_work_sync(&hba
->clk_gating
.gate_work
);
1714 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1715 if (hba
->clk_gating
.state
== CLKS_ON
) {
1716 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1720 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1721 ufshcd_hba_vreg_set_hpm(hba
);
1722 ufshcd_setup_clocks(hba
, true);
1724 ufshcd_enable_irq(hba
);
1726 /* Exit from hibern8 */
1727 if (ufshcd_can_hibern8_during_gating(hba
)) {
1728 /* Prevent gating in this path */
1729 hba
->clk_gating
.is_suspended
= true;
1730 if (ufshcd_is_link_hibern8(hba
)) {
1731 ret
= ufshcd_uic_hibern8_exit(hba
);
1733 dev_err(hba
->dev
, "%s: hibern8 exit failed %d\n",
1736 ufshcd_set_link_active(hba
);
1738 hba
->clk_gating
.is_suspended
= false;
1741 ufshcd_scsi_unblock_requests(hba
);
1745 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
1746 * Also, exit from hibern8 mode and set the link as active.
1747 * @hba: per adapter instance
1748 * @async: This indicates whether caller should ungate clocks asynchronously.
1750 int ufshcd_hold(struct ufs_hba
*hba
, bool async
)
1754 unsigned long flags
;
1756 if (!ufshcd_is_clkgating_allowed(hba
) ||
1757 !hba
->clk_gating
.is_initialized
)
1759 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1760 hba
->clk_gating
.active_reqs
++;
1763 switch (hba
->clk_gating
.state
) {
1766 * Wait for the ungate work to complete if in progress.
1767 * Though the clocks may be in ON state, the link could
1768 * still be in hibner8 state if hibern8 is allowed
1769 * during clock gating.
1770 * Make sure we exit hibern8 state also in addition to
1773 if (ufshcd_can_hibern8_during_gating(hba
) &&
1774 ufshcd_is_link_hibern8(hba
)) {
1777 hba
->clk_gating
.active_reqs
--;
1780 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1781 flush_result
= flush_work(&hba
->clk_gating
.ungate_work
);
1782 if (hba
->clk_gating
.is_suspended
&& !flush_result
)
1784 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1789 if (cancel_delayed_work(&hba
->clk_gating
.gate_work
)) {
1790 hba
->clk_gating
.state
= CLKS_ON
;
1791 trace_ufshcd_clk_gating(dev_name(hba
->dev
),
1792 hba
->clk_gating
.state
);
1796 * If we are here, it means gating work is either done or
1797 * currently running. Hence, fall through to cancel gating
1798 * work and to enable clocks.
1802 hba
->clk_gating
.state
= REQ_CLKS_ON
;
1803 trace_ufshcd_clk_gating(dev_name(hba
->dev
),
1804 hba
->clk_gating
.state
);
1805 if (queue_work(hba
->clk_gating
.clk_gating_workq
,
1806 &hba
->clk_gating
.ungate_work
))
1807 ufshcd_scsi_block_requests(hba
);
1809 * fall through to check if we should wait for this
1810 * work to be done or not.
1816 hba
->clk_gating
.active_reqs
--;
1820 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1821 flush_work(&hba
->clk_gating
.ungate_work
);
1822 /* Make sure state is CLKS_ON before returning */
1823 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1826 dev_err(hba
->dev
, "%s: clk gating is in invalid state %d\n",
1827 __func__
, hba
->clk_gating
.state
);
1830 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1834 EXPORT_SYMBOL_GPL(ufshcd_hold
);
1836 static void ufshcd_gate_work(struct work_struct
*work
)
1838 struct ufs_hba
*hba
= container_of(work
, struct ufs_hba
,
1839 clk_gating
.gate_work
.work
);
1840 unsigned long flags
;
1843 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1845 * In case you are here to cancel this work the gating state
1846 * would be marked as REQ_CLKS_ON. In this case save time by
1847 * skipping the gating work and exit after changing the clock
1850 if (hba
->clk_gating
.is_suspended
||
1851 (hba
->clk_gating
.state
!= REQ_CLKS_OFF
)) {
1852 hba
->clk_gating
.state
= CLKS_ON
;
1853 trace_ufshcd_clk_gating(dev_name(hba
->dev
),
1854 hba
->clk_gating
.state
);
1858 if (hba
->clk_gating
.active_reqs
1859 || hba
->ufshcd_state
!= UFSHCD_STATE_OPERATIONAL
1860 || hba
->outstanding_reqs
|| hba
->outstanding_tasks
1861 || hba
->active_uic_cmd
|| hba
->uic_async_done
)
1864 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1866 /* put the link into hibern8 mode before turning off clocks */
1867 if (ufshcd_can_hibern8_during_gating(hba
)) {
1868 ret
= ufshcd_uic_hibern8_enter(hba
);
1870 hba
->clk_gating
.state
= CLKS_ON
;
1871 dev_err(hba
->dev
, "%s: hibern8 enter failed %d\n",
1873 trace_ufshcd_clk_gating(dev_name(hba
->dev
),
1874 hba
->clk_gating
.state
);
1877 ufshcd_set_link_hibern8(hba
);
1880 ufshcd_disable_irq(hba
);
1882 ufshcd_setup_clocks(hba
, false);
1884 /* Put the host controller in low power mode if possible */
1885 ufshcd_hba_vreg_set_lpm(hba
);
1887 * In case you are here to cancel this work the gating state
1888 * would be marked as REQ_CLKS_ON. In this case keep the state
1889 * as REQ_CLKS_ON which would anyway imply that clocks are off
1890 * and a request to turn them on is pending. By doing this way,
1891 * we keep the state machine in tact and this would ultimately
1892 * prevent from doing cancel work multiple times when there are
1893 * new requests arriving before the current cancel work is done.
1895 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1896 if (hba
->clk_gating
.state
== REQ_CLKS_OFF
) {
1897 hba
->clk_gating
.state
= CLKS_OFF
;
1898 trace_ufshcd_clk_gating(dev_name(hba
->dev
),
1899 hba
->clk_gating
.state
);
1902 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1907 /* host lock must be held before calling this variant */
1908 static void __ufshcd_release(struct ufs_hba
*hba
)
1910 if (!ufshcd_is_clkgating_allowed(hba
))
1913 hba
->clk_gating
.active_reqs
--;
1915 if (hba
->clk_gating
.active_reqs
|| hba
->clk_gating
.is_suspended
||
1916 hba
->ufshcd_state
!= UFSHCD_STATE_OPERATIONAL
||
1917 hba
->outstanding_tasks
|| !hba
->clk_gating
.is_initialized
||
1918 hba
->active_uic_cmd
|| hba
->uic_async_done
||
1919 hba
->clk_gating
.state
== CLKS_OFF
)
1922 hba
->clk_gating
.state
= REQ_CLKS_OFF
;
1923 trace_ufshcd_clk_gating(dev_name(hba
->dev
), hba
->clk_gating
.state
);
1924 queue_delayed_work(hba
->clk_gating
.clk_gating_workq
,
1925 &hba
->clk_gating
.gate_work
,
1926 msecs_to_jiffies(hba
->clk_gating
.delay_ms
));
1929 void ufshcd_release(struct ufs_hba
*hba
)
1931 unsigned long flags
;
1933 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1934 __ufshcd_release(hba
);
1935 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1937 EXPORT_SYMBOL_GPL(ufshcd_release
);
1939 static ssize_t
ufshcd_clkgate_delay_show(struct device
*dev
,
1940 struct device_attribute
*attr
, char *buf
)
1942 struct ufs_hba
*hba
= dev_get_drvdata(dev
);
1944 return sysfs_emit(buf
, "%lu\n", hba
->clk_gating
.delay_ms
);
1947 void ufshcd_clkgate_delay_set(struct device
*dev
, unsigned long value
)
1949 struct ufs_hba
*hba
= dev_get_drvdata(dev
);
1950 unsigned long flags
;
1952 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1953 hba
->clk_gating
.delay_ms
= value
;
1954 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1956 EXPORT_SYMBOL_GPL(ufshcd_clkgate_delay_set
);
1958 static ssize_t
ufshcd_clkgate_delay_store(struct device
*dev
,
1959 struct device_attribute
*attr
, const char *buf
, size_t count
)
1961 unsigned long value
;
1963 if (kstrtoul(buf
, 0, &value
))
1966 ufshcd_clkgate_delay_set(dev
, value
);
1970 static ssize_t
ufshcd_clkgate_enable_show(struct device
*dev
,
1971 struct device_attribute
*attr
, char *buf
)
1973 struct ufs_hba
*hba
= dev_get_drvdata(dev
);
1975 return sysfs_emit(buf
, "%d\n", hba
->clk_gating
.is_enabled
);
1978 static ssize_t
ufshcd_clkgate_enable_store(struct device
*dev
,
1979 struct device_attribute
*attr
, const char *buf
, size_t count
)
1981 struct ufs_hba
*hba
= dev_get_drvdata(dev
);
1982 unsigned long flags
;
1985 if (kstrtou32(buf
, 0, &value
))
1990 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1991 if (value
== hba
->clk_gating
.is_enabled
)
1995 __ufshcd_release(hba
);
1997 hba
->clk_gating
.active_reqs
++;
1999 hba
->clk_gating
.is_enabled
= value
;
2001 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
static void ufshcd_init_clk_gating_sysfs(struct ufs_hba *hba)
{
	hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
	hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
	sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
	hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
	hba->clk_gating.delay_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");

	hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
	hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
	sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
	hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
	hba->clk_gating.enable_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
}

static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba)
{
	if (hba->clk_gating.delay_attr.attr.name)
		device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
	if (hba->clk_gating.enable_attr.attr.name)
		device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
}
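/*
 * The two attributes registered above surface as "clkgate_delay_ms" and
 * "clkgate_enable" under the controller's device directory in sysfs.  A
 * host glue driver that wants a different idle delay than the default set
 * in ufshcd_init_clk_gating() below can also call the exported helper
 * directly (illustrative sketch, assuming the glue driver holds the hba):
 *
 *	ufshcd_clkgate_delay_set(hba->dev, 30);
 */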
static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
	char wq_name[sizeof("ufs_clk_gating_00")];

	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.state = CLKS_ON;

	hba->clk_gating.delay_ms = 150;
	INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
	INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);

	snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
		 hba->host->host_no);
	hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
					WQ_MEM_RECLAIM | WQ_HIGHPRI);

	ufshcd_init_clk_gating_sysfs(hba);

	hba->clk_gating.is_enabled = true;
	hba->clk_gating.is_initialized = true;
}

static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
{
	if (!hba->clk_gating.is_initialized)
		return;

	ufshcd_remove_clk_gating_sysfs(hba);

	/* Ungate the clock if necessary. */
	ufshcd_hold(hba, false);
	hba->clk_gating.is_initialized = false;
	ufshcd_release(hba);

	destroy_workqueue(hba->clk_gating.clk_gating_workq);
}
2071 static void ufshcd_clk_scaling_start_busy(struct ufs_hba
*hba
)
2073 bool queue_resume_work
= false;
2074 ktime_t curr_t
= ktime_get();
2075 unsigned long flags
;
2077 if (!ufshcd_is_clkscaling_supported(hba
))
2080 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
2081 if (!hba
->clk_scaling
.active_reqs
++)
2082 queue_resume_work
= true;
2084 if (!hba
->clk_scaling
.is_enabled
|| hba
->pm_op_in_progress
) {
2085 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
2089 if (queue_resume_work
)
2090 queue_work(hba
->clk_scaling
.workq
,
2091 &hba
->clk_scaling
.resume_work
);
2093 if (!hba
->clk_scaling
.window_start_t
) {
2094 hba
->clk_scaling
.window_start_t
= curr_t
;
2095 hba
->clk_scaling
.tot_busy_t
= 0;
2096 hba
->clk_scaling
.is_busy_started
= false;
2099 if (!hba
->clk_scaling
.is_busy_started
) {
2100 hba
->clk_scaling
.busy_start_t
= curr_t
;
2101 hba
->clk_scaling
.is_busy_started
= true;
2103 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
2106 static void ufshcd_clk_scaling_update_busy(struct ufs_hba
*hba
)
2108 struct ufs_clk_scaling
*scaling
= &hba
->clk_scaling
;
2109 unsigned long flags
;
2111 if (!ufshcd_is_clkscaling_supported(hba
))
2114 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
2115 hba
->clk_scaling
.active_reqs
--;
2116 if (!hba
->outstanding_reqs
&& scaling
->is_busy_started
) {
2117 scaling
->tot_busy_t
+= ktime_to_us(ktime_sub(ktime_get(),
2118 scaling
->busy_start_t
));
2119 scaling
->busy_start_t
= 0;
2120 scaling
->is_busy_started
= false;
2122 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
static inline int ufshcd_monitor_opcode2dir(u8 opcode)
{
	if (opcode == READ_6 || opcode == READ_10 || opcode == READ_16)
		return READ;
	else if (opcode == WRITE_6 || opcode == WRITE_10 || opcode == WRITE_16)
		return WRITE;
	else
		return -EINVAL;
}

static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba,
						struct ufshcd_lrb *lrbp)
{
	const struct ufs_hba_monitor *m = &hba->monitor;

	return (m->enabled && lrbp && lrbp->cmd &&
		(!m->chunk_size || m->chunk_size == lrbp->cmd->sdb.length) &&
		ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp));
}
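/*
 * Example of the gating logic above: if the monitor was enabled with
 * monitor.chunk_size == 4096, only commands whose transfer length is exactly
 * 4096 bytes are fed into ufshcd_start_monitor()/ufshcd_update_monitor();
 * with chunk_size == 0 every command issued after monitor.enabled_ts is
 * accounted.
 */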
2145 static void ufshcd_start_monitor(struct ufs_hba
*hba
,
2146 const struct ufshcd_lrb
*lrbp
)
2148 int dir
= ufshcd_monitor_opcode2dir(*lrbp
->cmd
->cmnd
);
2149 unsigned long flags
;
2151 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
2152 if (dir
>= 0 && hba
->monitor
.nr_queued
[dir
]++ == 0)
2153 hba
->monitor
.busy_start_ts
[dir
] = ktime_get();
2154 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
2157 static void ufshcd_update_monitor(struct ufs_hba
*hba
, const struct ufshcd_lrb
*lrbp
)
2159 int dir
= ufshcd_monitor_opcode2dir(*lrbp
->cmd
->cmnd
);
2160 unsigned long flags
;
2162 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
2163 if (dir
>= 0 && hba
->monitor
.nr_queued
[dir
] > 0) {
2164 const struct request
*req
= scsi_cmd_to_rq(lrbp
->cmd
);
2165 struct ufs_hba_monitor
*m
= &hba
->monitor
;
2166 ktime_t now
, inc
, lat
;
2168 now
= lrbp
->compl_time_stamp
;
2169 inc
= ktime_sub(now
, m
->busy_start_ts
[dir
]);
2170 m
->total_busy
[dir
] = ktime_add(m
->total_busy
[dir
], inc
);
2171 m
->nr_sec_rw
[dir
] += blk_rq_sectors(req
);
2173 /* Update latencies */
2175 lat
= ktime_sub(now
, lrbp
->issue_time_stamp
);
2176 m
->lat_sum
[dir
] += lat
;
2177 if (m
->lat_max
[dir
] < lat
|| !m
->lat_max
[dir
])
2178 m
->lat_max
[dir
] = lat
;
2179 if (m
->lat_min
[dir
] > lat
|| !m
->lat_min
[dir
])
2180 m
->lat_min
[dir
] = lat
;
2182 m
->nr_queued
[dir
]--;
2183 /* Push forward the busy start of monitor */
2184 m
->busy_start_ts
[dir
] = now
;
2186 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
2190 * ufshcd_send_command - Send SCSI or device management commands
2191 * @hba: per adapter instance
2192 * @task_tag: Task tag of the command
2193 * @hwq: pointer to hardware queue instance
2196 void ufshcd_send_command(struct ufs_hba
*hba
, unsigned int task_tag
,
2197 struct ufs_hw_queue
*hwq
)
2199 struct ufshcd_lrb
*lrbp
= &hba
->lrb
[task_tag
];
2200 unsigned long flags
;
2202 lrbp
->issue_time_stamp
= ktime_get();
2203 lrbp
->issue_time_stamp_local_clock
= local_clock();
2204 lrbp
->compl_time_stamp
= ktime_set(0, 0);
2205 lrbp
->compl_time_stamp_local_clock
= 0;
2206 ufshcd_add_command_trace(hba
, task_tag
, UFS_CMD_SEND
);
2207 ufshcd_clk_scaling_start_busy(hba
);
2208 if (unlikely(ufshcd_should_inform_monitor(hba
, lrbp
)))
2209 ufshcd_start_monitor(hba
, lrbp
);
2211 if (is_mcq_enabled(hba
)) {
2212 int utrd_size
= sizeof(struct utp_transfer_req_desc
);
2214 spin_lock(&hwq
->sq_lock
);
2215 memcpy(hwq
->sqe_base_addr
+ (hwq
->sq_tail_slot
* utrd_size
),
2216 lrbp
->utr_descriptor_ptr
, utrd_size
);
2217 ufshcd_inc_sq_tail(hwq
);
2218 spin_unlock(&hwq
->sq_lock
);
2220 spin_lock_irqsave(&hba
->outstanding_lock
, flags
);
2221 if (hba
->vops
&& hba
->vops
->setup_xfer_req
)
2222 hba
->vops
->setup_xfer_req(hba
, lrbp
->task_tag
,
2224 __set_bit(lrbp
->task_tag
, &hba
->outstanding_reqs
);
2225 ufshcd_writel(hba
, 1 << lrbp
->task_tag
,
2226 REG_UTP_TRANSFER_REQ_DOOR_BELL
);
2227 spin_unlock_irqrestore(&hba
->outstanding_lock
, flags
);
2232 * ufshcd_copy_sense_data - Copy sense data in case of check condition
2233 * @lrbp: pointer to local reference block
2235 static inline void ufshcd_copy_sense_data(struct ufshcd_lrb
*lrbp
)
2237 u8
*const sense_buffer
= lrbp
->cmd
->sense_buffer
;
2241 ufshcd_get_rsp_upiu_data_seg_len(lrbp
->ucd_rsp_ptr
)) {
2244 len
= be16_to_cpu(lrbp
->ucd_rsp_ptr
->sr
.sense_data_len
);
2245 len_to_copy
= min_t(int, UFS_SENSE_SIZE
, len
);
2247 memcpy(sense_buffer
, lrbp
->ucd_rsp_ptr
->sr
.sense_data
,
2253 * ufshcd_copy_query_response() - Copy the Query Response and the data
2255 * @hba: per adapter instance
2256 * @lrbp: pointer to local reference block
2259 int ufshcd_copy_query_response(struct ufs_hba
*hba
, struct ufshcd_lrb
*lrbp
)
2261 struct ufs_query_res
*query_res
= &hba
->dev_cmd
.query
.response
;
2263 memcpy(&query_res
->upiu_res
, &lrbp
->ucd_rsp_ptr
->qr
, QUERY_OSF_SIZE
);
2265 /* Get the descriptor */
2266 if (hba
->dev_cmd
.query
.descriptor
&&
2267 lrbp
->ucd_rsp_ptr
->qr
.opcode
== UPIU_QUERY_OPCODE_READ_DESC
) {
2268 u8
*descp
= (u8
*)lrbp
->ucd_rsp_ptr
+
2269 GENERAL_UPIU_REQUEST_SIZE
;
2273 /* data segment length */
2274 resp_len
= be32_to_cpu(lrbp
->ucd_rsp_ptr
->header
.dword_2
) &
2275 MASK_QUERY_DATA_SEG_LEN
;
2276 buf_len
= be16_to_cpu(
2277 hba
->dev_cmd
.query
.request
.upiu_req
.length
);
2278 if (likely(buf_len
>= resp_len
)) {
2279 memcpy(hba
->dev_cmd
.query
.descriptor
, descp
, resp_len
);
2282 "%s: rsp size %d is bigger than buffer size %d",
2283 __func__
, resp_len
, buf_len
);
2292 * ufshcd_hba_capabilities - Read controller capabilities
2293 * @hba: per adapter instance
2295 * Return: 0 on success, negative on error.
2297 static inline int ufshcd_hba_capabilities(struct ufs_hba
*hba
)
2301 hba
->capabilities
= ufshcd_readl(hba
, REG_CONTROLLER_CAPABILITIES
);
2302 if (hba
->quirks
& UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS
)
2303 hba
->capabilities
&= ~MASK_64_ADDRESSING_SUPPORT
;
2305 /* nutrs and nutmrs are 0 based values */
2306 hba
->nutrs
= (hba
->capabilities
& MASK_TRANSFER_REQUESTS_SLOTS
) + 1;
2308 ((hba
->capabilities
& MASK_TASK_MANAGEMENT_REQUEST_SLOTS
) >> 16) + 1;
2309 hba
->reserved_slot
= hba
->nutrs
- 1;
2311 /* Read crypto capabilities */
2312 err
= ufshcd_hba_init_crypto_capabilities(hba
);
2314 dev_err(hba
->dev
, "crypto setup failed\n");
2316 hba
->mcq_sup
= FIELD_GET(MASK_MCQ_SUPPORT
, hba
->capabilities
);
2320 hba
->mcq_capabilities
= ufshcd_readl(hba
, REG_MCQCAP
);
2321 hba
->ext_iid_sup
= FIELD_GET(MASK_EXT_IID_SUPPORT
,
2322 hba
->mcq_capabilities
);
/**
 * ufshcd_ready_for_uic_cmd - Check if controller is ready
 *                            to accept UIC commands
 * @hba: per adapter instance
 * Return true on success, else false
 */
static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY;
}

/**
 * ufshcd_get_upmcrs - Get the power mode change request status
 * @hba: Pointer to adapter instance
 *
 * This function gets the UPMCRS field of HCS register
 * Returns value of UPMCRS field
 */
static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
}
2351 * ufshcd_dispatch_uic_cmd - Dispatch an UIC command to the Unipro layer
2352 * @hba: per adapter instance
2353 * @uic_cmd: UIC command
2356 ufshcd_dispatch_uic_cmd(struct ufs_hba
*hba
, struct uic_command
*uic_cmd
)
2358 lockdep_assert_held(&hba
->uic_cmd_mutex
);
2360 WARN_ON(hba
->active_uic_cmd
);
2362 hba
->active_uic_cmd
= uic_cmd
;
2365 ufshcd_writel(hba
, uic_cmd
->argument1
, REG_UIC_COMMAND_ARG_1
);
2366 ufshcd_writel(hba
, uic_cmd
->argument2
, REG_UIC_COMMAND_ARG_2
);
2367 ufshcd_writel(hba
, uic_cmd
->argument3
, REG_UIC_COMMAND_ARG_3
);
2369 ufshcd_add_uic_command_trace(hba
, uic_cmd
, UFS_CMD_SEND
);
2372 ufshcd_writel(hba
, uic_cmd
->command
& COMMAND_OPCODE_MASK
,
2377 * ufshcd_wait_for_uic_cmd - Wait for completion of an UIC command
2378 * @hba: per adapter instance
2379 * @uic_cmd: UIC command
2381 * Returns 0 only if success.
2384 ufshcd_wait_for_uic_cmd(struct ufs_hba
*hba
, struct uic_command
*uic_cmd
)
2387 unsigned long flags
;
2389 lockdep_assert_held(&hba
->uic_cmd_mutex
);
2391 if (wait_for_completion_timeout(&uic_cmd
->done
,
2392 msecs_to_jiffies(UIC_CMD_TIMEOUT
))) {
2393 ret
= uic_cmd
->argument2
& MASK_UIC_COMMAND_RESULT
;
2397 "uic cmd 0x%x with arg3 0x%x completion timeout\n",
2398 uic_cmd
->command
, uic_cmd
->argument3
);
2400 if (!uic_cmd
->cmd_active
) {
2401 dev_err(hba
->dev
, "%s: UIC cmd has been completed, return the result\n",
2403 ret
= uic_cmd
->argument2
& MASK_UIC_COMMAND_RESULT
;
2407 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
2408 hba
->active_uic_cmd
= NULL
;
2409 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
2415 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2416 * @hba: per adapter instance
2417 * @uic_cmd: UIC command
2418 * @completion: initialize the completion only if this is set to true
2420 * Returns 0 only if success.
2423 __ufshcd_send_uic_cmd(struct ufs_hba
*hba
, struct uic_command
*uic_cmd
,
2426 lockdep_assert_held(&hba
->uic_cmd_mutex
);
2427 lockdep_assert_held(hba
->host
->host_lock
);
2429 if (!ufshcd_ready_for_uic_cmd(hba
)) {
2431 "Controller not ready to accept UIC commands\n");
2436 init_completion(&uic_cmd
->done
);
2438 uic_cmd
->cmd_active
= 1;
2439 ufshcd_dispatch_uic_cmd(hba
, uic_cmd
);
2445 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2446 * @hba: per adapter instance
2447 * @uic_cmd: UIC command
2449 * Returns 0 only if success.
2451 int ufshcd_send_uic_cmd(struct ufs_hba
*hba
, struct uic_command
*uic_cmd
)
2454 unsigned long flags
;
2456 if (hba
->quirks
& UFSHCD_QUIRK_BROKEN_UIC_CMD
)
2459 ufshcd_hold(hba
, false);
2460 mutex_lock(&hba
->uic_cmd_mutex
);
2461 ufshcd_add_delay_before_dme_cmd(hba
);
2463 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
2464 ret
= __ufshcd_send_uic_cmd(hba
, uic_cmd
, true);
2465 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
2467 ret
= ufshcd_wait_for_uic_cmd(hba
, uic_cmd
);
2469 mutex_unlock(&hba
->uic_cmd_mutex
);
2471 ufshcd_release(hba
);
2476 * ufshcd_sgl_to_prdt - SG list to PRTD (Physical Region Description Table, 4DW format)
2477 * @hba: per-adapter instance
2478 * @lrbp: pointer to local reference block
2479 * @sg_entries: The number of sg lists actually used
2480 * @sg_list: Pointer to SG list
2482 static void ufshcd_sgl_to_prdt(struct ufs_hba
*hba
, struct ufshcd_lrb
*lrbp
, int sg_entries
,
2483 struct scatterlist
*sg_list
)
2485 struct ufshcd_sg_entry
*prd
;
2486 struct scatterlist
*sg
;
2491 if (hba
->quirks
& UFSHCD_QUIRK_PRDT_BYTE_GRAN
)
2492 lrbp
->utr_descriptor_ptr
->prd_table_length
=
2493 cpu_to_le16(sg_entries
* ufshcd_sg_entry_size(hba
));
2495 lrbp
->utr_descriptor_ptr
->prd_table_length
= cpu_to_le16(sg_entries
);
2497 prd
= lrbp
->ucd_prdt_ptr
;
2499 for_each_sg(sg_list
, sg
, sg_entries
, i
) {
2500 const unsigned int len
= sg_dma_len(sg
);
2503 * From the UFSHCI spec: "Data Byte Count (DBC): A '0'
2504 * based value that indicates the length, in bytes, of
2505 * the data block. A maximum of length of 256KB may
2506 * exist for any entry. Bits 1:0 of this field shall be
2507 * 11b to indicate Dword granularity. A value of '3'
2508 * indicates 4 bytes, '7' indicates 8 bytes, etc."
2510 WARN_ONCE(len
> 256 * 1024, "len = %#x\n", len
);
2511 prd
->size
= cpu_to_le32(len
- 1);
2512 prd
->addr
= cpu_to_le64(sg
->dma_address
);
2514 prd
= (void *)prd
+ ufshcd_sg_entry_size(hba
);
2517 lrbp
->utr_descriptor_ptr
->prd_table_length
= 0;
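/*
 * Worked example of the DBC encoding above: a 4096-byte SG segment is stored
 * as prd->size = cpu_to_le32(4096 - 1) = 0x0FFF, whose two least significant
 * bits are 11b as required for Dword granularity; the 256 KB spec limit
 * corresponds to a maximum encoded value of 0x3FFFF.
 */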
2522 * ufshcd_map_sg - Map scatter-gather list to prdt
2523 * @hba: per adapter instance
2524 * @lrbp: pointer to local reference block
2526 * Returns 0 in case of success, non-zero value in case of failure
2528 static int ufshcd_map_sg(struct ufs_hba
*hba
, struct ufshcd_lrb
*lrbp
)
2530 struct scsi_cmnd
*cmd
= lrbp
->cmd
;
2531 int sg_segments
= scsi_dma_map(cmd
);
2533 if (sg_segments
< 0)
2536 ufshcd_sgl_to_prdt(hba
, lrbp
, sg_segments
, scsi_sglist(cmd
));
/**
 * ufshcd_enable_intr - enable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == ufshci_version(1, 0)) {
		u32 rw;

		rw = set & INTERRUPT_MASK_RW_VER_10;
		set = rw | ((set ^ intrs) & intrs);
	} else {
		set |= intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}

/**
 * ufshcd_disable_intr - disable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == ufshci_version(1, 0)) {
		u32 rw;

		rw = (set & INTERRUPT_MASK_RW_VER_10) &
			~(intrs & INTERRUPT_MASK_RW_VER_10);
		set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
	} else {
		set &= ~intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}
2584 * ufshcd_prepare_req_desc_hdr - Fill UTP Transfer request descriptor header according to request
2585 * descriptor according to request
2586 * @lrbp: pointer to local reference block
2587 * @upiu_flags: flags required in the header
2588 * @cmd_dir: requests data direction
2589 * @ehs_length: Total EHS Length (in 32‐bytes units of all Extra Header Segments)
2591 static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb
*lrbp
, u8
*upiu_flags
,
2592 enum dma_data_direction cmd_dir
, int ehs_length
)
2594 struct utp_transfer_req_desc
*req_desc
= lrbp
->utr_descriptor_ptr
;
2600 if (cmd_dir
== DMA_FROM_DEVICE
) {
2601 data_direction
= UTP_DEVICE_TO_HOST
;
2602 *upiu_flags
= UPIU_CMD_FLAGS_READ
;
2603 } else if (cmd_dir
== DMA_TO_DEVICE
) {
2604 data_direction
= UTP_HOST_TO_DEVICE
;
2605 *upiu_flags
= UPIU_CMD_FLAGS_WRITE
;
2607 data_direction
= UTP_NO_DATA_TRANSFER
;
2608 *upiu_flags
= UPIU_CMD_FLAGS_NONE
;
2611 dword_0
= data_direction
| (lrbp
->command_type
<< UPIU_COMMAND_TYPE_OFFSET
) |
2614 dword_0
|= UTP_REQ_DESC_INT_CMD
;
2616 /* Prepare crypto related dwords */
2617 ufshcd_prepare_req_desc_hdr_crypto(lrbp
, &dword_0
, &dword_1
, &dword_3
);
2619 /* Transfer request descriptor header fields */
2620 req_desc
->header
.dword_0
= cpu_to_le32(dword_0
);
2621 req_desc
->header
.dword_1
= cpu_to_le32(dword_1
);
2623 * assigning invalid value for command status. Controller
2624 * updates OCS on command completion, with the command
2627 req_desc
->header
.dword_2
=
2628 cpu_to_le32(OCS_INVALID_COMMAND_STATUS
);
2629 req_desc
->header
.dword_3
= cpu_to_le32(dword_3
);
2631 req_desc
->prd_table_length
= 0;
2635 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
2637 * @lrbp: local reference block pointer
2638 * @upiu_flags: flags
2641 void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb
*lrbp
, u8 upiu_flags
)
2643 struct scsi_cmnd
*cmd
= lrbp
->cmd
;
2644 struct utp_upiu_req
*ucd_req_ptr
= lrbp
->ucd_req_ptr
;
2645 unsigned short cdb_len
;
2647 /* command descriptor fields */
2648 ucd_req_ptr
->header
.dword_0
= UPIU_HEADER_DWORD(
2649 UPIU_TRANSACTION_COMMAND
, upiu_flags
,
2650 lrbp
->lun
, lrbp
->task_tag
);
2651 ucd_req_ptr
->header
.dword_1
= UPIU_HEADER_DWORD(
2652 UPIU_COMMAND_SET_TYPE_SCSI
, 0, 0, 0);
2654 /* Total EHS length and Data segment length will be zero */
2655 ucd_req_ptr
->header
.dword_2
= 0;
2657 ucd_req_ptr
->sc
.exp_data_transfer_len
= cpu_to_be32(cmd
->sdb
.length
);
2659 cdb_len
= min_t(unsigned short, cmd
->cmd_len
, UFS_CDB_SIZE
);
2660 memset(ucd_req_ptr
->sc
.cdb
, 0, UFS_CDB_SIZE
);
2661 memcpy(ucd_req_ptr
->sc
.cdb
, cmd
->cmnd
, cdb_len
);
2663 memset(lrbp
->ucd_rsp_ptr
, 0, sizeof(struct utp_upiu_rsp
));
2667 * ufshcd_prepare_utp_query_req_upiu() - fill the utp_transfer_req_desc for query request
2669 * @lrbp: local reference block pointer
2670 * @upiu_flags: flags
2672 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba
*hba
,
2673 struct ufshcd_lrb
*lrbp
, u8 upiu_flags
)
2675 struct utp_upiu_req
*ucd_req_ptr
= lrbp
->ucd_req_ptr
;
2676 struct ufs_query
*query
= &hba
->dev_cmd
.query
;
2677 u16 len
= be16_to_cpu(query
->request
.upiu_req
.length
);
2679 /* Query request header */
2680 ucd_req_ptr
->header
.dword_0
= UPIU_HEADER_DWORD(
2681 UPIU_TRANSACTION_QUERY_REQ
, upiu_flags
,
2682 lrbp
->lun
, lrbp
->task_tag
);
2683 ucd_req_ptr
->header
.dword_1
= UPIU_HEADER_DWORD(
2684 0, query
->request
.query_func
, 0, 0);
2686 /* Data segment length only need for WRITE_DESC */
2687 if (query
->request
.upiu_req
.opcode
== UPIU_QUERY_OPCODE_WRITE_DESC
)
2688 ucd_req_ptr
->header
.dword_2
=
2689 UPIU_HEADER_DWORD(0, 0, (len
>> 8), (u8
)len
);
2691 ucd_req_ptr
->header
.dword_2
= 0;
2693 /* Copy the Query Request buffer as is */
2694 memcpy(&ucd_req_ptr
->qr
, &query
->request
.upiu_req
,
2697 /* Copy the Descriptor */
2698 if (query
->request
.upiu_req
.opcode
== UPIU_QUERY_OPCODE_WRITE_DESC
)
2699 memcpy(ucd_req_ptr
+ 1, query
->descriptor
, len
);
2701 memset(lrbp
->ucd_rsp_ptr
, 0, sizeof(struct utp_upiu_rsp
));
static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;

	memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 =
		UPIU_HEADER_DWORD(
			UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
	/* clear rest of the fields of basic header */
	ucd_req_ptr->header.dword_1 = 0;
	ucd_req_ptr->header.dword_2 = 0;

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
2722 * ufshcd_compose_devman_upiu - UFS Protocol Information Unit(UPIU)
2723 * for Device Management Purposes
2724 * @hba: per adapter instance
2725 * @lrbp: pointer to local reference block
2727 static int ufshcd_compose_devman_upiu(struct ufs_hba
*hba
,
2728 struct ufshcd_lrb
*lrbp
)
2733 if (hba
->ufs_version
<= ufshci_version(1, 1))
2734 lrbp
->command_type
= UTP_CMD_TYPE_DEV_MANAGE
;
2736 lrbp
->command_type
= UTP_CMD_TYPE_UFS_STORAGE
;
2738 ufshcd_prepare_req_desc_hdr(lrbp
, &upiu_flags
, DMA_NONE
, 0);
2739 if (hba
->dev_cmd
.type
== DEV_CMD_TYPE_QUERY
)
2740 ufshcd_prepare_utp_query_req_upiu(hba
, lrbp
, upiu_flags
);
2741 else if (hba
->dev_cmd
.type
== DEV_CMD_TYPE_NOP
)
2742 ufshcd_prepare_utp_nop_upiu(lrbp
);
2750 * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit(UPIU)
2752 * @hba: per adapter instance
2753 * @lrbp: pointer to local reference block
2755 static int ufshcd_comp_scsi_upiu(struct ufs_hba
*hba
, struct ufshcd_lrb
*lrbp
)
2760 if (hba
->ufs_version
<= ufshci_version(1, 1))
2761 lrbp
->command_type
= UTP_CMD_TYPE_SCSI
;
2763 lrbp
->command_type
= UTP_CMD_TYPE_UFS_STORAGE
;
2765 if (likely(lrbp
->cmd
)) {
2766 ufshcd_prepare_req_desc_hdr(lrbp
, &upiu_flags
, lrbp
->cmd
->sc_data_direction
, 0);
2767 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp
, upiu_flags
);
/**
 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
 * @upiu_wlun_id: UPIU W-LUN id
 *
 * Returns SCSI W-LUN id
 */
static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
{
	return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
}

static inline bool is_device_wlun(struct scsi_device *sdev)
{
	return sdev->lun ==
		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN);
}
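/*
 * Example of the W-LUN mapping above (constant values quoted from the UFS
 * and SAM definitions, shown here only for illustration): the UFS Device
 * well-known LU id 0xD0 becomes
 * (0xD0 & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE = 0x50 | 0xC100 = 0xC150,
 * i.e. the SCSI LUN the device W-LU is probed at.
 */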
2793 * Associate the UFS controller queue with the default and poll HCTX types.
2794 * Initialize the mq_map[] arrays.
2796 static void ufshcd_map_queues(struct Scsi_Host
*shost
)
2798 struct ufs_hba
*hba
= shost_priv(shost
);
2799 int i
, queue_offset
= 0;
2801 if (!is_mcq_supported(hba
)) {
2802 hba
->nr_queues
[HCTX_TYPE_DEFAULT
] = 1;
2803 hba
->nr_queues
[HCTX_TYPE_READ
] = 0;
2804 hba
->nr_queues
[HCTX_TYPE_POLL
] = 1;
2805 hba
->nr_hw_queues
= 1;
2808 for (i
= 0; i
< shost
->nr_maps
; i
++) {
2809 struct blk_mq_queue_map
*map
= &shost
->tag_set
.map
[i
];
2811 map
->nr_queues
= hba
->nr_queues
[i
];
2812 if (!map
->nr_queues
)
2814 map
->queue_offset
= queue_offset
;
2815 if (i
== HCTX_TYPE_POLL
&& !is_mcq_supported(hba
))
2816 map
->queue_offset
= 0;
2818 blk_mq_map_queues(map
);
2819 queue_offset
+= map
->nr_queues
;
2823 static void ufshcd_init_lrb(struct ufs_hba
*hba
, struct ufshcd_lrb
*lrb
, int i
)
2825 struct utp_transfer_cmd_desc
*cmd_descp
= (void *)hba
->ucdl_base_addr
+
2826 i
* sizeof_utp_transfer_cmd_desc(hba
);
2827 struct utp_transfer_req_desc
*utrdlp
= hba
->utrdl_base_addr
;
2828 dma_addr_t cmd_desc_element_addr
= hba
->ucdl_dma_addr
+
2829 i
* sizeof_utp_transfer_cmd_desc(hba
);
2830 u16 response_offset
= offsetof(struct utp_transfer_cmd_desc
,
2832 u16 prdt_offset
= offsetof(struct utp_transfer_cmd_desc
, prd_table
);
2834 lrb
->utr_descriptor_ptr
= utrdlp
+ i
;
2835 lrb
->utrd_dma_addr
= hba
->utrdl_dma_addr
+
2836 i
* sizeof(struct utp_transfer_req_desc
);
2837 lrb
->ucd_req_ptr
= (struct utp_upiu_req
*)cmd_descp
->command_upiu
;
2838 lrb
->ucd_req_dma_addr
= cmd_desc_element_addr
;
2839 lrb
->ucd_rsp_ptr
= (struct utp_upiu_rsp
*)cmd_descp
->response_upiu
;
2840 lrb
->ucd_rsp_dma_addr
= cmd_desc_element_addr
+ response_offset
;
2841 lrb
->ucd_prdt_ptr
= (struct ufshcd_sg_entry
*)cmd_descp
->prd_table
;
2842 lrb
->ucd_prdt_dma_addr
= cmd_desc_element_addr
+ prdt_offset
;
2846 * ufshcd_queuecommand - main entry point for SCSI requests
2847 * @host: SCSI host pointer
2848 * @cmd: command from SCSI Midlayer
2850 * Returns 0 for success, non-zero in case of failure
2852 static int ufshcd_queuecommand(struct Scsi_Host
*host
, struct scsi_cmnd
*cmd
)
2854 struct ufs_hba
*hba
= shost_priv(host
);
2855 int tag
= scsi_cmd_to_rq(cmd
)->tag
;
2856 struct ufshcd_lrb
*lrbp
;
2858 struct ufs_hw_queue
*hwq
= NULL
;
2860 WARN_ONCE(tag
< 0 || tag
>= hba
->nutrs
, "Invalid tag %d\n", tag
);
2863 * Allows the UFS error handler to wait for prior ufshcd_queuecommand()
2868 switch (hba
->ufshcd_state
) {
2869 case UFSHCD_STATE_OPERATIONAL
:
2871 case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL
:
2873 * SCSI error handler can call ->queuecommand() while UFS error
2874 * handler is in progress. Error interrupts could change the
2875 * state from UFSHCD_STATE_RESET to
2876 * UFSHCD_STATE_EH_SCHEDULED_NON_FATAL. Prevent requests
2877 * being issued in that case.
2879 if (ufshcd_eh_in_progress(hba
)) {
2880 err
= SCSI_MLQUEUE_HOST_BUSY
;
2884 case UFSHCD_STATE_EH_SCHEDULED_FATAL
:
2886 * pm_runtime_get_sync() is used at error handling preparation
2887 * stage. If a scsi cmd, e.g. the SSU cmd, is sent from hba's
2888 * PM ops, it can never be finished if we let SCSI layer keep
2889 * retrying it, which gets err handler stuck forever. Neither
2890 * can we let the scsi cmd pass through, because UFS is in bad
2891 * state, the scsi cmd may eventually time out, which will get
2892 * err handler blocked for too long. So, just fail the scsi cmd
2893 * sent from PM ops, err handler can recover PM error anyways.
2895 if (hba
->pm_op_in_progress
) {
2896 hba
->force_reset
= true;
2897 set_host_byte(cmd
, DID_BAD_TARGET
);
2902 case UFSHCD_STATE_RESET
:
2903 err
= SCSI_MLQUEUE_HOST_BUSY
;
2905 case UFSHCD_STATE_ERROR
:
2906 set_host_byte(cmd
, DID_ERROR
);
2911 hba
->req_abort_count
= 0;
2913 err
= ufshcd_hold(hba
, true);
2915 err
= SCSI_MLQUEUE_HOST_BUSY
;
2918 WARN_ON(ufshcd_is_clkgating_allowed(hba
) &&
2919 (hba
->clk_gating
.state
!= CLKS_ON
));
2921 lrbp
= &hba
->lrb
[tag
];
2924 lrbp
->task_tag
= tag
;
2925 lrbp
->lun
= ufshcd_scsi_to_upiu_lun(cmd
->device
->lun
);
2926 lrbp
->intr_cmd
= !ufshcd_is_intr_aggr_allowed(hba
);
2928 ufshcd_prepare_lrbp_crypto(scsi_cmd_to_rq(cmd
), lrbp
);
2930 lrbp
->req_abort_skip
= false;
2932 ufshpb_prep(hba
, lrbp
);
2934 ufshcd_comp_scsi_upiu(hba
, lrbp
);
2936 err
= ufshcd_map_sg(hba
, lrbp
);
2939 ufshcd_release(hba
);
2943 if (is_mcq_enabled(hba
))
2944 hwq
= ufshcd_mcq_req_to_hwq(hba
, scsi_cmd_to_rq(cmd
));
2946 ufshcd_send_command(hba
, tag
, hwq
);
2951 if (ufs_trigger_eh()) {
2952 unsigned long flags
;
2954 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
2955 ufshcd_schedule_eh_work(hba
);
2956 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
2962 static int ufshcd_compose_dev_cmd(struct ufs_hba
*hba
,
2963 struct ufshcd_lrb
*lrbp
, enum dev_cmd_type cmd_type
, int tag
)
2966 lrbp
->task_tag
= tag
;
2967 lrbp
->lun
= 0; /* device management cmd is not specific to any LUN */
2968 lrbp
->intr_cmd
= true; /* No interrupt aggregation */
2969 ufshcd_prepare_lrbp_crypto(NULL
, lrbp
);
2970 hba
->dev_cmd
.type
= cmd_type
;
2972 return ufshcd_compose_devman_upiu(hba
, lrbp
);
/*
 * Clear all the requests from the controller for which a bit has been set in
 * @mask and wait until the controller confirms that these requests have been
 * cleared.
 */
static int ufshcd_clear_cmds(struct ufs_hba *hba, u32 mask)
{
	unsigned long flags;

	/* clear outstanding transaction before retry */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_utrl_clear(hba, mask);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/*
	 * wait for h/w to clear corresponding bit in door-bell.
	 * max. wait is 1 sec.
	 */
	return ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
					mask, ~mask, 1000, 1000);
}

static int
ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	/* Get the UPIU response */
	query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
				UPIU_RSP_CODE_OFFSET;
	return query_res->response;
}
3009 * ufshcd_dev_cmd_completion() - handles device management command responses
3010 * @hba: per adapter instance
3011 * @lrbp: pointer to local reference block
3014 ufshcd_dev_cmd_completion(struct ufs_hba
*hba
, struct ufshcd_lrb
*lrbp
)
3019 hba
->ufs_stats
.last_hibern8_exit_tstamp
= ktime_set(0, 0);
3020 resp
= ufshcd_get_req_rsp(lrbp
->ucd_rsp_ptr
);
3023 case UPIU_TRANSACTION_NOP_IN
:
3024 if (hba
->dev_cmd
.type
!= DEV_CMD_TYPE_NOP
) {
3026 dev_err(hba
->dev
, "%s: unexpected response %x\n",
3030 case UPIU_TRANSACTION_QUERY_RSP
:
3031 err
= ufshcd_check_query_response(hba
, lrbp
);
3033 err
= ufshcd_copy_query_response(hba
, lrbp
);
3035 case UPIU_TRANSACTION_REJECT_UPIU
:
3036 /* TODO: handle Reject UPIU Response */
3038 dev_err(hba
->dev
, "%s: Reject UPIU not fully implemented\n",
3041 case UPIU_TRANSACTION_RESPONSE
:
3042 if (hba
->dev_cmd
.type
!= DEV_CMD_TYPE_RPMB
) {
3044 dev_err(hba
->dev
, "%s: unexpected response %x\n", __func__
, resp
);
3049 dev_err(hba
->dev
, "%s: Invalid device management cmd response: %x\n",
3057 static int ufshcd_wait_for_dev_cmd(struct ufs_hba
*hba
,
3058 struct ufshcd_lrb
*lrbp
, int max_timeout
)
3060 unsigned long time_left
= msecs_to_jiffies(max_timeout
);
3061 unsigned long flags
;
3066 time_left
= wait_for_completion_timeout(hba
->dev_cmd
.complete
,
3069 if (likely(time_left
)) {
3071 * The completion handler called complete() and the caller of
3072 * this function still owns the @lrbp tag so the code below does
3073 * not trigger any race conditions.
3075 hba
->dev_cmd
.complete
= NULL
;
3076 err
= ufshcd_get_tr_ocs(lrbp
, hba
->dev_cmd
.cqe
);
3078 err
= ufshcd_dev_cmd_completion(hba
, lrbp
);
3081 dev_dbg(hba
->dev
, "%s: dev_cmd request timedout, tag %d\n",
3082 __func__
, lrbp
->task_tag
);
3083 if (ufshcd_clear_cmds(hba
, 1U << lrbp
->task_tag
) == 0) {
3084 /* successfully cleared the command, retry if needed */
3087 * Since clearing the command succeeded we also need to
3088 * clear the task tag bit from the outstanding_reqs
3091 spin_lock_irqsave(&hba
->outstanding_lock
, flags
);
3092 pending
= test_bit(lrbp
->task_tag
,
3093 &hba
->outstanding_reqs
);
3095 hba
->dev_cmd
.complete
= NULL
;
3096 __clear_bit(lrbp
->task_tag
,
3097 &hba
->outstanding_reqs
);
3099 spin_unlock_irqrestore(&hba
->outstanding_lock
, flags
);
3103 * The completion handler ran while we tried to
3104 * clear the command.
3110 dev_err(hba
->dev
, "%s: failed to clear tag %d\n",
3111 __func__
, lrbp
->task_tag
);
3113 spin_lock_irqsave(&hba
->outstanding_lock
, flags
);
3114 pending
= test_bit(lrbp
->task_tag
,
3115 &hba
->outstanding_reqs
);
3117 hba
->dev_cmd
.complete
= NULL
;
3118 spin_unlock_irqrestore(&hba
->outstanding_lock
, flags
);
3122 * The completion handler ran while we tried to
3123 * clear the command.
3135 * ufshcd_exec_dev_cmd - API for sending device management requests
3137 * @cmd_type: specifies the type (NOP, Query...)
3138 * @timeout: timeout in milliseconds
3140 * NOTE: Since there is only one available tag for device management commands,
3141 * it is expected you hold the hba->dev_cmd.lock mutex.
3143 static int ufshcd_exec_dev_cmd(struct ufs_hba
*hba
,
3144 enum dev_cmd_type cmd_type
, int timeout
)
3146 DECLARE_COMPLETION_ONSTACK(wait
);
3147 const u32 tag
= hba
->reserved_slot
;
3148 struct ufshcd_lrb
*lrbp
;
3151 /* Protects use of hba->reserved_slot. */
3152 lockdep_assert_held(&hba
->dev_cmd
.lock
);
3154 down_read(&hba
->clk_scaling_lock
);
3156 lrbp
= &hba
->lrb
[tag
];
3158 err
= ufshcd_compose_dev_cmd(hba
, lrbp
, cmd_type
, tag
);
3162 hba
->dev_cmd
.complete
= &wait
;
3163 hba
->dev_cmd
.cqe
= NULL
;
3165 ufshcd_add_query_upiu_trace(hba
, UFS_QUERY_SEND
, lrbp
->ucd_req_ptr
);
3167 ufshcd_send_command(hba
, tag
, hba
->dev_cmd_queue
);
3168 err
= ufshcd_wait_for_dev_cmd(hba
, lrbp
, timeout
);
3169 ufshcd_add_query_upiu_trace(hba
, err
? UFS_QUERY_ERR
: UFS_QUERY_COMP
,
3170 (struct utp_upiu_req
*)lrbp
->ucd_rsp_ptr
);
3173 up_read(&hba
->clk_scaling_lock
);
/**
 * ufshcd_init_query() - init the query response and request parameters
 * @hba: per-adapter instance
 * @request: address of the request pointer to be initialized
 * @response: address of the response pointer to be initialized
 * @opcode: operation to perform
 * @idn: flag idn to access
 * @index: LU number to access
 * @selector: query/flag/descriptor further identification
 */
static inline void ufshcd_init_query(struct ufs_hba *hba,
		struct ufs_query_req **request, struct ufs_query_res **response,
		enum query_opcode opcode, u8 idn, u8 index, u8 selector)
{
	*request = &hba->dev_cmd.query.request;
	*response = &hba->dev_cmd.query.response;
	memset(*request, 0, sizeof(struct ufs_query_req));
	memset(*response, 0, sizeof(struct ufs_query_res));
	(*request)->upiu_req.opcode = opcode;
	(*request)->upiu_req.idn = idn;
	(*request)->upiu_req.index = index;
	(*request)->upiu_req.selector = selector;
}
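/*
 * The query helpers below all follow the same shape; roughly (sketch of the
 * common sequence, not a literal copy of any one caller):
 *
 *	mutex_lock(&hba->dev_cmd.lock);
 *	ufshcd_init_query(hba, &request, &response, opcode, idn, index, 0);
 *	request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
 *	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
 *	mutex_unlock(&hba->dev_cmd.lock);
 *
 * with the result extracted from *response on success.
 */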
3201 static int ufshcd_query_flag_retry(struct ufs_hba
*hba
,
3202 enum query_opcode opcode
, enum flag_idn idn
, u8 index
, bool *flag_res
)
3207 for (retries
= 0; retries
< QUERY_REQ_RETRIES
; retries
++) {
3208 ret
= ufshcd_query_flag(hba
, opcode
, idn
, index
, flag_res
);
3211 "%s: failed with error %d, retries %d\n",
3212 __func__
, ret
, retries
);
3219 "%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
3220 __func__
, opcode
, idn
, ret
, retries
);
3225 * ufshcd_query_flag() - API function for sending flag query requests
3226 * @hba: per-adapter instance
3227 * @opcode: flag query to perform
3228 * @idn: flag idn to access
3229 * @index: flag index to access
3230 * @flag_res: the flag value after the query request completes
3232 * Returns 0 for success, non-zero in case of failure
3234 int ufshcd_query_flag(struct ufs_hba
*hba
, enum query_opcode opcode
,
3235 enum flag_idn idn
, u8 index
, bool *flag_res
)
3237 struct ufs_query_req
*request
= NULL
;
3238 struct ufs_query_res
*response
= NULL
;
3239 int err
, selector
= 0;
3240 int timeout
= QUERY_REQ_TIMEOUT
;
3244 ufshcd_hold(hba
, false);
3245 mutex_lock(&hba
->dev_cmd
.lock
);
3246 ufshcd_init_query(hba
, &request
, &response
, opcode
, idn
, index
,
3250 case UPIU_QUERY_OPCODE_SET_FLAG
:
3251 case UPIU_QUERY_OPCODE_CLEAR_FLAG
:
3252 case UPIU_QUERY_OPCODE_TOGGLE_FLAG
:
3253 request
->query_func
= UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST
;
3255 case UPIU_QUERY_OPCODE_READ_FLAG
:
3256 request
->query_func
= UPIU_QUERY_FUNC_STANDARD_READ_REQUEST
;
3258 /* No dummy reads */
3259 dev_err(hba
->dev
, "%s: Invalid argument for read request\n",
3267 "%s: Expected query flag opcode but got = %d\n",
3273 err
= ufshcd_exec_dev_cmd(hba
, DEV_CMD_TYPE_QUERY
, timeout
);
3277 "%s: Sending flag query for idn %d failed, err = %d\n",
3278 __func__
, idn
, err
);
3283 *flag_res
= (be32_to_cpu(response
->upiu_res
.value
) &
3284 MASK_QUERY_UPIU_FLAG_LOC
) & 0x1;
3287 mutex_unlock(&hba
->dev_cmd
.lock
);
3288 ufshcd_release(hba
);
3293 * ufshcd_query_attr - API function for sending attribute requests
3294 * @hba: per-adapter instance
3295 * @opcode: attribute opcode
3296 * @idn: attribute idn to access
3297 * @index: index field
3298 * @selector: selector field
3299 * @attr_val: the attribute value after the query request completes
3301 * Returns 0 for success, non-zero in case of failure
3303 int ufshcd_query_attr(struct ufs_hba
*hba
, enum query_opcode opcode
,
3304 enum attr_idn idn
, u8 index
, u8 selector
, u32
*attr_val
)
3306 struct ufs_query_req
*request
= NULL
;
3307 struct ufs_query_res
*response
= NULL
;
3313 dev_err(hba
->dev
, "%s: attribute value required for opcode 0x%x\n",
3318 ufshcd_hold(hba
, false);
3320 mutex_lock(&hba
->dev_cmd
.lock
);
3321 ufshcd_init_query(hba
, &request
, &response
, opcode
, idn
, index
,
3325 case UPIU_QUERY_OPCODE_WRITE_ATTR
:
3326 request
->query_func
= UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST
;
3327 request
->upiu_req
.value
= cpu_to_be32(*attr_val
);
3329 case UPIU_QUERY_OPCODE_READ_ATTR
:
3330 request
->query_func
= UPIU_QUERY_FUNC_STANDARD_READ_REQUEST
;
3333 dev_err(hba
->dev
, "%s: Expected query attr opcode but got = 0x%.2x\n",
3339 err
= ufshcd_exec_dev_cmd(hba
, DEV_CMD_TYPE_QUERY
, QUERY_REQ_TIMEOUT
);
3342 dev_err(hba
->dev
, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3343 __func__
, opcode
, idn
, index
, err
);
3347 *attr_val
= be32_to_cpu(response
->upiu_res
.value
);
3350 mutex_unlock(&hba
->dev_cmd
.lock
);
3351 ufshcd_release(hba
);
3356 * ufshcd_query_attr_retry() - API function for sending query
3357 * attribute with retries
3358 * @hba: per-adapter instance
3359 * @opcode: attribute opcode
3360 * @idn: attribute idn to access
3361 * @index: index field
3362 * @selector: selector field
3363 * @attr_val: the attribute value after the query request
3366 * Returns 0 for success, non-zero in case of failure
3368 int ufshcd_query_attr_retry(struct ufs_hba
*hba
,
3369 enum query_opcode opcode
, enum attr_idn idn
, u8 index
, u8 selector
,
3375 for (retries
= QUERY_REQ_RETRIES
; retries
> 0; retries
--) {
3376 ret
= ufshcd_query_attr(hba
, opcode
, idn
, index
,
3377 selector
, attr_val
);
3379 dev_dbg(hba
->dev
, "%s: failed with error %d, retries %d\n",
3380 __func__
, ret
, retries
);
3387 "%s: query attribute, idn %d, failed with error %d after %d retries\n",
3388 __func__
, idn
, ret
, QUERY_REQ_RETRIES
);
3392 static int __ufshcd_query_descriptor(struct ufs_hba
*hba
,
3393 enum query_opcode opcode
, enum desc_idn idn
, u8 index
,
3394 u8 selector
, u8
*desc_buf
, int *buf_len
)
3396 struct ufs_query_req
*request
= NULL
;
3397 struct ufs_query_res
*response
= NULL
;
3403 dev_err(hba
->dev
, "%s: descriptor buffer required for opcode 0x%x\n",
3408 if (*buf_len
< QUERY_DESC_MIN_SIZE
|| *buf_len
> QUERY_DESC_MAX_SIZE
) {
3409 dev_err(hba
->dev
, "%s: descriptor buffer size (%d) is out of range\n",
3410 __func__
, *buf_len
);
3414 ufshcd_hold(hba
, false);
3416 mutex_lock(&hba
->dev_cmd
.lock
);
3417 ufshcd_init_query(hba
, &request
, &response
, opcode
, idn
, index
,
3419 hba
->dev_cmd
.query
.descriptor
= desc_buf
;
3420 request
->upiu_req
.length
= cpu_to_be16(*buf_len
);
3423 case UPIU_QUERY_OPCODE_WRITE_DESC
:
3424 request
->query_func
= UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST
;
3426 case UPIU_QUERY_OPCODE_READ_DESC
:
3427 request
->query_func
= UPIU_QUERY_FUNC_STANDARD_READ_REQUEST
;
3431 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
3437 err
= ufshcd_exec_dev_cmd(hba
, DEV_CMD_TYPE_QUERY
, QUERY_REQ_TIMEOUT
);
3440 dev_err(hba
->dev
, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3441 __func__
, opcode
, idn
, index
, err
);
3445 *buf_len
= be16_to_cpu(response
->upiu_res
.length
);
3448 hba
->dev_cmd
.query
.descriptor
= NULL
;
3449 mutex_unlock(&hba
->dev_cmd
.lock
);
3450 ufshcd_release(hba
);
3455 * ufshcd_query_descriptor_retry - API function for sending descriptor requests
3456 * @hba: per-adapter instance
3457 * @opcode: attribute opcode
3458 * @idn: attribute idn to access
3459 * @index: index field
3460 * @selector: selector field
3461 * @desc_buf: the buffer that contains the descriptor
3462 * @buf_len: length parameter passed to the device
3464 * Returns 0 for success, non-zero in case of failure.
3465 * The buf_len parameter will contain, on return, the length parameter
3466 * received on the response.
3468 int ufshcd_query_descriptor_retry(struct ufs_hba
*hba
,
3469 enum query_opcode opcode
,
3470 enum desc_idn idn
, u8 index
,
3472 u8
*desc_buf
, int *buf_len
)
3477 for (retries
= QUERY_REQ_RETRIES
; retries
> 0; retries
--) {
3478 err
= __ufshcd_query_descriptor(hba
, opcode
, idn
, index
,
3479 selector
, desc_buf
, buf_len
);
3480 if (!err
|| err
== -EINVAL
)
3488 * ufshcd_read_desc_param - read the specified descriptor parameter
3489 * @hba: Pointer to adapter instance
3490 * @desc_id: descriptor idn value
3491 * @desc_index: descriptor index
3492 * @param_offset: offset of the parameter to read
3493 * @param_read_buf: pointer to buffer where parameter would be read
3494 * @param_size: sizeof(param_read_buf)
3496 * Return 0 in case of success, non-zero otherwise
3498 int ufshcd_read_desc_param(struct ufs_hba
*hba
,
3499 enum desc_idn desc_id
,
3507 int buff_len
= QUERY_DESC_MAX_SIZE
;
3508 bool is_kmalloc
= true;
3511 if (desc_id
>= QUERY_DESC_IDN_MAX
|| !param_size
)
3514 /* Check whether we need temp memory */
3515 if (param_offset
!= 0 || param_size
< buff_len
) {
3516 desc_buf
= kzalloc(buff_len
, GFP_KERNEL
);
3520 desc_buf
= param_read_buf
;
3524 /* Request for full descriptor */
3525 ret
= ufshcd_query_descriptor_retry(hba
, UPIU_QUERY_OPCODE_READ_DESC
,
3526 desc_id
, desc_index
, 0,
3527 desc_buf
, &buff_len
);
3529 dev_err(hba
->dev
, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n",
3530 __func__
, desc_id
, desc_index
, param_offset
, ret
);
3534 /* Update descriptor length */
3535 buff_len
= desc_buf
[QUERY_DESC_LENGTH_OFFSET
];
3537 if (param_offset
>= buff_len
) {
3538 dev_err(hba
->dev
, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n",
3539 __func__
, param_offset
, desc_id
, buff_len
);
3545 if (desc_buf
[QUERY_DESC_DESC_TYPE_OFFSET
] != desc_id
) {
3546 dev_err(hba
->dev
, "%s: invalid desc_id %d in descriptor header\n",
3547 __func__
, desc_buf
[QUERY_DESC_DESC_TYPE_OFFSET
]);
3553 /* Make sure we don't copy more data than available */
3554 if (param_offset
>= buff_len
)
3557 memcpy(param_read_buf
, &desc_buf
[param_offset
],
3558 min_t(u32
, param_size
, buff_len
- param_offset
));
/**
 * struct uc_string_id - unicode string
 *
 * @len: size of this descriptor inclusive
 * @type: descriptor type
 * @uc: unicode string character
 */
struct uc_string_id {
	u8 len;
	u8 type;
	wchar_t uc[];
} __packed;

/* replace non-printable or non-ASCII characters with spaces */
static inline char ufshcd_remove_non_printable(u8 ch)
{
	return (ch >= 0x20 && ch <= 0x7e) ? ch : ' ';
}
3586 * ufshcd_read_string_desc - read string descriptor
3587 * @hba: pointer to adapter instance
3588 * @desc_index: descriptor index
3589 * @buf: pointer to buffer where descriptor would be read,
3590 * the caller should free the memory.
3591 * @ascii: if true convert from unicode to ascii characters
3592 * null terminated string.
3595 * * string size on success.
3596 * * -ENOMEM: on allocation failure
3597 * * -EINVAL: on a wrong parameter
3599 int ufshcd_read_string_desc(struct ufs_hba
*hba
, u8 desc_index
,
3600 u8
**buf
, bool ascii
)
3602 struct uc_string_id
*uc_str
;
3609 uc_str
= kzalloc(QUERY_DESC_MAX_SIZE
, GFP_KERNEL
);
3613 ret
= ufshcd_read_desc_param(hba
, QUERY_DESC_IDN_STRING
, desc_index
, 0,
3614 (u8
*)uc_str
, QUERY_DESC_MAX_SIZE
);
3616 dev_err(hba
->dev
, "Reading String Desc failed after %d retries. err = %d\n",
3617 QUERY_REQ_RETRIES
, ret
);
3622 if (uc_str
->len
<= QUERY_DESC_HDR_SIZE
) {
3623 dev_dbg(hba
->dev
, "String Desc is of zero length\n");
3632 /* remove header and divide by 2 to move from UTF16 to UTF8 */
3633 ascii_len
= (uc_str
->len
- QUERY_DESC_HDR_SIZE
) / 2 + 1;
3634 str
= kzalloc(ascii_len
, GFP_KERNEL
);
3641 * the descriptor contains string in UTF16 format
3642 * we need to convert to utf-8 so it can be displayed
3644 ret
= utf16s_to_utf8s(uc_str
->uc
,
3645 uc_str
->len
- QUERY_DESC_HDR_SIZE
,
3646 UTF16_BIG_ENDIAN
, str
, ascii_len
);
3648 /* replace non-printable or non-ASCII characters with spaces */
3649 for (i
= 0; i
< ret
; i
++)
3650 str
[i
] = ufshcd_remove_non_printable(str
[i
]);
3655 str
= kmemdup(uc_str
, uc_str
->len
, GFP_KERNEL
);
3669 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
3670 * @hba: Pointer to adapter instance
3672 * @param_offset: offset of the parameter to read
3673 * @param_read_buf: pointer to buffer where parameter would be read
3674 * @param_size: sizeof(param_read_buf)
3676 * Return 0 in case of success, non-zero otherwise
3678 static inline int ufshcd_read_unit_desc_param(struct ufs_hba
*hba
,
3680 enum unit_desc_param param_offset
,
3685 * Unit descriptors are only available for general purpose LUs (LUN id
3686 * from 0 to 7) and RPMB Well known LU.
3688 if (!ufs_is_valid_unit_desc_lun(&hba
->dev_info
, lun
))
3691 return ufshcd_read_desc_param(hba
, QUERY_DESC_IDN_UNIT
, lun
,
3692 param_offset
, param_read_buf
, param_size
);
3695 static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba
*hba
)
3698 u32 gating_wait
= UFSHCD_REF_CLK_GATING_WAIT_US
;
3700 if (hba
->dev_info
.wspecversion
>= 0x300) {
3701 err
= ufshcd_query_attr_retry(hba
, UPIU_QUERY_OPCODE_READ_ATTR
,
3702 QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME
, 0, 0,
3705 dev_err(hba
->dev
, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n",
3708 if (gating_wait
== 0) {
3709 gating_wait
= UFSHCD_REF_CLK_GATING_WAIT_US
;
3710 dev_err(hba
->dev
, "Undefined ref clk gating wait time, use default %uus\n",
3714 hba
->dev_info
.clk_gating_wait_us
= gating_wait
;
3721 * ufshcd_memory_alloc - allocate memory for host memory space data structures
3722 * @hba: per adapter instance
3724 * 1. Allocate DMA memory for Command Descriptor array
3725 * Each command descriptor consist of Command UPIU, Response UPIU and PRDT
3726 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
3727 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
3729 * 4. Allocate memory for local reference block(lrb).
3731 * Returns 0 for success, non-zero in case of failure
3733 static int ufshcd_memory_alloc(struct ufs_hba
*hba
)
3735 size_t utmrdl_size
, utrdl_size
, ucdl_size
;
3737 /* Allocate memory for UTP command descriptors */
3738 ucdl_size
= sizeof_utp_transfer_cmd_desc(hba
) * hba
->nutrs
;
3739 hba
->ucdl_base_addr
= dmam_alloc_coherent(hba
->dev
,
3741 &hba
->ucdl_dma_addr
,
3745 * UFSHCI requires UTP command descriptor to be 128 byte aligned.
3747 if (!hba
->ucdl_base_addr
||
3748 WARN_ON(hba
->ucdl_dma_addr
& (128 - 1))) {
3750 "Command Descriptor Memory allocation failed\n");
3755 * Allocate memory for UTP Transfer descriptors
3756 * UFSHCI requires 1024 byte alignment of UTRD
3758 utrdl_size
= (sizeof(struct utp_transfer_req_desc
) * hba
->nutrs
);
3759 hba
->utrdl_base_addr
= dmam_alloc_coherent(hba
->dev
,
3761 &hba
->utrdl_dma_addr
,
3763 if (!hba
->utrdl_base_addr
||
3764 WARN_ON(hba
->utrdl_dma_addr
& (1024 - 1))) {
3766 "Transfer Descriptor Memory allocation failed\n");
3771 * Skip utmrdl allocation; it may have been
3772 * allocated during first pass and not released during
3773 * MCQ memory allocation.
3774 * See ufshcd_release_sdb_queue() and ufshcd_config_mcq()
3776 if (hba
->utmrdl_base_addr
)
3779 * Allocate memory for UTP Task Management descriptors
3780 * UFSHCI requires 1024 byte alignment of UTMRD
3782 utmrdl_size
= sizeof(struct utp_task_req_desc
) * hba
->nutmrs
;
3783 hba
->utmrdl_base_addr
= dmam_alloc_coherent(hba
->dev
,
3785 &hba
->utmrdl_dma_addr
,
3787 if (!hba
->utmrdl_base_addr
||
3788 WARN_ON(hba
->utmrdl_dma_addr
& (1024 - 1))) {
3790 "Task Management Descriptor Memory allocation failed\n");
3795 /* Allocate memory for local reference block */
3796 hba
->lrb
= devm_kcalloc(hba
->dev
,
3797 hba
->nutrs
, sizeof(struct ufshcd_lrb
),
3800 dev_err(hba
->dev
, "LRB Memory allocation failed\n");
3809 * ufshcd_host_memory_configure - configure local reference block with
3811 * @hba: per adapter instance
3813 * Configure Host memory space
3814 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
3816 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
3818 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
3819 * into local reference block.
3821 static void ufshcd_host_memory_configure(struct ufs_hba
*hba
)
3823 struct utp_transfer_req_desc
*utrdlp
;
3824 dma_addr_t cmd_desc_dma_addr
;
3825 dma_addr_t cmd_desc_element_addr
;
3826 u16 response_offset
;
3831 utrdlp
= hba
->utrdl_base_addr
;
3834 offsetof(struct utp_transfer_cmd_desc
, response_upiu
);
3836 offsetof(struct utp_transfer_cmd_desc
, prd_table
);
3838 cmd_desc_size
= sizeof_utp_transfer_cmd_desc(hba
);
3839 cmd_desc_dma_addr
= hba
->ucdl_dma_addr
;
3841 for (i
= 0; i
< hba
->nutrs
; i
++) {
3842 /* Configure UTRD with command descriptor base address */
3843 cmd_desc_element_addr
=
3844 (cmd_desc_dma_addr
+ (cmd_desc_size
* i
));
3845 utrdlp
[i
].command_desc_base_addr_lo
=
3846 cpu_to_le32(lower_32_bits(cmd_desc_element_addr
));
3847 utrdlp
[i
].command_desc_base_addr_hi
=
3848 cpu_to_le32(upper_32_bits(cmd_desc_element_addr
));
3850 /* Response upiu and prdt offset should be in double words */
3851 if (hba
->quirks
& UFSHCD_QUIRK_PRDT_BYTE_GRAN
) {
3852 utrdlp
[i
].response_upiu_offset
=
3853 cpu_to_le16(response_offset
);
3854 utrdlp
[i
].prd_table_offset
=
3855 cpu_to_le16(prdt_offset
);
3856 utrdlp
[i
].response_upiu_length
=
3857 cpu_to_le16(ALIGNED_UPIU_SIZE
);
3859 utrdlp
[i
].response_upiu_offset
=
3860 cpu_to_le16(response_offset
>> 2);
3861 utrdlp
[i
].prd_table_offset
=
3862 cpu_to_le16(prdt_offset
>> 2);
3863 utrdlp
[i
].response_upiu_length
=
3864 cpu_to_le16(ALIGNED_UPIU_SIZE
>> 2);
3867 ufshcd_init_lrb(hba
, &hba
->lrb
[i
], i
);
/**
 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
 * @hba: per adapter instance
 *
 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
 * in order to initialize the Unipro link startup procedure.
 * Once the Unipro links are up, the device connected to the controller
 * is detected.
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_dme_link_startup(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_dbg(hba->dev,
			"dme-link-startup: error code %d\n", ret);
	return ret;
}

/**
 * ufshcd_dme_reset - UIC command for DME_RESET
 * @hba: per adapter instance
 *
 * DME_RESET command is issued in order to reset UniPro stack.
 * This function now deals with cold reset.
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_dme_reset(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_RESET;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_err(hba->dev,
			"dme-reset: error code %d\n", ret);
	return ret;
}

int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
			       int agreed_gear,
			       int adapt_val)
{
	int ret;

	if (agreed_gear < UFS_HS_G4)
		adapt_val = PA_NO_ADAPT;

	ret = ufshcd_dme_set(hba,
			     UIC_ARG_MIB(PA_TXHSADAPTTYPE),
			     adapt_val);
	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt);

/**
 * ufshcd_dme_enable - UIC command for DME_ENABLE
 * @hba: per adapter instance
 *
 * DME_ENABLE command is issued in order to enable UniPro stack.
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_dme_enable(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_ENABLE;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_err(hba->dev,
			"dme-enable: error code %d\n", ret);
	return ret;
}
3958 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba
*hba
)
3960 #define MIN_DELAY_BEFORE_DME_CMDS_US 1000
3961 unsigned long min_sleep_time_us
;
3963 if (!(hba
->quirks
& UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
))
3967 * last_dme_cmd_tstamp will be 0 only for 1st call to
3970 if (unlikely(!ktime_to_us(hba
->last_dme_cmd_tstamp
))) {
3971 min_sleep_time_us
= MIN_DELAY_BEFORE_DME_CMDS_US
;
3973 unsigned long delta
=
3974 (unsigned long) ktime_to_us(
3975 ktime_sub(ktime_get(),
3976 hba
->last_dme_cmd_tstamp
));
3978 if (delta
< MIN_DELAY_BEFORE_DME_CMDS_US
)
3980 MIN_DELAY_BEFORE_DME_CMDS_US
- delta
;
3982 return; /* no more delay required */
3985 /* allow sleep for extra 50us if needed */
3986 usleep_range(min_sleep_time_us
, min_sleep_time_us
+ 50);
3990 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
3991 * @hba: per adapter instance
3992 * @attr_sel: uic command argument1
3993 * @attr_set: attribute set type as uic command argument2
3994 * @mib_val: setting value as uic command argument3
3995 * @peer: indicate whether peer or local
3997 * Returns 0 on success, non-zero value on failure
3999 int ufshcd_dme_set_attr(struct ufs_hba
*hba
, u32 attr_sel
,
4000 u8 attr_set
, u32 mib_val
, u8 peer
)
4002 struct uic_command uic_cmd
= {0};
4003 static const char *const action
[] = {
4007 const char *set
= action
[!!peer
];
4009 int retries
= UFS_UIC_COMMAND_RETRIES
;
4011 uic_cmd
.command
= peer
?
4012 UIC_CMD_DME_PEER_SET
: UIC_CMD_DME_SET
;
4013 uic_cmd
.argument1
= attr_sel
;
4014 uic_cmd
.argument2
= UIC_ARG_ATTR_TYPE(attr_set
);
4015 uic_cmd
.argument3
= mib_val
;
4018 /* for peer attributes we retry upon failure */
4019 ret
= ufshcd_send_uic_cmd(hba
, &uic_cmd
);
4021 dev_dbg(hba
->dev
, "%s: attr-id 0x%x val 0x%x error code %d\n",
4022 set
, UIC_GET_ATTR_ID(attr_sel
), mib_val
, ret
);
4023 } while (ret
&& peer
&& --retries
);
4026 dev_err(hba
->dev
, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
4027 set
, UIC_GET_ATTR_ID(attr_sel
), mib_val
,
4028 UFS_UIC_COMMAND_RETRIES
- retries
);
4032 EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr
);
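/*
 * Most callers do not invoke ufshcd_dme_set_attr() directly but go through
 * the ufshcd_dme_set()/ufshcd_dme_peer_set() convenience wrappers from the
 * ufshcd header, e.g. (illustrative only):
 *
 *	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE), adapt_val);
 *
 * For peer (device side) attributes the function above retries up to
 * UFS_UIC_COMMAND_RETRIES times before giving up.
 */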
4035 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
4036 * @hba: per adapter instance
4037 * @attr_sel: uic command argument1
4038 * @mib_val: the value of the attribute as returned by the UIC command
4039 * @peer: indicate whether peer or local
4041 * Returns 0 on success, non-zero value on failure
4043 int ufshcd_dme_get_attr(struct ufs_hba
*hba
, u32 attr_sel
,
4044 u32
*mib_val
, u8 peer
)
4046 struct uic_command uic_cmd
= {0};
4047 static const char *const action
[] = {
4051 const char *get
= action
[!!peer
];
4053 int retries
= UFS_UIC_COMMAND_RETRIES
;
4054 struct ufs_pa_layer_attr orig_pwr_info
;
4055 struct ufs_pa_layer_attr temp_pwr_info
;
4056 bool pwr_mode_change
= false;
4058 if (peer
&& (hba
->quirks
& UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
)) {
4059 orig_pwr_info
= hba
->pwr_info
;
4060 temp_pwr_info
= orig_pwr_info
;
4062 if (orig_pwr_info
.pwr_tx
== FAST_MODE
||
4063 orig_pwr_info
.pwr_rx
== FAST_MODE
) {
4064 temp_pwr_info
.pwr_tx
= FASTAUTO_MODE
;
4065 temp_pwr_info
.pwr_rx
= FASTAUTO_MODE
;
4066 pwr_mode_change
= true;
4067 } else if (orig_pwr_info
.pwr_tx
== SLOW_MODE
||
4068 orig_pwr_info
.pwr_rx
== SLOW_MODE
) {
4069 temp_pwr_info
.pwr_tx
= SLOWAUTO_MODE
;
4070 temp_pwr_info
.pwr_rx
= SLOWAUTO_MODE
;
4071 pwr_mode_change
= true;
4073 if (pwr_mode_change
) {
4074 ret
= ufshcd_change_power_mode(hba
, &temp_pwr_info
);
4080 uic_cmd
.command
= peer
?
4081 UIC_CMD_DME_PEER_GET
: UIC_CMD_DME_GET
;
4082 uic_cmd
.argument1
= attr_sel
;
4085 /* for peer attributes we retry upon failure */
4086 ret
= ufshcd_send_uic_cmd(hba
, &uic_cmd
);
4088 dev_dbg(hba
->dev
, "%s: attr-id 0x%x error code %d\n",
4089 get
, UIC_GET_ATTR_ID(attr_sel
), ret
);
4090 } while (ret
&& peer
&& --retries
);
4093 dev_err(hba
->dev
, "%s: attr-id 0x%x failed %d retries\n",
4094 get
, UIC_GET_ATTR_ID(attr_sel
),
4095 UFS_UIC_COMMAND_RETRIES
- retries
);
4097 if (mib_val
&& !ret
)
4098 *mib_val
= uic_cmd
.argument3
;
4100 if (peer
&& (hba
->quirks
& UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
)
4102 ufshcd_change_power_mode(hba
, &orig_pwr_info
);
4106 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr
);
/**
 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
 * state) and waits for it to take effect.
 *
 * @hba: per adapter instance
 * @cmd: UIC command to execute
 *
 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
 * DME_HIBERNATE_EXIT commands take some time to take effect on both the host
 * and device UniPro link, hence their final completion is indicated by
 * dedicated status bits in the Interrupt Status register (UPMS, UHES, UHXS) in
 * addition to the normal UIC command completion Status (UCCS). This function
 * only returns after the relevant status bits indicate the completion.
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
{
	DECLARE_COMPLETION_ONSTACK(uic_async_done);
	unsigned long flags;
	u8 status;
	int ret;
	bool reenable_intr = false;

	mutex_lock(&hba->uic_cmd_mutex);
	ufshcd_add_delay_before_dme_cmd(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (ufshcd_is_link_broken(hba)) {
		ret = -ENOLINK;
		goto out_unlock;
	}
	hba->uic_async_done = &uic_async_done;
	if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
		ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
		/*
		 * Make sure UIC command completion interrupt is disabled before
		 * issuing UIC command.
		 */
		wmb();
		reenable_intr = true;
	}
	ret = __ufshcd_send_uic_cmd(hba, cmd, false);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	if (ret) {
		dev_err(hba->dev,
			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
			cmd->command, cmd->argument3, ret);
		goto out;
	}

	if (!wait_for_completion_timeout(hba->uic_async_done,
					 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
		dev_err(hba->dev,
			"pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
			cmd->command, cmd->argument3);

		if (!cmd->cmd_active) {
			dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n",
				__func__);
			goto check_upmcrs;
		}

		ret = -ETIMEDOUT;
		goto out;
	}

check_upmcrs:
	status = ufshcd_get_upmcrs(hba);
	if (status != PWR_LOCAL) {
		dev_err(hba->dev,
			"pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
			cmd->command, status);
		ret = (status != PWR_OK) ? status : -1;
	}
out:
	if (ret) {
		ufshcd_print_host_state(hba);
		ufshcd_print_pwr_info(hba);
		ufshcd_print_evt_hist(hba);
	}

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->active_uic_cmd = NULL;
	hba->uic_async_done = NULL;
	if (reenable_intr)
		ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
	if (ret) {
		ufshcd_set_link_broken(hba);
		ufshcd_schedule_eh_work(hba);
	}
out_unlock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	mutex_unlock(&hba->uic_cmd_mutex);

	return ret;
}
/**
 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
 *				using DME_SET primitives.
 * @hba: per adapter instance
 * @mode: power mode value
 *
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
{
	struct uic_command uic_cmd = {0};
	int ret;

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
		ret = ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
		if (ret) {
			dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
						__func__, ret);
			goto out;
		}
	}

	uic_cmd.command = UIC_CMD_DME_SET;
	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
	uic_cmd.argument3 = mode;
	ufshcd_hold(hba, false);
	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
	ufshcd_release(hba);

out:
	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_uic_change_pwr_mode);
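
/*
 * Sketch of how the @mode argument is composed by ufshcd_change_power_mode()
 * below: the RX power mode goes in the upper nibble and the TX power mode in
 * the lower nibble of the PA_PWRMODE value, e.g.
 *
 *	u8 mode = FAST_MODE << 4 | FAST_MODE;
 *
 *	ufshcd_uic_change_pwr_mode(hba, mode);
 */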
int ufshcd_link_recovery(struct ufs_hba *hba)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->ufshcd_state = UFSHCD_STATE_RESET;
	ufshcd_set_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* Reset the attached device */
	ufshcd_device_reset(hba);

	ret = ufshcd_host_reset_and_restore(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (ret)
		hba->ufshcd_state = UFSHCD_STATE_ERROR;
	ufshcd_clear_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (ret)
		dev_err(hba->dev, "%s: link recovery failed, err %d",
			__func__, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
{
	int ret;
	struct uic_command uic_cmd = {0};
	ktime_t start = ktime_get();

	ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);

	uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
	trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
			     ktime_to_us(ktime_sub(ktime_get(), start)), ret);

	if (ret)
		dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
			__func__, ret);
	else
		ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
								POST_CHANGE);

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_enter);

int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;
	ktime_t start = ktime_get();

	ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);

	uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
	trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
			     ktime_to_us(ktime_sub(ktime_get(), start)), ret);

	if (ret) {
		dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
			__func__, ret);
	} else {
		ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
								POST_CHANGE);
		hba->ufs_stats.last_hibern8_exit_tstamp = local_clock();
		hba->ufs_stats.hibern8_exit_cnt++;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);
void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
{
	unsigned long flags;
	bool update = false;

	if (!ufshcd_is_auto_hibern8_supported(hba))
		return;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->ahit != ahit) {
		hba->ahit = ahit;
		update = true;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (update &&
	    !pm_runtime_suspended(&hba->ufs_device_wlun->sdev_gendev)) {
		ufshcd_rpm_get_sync(hba);
		ufshcd_hold(hba, false);
		ufshcd_auto_hibern8_enable(hba);
		ufshcd_release(hba);
		ufshcd_rpm_put_sync(hba);
	}
}
EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);

void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
{
	if (!ufshcd_is_auto_hibern8_supported(hba))
		return;

	ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
}
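
/*
 * Illustrative AHIT composition (sketch; assumes the timer/scale field masks
 * declared in ufshci.h): a 10 ms idle timeout could be encoded as
 *
 *	u32 ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
 *		   FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
 *
 *	ufshcd_auto_hibern8_update(hba, ahit);
 */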
/**
 * ufshcd_init_pwr_info - setting the POR (power on reset)
 * values in hba power info
 * @hba: per-adapter instance
 */
static void ufshcd_init_pwr_info(struct ufs_hba *hba)
{
	hba->pwr_info.gear_rx = UFS_PWM_G1;
	hba->pwr_info.gear_tx = UFS_PWM_G1;
	hba->pwr_info.lane_rx = 1;
	hba->pwr_info.lane_tx = 1;
	hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
	hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
	hba->pwr_info.hs_rate = 0;
}
/**
 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
 * @hba: per-adapter instance
 */
static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
{
	struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;

	if (hba->max_pwr_info.is_valid)
		return 0;

	if (hba->quirks & UFSHCD_QUIRK_HIBERN_FASTAUTO) {
		pwr_info->pwr_tx = FASTAUTO_MODE;
		pwr_info->pwr_rx = FASTAUTO_MODE;
	} else {
		pwr_info->pwr_tx = FAST_MODE;
		pwr_info->pwr_rx = FAST_MODE;
	}
	pwr_info->hs_rate = PA_HS_MODE_B;

	/* Get the connected lane count */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
			&pwr_info->lane_rx);
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
			&pwr_info->lane_tx);

	if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
		dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
				__func__,
				pwr_info->lane_rx,
				pwr_info->lane_tx);
		return -EINVAL;
	}

	/*
	 * First, get the maximum gears of HS speed.
	 * If a zero value, it means there is no HSGEAR capability.
	 * Then, get the maximum gears of PWM speed.
	 */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
	if (!pwr_info->gear_rx) {
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
				&pwr_info->gear_rx);
		if (!pwr_info->gear_rx) {
			dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
				__func__, pwr_info->gear_rx);
			return -EINVAL;
		}
		pwr_info->pwr_rx = SLOW_MODE;
	}

	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
			&pwr_info->gear_tx);
	if (!pwr_info->gear_tx) {
		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
				&pwr_info->gear_tx);
		if (!pwr_info->gear_tx) {
			dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
				__func__, pwr_info->gear_tx);
			return -EINVAL;
		}
		pwr_info->pwr_tx = SLOW_MODE;
	}

	hba->max_pwr_info.is_valid = true;
	return 0;
}
static int ufshcd_change_power_mode(struct ufs_hba *hba,
			     struct ufs_pa_layer_attr *pwr_mode)
{
	int ret;

	/* if already configured to the requested pwr_mode */
	if (!hba->force_pmc &&
	    pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
	    pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
	    pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
	    pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
	    pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
	    pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
	    pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
		dev_dbg(hba->dev, "%s: power already configured\n", __func__);
		return 0;
	}

	/*
	 * Configure attributes for power mode change with below.
	 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
	 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
	 * - PA_HSSERIES
	 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
			pwr_mode->lane_rx);
	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
			pwr_mode->pwr_rx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
	else
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), false);

	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
			pwr_mode->lane_tx);
	if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
			pwr_mode->pwr_tx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
	else
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), false);

	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
	    pwr_mode->pwr_tx == FASTAUTO_MODE ||
	    pwr_mode->pwr_rx == FAST_MODE ||
	    pwr_mode->pwr_tx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
						pwr_mode->hs_rate);

	if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) {
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
				DL_FC0ProtectionTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
				DL_TC0ReplayTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
				DL_AFC0ReqTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
				DL_FC1ProtectionTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
				DL_TC1ReplayTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
				DL_AFC1ReqTimeOutVal_Default);

		ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
				DL_FC0ProtectionTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
				DL_TC0ReplayTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
				DL_AFC0ReqTimeOutVal_Default);
	}

	ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
			| pwr_mode->pwr_tx);

	if (ret) {
		dev_err(hba->dev,
			"%s: power mode change failed %d\n", __func__, ret);
	} else {
		ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
								pwr_mode);

		memcpy(&hba->pwr_info, pwr_mode,
			sizeof(struct ufs_pa_layer_attr));
	}

	return ret;
}
/**
 * ufshcd_config_pwr_mode - configure a new power mode
 * @hba: per-adapter instance
 * @desired_pwr_mode: desired power configuration
 */
int ufshcd_config_pwr_mode(struct ufs_hba *hba,
		struct ufs_pa_layer_attr *desired_pwr_mode)
{
	struct ufs_pa_layer_attr final_params = { 0 };
	int ret;

	ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
					desired_pwr_mode, &final_params);

	if (ret)
		memcpy(&final_params, desired_pwr_mode, sizeof(final_params));

	ret = ufshcd_change_power_mode(hba, &final_params);

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
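
/*
 * Typical call sequence (sketch): after link startup, the probe path reads the
 * highest power mode both sides can support and then applies it, roughly as
 *
 *	if (!ufshcd_get_max_pwr_mode(hba))
 *		ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
 */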
/**
 * ufshcd_complete_dev_init() - checks device readiness
 * @hba: per-adapter instance
 *
 * Set fDeviceInit flag and poll until device toggles it.
 */
static int ufshcd_complete_dev_init(struct ufs_hba *hba)
{
	int err;
	bool flag_res = true;
	ktime_t timeout;

	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
		QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
	if (err) {
		dev_err(hba->dev,
			"%s: setting fDeviceInit flag failed with error %d\n",
			__func__, err);
		goto out;
	}

	/* Poll fDeviceInit flag to be cleared */
	timeout = ktime_add_ms(ktime_get(), FDEVICEINIT_COMPL_TIMEOUT);
	do {
		err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
					QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
		if (!flag_res)
			break;
		usleep_range(500, 1000);
	} while (ktime_before(ktime_get(), timeout));

	if (err) {
		dev_err(hba->dev,
				"%s: reading fDeviceInit flag failed with error %d\n",
				__func__, err);
	} else if (flag_res) {
		dev_err(hba->dev,
				"%s: fDeviceInit was not cleared by the device\n",
				__func__);
		err = -EBUSY;
	}
out:
	return err;
}
/**
 * ufshcd_make_hba_operational - Make UFS controller operational
 * @hba: per adapter instance
 *
 * To bring UFS host controller to operational state,
 * 1. Enable required interrupts
 * 2. Configure interrupt aggregation
 * 3. Program UTRL and UTMRL base address
 * 4. Configure run-stop-registers
 *
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_make_hba_operational(struct ufs_hba *hba)
{
	int err = 0;
	u32 reg;

	/* Enable required interrupts */
	ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);

	/* Configure interrupt aggregation */
	if (ufshcd_is_intr_aggr_allowed(hba))
		ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
	else
		ufshcd_disable_intr_aggr(hba);

	/* Configure UTRL and UTMRL base address registers */
	ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
			REG_UTP_TRANSFER_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
			REG_UTP_TRANSFER_REQ_LIST_BASE_H);
	ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
			REG_UTP_TASK_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
			REG_UTP_TASK_REQ_LIST_BASE_H);

	/*
	 * Make sure base address and interrupt setup are updated before
	 * enabling the run/stop registers below.
	 */
	wmb();

	/*
	 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
	 */
	reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
	if (!(ufshcd_get_lists_status(reg))) {
		ufshcd_enable_run_stop_reg(hba);
	} else {
		dev_err(hba->dev,
			"Host controller not ready to process requests");
		err = -EIO;
	}

	return err;
}
EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
/**
 * ufshcd_hba_stop - Send controller to reset state
 * @hba: per adapter instance
 */
void ufshcd_hba_stop(struct ufs_hba *hba)
{
	unsigned long flags;
	int err;

	/*
	 * Obtain the host lock to prevent that the controller is disabled
	 * while the UFS interrupt handler is active on another CPU.
	 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
					CONTROLLER_ENABLE, CONTROLLER_DISABLE,
					10, 1);
	if (err)
		dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
}
EXPORT_SYMBOL_GPL(ufshcd_hba_stop);
/**
 * ufshcd_hba_execute_hce - initialize the controller
 * @hba: per adapter instance
 *
 * The controller resets itself and controller firmware initialization
 * sequence kicks off. When controller is ready it will set
 * the Host Controller Enable bit to 1.
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
{
	int retry_outer = 3;
	int retry_inner;

start:
	if (ufshcd_is_hba_active(hba))
		/* change controller state to "reset state" */
		ufshcd_hba_stop(hba);

	/* UniPro link is disabled at this point */
	ufshcd_set_link_off(hba);

	ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);

	/* start controller initialization sequence */
	ufshcd_hba_start(hba);

	/*
	 * To initialize a UFS host controller HCE bit must be set to 1.
	 * During initialization the HCE bit value changes from 1->0->1.
	 * When the host controller completes initialization sequence
	 * it sets the value of HCE bit to 1. The same HCE bit is read back
	 * to check if the controller has completed initialization sequence.
	 * So without this delay the value HCE = 1, set in the previous
	 * instruction might be read back.
	 * This delay can be changed based on the controller.
	 */
	ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);

	/* wait for the host controller to complete initialization */
	retry_inner = 50;
	while (!ufshcd_is_hba_active(hba)) {
		if (retry_inner) {
			retry_inner--;
		} else {
			dev_err(hba->dev,
				"Controller enable failed\n");
			if (retry_outer) {
				retry_outer--;
				goto start;
			}
			return -EIO;
		}
		usleep_range(1000, 1100);
	}

	/* enable UIC related interrupts */
	ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);

	ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);

	return 0;
}

int ufshcd_hba_enable(struct ufs_hba *hba)
{
	int ret;

	if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
		ufshcd_set_link_off(hba);
		ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);

		/* enable UIC related interrupts */
		ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
		ret = ufshcd_dme_reset(hba);
		if (ret) {
			dev_err(hba->dev, "DME_RESET failed\n");
			return ret;
		}

		ret = ufshcd_dme_enable(hba);
		if (ret) {
			dev_err(hba->dev, "Enabling DME failed\n");
			return ret;
		}

		ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
	} else {
		ret = ufshcd_hba_execute_hce(hba);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
{
	int tx_lanes = 0, i, err = 0;

	if (!peer)
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
			       &tx_lanes);
	else
		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
				    &tx_lanes);
	for (i = 0; i < tx_lanes; i++) {
		if (!peer)
			err = ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
					0);
		else
			err = ufshcd_dme_peer_set(hba,
				UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
					0);
		if (err) {
			dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
				__func__, peer, i, err);
			break;
		}
	}

	return err;
}

static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
{
	return ufshcd_disable_tx_lcc(hba, true);
}
void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val)
{
	struct ufs_event_hist *e;

	if (id >= UFS_EVT_CNT)
		return;

	e = &hba->ufs_stats.event[id];
	e->val[e->pos] = val;
	e->tstamp[e->pos] = local_clock();
	e->cnt += 1;
	e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH;

	ufshcd_vops_event_notify(hba, id, &val);
}
EXPORT_SYMBOL_GPL(ufshcd_update_evt_hist);
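
/*
 * Example (sketch): callers record an event id together with a value, as the
 * link startup path below does on failure:
 *
 *	ufshcd_update_evt_hist(hba, UFS_EVT_LINK_STARTUP_FAIL, (u32)ret);
 */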
/**
 * ufshcd_link_startup - Initialize unipro link startup
 * @hba: per adapter instance
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_link_startup(struct ufs_hba *hba)
{
	int ret;
	int retries = DME_LINKSTARTUP_RETRIES;
	bool link_startup_again = false;

	/*
	 * If UFS device isn't active then we will have to issue link startup
	 * 2 times to make sure the device state moves to active.
	 */
	if (!ufshcd_is_ufs_dev_active(hba))
		link_startup_again = true;

link_startup:
	do {
		ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);

		ret = ufshcd_dme_link_startup(hba);

		/* check if device is detected by inter-connect layer */
		if (!ret && !ufshcd_is_device_present(hba)) {
			ufshcd_update_evt_hist(hba,
					       UFS_EVT_LINK_STARTUP_FAIL,
					       0);
			dev_err(hba->dev, "%s: Device not present\n", __func__);
			ret = -ENXIO;
			goto out;
		}

		/*
		 * DME link lost indication is only received when link is up,
		 * but we can't be sure if the link is up until link startup
		 * succeeds. So reset the local Uni-Pro and try again.
		 */
		if (ret && retries && ufshcd_hba_enable(hba)) {
			ufshcd_update_evt_hist(hba,
					       UFS_EVT_LINK_STARTUP_FAIL,
					       (u32)ret);
			goto out;
		}
	} while (ret && retries--);

	if (ret) {
		/* failed to get the link up... retire */
		ufshcd_update_evt_hist(hba,
				       UFS_EVT_LINK_STARTUP_FAIL,
				       (u32)ret);
		goto out;
	}

	if (link_startup_again) {
		link_startup_again = false;
		retries = DME_LINKSTARTUP_RETRIES;
		goto link_startup;
	}

	/* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
	ufshcd_init_pwr_info(hba);
	ufshcd_print_pwr_info(hba);

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
		ret = ufshcd_disable_device_tx_lcc(hba);
		if (ret)
			goto out;
	}

	/* Include any host controller configuration via UIC commands */
	ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
	if (ret)
		goto out;

	/* Clear UECPA once due to LINERESET has happened during LINK_STARTUP */
	ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
	ret = ufshcd_make_hba_operational(hba);
out:
	if (ret) {
		dev_err(hba->dev, "link startup failed %d\n", ret);
		ufshcd_print_host_state(hba);
		ufshcd_print_pwr_info(hba);
		ufshcd_print_evt_hist(hba);
	}
	return ret;
}
/**
 * ufshcd_verify_dev_init() - Verify device initialization
 * @hba: per-adapter instance
 *
 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
 * device Transport Protocol (UTP) layer is ready after a reset.
 * If the UTP layer at the device side is not initialized, it may
 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
 */
static int ufshcd_verify_dev_init(struct ufs_hba *hba)
{
	int err = 0;
	int retries;

	ufshcd_hold(hba, false);
	mutex_lock(&hba->dev_cmd.lock);
	for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
		err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
					  hba->nop_out_timeout);

		if (!err || err == -ETIMEDOUT)
			break;

		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
	}
	mutex_unlock(&hba->dev_cmd.lock);
	ufshcd_release(hba);

	if (err)
		dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
	return err;
}
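
/*
 * Worst-case wait budget for the verification above (assuming
 * hba->nop_out_timeout keeps its default of NOP_OUT_TIMEOUT):
 * NOP_OUT_RETRIES * NOP_OUT_TIMEOUT = 10 * 50 ms = 500 ms before giving up.
 */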
/**
 * ufshcd_setup_links - associate link b/w device wlun and other luns
 * @sdev: pointer to SCSI device
 * @hba: pointer to ufs hba
 */
static void ufshcd_setup_links(struct ufs_hba *hba, struct scsi_device *sdev)
{
	struct device_link *link;

	/*
	 * Device wlun is the supplier & rest of the luns are consumers.
	 * This ensures that device wlun suspends after all other luns.
	 */
	if (hba->ufs_device_wlun) {
		link = device_link_add(&sdev->sdev_gendev,
				       &hba->ufs_device_wlun->sdev_gendev,
				       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
		if (!link) {
			dev_err(&sdev->sdev_gendev, "Failed establishing link - %s\n",
				dev_name(&hba->ufs_device_wlun->sdev_gendev));
			return;
		}
		hba->luns_avail--;
		/* Ignore REPORT_LUN wlun probing */
		if (hba->luns_avail == 1) {
			ufshcd_rpm_put(hba);
			return;
		}
	} else {
		/*
		 * Device wlun is probed. The assumption is that WLUNs are
		 * scanned before other LUNs.
		 */
		hba->luns_avail--;
	}
}
/**
 * ufshcd_lu_init - Initialize the relevant parameters of the LU
 * @hba: per-adapter instance
 * @sdev: pointer to SCSI device
 */
static void ufshcd_lu_init(struct ufs_hba *hba, struct scsi_device *sdev)
{
	int len = QUERY_DESC_MAX_SIZE;
	u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);
	u8 lun_qdepth = hba->nutrs;
	u8 *desc_buf;
	int ret;

	desc_buf = kzalloc(len, GFP_KERNEL);
	if (!desc_buf)
		goto set_qdepth;

	ret = ufshcd_read_unit_desc_param(hba, lun, 0, desc_buf, len);
	if (ret < 0) {
		if (ret == -EOPNOTSUPP)
			/* If LU doesn't support unit descriptor, its queue depth is set to 1 */
			lun_qdepth = 1;
		kfree(desc_buf);
		goto set_qdepth;
	}

	if (desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH]) {
		/*
		 * In per-LU queueing architecture, bLUQueueDepth will not be 0, then we will
		 * use the smaller between UFSHCI CAP.NUTRS and UFS LU bLUQueueDepth
		 */
		lun_qdepth = min_t(int, desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH], hba->nutrs);
	}
	/*
	 * According to UFS device specification, the write protection mode is only supported by
	 * normal LU, not supported by WLUN.
	 */
	if (hba->dev_info.f_power_on_wp_en && lun < hba->dev_info.max_lu_supported &&
	    !hba->dev_info.is_lu_power_on_wp &&
	    desc_buf[UNIT_DESC_PARAM_LU_WR_PROTECT] == UFS_LU_POWER_ON_WP)
		hba->dev_info.is_lu_power_on_wp = true;

	/* In case of RPMB LU, check if advanced RPMB mode is enabled */
	if (desc_buf[UNIT_DESC_PARAM_UNIT_INDEX] == UFS_UPIU_RPMB_WLUN &&
	    desc_buf[RPMB_UNIT_DESC_PARAM_REGION_EN] & BIT(4))
		hba->dev_info.b_advanced_rpmb_en = true;

	kfree(desc_buf);
set_qdepth:
	/*
	 * For WLUNs that don't support unit descriptor, queue depth is set to 1. For LUs whose
	 * bLUQueueDepth == 0, the queue depth is set to a maximum value that host can queue.
	 */
	dev_dbg(hba->dev, "Set LU %x queue depth %d\n", lun, lun_qdepth);
	scsi_change_queue_depth(sdev, lun_qdepth);
}
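
/*
 * Worked example of the queue depth selection above (illustrative values): a
 * LU reporting bLUQueueDepth == 64 on a controller with hba->nutrs == 32 ends
 * up with lun_qdepth = min(64, 32) = 32, while a LU reporting
 * bLUQueueDepth == 0 keeps the controller-wide maximum of 32.
 */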
/**
 * ufshcd_slave_alloc - handle initial SCSI device configurations
 * @sdev: pointer to SCSI device
 *
 * Returns success
 */
static int ufshcd_slave_alloc(struct scsi_device *sdev)
{
	struct ufs_hba *hba;

	hba = shost_priv(sdev->host);

	/* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
	sdev->use_10_for_ms = 1;

	/* DBD field should be set to 1 in mode sense(10) */
	sdev->set_dbd_for_ms = 1;

	/* allow SCSI layer to restart the device in case of errors */
	sdev->allow_restart = 1;

	/* REPORT SUPPORTED OPERATION CODES is not supported */
	sdev->no_report_opcodes = 1;

	/* WRITE_SAME command is not supported */
	sdev->no_write_same = 1;

	ufshcd_lu_init(hba, sdev);

	ufshcd_setup_links(hba, sdev);

	return 0;
}

/**
 * ufshcd_change_queue_depth - change queue depth
 * @sdev: pointer to SCSI device
 * @depth: required depth to set
 *
 * Change queue depth and make sure the max. limits are not crossed.
 */
static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
{
	return scsi_change_queue_depth(sdev, min(depth, sdev->host->can_queue));
}
static void ufshcd_hpb_destroy(struct ufs_hba *hba, struct scsi_device *sdev)
{
	/* skip well-known LU */
	if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) ||
	    !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba))
		return;

	ufshpb_destroy_lu(hba, sdev);
}

static void ufshcd_hpb_configure(struct ufs_hba *hba, struct scsi_device *sdev)
{
	/* skip well-known LU */
	if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) ||
	    !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba))
		return;

	ufshpb_init_hpb_lu(hba, sdev);
}

/**
 * ufshcd_slave_configure - adjust SCSI device configurations
 * @sdev: pointer to SCSI device
 */
static int ufshcd_slave_configure(struct scsi_device *sdev)
{
	struct ufs_hba *hba = shost_priv(sdev->host);
	struct request_queue *q = sdev->request_queue;

	ufshcd_hpb_configure(hba, sdev);

	blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
	if (hba->quirks & UFSHCD_QUIRK_4KB_DMA_ALIGNMENT)
		blk_queue_update_dma_alignment(q, 4096 - 1);
	/*
	 * Block runtime-pm until all consumers are added.
	 * Refer ufshcd_setup_links().
	 */
	if (is_device_wlun(sdev))
		pm_runtime_get_noresume(&sdev->sdev_gendev);
	else if (ufshcd_is_rpm_autosuspend_allowed(hba))
		sdev->rpm_autosuspend = 1;
	/*
	 * Do not print messages during runtime PM to avoid never-ending cycles
	 * of messages written back to storage by user space causing runtime
	 * resume, causing more messages and so on.
	 */
	sdev->silence_suspend = 1;

	ufshcd_crypto_register(hba, q);

	return 0;
}
/**
 * ufshcd_slave_destroy - remove SCSI device configurations
 * @sdev: pointer to SCSI device
 */
static void ufshcd_slave_destroy(struct scsi_device *sdev)
{
	struct ufs_hba *hba;
	unsigned long flags;

	hba = shost_priv(sdev->host);

	ufshcd_hpb_destroy(hba, sdev);

	/* Drop the reference as it won't be needed anymore */
	if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
		spin_lock_irqsave(hba->host->host_lock, flags);
		hba->ufs_device_wlun = NULL;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	} else if (hba->ufs_device_wlun) {
		struct device *supplier = NULL;

		/* Ensure UFS Device WLUN exists and does not disappear */
		spin_lock_irqsave(hba->host->host_lock, flags);
		if (hba->ufs_device_wlun) {
			supplier = &hba->ufs_device_wlun->sdev_gendev;
			get_device(supplier);
		}
		spin_unlock_irqrestore(hba->host->host_lock, flags);

		if (supplier) {
			/*
			 * If a LUN fails to probe (e.g. absent BOOT WLUN), the
			 * device will not have been registered but can still
			 * have a device link holding a reference to the device.
			 */
			device_link_remove(&sdev->sdev_gendev, supplier);
			put_device(supplier);
		}
	}
}
/**
 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
 * @lrbp: pointer to local reference block of completed command
 * @scsi_status: SCSI command status
 *
 * Returns value based on SCSI command status
 */
static inline int
ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
{
	int result = 0;

	switch (scsi_status) {
	case SAM_STAT_CHECK_CONDITION:
		ufshcd_copy_sense_data(lrbp);
		fallthrough;
	case SAM_STAT_GOOD:
		result |= DID_OK << 16 | scsi_status;
		break;
	case SAM_STAT_TASK_SET_FULL:
	case SAM_STAT_BUSY:
	case SAM_STAT_TASK_ABORTED:
		ufshcd_copy_sense_data(lrbp);
		result |= scsi_status;
		break;
	default:
		result |= DID_ERROR << 16;
		break;
	} /* end of switch */

	return result;
}
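
/*
 * Example of the result word built above (sketch): a CHECK CONDITION status
 * yields
 *
 *	result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION;
 *
 * i.e. host byte DID_OK with the SCSI status in the low byte, which lets the
 * midlayer pick up the sense data copied by ufshcd_copy_sense_data().
 */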
/**
 * ufshcd_transfer_rsp_status - Get overall status of the response
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block of completed command
 * @cqe: pointer to the completion queue entry
 *
 * Returns result of the command to notify SCSI midlayer
 */
static inline int
ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
			   struct cq_entry *cqe)
{
	int result = 0;
	int scsi_status;
	enum utp_ocs ocs;

	/* overall command status of utrd */
	ocs = ufshcd_get_tr_ocs(lrbp, cqe);

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) {
		if (be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_1) &
					MASK_RSP_UPIU_RESULT)
			ocs = OCS_SUCCESS;
	}

	switch (ocs) {
	case OCS_SUCCESS:
		result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
		hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
		switch (result) {
		case UPIU_TRANSACTION_RESPONSE:
			/*
			 * get the response UPIU result to extract
			 * the SCSI command status
			 */
			result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);

			/*
			 * get the result based on SCSI status response
			 * to notify the SCSI midlayer of the command status
			 */
			scsi_status = result & MASK_SCSI_STATUS;
			result = ufshcd_scsi_cmd_status(lrbp, scsi_status);

			/*
			 * Currently we are only supporting BKOPs exception
			 * events hence we can ignore BKOPs exception event
			 * during power management callbacks. BKOPs exception
			 * event is not expected to be raised in runtime suspend
			 * callback as it allows the urgent bkops.
			 * During system suspend, we are anyway forcefully
			 * disabling the bkops and if urgent bkops is needed
			 * it will be enabled on system resume. Long term
			 * solution could be to abort the system suspend if
			 * UFS device needs urgent BKOPs.
			 */
			if (!hba->pm_op_in_progress &&
			    !ufshcd_eh_in_progress(hba) &&
			    ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
				/* Flushed in suspend */
				schedule_work(&hba->eeh_work);

			if (scsi_status == SAM_STAT_GOOD)
				ufshpb_rsp_upiu(hba, lrbp);
			break;
		case UPIU_TRANSACTION_REJECT_UPIU:
			/* TODO: handle Reject UPIU Response */
			result = DID_ERROR << 16;
			dev_err(hba->dev,
				"Reject UPIU not fully implemented\n");
			break;
		default:
			dev_err(hba->dev,
				"Unexpected request response code = %x\n",
				result);
			result = DID_ERROR << 16;
			break;
		}
		break;
	case OCS_ABORTED:
		result |= DID_ABORT << 16;
		break;
	case OCS_INVALID_COMMAND_STATUS:
		result |= DID_REQUEUE << 16;
		break;
	case OCS_INVALID_CMD_TABLE_ATTR:
	case OCS_INVALID_PRDT_ATTR:
	case OCS_MISMATCH_DATA_BUF_SIZE:
	case OCS_MISMATCH_RESP_UPIU_SIZE:
	case OCS_PEER_COMM_FAILURE:
	case OCS_FATAL_ERROR:
	case OCS_DEVICE_FATAL_ERROR:
	case OCS_INVALID_CRYPTO_CONFIG:
	case OCS_GENERAL_CRYPTO_ERROR:
	default:
		result |= DID_ERROR << 16;
		dev_err(hba->dev,
				"OCS error from controller = %x for tag %d\n",
				ocs, lrbp->task_tag);
		ufshcd_print_evt_hist(hba);
		ufshcd_print_host_state(hba);
		break;
	} /* end of switch */

	if ((host_byte(result) != DID_OK) &&
	    (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs)
		ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
	return result;
}
static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
					 u32 intr_mask)
{
	if (!ufshcd_is_auto_hibern8_supported(hba) ||
	    !ufshcd_is_auto_hibern8_enabled(hba))
		return false;

	if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
		return false;

	if (hba->active_uic_cmd &&
	    (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
	    hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
		return false;

	return true;
}
/**
 * ufshcd_uic_cmd_compl - handle completion of uic command
 * @hba: per adapter instance
 * @intr_status: interrupt status generated by the controller
 *
 * Returns
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
{
	irqreturn_t retval = IRQ_NONE;

	spin_lock(hba->host->host_lock);
	if (ufshcd_is_auto_hibern8_error(hba, intr_status))
		hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);

	if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
		hba->active_uic_cmd->argument2 |=
			ufshcd_get_uic_cmd_result(hba);
		hba->active_uic_cmd->argument3 =
			ufshcd_get_dme_attr_val(hba);
		if (!hba->uic_async_done)
			hba->active_uic_cmd->cmd_active = 0;
		complete(&hba->active_uic_cmd->done);
		retval = IRQ_HANDLED;
	}

	if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
		hba->active_uic_cmd->cmd_active = 0;
		complete(hba->uic_async_done);
		retval = IRQ_HANDLED;
	}

	if (retval == IRQ_HANDLED)
		ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd,
					     UFS_CMD_COMP);
	spin_unlock(hba->host->host_lock);

	return retval;
}
/* Release the resources allocated for processing a SCSI command. */
static void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
				    struct ufshcd_lrb *lrbp)
{
	struct scsi_cmnd *cmd = lrbp->cmd;

	scsi_dma_unmap(cmd);
	lrbp->cmd = NULL;	/* Mark the command as completed. */
	ufshcd_release(hba);
	ufshcd_clk_scaling_update_busy(hba);
}
/**
 * ufshcd_compl_one_cqe - handle a completion queue entry
 * @hba: per adapter instance
 * @task_tag: the task tag of the request to be completed
 * @cqe: pointer to the completion queue entry
 */
void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
			  struct cq_entry *cqe)
{
	struct ufshcd_lrb *lrbp;
	struct scsi_cmnd *cmd;

	lrbp = &hba->lrb[task_tag];
	lrbp->compl_time_stamp = ktime_get();
	cmd = lrbp->cmd;
	if (cmd) {
		if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
			ufshcd_update_monitor(hba, lrbp);
		ufshcd_add_command_trace(hba, task_tag, UFS_CMD_COMP);
		cmd->result = ufshcd_transfer_rsp_status(hba, lrbp, cqe);
		ufshcd_release_scsi_cmd(hba, lrbp);
		/* Do not touch lrbp after scsi done */
		scsi_done(cmd);
	} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
		   lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
		if (hba->dev_cmd.complete) {
			hba->dev_cmd.cqe = cqe;
			ufshcd_add_command_trace(hba, task_tag, UFS_DEV_COMP);
			complete(hba->dev_cmd.complete);
			ufshcd_clk_scaling_update_busy(hba);
		}
	}
}
/**
 * __ufshcd_transfer_req_compl - handle SCSI and query command completion
 * @hba: per adapter instance
 * @completed_reqs: bitmask that indicates which requests to complete
 */
static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
					unsigned long completed_reqs)
{
	int tag;

	for_each_set_bit(tag, &completed_reqs, hba->nutrs)
		ufshcd_compl_one_cqe(hba, tag, NULL);
}

/* Any value that is not an existing queue number is fine for this constant. */
enum {
	UFSHCD_POLL_FROM_INTERRUPT_CONTEXT = -1
};

static void ufshcd_clear_polled(struct ufs_hba *hba,
				unsigned long *completed_reqs)
{
	int tag;

	for_each_set_bit(tag, completed_reqs, hba->nutrs) {
		struct scsi_cmnd *cmd = hba->lrb[tag].cmd;

		if (!cmd)
			continue;
		if (scsi_cmd_to_rq(cmd)->cmd_flags & REQ_POLLED)
			__clear_bit(tag, completed_reqs);
	}
}
/*
 * Returns > 0 if one or more commands have been completed or 0 if no
 * requests have been completed.
 */
static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
{
	struct ufs_hba *hba = shost_priv(shost);
	unsigned long completed_reqs, flags;
	u32 tr_doorbell;
	struct ufs_hw_queue *hwq;

	if (is_mcq_enabled(hba)) {
		hwq = &hba->uhq[queue_num + UFSHCD_MCQ_IO_QUEUE_OFFSET];

		return ufshcd_mcq_poll_cqe_lock(hba, hwq);
	}

	spin_lock_irqsave(&hba->outstanding_lock, flags);
	tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	completed_reqs = ~tr_doorbell & hba->outstanding_reqs;
	WARN_ONCE(completed_reqs & ~hba->outstanding_reqs,
		  "completed: %#lx; outstanding: %#lx\n", completed_reqs,
		  hba->outstanding_reqs);
	if (queue_num == UFSHCD_POLL_FROM_INTERRUPT_CONTEXT) {
		/* Do not complete polled requests from interrupt context. */
		ufshcd_clear_polled(hba, &completed_reqs);
	}
	hba->outstanding_reqs &= ~completed_reqs;
	spin_unlock_irqrestore(&hba->outstanding_lock, flags);

	if (completed_reqs)
		__ufshcd_transfer_req_compl(hba, completed_reqs);

	return completed_reqs != 0;
}
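
/*
 * Worked example of the doorbell diff above (illustrative values): with
 * outstanding_reqs == 0x0f and tr_doorbell == 0x05, completed_reqs =
 * ~0x05 & 0x0f = 0x0a, i.e. tags 1 and 3 have completed and are handed to
 * __ufshcd_transfer_req_compl().
 */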
/**
 * ufshcd_transfer_req_compl - handle SCSI and query command completion
 * @hba: per adapter instance
 *
 * Returns
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
{
	/* Resetting interrupt aggregation counters first and reading the
	 * DOOR_BELL afterward allows us to handle all the completed requests.
	 * In order to prevent other interrupts starvation the DB is read once
	 * after reset. The down side of this solution is the possibility of
	 * false interrupt if device completes another request after resetting
	 * aggregation and before reading the DB.
	 */
	if (ufshcd_is_intr_aggr_allowed(hba) &&
	    !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
		ufshcd_reset_intr_aggr(hba);

	if (ufs_fail_completion())
		return IRQ_HANDLED;

	/*
	 * Ignore the ufshcd_poll() return value and return IRQ_HANDLED since we
	 * do not want polling to trigger spurious interrupt complaints.
	 */
	ufshcd_poll(hba->host, UFSHCD_POLL_FROM_INTERRUPT_CONTEXT);

	return IRQ_HANDLED;
}
int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask)
{
	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
				       QUERY_ATTR_IDN_EE_CONTROL, 0, 0,
				       &ee_ctrl_mask);
}

int ufshcd_write_ee_control(struct ufs_hba *hba)
{
	int err;

	mutex_lock(&hba->ee_ctrl_mutex);
	err = __ufshcd_write_ee_control(hba, hba->ee_ctrl_mask);
	mutex_unlock(&hba->ee_ctrl_mutex);
	if (err)
		dev_err(hba->dev, "%s: failed to write ee control %d\n",
			__func__, err);
	return err;
}

int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask,
			     const u16 *other_mask, u16 set, u16 clr)
{
	u16 new_mask, ee_ctrl_mask;
	int err = 0;

	mutex_lock(&hba->ee_ctrl_mutex);
	new_mask = (*mask & ~clr) | set;
	ee_ctrl_mask = new_mask | *other_mask;
	if (ee_ctrl_mask != hba->ee_ctrl_mask)
		err = __ufshcd_write_ee_control(hba, ee_ctrl_mask);
	/* Still need to update 'mask' even if 'ee_ctrl_mask' was unchanged */
	if (!err) {
		hba->ee_ctrl_mask = ee_ctrl_mask;
		*mask = new_mask;
	}
	mutex_unlock(&hba->ee_ctrl_mutex);
	return err;
}
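
/*
 * Worked example of the mask arithmetic above (illustrative values): with
 * *mask == 0x0005, *other_mask == 0x0010, set == 0x0002 and clr == 0x0001,
 * new_mask = (0x0005 & ~0x0001) | 0x0002 = 0x0006 and the attribute written
 * to the device becomes ee_ctrl_mask = 0x0006 | 0x0010 = 0x0016.
 */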
/**
 * ufshcd_disable_ee - disable exception event
 * @hba: per-adapter instance
 * @mask: exception event to disable
 *
 * Disables exception event in the device so that the EVENT_ALERT
 * bit is not set.
 *
 * Returns zero on success, non-zero error value on failure.
 */
static inline int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
{
	return ufshcd_update_ee_drv_mask(hba, 0, mask);
}

/**
 * ufshcd_enable_ee - enable exception event
 * @hba: per-adapter instance
 * @mask: exception event to enable
 *
 * Enable corresponding exception event in the device to allow
 * device to alert host in critical scenarios.
 *
 * Returns zero on success, non-zero error value on failure.
 */
static inline int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
{
	return ufshcd_update_ee_drv_mask(hba, mask, 0);
}
/**
 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
 * @hba: per-adapter instance
 *
 * Allow device to manage background operations on its own. Enabling
 * this might lead to inconsistent latencies during normal data transfers
 * as the device is allowed to manage its own way of handling background
 * operations.
 *
 * Returns zero on success, non-zero on failure.
 */
static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
{
	int err = 0;

	if (hba->auto_bkops_enabled)
		goto out;

	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
			QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
	if (err) {
		dev_err(hba->dev, "%s: failed to enable bkops %d\n",
				__func__, err);
		goto out;
	}

	hba->auto_bkops_enabled = true;
	trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");

	/* No need of URGENT_BKOPS exception from the device */
	err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
	if (err)
		dev_err(hba->dev, "%s: failed to disable exception event %d\n",
				__func__, err);
out:
	return err;
}

/**
 * ufshcd_disable_auto_bkops - block device in doing background operations
 * @hba: per-adapter instance
 *
 * Disabling background operations improves command response latency but
 * has drawback of device moving into critical state where the device is
 * not-operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
 * host is idle so that BKOPS are managed effectively without any negative
 * impacts.
 *
 * Returns zero on success, non-zero on failure.
 */
static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
{
	int err = 0;

	if (!hba->auto_bkops_enabled)
		goto out;

	/*
	 * If host assisted BKOPs is to be enabled, make sure
	 * urgent bkops exception is allowed.
	 */
	err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
	if (err) {
		dev_err(hba->dev, "%s: failed to enable exception event %d\n",
				__func__, err);
		goto out;
	}

	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
			QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
	if (err) {
		dev_err(hba->dev, "%s: failed to disable bkops %d\n",
				__func__, err);
		ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
		goto out;
	}

	hba->auto_bkops_enabled = false;
	trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
	hba->is_urgent_bkops_lvl_checked = false;
out:
	return err;
}
/**
 * ufshcd_force_reset_auto_bkops - force reset auto bkops state
 * @hba: per adapter instance
 *
 * After a device reset the device may toggle the BKOPS_EN flag
 * to default value. The s/w tracking variables should be updated
 * as well. This function would change the auto-bkops state based on
 * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
 */
static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
{
	if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
		hba->auto_bkops_enabled = false;
		hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
		ufshcd_enable_auto_bkops(hba);
	} else {
		hba->auto_bkops_enabled = true;
		hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
		ufshcd_disable_auto_bkops(hba);
	}
	hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
	hba->is_urgent_bkops_lvl_checked = false;
}

static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
{
	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
}

/**
 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
 * @hba: per-adapter instance
 * @status: bkops_status value
 *
 * Read the bkops_status from the UFS device and Enable fBackgroundOpsEn
 * flag in the device to permit background operations if the device
 * bkops_status is greater than or equal to "status" argument passed to
 * this function, disable otherwise.
 *
 * Returns 0 for success, non-zero in case of failure.
 *
 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
 * to know whether auto bkops is enabled or disabled after this function
 * returns control to it.
 */
static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
			     enum bkops_status status)
{
	int err;
	u32 curr_status = 0;

	err = ufshcd_get_bkops_status(hba, &curr_status);
	if (err) {
		dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
				__func__, err);
		goto out;
	} else if (curr_status > BKOPS_STATUS_MAX) {
		dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
				__func__, curr_status);
		err = -EINVAL;
		goto out;
	}

	if (curr_status >= status)
		err = ufshcd_enable_auto_bkops(hba);
	else
		err = ufshcd_disable_auto_bkops(hba);
out:
	return err;
}
/**
 * ufshcd_urgent_bkops - handle urgent bkops exception event
 * @hba: per-adapter instance
 *
 * Enable fBackgroundOpsEn flag in the device to permit background
 * operations.
 *
 * If BKOPs is enabled, this function returns 0, 1 if the bkops is not enabled
 * and negative error value for any other failure.
 */
static int ufshcd_urgent_bkops(struct ufs_hba *hba)
{
	return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
}

static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
{
	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
}
5785 static void ufshcd_bkops_exception_event_handler(struct ufs_hba
*hba
)
5788 u32 curr_status
= 0;
5790 if (hba
->is_urgent_bkops_lvl_checked
)
5791 goto enable_auto_bkops
;
5793 err
= ufshcd_get_bkops_status(hba
, &curr_status
);
5795 dev_err(hba
->dev
, "%s: failed to get BKOPS status %d\n",
5801 * We are seeing that some devices are raising the urgent bkops
5802 * exception events even when BKOPS status doesn't indicate performace
5803 * impacted or critical. Handle these device by determining their urgent
5804 * bkops status at runtime.
5806 if (curr_status
< BKOPS_STATUS_PERF_IMPACT
) {
5807 dev_err(hba
->dev
, "%s: device raised urgent BKOPS exception for bkops status %d\n",
5808 __func__
, curr_status
);
5809 /* update the current status as the urgent bkops level */
5810 hba
->urgent_bkops_lvl
= curr_status
;
5811 hba
->is_urgent_bkops_lvl_checked
= true;
5815 err
= ufshcd_enable_auto_bkops(hba
);
5818 dev_err(hba
->dev
, "%s: failed to handle urgent bkops %d\n",
5822 static void ufshcd_temp_exception_event_handler(struct ufs_hba
*hba
, u16 status
)
5826 if (ufshcd_query_attr_retry(hba
, UPIU_QUERY_OPCODE_READ_ATTR
,
5827 QUERY_ATTR_IDN_CASE_ROUGH_TEMP
, 0, 0, &value
))
5830 dev_info(hba
->dev
, "exception Tcase %d\n", value
- 80);
5832 ufs_hwmon_notify_event(hba
, status
& MASK_EE_URGENT_TEMP
);
5835 * A placeholder for the platform vendors to add whatever additional
5840 static int __ufshcd_wb_toggle(struct ufs_hba
*hba
, bool set
, enum flag_idn idn
)
5843 enum query_opcode opcode
= set
? UPIU_QUERY_OPCODE_SET_FLAG
:
5844 UPIU_QUERY_OPCODE_CLEAR_FLAG
;
5846 index
= ufshcd_wb_get_query_index(hba
);
5847 return ufshcd_query_flag_retry(hba
, opcode
, idn
, index
, NULL
);
5850 int ufshcd_wb_toggle(struct ufs_hba
*hba
, bool enable
)
5854 if (!ufshcd_is_wb_allowed(hba
) ||
5855 hba
->dev_info
.wb_enabled
== enable
)
5858 ret
= __ufshcd_wb_toggle(hba
, enable
, QUERY_FLAG_IDN_WB_EN
);
5860 dev_err(hba
->dev
, "%s: Write Booster %s failed %d\n",
5861 __func__
, enable
? "enabling" : "disabling", ret
);
5865 hba
->dev_info
.wb_enabled
= enable
;
5866 dev_dbg(hba
->dev
, "%s: Write Booster %s\n",
5867 __func__
, enable
? "enabled" : "disabled");
5872 static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba
*hba
,
5877 ret
= __ufshcd_wb_toggle(hba
, enable
,
5878 QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8
);
5880 dev_err(hba
->dev
, "%s: WB-Buf Flush during H8 %s failed %d\n",
5881 __func__
, enable
? "enabling" : "disabling", ret
);
5884 dev_dbg(hba
->dev
, "%s: WB-Buf Flush during H8 %s\n",
5885 __func__
, enable
? "enabled" : "disabled");
5888 int ufshcd_wb_toggle_buf_flush(struct ufs_hba
*hba
, bool enable
)
5892 if (!ufshcd_is_wb_allowed(hba
) ||
5893 hba
->dev_info
.wb_buf_flush_enabled
== enable
)
5896 ret
= __ufshcd_wb_toggle(hba
, enable
, QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN
);
5898 dev_err(hba
->dev
, "%s: WB-Buf Flush %s failed %d\n",
5899 __func__
, enable
? "enabling" : "disabling", ret
);
5903 hba
->dev_info
.wb_buf_flush_enabled
= enable
;
5904 dev_dbg(hba
->dev
, "%s: WB-Buf Flush %s\n",
5905 __func__
, enable
? "enabled" : "disabled");
5910 static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba
*hba
,
5917 index
= ufshcd_wb_get_query_index(hba
);
5918 ret
= ufshcd_query_attr_retry(hba
, UPIU_QUERY_OPCODE_READ_ATTR
,
5919 QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE
,
5920 index
, 0, &cur_buf
);
5922 dev_err(hba
->dev
, "%s: dCurWriteBoosterBufferSize read failed %d\n",
5928 dev_info(hba
->dev
, "dCurWBBuf: %d WB disabled until free-space is available\n",
5932 /* Let it continue to flush when available buffer exceeds threshold */
5933 return avail_buf
< hba
->vps
->wb_flush_threshold
;
5936 static void ufshcd_wb_force_disable(struct ufs_hba
*hba
)
5938 if (ufshcd_is_wb_buf_flush_allowed(hba
))
5939 ufshcd_wb_toggle_buf_flush(hba
, false);
5941 ufshcd_wb_toggle_buf_flush_during_h8(hba
, false);
5942 ufshcd_wb_toggle(hba
, false);
5943 hba
->caps
&= ~UFSHCD_CAP_WB_EN
;
5945 dev_info(hba
->dev
, "%s: WB force disabled\n", __func__
);
5948 static bool ufshcd_is_wb_buf_lifetime_available(struct ufs_hba
*hba
)
5954 index
= ufshcd_wb_get_query_index(hba
);
5955 ret
= ufshcd_query_attr_retry(hba
, UPIU_QUERY_OPCODE_READ_ATTR
,
5956 QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST
,
5957 index
, 0, &lifetime
);
5960 "%s: bWriteBoosterBufferLifeTimeEst read failed %d\n",
5965 if (lifetime
== UFS_WB_EXCEED_LIFETIME
) {
5966 dev_err(hba
->dev
, "%s: WB buf lifetime is exhausted 0x%02X\n",
5967 __func__
, lifetime
);
5971 dev_dbg(hba
->dev
, "%s: WB buf lifetime is 0x%02X\n",
5972 __func__
, lifetime
);
5977 static bool ufshcd_wb_need_flush(struct ufs_hba
*hba
)
5983 if (!ufshcd_is_wb_allowed(hba
))
5986 if (!ufshcd_is_wb_buf_lifetime_available(hba
)) {
5987 ufshcd_wb_force_disable(hba
);
5992 * The ufs device needs the vcc to be ON to flush.
5993 * With user-space reduction enabled, it's enough to enable flush
5994 * by checking only the available buffer. The threshold
5995 * defined here is > 90% full.
5996 * With user-space preserved enabled, the current-buffer
5997 * should be checked too because the wb buffer size can reduce
5998 * when disk tends to be full. This info is provided by current
5999 * buffer (dCurrentWriteBoosterBufferSize). There's no point in
6000 * keeping vcc on when current buffer is empty.
6002 index
= ufshcd_wb_get_query_index(hba
);
6003 ret
= ufshcd_query_attr_retry(hba
, UPIU_QUERY_OPCODE_READ_ATTR
,
6004 QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE
,
6005 index
, 0, &avail_buf
);
6007 dev_warn(hba
->dev
, "%s: dAvailableWriteBoosterBufferSize read failed %d\n",
6012 if (!hba
->dev_info
.b_presrv_uspc_en
)
6013 return avail_buf
<= UFS_WB_BUF_REMAIN_PERCENT(10);
6015 return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba
, avail_buf
);
6018 static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct
*work
)
6020 struct ufs_hba
*hba
= container_of(to_delayed_work(work
),
6022 rpm_dev_flush_recheck_work
);
6024 * To prevent unnecessary VCC power drain after device finishes
6025 * WriteBooster buffer flush or Auto BKOPs, force runtime resume
6026 * after a certain delay to recheck the threshold by next runtime
6029 ufshcd_rpm_get_sync(hba
);
6030 ufshcd_rpm_put_sync(hba
);
6034 * ufshcd_exception_event_handler - handle exceptions raised by device
6035 * @work: pointer to work data
6037 * Read bExceptionEventStatus attribute from the device and handle the
6038 * exception event accordingly.
6040 static void ufshcd_exception_event_handler(struct work_struct
*work
)
6042 struct ufs_hba
*hba
;
6045 hba
= container_of(work
, struct ufs_hba
, eeh_work
);
6047 ufshcd_scsi_block_requests(hba
);
6048 err
= ufshcd_get_ee_status(hba
, &status
);
6050 dev_err(hba
->dev
, "%s: failed to get exception status %d\n",
6055 trace_ufshcd_exception_event(dev_name(hba
->dev
), status
);
6057 if (status
& hba
->ee_drv_mask
& MASK_EE_URGENT_BKOPS
)
6058 ufshcd_bkops_exception_event_handler(hba
);
6060 if (status
& hba
->ee_drv_mask
& MASK_EE_URGENT_TEMP
)
6061 ufshcd_temp_exception_event_handler(hba
, status
);
6063 ufs_debugfs_exception_event(hba
, status
);
6065 ufshcd_scsi_unblock_requests(hba
);
6068 /* Complete requests that have door-bell cleared */
6069 static void ufshcd_complete_requests(struct ufs_hba
*hba
)
6071 ufshcd_transfer_req_compl(hba
);
6072 ufshcd_tmc_handler(hba
);
6076 * ufshcd_quirk_dl_nac_errors - This function checks if error handling is
6077 * to recover from the DL NAC errors or not.
6078 * @hba: per-adapter instance
6080 * Returns true if error handling is required, false otherwise
6082 static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba
*hba
)
6084 unsigned long flags
;
6085 bool err_handling
= true;
6087 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
6089 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only workaround the
6090 * device fatal error and/or DL NAC & REPLAY timeout errors.
6092 if (hba
->saved_err
& (CONTROLLER_FATAL_ERROR
| SYSTEM_BUS_FATAL_ERROR
))
6095 if ((hba
->saved_err
& DEVICE_FATAL_ERROR
) ||
6096 ((hba
->saved_err
& UIC_ERROR
) &&
6097 (hba
->saved_uic_err
& UFSHCD_UIC_DL_TCx_REPLAY_ERROR
)))
6100 if ((hba
->saved_err
& UIC_ERROR
) &&
6101 (hba
->saved_uic_err
& UFSHCD_UIC_DL_NAC_RECEIVED_ERROR
)) {
6104 * wait for 50ms to see if we can get any other errors or not.
6106 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
6108 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
6111 * now check if we have got any other severe errors other than
6114 if ((hba
->saved_err
& INT_FATAL_ERRORS
) ||
6115 ((hba
->saved_err
& UIC_ERROR
) &&
6116 (hba
->saved_uic_err
& ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR
)))
6120 * As DL NAC is the only error received so far, send out NOP
6121 * command to confirm if link is still active or not.
6122 * - If we don't get any response then do error recovery.
6123 * - If we get response then clear the DL NAC error bit.
6126 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
6127 err
= ufshcd_verify_dev_init(hba
);
6128 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
6133 /* Link seems to be alive hence ignore the DL NAC errors */
6134 if (hba
->saved_uic_err
== UFSHCD_UIC_DL_NAC_RECEIVED_ERROR
)
6135 hba
->saved_err
&= ~UIC_ERROR
;
6136 /* clear NAC error */
6137 hba
->saved_uic_err
&= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR
;
6138 if (!hba
->saved_uic_err
)
6139 err_handling
= false;
6142 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
6143 return err_handling
;
/* host lock must be held before calling this func */
static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
{
	return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
	       (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
}

void ufshcd_schedule_eh_work(struct ufs_hba *hba)
{
	lockdep_assert_held(hba->host->host_lock);

	/* handle fatal errors only when link is not in error state */
	if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
		if (hba->force_reset || ufshcd_is_link_broken(hba) ||
		    ufshcd_is_saved_err_fatal(hba))
			hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
		else
			hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
		queue_work(hba->eh_wq, &hba->eh_work);
	}
}
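
/*
 * Usage sketch: callers hold the host lock while flagging the error state and
 * scheduling the handler, e.g.
 *
 *	spin_lock_irqsave(hba->host->host_lock, flags);
 *	hba->force_reset = true;
 *	ufshcd_schedule_eh_work(hba);
 *	spin_unlock_irqrestore(hba->host->host_lock, flags);
 */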
static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
{
	down_write(&hba->clk_scaling_lock);
	hba->clk_scaling.is_allowed = allow;
	up_write(&hba->clk_scaling_lock);
}

static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
{
	if (suspend) {
		if (hba->clk_scaling.is_enabled)
			ufshcd_suspend_clkscaling(hba);
		ufshcd_clk_scaling_allow(hba, false);
	} else {
		ufshcd_clk_scaling_allow(hba, true);
		if (hba->clk_scaling.is_enabled)
			ufshcd_resume_clkscaling(hba);
	}
}
6188 static void ufshcd_err_handling_prepare(struct ufs_hba
*hba
)
6190 ufshcd_rpm_get_sync(hba
);
6191 if (pm_runtime_status_suspended(&hba
->ufs_device_wlun
->sdev_gendev
) ||
6192 hba
->is_sys_suspended
) {
6193 enum ufs_pm_op pm_op
;
6196 * Don't assume anything of resume, if
6197 * resume fails, irq and clocks can be OFF, and powers
6198 * can be OFF or in LPM.
6200 ufshcd_setup_hba_vreg(hba
, true);
6201 ufshcd_enable_irq(hba
);
6202 ufshcd_setup_vreg(hba
, true);
6203 ufshcd_config_vreg_hpm(hba
, hba
->vreg_info
.vccq
);
6204 ufshcd_config_vreg_hpm(hba
, hba
->vreg_info
.vccq2
);
6205 ufshcd_hold(hba
, false);
6206 if (!ufshcd_is_clkgating_allowed(hba
))
6207 ufshcd_setup_clocks(hba
, true);
6208 ufshcd_release(hba
);
6209 pm_op
= hba
->is_sys_suspended
? UFS_SYSTEM_PM
: UFS_RUNTIME_PM
;
6210 ufshcd_vops_resume(hba
, pm_op
);
6212 ufshcd_hold(hba
, false);
6213 if (ufshcd_is_clkscaling_supported(hba
) &&
6214 hba
->clk_scaling
.is_enabled
)
6215 ufshcd_suspend_clkscaling(hba
);
6216 ufshcd_clk_scaling_allow(hba
, false);
6218 ufshcd_scsi_block_requests(hba
);
6219 /* Drain ufshcd_queuecommand() */
6221 cancel_work_sync(&hba
->eeh_work
);
6224 static void ufshcd_err_handling_unprepare(struct ufs_hba
*hba
)
6226 ufshcd_scsi_unblock_requests(hba
);
6227 ufshcd_release(hba
);
6228 if (ufshcd_is_clkscaling_supported(hba
))
6229 ufshcd_clk_scaling_suspend(hba
, false);
6230 ufshcd_rpm_put(hba
);
6233 static inline bool ufshcd_err_handling_should_stop(struct ufs_hba
*hba
)
6235 return (!hba
->is_powered
|| hba
->shutting_down
||
6236 !hba
->ufs_device_wlun
||
6237 hba
->ufshcd_state
== UFSHCD_STATE_ERROR
||
6238 (!(hba
->saved_err
|| hba
->saved_uic_err
|| hba
->force_reset
||
6239 ufshcd_is_link_broken(hba
))));
#ifdef CONFIG_PM
static void ufshcd_recover_pm_error(struct ufs_hba *hba)
{
	struct Scsi_Host *shost = hba->host;
	struct scsi_device *sdev;
	struct request_queue *q;
	int ret;

	hba->is_sys_suspended = false;
	/*
	 * Set RPM status of wlun device to RPM_ACTIVE,
	 * this also clears its runtime error.
	 */
	ret = pm_runtime_set_active(&hba->ufs_device_wlun->sdev_gendev);

	/* hba device might have a runtime error otherwise */
	if (!ret)
		ret = pm_runtime_set_active(hba->dev);
	/*
	 * If wlun device had runtime error, we also need to resume those
	 * consumer scsi devices in case any of them has failed to be
	 * resumed due to supplier runtime resume failure. This is to unblock
	 * blk_queue_enter in case there are bios waiting inside it.
	 */
	if (!ret) {
		shost_for_each_device(sdev, shost) {
			q = sdev->request_queue;
			if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
				       q->rpm_status == RPM_SUSPENDING))
				pm_request_resume(q->dev);
		}
	}
}
#else
static inline void ufshcd_recover_pm_error(struct ufs_hba *hba)
{
}
#endif
static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba)
{
	struct ufs_pa_layer_attr *pwr_info = &hba->pwr_info;
	u32 mode;

	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);

	if (pwr_info->pwr_rx != ((mode >> PWRMODE_RX_OFFSET) & PWRMODE_MASK))
		return true;

	if (pwr_info->pwr_tx != (mode & PWRMODE_MASK))
		return true;

	return false;
}
static bool ufshcd_abort_all(struct ufs_hba *hba)
{
	bool needs_reset = false;
	int tag, ret;

	/* Clear pending transfer requests */
	for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
		ret = ufshcd_try_to_abort_task(hba, tag);
		dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
			hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
			ret ? "failed" : "succeeded");
		if (ret) {
			needs_reset = true;
			goto out;
		}
	}

	/* Clear pending task management requests */
	for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
		if (ufshcd_clear_tm_cmd(hba, tag)) {
			needs_reset = true;
			goto out;
		}
	}

out:
	/* Complete the requests that are cleared by s/w */
	ufshcd_complete_requests(hba);

	return needs_reset;
}
/**
 * ufshcd_err_handler - handle UFS errors that require s/w attention
 * @work: pointer to work structure
 */
static void ufshcd_err_handler(struct work_struct *work)
{
	int retries = MAX_ERR_HANDLER_RETRIES;
	struct ufs_hba *hba;
	unsigned long flags;
	bool needs_restore;
	bool needs_reset;

	hba = container_of(work, struct ufs_hba, eh_work);

	dev_info(hba->dev,
		 "%s started; HBA state %s; powered %d; shutting down %d; saved_err = %d; saved_uic_err = %d; force_reset = %d%s\n",
		 __func__, ufshcd_state_name[hba->ufshcd_state],
		 hba->is_powered, hba->shutting_down, hba->saved_err,
		 hba->saved_uic_err, hba->force_reset,
		 ufshcd_is_link_broken(hba) ? "; link is broken" : "");

	down(&hba->host_sem);
	spin_lock_irqsave(hba->host->host_lock, flags);
	if (ufshcd_err_handling_should_stop(hba)) {
		if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
			hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		up(&hba->host_sem);
		return;
	}
	ufshcd_set_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_err_handling_prepare(hba);
	/* Complete requests that have door-bell cleared by h/w */
	ufshcd_complete_requests(hba);
	spin_lock_irqsave(hba->host->host_lock, flags);
again:
	needs_restore = false;
	needs_reset = false;

	if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
		hba->ufshcd_state = UFSHCD_STATE_RESET;
	/*
	 * A full reset and restore might have happened after preparation
	 * is finished, double check whether we should stop.
	 */
	if (ufshcd_err_handling_should_stop(hba))
		goto skip_err_handling;

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
		bool ret;

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		/* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
		ret = ufshcd_quirk_dl_nac_errors(hba);
		spin_lock_irqsave(hba->host->host_lock, flags);
		if (!ret && ufshcd_err_handling_should_stop(hba))
			goto skip_err_handling;
	}

	if ((hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
	    (hba->saved_uic_err &&
	     (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
		bool pr_prdt = !!(hba->saved_err & SYSTEM_BUS_FATAL_ERROR);

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		ufshcd_print_host_state(hba);
		ufshcd_print_pwr_info(hba);
		ufshcd_print_evt_hist(hba);
		ufshcd_print_tmrs(hba, hba->outstanding_tasks);
		ufshcd_print_trs(hba, hba->outstanding_reqs, pr_prdt);
		spin_lock_irqsave(hba->host->host_lock, flags);
	}

	/*
	 * if host reset is required then skip clearing the pending
	 * transfers forcefully because they will get cleared during
	 * host reset and restore
	 */
	if (hba->force_reset || ufshcd_is_link_broken(hba) ||
	    ufshcd_is_saved_err_fatal(hba) ||
	    ((hba->saved_err & UIC_ERROR) &&
	     (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
				    UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))) {
		needs_reset = true;
		goto do_reset;
	}

	/*
	 * If LINERESET was caught, UFS might have been put to PWM mode,
	 * check if power mode restore is needed.
	 */
	if (hba->saved_uic_err & UFSHCD_UIC_PA_GENERIC_ERROR) {
		hba->saved_uic_err &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
		if (!hba->saved_uic_err)
			hba->saved_err &= ~UIC_ERROR;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		if (ufshcd_is_pwr_mode_restore_needed(hba))
			needs_restore = true;
		spin_lock_irqsave(hba->host->host_lock, flags);
		if (!hba->saved_err && !needs_restore)
			goto skip_err_handling;
	}

	hba->silence_err_logs = true;
	/* release lock as clear command might sleep */
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	needs_reset = ufshcd_abort_all(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->silence_err_logs = false;
	if (needs_reset)
		goto do_reset;

	/*
	 * After all reqs and tasks are cleared from doorbell,
	 * now it is safe to restore power mode.
	 */
	if (needs_restore) {
		int pmc_err;

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		/*
		 * Hold the scaling lock just in case dev cmds
		 * are sent via bsg and/or sysfs.
		 */
		down_write(&hba->clk_scaling_lock);
		hba->force_pmc = true;
		pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
		if (pmc_err) {
			needs_reset = true;
			dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n",
					__func__, pmc_err);
		}
		hba->force_pmc = false;
		ufshcd_print_pwr_info(hba);
		up_write(&hba->clk_scaling_lock);
		spin_lock_irqsave(hba->host->host_lock, flags);
	}

do_reset:
	/* Fatal errors need reset */
	if (needs_reset) {
		int err;

		hba->force_reset = false;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		err = ufshcd_reset_and_restore(hba);
		if (err)
			dev_err(hba->dev, "%s: reset and restore failed with err %d\n",
					__func__, err);
		else
			ufshcd_recover_pm_error(hba);
		spin_lock_irqsave(hba->host->host_lock, flags);
	}

skip_err_handling:
	if (hba->ufshcd_state == UFSHCD_STATE_RESET)
		hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
	if (hba->saved_err || hba->saved_uic_err)
		dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
		    __func__, hba->saved_err, hba->saved_uic_err);

	/* Exit in an operational state or dead */
	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL &&
	    hba->ufshcd_state != UFSHCD_STATE_ERROR) {
		if (--retries)
			goto again;
		hba->ufshcd_state = UFSHCD_STATE_ERROR;
	}
	ufshcd_clear_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_err_handling_unprepare(hba);
	up(&hba->host_sem);

	dev_info(hba->dev, "%s finished; HBA state %s\n", __func__,
		 ufshcd_state_name[hba->ufshcd_state]);
}
/**
 * ufshcd_update_uic_error - check and set fatal UIC error flags.
 * @hba: per-adapter instance
 *
 * Returns
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
{
	u32 reg;
	irqreturn_t retval = IRQ_NONE;

	/* PHY layer error */
	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
	if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
	    (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
		ufshcd_update_evt_hist(hba, UFS_EVT_PA_ERR, reg);
		/*
		 * To know whether this error is fatal or not, DB timeout
		 * must be checked but this error is handled separately.
		 */
		if (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)
			dev_dbg(hba->dev, "%s: UIC Lane error reported\n",
					__func__);

		/* Got a LINERESET indication. */
		if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) {
			struct uic_command *cmd = NULL;

			hba->uic_error |= UFSHCD_UIC_PA_GENERIC_ERROR;
			if (hba->uic_async_done && hba->active_uic_cmd)
				cmd = hba->active_uic_cmd;
			/*
			 * Ignore the LINERESET during power mode change
			 * operation via DME_SET command.
			 */
			if (cmd && (cmd->command == UIC_CMD_DME_SET))
				hba->uic_error &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
		}
		retval |= IRQ_HANDLED;
	}

	/* PA_INIT_ERROR is fatal and needs UIC reset */
	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
	if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
	    (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
		ufshcd_update_evt_hist(hba, UFS_EVT_DL_ERR, reg);

		if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
			hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
		else if (hba->dev_quirks &
				UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
			if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
				hba->uic_error |=
					UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
			else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
				hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
		}
		retval |= IRQ_HANDLED;
	}

	/* UIC NL/TL/DME errors needs software retry */
	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
	if ((reg & UIC_NETWORK_LAYER_ERROR) &&
	    (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
		ufshcd_update_evt_hist(hba, UFS_EVT_NL_ERR, reg);
		hba->uic_error |= UFSHCD_UIC_NL_ERROR;
		retval |= IRQ_HANDLED;
	}

	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
	if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
	    (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
		ufshcd_update_evt_hist(hba, UFS_EVT_TL_ERR, reg);
		hba->uic_error |= UFSHCD_UIC_TL_ERROR;
		retval |= IRQ_HANDLED;
	}

	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
	if ((reg & UIC_DME_ERROR) &&
	    (reg & UIC_DME_ERROR_CODE_MASK)) {
		ufshcd_update_evt_hist(hba, UFS_EVT_DME_ERR, reg);
		hba->uic_error |= UFSHCD_UIC_DME_ERROR;
		retval |= IRQ_HANDLED;
	}

	dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
			__func__, hba->uic_error);
	return retval;
}
/**
 * ufshcd_check_errors - Check for errors that need s/w attention
 * @hba: per-adapter instance
 * @intr_status: interrupt status generated by the controller
 *
 * Returns
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
{
	bool queue_eh_work = false;
	irqreturn_t retval = IRQ_NONE;

	spin_lock(hba->host->host_lock);
	hba->errors |= UFSHCD_ERROR_MASK & intr_status;

	if (hba->errors & INT_FATAL_ERRORS) {
		ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR,
				       hba->errors);
		queue_eh_work = true;
	}

	if (hba->errors & UIC_ERROR) {
		hba->uic_error = 0;
		retval = ufshcd_update_uic_error(hba);
		if (hba->uic_error)
			queue_eh_work = true;
	}

	if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) {
		dev_err(hba->dev,
			"%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
			__func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
			"Enter" : "Exit",
			hba->errors, ufshcd_get_upmcrs(hba));
		ufshcd_update_evt_hist(hba, UFS_EVT_AUTO_HIBERN8_ERR,
				       hba->errors);
		ufshcd_set_link_broken(hba);
		queue_eh_work = true;
	}

	if (queue_eh_work) {
		/*
		 * update the transfer error masks to sticky bits, let's do this
		 * irrespective of current ufshcd_state.
		 */
		hba->saved_err |= hba->errors;
		hba->saved_uic_err |= hba->uic_error;

		/* dump controller state before resetting */
		if ((hba->saved_err &
		     (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
		    (hba->saved_uic_err &&
		     (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
			dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
					__func__, hba->saved_err,
					hba->saved_uic_err);
			ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE,
					 "host_regs: ");
			ufshcd_print_pwr_info(hba);
		}
		ufshcd_schedule_eh_work(hba);
		retval |= IRQ_HANDLED;
	}
	/*
	 * if (!queue_eh_work) -
	 * Other errors are either non-fatal where host recovers
	 * itself without s/w intervention or errors that will be
	 * handled by the SCSI core layer.
	 */
	hba->errors = 0;
	hba->uic_error = 0;
	spin_unlock(hba->host->host_lock);
	return retval;
}
/**
 * ufshcd_tmc_handler - handle task management function completion
 * @hba: per adapter instance
 *
 * Returns
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
{
	unsigned long flags, pending, issued;
	irqreturn_t ret = IRQ_NONE;
	int tag;

	spin_lock_irqsave(hba->host->host_lock, flags);
	pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
	issued = hba->outstanding_tasks & ~pending;
	for_each_set_bit(tag, &issued, hba->nutmrs) {
		struct request *req = hba->tmf_rqs[tag];
		struct completion *c = req->end_io_data;

		complete(c);
		ret = IRQ_HANDLED;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return ret;
}
/**
 * ufshcd_handle_mcq_cq_events - handle MCQ completion queue events
 * @hba: per adapter instance
 *
 * Returns IRQ_HANDLED if interrupt is handled
 */
static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba)
{
	struct ufs_hw_queue *hwq;
	unsigned long outstanding_cqs;
	unsigned int nr_queues;
	int i, ret;
	u32 events;

	ret = ufshcd_vops_get_outstanding_cqs(hba, &outstanding_cqs);
	if (ret)
		outstanding_cqs = (1U << hba->nr_hw_queues) - 1;

	/* Exclude the poll queues */
	nr_queues = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
	for_each_set_bit(i, &outstanding_cqs, nr_queues) {
		hwq = &hba->uhq[i];

		events = ufshcd_mcq_read_cqis(hba, i);
		if (events)
			ufshcd_mcq_write_cqis(hba, events, i);

		if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
			ufshcd_mcq_poll_cqe_nolock(hba, hwq);
	}

	return IRQ_HANDLED;
}
/**
 * ufshcd_sl_intr - Interrupt service routine
 * @hba: per adapter instance
 * @intr_status: contains interrupts generated by the controller
 *
 * Returns
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
{
	irqreturn_t retval = IRQ_NONE;

	if (intr_status & UFSHCD_UIC_MASK)
		retval |= ufshcd_uic_cmd_compl(hba, intr_status);

	if (intr_status & UFSHCD_ERROR_MASK || hba->errors)
		retval |= ufshcd_check_errors(hba, intr_status);

	if (intr_status & UTP_TASK_REQ_COMPL)
		retval |= ufshcd_tmc_handler(hba);

	if (intr_status & UTP_TRANSFER_REQ_COMPL)
		retval |= ufshcd_transfer_req_compl(hba);

	if (intr_status & MCQ_CQ_EVENT_STATUS)
		retval |= ufshcd_handle_mcq_cq_events(hba);

	return retval;
}
/**
 * ufshcd_intr - Main interrupt service routine
 * @irq: irq number
 * @__hba: pointer to adapter instance
 *
 * Returns
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_intr(int irq, void *__hba)
{
	u32 intr_status, enabled_intr_status = 0;
	irqreturn_t retval = IRQ_NONE;
	struct ufs_hba *hba = __hba;
	int retries = hba->nutrs;

	intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
	hba->ufs_stats.last_intr_status = intr_status;
	hba->ufs_stats.last_intr_ts = local_clock();

	/*
	 * There could be max of hba->nutrs reqs in flight and in worst case
	 * if the reqs get finished 1 by 1 after the interrupt status is
	 * read, make sure we handle them by checking the interrupt status
	 * again in a loop until we process all of the reqs before returning.
	 */
	while (intr_status && retries--) {
		enabled_intr_status =
			intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
		ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
		if (enabled_intr_status)
			retval |= ufshcd_sl_intr(hba, enabled_intr_status);

		intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
	}

	if (enabled_intr_status && retval == IRQ_NONE &&
	    (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL) ||
	     hba->outstanding_reqs) && !ufshcd_eh_in_progress(hba)) {
		dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
					__func__,
					intr_status,
					hba->ufs_stats.last_intr_status,
					enabled_intr_status);
		ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
	}

	return retval;
}
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
{
	int err = 0;
	u32 mask = 1 << tag;
	unsigned long flags;

	if (!test_bit(tag, &hba->outstanding_tasks))
		goto out;

	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_utmrl_clear(hba, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* poll for max. 1 sec to clear door bell register by h/w */
	err = ufshcd_wait_for_register(hba,
			REG_UTP_TASK_REQ_DOOR_BELL,
			mask, 0, 1000, 1000);

	dev_err(hba->dev, "Clearing task management function with tag %d %s\n",
		tag, err ? "failed" : "succeeded");

out:
	return err;
}
static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
		struct utp_task_req_desc *treq, u8 tm_function)
{
	struct request_queue *q = hba->tmf_queue;
	struct Scsi_Host *host = hba->host;
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request *req;
	unsigned long flags;
	int task_tag, err;

	/*
	 * blk_mq_alloc_request() is used here only to get a free tag.
	 */
	req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->end_io_data = &wait;
	ufshcd_hold(hba, false);

	spin_lock_irqsave(host->host_lock, flags);

	task_tag = req->tag;
	WARN_ONCE(task_tag < 0 || task_tag >= hba->nutmrs, "Invalid tag %d\n",
		  task_tag);
	hba->tmf_rqs[req->tag] = req;
	treq->upiu_req.req_header.dword_0 |= cpu_to_be32(task_tag);

	memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
	ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function);

	/* send command to the controller */
	__set_bit(task_tag, &hba->outstanding_tasks);

	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
	/* Make sure that doorbell is committed immediately */
	wmb();

	spin_unlock_irqrestore(host->host_lock, flags);

	ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_SEND);

	/* wait until the task management command is completed */
	err = wait_for_completion_io_timeout(&wait,
			msecs_to_jiffies(TM_CMD_TIMEOUT));
	if (!err) {
		ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR);
		dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
				__func__, tm_function);
		if (ufshcd_clear_tm_cmd(hba, task_tag))
			dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
					__func__, task_tag);
		err = -ETIMEDOUT;
	} else {
		err = 0;
		memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq));

		ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_COMP);
	}

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->tmf_rqs[req->tag] = NULL;
	__clear_bit(task_tag, &hba->outstanding_tasks);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	ufshcd_release(hba);
	blk_mq_free_request(req);

	return err;
}
/**
 * ufshcd_issue_tm_cmd - issues task management commands to controller
 * @hba: per adapter instance
 * @lun_id: LUN ID to which TM command is sent
 * @task_id: task ID to which the TM command is applicable
 * @tm_function: task management function opcode
 * @tm_response: task management service response return value
 *
 * Returns non-zero value on error, zero on success.
 */
static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
		u8 tm_function, u8 *tm_response)
{
	struct utp_task_req_desc treq = { { 0 }, };
	enum utp_ocs ocs_value;
	int err;

	/* Configure task request descriptor */
	treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
	treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);

	/* Configure task request UPIU */
	treq.upiu_req.req_header.dword_0 = cpu_to_be32(lun_id << 8) |
				  cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24);
	treq.upiu_req.req_header.dword_1 = cpu_to_be32(tm_function << 16);

	/*
	 * The host shall provide the same value for LUN field in the basic
	 * header and for Input Parameter.
	 */
	treq.upiu_req.input_param1 = cpu_to_be32(lun_id);
	treq.upiu_req.input_param2 = cpu_to_be32(task_id);

	err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
	if (err == -ETIMEDOUT)
		return err;

	ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
	if (ocs_value != OCS_SUCCESS)
		dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
				__func__, ocs_value);
	else if (tm_response)
		*tm_response = be32_to_cpu(treq.upiu_rsp.output_param1) &
				MASK_TM_SERVICE_RESP;
	return err;
}
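/*
 * Usage sketch (illustrative, not additional driver logic): a caller such as
 * the LU reset handler further below builds a task management request from a
 * LUN and function opcode and then checks the service response:
 *
 *	u8 resp = 0xF;
 *	int err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
 *
 *	if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL)
 *		... reset accepted by the device ...
 */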
/**
 * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
 * @hba:	per-adapter instance
 * @req_upiu:	upiu request
 * @rsp_upiu:	upiu reply
 * @desc_buff:	pointer to descriptor buffer, NULL if NA
 * @buff_len:	descriptor size, 0 if NA
 * @cmd_type:	specifies the type (NOP, Query...)
 * @desc_op:	descriptor operation
 *
 * These types of requests use the UTP Transfer Request Descriptor (utrd).
 * Therefore, they "ride" the device management infrastructure: they use its
 * tag and task work queues.
 *
 * Since there is only one available tag for device management commands,
 * the caller is expected to hold the hba->dev_cmd.lock mutex.
 */
static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
					struct utp_upiu_req *req_upiu,
					struct utp_upiu_req *rsp_upiu,
					u8 *desc_buff, int *buff_len,
					enum dev_cmd_type cmd_type,
					enum query_opcode desc_op)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	const u32 tag = hba->reserved_slot;
	struct ufshcd_lrb *lrbp;
	int err = 0;
	u8 upiu_flags;

	/* Protects use of hba->reserved_slot. */
	lockdep_assert_held(&hba->dev_cmd.lock);

	down_read(&hba->clk_scaling_lock);

	lrbp = &hba->lrb[tag];
	lrbp->cmd = NULL;
	lrbp->task_tag = tag;
	lrbp->lun = 0;
	lrbp->intr_cmd = true;
	ufshcd_prepare_lrbp_crypto(NULL, lrbp);
	hba->dev_cmd.type = cmd_type;

	if (hba->ufs_version <= ufshci_version(1, 1))
		lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
	else
		lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;

	/* update the task tag in the request upiu */
	req_upiu->header.dword_0 |= cpu_to_be32(tag);

	ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0);

	/* just copy the upiu request as it is */
	memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
	if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
		/* The Data Segment Area is optional depending upon the query
		 * function value. for WRITE DESCRIPTOR, the data segment
		 * follows right after the tsf.
		 */
		memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
	}

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));

	hba->dev_cmd.complete = &wait;

	ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);

	ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
	/*
	 * ignore the returning value here - ufshcd_check_query_response is
	 * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
	 * read the response directly ignoring all errors.
	 */
	ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);

	/* just copy the upiu response as it is */
	memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
	if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
		u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu);
		u16 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
			       MASK_QUERY_DATA_SEG_LEN;

		if (*buff_len >= resp_len) {
			memcpy(desc_buff, descp, resp_len);
			*buff_len = resp_len;
		} else {
			dev_warn(hba->dev,
				 "%s: rsp size %d is bigger than buffer size %d",
				 __func__, resp_len, *buff_len);
			err = -EINVAL;
		}
	}
	ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
				    (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);

	up_read(&hba->clk_scaling_lock);
	return err;
}
/**
 * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
 * @hba:	per-adapter instance
 * @req_upiu:	upiu request
 * @rsp_upiu:	upiu reply - only 8 DW as we do not support scsi commands
 * @msgcode:	message code, one of UPIU Transaction Codes Initiator to Target
 * @desc_buff:	pointer to descriptor buffer, NULL if NA
 * @buff_len:	descriptor size, 0 if NA
 * @desc_op:	descriptor operation
 *
 * Supports UTP Transfer requests (nop and query), and UTP Task
 * Management requests.
 * It is up to the caller to fill the upiu content properly, as it will
 * be copied without any further input validations.
 */
int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
			     struct utp_upiu_req *req_upiu,
			     struct utp_upiu_req *rsp_upiu,
			     enum upiu_request_transaction msgcode,
			     u8 *desc_buff, int *buff_len,
			     enum query_opcode desc_op)
{
	int err;
	enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
	struct utp_task_req_desc treq = { { 0 }, };
	enum utp_ocs ocs_value;
	u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;

	switch (msgcode) {
	case UPIU_TRANSACTION_NOP_OUT:
		cmd_type = DEV_CMD_TYPE_NOP;
		fallthrough;
	case UPIU_TRANSACTION_QUERY_REQ:
		ufshcd_hold(hba, false);
		mutex_lock(&hba->dev_cmd.lock);
		err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
						   desc_buff, buff_len,
						   cmd_type, desc_op);
		mutex_unlock(&hba->dev_cmd.lock);
		ufshcd_release(hba);

		break;
	case UPIU_TRANSACTION_TASK_REQ:
		treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
		treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);

		memcpy(&treq.upiu_req, req_upiu, sizeof(*req_upiu));

		err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
		if (err == -ETIMEDOUT)
			break;

		ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
		if (ocs_value != OCS_SUCCESS) {
			dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
				ocs_value);
			break;
		}

		memcpy(rsp_upiu, &treq.upiu_rsp, sizeof(*rsp_upiu));

		break;
	default:
		err = -EINVAL;

		break;
	}

	return err;
}
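/*
 * Usage sketch (illustrative only; the in-kernel caller is the UFS BSG
 * layer): a NOP OUT sent through ufshcd_exec_raw_upiu_cmd() only needs the
 * transaction type in the UPIU header, the core fills in the task tag.
 *
 *	struct utp_upiu_req req = {}, rsp = {};
 *	int err;
 *
 *	req.header.dword_0 = cpu_to_be32(UPIU_TRANSACTION_NOP_OUT << 24);
 *	err = ufshcd_exec_raw_upiu_cmd(hba, &req, &rsp,
 *				       UPIU_TRANSACTION_NOP_OUT,
 *				       NULL, NULL, UPIU_QUERY_OPCODE_NOP);
 */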
/**
 * ufshcd_advanced_rpmb_req_handler - handle advanced RPMB request
 * @hba:	per adapter instance
 * @req_upiu:	upiu request
 * @rsp_upiu:	upiu reply
 * @req_ehs:	EHS field which contains Advanced RPMB Request Message
 * @rsp_ehs:	EHS field which returns Advanced RPMB Response Message
 * @sg_cnt:	The number of sg lists actually used
 * @sg_list:	Pointer to SG list when DATA IN/OUT UPIU is required in ARPMB operation
 * @dir:	DMA direction
 *
 * Returns zero on success, non-zero on failure
 */
int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu,
			 struct utp_upiu_req *rsp_upiu, struct ufs_ehs *req_ehs,
			 struct ufs_ehs *rsp_ehs, int sg_cnt, struct scatterlist *sg_list,
			 enum dma_data_direction dir)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	const u32 tag = hba->reserved_slot;
	struct ufshcd_lrb *lrbp;
	int err = 0;
	int result;
	u8 upiu_flags;
	u8 *ehs_data;
	u16 ehs_len;

	/* Protects use of hba->reserved_slot. */
	ufshcd_hold(hba, false);
	mutex_lock(&hba->dev_cmd.lock);
	down_read(&hba->clk_scaling_lock);

	lrbp = &hba->lrb[tag];
	lrbp->cmd = NULL;
	lrbp->task_tag = tag;
	lrbp->lun = UFS_UPIU_RPMB_WLUN;

	lrbp->intr_cmd = true;
	ufshcd_prepare_lrbp_crypto(NULL, lrbp);
	hba->dev_cmd.type = DEV_CMD_TYPE_RPMB;

	/* Advanced RPMB starts from UFS 4.0, so its command type is UTP_CMD_TYPE_UFS_STORAGE */
	lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;

	ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 2);

	/* update the task tag and LUN in the request upiu */
	req_upiu->header.dword_0 |= cpu_to_be32(upiu_flags << 16 | UFS_UPIU_RPMB_WLUN << 8 | tag);

	/* copy the UPIU(contains CDB) request as it is */
	memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
	/* Copy EHS, starting with byte32, immediately after the CDB package */
	memcpy(lrbp->ucd_req_ptr + 1, req_ehs, sizeof(*req_ehs));

	if (dir != DMA_NONE && sg_list)
		ufshcd_sgl_to_prdt(hba, lrbp, sg_cnt, sg_list);

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));

	hba->dev_cmd.complete = &wait;

	ufshcd_send_command(hba, tag, hba->dev_cmd_queue);

	err = ufshcd_wait_for_dev_cmd(hba, lrbp, ADVANCED_RPMB_REQ_TIMEOUT);

	if (!err) {
		/* Just copy the upiu response as it is */
		memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
		/* Get the response UPIU result */
		result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);

		ehs_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) >> 24;
		/*
		 * Since the bLength in EHS indicates the total size of the EHS Header and EHS Data
		 * in 32 Byte units, the value of the bLength Request/Response for Advanced RPMB
		 * Message is 02h.
		 */
		if (ehs_len == 2 && rsp_ehs) {
			/*
			 * ucd_rsp_ptr points to a buffer with a length of 512 bytes
			 * (ALIGNED_UPIU_SIZE = 512), and the EHS data just starts from byte32
			 */
			ehs_data = (u8 *)lrbp->ucd_rsp_ptr + EHS_OFFSET_IN_RESPONSE;
			memcpy(rsp_ehs, ehs_data, ehs_len * 32);
		}
	}

	up_read(&hba->clk_scaling_lock);
	mutex_unlock(&hba->dev_cmd.lock);
	ufshcd_release(hba);
	return err ? : result;
}
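/*
 * Worked example for the EHS handling above (illustrative): bLength is
 * expressed in 32-byte units, so a response reporting ehs_len == 2 carries
 * 2 * 32 = 64 bytes of EHS header plus data. The EHS starts at byte 32
 * (EHS_OFFSET_IN_RESPONSE) of the 512-byte response buffer, which is why the
 * memcpy() above copies ehs_len * 32 bytes from that offset.
 */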
/**
 * ufshcd_eh_device_reset_handler() - Reset a single logical unit.
 * @cmd: SCSI command pointer
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
	unsigned long flags, pending_reqs = 0, not_cleared = 0;
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	u32 pos;
	u8 resp = 0xF, lun;
	int err;

	host = cmd->device->host;
	hba = shost_priv(host);

	lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
	err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
		if (!err)
			err = resp;
		goto out;
	}

	/* clear the commands that were pending for corresponding LUN */
	spin_lock_irqsave(&hba->outstanding_lock, flags);
	for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs)
		if (hba->lrb[pos].lun == lun)
			__set_bit(pos, &pending_reqs);
	hba->outstanding_reqs &= ~pending_reqs;
	spin_unlock_irqrestore(&hba->outstanding_lock, flags);

	if (ufshcd_clear_cmds(hba, pending_reqs) < 0) {
		spin_lock_irqsave(&hba->outstanding_lock, flags);
		not_cleared = pending_reqs &
			ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
		hba->outstanding_reqs |= not_cleared;
		spin_unlock_irqrestore(&hba->outstanding_lock, flags);

		dev_err(hba->dev, "%s: failed to clear requests %#lx\n",
			__func__, not_cleared);
	}
	__ufshcd_transfer_req_compl(hba, pending_reqs & ~not_cleared);

out:
	hba->req_abort_count = 0;
	ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, (u32)err);
	if (!err) {
		err = SUCCESS;
	} else {
		dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
		err = FAILED;
	}
	return err;
}
static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
{
	struct ufshcd_lrb *lrbp;
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutrs) {
		lrbp = &hba->lrb[tag];
		lrbp->req_abort_skip = true;
	}
}
/**
 * ufshcd_try_to_abort_task - abort a specific task
 * @hba: Pointer to adapter instance
 * @tag: Task tag/index to be aborted
 *
 * Abort the pending command in device by sending UFS_ABORT_TASK task management
 * command, and in host controller by clearing the door-bell register. There can
 * be race between controller sending the command to the device while abort is
 * issued. To avoid that, first issue UFS_QUERY_TASK to check if the command is
 * really issued and then try to abort it.
 *
 * Returns zero on success, non-zero on failure
 */
static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
{
	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
	int err = 0;
	int poll_cnt;
	u8 resp = 0xF;
	u32 reg;

	for (poll_cnt = 100; poll_cnt; poll_cnt--) {
		err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
				UFS_QUERY_TASK, &resp);
		if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
			/* cmd pending in the device */
			dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
				__func__, tag);
			break;
		} else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
			/*
			 * cmd not pending in the device, check if it is
			 * in transition.
			 */
			dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
				__func__, tag);
			reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
			if (reg & (1 << tag)) {
				/* sleep for max. 200us to stabilize */
				usleep_range(100, 200);
				continue;
			}
			/* command completed already */
			dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
				__func__, tag);
			goto out;
		} else {
			dev_err(hba->dev,
				"%s: no response from device. tag = %d, err %d\n",
				__func__, tag, err);
			if (!err)
				err = resp; /* service response error */
			goto out;
		}
	}

	if (!poll_cnt) {
		err = -EBUSY;
		goto out;
	}

	err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
			UFS_ABORT_TASK, &resp);
	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
		if (!err) {
			err = resp; /* service response error */
			dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
				__func__, tag, err);
		}
		goto out;
	}

	err = ufshcd_clear_cmds(hba, 1U << tag);
	if (err)
		dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
			__func__, tag, err);

out:
	return err;
}
/**
 * ufshcd_abort - scsi host template eh_abort_handler callback
 * @cmd: SCSI command pointer
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_abort(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct ufs_hba *hba = shost_priv(host);
	int tag = scsi_cmd_to_rq(cmd)->tag;
	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
	unsigned long flags;
	int err = FAILED;
	bool outstanding;
	u32 reg;

	WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);

	ufshcd_hold(hba, false);
	reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	/* If command is already aborted/completed, return FAILED. */
	if (!(test_bit(tag, &hba->outstanding_reqs))) {
		dev_err(hba->dev,
			"%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
			__func__, tag, hba->outstanding_reqs, reg);
		goto release;
	}

	/* Print Transfer Request of aborted task */
	dev_info(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);

	/*
	 * Print detailed info about aborted request.
	 * As more than one request might get aborted at the same time,
	 * print full information only for the first aborted request in order
	 * to reduce repeated printouts. For other aborted requests only print
	 * basic details.
	 */
	scsi_print_command(cmd);
	if (!hba->req_abort_count) {
		ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, tag);
		ufshcd_print_evt_hist(hba);
		ufshcd_print_host_state(hba);
		ufshcd_print_pwr_info(hba);
		ufshcd_print_trs(hba, 1 << tag, true);
	} else {
		ufshcd_print_trs(hba, 1 << tag, false);
	}
	hba->req_abort_count++;

	if (!(reg & (1 << tag))) {
		dev_err(hba->dev,
		"%s: cmd was completed, but without a notifying intr, tag = %d",
		__func__, tag);
		__ufshcd_transfer_req_compl(hba, 1UL << tag);
		goto release;
	}

	/*
	 * Task abort to the device W-LUN is illegal. When this command
	 * will fail, due to spec violation, scsi err handling next step
	 * will be to send LU reset which, again, is a spec violation.
	 * To avoid these unnecessary/illegal steps, first we clean up
	 * the lrb taken by this cmd and re-set it in outstanding_reqs,
	 * then queue the eh_work and bail.
	 */
	if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
		ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);

		spin_lock_irqsave(host->host_lock, flags);
		hba->force_reset = true;
		ufshcd_schedule_eh_work(hba);
		spin_unlock_irqrestore(host->host_lock, flags);
		goto release;
	}

	/* Skip task abort in case previous aborts failed and report failure */
	if (lrbp->req_abort_skip) {
		dev_err(hba->dev, "%s: skipping abort\n", __func__);
		ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
		goto release;
	}

	err = ufshcd_try_to_abort_task(hba, tag);
	if (err) {
		dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
		ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
		err = FAILED;
		goto release;
	}

	/*
	 * Clear the corresponding bit from outstanding_reqs since the command
	 * has been aborted successfully.
	 */
	spin_lock_irqsave(&hba->outstanding_lock, flags);
	outstanding = __test_and_clear_bit(tag, &hba->outstanding_reqs);
	spin_unlock_irqrestore(&hba->outstanding_lock, flags);

	if (outstanding)
		ufshcd_release_scsi_cmd(hba, lrbp);

	err = SUCCESS;

release:
	/* Matches the ufshcd_hold() call at the start of this function. */
	ufshcd_release(hba);
	return err;
}
/**
 * ufshcd_host_reset_and_restore - reset and restore host controller
 * @hba: per-adapter instance
 *
 * Note that host controller reset may issue DME_RESET to
 * local and remote (device) Uni-Pro stack and the attributes
 * are reset to default state.
 *
 * Returns zero on success, non-zero on failure
 */
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
{
	int err;

	/*
	 * Stop the host controller and complete the requests
	 * cleared by h/w
	 */
	ufshpb_toggle_state(hba, HPB_PRESENT, HPB_RESET);
	ufshcd_hba_stop(hba);
	hba->silence_err_logs = true;
	ufshcd_complete_requests(hba);
	hba->silence_err_logs = false;

	/* scale up clocks to max frequency before full reinitialization */
	ufshcd_scale_clks(hba, true);

	err = ufshcd_hba_enable(hba);

	/* Establish the link again and restore the device */
	if (!err)
		err = ufshcd_probe_hba(hba, false);

	if (err)
		dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
	ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err);
	return err;
}
/**
 * ufshcd_reset_and_restore - reset and re-initialize host/device
 * @hba: per-adapter instance
 *
 * Reset and recover device, host and re-establish link. This
 * is helpful to recover the communication in fatal error conditions.
 *
 * Returns zero on success, non-zero on failure
 */
static int ufshcd_reset_and_restore(struct ufs_hba *hba)
{
	u32 saved_err = 0;
	u32 saved_uic_err = 0;
	int err = 0;
	unsigned long flags;
	int retries = MAX_HOST_RESET_RETRIES;

	spin_lock_irqsave(hba->host->host_lock, flags);
	do {
		/*
		 * This is a fresh start, cache and clear saved error first,
		 * in case new error generated during reset and restore.
		 */
		saved_err |= hba->saved_err;
		saved_uic_err |= hba->saved_uic_err;
		hba->saved_err = 0;
		hba->saved_uic_err = 0;
		hba->force_reset = false;
		hba->ufshcd_state = UFSHCD_STATE_RESET;
		spin_unlock_irqrestore(hba->host->host_lock, flags);

		/* Reset the attached device */
		ufshcd_device_reset(hba);

		err = ufshcd_host_reset_and_restore(hba);

		spin_lock_irqsave(hba->host->host_lock, flags);
		if (err)
			continue;
		/* Do not exit unless operational or dead */
		if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL &&
		    hba->ufshcd_state != UFSHCD_STATE_ERROR &&
		    hba->ufshcd_state != UFSHCD_STATE_EH_SCHEDULED_NON_FATAL)
			err = -EAGAIN;
	} while (err && --retries);

	/*
	 * Inform scsi mid-layer that we did reset and allow to handle
	 * Unit Attention properly.
	 */
	scsi_report_bus_reset(hba->host, 0);
	if (err) {
		hba->ufshcd_state = UFSHCD_STATE_ERROR;
		hba->saved_err |= saved_err;
		hba->saved_uic_err |= saved_uic_err;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return err;
}
/**
 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
 * @cmd: SCSI command pointer
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	int err = SUCCESS;
	unsigned long flags;
	struct ufs_hba *hba;

	hba = shost_priv(cmd->device->host);

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->force_reset = true;
	ufshcd_schedule_eh_work(hba);
	dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	flush_work(&hba->eh_work);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->ufshcd_state == UFSHCD_STATE_ERROR)
		err = FAILED;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return err;
}
/**
 * ufshcd_get_max_icc_level - calculate the ICC level
 * @sup_curr_uA: max. current supported by the regulator
 * @start_scan: row at the desc table to start scan from
 * @buff: power descriptor buffer
 *
 * Returns calculated max ICC level for specific regulator
 */
static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan,
				    const char *buff)
{
	int i;
	int curr_uA;
	u16 data;
	u16 unit;

	for (i = start_scan; i >= 0; i--) {
		data = get_unaligned_be16(&buff[2 * i]);
		unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
						ATTR_ICC_LVL_UNIT_OFFSET;
		curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
		switch (unit) {
		case UFSHCD_NANO_AMP:
			curr_uA = curr_uA / 1000;
			break;
		case UFSHCD_MILI_AMP:
			curr_uA = curr_uA * 1000;
			break;
		case UFSHCD_AMP:
			curr_uA = curr_uA * 1000 * 1000;
			break;
		case UFSHCD_MICRO_AMP:
		default:
			break;
		}
		if (sup_curr_uA >= curr_uA)
			break;
	}
	if (i < 0) {
		i = 0;
		pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
	}

	return (u32)i;
}
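/*
 * Worked example for the scan above (illustrative): if a descriptor row
 * decodes to unit UFSHCD_MILI_AMP with a value field of 100, that row
 * represents 100 * 1000 = 100000 uA. Scanning from start_scan downwards,
 * the first row whose current fits within sup_curr_uA terminates the loop
 * and its index is returned as the ICC level.
 */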
/**
 * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level
 * In case regulators are not initialized we'll return 0
 * @hba: per-adapter instance
 * @desc_buf: power descriptor buffer to extract ICC levels from.
 *
 * Returns calculated ICC level
 */
static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
						const u8 *desc_buf)
{
	u32 icc_level = 0;

	if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
	    !hba->vreg_info.vccq2) {
		/*
		 * Using dev_dbg to avoid messages during runtime PM to avoid
		 * never-ending cycles of messages written back to storage by
		 * user space causing runtime resume, causing more messages and
		 * so on.
		 */
		dev_dbg(hba->dev,
			"%s: Regulator capability was not set, actvIccLevel=%d",
			__func__, icc_level);
		goto out;
	}

	if (hba->vreg_info.vcc->max_uA)
		icc_level = ufshcd_get_max_icc_level(
				hba->vreg_info.vcc->max_uA,
				POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);

	if (hba->vreg_info.vccq->max_uA)
		icc_level = ufshcd_get_max_icc_level(
				hba->vreg_info.vccq->max_uA,
				icc_level,
				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);

	if (hba->vreg_info.vccq2->max_uA)
		icc_level = ufshcd_get_max_icc_level(
				hba->vreg_info.vccq2->max_uA,
				icc_level,
				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
out:
	return icc_level;
}
static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
{
	int ret;
	u32 icc_level;
	u8 *desc_buf;

	desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
	if (!desc_buf)
		return;

	ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0,
				     desc_buf, QUERY_DESC_MAX_SIZE);
	if (ret) {
		dev_err(hba->dev,
			"%s: Failed reading power descriptor ret = %d",
			__func__, ret);
		goto out;
	}

	icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf);
	dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level);

	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
		QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);

	if (ret)
		dev_err(hba->dev,
			"%s: Failed configuring bActiveICCLevel = %d ret = %d",
			__func__, icc_level, ret);

out:
	kfree(desc_buf);
}
static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev)
{
	scsi_autopm_get_device(sdev);
	blk_pm_runtime_init(sdev->request_queue, &sdev->sdev_gendev);
	if (sdev->rpm_autosuspend)
		pm_runtime_set_autosuspend_delay(&sdev->sdev_gendev,
						 RPM_AUTOSUSPEND_DELAY_MS);
	scsi_autopm_put_device(sdev);
}
/**
 * ufshcd_scsi_add_wlus - Adds required W-LUs
 * @hba: per-adapter instance
 *
 * UFS device specification requires the UFS devices to support 4 well known
 * logical units:
 *	"REPORT_LUNS" (address: 01h)
 *	"UFS Device" (address: 50h)
 *	"RPMB" (address: 44h)
 *	"BOOT" (address: 30h)
 * UFS device's power management needs to be controlled by "POWER CONDITION"
 * field of SSU (START STOP UNIT) command. But this "power condition" field
 * will take effect only when its sent to "UFS device" well known logical unit
 * hence we require the scsi_device instance to represent this logical unit in
 * order for the UFS host driver to send the SSU command for power management.
 *
 * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
 * Block) LU so user space process can control this LU. User space may also
 * want to have access to BOOT LU.
 *
 * This function adds scsi device instances for each of all well known LUs
 * (except "REPORT LUNS" LU).
 *
 * Returns zero on success (all required W-LUs are added successfully),
 * non-zero error value on failure (if failed to add any of the required W-LU).
 */
static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
{
	int ret = 0;
	struct scsi_device *sdev_boot, *sdev_rpmb;

	hba->ufs_device_wlun = __scsi_add_device(hba->host, 0, 0,
		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
	if (IS_ERR(hba->ufs_device_wlun)) {
		ret = PTR_ERR(hba->ufs_device_wlun);
		hba->ufs_device_wlun = NULL;
		goto out;
	}
	scsi_device_put(hba->ufs_device_wlun);

	sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
	if (IS_ERR(sdev_rpmb)) {
		ret = PTR_ERR(sdev_rpmb);
		goto remove_ufs_device_wlun;
	}
	ufshcd_blk_pm_runtime_init(sdev_rpmb);
	scsi_device_put(sdev_rpmb);

	sdev_boot = __scsi_add_device(hba->host, 0, 0,
		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
	if (IS_ERR(sdev_boot)) {
		dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
	} else {
		ufshcd_blk_pm_runtime_init(sdev_boot);
		scsi_device_put(sdev_boot);
	}
	goto out;

remove_ufs_device_wlun:
	scsi_remove_device(hba->ufs_device_wlun);
out:
	return ret;
}
static void ufshcd_wb_probe(struct ufs_hba *hba, const u8 *desc_buf)
{
	struct ufs_dev_info *dev_info = &hba->dev_info;
	u8 lun;
	u32 d_lu_wb_buf_alloc;
	u32 ext_ufs_feature;

	if (!ufshcd_is_wb_allowed(hba))
		return;

	/*
	 * Probe WB only for UFS-2.2 and UFS-3.1 (and later) devices or
	 * UFS devices with quirk UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES
	 * enabled
	 */
	if (!(dev_info->wspecversion >= 0x310 ||
	      dev_info->wspecversion == 0x220 ||
	      (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES)))
		goto wb_disabled;

	ext_ufs_feature = get_unaligned_be32(desc_buf +
					DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);

	if (!(ext_ufs_feature & UFS_DEV_WRITE_BOOSTER_SUP))
		goto wb_disabled;

	/*
	 * WB may be supported but not configured while provisioning. The spec
	 * says, in dedicated wb buffer mode, a max of 1 lun would have wb
	 * buffer configured.
	 */
	dev_info->wb_buffer_type = desc_buf[DEVICE_DESC_PARAM_WB_TYPE];

	dev_info->b_presrv_uspc_en =
		desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];

	if (dev_info->wb_buffer_type == WB_BUF_MODE_SHARED) {
		if (!get_unaligned_be32(desc_buf +
				   DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS))
			goto wb_disabled;
	} else {
		for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) {
			d_lu_wb_buf_alloc = 0;
			ufshcd_read_unit_desc_param(hba,
					lun,
					UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS,
					(u8 *)&d_lu_wb_buf_alloc,
					sizeof(d_lu_wb_buf_alloc));
			if (d_lu_wb_buf_alloc) {
				dev_info->wb_dedicated_lu = lun;
				break;
			}
		}

		if (!d_lu_wb_buf_alloc)
			goto wb_disabled;
	}

	if (!ufshcd_is_wb_buf_lifetime_available(hba))
		goto wb_disabled;

	return;

wb_disabled:
	hba->caps &= ~UFSHCD_CAP_WB_EN;
}
static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf)
{
	struct ufs_dev_info *dev_info = &hba->dev_info;
	u32 ext_ufs_feature;
	u8 mask = 0;

	if (!(hba->caps & UFSHCD_CAP_TEMP_NOTIF) || dev_info->wspecversion < 0x300)
		return;

	ext_ufs_feature = get_unaligned_be32(desc_buf + DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);

	if (ext_ufs_feature & UFS_DEV_LOW_TEMP_NOTIF)
		mask |= MASK_EE_TOO_LOW_TEMP;

	if (ext_ufs_feature & UFS_DEV_HIGH_TEMP_NOTIF)
		mask |= MASK_EE_TOO_HIGH_TEMP;

	if (mask) {
		ufshcd_enable_ee(hba, mask);
		ufs_hwmon_probe(hba, mask);
	}
}
static void ufshcd_ext_iid_probe(struct ufs_hba *hba, u8 *desc_buf)
{
	struct ufs_dev_info *dev_info = &hba->dev_info;
	u32 ext_ufs_feature;
	u32 ext_iid_en = 0;
	int err;

	/* Only UFS-4.0 and above may support EXT_IID */
	if (dev_info->wspecversion < 0x400)
		goto out;

	ext_ufs_feature = get_unaligned_be32(desc_buf +
				     DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
	if (!(ext_ufs_feature & UFS_DEV_EXT_IID_SUP))
		goto out;

	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				      QUERY_ATTR_IDN_EXT_IID_EN, 0, 0, &ext_iid_en);
	if (err)
		dev_err(hba->dev, "failed reading bEXTIIDEn. err = %d\n", err);

out:
	dev_info->b_ext_iid_en = ext_iid_en;
}
void ufshcd_fixup_dev_quirks(struct ufs_hba *hba,
			     const struct ufs_dev_quirk *fixups)
{
	const struct ufs_dev_quirk *f;
	struct ufs_dev_info *dev_info = &hba->dev_info;

	if (!fixups)
		return;

	for (f = fixups; f->quirk; f++) {
		if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
		     f->wmanufacturerid == UFS_ANY_VENDOR) &&
		     ((dev_info->model &&
		       STR_PRFX_EQUAL(f->model, dev_info->model)) ||
		      !strcmp(f->model, UFS_ANY_MODEL)))
			hba->dev_quirks |= f->quirk;
	}
}
EXPORT_SYMBOL_GPL(ufshcd_fixup_dev_quirks);
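/*
 * Illustrative sketch (not part of this file): a host platform glue driver
 * can feed its own table into ufshcd_fixup_dev_quirks() from its
 * fixup_dev_quirks vop. The entries below are placeholders.
 *
 *	static const struct ufs_dev_quirk vendor_fixups[] = {
 *		{ .wmanufacturerid = UFS_ANY_VENDOR,
 *		  .model = UFS_ANY_MODEL,
 *		  .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
 *		{}
 *	};
 *
 *	ufshcd_fixup_dev_quirks(hba, vendor_fixups);
 */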
static void ufs_fixup_device_setup(struct ufs_hba *hba)
{
	/* fix by general quirk table */
	ufshcd_fixup_dev_quirks(hba, ufs_fixups);

	/* allow vendors to fix quirks */
	ufshcd_vops_fixup_dev_quirks(hba);
}
static int ufs_get_device_desc(struct ufs_hba *hba)
{
	int err;
	u8 model_index;
	u8 b_ufs_feature_sup;
	u8 *desc_buf;
	struct ufs_dev_info *dev_info = &hba->dev_info;

	desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
	if (!desc_buf) {
		err = -ENOMEM;
		goto out;
	}

	err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf,
				     QUERY_DESC_MAX_SIZE);
	if (err) {
		dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
			__func__, err);
		goto out;
	}

	/*
	 * getting vendor (manufacturerID) and Bank Index in big endian
	 * format
	 */
	dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
				     desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];

	/* getting Specification Version in big endian format */
	dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
				      desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
	dev_info->bqueuedepth = desc_buf[DEVICE_DESC_PARAM_Q_DPTH];
	b_ufs_feature_sup = desc_buf[DEVICE_DESC_PARAM_UFS_FEAT];

	model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];

	if (dev_info->wspecversion >= UFS_DEV_HPB_SUPPORT_VERSION &&
	    (b_ufs_feature_sup & UFS_DEV_HPB_SUPPORT)) {
		bool hpb_en = false;

		ufshpb_get_dev_info(hba, desc_buf);

		if (!ufshpb_is_legacy(hba))
			err = ufshcd_query_flag_retry(hba,
						      UPIU_QUERY_OPCODE_READ_FLAG,
						      QUERY_FLAG_IDN_HPB_EN, 0,
						      &hpb_en);

		if (ufshpb_is_legacy(hba) || (!err && hpb_en))
			dev_info->hpb_enabled = true;
	}

	err = ufshcd_read_string_desc(hba, model_index,
				      &dev_info->model, SD_ASCII_STD);
	if (err < 0) {
		dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
			__func__, err);
		goto out;
	}

	hba->luns_avail = desc_buf[DEVICE_DESC_PARAM_NUM_LU] +
		desc_buf[DEVICE_DESC_PARAM_NUM_WLU];

	ufs_fixup_device_setup(hba);

	ufshcd_wb_probe(hba, desc_buf);

	ufshcd_temp_notif_probe(hba, desc_buf);

	if (hba->ext_iid_sup)
		ufshcd_ext_iid_probe(hba, desc_buf);

	/*
	 * ufshcd_read_string_desc returns size of the string
	 * reset the error value
	 */
	err = 0;

out:
	kfree(desc_buf);
	return err;
}

static void ufs_put_device_desc(struct ufs_hba *hba)
{
	struct ufs_dev_info *dev_info = &hba->dev_info;

	kfree(dev_info->model);
	dev_info->model = NULL;
}
/**
 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
 * @hba: per-adapter instance
 *
 * PA_TActivate parameter can be tuned manually if UniPro version is less than
 * 1.61. PA_TActivate needs to be greater than or equal to peerM-PHY's
 * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
 * the hibern8 exit latency.
 *
 * Returns zero on success, non-zero error value on failure.
 */
static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
{
	int ret = 0;
	u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;

	ret = ufshcd_dme_peer_get(hba,
				  UIC_ARG_MIB_SEL(
					RX_MIN_ACTIVATETIME_CAPABILITY,
					UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
				  &peer_rx_min_activatetime);
	if (ret)
		goto out;

	/* make sure proper unit conversion is applied */
	tuned_pa_tactivate =
		((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
		 / PA_TACTIVATE_TIME_UNIT_US);
	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
			     tuned_pa_tactivate);

out:
	return ret;
}
/**
 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
 * @hba: per-adapter instance
 *
 * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
 * 1.61. PA_Hibern8Time needs to be maximum of local M-PHY's
 * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
 * This optimal value can help reduce the hibern8 exit latency.
 *
 * Returns zero on success, non-zero error value on failure.
 */
static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
{
	int ret = 0;
	u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
	u32 max_hibern8_time, tuned_pa_hibern8time;

	ret = ufshcd_dme_get(hba,
			     UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
			     &local_tx_hibern8_time_cap);
	if (ret)
		goto out;

	ret = ufshcd_dme_peer_get(hba,
				  UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
					UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
				  &peer_rx_hibern8_time_cap);
	if (ret)
		goto out;

	max_hibern8_time = max(local_tx_hibern8_time_cap,
			       peer_rx_hibern8_time_cap);
	/* make sure proper unit conversion is applied */
	tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
				/ PA_HIBERN8_TIME_UNIT_US);
	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
			     tuned_pa_hibern8time);
out:
	return ret;
}
/**
 * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
 * less than device PA_TACTIVATE time.
 * @hba: per-adapter instance
 *
 * Some UFS devices require host PA_TACTIVATE to be lower than device
 * PA_TACTIVATE, we need to enable UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
 * for such devices.
 *
 * Returns zero on success, non-zero error value on failure.
 */
static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
{
	int ret = 0;
	u32 granularity, peer_granularity;
	u32 pa_tactivate, peer_pa_tactivate;
	u32 pa_tactivate_us, peer_pa_tactivate_us;
	static const u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
				  &granularity);
	if (ret)
		goto out;

	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
				  &peer_granularity);
	if (ret)
		goto out;

	if ((granularity < PA_GRANULARITY_MIN_VAL) ||
	    (granularity > PA_GRANULARITY_MAX_VAL)) {
		dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
			__func__, granularity);
		return -EINVAL;
	}

	if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
	    (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
		dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
			__func__, peer_granularity);
		return -EINVAL;
	}

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
	if (ret)
		goto out;

	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
				  &peer_pa_tactivate);
	if (ret)
		goto out;

	pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
	peer_pa_tactivate_us = peer_pa_tactivate *
			     gran_to_us_table[peer_granularity - 1];

	if (pa_tactivate_us >= peer_pa_tactivate_us) {
		u32 new_peer_pa_tactivate;

		new_peer_pa_tactivate = pa_tactivate_us /
				      gran_to_us_table[peer_granularity - 1];
		new_peer_pa_tactivate++;
		ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
					  new_peer_pa_tactivate);
	}

out:
	return ret;
}
static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
{
	if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
		ufshcd_tune_pa_tactivate(hba);
		ufshcd_tune_pa_hibern8time(hba);
	}

	ufshcd_vops_apply_dev_quirks(hba);

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
		/* set 1ms timeout for PA_TACTIVATE */
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
		ufshcd_quirk_tune_host_pa_tactivate(hba);
}
static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
{
	hba->ufs_stats.hibern8_exit_cnt = 0;
	hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
	hba->req_abort_count = 0;
}
static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
{
	int err;
	u8 *desc_buf;

	desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
	if (!desc_buf) {
		err = -ENOMEM;
		goto out;
	}

	err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0,
				     desc_buf, QUERY_DESC_MAX_SIZE);
	if (err) {
		dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
			__func__, err);
		goto out;
	}

	if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1)
		hba->dev_info.max_lu_supported = 32;
	else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
		hba->dev_info.max_lu_supported = 8;

	if (desc_buf[QUERY_DESC_LENGTH_OFFSET] >=
		GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS)
		ufshpb_get_geo_info(hba, desc_buf);

out:
	kfree(desc_buf);
	return err;
}
struct ufs_ref_clk {
	unsigned long freq_hz;
	enum ufs_ref_clk_freq val;
};

static const struct ufs_ref_clk ufs_ref_clk_freqs[] = {
	{19200000, REF_CLK_FREQ_19_2_MHZ},
	{26000000, REF_CLK_FREQ_26_MHZ},
	{38400000, REF_CLK_FREQ_38_4_MHZ},
	{52000000, REF_CLK_FREQ_52_MHZ},
	{0, REF_CLK_FREQ_INVAL},
};

static enum ufs_ref_clk_freq
ufs_get_bref_clk_from_hz(unsigned long freq)
{
	int i;

	for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++)
		if (ufs_ref_clk_freqs[i].freq_hz == freq)
			return ufs_ref_clk_freqs[i].val;

	return REF_CLK_FREQ_INVAL;
}
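/*
 * Usage sketch (illustrative): mapping a measured reference clock rate to the
 * bRefClkFreq enumeration that is later written to the device attribute.
 *
 *	enum ufs_ref_clk_freq val = ufs_get_bref_clk_from_hz(26000000);
 *	(val is REF_CLK_FREQ_26_MHZ; unsupported rates give REF_CLK_FREQ_INVAL)
 */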
void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
{
	unsigned long freq;

	freq = clk_get_rate(refclk);

	hba->dev_ref_clk_freq =
		ufs_get_bref_clk_from_hz(freq);

	if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
		dev_err(hba->dev,
		"invalid ref_clk setting = %ld\n", freq);
}
static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
{
	int err;
	u32 ref_clk;
	u32 freq = hba->dev_ref_clk_freq;

	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);

	if (err) {
		dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
			err);
		goto out;
	}

	if (ref_clk == freq)
		goto out; /* nothing to update */

	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
			QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq);

	if (err) {
		dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
			ufs_ref_clk_freqs[freq].freq_hz);
		goto out;
	}

	dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n",
			ufs_ref_clk_freqs[freq].freq_hz);

out:
	return err;
}
static int ufshcd_device_params_init(struct ufs_hba *hba)
{
	bool flag;
	int ret;

	/* Init UFS geometry descriptor related parameters */
	ret = ufshcd_device_geo_params_init(hba);
	if (ret)
		goto out;

	/* Check and apply UFS device quirks */
	ret = ufs_get_device_desc(hba);
	if (ret) {
		dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
			__func__, ret);
		goto out;
	}

	ufshcd_get_ref_clk_gating_wait(hba);

	if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
			QUERY_FLAG_IDN_PWR_ON_WPE, 0, &flag))
		hba->dev_info.f_power_on_wp_en = flag;

	/* Probe maximum power mode co-supported by both UFS host and device */
	if (ufshcd_get_max_pwr_mode(hba))
		dev_err(hba->dev,
			"%s: Failed getting max supported power mode\n",
			__func__);
out:
	return ret;
}
/**
 * ufshcd_add_lus - probe and add UFS logical units
 * @hba: per-adapter instance
 */
static int ufshcd_add_lus(struct ufs_hba *hba)
{
	int ret;

	/* Add required well known logical units to scsi mid layer */
	ret = ufshcd_scsi_add_wlus(hba);
	if (ret)
		goto out;

	/* Initialize devfreq after UFS device is detected */
	if (ufshcd_is_clkscaling_supported(hba)) {
		memcpy(&hba->clk_scaling.saved_pwr_info.info,
			&hba->pwr_info,
			sizeof(struct ufs_pa_layer_attr));
		hba->clk_scaling.saved_pwr_info.is_valid = true;
		hba->clk_scaling.is_allowed = true;

		ret = ufshcd_devfreq_init(hba);
		if (ret)
			goto out;

		hba->clk_scaling.is_enabled = true;
		ufshcd_init_clk_scaling_sysfs(hba);
	}

	scsi_scan_host(hba->host);
	pm_runtime_put_sync(hba->dev);

out:
	return ret;
}
/* SDB - Single Doorbell */
static void ufshcd_release_sdb_queue(struct ufs_hba *hba, int nutrs)
{
	size_t ucdl_size, utrdl_size;

	ucdl_size = sizeof(struct utp_transfer_cmd_desc) * nutrs;
	dmam_free_coherent(hba->dev, ucdl_size, hba->ucdl_base_addr,
			   hba->ucdl_dma_addr);

	utrdl_size = sizeof(struct utp_transfer_req_desc) * nutrs;
	dmam_free_coherent(hba->dev, utrdl_size, hba->utrdl_base_addr,
			   hba->utrdl_dma_addr);

	devm_kfree(hba->dev, hba->lrb);
}
static int ufshcd_alloc_mcq(struct ufs_hba *hba)
{
	int ret;
	int old_nutrs = hba->nutrs;

	ret = ufshcd_mcq_decide_queue_depth(hba);
	if (ret < 0)
		return ret;

	hba->nutrs = ret;
	ret = ufshcd_mcq_init(hba);
	if (ret)
		goto err;

	/*
	 * Previously allocated memory for nutrs may not be enough in MCQ mode.
	 * Number of supported tags in MCQ mode may be larger than SDB mode.
	 */
	if (hba->nutrs != old_nutrs) {
		ufshcd_release_sdb_queue(hba, old_nutrs);
		ret = ufshcd_memory_alloc(hba);
		if (ret)
			goto err;
		ufshcd_host_memory_configure(hba);
	}

	ret = ufshcd_mcq_memory_alloc(hba);
	if (ret)
		goto err;

	return 0;
err:
	hba->nutrs = old_nutrs;
	return ret;
}
static void ufshcd_config_mcq(struct ufs_hba *hba)
{
	int ret;

	ret = ufshcd_mcq_vops_config_esi(hba);
	dev_info(hba->dev, "ESI %sconfigured\n", ret ? "is not " : "");

	ufshcd_enable_intr(hba, UFSHCD_ENABLE_MCQ_INTRS);
	ufshcd_mcq_make_queues_operational(hba);
	ufshcd_mcq_config_mac(hba, hba->nutrs);

	hba->host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
	hba->reserved_slot = hba->nutrs - UFSHCD_NUM_RESERVED;

	/* Select MCQ mode */
	ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x1,
		      REG_UFS_MEM_CFG);
	hba->mcq_enabled = true;

	dev_info(hba->dev, "MCQ configured, nr_queues=%d, io_queues=%d, read_queue=%d, poll_queues=%d, queue_depth=%d\n",
		 hba->nr_hw_queues, hba->nr_queues[HCTX_TYPE_DEFAULT],
		 hba->nr_queues[HCTX_TYPE_READ], hba->nr_queues[HCTX_TYPE_POLL],
		 hba->nutrs);
}
static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
{
	int ret;
	struct Scsi_Host *host = hba->host;

	hba->ufshcd_state = UFSHCD_STATE_RESET;

	ret = ufshcd_link_startup(hba);
	if (ret)
		return ret;

	if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION)
		return ret;

	/* Debug counters initialization */
	ufshcd_clear_dbg_ufs_stats(hba);

	/* UniPro link is active now */
	ufshcd_set_link_active(hba);

	/* Reconfigure MCQ upon reset */
	if (is_mcq_enabled(hba) && !init_dev_params)
		ufshcd_config_mcq(hba);

	/* Verify device initialization by sending NOP OUT UPIU */
	ret = ufshcd_verify_dev_init(hba);
	if (ret)
		return ret;

	/* Initiate UFS initialization, and waiting until completion */
	ret = ufshcd_complete_dev_init(hba);
	if (ret)
		return ret;

	/*
	 * Initialize UFS device parameters used by driver, these
	 * parameters are associated with UFS descriptors.
	 */
	if (init_dev_params) {
		ret = ufshcd_device_params_init(hba);
		if (ret)
			return ret;
		if (is_mcq_supported(hba) && !hba->scsi_host_added) {
			ret = ufshcd_alloc_mcq(hba);
			if (ret) {
				/* Continue with SDB mode */
				use_mcq_mode = false;
				dev_err(hba->dev, "MCQ mode is disabled, err=%d\n",
					ret);
			}
			ret = scsi_add_host(host, hba->dev);
			if (ret) {
				dev_err(hba->dev, "scsi_add_host failed\n");
				return ret;
			}
			hba->scsi_host_added = true;
		}
		/* MCQ may be disabled if ufshcd_alloc_mcq() fails */
		if (is_mcq_supported(hba) && use_mcq_mode)
			ufshcd_config_mcq(hba);
	}

	ufshcd_tune_unipro_params(hba);

	/* UFS device is also active now */
	ufshcd_set_ufs_dev_active(hba);
	ufshcd_force_reset_auto_bkops(hba);

	/* Gear up to HS gear if supported */
	if (hba->max_pwr_info.is_valid) {
		/*
		 * Set the right value to bRefClkFreq before attempting to
		 * switch to HS gears.
		 */
		if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
			ufshcd_set_dev_ref_clk(hba);
		ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
		if (ret) {
			dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
					__func__, ret);
			return ret;
		}
	}

	return 0;
}
/**
 * ufshcd_probe_hba - probe hba to detect device and initialize it
 * @hba: per-adapter instance
 * @init_dev_params: whether or not to call ufshcd_device_params_init().
 *
 * Execute link-startup and verify device initialization
 */
static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
{
	ktime_t start = ktime_get();
	unsigned long flags;
	int ret;

	ret = ufshcd_device_init(hba, init_dev_params);
	if (ret)
		goto out;

	if (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH) {
		/* Reset the device and controller before doing reinit */
		ufshcd_device_reset(hba);
		ufshcd_hba_stop(hba);
		ufshcd_vops_reinit_notify(hba);
		ret = ufshcd_hba_enable(hba);
		if (ret) {
			dev_err(hba->dev, "Host controller enable failed\n");
			ufshcd_print_evt_hist(hba);
			ufshcd_print_host_state(hba);
			goto out;
		}

		/* Reinit the device */
		ret = ufshcd_device_init(hba, init_dev_params);
		if (ret)
			goto out;
	}

	ufshcd_print_pwr_info(hba);

	/*
	 * bActiveICCLevel is volatile for UFS device (as per latest v2.1 spec)
	 * and for removable UFS card as well, hence always set the parameter.
	 * Note: Error handler may issue the device reset hence resetting
	 * bActiveICCLevel as well so it is always safe to set this here.
	 */
	ufshcd_set_active_icc_lvl(hba);

	/* Enable UFS Write Booster if supported */
	ufshcd_configure_wb(hba);

	if (hba->ee_usr_mask)
		ufshcd_write_ee_control(hba);
	/* Enable Auto-Hibernate if configured */
	ufshcd_auto_hibern8_enable(hba);

	ufshpb_toggle_state(hba, HPB_RESET, HPB_PRESENT);
out:
	spin_lock_irqsave(hba->host->host_lock, flags);
	if (ret)
		hba->ufshcd_state = UFSHCD_STATE_ERROR;
	else if (hba->ufshcd_state == UFSHCD_STATE_RESET)
		hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	trace_ufshcd_init(dev_name(hba->dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	return ret;
}
/**
 * ufshcd_async_scan - asynchronous execution for probing hba
 * @data: data pointer to pass to this function
 * @cookie: cookie data
 */
static void ufshcd_async_scan(void *data, async_cookie_t cookie)
{
	struct ufs_hba *hba = (struct ufs_hba *)data;
	int ret;

	down(&hba->host_sem);
	/* Initialize hba, detect and initialize UFS device */
	ret = ufshcd_probe_hba(hba, true);
	up(&hba->host_sem);
	if (ret)
		goto out;

	/* Probe and add UFS logical units */
	ret = ufshcd_add_lus(hba);
out:
	/*
	 * If we failed to initialize the device or the device is not
	 * present, turn off the power/clocks etc.
	 */
	if (ret) {
		pm_runtime_put_sync(hba->dev);
		ufshcd_hba_exit(hba);
	}
}
static enum scsi_timeout_action ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
{
	struct ufs_hba *hba = shost_priv(scmd->device->host);

	if (!hba->system_suspending) {
		/* Activate the error handler in the SCSI core. */
		return SCSI_EH_NOT_HANDLED;
	}

	/*
	 * If we get here we know that no TMFs are outstanding and also that
	 * the only pending command is a START STOP UNIT command. Handle the
	 * timeout of that command directly to prevent a deadlock between
	 * ufshcd_set_dev_pwr_mode() and ufshcd_err_handler().
	 */
	ufshcd_link_recovery(hba);
	dev_info(hba->dev, "%s() finished; outstanding_tasks = %#lx.\n",
		 __func__, hba->outstanding_tasks);

	return hba->outstanding_reqs ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE;
}
static const struct attribute_group *ufshcd_driver_groups[] = {
	&ufs_sysfs_unit_descriptor_group,
	&ufs_sysfs_lun_attributes_group,
#ifdef CONFIG_SCSI_UFS_HPB
	&ufs_sysfs_hpb_stat_group,
	&ufs_sysfs_hpb_param_group,
#endif
	NULL,
};
static struct ufs_hba_variant_params ufs_hba_vps = {
	.hba_enable_delay_us		= 1000,
	.wb_flush_threshold		= UFS_WB_BUF_REMAIN_PERCENT(40),
	.devfreq_profile.polling_ms	= 100,
	.devfreq_profile.target		= ufshcd_devfreq_target,
	.devfreq_profile.get_dev_status	= ufshcd_devfreq_get_dev_status,
	.ondemand_data.upthreshold	= 70,
	.ondemand_data.downdifferential	= 5,
};
static struct scsi_host_template ufshcd_driver_template = {
	.module			= THIS_MODULE,
	.name			= UFSHCD,
	.proc_name		= UFSHCD,
	.map_queues		= ufshcd_map_queues,
	.queuecommand		= ufshcd_queuecommand,
	.mq_poll		= ufshcd_poll,
	.slave_alloc		= ufshcd_slave_alloc,
	.slave_configure	= ufshcd_slave_configure,
	.slave_destroy		= ufshcd_slave_destroy,
	.change_queue_depth	= ufshcd_change_queue_depth,
	.eh_abort_handler	= ufshcd_abort,
	.eh_device_reset_handler = ufshcd_eh_device_reset_handler,
	.eh_host_reset_handler   = ufshcd_eh_host_reset_handler,
	.eh_timed_out		= ufshcd_eh_timed_out,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.cmd_per_lun		= UFSHCD_CMD_PER_LUN,
	.can_queue		= UFSHCD_CAN_QUEUE,
	.max_segment_size	= PRDT_DATA_BYTE_COUNT_MAX,
	.max_sectors		= (1 << 20) / SECTOR_SIZE, /* 1 MiB */
	.max_host_blocked	= 1,
	.track_queue_depth	= 1,
	.sdev_groups		= ufshcd_driver_groups,
	.rpm_autosuspend_delay	= RPM_AUTOSUSPEND_DELAY_MS,
};
static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
				   int ua)
{
	int ret;

	if (!vreg)
		return 0;

	/*
	 * "set_load" operation shall be required on those regulators
	 * which specifically configured current limitation. Otherwise
	 * zero max_uA may cause unexpected behavior when regulator is
	 * enabled or set as high power mode.
	 */
	if (!vreg->max_uA)
		return 0;

	ret = regulator_set_load(vreg->reg, ua);
	if (ret < 0) {
		dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
				__func__, vreg->name, ua, ret);
	}

	return ret;
}
static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
					 struct ufs_vreg *vreg)
{
	return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
}

static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
					 struct ufs_vreg *vreg)
{
	if (!vreg)
		return 0;

	return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
}
static int ufshcd_config_vreg(struct device *dev,
		struct ufs_vreg *vreg, bool on)
{
	if (regulator_count_voltages(vreg->reg) <= 0)
		return 0;

	return ufshcd_config_vreg_load(dev, vreg, on ? vreg->max_uA : 0);
}
static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (!vreg || vreg->enabled)
		goto out;

	ret = ufshcd_config_vreg(dev, vreg, true);
	if (!ret)
		ret = regulator_enable(vreg->reg);

	if (!ret)
		vreg->enabled = true;
	else
		dev_err(dev, "%s: %s enable failed, err=%d\n",
				__func__, vreg->name, ret);
out:
	return ret;
}
static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (!vreg || !vreg->enabled || vreg->always_on)
		goto out;

	ret = regulator_disable(vreg->reg);

	if (!ret) {
		/* ignore errors on applying disable config */
		ufshcd_config_vreg(dev, vreg, false);
		vreg->enabled = false;
	} else {
		dev_err(dev, "%s: %s disable failed, err=%d\n",
				__func__, vreg->name, ret);
	}
out:
	return ret;
}
static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
{
	int ret = 0;
	struct device *dev = hba->dev;
	struct ufs_vreg_info *info = &hba->vreg_info;

	ret = ufshcd_toggle_vreg(dev, info->vcc, on);
	if (ret)
		goto out;

	ret = ufshcd_toggle_vreg(dev, info->vccq, on);
	if (ret)
		goto out;

	ret = ufshcd_toggle_vreg(dev, info->vccq2, on);

out:
	if (ret) {
		ufshcd_toggle_vreg(dev, info->vccq2, false);
		ufshcd_toggle_vreg(dev, info->vccq, false);
		ufshcd_toggle_vreg(dev, info->vcc, false);
	}
	return ret;
}
static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
{
	struct ufs_vreg_info *info = &hba->vreg_info;

	return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
}
int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (!vreg)
		goto out;

	vreg->reg = devm_regulator_get(dev, vreg->name);
	if (IS_ERR(vreg->reg)) {
		ret = PTR_ERR(vreg->reg);
		dev_err(dev, "%s: %s get failed, err=%d\n",
				__func__, vreg->name, ret);
	}
out:
	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_get_vreg);
static int ufshcd_init_vreg(struct ufs_hba *hba)
{
	int ret = 0;
	struct device *dev = hba->dev;
	struct ufs_vreg_info *info = &hba->vreg_info;

	ret = ufshcd_get_vreg(dev, info->vcc);
	if (ret)
		goto out;

	ret = ufshcd_get_vreg(dev, info->vccq);
	if (!ret)
		ret = ufshcd_get_vreg(dev, info->vccq2);
out:
	return ret;
}
static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
{
	struct ufs_vreg_info *info = &hba->vreg_info;

	return ufshcd_get_vreg(hba->dev, info->vdd_hba);
}
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;
	unsigned long flags;
	ktime_t start = ktime_get();
	bool clk_state_changed = false;

	if (list_empty(head))
		goto out;

	ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
	if (ret)
		return ret;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			/*
			 * Don't disable clocks which are needed
			 * to keep the link active.
			 */
			if (ufshcd_is_link_active(hba) &&
			    clki->keep_link_active)
				continue;

			clk_state_changed = on ^ clki->enabled;
			if (on && !clki->enabled) {
				ret = clk_prepare_enable(clki->clk);
				if (ret) {
					dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
						__func__, clki->name, ret);
					goto out;
				}
			} else if (!on && clki->enabled) {
				clk_disable_unprepare(clki->clk);
			}
			clki->enabled = on;
			dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
					clki->name, on ? "en" : "dis");
		}
	}

	ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
	if (ret)
		return ret;

out:
	if (ret) {
		list_for_each_entry(clki, head, list) {
			if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
				clk_disable_unprepare(clki->clk);
		}
	} else if (!ret && on) {
		spin_lock_irqsave(hba->host->host_lock, flags);
		hba->clk_gating.state = CLKS_ON;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	}

	if (clk_state_changed)
		trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
			(on ? "on" : "off"),
			ktime_to_us(ktime_sub(ktime_get(), start)), ret);
	return ret;
}
static enum ufs_ref_clk_freq ufshcd_parse_ref_clk_property(struct ufs_hba *hba)
{
	u32 freq;
	int ret = device_property_read_u32(hba->dev, "ref-clk-freq", &freq);

	if (ret) {
		dev_dbg(hba->dev, "Cannot query 'ref-clk-freq' property = %d", ret);
		return REF_CLK_FREQ_INVAL;
	}

	return ufs_get_bref_clk_from_hz(freq);
}
static int ufshcd_init_clocks(struct ufs_hba *hba)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct device *dev = hba->dev;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		goto out;

	list_for_each_entry(clki, head, list) {
		if (!clki->name)
			continue;

		clki->clk = devm_clk_get(dev, clki->name);
		if (IS_ERR(clki->clk)) {
			ret = PTR_ERR(clki->clk);
			dev_err(dev, "%s: %s clk get failed, %d\n",
					__func__, clki->name, ret);
			goto out;
		}

		/*
		 * Parse device ref clk freq as per device tree "ref_clk".
		 * Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL
		 * in ufshcd_alloc_host().
		 */
		if (!strcmp(clki->name, "ref_clk"))
			ufshcd_parse_dev_ref_clk_freq(hba, clki->clk);

		if (clki->max_freq) {
			ret = clk_set_rate(clki->clk, clki->max_freq);
			if (ret) {
				dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
					__func__, clki->name,
					clki->max_freq, ret);
				goto out;
			}
			clki->curr_freq = clki->max_freq;
		}
		dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
				clki->name, clk_get_rate(clki->clk));
	}
out:
	return ret;
}
static int ufshcd_variant_hba_init(struct ufs_hba *hba)
{
	int err = 0;

	if (!hba->vops)
		goto out;

	err = ufshcd_vops_init(hba);
	if (err)
		dev_err(hba->dev, "%s: variant %s init failed err %d\n",
			__func__, ufshcd_get_var_name(hba), err);
out:
	return err;
}

static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
{
	if (!hba->vops)
		return;

	ufshcd_vops_exit(hba);
}
static int ufshcd_hba_init(struct ufs_hba *hba)
{
	int err;

	/*
	 * Handle host controller power separately from the UFS device power
	 * rails as it will help controlling the UFS host controller power
	 * collapse easily which is different than UFS device power collapse.
	 * Also, enable the host controller power before we go ahead with rest
	 * of the initialization here.
	 */
	err = ufshcd_init_hba_vreg(hba);
	if (err)
		goto out;

	err = ufshcd_setup_hba_vreg(hba, true);
	if (err)
		goto out;

	err = ufshcd_init_clocks(hba);
	if (err)
		goto out_disable_hba_vreg;

	if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
		hba->dev_ref_clk_freq = ufshcd_parse_ref_clk_property(hba);

	err = ufshcd_setup_clocks(hba, true);
	if (err)
		goto out_disable_hba_vreg;

	err = ufshcd_init_vreg(hba);
	if (err)
		goto out_disable_clks;

	err = ufshcd_setup_vreg(hba, true);
	if (err)
		goto out_disable_clks;

	err = ufshcd_variant_hba_init(hba);
	if (err)
		goto out_disable_vreg;

	ufs_debugfs_hba_init(hba);

	hba->is_powered = true;
	goto out;

out_disable_vreg:
	ufshcd_setup_vreg(hba, false);
out_disable_clks:
	ufshcd_setup_clocks(hba, false);
out_disable_hba_vreg:
	ufshcd_setup_hba_vreg(hba, false);
out:
	return err;
}
static void ufshcd_hba_exit(struct ufs_hba *hba)
{
	if (hba->is_powered) {
		ufshcd_exit_clk_scaling(hba);
		ufshcd_exit_clk_gating(hba);
		if (hba->eh_wq)
			destroy_workqueue(hba->eh_wq);
		ufs_debugfs_hba_exit(hba);
		ufshcd_variant_hba_exit(hba);
		ufshcd_setup_vreg(hba, false);
		ufshcd_setup_clocks(hba, false);
		ufshcd_setup_hba_vreg(hba, false);
		hba->is_powered = false;
		ufs_put_device_desc(hba);
	}
}
static int ufshcd_execute_start_stop(struct scsi_device *sdev,
				     enum ufs_dev_pwr_mode pwr_mode,
				     struct scsi_sense_hdr *sshdr)
{
	unsigned char cdb[6] = { START_STOP, 0, 0, 0, pwr_mode << 4, 0 };
	struct request *req;
	struct scsi_cmnd *scmd;
	int ret;

	req = scsi_alloc_request(sdev->request_queue, REQ_OP_DRV_IN,
				 BLK_MQ_REQ_PM);
	if (IS_ERR(req))
		return PTR_ERR(req);

	scmd = blk_mq_rq_to_pdu(req);
	scmd->cmd_len = COMMAND_SIZE(cdb[0]);
	memcpy(scmd->cmnd, cdb, scmd->cmd_len);
	scmd->allowed = 0/*retries*/;
	scmd->flags |= SCMD_FAIL_IF_RECOVERING;
	req->timeout = 1 * HZ;
	req->rq_flags |= RQF_PM | RQF_QUIET;

	blk_execute_rq(req, /*at_head=*/true);

	if (sshdr)
		scsi_normalize_sense(scmd->sense_buffer, scmd->sense_len,
				     sshdr);
	ret = scmd->result;

	blk_mq_free_request(req);

	return ret;
}
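/*
 * Illustrative note (not part of the original source): START STOP UNIT
 * carries the requested power condition in bits 7:4 of CDB byte 4, so e.g.
 * asking for UFS_SLEEP_PWR_MODE (2) yields cdb[] = { 0x1B, 0, 0, 0, 0x20, 0 }.
 */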
/**
 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
 *			     power mode
 * @hba: per adapter instance
 * @pwr_mode: device power mode to set
 *
 * Returns 0 if requested power mode is set successfully
 * Returns < 0 if failed to set the requested power mode
 */
static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
				   enum ufs_dev_pwr_mode pwr_mode)
{
	struct scsi_sense_hdr sshdr;
	struct scsi_device *sdp;
	unsigned long flags;
	int ret, retries;

	spin_lock_irqsave(hba->host->host_lock, flags);
	sdp = hba->ufs_device_wlun;
	if (sdp && scsi_device_online(sdp))
		ret = scsi_device_get(sdp);
	else
		ret = -ENODEV;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (ret)
		return ret;

	/*
	 * If scsi commands fail, the scsi mid-layer schedules scsi error-
	 * handling, which would wait for host to be resumed. Since we know
	 * we are functional while we are here, skip host resume in error
	 * handling context.
	 */
	hba->host->eh_noresume = 1;

	/*
	 * Current function would be generally called from the power management
	 * callbacks hence set the RQF_PM flag so that it doesn't resume the
	 * already suspended childs.
	 */
	for (retries = 3; retries > 0; --retries) {
		ret = ufshcd_execute_start_stop(sdp, pwr_mode, &sshdr);
		/*
		 * scsi_execute() only returns a negative value if the request
		 * queue is dying.
		 */
		if (ret <= 0)
			break;
	}
	if (ret) {
		sdev_printk(KERN_WARNING, sdp,
			    "START_STOP failed for power mode: %d, result %x\n",
			    pwr_mode, ret);
		if (ret > 0) {
			if (scsi_sense_valid(&sshdr))
				scsi_print_sense_hdr(sdp, NULL, &sshdr);
			ret = -EIO;
		}
	} else {
		hba->curr_dev_pwr_mode = pwr_mode;
	}

	scsi_device_put(sdp);
	hba->host->eh_noresume = 0;
	return ret;
}
static int ufshcd_link_state_transition(struct ufs_hba *hba,
					enum uic_link_state req_link_state,
					bool check_for_bkops)
{
	int ret = 0;

	if (req_link_state == hba->uic_link_state)
		return 0;

	if (req_link_state == UIC_LINK_HIBERN8_STATE) {
		ret = ufshcd_uic_hibern8_enter(hba);
		if (!ret) {
			ufshcd_set_link_hibern8(hba);
		} else {
			dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
					__func__, ret);
			goto out;
		}
	}
	/*
	 * If autobkops is enabled, link can't be turned off because
	 * turning off the link would also turn off the device, except in the
	 * case of DeepSleep where the device is expected to remain powered.
	 */
	else if ((req_link_state == UIC_LINK_OFF_STATE) &&
		 (!check_for_bkops || !hba->auto_bkops_enabled)) {
		/*
		 * Let's make sure that link is in low power mode, we are doing
		 * this currently by putting the link in Hibern8. Otherway to
		 * put the link in low power mode is to send the DME end point
		 * to device and then send the DME reset command to local
		 * unipro. But putting the link in hibern8 is much faster.
		 *
		 * Note also that putting the link in Hibern8 is a requirement
		 * for entering DeepSleep.
		 */
		ret = ufshcd_uic_hibern8_enter(hba);
		if (ret) {
			dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
					__func__, ret);
			goto out;
		}
		/*
		 * Change controller state to "reset state" which
		 * should also put the link in off/reset state
		 */
		ufshcd_hba_stop(hba);
		/*
		 * TODO: Check if we need any delay to make sure that
		 * controller is reset
		 */
		ufshcd_set_link_off(hba);
	}

out:
	return ret;
}
static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
{
	bool vcc_off = false;

	/*
	 * It seems some UFS devices may keep drawing more than sleep current
	 * (at least for 500us) from UFS rails (especially from VCCQ rail).
	 * To avoid this situation, add 2ms delay before putting these UFS
	 * rails in LPM mode.
	 */
	if (!ufshcd_is_link_active(hba) &&
	    hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
		usleep_range(2000, 2100);

	/*
	 * If UFS device is either in UFS_Sleep turn off VCC rail to save some
	 * power.
	 *
	 * If UFS device and link is in OFF state, all power supplies (VCC,
	 * VCCQ, VCCQ2) can be turned off if power on write protect is not
	 * required. If UFS link is inactive (Hibern8 or OFF state) and device
	 * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
	 *
	 * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
	 * in low power state which would save some power.
	 *
	 * If Write Booster is enabled and the device needs to flush the WB
	 * buffer OR if bkops status is urgent for WB, keep Vcc on.
	 */
	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
	    !hba->dev_info.is_lu_power_on_wp) {
		ufshcd_setup_vreg(hba, false);
		vcc_off = true;
	} else if (!ufshcd_is_ufs_dev_active(hba)) {
		ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
		vcc_off = true;
		if (ufshcd_is_link_hibern8(hba) || ufshcd_is_link_off(hba)) {
			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
		}
	}

	/*
	 * Some UFS devices require delay after VCC power rail is turned-off.
	 */
	if (vcc_off && hba->vreg_info.vcc &&
		hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)
		usleep_range(5000, 5100);
}
#ifdef CONFIG_PM
static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
{
	int ret = 0;

	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
	    !hba->dev_info.is_lu_power_on_wp) {
		ret = ufshcd_setup_vreg(hba, true);
	} else if (!ufshcd_is_ufs_dev_active(hba)) {
		if (!ufshcd_is_link_active(hba)) {
			ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
			if (ret)
				goto vcc_disable;
			ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
			if (ret)
				goto vccq_lpm;
		}
		ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
	}
	goto out;

vccq_lpm:
	ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
vcc_disable:
	ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
out:
	return ret;
}
#endif /* CONFIG_PM */
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
{
	if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
		ufshcd_setup_hba_vreg(hba, false);
}

static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
{
	if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
		ufshcd_setup_hba_vreg(hba, true);
}
static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int ret = 0;
	bool check_for_bkops;
	enum ufs_pm_level pm_lvl;
	enum ufs_dev_pwr_mode req_dev_pwr_mode;
	enum uic_link_state req_link_state;

	hba->pm_op_in_progress = true;
	if (pm_op != UFS_SHUTDOWN_PM) {
		pm_lvl = pm_op == UFS_RUNTIME_PM ?
			 hba->rpm_lvl : hba->spm_lvl;
		req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
		req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
	} else {
		req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
		req_link_state = UIC_LINK_OFF_STATE;
	}

	ufshpb_suspend(hba);

	/*
	 * If we can't transition into any of the low power modes
	 * just gate the clocks.
	 */
	ufshcd_hold(hba, false);
	hba->clk_gating.is_suspended = true;

	if (ufshcd_is_clkscaling_supported(hba))
		ufshcd_clk_scaling_suspend(hba, true);

	if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
	    req_link_state == UIC_LINK_ACTIVE_STATE) {
		goto vops_suspend;
	}

	if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
	    (req_link_state == hba->uic_link_state))
		goto enable_scaling;

	/* UFS device & link must be active before we enter in this function */
	if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
		ret = -EINVAL;
		goto enable_scaling;
	}

	if (pm_op == UFS_RUNTIME_PM) {
		if (ufshcd_can_autobkops_during_suspend(hba)) {
			/*
			 * The device is idle with no requests in the queue,
			 * allow background operations if bkops status shows
			 * that performance might be impacted.
			 */
			ret = ufshcd_urgent_bkops(hba);
			if (ret)
				goto enable_scaling;
		} else {
			/* make sure that auto bkops is disabled */
			ufshcd_disable_auto_bkops(hba);
		}
		/*
		 * If device needs to do BKOP or WB buffer flush during
		 * Hibern8, keep device power mode as "active power mode"
		 * and VCC supply.
		 */
		hba->dev_info.b_rpm_dev_flush_capable =
			hba->auto_bkops_enabled ||
			(((req_link_state == UIC_LINK_HIBERN8_STATE) ||
			((req_link_state == UIC_LINK_ACTIVE_STATE) &&
			ufshcd_is_auto_hibern8_enabled(hba))) &&
			ufshcd_wb_need_flush(hba));
	}

	flush_work(&hba->eeh_work);

	ret = ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
	if (ret)
		goto enable_scaling;

	if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
		if (pm_op != UFS_RUNTIME_PM)
			/* ensure that bkops is disabled */
			ufshcd_disable_auto_bkops(hba);

		if (!hba->dev_info.b_rpm_dev_flush_capable) {
			ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
			if (ret)
				goto enable_scaling;
		}
	}

	/*
	 * In the case of DeepSleep, the device is expected to remain powered
	 * with the link off, so do not check for bkops.
	 */
	check_for_bkops = !ufshcd_is_ufs_dev_deepsleep(hba);
	ret = ufshcd_link_state_transition(hba, req_link_state, check_for_bkops);
	if (ret)
		goto set_dev_active;

vops_suspend:
	/*
	 * Call vendor specific suspend callback. As these callbacks may access
	 * vendor specific host controller register space call them before the
	 * host clocks are ON.
	 */
	ret = ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
	if (ret)
		goto set_link_active;
	goto out;

set_link_active:
	/*
	 * Device hardware reset is required to exit DeepSleep. Also, for
	 * DeepSleep, the link is off so host reset and restore will be done
	 * further below.
	 */
	if (ufshcd_is_ufs_dev_deepsleep(hba)) {
		ufshcd_device_reset(hba);
		WARN_ON(!ufshcd_is_link_off(hba));
	}
	if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
		ufshcd_set_link_active(hba);
	else if (ufshcd_is_link_off(hba))
		ufshcd_host_reset_and_restore(hba);
set_dev_active:
	/* Can also get here needing to exit DeepSleep */
	if (ufshcd_is_ufs_dev_deepsleep(hba)) {
		ufshcd_device_reset(hba);
		ufshcd_host_reset_and_restore(hba);
	}
	if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
		ufshcd_disable_auto_bkops(hba);
enable_scaling:
	if (ufshcd_is_clkscaling_supported(hba))
		ufshcd_clk_scaling_suspend(hba, false);

	hba->dev_info.b_rpm_dev_flush_capable = false;
out:
	if (hba->dev_info.b_rpm_dev_flush_capable) {
		schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
			msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS));
	}

	if (ret) {
		ufshcd_update_evt_hist(hba, UFS_EVT_WL_SUSP_ERR, (u32)ret);
		hba->clk_gating.is_suspended = false;
		ufshcd_release(hba);
		ufshpb_resume(hba);
	}
	hba->pm_op_in_progress = false;
	return ret;
}
static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int ret;
	enum uic_link_state old_link_state = hba->uic_link_state;

	hba->pm_op_in_progress = true;

	/*
	 * Call vendor specific resume callback. As these callbacks may access
	 * vendor specific host controller register space call them when the
	 * host clocks are ON.
	 */
	ret = ufshcd_vops_resume(hba, pm_op);
	if (ret)
		goto out;

	/* For DeepSleep, the only supported option is to have the link off */
	WARN_ON(ufshcd_is_ufs_dev_deepsleep(hba) && !ufshcd_is_link_off(hba));

	if (ufshcd_is_link_hibern8(hba)) {
		ret = ufshcd_uic_hibern8_exit(hba);
		if (!ret) {
			ufshcd_set_link_active(hba);
		} else {
			dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
					__func__, ret);
			goto vendor_suspend;
		}
	} else if (ufshcd_is_link_off(hba)) {
		/*
		 * A full initialization of the host and the device is
		 * required since the link was put to off during suspend.
		 * Note, in the case of DeepSleep, the device will exit
		 * DeepSleep due to device reset.
		 */
		ret = ufshcd_reset_and_restore(hba);
		/*
		 * ufshcd_reset_and_restore() should have already
		 * set the link state as active
		 */
		if (ret || !ufshcd_is_link_active(hba))
			goto vendor_suspend;
	}

	if (!ufshcd_is_ufs_dev_active(hba)) {
		ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
		if (ret)
			goto set_old_link_state;
	}

	if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
		ufshcd_enable_auto_bkops(hba);
	else
		/*
		 * If BKOPs operations are urgently needed at this moment then
		 * keep auto-bkops enabled or else disable it.
		 */
		ufshcd_urgent_bkops(hba);

	if (hba->ee_usr_mask)
		ufshcd_write_ee_control(hba);

	if (ufshcd_is_clkscaling_supported(hba))
		ufshcd_clk_scaling_suspend(hba, false);

	if (hba->dev_info.b_rpm_dev_flush_capable) {
		hba->dev_info.b_rpm_dev_flush_capable = false;
		cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
	}

	/* Enable Auto-Hibernate if configured */
	ufshcd_auto_hibern8_enable(hba);

	goto out;

set_old_link_state:
	ufshcd_link_state_transition(hba, old_link_state, 0);
vendor_suspend:
	ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
	ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
out:
	if (ret)
		ufshcd_update_evt_hist(hba, UFS_EVT_WL_RES_ERR, (u32)ret);
	hba->clk_gating.is_suspended = false;
	ufshcd_release(hba);
	hba->pm_op_in_progress = false;
	return ret;
}
static int ufshcd_wl_runtime_suspend(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufs_hba *hba;
	int ret;
	ktime_t start = ktime_get();

	hba = shost_priv(sdev->host);

	ret = __ufshcd_wl_suspend(hba, UFS_RUNTIME_PM);
	if (ret)
		dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);

	trace_ufshcd_wl_runtime_suspend(dev_name(dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);

	return ret;
}
static int ufshcd_wl_runtime_resume(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufs_hba *hba;
	int ret;
	ktime_t start = ktime_get();

	hba = shost_priv(sdev->host);

	ret = __ufshcd_wl_resume(hba, UFS_RUNTIME_PM);
	if (ret)
		dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);

	trace_ufshcd_wl_runtime_resume(dev_name(dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);

	return ret;
}
#ifdef CONFIG_PM_SLEEP
static int ufshcd_wl_suspend(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufs_hba *hba;
	int ret = 0;
	ktime_t start = ktime_get();

	hba = shost_priv(sdev->host);
	down(&hba->host_sem);
	hba->system_suspending = true;

	if (pm_runtime_suspended(dev))
		goto out;

	ret = __ufshcd_wl_suspend(hba, UFS_SYSTEM_PM);
	if (ret) {
		dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
		up(&hba->host_sem);
	}

out:
	if (!ret)
		hba->is_sys_suspended = true;
	trace_ufshcd_wl_suspend(dev_name(dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);

	return ret;
}
static int ufshcd_wl_resume(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufs_hba *hba;
	int ret = 0;
	ktime_t start = ktime_get();

	hba = shost_priv(sdev->host);

	if (pm_runtime_suspended(dev))
		goto out;

	ret = __ufshcd_wl_resume(hba, UFS_SYSTEM_PM);
	if (ret)
		dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
out:
	trace_ufshcd_wl_resume(dev_name(dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	if (!ret)
		hba->is_sys_suspended = false;
	hba->system_suspending = false;
	up(&hba->host_sem);
	return ret;
}
#endif
static void ufshcd_wl_shutdown(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufs_hba *hba;

	hba = shost_priv(sdev->host);

	down(&hba->host_sem);
	hba->shutting_down = true;
	up(&hba->host_sem);

	/* Turn on everything while shutting down */
	ufshcd_rpm_get_sync(hba);
	scsi_device_quiesce(sdev);
	shost_for_each_device(sdev, hba->host) {
		if (sdev == hba->ufs_device_wlun)
			continue;
		scsi_device_quiesce(sdev);
	}
	__ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
}
/**
 * ufshcd_suspend - helper function for suspend operations
 * @hba: per adapter instance
 *
 * This function will disable irqs, turn off clocks
 * and set vreg and hba-vreg in lpm mode.
 */
static int ufshcd_suspend(struct ufs_hba *hba)
{
	int ret;

	if (!hba->is_powered)
		return 0;
	/*
	 * Disable the host irq as there won't be any
	 * host controller transaction expected till resume.
	 */
	ufshcd_disable_irq(hba);
	ret = ufshcd_setup_clocks(hba, false);
	if (ret) {
		ufshcd_enable_irq(hba);
		return ret;
	}
	if (ufshcd_is_clkgating_allowed(hba)) {
		hba->clk_gating.state = CLKS_OFF;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
	}

	ufshcd_vreg_set_lpm(hba);
	/* Put the host controller in low power mode if possible */
	ufshcd_hba_vreg_set_lpm(hba);
	return ret;
}
#ifdef CONFIG_PM
/**
 * ufshcd_resume - helper function for resume operations
 * @hba: per adapter instance
 *
 * This function basically turns on the regulators, clocks and
 * irqs of the hba.
 *
 * Returns 0 for success and non-zero for failure
 */
static int ufshcd_resume(struct ufs_hba *hba)
{
	int ret;

	if (!hba->is_powered)
		return 0;

	ufshcd_hba_vreg_set_hpm(hba);
	ret = ufshcd_vreg_set_hpm(hba);
	if (ret)
		goto out;

	/* Make sure clocks are enabled before accessing controller */
	ret = ufshcd_setup_clocks(hba, true);
	if (ret)
		goto disable_vreg;

	/* enable the host irq as host controller would be active soon */
	ufshcd_enable_irq(hba);

	goto out;

disable_vreg:
	ufshcd_vreg_set_lpm(hba);
out:
	if (ret)
		ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret);
	return ret;
}
#endif /* CONFIG_PM */
#ifdef CONFIG_PM_SLEEP
/**
 * ufshcd_system_suspend - system suspend callback
 * @dev: Device associated with the UFS controller.
 *
 * Executed before putting the system into a sleep state in which the contents
 * of main memory are preserved.
 *
 * Returns 0 for success and non-zero for failure
 */
int ufshcd_system_suspend(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret = 0;
	ktime_t start = ktime_get();

	if (pm_runtime_suspended(hba->dev))
		goto out;

	ret = ufshcd_suspend(hba);
out:
	trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	return ret;
}
EXPORT_SYMBOL(ufshcd_system_suspend);
/**
 * ufshcd_system_resume - system resume callback
 * @dev: Device associated with the UFS controller.
 *
 * Executed after waking the system up from a sleep state in which the contents
 * of main memory were preserved.
 *
 * Returns 0 for success and non-zero for failure
 */
int ufshcd_system_resume(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	ktime_t start = ktime_get();
	int ret = 0;

	if (pm_runtime_suspended(hba->dev))
		goto out;

	ret = ufshcd_resume(hba);

out:
	trace_ufshcd_system_resume(dev_name(hba->dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);

	return ret;
}
EXPORT_SYMBOL(ufshcd_system_resume);
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
/**
 * ufshcd_runtime_suspend - runtime suspend callback
 * @dev: Device associated with the UFS controller.
 *
 * Check the description of ufshcd_suspend() function for more details.
 *
 * Returns 0 for success and non-zero for failure
 */
int ufshcd_runtime_suspend(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret;
	ktime_t start = ktime_get();

	ret = ufshcd_suspend(hba);

	trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	return ret;
}
EXPORT_SYMBOL(ufshcd_runtime_suspend);
/**
 * ufshcd_runtime_resume - runtime resume routine
 * @dev: Device associated with the UFS controller.
 *
 * This function basically brings the controller
 * to active state. The following operations are done in this function:
 *
 * 1. Turn on all the controller related clocks
 * 2. Turn ON VCC rail
 */
int ufshcd_runtime_resume(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret;
	ktime_t start = ktime_get();

	ret = ufshcd_resume(hba);

	trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	return ret;
}
EXPORT_SYMBOL(ufshcd_runtime_resume);
#endif /* CONFIG_PM */
/**
 * ufshcd_shutdown - shutdown routine
 * @hba: per adapter instance
 *
 * This function would turn off both UFS device and UFS hba
 * regulators. It would also disable clocks.
 *
 * Returns 0 always to allow force shutdown even in case of errors.
 */
int ufshcd_shutdown(struct ufs_hba *hba)
{
	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
		ufshcd_suspend(hba);

	hba->is_powered = false;
	/* allow force shutdown even in case of errors */
	return 0;
}
EXPORT_SYMBOL(ufshcd_shutdown);
/**
 * ufshcd_remove - de-allocate SCSI host and host memory space
 *		data structure memory
 * @hba: per adapter instance
 */
void ufshcd_remove(struct ufs_hba *hba)
{
	if (hba->ufs_device_wlun)
		ufshcd_rpm_get_sync(hba);
	ufs_hwmon_remove(hba);
	ufs_bsg_remove(hba);
	ufshpb_remove(hba);
	ufs_sysfs_remove_nodes(hba->dev);
	blk_mq_destroy_queue(hba->tmf_queue);
	blk_put_queue(hba->tmf_queue);
	blk_mq_free_tag_set(&hba->tmf_tag_set);
	scsi_remove_host(hba->host);
	/* disable interrupts */
	ufshcd_disable_intr(hba, hba->intr_mask);
	ufshcd_hba_stop(hba);
	ufshcd_hba_exit(hba);
}
EXPORT_SYMBOL_GPL(ufshcd_remove);
#ifdef CONFIG_PM_SLEEP
int ufshcd_system_freeze(struct device *dev)
{
	return ufshcd_system_suspend(dev);
}
EXPORT_SYMBOL_GPL(ufshcd_system_freeze);

int ufshcd_system_restore(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret;

	ret = ufshcd_system_resume(dev);
	if (ret)
		return ret;

	/* Configure UTRL and UTMRL base address registers */
	ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
			REG_UTP_TRANSFER_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
			REG_UTP_TRANSFER_REQ_LIST_BASE_H);
	ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
			REG_UTP_TASK_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
			REG_UTP_TASK_REQ_LIST_BASE_H);
	/*
	 * Make sure that UTRL and UTMRL base address registers
	 * are updated with the latest queue addresses. Only after
	 * updating these addresses, we can queue the new commands.
	 */
	mb();

	/* Resuming from hibernate, assume that link was OFF */
	ufshcd_set_link_off(hba);

	return 0;
}
EXPORT_SYMBOL_GPL(ufshcd_system_restore);

int ufshcd_system_thaw(struct device *dev)
{
	return ufshcd_system_resume(dev);
}
EXPORT_SYMBOL_GPL(ufshcd_system_thaw);
#endif /* CONFIG_PM_SLEEP */
/**
 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
 * @hba: pointer to Host Bus Adapter (HBA)
 */
void ufshcd_dealloc_host(struct ufs_hba *hba)
{
	scsi_host_put(hba->host);
}
EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
/**
 * ufshcd_set_dma_mask - Set dma mask based on the controller
 *			 addressing capability
 * @hba: per adapter instance
 *
 * Returns 0 for success, non-zero for failure
 */
static int ufshcd_set_dma_mask(struct ufs_hba *hba)
{
	if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
		if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
			return 0;
	}
	return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
}
/**
 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
 * @dev: pointer to device handle
 * @hba_handle: driver private handle
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	int err = 0;

	if (!dev) {
		dev_err(dev,
		"Invalid memory reference for dev is NULL\n");
		err = -ENODEV;
		goto out_error;
	}

	host = scsi_host_alloc(&ufshcd_driver_template,
				sizeof(struct ufs_hba));
	if (!host) {
		dev_err(dev, "scsi_host_alloc failed\n");
		err = -ENOMEM;
		goto out_error;
	}
	host->nr_maps = HCTX_TYPE_POLL + 1;
	hba = shost_priv(host);
	hba->host = host;
	hba->dev = dev;
	hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
	hba->nop_out_timeout = NOP_OUT_TIMEOUT;
	ufshcd_set_sg_entry_size(hba, sizeof(struct ufshcd_sg_entry));
	INIT_LIST_HEAD(&hba->clk_list_head);
	spin_lock_init(&hba->outstanding_lock);

	*hba_handle = hba;

out_error:
	return err;
}
EXPORT_SYMBOL(ufshcd_alloc_host);
/* This function exists because blk_mq_alloc_tag_set() requires this. */
static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
				     const struct blk_mq_queue_data *qd)
{
	WARN_ON_ONCE(true);
	return BLK_STS_NOTSUPP;
}

static const struct blk_mq_ops ufshcd_tmf_ops = {
	.queue_rq = ufshcd_queue_tmf,
};
/**
 * ufshcd_init - Driver initialization routine
 * @hba: per-adapter instance
 * @mmio_base: base register address
 * @irq: Interrupt line of device
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
{
	int err;
	struct Scsi_Host *host = hba->host;
	struct device *dev = hba->dev;
	char eh_wq_name[sizeof("ufs_eh_wq_00")];

	/*
	 * dev_set_drvdata() must be called before any callbacks are registered
	 * that use dev_get_drvdata() (frequency scaling, clock scaling, hwmon,
	 * sysfs).
	 */
	dev_set_drvdata(dev, hba);

	if (!mmio_base) {
		dev_err(hba->dev,
		"Invalid memory reference for mmio_base is NULL\n");
		err = -ENODEV;
		goto out_error;
	}

	hba->mmio_base = mmio_base;
	hba->irq = irq;
	hba->vps = &ufs_hba_vps;

	err = ufshcd_hba_init(hba);
	if (err)
		goto out_error;

	/* Read capabilities registers */
	err = ufshcd_hba_capabilities(hba);
	if (err)
		goto out_disable;

	/* Get UFS version supported by the controller */
	hba->ufs_version = ufshcd_get_ufs_version(hba);

	/* Get Interrupt bit mask per version */
	hba->intr_mask = ufshcd_get_intr_mask(hba);

	err = ufshcd_set_dma_mask(hba);
	if (err) {
		dev_err(hba->dev, "set dma mask failed\n");
		goto out_disable;
	}

	/* Allocate memory for host memory space */
	err = ufshcd_memory_alloc(hba);
	if (err) {
		dev_err(hba->dev, "Memory allocation failed\n");
		goto out_disable;
	}

	/* Configure LRB */
	ufshcd_host_memory_configure(hba);

	host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
	host->cmd_per_lun = hba->nutrs - UFSHCD_NUM_RESERVED;
	host->max_id = UFSHCD_MAX_ID;
	host->max_lun = UFS_MAX_LUNS;
	host->max_channel = UFSHCD_MAX_CHANNEL;
	host->unique_id = host->host_no;
	host->max_cmd_len = UFS_CDB_SIZE;

	hba->max_pwr_info.is_valid = false;

	/* Initialize work queues */
	snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
		 hba->host->host_no);
	hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
	if (!hba->eh_wq) {
		dev_err(hba->dev, "%s: failed to create eh workqueue\n",
			__func__);
		err = -ENOMEM;
		goto out_disable;
	}
	INIT_WORK(&hba->eh_work, ufshcd_err_handler);
	INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);

	sema_init(&hba->host_sem, 1);

	/* Initialize UIC command mutex */
	mutex_init(&hba->uic_cmd_mutex);

	/* Initialize mutex for device management commands */
	mutex_init(&hba->dev_cmd.lock);

	/* Initialize mutex for exception event control */
	mutex_init(&hba->ee_ctrl_mutex);

	init_rwsem(&hba->clk_scaling_lock);

	ufshcd_init_clk_gating(hba);

	ufshcd_init_clk_scaling(hba);

	/*
	 * In order to avoid any spurious interrupt immediately after
	 * registering UFS controller interrupt handler, clear any pending UFS
	 * interrupt status and disable all the UFS interrupts.
	 */
	ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
		      REG_INTERRUPT_STATUS);
	ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
	/*
	 * Make sure that UFS interrupts are disabled and any pending interrupt
	 * status is cleared before registering UFS interrupt handler.
	 */
	mb();

	/* IRQ registration */
	err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
	if (err) {
		dev_err(hba->dev, "request irq failed\n");
		goto out_disable;
	} else {
		hba->is_irq_enabled = true;
	}

	if (!is_mcq_supported(hba)) {
		err = scsi_add_host(host, hba->dev);
		if (err) {
			dev_err(hba->dev, "scsi_add_host failed\n");
			goto out_disable;
		}
		hba->scsi_host_added = true;
	}

	hba->tmf_tag_set = (struct blk_mq_tag_set) {
		.nr_hw_queues	= 1,
		.queue_depth	= hba->nutmrs,
		.ops		= &ufshcd_tmf_ops,
		.flags		= BLK_MQ_F_NO_SCHED,
	};
	err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
	if (err < 0)
		goto out_remove_scsi_host;
	hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
	if (IS_ERR(hba->tmf_queue)) {
		err = PTR_ERR(hba->tmf_queue);
		goto free_tmf_tag_set;
	}
	hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs,
				    sizeof(*hba->tmf_rqs), GFP_KERNEL);
	if (!hba->tmf_rqs) {
		err = -ENOMEM;
		goto free_tmf_queue;
	}

	/* Reset the attached device */
	ufshcd_device_reset(hba);

	ufshcd_init_crypto(hba);

	/* Host controller enable */
	err = ufshcd_hba_enable(hba);
	if (err) {
		dev_err(hba->dev, "Host controller enable failed\n");
		ufshcd_print_evt_hist(hba);
		ufshcd_print_host_state(hba);
		goto free_tmf_queue;
	}

	/*
	 * Set the default power management level for runtime and system PM.
	 * Default power saving mode is to keep UFS link in Hibern8 state
	 * and UFS device in sleep state.
	 */
	hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
						UFS_SLEEP_PWR_MODE,
						UIC_LINK_HIBERN8_STATE);
	hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
						UFS_SLEEP_PWR_MODE,
						UIC_LINK_HIBERN8_STATE);

	INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work,
			  ufshcd_rpm_dev_flush_recheck_work);

	/* Set the default auto-hibernate idle timer value to 150 ms */
	if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
		hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
			    FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
	}

	/* Hold auto suspend until async scan completes */
	pm_runtime_get_sync(dev);
	atomic_set(&hba->scsi_block_reqs_cnt, 0);
	/*
	 * We are assuming that device wasn't put in sleep/power-down
	 * state exclusively during the boot stage before kernel.
	 * This assumption helps avoid doing link startup twice during
	 * ufshcd_probe_hba().
	 */
	ufshcd_set_ufs_dev_active(hba);

	async_schedule(ufshcd_async_scan, hba);
	ufs_sysfs_add_nodes(hba->dev);

	device_enable_async_suspend(dev);
	return 0;

free_tmf_queue:
	blk_mq_destroy_queue(hba->tmf_queue);
	blk_put_queue(hba->tmf_queue);
free_tmf_tag_set:
	blk_mq_free_tag_set(&hba->tmf_tag_set);
out_remove_scsi_host:
	scsi_remove_host(hba->host);
out_disable:
	hba->is_irq_enabled = false;
	ufshcd_hba_exit(hba);
out_error:
	return err;
}
EXPORT_SYMBOL_GPL(ufshcd_init);
void ufshcd_resume_complete(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	if (hba->complete_put) {
		ufshcd_rpm_put(hba);
		hba->complete_put = false;
	}
}
EXPORT_SYMBOL_GPL(ufshcd_resume_complete);
static bool ufshcd_rpm_ok_for_spm(struct ufs_hba *hba)
{
	struct device *dev = &hba->ufs_device_wlun->sdev_gendev;
	enum ufs_dev_pwr_mode dev_pwr_mode;
	enum uic_link_state link_state;
	unsigned long flags;
	bool res;

	spin_lock_irqsave(&dev->power.lock, flags);
	dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl);
	link_state = ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl);
	res = pm_runtime_suspended(dev) &&
	      hba->curr_dev_pwr_mode == dev_pwr_mode &&
	      hba->uic_link_state == link_state &&
	      !hba->dev_info.b_rpm_dev_flush_capable;
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return res;
}
int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret;

	/*
	 * SCSI assumes that runtime-pm and system-pm for scsi drivers
	 * are same. And it doesn't wake up the device for system-suspend
	 * if it's runtime suspended. But ufs doesn't follow that.
	 * Refer ufshcd_resume_complete()
	 */
	if (hba->ufs_device_wlun) {
		/* Prevent runtime suspend */
		ufshcd_rpm_get_noresume(hba);
		/*
		 * Check if already runtime suspended in same state as system
		 * suspend would be.
		 */
		if (!rpm_ok_for_spm || !ufshcd_rpm_ok_for_spm(hba)) {
			/* RPM state is not ok for SPM, so runtime resume */
			ret = ufshcd_rpm_resume(hba);
			if (ret < 0 && ret != -EACCES) {
				ufshcd_rpm_put(hba);
				return ret;
			}
		}
		hba->complete_put = true;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__ufshcd_suspend_prepare);

int ufshcd_suspend_prepare(struct device *dev)
{
	return __ufshcd_suspend_prepare(dev, true);
}
EXPORT_SYMBOL_GPL(ufshcd_suspend_prepare);
#ifdef CONFIG_PM_SLEEP
static int ufshcd_wl_poweroff(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufs_hba *hba = shost_priv(sdev->host);

	__ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
	return 0;
}
#endif
static int ufshcd_wl_probe(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	if (!is_device_wlun(sdev))
		return -ENODEV;

	blk_pm_runtime_init(sdev->request_queue, dev);
	pm_runtime_set_autosuspend_delay(dev, 0);
	pm_runtime_allow(dev);

	return 0;
}

static int ufshcd_wl_remove(struct device *dev)
{
	pm_runtime_forbid(dev);
	return 0;
}
static const struct dev_pm_ops ufshcd_wl_pm_ops = {
#ifdef CONFIG_PM_SLEEP
	.suspend = ufshcd_wl_suspend,
	.resume = ufshcd_wl_resume,
	.freeze = ufshcd_wl_suspend,
	.thaw = ufshcd_wl_resume,
	.poweroff = ufshcd_wl_poweroff,
	.restore = ufshcd_wl_resume,
#endif
	SET_RUNTIME_PM_OPS(ufshcd_wl_runtime_suspend, ufshcd_wl_runtime_resume, NULL)
};
/*
 * ufs_dev_wlun_template - describes ufs device wlun
 * ufs-device wlun - used to send pm commands
 * All luns are consumers of ufs-device wlun.
 *
 * Currently, no sd driver is present for wluns.
 * Hence no specific pm operations are performed.
 * With ufs design, SSU should be sent to ufs-device wlun.
 * Hence register a scsi driver for ufs wluns only.
 */
static struct scsi_driver ufs_dev_wlun_template = {
	.gendrv = {
		.name = "ufs_device_wlun",
		.owner = THIS_MODULE,
		.probe = ufshcd_wl_probe,
		.remove = ufshcd_wl_remove,
		.pm = &ufshcd_wl_pm_ops,
		.shutdown = ufshcd_wl_shutdown,
	},
};
static int __init ufshcd_core_init(void)
{
	int ret;

	ufs_debugfs_init();

	ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv);
	if (ret)
		ufs_debugfs_exit();
	return ret;
}

static void __exit ufshcd_core_exit(void)
{
	ufs_debugfs_exit();
	scsi_unregister_driver(&ufs_dev_wlun_template.gendrv);
}

module_init(ufshcd_core_init);
module_exit(ufshcd_core_exit);
MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("Generic UFS host controller driver Core");
MODULE_LICENSE("GPL");