// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Universal Flash Storage Host controller driver Core
 * Copyright (C) 2011-2013 Samsung India Software Operations
 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 */

#include <linux/async.h>
#include <linux/devfreq.h>
#include <linux/nls.h>
#include <linux/of.h>
#include <linux/bitfield.h>
#include <linux/blk-pm.h>
#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
#include <linux/sched/clock.h>
#include <linux/iopoll.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include "ufshcd-priv.h"
#include <ufs/ufs_quirks.h>
#include <ufs/unipro.h>
#include "ufs-sysfs.h"
#include "ufs-debugfs.h"
#include "ufs-fault-injection.h"
#include "ufs_bsg.h"
#include "ufshcd-crypto.h"
#include <asm/unaligned.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>
#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)

#define UFSHCD_ENABLE_MCQ_INTRS	(UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK |\
				 MCQ_CQ_EVENT_STATUS)
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT	500

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES    10
/* Timeout after 50 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT    50 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES 3
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */

/* Advanced RPMB request timeout */
#define ADVANCED_RPMB_REQ_TIMEOUT  3000 /* 3 seconds */

/* Task management command timeout */
#define TM_CMD_TIMEOUT	100 /* msecs */

/* maximum number of retries for a general UIC command  */
#define UFS_UIC_COMMAND_RETRIES 3

/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3

/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5

/* Maximum number of error handler retries before giving up */
#define MAX_ERR_HANDLER_RETRIES 5

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO	0x02

/* default delay of autosuspend: 2000 ms */
#define RPM_AUTOSUSPEND_DELAY_MS 2000

/* Default delay of RPM device flush delayed work */
#define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS 5000

/* Default value of wait time before gating device ref clock */
#define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */

/* Polling time to wait for fDeviceInit */
#define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */
/* UFSHC 4.0 compliant HC support this mode. */
static bool use_mcq_mode = true;

static bool is_mcq_supported(struct ufs_hba *hba)
{
	return hba->mcq_sup && use_mcq_mode;
}

module_param(use_mcq_mode, bool, 0644);
MODULE_PARM_DESC(use_mcq_mode, "Control MCQ mode for controllers starting from UFSHCI 4.0. 1 - enable MCQ, 0 - disable MCQ. MCQ is enabled by default");
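/*
 * Illustrative usage (not part of the original file): since use_mcq_mode is
 * a 0644 module parameter, it can be set at load time and inspected at
 * runtime, e.g.:
 *
 *	modprobe ufshcd-core use_mcq_mode=0
 *	cat /sys/module/ufshcd_core/parameters/use_mcq_mode
 *
 * The exact module name depends on the kernel configuration; the parameter
 * only takes effect for controllers probed after it is set, because
 * is_mcq_supported() is evaluated during host initialization.
 */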
#define ufshcd_toggle_vreg(_dev, _vreg, _on)				\
	({                                                              \
		int _ret;                                               \
		if (_on)                                                \
			_ret = ufshcd_enable_vreg(_dev, _vreg);         \
		else                                                    \
			_ret = ufshcd_disable_vreg(_dev, _vreg);        \
		_ret;                                                   \
	})

#define ufshcd_hex_dump(prefix_str, buf, len) do {                       \
	size_t __len = (len);                                            \
	print_hex_dump(KERN_ERR, prefix_str,                             \
		       __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
		       16, 4, buf, __len, false);                        \
} while (0)
int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
		     const char *prefix)
{
	u32 *regs;
	size_t pos;

	if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
		return -EINVAL;

	regs = kzalloc(len, GFP_ATOMIC);
	if (!regs)
		return -ENOMEM;

	for (pos = 0; pos < len; pos += 4) {
		if (offset == 0 &&
		    pos >= REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER &&
		    pos <= REG_UIC_ERROR_CODE_DME)
			continue;
		regs[pos / 4] = ufshcd_readl(hba, offset + pos);
	}

	ufshcd_hex_dump(prefix, regs, len);
	kfree(regs);

	return 0;
}
EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
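/*
 * Example (illustrative, not from the original source): a vendor driver
 * could dump the first 256 bytes of its register space with:
 *
 *	ufshcd_dump_regs(hba, 0, 256, "vendor_regs: ");
 *
 * Both @offset and @len must be 4-byte aligned or -EINVAL is returned, and
 * the UIC error-code registers are skipped when dumping from offset 0 so
 * that a debug dump does not disturb their contents.
 */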
enum {
	UFSHCD_MAX_CHANNEL	= 0,
	UFSHCD_MAX_ID		= 1,
	UFSHCD_CMD_PER_LUN	= 32 - UFSHCD_NUM_RESERVED,
	UFSHCD_CAN_QUEUE	= 32 - UFSHCD_NUM_RESERVED,
};
static const char *const ufshcd_state_name[] = {
	[UFSHCD_STATE_RESET]			= "reset",
	[UFSHCD_STATE_OPERATIONAL]		= "operational",
	[UFSHCD_STATE_ERROR]			= "error",
	[UFSHCD_STATE_EH_SCHEDULED_FATAL]	= "eh_fatal",
	[UFSHCD_STATE_EH_SCHEDULED_NON_FATAL]	= "eh_non_fatal",
};
/* UFSHCD error handling flags */
enum {
	UFSHCD_EH_IN_PROGRESS = (1 << 0),
};
/* UFSHCD UIC layer error flags */
enum {
	UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
	UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
	UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
	UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
	UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
	UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
	UFSHCD_UIC_PA_GENERIC_ERROR = (1 << 6), /* Generic PA error */
};
#define ufshcd_set_eh_in_progress(h) \
	((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
	((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
	((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
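/*
 * Illustrative usage (not from the original source): the error handler
 * brackets recovery with these helpers, e.g.:
 *
 *	ufshcd_set_eh_in_progress(hba);
 *	...recovery steps...
 *	ufshcd_clear_eh_in_progress(hba);
 *
 * while paths such as ufshcd_devfreq_target() below bail out early when
 * ufshcd_eh_in_progress(hba) is true.
 */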
const struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
	[UFS_PM_LVL_0] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	[UFS_PM_LVL_1] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	[UFS_PM_LVL_2] = {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	[UFS_PM_LVL_3] = {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	[UFS_PM_LVL_4] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	[UFS_PM_LVL_5] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
	/*
	 * For DeepSleep, the link is first put in hibern8 and then off.
	 * Leaving the link in hibern8 is not supported.
	 */
	[UFS_PM_LVL_6] = {UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE},
};
static inline enum ufs_dev_pwr_mode
ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].dev_state;
}

static inline enum uic_link_state
ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].link_state;
}

static inline enum ufs_pm_level
ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
					enum uic_link_state link_state)
{
	enum ufs_pm_level lvl;

	for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
		if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
			(ufs_pm_lvl_states[lvl].link_state == link_state))
			return lvl;
	}

	/* if no match found, return the level 0 */
	return UFS_PM_LVL_0;
}
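/*
 * Example (illustrative, not from the original source):
 * ufs_get_desired_pm_lvl_for_dev_link_state(UFS_SLEEP_PWR_MODE,
 * UIC_LINK_HIBERN8_STATE) walks the table above and returns UFS_PM_LVL_3;
 * an unsupported combination falls back to UFS_PM_LVL_0 (device active,
 * link active).
 */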
static const struct ufs_dev_quirk ufs_fixups[] = {
	/* UFS cards deviations table */
	{ .wmanufacturerid = UFS_VENDOR_MICRON,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
	{ .wmanufacturerid = UFS_VENDOR_SAMSUNG,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
		   UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
		   UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS },
	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME },
	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
	  .model = "hB8aL1" /*H28U62301AMR*/,
	  .quirk = UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME },
	{ .wmanufacturerid = UFS_VENDOR_TOSHIBA,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
	{ .wmanufacturerid = UFS_VENDOR_TOSHIBA,
	  .model = "THGLF2G9C8KBADG",
	  .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
	{ .wmanufacturerid = UFS_VENDOR_TOSHIBA,
	  .model = "THGLF2G9D8KBADG",
	  .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
	{}
};
static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
			     struct ufs_pa_layer_attr *pwr_mode);
static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
					 struct ufs_vreg *vreg);
static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
						 bool enable);
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
static inline void ufshcd_enable_irq(struct ufs_hba *hba)
{
	if (!hba->is_irq_enabled) {
		enable_irq(hba->irq);
		hba->is_irq_enabled = true;
	}
}

static inline void ufshcd_disable_irq(struct ufs_hba *hba)
{
	if (hba->is_irq_enabled) {
		disable_irq(hba->irq);
		hba->is_irq_enabled = false;
	}
}
static void ufshcd_configure_wb(struct ufs_hba *hba)
{
	if (!ufshcd_is_wb_allowed(hba))
		return;

	ufshcd_wb_toggle(hba, true);

	ufshcd_wb_toggle_buf_flush_during_h8(hba, true);

	if (ufshcd_is_wb_buf_flush_allowed(hba))
		ufshcd_wb_toggle_buf_flush(hba, true);
}
static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
{
	if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
		scsi_unblock_requests(hba->host);
}

static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
{
	if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
		scsi_block_requests(hba->host);
}
static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
				      enum ufs_trace_str_t str_t)
{
	struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
	struct utp_upiu_header *header;

	if (!trace_ufshcd_upiu_enabled())
		return;

	if (str_t == UFS_CMD_SEND)
		header = &rq->header;
	else
		header = &hba->lrb[tag].ucd_rsp_ptr->header;

	trace_ufshcd_upiu(dev_name(hba->dev), str_t, header, &rq->sc.cdb,
			  UFS_TSF_CDB);
}
static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba,
					enum ufs_trace_str_t str_t,
					struct utp_upiu_req *rq_rsp)
{
	if (!trace_ufshcd_upiu_enabled())
		return;

	trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq_rsp->header,
			  &rq_rsp->qr, UFS_TSF_OSF);
}
static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
				     enum ufs_trace_str_t str_t)
{
	struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag];

	if (!trace_ufshcd_upiu_enabled())
		return;

	if (str_t == UFS_TM_SEND)
		trace_ufshcd_upiu(dev_name(hba->dev), str_t,
				  &descp->upiu_req.req_header,
				  &descp->upiu_req.input_param1,
				  UFS_TSF_TM_INPUT);
	else
		trace_ufshcd_upiu(dev_name(hba->dev), str_t,
				  &descp->upiu_rsp.rsp_header,
				  &descp->upiu_rsp.output_param1,
				  UFS_TSF_TM_OUTPUT);
}
static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
					 const struct uic_command *ucmd,
					 enum ufs_trace_str_t str_t)
{
	u32 cmd;

	if (!trace_ufshcd_uic_command_enabled())
		return;

	if (str_t == UFS_CMD_SEND)
		cmd = ucmd->command;
	else
		cmd = ufshcd_readl(hba, REG_UIC_COMMAND);

	trace_ufshcd_uic_command(dev_name(hba->dev), str_t, cmd,
				 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1),
				 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2),
				 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
}
static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
				     enum ufs_trace_str_t str_t)
{
	u64 lba = 0;
	u8 opcode = 0, group_id = 0;
	u32 doorbell = 0;
	u32 intr;
	int hwq_id = -1;
	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
	struct scsi_cmnd *cmd = lrbp->cmd;
	struct request *rq = scsi_cmd_to_rq(cmd);
	int transfer_len = -1;

	if (!cmd)
		return;

	/* trace UPIU also */
	ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
	if (!trace_ufshcd_command_enabled())
		return;

	opcode = cmd->cmnd[0];

	if (opcode == READ_10 || opcode == WRITE_10) {
		/*
		 * Currently we only fully trace read(10) and write(10) commands
		 */
		transfer_len =
		       be32_to_cpu(lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
		lba = scsi_get_lba(cmd);
		if (opcode == WRITE_10)
			group_id = lrbp->cmd->cmnd[6];
	} else if (opcode == UNMAP) {
		/*
		 * The number of Bytes to be unmapped beginning with the lba.
		 */
		transfer_len = blk_rq_bytes(rq);
		lba = scsi_get_lba(cmd);
	}

	intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);

	if (is_mcq_enabled(hba)) {
		struct ufs_hw_queue *hwq = ufshcd_mcq_req_to_hwq(hba, rq);

		hwq_id = hwq->id;
	} else {
		doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	}
	trace_ufshcd_command(dev_name(hba->dev), str_t, tag,
			doorbell, hwq_id, transfer_len, intr, lba, opcode, group_id);
}
static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
{
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		return;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
				clki->max_freq)
			dev_err(hba->dev, "clk: %s, rate: %u\n",
					clki->name, clki->curr_freq);
	}
}
static void ufshcd_print_evt(struct ufs_hba *hba, u32 id,
			     const char *err_name)
{
	int i;
	bool found = false;
	const struct ufs_event_hist *e;

	if (id >= UFS_EVT_CNT)
		return;

	e = &hba->ufs_stats.event[id];

	for (i = 0; i < UFS_EVENT_HIST_LENGTH; i++) {
		int p = (i + e->pos) % UFS_EVENT_HIST_LENGTH;

		if (e->tstamp[p] == 0)
			continue;
		dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
			e->val[p], div_u64(e->tstamp[p], 1000));
		found = true;
	}

	if (!found)
		dev_err(hba->dev, "No record of %s\n", err_name);
	else
		dev_err(hba->dev, "%s: total cnt=%llu\n", err_name, e->cnt);
}
static void ufshcd_print_evt_hist(struct ufs_hba *hba)
{
	ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");

	ufshcd_print_evt(hba, UFS_EVT_PA_ERR, "pa_err");
	ufshcd_print_evt(hba, UFS_EVT_DL_ERR, "dl_err");
	ufshcd_print_evt(hba, UFS_EVT_NL_ERR, "nl_err");
	ufshcd_print_evt(hba, UFS_EVT_TL_ERR, "tl_err");
	ufshcd_print_evt(hba, UFS_EVT_DME_ERR, "dme_err");
	ufshcd_print_evt(hba, UFS_EVT_AUTO_HIBERN8_ERR,
			 "auto_hibern8_err");
	ufshcd_print_evt(hba, UFS_EVT_FATAL_ERR, "fatal_err");
	ufshcd_print_evt(hba, UFS_EVT_LINK_STARTUP_FAIL,
			 "link_startup_fail");
	ufshcd_print_evt(hba, UFS_EVT_RESUME_ERR, "resume_fail");
	ufshcd_print_evt(hba, UFS_EVT_SUSPEND_ERR,
			 "suspend_fail");
	ufshcd_print_evt(hba, UFS_EVT_WL_RES_ERR, "wlun resume_fail");
	ufshcd_print_evt(hba, UFS_EVT_WL_SUSP_ERR,
			 "wlun suspend_fail");
	ufshcd_print_evt(hba, UFS_EVT_DEV_RESET, "dev_reset");
	ufshcd_print_evt(hba, UFS_EVT_HOST_RESET, "host_reset");
	ufshcd_print_evt(hba, UFS_EVT_ABORT, "task_abort");

	ufshcd_vops_dbg_register_dump(hba);
}
void ufshcd_print_tr(struct ufs_hba *hba, int tag, bool pr_prdt)
{
	const struct ufshcd_lrb *lrbp;
	int prdt_length;

	lrbp = &hba->lrb[tag];

	dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
			tag, div_u64(lrbp->issue_time_stamp_local_clock, 1000));
	dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
			tag, div_u64(lrbp->compl_time_stamp_local_clock, 1000));
	dev_err(hba->dev,
		"UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
		tag, (u64)lrbp->utrd_dma_addr);

	ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
			sizeof(struct utp_transfer_req_desc));
	dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
			(u64)lrbp->ucd_req_dma_addr);
	ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
			sizeof(struct utp_upiu_req));
	dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
			(u64)lrbp->ucd_rsp_dma_addr);
	ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
			sizeof(struct utp_upiu_rsp));

	prdt_length = le16_to_cpu(
		lrbp->utr_descriptor_ptr->prd_table_length);
	if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
		prdt_length /= ufshcd_sg_entry_size(hba);

	dev_err(hba->dev,
		"UPIU[%d] - PRDT - %d entries  phys@0x%llx\n",
		tag, prdt_length,
		(u64)lrbp->ucd_prdt_dma_addr);

	if (pr_prdt)
		ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
			ufshcd_sg_entry_size(hba) * prdt_length);
}
static bool ufshcd_print_tr_iter(struct request *req, void *priv)
{
	struct scsi_device *sdev = req->q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	struct ufs_hba *hba = shost_priv(shost);

	ufshcd_print_tr(hba, req->tag, *(bool *)priv);

	return true;
}
/**
 * ufshcd_print_trs_all - print trs for all started requests.
 * @hba: per-adapter instance.
 * @pr_prdt: need to print prdt or not.
 */
static void ufshcd_print_trs_all(struct ufs_hba *hba, bool pr_prdt)
{
	blk_mq_tagset_busy_iter(&hba->host->tag_set, ufshcd_print_tr_iter, &pr_prdt);
}
static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
{
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutmrs) {
		struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];

		dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
		ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
	}
}
static void ufshcd_print_host_state(struct ufs_hba *hba)
{
	const struct scsi_device *sdev_ufs = hba->ufs_device_wlun;

	dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
	dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
		hba->outstanding_reqs, hba->outstanding_tasks);
	dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
		hba->saved_err, hba->saved_uic_err);
	dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
		hba->pm_op_in_progress, hba->is_sys_suspended);
	dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
		hba->auto_bkops_enabled, hba->host->host_self_blocked);
	dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
	dev_err(hba->dev,
		"last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n",
		div_u64(hba->ufs_stats.last_hibern8_exit_tstamp, 1000),
		hba->ufs_stats.hibern8_exit_cnt);
	dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n",
		div_u64(hba->ufs_stats.last_intr_ts, 1000),
		hba->ufs_stats.last_intr_status);
	dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
		hba->eh_flags, hba->req_abort_count);
	dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n",
		hba->ufs_version, hba->capabilities, hba->caps);
	dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
		hba->dev_quirks);
	if (sdev_ufs)
		dev_err(hba->dev, "UFS dev info: %.8s %.16s rev %.4s\n",
			sdev_ufs->vendor, sdev_ufs->model, sdev_ufs->rev);

	ufshcd_print_clk_freqs(hba);
}
/**
 * ufshcd_print_pwr_info - print power params as saved in hba
 * power info
 * @hba: per-adapter instance
 */
static void ufshcd_print_pwr_info(struct ufs_hba *hba)
{
	static const char * const names[] = {
		"INVALID MODE",
		"FAST MODE",
		"SLOW_MODE",
		"INVALID MODE",
		"FASTAUTO_MODE",
		"SLOWAUTO_MODE",
		"INVALID MODE",
	};

	/*
	 * Using dev_dbg to avoid messages during runtime PM to avoid
	 * never-ending cycles of messages written back to storage by user space
	 * causing runtime resume, causing more messages and so on.
	 */
	dev_dbg(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
		 __func__,
		 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
		 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
		 names[hba->pwr_info.pwr_rx],
		 names[hba->pwr_info.pwr_tx],
		 hba->pwr_info.hs_rate);
}
static void ufshcd_device_reset(struct ufs_hba *hba)
{
	int err;

	err = ufshcd_vops_device_reset(hba);

	if (!err) {
		ufshcd_set_ufs_dev_active(hba);
		if (ufshcd_is_wb_allowed(hba)) {
			hba->dev_info.wb_enabled = false;
			hba->dev_info.wb_buf_flush_enabled = false;
		}
	}
	if (err != -EOPNOTSUPP)
		ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err);
}
void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
{
	if (!us)
		return;

	if (us < 10)
		udelay(us);
	else
		usleep_range(us, us + tolerance);
}
EXPORT_SYMBOL_GPL(ufshcd_delay_us);
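/*
 * Illustrative usage (not from the original source): waiting a
 * spec-mandated 100 us with 10 us of slack would be:
 *
 *	ufshcd_delay_us(100, 10);
 *
 * Very short delays busy-wait via udelay(); longer ones sleep via
 * usleep_range() so the CPU is not burned while waiting.
 */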
/**
 * ufshcd_wait_for_register - wait for register value to change
 * @hba: per-adapter interface
 * @reg: mmio register offset
 * @mask: mask to apply to the read register value
 * @val: value to wait for
 * @interval_us: polling interval in microseconds
 * @timeout_ms: timeout in milliseconds
 *
 * Return: -ETIMEDOUT on error, zero on success.
 */
static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
				u32 val, unsigned long interval_us,
				unsigned long timeout_ms)
{
	int err = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	/* ignore bits that we don't intend to wait on */
	val = val & mask;

	while ((ufshcd_readl(hba, reg) & mask) != val) {
		usleep_range(interval_us, interval_us + 50);
		if (time_after(jiffies, timeout)) {
			if ((ufshcd_readl(hba, reg) & mask) != val)
				err = -ETIMEDOUT;
			break;
		}
	}

	return err;
}
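/*
 * Example (illustrative, not from the original source): waiting for the
 * controller to clear CONTROLLER_ENABLE could be written as:
 *
 *	err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
 *				       CONTROLLER_ENABLE, 0, 10, 50);
 *
 * i.e. poll roughly every 10 us until (value & mask) == val or 50 ms
 * elapse, in which case -ETIMEDOUT is returned.
 */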
/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 * @hba: Pointer to adapter instance
 *
 * Return: interrupt bit mask per version
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
	if (hba->ufs_version == ufshci_version(1, 0))
		return INTERRUPT_MASK_ALL_VER_10;
	if (hba->ufs_version <= ufshci_version(2, 0))
		return INTERRUPT_MASK_ALL_VER_11;

	return INTERRUPT_MASK_ALL_VER_21;
}
/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba: Pointer to adapter instance
 *
 * Return: UFSHCI version supported by the controller
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	u32 ufshci_ver;

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
		ufshci_ver = ufshcd_vops_get_ufs_hci_version(hba);
	else
		ufshci_ver = ufshcd_readl(hba, REG_UFS_VERSION);

	/*
	 * UFSHCI v1.x uses a different version scheme, in order
	 * to allow the use of comparisons with the ufshci_version
	 * function, we convert it to the same scheme as ufs 2.0+.
	 */
	if (ufshci_ver & 0x00010000)
		return ufshci_version(1, ufshci_ver & 0x00000100);

	return ufshci_ver;
}
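/*
 * Illustrative note (not from the original source): for 2.0+ controllers
 * the VER register value can be compared directly against
 * ufshci_version(major, minor). 1.x controllers use a different encoding
 * with bit 16 set, so the branch above rewrites it into the 2.0+ scheme
 * first; callers such as ufshcd_get_intr_mask() can then use ordered
 * comparisons like "<= ufshci_version(2, 0)" uniformly.
 */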
/**
 * ufshcd_is_device_present - Check if any device connected to
 *			      the host controller
 * @hba: pointer to adapter instance
 *
 * Return: true if device present, false if no device detected
 */
static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & DEVICE_PRESENT;
}
/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @lrbp: pointer to local command reference block
 * @cqe: pointer to the completion queue entry
 *
 * This function is used to get the OCS field from UTRD
 *
 * Return: the OCS field in the UTRD.
 */
static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp,
				      struct cq_entry *cqe)
{
	if (cqe)
		return le32_to_cpu(cqe->status) & MASK_OCS;

	return lrbp->utr_descriptor_ptr->header.ocs & MASK_OCS;
}
/**
 * ufshcd_utrl_clear() - Clear requests from the controller request list.
 * @hba: per adapter instance
 * @mask: mask with one bit set for each request to be cleared
 */
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 mask)
{
	if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
		mask = ~mask;
	/*
	 * From the UFSHCI specification: "UTP Transfer Request List CLear
	 * Register (UTRLCLR): This field is bit significant. Each bit
	 * corresponds to a slot in the UTP Transfer Request List, where bit 0
	 * corresponds to request slot 0. A bit in this field is set to ‘0’
	 * by host software to indicate to the host controller that a transfer
	 * request slot is cleared. The host controller
	 * shall free up any resources associated to the request slot
	 * immediately, and shall set the associated bit in UTRLDBR to ‘0’. The
	 * host software indicates no change to request slots by setting the
	 * associated bits in this field to ‘1’. Bits in this field shall only
	 * be set ‘1’ or ‘0’ by host software when UTRLRSR is set to ‘1’."
	 */
	ufshcd_writel(hba, ~mask, REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}
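/*
 * Example (illustrative, not from the original source): to ask the
 * controller to clear request slot 5, a caller passes mask = BIT(5); the
 * write in ufshcd_utrl_clear() then sets every bit except bit 5 to '1'
 * ("no change") and bit 5 to '0' ("clear this slot"), matching the
 * inverted-logic register description quoted above. On hosts with
 * UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR the polarity is un-inverted first.
 */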
/**
 * ufshcd_utmrl_clear - Clear a bit in UTMRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
{
	if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
		ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
	else
		ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
}
/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Return: 0 on success; a positive value if failed.
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
	return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
}
/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 * @hba: Pointer to adapter instance
 *
 * This function gets the result of UIC command completion
 *
 * Return: 0 on success; non-zero value on error.
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
	       MASK_UIC_COMMAND_RESULT;
}
/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 * @hba: Pointer to adapter instance
 *
 * This function gets UIC command argument3
 *
 * Return: 0 on success; non-zero value on error.
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}
/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * Return: UPIU type.
 */
static inline enum upiu_response_transaction
ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return ucd_rsp_ptr->header.transaction_code;
}
/**
 * ufshcd_is_exception_event - Check if the device raised an exception event
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * The function checks if the device raised an exception event indicated in
 * the Device Information field of response UPIU.
 *
 * Return: true if exception is raised, false otherwise.
 */
static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return ucd_rsp_ptr->header.device_information & 1;
}
/**
 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 * @hba: per adapter instance
 */
static inline void
ufshcd_reset_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE |
		      INT_AGGR_COUNTER_AND_TIMER_RESET,
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}
/**
 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 * @hba: per adapter instance
 * @cnt: Interrupt aggregation counter threshold
 * @tmout: Interrupt aggregation timeout value
 */
static inline void
ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
		      INT_AGGR_COUNTER_THLD_VAL(cnt) |
		      INT_AGGR_TIMEOUT_VAL(tmout),
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}
/**
 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 * @hba: per adapter instance
 */
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}
/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers,
 * When run-stop registers are set to 1, it indicates the
 * host controller that it can process the requests
 * @hba: per adapter instance
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}
/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	u32 val = CONTROLLER_ENABLE;

	if (ufshcd_crypto_enable(hba))
		val |= CRYPTO_GENERAL_ENABLE;

	ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
}
/**
 * ufshcd_is_hba_active - Get controller state
 * @hba: per adapter instance
 *
 * Return: true if and only if the controller is active.
 */
bool ufshcd_is_hba_active(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE;
}
EXPORT_SYMBOL_GPL(ufshcd_is_hba_active);
u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
{
	/* HCI version 1.0 and 1.1 supports UniPro 1.41 */
	if (hba->ufs_version <= ufshci_version(1, 1))
		return UFS_UNIPRO_VER_1_41;
	else
		return UFS_UNIPRO_VER_1_6;
}
EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
{
	/*
	 * If both host and device support UniPro ver1.6 or later, PA layer
	 * parameters tuning happens during link startup itself.
	 *
	 * We can manually tune PA layer parameters if either host or device
	 * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
	 * logic simple, we will only do manual tuning if local unipro version
	 * doesn't support ver1.6 or later.
	 */
	return ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6;
}
/**
 * ufshcd_set_clk_freq - set UFS controller clock frequencies
 * @hba: per adapter instance
 * @scale_up: If True, set max possible frequency othewise set low frequency
 *
 * Return: 0 if successful; < 0 upon failure.
 */
static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		goto out;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (scale_up && clki->max_freq) {
				if (clki->curr_freq == clki->max_freq)
					continue;

				ret = clk_set_rate(clki->clk, clki->max_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->max_freq, ret);
					break;
				}
				trace_ufshcd_clk_scaling(dev_name(hba->dev),
						"scaled up", clki->name,
						clki->curr_freq,
						clki->max_freq);

				clki->curr_freq = clki->max_freq;

			} else if (!scale_up && clki->min_freq) {
				if (clki->curr_freq == clki->min_freq)
					continue;

				ret = clk_set_rate(clki->clk, clki->min_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->min_freq, ret);
					break;
				}
				trace_ufshcd_clk_scaling(dev_name(hba->dev),
						"scaled down", clki->name,
						clki->curr_freq,
						clki->min_freq);
				clki->curr_freq = clki->min_freq;
			}
		}
		dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
				clki->name, clk_get_rate(clki->clk));
	}

out:
	return ret;
}
/**
 * ufshcd_scale_clks - scale up or scale down UFS controller clocks
 * @hba: per adapter instance
 * @scale_up: True if scaling up and false if scaling down
 *
 * Return: 0 if successful; < 0 upon failure.
 */
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;
	ktime_t start = ktime_get();

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
	if (ret)
		goto out;

	ret = ufshcd_set_clk_freq(hba, scale_up);
	if (ret)
		goto out;

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
	if (ret)
		ufshcd_set_clk_freq(hba, !scale_up);

out:
	trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
			(scale_up ? "up" : "down"),
			ktime_to_us(ktime_sub(ktime_get(), start)), ret);
	return ret;
}
/**
 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
 * @hba: per adapter instance
 * @scale_up: True if scaling up and false if scaling down
 *
 * Return: true if scaling is required, false otherwise.
 */
static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
					       bool scale_up)
{
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		return false;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (scale_up && clki->max_freq) {
				if (clki->curr_freq == clki->max_freq)
					continue;
				return true;
			} else if (!scale_up && clki->min_freq) {
				if (clki->curr_freq == clki->min_freq)
					continue;
				return true;
			}
		}
	}

	return false;
}
/*
 * Determine the number of pending commands by counting the bits in the SCSI
 * device budget maps. This approach has been selected because a bit is set in
 * the budget map before scsi_host_queue_ready() checks the host_self_blocked
 * flag. The host_self_blocked flag can be modified by calling
 * scsi_block_requests() or scsi_unblock_requests().
 */
static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
{
	const struct scsi_device *sdev;
	u32 pending = 0;

	lockdep_assert_held(hba->host->host_lock);
	__shost_for_each_device(sdev, hba->host)
		pending += sbitmap_weight(&sdev->budget_map);

	return pending;
}
/*
 * Wait until all pending SCSI commands and TMFs have finished or the timeout
 * has expired.
 *
 * Return: 0 upon success; -EBUSY upon timeout.
 */
static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
					u64 wait_timeout_us)
{
	unsigned long flags;
	int ret = 0;
	u32 tm_doorbell;
	u32 tr_pending;
	bool timeout = false, do_last_check = false;
	ktime_t start;

	ufshcd_hold(hba);
	spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * Wait for all the outstanding tasks/transfer requests.
	 * Verify by checking the doorbell registers are clear.
	 */
	start = ktime_get();
	do {
		if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
			ret = -EBUSY;
			goto out;
		}

		tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
		tr_pending = ufshcd_pending_cmds(hba);
		if (!tm_doorbell && !tr_pending) {
			timeout = false;
			break;
		} else if (do_last_check) {
			break;
		}

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		io_schedule_timeout(msecs_to_jiffies(20));
		if (ktime_to_us(ktime_sub(ktime_get(), start)) >
		    wait_timeout_us) {
			timeout = true;
			/*
			 * We might have scheduled out for long time so make
			 * sure to check if doorbells are cleared by this time
			 * or not.
			 */
			do_last_check = true;
		}
		spin_lock_irqsave(hba->host->host_lock, flags);
	} while (tm_doorbell || tr_pending);

	if (timeout) {
		dev_err(hba->dev,
			"%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
			__func__, tm_doorbell, tr_pending);
		ret = -EBUSY;
	}
out:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_release(hba);
	return ret;
}
/**
 * ufshcd_scale_gear - scale up/down UFS gear
 * @hba: per adapter instance
 * @scale_up: True for scaling up gear and false for scaling down
 *
 * Return: 0 for success; -EBUSY if scaling can't happen at this time;
 * non-zero for any other errors.
 */
static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;
	struct ufs_pa_layer_attr new_pwr_info;

	if (scale_up) {
		memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info,
		       sizeof(struct ufs_pa_layer_attr));
	} else {
		memcpy(&new_pwr_info, &hba->pwr_info,
		       sizeof(struct ufs_pa_layer_attr));

		if (hba->pwr_info.gear_tx > hba->clk_scaling.min_gear ||
		    hba->pwr_info.gear_rx > hba->clk_scaling.min_gear) {
			/* save the current power mode */
			memcpy(&hba->clk_scaling.saved_pwr_info,
			       &hba->pwr_info,
			       sizeof(struct ufs_pa_layer_attr));

			/* scale down gear */
			new_pwr_info.gear_tx = hba->clk_scaling.min_gear;
			new_pwr_info.gear_rx = hba->clk_scaling.min_gear;
		}
	}

	/* check if the power mode needs to be changed or not? */
	ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
	if (ret)
		dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
			__func__, ret,
			hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
			new_pwr_info.gear_tx, new_pwr_info.gear_rx);

	return ret;
}
/*
 * Wait until all pending SCSI commands and TMFs have finished or the timeout
 * has expired.
 *
 * Return: 0 upon success; -EBUSY upon timeout.
 */
static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
{
	int ret = 0;
	/*
	 * make sure that there are no outstanding requests when
	 * clock scaling is in progress
	 */
	ufshcd_scsi_block_requests(hba);
	mutex_lock(&hba->wb_mutex);
	down_write(&hba->clk_scaling_lock);

	if (!hba->clk_scaling.is_allowed ||
	    ufshcd_wait_for_doorbell_clr(hba, timeout_us)) {
		ret = -EBUSY;
		up_write(&hba->clk_scaling_lock);
		mutex_unlock(&hba->wb_mutex);
		ufshcd_scsi_unblock_requests(hba);
		goto out;
	}

	/* let's not get into low power until clock scaling is completed */
	ufshcd_hold(hba);

out:
	return ret;
}
static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool scale_up)
{
	up_write(&hba->clk_scaling_lock);

	/* Enable Write Booster if we have scaled up else disable it */
	if (ufshcd_enable_wb_if_scaling_up(hba) && !err)
		ufshcd_wb_toggle(hba, scale_up);

	mutex_unlock(&hba->wb_mutex);

	ufshcd_scsi_unblock_requests(hba);
	ufshcd_release(hba);
}
/**
 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
 * @hba: per adapter instance
 * @scale_up: True for scaling up and false for scaling down
 *
 * Return: 0 for success; -EBUSY if scaling can't happen at this time; non-zero
 * for any other errors.
 */
static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;

	ret = ufshcd_clock_scaling_prepare(hba, 1 * USEC_PER_SEC);
	if (ret)
		return ret;

	/* scale down the gear before scaling down clocks */
	if (!scale_up) {
		ret = ufshcd_scale_gear(hba, false);
		if (ret)
			goto out_unprepare;
	}

	ret = ufshcd_scale_clks(hba, scale_up);
	if (ret) {
		if (!scale_up)
			ufshcd_scale_gear(hba, true);
		goto out_unprepare;
	}

	/* scale up the gear after scaling up clocks */
	if (scale_up) {
		ret = ufshcd_scale_gear(hba, true);
		if (ret) {
			ufshcd_scale_clks(hba, false);
			goto out_unprepare;
		}
	}

out_unprepare:
	ufshcd_clock_scaling_unprepare(hba, ret, scale_up);
	return ret;
}
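/*
 * Illustrative note on ordering (not from the original source): when
 * scaling down, the gear is dropped first and the clocks second; when
 * scaling up, the clocks are raised first and the gear second. Either way
 * the link never runs a gear the current clock rate cannot sustain, and
 * each step is rolled back if the following one fails.
 */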
static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
					   clk_scaling.suspend_work);
	unsigned long irq_flags;

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return;
	}
	hba->clk_scaling.is_suspended = true;
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	__ufshcd_suspend_clkscaling(hba);
}
static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
					   clk_scaling.resume_work);
	unsigned long irq_flags;

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (!hba->clk_scaling.is_suspended) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return;
	}
	hba->clk_scaling.is_suspended = false;
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	devfreq_resume_device(hba->devfreq);
}
static int ufshcd_devfreq_target(struct device *dev,
				unsigned long *freq, u32 flags)
{
	int ret = 0;
	struct ufs_hba *hba = dev_get_drvdata(dev);
	ktime_t start;
	bool scale_up, sched_clk_scaling_suspend_work = false;
	struct list_head *clk_list = &hba->clk_list_head;
	struct ufs_clk_info *clki;
	unsigned long irq_flags;

	if (!ufshcd_is_clkscaling_supported(hba))
		return -EINVAL;

	clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
	/* Override with the closest supported frequency */
	*freq = (unsigned long) clk_round_rate(clki->clk, *freq);
	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (ufshcd_eh_in_progress(hba)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return 0;
	}

	if (!hba->clk_scaling.active_reqs)
		sched_clk_scaling_suspend_work = true;

	if (list_empty(clk_list)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		goto out;
	}

	/* Decide based on the rounded-off frequency and update */
	scale_up = *freq == clki->max_freq;
	if (!scale_up)
		*freq = clki->min_freq;
	/* Update the frequency */
	if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		ret = 0;
		goto out; /* no state change required */
	}
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	start = ktime_get();
	ret = ufshcd_devfreq_scale(hba, scale_up);

	trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
		(scale_up ? "up" : "down"),
		ktime_to_us(ktime_sub(ktime_get(), start)), ret);

out:
	if (sched_clk_scaling_suspend_work)
		queue_work(hba->clk_scaling.workq,
			   &hba->clk_scaling.suspend_work);

	return ret;
}
static int ufshcd_devfreq_get_dev_status(struct device *dev,
		struct devfreq_dev_status *stat)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
	unsigned long flags;
	struct list_head *clk_list = &hba->clk_list_head;
	struct ufs_clk_info *clki;
	ktime_t curr_t;

	if (!ufshcd_is_clkscaling_supported(hba))
		return -EINVAL;

	memset(stat, 0, sizeof(*stat));

	spin_lock_irqsave(hba->host->host_lock, flags);
	curr_t = ktime_get();
	if (!scaling->window_start_t)
		goto start_window;

	clki = list_first_entry(clk_list, struct ufs_clk_info, list);
	/*
	 * If current frequency is 0, then the ondemand governor considers
	 * there's no initial frequency set. And it always requests to set
	 * to max. frequency.
	 */
	stat->current_frequency = clki->curr_freq;
	if (scaling->is_busy_started)
		scaling->tot_busy_t += ktime_us_delta(curr_t,
				scaling->busy_start_t);

	stat->total_time = ktime_us_delta(curr_t, scaling->window_start_t);
	stat->busy_time = scaling->tot_busy_t;
start_window:
	scaling->window_start_t = curr_t;
	scaling->tot_busy_t = 0;

	if (scaling->active_reqs) {
		scaling->busy_start_t = curr_t;
		scaling->is_busy_started = true;
	} else {
		scaling->busy_start_t = 0;
		scaling->is_busy_started = false;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return 0;
}
static int ufshcd_devfreq_init(struct ufs_hba *hba)
{
	struct list_head *clk_list = &hba->clk_list_head;
	struct ufs_clk_info *clki;
	struct devfreq *devfreq;
	int ret;

	/* Skip devfreq if we don't have any clocks in the list */
	if (list_empty(clk_list))
		return 0;

	clki = list_first_entry(clk_list, struct ufs_clk_info, list);
	dev_pm_opp_add(hba->dev, clki->min_freq, 0);
	dev_pm_opp_add(hba->dev, clki->max_freq, 0);

	ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile,
					 &hba->vps->ondemand_data);
	devfreq = devfreq_add_device(hba->dev,
			&hba->vps->devfreq_profile,
			DEVFREQ_GOV_SIMPLE_ONDEMAND,
			&hba->vps->ondemand_data);
	if (IS_ERR(devfreq)) {
		ret = PTR_ERR(devfreq);
		dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);

		dev_pm_opp_remove(hba->dev, clki->min_freq);
		dev_pm_opp_remove(hba->dev, clki->max_freq);
		return ret;
	}

	hba->devfreq = devfreq;

	return 0;
}
static void ufshcd_devfreq_remove(struct ufs_hba *hba)
{
	struct list_head *clk_list = &hba->clk_list_head;
	struct ufs_clk_info *clki;

	if (!hba->devfreq)
		return;

	devfreq_remove_device(hba->devfreq);
	hba->devfreq = NULL;

	clki = list_first_entry(clk_list, struct ufs_clk_info, list);
	dev_pm_opp_remove(hba->dev, clki->min_freq);
	dev_pm_opp_remove(hba->dev, clki->max_freq);
}
static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
	unsigned long flags;

	devfreq_suspend_device(hba->devfreq);
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_scaling.window_start_t = 0;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
	unsigned long flags;
	bool suspend = false;

	cancel_work_sync(&hba->clk_scaling.suspend_work);
	cancel_work_sync(&hba->clk_scaling.resume_work);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!hba->clk_scaling.is_suspended) {
		suspend = true;
		hba->clk_scaling.is_suspended = true;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (suspend)
		__ufshcd_suspend_clkscaling(hba);
}
static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
{
	unsigned long flags;
	bool resume = false;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_scaling.is_suspended) {
		resume = true;
		hba->clk_scaling.is_suspended = false;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (resume)
		devfreq_resume_device(hba->devfreq);
}
static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", hba->clk_scaling.is_enabled);
}
static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	u32 value;
	int err = 0;

	if (kstrtou32(buf, 0, &value))
		return -EINVAL;

	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		err = -EBUSY;
		goto out;
	}

	value = !!value;
	if (value == hba->clk_scaling.is_enabled)
		goto out;

	ufshcd_rpm_get_sync(hba);
	ufshcd_hold(hba);

	hba->clk_scaling.is_enabled = value;

	if (value) {
		ufshcd_resume_clkscaling(hba);
	} else {
		ufshcd_suspend_clkscaling(hba);
		err = ufshcd_devfreq_scale(hba, true);
		if (err)
			dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
					__func__, err);
	}

	ufshcd_release(hba);
	ufshcd_rpm_put_sync(hba);
out:
	up(&hba->host_sem);
	return err ? err : count;
}
static void ufshcd_init_clk_scaling_sysfs(struct ufs_hba *hba)
{
	hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
	hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
	sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
	hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
	hba->clk_scaling.enable_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
}
static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba)
{
	if (hba->clk_scaling.enable_attr.attr.name)
		device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
}
static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
{
	char wq_name[sizeof("ufs_clkscaling_00")];

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	if (!hba->clk_scaling.min_gear)
		hba->clk_scaling.min_gear = UFS_HS_G1;

	INIT_WORK(&hba->clk_scaling.suspend_work,
		  ufshcd_clk_scaling_suspend_work);
	INIT_WORK(&hba->clk_scaling.resume_work,
		  ufshcd_clk_scaling_resume_work);

	snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
		 hba->host->host_no);
	hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);

	hba->clk_scaling.is_initialized = true;
}
static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
{
	if (!hba->clk_scaling.is_initialized)
		return;

	ufshcd_remove_clk_scaling_sysfs(hba);
	destroy_workqueue(hba->clk_scaling.workq);
	ufshcd_devfreq_remove(hba);
	hba->clk_scaling.is_initialized = false;
}
static void ufshcd_ungate_work(struct work_struct *work)
{
	int ret;
	unsigned long flags;
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.ungate_work);

	cancel_delayed_work_sync(&hba->clk_gating.gate_work);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == CLKS_ON) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		return;
	}

	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_hba_vreg_set_hpm(hba);
	ufshcd_setup_clocks(hba, true);

	ufshcd_enable_irq(hba);

	/* Exit from hibern8 */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		/* Prevent gating in this path */
		hba->clk_gating.is_suspended = true;
		if (ufshcd_is_link_hibern8(hba)) {
			ret = ufshcd_uic_hibern8_exit(hba);
			if (ret)
				dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
					__func__, ret);
			else
				ufshcd_set_link_active(hba);
		}
		hba->clk_gating.is_suspended = false;
	}
}
/**
 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
 * Also, exit from hibern8 mode and set the link as active.
 * @hba: per adapter instance
 */
void ufshcd_hold(struct ufs_hba *hba)
{
	bool flush_result;
	unsigned long flags;

	if (!ufshcd_is_clkgating_allowed(hba) ||
	    !hba->clk_gating.is_initialized)
		return;
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.active_reqs++;

start:
	switch (hba->clk_gating.state) {
	case CLKS_ON:
		/*
		 * Wait for the ungate work to complete if in progress.
		 * Though the clocks may be in ON state, the link could
		 * still be in hibner8 state if hibern8 is allowed
		 * during clock gating.
		 * Make sure we exit hibern8 state also in addition to
		 * clocks being ON.
		 */
		if (ufshcd_can_hibern8_during_gating(hba) &&
		    ufshcd_is_link_hibern8(hba)) {
			spin_unlock_irqrestore(hba->host->host_lock, flags);
			flush_result = flush_work(&hba->clk_gating.ungate_work);
			if (hba->clk_gating.is_suspended && !flush_result)
				return;
			spin_lock_irqsave(hba->host->host_lock, flags);
			goto start;
		}
		break;
	case REQ_CLKS_OFF:
		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
			hba->clk_gating.state = CLKS_ON;
			trace_ufshcd_clk_gating(dev_name(hba->dev),
						hba->clk_gating.state);
			break;
		}
		/*
		 * If we are here, it means gating work is either done or
		 * currently running. Hence, fall through to cancel gating
		 * work and to enable clocks.
		 */
		fallthrough;
	case CLKS_OFF:
		hba->clk_gating.state = REQ_CLKS_ON;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
		queue_work(hba->clk_gating.clk_gating_workq,
			   &hba->clk_gating.ungate_work);
		/*
		 * fall through to check if we should wait for this
		 * work to be done or not.
		 */
		fallthrough;
	case REQ_CLKS_ON:
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		flush_work(&hba->clk_gating.ungate_work);
		/* Make sure state is CLKS_ON before returning */
		spin_lock_irqsave(hba->host->host_lock, flags);
		goto start;
	default:
		dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
			__func__, hba->clk_gating.state);
		break;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_hold);
static void ufshcd_gate_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.gate_work.work);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * In case you are here to cancel this work the gating state
	 * would be marked as REQ_CLKS_ON. In this case save time by
	 * skipping the gating work and exit after changing the clock
	 * state to CLKS_ON.
	 */
	if (hba->clk_gating.is_suspended ||
		(hba->clk_gating.state != REQ_CLKS_OFF)) {
		hba->clk_gating.state = CLKS_ON;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
		goto rel_lock;
	}

	if (hba->clk_gating.active_reqs
		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
		|| hba->outstanding_reqs || hba->outstanding_tasks
		|| hba->active_uic_cmd || hba->uic_async_done)
		goto rel_lock;

	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* put the link into hibern8 mode before turning off clocks */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		ret = ufshcd_uic_hibern8_enter(hba);
		if (ret) {
			hba->clk_gating.state = CLKS_ON;
			dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
					__func__, ret);
			trace_ufshcd_clk_gating(dev_name(hba->dev),
						hba->clk_gating.state);
			goto out;
		}
		ufshcd_set_link_hibern8(hba);
	}

	ufshcd_disable_irq(hba);

	ufshcd_setup_clocks(hba, false);

	/* Put the host controller in low power mode if possible */
	ufshcd_hba_vreg_set_lpm(hba);
	/*
	 * In case you are here to cancel this work the gating state
	 * would be marked as REQ_CLKS_ON. In this case keep the state
	 * as REQ_CLKS_ON which would anyway imply that clocks are off
	 * and a request to turn them on is pending. By doing this way,
	 * we keep the state machine in tact and this would ultimately
	 * prevent from doing cancel work multiple times when there are
	 * new requests arriving before the current cancel work is done.
	 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == REQ_CLKS_OFF) {
		hba->clk_gating.state = CLKS_OFF;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
	}
rel_lock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return;
}
/* host lock must be held before calling this variant */
static void __ufshcd_release(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.active_reqs--;

	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
	    hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
	    hba->outstanding_tasks || !hba->clk_gating.is_initialized ||
	    hba->active_uic_cmd || hba->uic_async_done ||
	    hba->clk_gating.state == CLKS_OFF)
		return;

	hba->clk_gating.state = REQ_CLKS_OFF;
	trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
	queue_delayed_work(hba->clk_gating.clk_gating_workq,
			   &hba->clk_gating.gate_work,
			   msecs_to_jiffies(hba->clk_gating.delay_ms));
}
void ufshcd_release(struct ufs_hba *hba)
{
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	__ufshcd_release(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_release);
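/*
 * Example (illustrative, not from the original source):
 * ufshcd_hold()/ufshcd_release() bracket any activity that needs the
 * controller clocks on, e.g.:
 *
 *	ufshcd_hold(hba);
 *	...issue commands / touch registers...
 *	ufshcd_release(hba);
 *
 * The last release does not gate the clocks immediately; it queues
 * gate_work after clk_gating.delay_ms (150 ms by default, tunable via the
 * clkgate_delay_ms sysfs attribute) so back-to-back requests do not
 * thrash the gating state machine.
 */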
static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", hba->clk_gating.delay_ms);
}
void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.delay_ms = value;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_clkgate_delay_set);
static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long value;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	ufshcd_clkgate_delay_set(dev, value);
	return count;
}
static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", hba->clk_gating.is_enabled);
}
static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags;
	u32 value;

	if (kstrtou32(buf, 0, &value))
		return -EINVAL;

	value = !!value;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (value == hba->clk_gating.is_enabled)
		goto out;

	if (value)
		__ufshcd_release(hba);
	else
		hba->clk_gating.active_reqs++;

	hba->clk_gating.is_enabled = value;
out:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return count;
}
static void ufshcd_init_clk_gating_sysfs(struct ufs_hba *hba)
{
	hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
	hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
	sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
	hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
	hba->clk_gating.delay_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");

	hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
	hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
	sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
	hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
	hba->clk_gating.enable_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
}
static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba)
{
	if (hba->clk_gating.delay_attr.attr.name)
		device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
	if (hba->clk_gating.enable_attr.attr.name)
		device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
}
static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
	char wq_name[sizeof("ufs_clk_gating_00")];

	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.state = CLKS_ON;

	hba->clk_gating.delay_ms = 150;
	INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
	INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);

	snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
		 hba->host->host_no);
	hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
					WQ_MEM_RECLAIM | WQ_HIGHPRI);

	ufshcd_init_clk_gating_sysfs(hba);

	hba->clk_gating.is_enabled = true;
	hba->clk_gating.is_initialized = true;
}
static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
{
	if (!hba->clk_gating.is_initialized)
		return;

	ufshcd_remove_clk_gating_sysfs(hba);

	/* Ungate the clock if necessary. */
	ufshcd_hold(hba);
	hba->clk_gating.is_initialized = false;
	ufshcd_release(hba);

	destroy_workqueue(hba->clk_gating.clk_gating_workq);
}
static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
{
	bool queue_resume_work = false;
	ktime_t curr_t = ktime_get();
	unsigned long flags;

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!hba->clk_scaling.active_reqs++)
		queue_resume_work = true;

	if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		return;
	}

	if (queue_resume_work)
		queue_work(hba->clk_scaling.workq,
			   &hba->clk_scaling.resume_work);

	if (!hba->clk_scaling.window_start_t) {
		hba->clk_scaling.window_start_t = curr_t;
		hba->clk_scaling.tot_busy_t = 0;
		hba->clk_scaling.is_busy_started = false;
	}

	if (!hba->clk_scaling.is_busy_started) {
		hba->clk_scaling.busy_start_t = curr_t;
		hba->clk_scaling.is_busy_started = true;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}

static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
{
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
	unsigned long flags;

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_scaling.active_reqs--;
	if (!scaling->active_reqs && scaling->is_busy_started) {
		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
					scaling->busy_start_t));
		scaling->busy_start_t = 0;
		scaling->is_busy_started = false;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
static inline int ufshcd_monitor_opcode2dir(u8 opcode)
{
	if (opcode == READ_6 || opcode == READ_10 || opcode == READ_16)
		return READ;
	else if (opcode == WRITE_6 || opcode == WRITE_10 || opcode == WRITE_16)
		return WRITE;
	else
		return -EINVAL;
}

static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba,
						struct ufshcd_lrb *lrbp)
{
	const struct ufs_hba_monitor *m = &hba->monitor;

	return (m->enabled && lrbp && lrbp->cmd &&
		(!m->chunk_size || m->chunk_size == lrbp->cmd->sdb.length) &&
		ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp));
}
static void ufshcd_start_monitor(struct ufs_hba *hba,
				 const struct ufshcd_lrb *lrbp)
{
	int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (dir >= 0 && hba->monitor.nr_queued[dir]++ == 0)
		hba->monitor.busy_start_ts[dir] = ktime_get();
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}

static void ufshcd_update_monitor(struct ufs_hba *hba, const struct ufshcd_lrb *lrbp)
{
	int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) {
		const struct request *req = scsi_cmd_to_rq(lrbp->cmd);
		struct ufs_hba_monitor *m = &hba->monitor;
		ktime_t now, inc, lat;

		now = lrbp->compl_time_stamp;
		inc = ktime_sub(now, m->busy_start_ts[dir]);
		m->total_busy[dir] = ktime_add(m->total_busy[dir], inc);
		m->nr_sec_rw[dir] += blk_rq_sectors(req);

		/* Update latencies */
		m->nr_req[dir]++;
		lat = ktime_sub(now, lrbp->issue_time_stamp);
		m->lat_sum[dir] += lat;
		if (m->lat_max[dir] < lat || !m->lat_max[dir])
			m->lat_max[dir] = lat;
		if (m->lat_min[dir] > lat || !m->lat_min[dir])
			m->lat_min[dir] = lat;

		m->nr_queued[dir]--;
		/* Push forward the busy start of monitor */
		m->busy_start_ts[dir] = now;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
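/*
 * Illustrative sketch (assumption, not part of the driver): deriving an
 * average latency in microseconds from the monitor counters accumulated
 * above might look like this, where 'dir' is READ or WRITE:
 *
 *	static u64 ufshcd_example_avg_lat_us(struct ufs_hba_monitor *m, int dir)
 *	{
 *		if (!m->nr_req[dir])
 *			return 0;
 *		return div_u64(ktime_to_us(m->lat_sum[dir]),
 *			       (u32)m->nr_req[dir]);
 *	}
 */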
/**
 * ufshcd_send_command - Send SCSI or device management commands
 * @hba: per adapter instance
 * @task_tag: Task tag of the command
 * @hwq: pointer to hardware queue instance
 */
void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag,
			 struct ufs_hw_queue *hwq)
{
	struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
	unsigned long flags;

	lrbp->issue_time_stamp = ktime_get();
	lrbp->issue_time_stamp_local_clock = local_clock();
	lrbp->compl_time_stamp = ktime_set(0, 0);
	lrbp->compl_time_stamp_local_clock = 0;
	ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
	ufshcd_clk_scaling_start_busy(hba);
	if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
		ufshcd_start_monitor(hba, lrbp);

	if (is_mcq_enabled(hba)) {
		int utrd_size = sizeof(struct utp_transfer_req_desc);
		struct utp_transfer_req_desc *src = lrbp->utr_descriptor_ptr;
		struct utp_transfer_req_desc *dest = hwq->sqe_base_addr + hwq->sq_tail_slot;

		spin_lock(&hwq->sq_lock);
		memcpy(dest, src, utrd_size);
		ufshcd_inc_sq_tail(hwq);
		spin_unlock(&hwq->sq_lock);
	} else {
		spin_lock_irqsave(&hba->outstanding_lock, flags);
		if (hba->vops && hba->vops->setup_xfer_req)
			hba->vops->setup_xfer_req(hba, lrbp->task_tag,
						  !!lrbp->cmd);
		__set_bit(lrbp->task_tag, &hba->outstanding_reqs);
		ufshcd_writel(hba, 1 << lrbp->task_tag,
			      REG_UTP_TRANSFER_REQ_DOOR_BELL);
		spin_unlock_irqrestore(&hba->outstanding_lock, flags);
	}
}
/**
 * ufshcd_copy_sense_data - Copy sense data in case of check condition
 * @lrbp: pointer to local reference block
 */
static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
{
	u8 *const sense_buffer = lrbp->cmd->sense_buffer;
	u16 resp_len;
	int len;

	resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header.data_segment_length);
	if (sense_buffer && resp_len) {
		int len_to_copy;

		len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
		len_to_copy = min_t(int, UFS_SENSE_SIZE, len);

		memcpy(sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
		       len_to_copy);
	}
}
/**
 * ufshcd_copy_query_response() - Copy the Query Response and the data
 * descriptor
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 *
 * Return: 0 upon success; < 0 upon failure.
 */
static
int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);

	/* Get the descriptor */
	if (hba->dev_cmd.query.descriptor &&
	    lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
		u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
				GENERAL_UPIU_REQUEST_SIZE;
		u16 resp_len;
		u16 buf_len;

		/* data segment length */
		resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header
				       .data_segment_length);
		buf_len = be16_to_cpu(
				hba->dev_cmd.query.request.upiu_req.length);
		if (likely(buf_len >= resp_len)) {
			memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
		} else {
			dev_warn(hba->dev,
				 "%s: rsp size %d is bigger than buffer size %d",
				 __func__, resp_len, buf_len);
			return -EINVAL;
		}
	}

	return 0;
}
/**
 * ufshcd_hba_capabilities - Read controller capabilities
 * @hba: per adapter instance
 *
 * Return: 0 on success, negative on error.
 */
static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
{
	int err;

	hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
	if (hba->quirks & UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS)
		hba->capabilities &= ~MASK_64_ADDRESSING_SUPPORT;

	/* nutrs and nutmrs are 0 based values */
	hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
	hba->nutmrs =
	((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
	hba->reserved_slot = hba->nutrs - 1;

	/* Read crypto capabilities */
	err = ufshcd_hba_init_crypto_capabilities(hba);
	if (err) {
		dev_err(hba->dev, "crypto setup failed\n");
		return err;
	}

	hba->mcq_sup = FIELD_GET(MASK_MCQ_SUPPORT, hba->capabilities);
	if (!hba->mcq_sup)
		return 0;

	hba->mcq_capabilities = ufshcd_readl(hba, REG_MCQCAP);
	hba->ext_iid_sup = FIELD_GET(MASK_EXT_IID_SUPPORT,
				     hba->mcq_capabilities);

	return 0;
}
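/*
 * Worked example (illustrative, hypothetical register value): if the
 * capabilities register reads 0x0707001F, then
 * MASK_TRANSFER_REQUESTS_SLOTS (bits 4:0) yields 0x1F, so
 * nutrs = 0x1F + 1 = 32 transfer request slots; bits 18:16 yield 7, so
 * nutmrs = 7 + 1 = 8 task management slots; and reserved_slot, used for
 * device management commands, is then tag 31.
 */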
/**
 * ufshcd_ready_for_uic_cmd - Check if controller is ready
 *                            to accept UIC commands
 * @hba: per adapter instance
 *
 * Return: true on success, else false.
 */
static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
{
	u32 val;
	int ret = read_poll_timeout(ufshcd_readl, val, val & UIC_COMMAND_READY,
				    500, UIC_CMD_TIMEOUT * 1000, false, hba,
				    REG_CONTROLLER_STATUS);
	return ret == 0;
}

/**
 * ufshcd_get_upmcrs - Get the power mode change request status
 * @hba: Pointer to adapter instance
 *
 * This function gets the UPMCRS field of HCS register
 *
 * Return: value of UPMCRS field.
 */
static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
}
/**
 * ufshcd_dispatch_uic_cmd - Dispatch an UIC command to the Unipro layer
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 */
static inline void
ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	lockdep_assert_held(&hba->uic_cmd_mutex);

	WARN_ON(hba->active_uic_cmd);

	hba->active_uic_cmd = uic_cmd;

	/* Write Args */
	ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
	ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
	ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);

	ufshcd_add_uic_command_trace(hba, uic_cmd, UFS_CMD_SEND);

	/* Write UIC Cmd */
	ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
		      REG_UIC_COMMAND);
}

/**
 * ufshcd_wait_for_uic_cmd - Wait for completion of an UIC command
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Return: 0 only if success.
 */
static int
ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	lockdep_assert_held(&hba->uic_cmd_mutex);

	if (wait_for_completion_timeout(&uic_cmd->done,
					msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
		ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
	} else {
		ret = -ETIMEDOUT;
		dev_err(hba->dev,
			"uic cmd 0x%x with arg3 0x%x completion timeout\n",
			uic_cmd->command, uic_cmd->argument3);

		if (!uic_cmd->cmd_active) {
			dev_err(hba->dev, "%s: UIC cmd has been completed, return the result\n",
				__func__);
			ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
		}
	}

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->active_uic_cmd = NULL;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return ret;
}
/**
 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 * @completion: initialize the completion only if this is set to true
 *
 * Return: 0 only if success.
 */
static int
__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
		      bool completion)
{
	lockdep_assert_held(&hba->uic_cmd_mutex);

	if (!ufshcd_ready_for_uic_cmd(hba)) {
		dev_err(hba->dev,
			"Controller not ready to accept UIC commands\n");
		return -EIO;
	}

	if (completion)
		init_completion(&uic_cmd->done);

	uic_cmd->cmd_active = 1;
	ufshcd_dispatch_uic_cmd(hba, uic_cmd);

	return 0;
}

/**
 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Return: 0 only if success.
 */
int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
		return 0;

	ufshcd_hold(hba);
	mutex_lock(&hba->uic_cmd_mutex);
	ufshcd_add_delay_before_dme_cmd(hba);

	ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
	if (!ret)
		ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);

	mutex_unlock(&hba->uic_cmd_mutex);

	ufshcd_release(hba);
	return ret;
}
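/*
 * Usage sketch (illustrative, mirrors how the DME helpers below drive this
 * path): reading a local MIB attribute with a raw UIC command could look
 * like this; on success the attribute value is returned in argument3:
 *
 *	struct uic_command uic_cmd = {
 *		.command = UIC_CMD_DME_GET,
 *		.argument1 = UIC_ARG_MIB(PA_MAXRXHSGEAR),
 *	};
 *	int ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
 */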
/**
 * ufshcd_sgl_to_prdt - SG list to PRDT (Physical Region Description Table, 4DW format)
 * @hba: per-adapter instance
 * @lrbp: pointer to local reference block
 * @sg_entries: The number of sg lists actually used
 * @sg_list: Pointer to SG list
 */
static void ufshcd_sgl_to_prdt(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int sg_entries,
			       struct scatterlist *sg_list)
{
	struct ufshcd_sg_entry *prd;
	struct scatterlist *sg;
	int i;

	if (sg_entries) {

		if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
			lrbp->utr_descriptor_ptr->prd_table_length =
				cpu_to_le16(sg_entries * ufshcd_sg_entry_size(hba));
		else
			lrbp->utr_descriptor_ptr->prd_table_length = cpu_to_le16(sg_entries);

		prd = lrbp->ucd_prdt_ptr;

		for_each_sg(sg_list, sg, sg_entries, i) {
			const unsigned int len = sg_dma_len(sg);

			/*
			 * From the UFSHCI spec: "Data Byte Count (DBC): A '0'
			 * based value that indicates the length, in bytes, of
			 * the data block. A maximum of length of 256KB may
			 * exist for any entry. Bits 1:0 of this field shall be
			 * 11b to indicate Dword granularity. A value of '3'
			 * indicates 4 bytes, '7' indicates 8 bytes, etc."
			 */
			WARN_ONCE(len > SZ_256K, "len = %#x\n", len);
			prd->size = cpu_to_le32(len - 1);
			prd->addr = cpu_to_le64(sg->dma_address);
			prd->reserved = 0;
			prd = (void *)prd + ufshcd_sg_entry_size(hba);
		}
	} else {
		lrbp->utr_descriptor_ptr->prd_table_length = 0;
	}
}
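/*
 * Worked example (illustrative): for a 4 KiB DMA segment, len = 0x1000 and
 * the Data Byte Count written above is 0x1000 - 1 = 0xFFF. Bits 1:0 of that
 * value are 11b, satisfying the Dword-granularity rule quoted in the
 * comment in the loop.
 */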
/**
 * ufshcd_map_sg - Map scatter-gather list to prdt
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 *
 * Return: 0 in case of success, non-zero value in case of failure.
 */
static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct scsi_cmnd *cmd = lrbp->cmd;
	int sg_segments = scsi_dma_map(cmd);

	if (sg_segments < 0)
		return sg_segments;

	ufshcd_sgl_to_prdt(hba, lrbp, sg_segments, scsi_sglist(cmd));

	return 0;
}
/**
 * ufshcd_enable_intr - enable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == ufshci_version(1, 0)) {
		u32 rw;
		rw = set & INTERRUPT_MASK_RW_VER_10;
		set = rw | ((set ^ intrs) & intrs);
	} else {
		set |= intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}

/**
 * ufshcd_disable_intr - disable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == ufshci_version(1, 0)) {
		u32 rw;
		rw = (set & INTERRUPT_MASK_RW_VER_10) &
			~(intrs & INTERRUPT_MASK_RW_VER_10);
		set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);

	} else {
		set &= ~intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}
/**
 * ufshcd_prepare_req_desc_hdr - Fill UTP Transfer request descriptor header
 * according to request
 * @lrbp: pointer to local reference block
 * @upiu_flags: flags required in the header
 * @cmd_dir: requests data direction
 * @ehs_length: Total EHS Length (in 32-byte units of all Extra Header Segments)
 */
static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, u8 *upiu_flags,
					enum dma_data_direction cmd_dir, int ehs_length)
{
	struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
	struct request_desc_header *h = &req_desc->header;
	enum utp_data_direction data_direction;

	*h = (typeof(*h)){ };

	if (cmd_dir == DMA_FROM_DEVICE) {
		data_direction = UTP_DEVICE_TO_HOST;
		*upiu_flags = UPIU_CMD_FLAGS_READ;
	} else if (cmd_dir == DMA_TO_DEVICE) {
		data_direction = UTP_HOST_TO_DEVICE;
		*upiu_flags = UPIU_CMD_FLAGS_WRITE;
	} else {
		data_direction = UTP_NO_DATA_TRANSFER;
		*upiu_flags = UPIU_CMD_FLAGS_NONE;
	}

	h->command_type = lrbp->command_type;
	h->data_direction = data_direction;
	h->ehs_length = ehs_length;

	if (lrbp->intr_cmd)
		h->interrupt = 1;

	/* Prepare crypto related dwords */
	ufshcd_prepare_req_desc_hdr_crypto(lrbp, h);

	/*
	 * assigning invalid value for command status. Controller
	 * updates OCS on command completion, with the command
	 * status
	 */
	h->ocs = OCS_INVALID_COMMAND_STATUS;

	req_desc->prd_table_length = 0;
}
/**
 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
 * for scsi commands
 * @lrbp: local reference block pointer
 * @upiu_flags: flags
 */
static
void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
{
	struct scsi_cmnd *cmd = lrbp->cmd;
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
	unsigned short cdb_len;

	ucd_req_ptr->header = (struct utp_upiu_header){
		.transaction_code = UPIU_TRANSACTION_COMMAND,
		.flags = upiu_flags,
		.lun = lrbp->lun,
		.task_tag = lrbp->task_tag,
		.command_set_type = UPIU_COMMAND_SET_TYPE_SCSI,
	};

	ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);

	cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
	memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
	memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}

/**
 * ufshcd_prepare_utp_query_req_upiu() - fill the utp_transfer_req_desc,
 * for query requests
 * @hba: UFS hba
 * @lrbp: local reference block pointer
 * @upiu_flags: flags
 */
static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
				struct ufshcd_lrb *lrbp, u8 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
	struct ufs_query *query = &hba->dev_cmd.query;
	u16 len = be16_to_cpu(query->request.upiu_req.length);

	/* Query request header */
	ucd_req_ptr->header = (struct utp_upiu_header){
		.transaction_code = UPIU_TRANSACTION_QUERY_REQ,
		.flags = upiu_flags,
		.lun = lrbp->lun,
		.task_tag = lrbp->task_tag,
		.query_function = query->request.query_func,
		/* Data segment length only need for WRITE_DESC */
		.data_segment_length =
			query->request.upiu_req.opcode ==
					UPIU_QUERY_OPCODE_WRITE_DESC ?
				cpu_to_be16(len) :
				0,
	};

	/* Copy the Query Request buffer as is */
	memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
			QUERY_OSF_SIZE);

	/* Copy the Descriptor */
	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
		memcpy(ucd_req_ptr + 1, query->descriptor, len);

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}

static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;

	memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));

	ucd_req_ptr->header = (struct utp_upiu_header){
		.transaction_code = UPIU_TRANSACTION_NOP_OUT,
		.task_tag = lrbp->task_tag,
	};

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
/**
 * ufshcd_compose_devman_upiu - UFS Protocol Information Unit(UPIU)
 *			       for Device Management Purposes
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 *
 * Return: 0 upon success; < 0 upon failure.
 */
static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
				      struct ufshcd_lrb *lrbp)
{
	u8 upiu_flags;
	int ret = 0;

	if (hba->ufs_version <= ufshci_version(1, 1))
		lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
	else
		lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;

	ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0);
	if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
		ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
	else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
		ufshcd_prepare_utp_nop_upiu(lrbp);
	else
		ret = -EINVAL;

	return ret;
}

/**
 * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit(UPIU)
 *			   for SCSI Purposes
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 *
 * Return: 0 upon success; < 0 upon failure.
 */
static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	u8 upiu_flags;
	int ret = 0;

	if (hba->ufs_version <= ufshci_version(1, 1))
		lrbp->command_type = UTP_CMD_TYPE_SCSI;
	else
		lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;

	if (likely(lrbp->cmd)) {
		ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, lrbp->cmd->sc_data_direction, 0);
		ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
	} else {
		ret = -EINVAL;
	}

	return ret;
}
/**
 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
 * @upiu_wlun_id: UPIU W-LUN id
 *
 * Return: SCSI W-LUN id.
 */
static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
{
	return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
}

static inline bool is_device_wlun(struct scsi_device *sdev)
{
	return sdev->lun ==
		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN);
}
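/*
 * Worked example (illustrative, based on the current constant values):
 * with UFS_UPIU_UFS_DEVICE_WLUN = 0xD0, clearing the UPIU W-LUN bit (0x80)
 * leaves 0x50, and OR-ing in SCSI_W_LUN_BASE (0xc100) gives SCSI LUN
 * 0xc150, the LUN under which the device well-known LU is presented to
 * the SCSI midlayer.
 */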
/*
 * Associate the UFS controller queue with the default and poll HCTX types.
 * Initialize the mq_map[] arrays.
 */
static void ufshcd_map_queues(struct Scsi_Host *shost)
{
	struct ufs_hba *hba = shost_priv(shost);
	int i, queue_offset = 0;

	if (!is_mcq_supported(hba)) {
		hba->nr_queues[HCTX_TYPE_DEFAULT] = 1;
		hba->nr_queues[HCTX_TYPE_READ] = 0;
		hba->nr_queues[HCTX_TYPE_POLL] = 1;
		hba->nr_hw_queues = 1;
	}

	for (i = 0; i < shost->nr_maps; i++) {
		struct blk_mq_queue_map *map = &shost->tag_set.map[i];

		map->nr_queues = hba->nr_queues[i];
		if (!map->nr_queues)
			continue;
		map->queue_offset = queue_offset;
		if (i == HCTX_TYPE_POLL && !is_mcq_supported(hba))
			map->queue_offset = 0;

		blk_mq_map_queues(map);
		queue_offset += map->nr_queues;
	}
}
static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
{
	struct utp_transfer_cmd_desc *cmd_descp = (void *)hba->ucdl_base_addr +
		i * ufshcd_get_ucd_size(hba);
	struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
	dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
		i * ufshcd_get_ucd_size(hba);
	u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
				       response_upiu);
	u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);

	lrb->utr_descriptor_ptr = utrdlp + i;
	lrb->utrd_dma_addr = hba->utrdl_dma_addr +
		i * sizeof(struct utp_transfer_req_desc);
	lrb->ucd_req_ptr = (struct utp_upiu_req *)cmd_descp->command_upiu;
	lrb->ucd_req_dma_addr = cmd_desc_element_addr;
	lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp->response_upiu;
	lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
	lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp->prd_table;
	lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
}
/**
 * ufshcd_queuecommand - main entry point for SCSI requests
 * @host: SCSI host pointer
 * @cmd: command from SCSI Midlayer
 *
 * Return: 0 for success, non-zero in case of failure.
 */
static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	struct ufs_hba *hba = shost_priv(host);
	int tag = scsi_cmd_to_rq(cmd)->tag;
	struct ufshcd_lrb *lrbp;
	int err = 0;
	struct ufs_hw_queue *hwq = NULL;

	WARN_ONCE(tag < 0 || tag >= hba->nutrs, "Invalid tag %d\n", tag);

	switch (hba->ufshcd_state) {
	case UFSHCD_STATE_OPERATIONAL:
		break;
	case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
		/*
		 * SCSI error handler can call ->queuecommand() while UFS error
		 * handler is in progress. Error interrupts could change the
		 * state from UFSHCD_STATE_RESET to
		 * UFSHCD_STATE_EH_SCHEDULED_NON_FATAL. Prevent requests
		 * being issued in that case.
		 */
		if (ufshcd_eh_in_progress(hba)) {
			err = SCSI_MLQUEUE_HOST_BUSY;
			goto out;
		}
		break;
	case UFSHCD_STATE_EH_SCHEDULED_FATAL:
		/*
		 * pm_runtime_get_sync() is used at error handling preparation
		 * stage. If a scsi cmd, e.g. the SSU cmd, is sent from hba's
		 * PM ops, it can never be finished if we let SCSI layer keep
		 * retrying it, which gets err handler stuck forever. Neither
		 * can we let the scsi cmd pass through, because UFS is in bad
		 * state, the scsi cmd may eventually time out, which will get
		 * err handler blocked for too long. So, just fail the scsi cmd
		 * sent from PM ops, err handler can recover PM error anyways.
		 */
		if (hba->pm_op_in_progress) {
			hba->force_reset = true;
			set_host_byte(cmd, DID_BAD_TARGET);
			scsi_done(cmd);
			goto out;
		}
		fallthrough;
	case UFSHCD_STATE_RESET:
		err = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	case UFSHCD_STATE_ERROR:
		set_host_byte(cmd, DID_ERROR);
		scsi_done(cmd);
		goto out;
	}

	hba->req_abort_count = 0;

	ufshcd_hold(hba);

	lrbp = &hba->lrb[tag];
	lrbp->cmd = cmd;
	lrbp->task_tag = tag;
	lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
	lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba);

	ufshcd_prepare_lrbp_crypto(scsi_cmd_to_rq(cmd), lrbp);

	lrbp->req_abort_skip = false;

	ufshcd_comp_scsi_upiu(hba, lrbp);

	err = ufshcd_map_sg(hba, lrbp);
	if (err) {
		ufshcd_release(hba);
		goto out;
	}

	if (is_mcq_enabled(hba))
		hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));

	ufshcd_send_command(hba, tag, hwq);

out:
	if (ufs_trigger_eh()) {
		unsigned long flags;

		spin_lock_irqsave(hba->host->host_lock, flags);
		ufshcd_schedule_eh_work(hba);
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	}

	return err;
}
static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
{
	lrbp->task_tag = tag;
	lrbp->lun = 0; /* device management cmd is not specific to any LUN */
	lrbp->intr_cmd = true; /* No interrupt aggregation */
	ufshcd_prepare_lrbp_crypto(NULL, lrbp);
	hba->dev_cmd.type = cmd_type;

	return ufshcd_compose_devman_upiu(hba, lrbp);
}

/*
 * Check with the block layer if the command is inflight
 * @cmd: command to check.
 *
 * Return: true if command is inflight; false if not.
 */
bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd)
{
	struct request *rq;

	if (!cmd)
		return false;

	rq = scsi_cmd_to_rq(cmd);
	if (!blk_mq_request_started(rq))
		return false;

	return true;
}
/*
 * Clear the pending command in the controller and wait until
 * the controller confirms that the command has been cleared.
 * @hba: per adapter instance
 * @task_tag: The tag number of the command to be cleared.
 */
static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag)
{
	u32 mask = 1U << task_tag;
	unsigned long flags;
	int err;

	if (is_mcq_enabled(hba)) {
		/*
		 * MCQ mode. Clean up the MCQ resources similar to
		 * what the ufshcd_utrl_clear() does for SDB mode.
		 */
		err = ufshcd_mcq_sq_cleanup(hba, task_tag);
		if (err) {
			dev_err(hba->dev, "%s: failed tag=%d. err=%d\n",
				__func__, task_tag, err);
			return err;
		}
		return 0;
	}

	/* clear outstanding transaction before retry */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_utrl_clear(hba, mask);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/*
	 * wait for h/w to clear corresponding bit in door-bell.
	 * max. wait is 1 sec.
	 */
	return ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
					mask, ~mask, 1000, 1000);
}
/**
 * ufshcd_dev_cmd_completion() - handles device management command responses
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 *
 * Return: 0 upon success; < 0 upon failure.
 */
static int
ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	enum upiu_response_transaction resp;
	int err = 0;

	hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
	resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);

	switch (resp) {
	case UPIU_TRANSACTION_NOP_IN:
		if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
			err = -EINVAL;
			dev_err(hba->dev, "%s: unexpected response %x\n",
					__func__, resp);
		}
		break;
	case UPIU_TRANSACTION_QUERY_RSP: {
		u8 response = lrbp->ucd_rsp_ptr->header.response;

		if (response == 0)
			err = ufshcd_copy_query_response(hba, lrbp);
		break;
	}
	case UPIU_TRANSACTION_REJECT_UPIU:
		/* TODO: handle Reject UPIU Response */
		err = -EPERM;
		dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
				__func__);
		break;
	case UPIU_TRANSACTION_RESPONSE:
		if (hba->dev_cmd.type != DEV_CMD_TYPE_RPMB) {
			err = -EINVAL;
			dev_err(hba->dev, "%s: unexpected response %x\n", __func__, resp);
		}
		break;
	default:
		err = -EINVAL;
		dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
				__func__, resp);
		break;
	}

	return err;
}
static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp, int max_timeout)
{
	unsigned long time_left = msecs_to_jiffies(max_timeout);
	unsigned long flags;
	bool pending;
	int err;

retry:
	time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
						time_left);

	if (likely(time_left)) {
		/*
		 * The completion handler called complete() and the caller of
		 * this function still owns the @lrbp tag so the code below does
		 * not trigger any race conditions.
		 */
		hba->dev_cmd.complete = NULL;
		err = ufshcd_get_tr_ocs(lrbp, NULL);
		if (!err)
			err = ufshcd_dev_cmd_completion(hba, lrbp);
	} else {
		err = -ETIMEDOUT;
		dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
			__func__, lrbp->task_tag);

		/* MCQ mode */
		if (is_mcq_enabled(hba)) {
			err = ufshcd_clear_cmd(hba, lrbp->task_tag);
			hba->dev_cmd.complete = NULL;
			return err;
		}

		/* SDB mode */
		if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0) {
			/* successfully cleared the command, retry if needed */
			err = -EAGAIN;
			/*
			 * Since clearing the command succeeded we also need to
			 * clear the task tag bit from the outstanding_reqs
			 * variable.
			 */
			spin_lock_irqsave(&hba->outstanding_lock, flags);
			pending = test_bit(lrbp->task_tag,
					   &hba->outstanding_reqs);
			if (pending) {
				hba->dev_cmd.complete = NULL;
				__clear_bit(lrbp->task_tag,
					    &hba->outstanding_reqs);
			}
			spin_unlock_irqrestore(&hba->outstanding_lock, flags);

			if (!pending) {
				/*
				 * The completion handler ran while we tried to
				 * clear the command.
				 */
				time_left = 1;
				goto retry;
			}
		} else {
			dev_err(hba->dev, "%s: failed to clear tag %d\n",
				__func__, lrbp->task_tag);

			spin_lock_irqsave(&hba->outstanding_lock, flags);
			pending = test_bit(lrbp->task_tag,
					   &hba->outstanding_reqs);
			if (pending)
				hba->dev_cmd.complete = NULL;
			spin_unlock_irqrestore(&hba->outstanding_lock, flags);

			if (!pending) {
				/*
				 * The completion handler ran while we tried to
				 * clear the command.
				 */
				time_left = 1;
				goto retry;
			}
		}
	}

	return err;
}
/**
 * ufshcd_exec_dev_cmd - API for sending device management requests
 * @hba: UFS hba
 * @cmd_type: specifies the type (NOP, Query...)
 * @timeout: timeout in milliseconds
 *
 * Return: 0 upon success; < 0 upon failure.
 *
 * NOTE: Since there is only one available tag for device management commands,
 * it is expected you hold the hba->dev_cmd.lock mutex.
 */
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
		enum dev_cmd_type cmd_type, int timeout)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	const u32 tag = hba->reserved_slot;
	struct ufshcd_lrb *lrbp;
	int err;

	/* Protects use of hba->reserved_slot. */
	lockdep_assert_held(&hba->dev_cmd.lock);

	down_read(&hba->clk_scaling_lock);

	lrbp = &hba->lrb[tag];
	lrbp->cmd = NULL;
	err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
	if (unlikely(err))
		goto out;

	hba->dev_cmd.complete = &wait;

	ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);

	ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
	err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
	ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
				    (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);

out:
	up_read(&hba->clk_scaling_lock);
	return err;
}
/**
 * ufshcd_init_query() - init the query response and request parameters
 * @hba: per-adapter instance
 * @request: address of the request pointer to be initialized
 * @response: address of the response pointer to be initialized
 * @opcode: operation to perform
 * @idn: flag idn to access
 * @index: LU number to access
 * @selector: query/flag/descriptor further identification
 */
static inline void ufshcd_init_query(struct ufs_hba *hba,
		struct ufs_query_req **request, struct ufs_query_res **response,
		enum query_opcode opcode, u8 idn, u8 index, u8 selector)
{
	*request = &hba->dev_cmd.query.request;
	*response = &hba->dev_cmd.query.response;
	memset(*request, 0, sizeof(struct ufs_query_req));
	memset(*response, 0, sizeof(struct ufs_query_res));
	(*request)->upiu_req.opcode = opcode;
	(*request)->upiu_req.idn = idn;
	(*request)->upiu_req.index = index;
	(*request)->upiu_req.selector = selector;
}
static int ufshcd_query_flag_retry(struct ufs_hba *hba,
	enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
{
	int ret;
	int retries;

	for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
		ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res);
		if (ret)
			dev_dbg(hba->dev,
				"%s: failed with error %d, retries %d\n",
				__func__, ret, retries);
		else
			break;
	}

	if (ret)
		dev_err(hba->dev,
			"%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
			__func__, opcode, idn, ret, retries);
	return ret;
}
/**
 * ufshcd_query_flag() - API function for sending flag query requests
 * @hba: per-adapter instance
 * @opcode: flag query to perform
 * @idn: flag idn to access
 * @index: flag index to access
 * @flag_res: the flag value after the query request completes
 *
 * Return: 0 for success, non-zero in case of failure.
 */
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
			enum flag_idn idn, u8 index, bool *flag_res)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err, selector = 0;
	int timeout = QUERY_REQ_TIMEOUT;

	BUG_ON(!hba);

	ufshcd_hold(hba);
	mutex_lock(&hba->dev_cmd.lock);
	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			selector);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_SET_FLAG:
	case UPIU_QUERY_OPCODE_CLEAR_FLAG:
	case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		if (!flag_res) {
			/* No dummy reads */
			dev_err(hba->dev, "%s: Invalid argument for read request\n",
					__func__);
			err = -EINVAL;
			goto out_unlock;
		}
		break;
	default:
		dev_err(hba->dev,
			"%s: Expected query flag opcode but got = %d\n",
			__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);

	if (err) {
		dev_err(hba->dev,
			"%s: Sending flag query for idn %d failed, err = %d\n",
			__func__, idn, err);
		goto out_unlock;
	}

	if (flag_res)
		*flag_res = (be32_to_cpu(response->upiu_res.value) &
				MASK_QUERY_UPIU_FLAG_LOC) & 0x1;

out_unlock:
	mutex_unlock(&hba->dev_cmd.lock);
	ufshcd_release(hba);
	return err;
}
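/*
 * Usage sketch (illustrative; matches the pattern used elsewhere in this
 * driver for device initialization): polling the fDeviceInit flag via the
 * retry wrapper above:
 *
 *	bool flag_res = true;
 *	int err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *					  QUERY_FLAG_IDN_FDEVICEINIT, 0,
 *					  &flag_res);
 */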
/**
 * ufshcd_query_attr - API function for sending attribute requests
 * @hba: per-adapter instance
 * @opcode: attribute opcode
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @attr_val: the attribute value after the query request completes
 *
 * Return: 0 for success, non-zero in case of failure.
 */
int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
		      enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err;

	BUG_ON(!hba);

	if (!attr_val) {
		dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
				__func__, opcode);
		return -EINVAL;
	}

	ufshcd_hold(hba);

	mutex_lock(&hba->dev_cmd.lock);
	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			selector);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_WRITE_ATTR:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		request->upiu_req.value = cpu_to_be32(*attr_val);
		break;
	case UPIU_QUERY_OPCODE_READ_ATTR:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		break;
	default:
		dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);

	if (err) {
		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
				__func__, opcode, idn, index, err);
		goto out_unlock;
	}

	*attr_val = be32_to_cpu(response->upiu_res.value);

out_unlock:
	mutex_unlock(&hba->dev_cmd.lock);
	ufshcd_release(hba);
	return err;
}
/**
 * ufshcd_query_attr_retry() - API function for sending query
 * attribute with retries
 * @hba: per-adapter instance
 * @opcode: attribute opcode
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @attr_val: the attribute value after the query request
 * completes
 *
 * Return: 0 for success, non-zero in case of failure.
 */
int ufshcd_query_attr_retry(struct ufs_hba *hba,
	enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
	u32 *attr_val)
{
	int ret = 0;
	u32 retries;

	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
		ret = ufshcd_query_attr(hba, opcode, idn, index,
						selector, attr_val);
		if (ret)
			dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
				__func__, ret, retries);
		else
			break;
	}

	if (ret)
		dev_err(hba->dev,
			"%s: query attribute, idn %d, failed with error %d after %d retries\n",
			__func__, idn, ret, QUERY_REQ_RETRIES);
	return ret;
}
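/*
 * Usage sketch (illustrative): reading the bBootLunEn attribute with the
 * retry wrapper above:
 *
 *	u32 boot_lun_en;
 *	int err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
 *					  QUERY_ATTR_IDN_BOOT_LU_EN, 0, 0,
 *					  &boot_lun_en);
 */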
static int __ufshcd_query_descriptor(struct ufs_hba *hba,
			enum query_opcode opcode, enum desc_idn idn, u8 index,
			u8 selector, u8 *desc_buf, int *buf_len)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err;

	BUG_ON(!hba);

	if (!desc_buf) {
		dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
				__func__, opcode);
		return -EINVAL;
	}

	if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
		dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
				__func__, *buf_len);
		return -EINVAL;
	}

	ufshcd_hold(hba);

	mutex_lock(&hba->dev_cmd.lock);
	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			selector);
	hba->dev_cmd.query.descriptor = desc_buf;
	request->upiu_req.length = cpu_to_be16(*buf_len);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_WRITE_DESC:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_DESC:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		break;
	default:
		dev_err(hba->dev,
				"%s: Expected query descriptor opcode but got = 0x%.2x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);

	if (err) {
		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
				__func__, opcode, idn, index, err);
		goto out_unlock;
	}

	*buf_len = be16_to_cpu(response->upiu_res.length);

out_unlock:
	hba->dev_cmd.query.descriptor = NULL;
	mutex_unlock(&hba->dev_cmd.lock);
	ufshcd_release(hba);
	return err;
}
/**
 * ufshcd_query_descriptor_retry - API function for sending descriptor requests
 * @hba: per-adapter instance
 * @opcode: attribute opcode
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @desc_buf: the buffer that contains the descriptor
 * @buf_len: length parameter passed to the device
 *
 * The buf_len parameter will contain, on return, the length parameter
 * received on the response.
 *
 * Return: 0 for success, non-zero in case of failure.
 */
int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
				  enum query_opcode opcode,
				  enum desc_idn idn, u8 index,
				  u8 selector,
				  u8 *desc_buf, int *buf_len)
{
	int err;
	int retries;

	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
		err = __ufshcd_query_descriptor(hba, opcode, idn, index,
						selector, desc_buf, buf_len);
		if (!err || err == -EINVAL)
			break;
	}

	return err;
}
/**
 * ufshcd_read_desc_param - read the specified descriptor parameter
 * @hba: Pointer to adapter instance
 * @desc_id: descriptor idn value
 * @desc_index: descriptor index
 * @param_offset: offset of the parameter to read
 * @param_read_buf: pointer to buffer where parameter would be read
 * @param_size: sizeof(param_read_buf)
 *
 * Return: 0 in case of success, non-zero otherwise.
 */
int ufshcd_read_desc_param(struct ufs_hba *hba,
			   enum desc_idn desc_id,
			   int desc_index,
			   u8 param_offset,
			   u8 *param_read_buf,
			   u8 param_size)
{
	int ret;
	u8 *desc_buf;
	int buff_len = QUERY_DESC_MAX_SIZE;
	bool is_kmalloc = true;

	/* Safety check */
	if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
		return -EINVAL;

	/* Check whether we need temp memory */
	if (param_offset != 0 || param_size < buff_len) {
		desc_buf = kzalloc(buff_len, GFP_KERNEL);
		if (!desc_buf)
			return -ENOMEM;
	} else {
		desc_buf = param_read_buf;
		is_kmalloc = false;
	}

	/* Request for full descriptor */
	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
					    desc_id, desc_index, 0,
					    desc_buf, &buff_len);
	if (ret) {
		dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n",
			__func__, desc_id, desc_index, param_offset, ret);
		goto out;
	}

	/* Update descriptor length */
	buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET];

	if (param_offset >= buff_len) {
		dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n",
			__func__, param_offset, desc_id, buff_len);
		ret = -EINVAL;
		goto out;
	}

	/* Sanity check */
	if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
		dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n",
			__func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
		ret = -EINVAL;
		goto out;
	}

	if (is_kmalloc) {
		/* Make sure we don't copy more data than available */
		if (param_offset >= buff_len)
			ret = -EINVAL;
		else
			memcpy(param_read_buf, &desc_buf[param_offset],
			       min_t(u32, param_size, buff_len - param_offset));
	}
out:
	if (is_kmalloc)
		kfree(desc_buf);
	return ret;
}
/**
 * struct uc_string_id - unicode string
 *
 * @len: size of this descriptor inclusive
 * @type: descriptor type
 * @uc: unicode string character
 */
struct uc_string_id {
	u8 len;
	u8 type;
	wchar_t uc[];
} __packed;

/* replace non-printable or non-ASCII characters with spaces */
static inline char ufshcd_remove_non_printable(u8 ch)
{
	return (ch >= 0x20 && ch <= 0x7e) ? ch : ' ';
}
/**
 * ufshcd_read_string_desc - read string descriptor
 * @hba: pointer to adapter instance
 * @desc_index: descriptor index
 * @buf: pointer to buffer where descriptor would be read,
 *       the caller should free the memory.
 * @ascii: if true convert from unicode to ascii characters
 *         null terminated string.
 *
 * Return:
 * *      string size on success.
 * *      -ENOMEM: on allocation failure
 * *      -EINVAL: on a wrong parameter
 */
int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
			    u8 **buf, bool ascii)
{
	struct uc_string_id *uc_str;
	u8 *str;
	int ret;

	if (!buf)
		return -EINVAL;

	uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
	if (!uc_str)
		return -ENOMEM;

	ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_STRING, desc_index, 0,
				     (u8 *)uc_str, QUERY_DESC_MAX_SIZE);
	if (ret < 0) {
		dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n",
			QUERY_REQ_RETRIES, ret);
		str = NULL;
		goto out;
	}

	if (uc_str->len <= QUERY_DESC_HDR_SIZE) {
		dev_dbg(hba->dev, "String Desc is of zero length\n");
		str = NULL;
		ret = 0;
		goto out;
	}

	if (ascii) {
		ssize_t ascii_len;
		int i;
		/* remove header and divide by 2 to move from UTF16 to UTF8 */
		ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1;
		str = kzalloc(ascii_len, GFP_KERNEL);
		if (!str) {
			ret = -ENOMEM;
			goto out;
		}

		/*
		 * the descriptor contains string in UTF16 format
		 * we need to convert to utf-8 so it can be displayed
		 */
		ret = utf16s_to_utf8s(uc_str->uc,
				      uc_str->len - QUERY_DESC_HDR_SIZE,
				      UTF16_BIG_ENDIAN, str, ascii_len);

		/* replace non-printable or non-ASCII characters with spaces */
		for (i = 0; i < ret; i++)
			str[i] = ufshcd_remove_non_printable(str[i]);

		str[ret++] = '\0';

	} else {
		str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
		if (!str) {
			ret = -ENOMEM;
			goto out;
		}
		ret = uc_str->len;
	}
out:
	*buf = str;
	kfree(uc_str);
	return ret;
}
/**
 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
 * @hba: Pointer to adapter instance
 * @lun: lun id
 * @param_offset: offset of the parameter to read
 * @param_read_buf: pointer to buffer where parameter would be read
 * @param_size: sizeof(param_read_buf)
 *
 * Return: 0 in case of success, non-zero otherwise.
 */
static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
					      int lun,
					      enum unit_desc_param param_offset,
					      u8 *param_read_buf,
					      u32 param_size)
{
	/*
	 * Unit descriptors are only available for general purpose LUs (LUN id
	 * from 0 to 7) and RPMB Well known LU.
	 */
	if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun))
		return -EOPNOTSUPP;

	return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
				      param_offset, param_read_buf, param_size);
}
static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba)
{
	int err = 0;
	u32 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;

	if (hba->dev_info.wspecversion >= 0x300) {
		err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME, 0, 0,
				&gating_wait);
		if (err)
			dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n",
					 err, gating_wait);

		if (gating_wait == 0) {
			gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
			dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n",
					 gating_wait);
		}

		hba->dev_info.clk_gating_wait_us = gating_wait;
	}

	return err;
}
/**
 * ufshcd_memory_alloc - allocate memory for host memory space data structures
 * @hba: per adapter instance
 *
 * 1. Allocate DMA memory for Command Descriptor array
 *	Each command descriptor consist of Command UPIU, Response UPIU and PRDT
 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
 *	(UTMRDL)
 * 4. Allocate memory for local reference block(lrb).
 *
 * Return: 0 for success, non-zero in case of failure.
 */
static int ufshcd_memory_alloc(struct ufs_hba *hba)
{
	size_t utmrdl_size, utrdl_size, ucdl_size;

	/* Allocate memory for UTP command descriptors */
	ucdl_size = ufshcd_get_ucd_size(hba) * hba->nutrs;
	hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
						  ucdl_size,
						  &hba->ucdl_dma_addr,
						  GFP_KERNEL);

	/*
	 * UFSHCI requires UTP command descriptor to be 128 byte aligned.
	 */
	if (!hba->ucdl_base_addr ||
	    WARN_ON(hba->ucdl_dma_addr & (128 - 1))) {
		dev_err(hba->dev,
			"Command Descriptor Memory allocation failed\n");
		goto out;
	}

	/*
	 * Allocate memory for UTP Transfer descriptors
	 * UFSHCI requires 1KB alignment of UTRD
	 */
	utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
	hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
						   utrdl_size,
						   &hba->utrdl_dma_addr,
						   GFP_KERNEL);
	if (!hba->utrdl_base_addr ||
	    WARN_ON(hba->utrdl_dma_addr & (SZ_1K - 1))) {
		dev_err(hba->dev,
			"Transfer Descriptor Memory allocation failed\n");
		goto out;
	}

	/*
	 * Skip utmrdl allocation; it may have been
	 * allocated during first pass and not released during
	 * MCQ memory allocation.
	 * See ufshcd_release_sdb_queue() and ufshcd_config_mcq()
	 */
	if (hba->utmrdl_base_addr)
		goto skip_utmrdl;
	/*
	 * Allocate memory for UTP Task Management descriptors
	 * UFSHCI requires 1KB alignment of UTMRD
	 */
	utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
	hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
						    utmrdl_size,
						    &hba->utmrdl_dma_addr,
						    GFP_KERNEL);
	if (!hba->utmrdl_base_addr ||
	    WARN_ON(hba->utmrdl_dma_addr & (SZ_1K - 1))) {
		dev_err(hba->dev,
			"Task Management Descriptor Memory allocation failed\n");
		goto out;
	}

skip_utmrdl:
	/* Allocate memory for local reference block */
	hba->lrb = devm_kcalloc(hba->dev,
				hba->nutrs, sizeof(struct ufshcd_lrb),
				GFP_KERNEL);
	if (!hba->lrb) {
		dev_err(hba->dev, "LRB Memory allocation failed\n");
		goto out;
	}
	return 0;
out:
	return -ENOMEM;
}
/**
 * ufshcd_host_memory_configure - configure local reference block with
 *				memory offsets
 * @hba: per adapter instance
 *
 * Configure Host memory space
 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
 *    address.
 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
 *    and PRDT offset.
 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
 *    into local reference block.
 */
static void ufshcd_host_memory_configure(struct ufs_hba *hba)
{
	struct utp_transfer_req_desc *utrdlp;
	dma_addr_t cmd_desc_dma_addr;
	dma_addr_t cmd_desc_element_addr;
	u16 response_offset;
	u16 prdt_offset;
	int cmd_desc_size;
	int i;

	utrdlp = hba->utrdl_base_addr;

	response_offset =
		offsetof(struct utp_transfer_cmd_desc, response_upiu);
	prdt_offset =
		offsetof(struct utp_transfer_cmd_desc, prd_table);

	cmd_desc_size = ufshcd_get_ucd_size(hba);
	cmd_desc_dma_addr = hba->ucdl_dma_addr;

	for (i = 0; i < hba->nutrs; i++) {
		/* Configure UTRD with command descriptor base address */
		cmd_desc_element_addr =
				(cmd_desc_dma_addr + (cmd_desc_size * i));
		utrdlp[i].command_desc_base_addr =
				cpu_to_le64(cmd_desc_element_addr);

		/* Response upiu and prdt offset should be in double words */
		if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
			utrdlp[i].response_upiu_offset =
				cpu_to_le16(response_offset);
			utrdlp[i].prd_table_offset =
				cpu_to_le16(prdt_offset);
			utrdlp[i].response_upiu_length =
				cpu_to_le16(ALIGNED_UPIU_SIZE);
		} else {
			utrdlp[i].response_upiu_offset =
				cpu_to_le16(response_offset >> 2);
			utrdlp[i].prd_table_offset =
				cpu_to_le16(prdt_offset >> 2);
			utrdlp[i].response_upiu_length =
				cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
		}

		ufshcd_init_lrb(hba, &hba->lrb[i], i);
	}
}
/**
 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
 * @hba: per adapter instance
 *
 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
 * in order to initialize the Unipro link startup procedure.
 * Once the Unipro links are up, the device connected to the controller
 * is detected.
 *
 * Return: 0 on success, non-zero value on failure.
 */
static int ufshcd_dme_link_startup(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_dbg(hba->dev,
			"dme-link-startup: error code %d\n", ret);
	return ret;
}

/**
 * ufshcd_dme_reset - UIC command for DME_RESET
 * @hba: per adapter instance
 *
 * DME_RESET command is issued in order to reset UniPro stack.
 * This function now deals with cold reset.
 *
 * Return: 0 on success, non-zero value on failure.
 */
static int ufshcd_dme_reset(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_RESET;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_err(hba->dev,
			"dme-reset: error code %d\n", ret);

	return ret;
}
int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
			       int agreed_gear,
			       int adapt_val)
{
	int ret;

	if (agreed_gear < UFS_HS_G4)
		adapt_val = PA_NO_ADAPT;

	ret = ufshcd_dme_set(hba,
			     UIC_ARG_MIB(PA_TXHSADAPTTYPE),
			     adapt_val);
	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt);

/**
 * ufshcd_dme_enable - UIC command for DME_ENABLE
 * @hba: per adapter instance
 *
 * DME_ENABLE command is issued in order to enable UniPro stack.
 *
 * Return: 0 on success, non-zero value on failure.
 */
static int ufshcd_dme_enable(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_ENABLE;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_err(hba->dev,
			"dme-enable: error code %d\n", ret);

	return ret;
}
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
{
	#define MIN_DELAY_BEFORE_DME_CMDS_US	1000
	unsigned long min_sleep_time_us;

	if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
		return;

	/*
	 * last_dme_cmd_tstamp will be 0 only for 1st call to
	 * this function
	 */
	if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
		min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
	} else {
		unsigned long delta =
			(unsigned long) ktime_to_us(
				ktime_sub(ktime_get(),
				hba->last_dme_cmd_tstamp));

		if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
			min_sleep_time_us =
				MIN_DELAY_BEFORE_DME_CMDS_US - delta;
		else
			return; /* no more delay required */
	}

	/* allow sleep for extra 50us if needed */
	usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
}
/**
 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
 * @hba: per adapter instance
 * @attr_sel: uic command argument1
 * @attr_set: attribute set type as uic command argument2
 * @mib_val: setting value as uic command argument3
 * @peer: indicate whether peer or local
 *
 * Return: 0 on success, non-zero value on failure.
 */
int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
			u8 attr_set, u32 mib_val, u8 peer)
{
	struct uic_command uic_cmd = {0};
	static const char *const action[] = {
		"dme-set",
		"dme-peer-set"
	};
	const char *set = action[!!peer];
	int ret;
	int retries = UFS_UIC_COMMAND_RETRIES;

	uic_cmd.command = peer ?
		UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
	uic_cmd.argument1 = attr_sel;
	uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
	uic_cmd.argument3 = mib_val;

	do {
		/* for peer attributes we retry upon failure */
		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
		if (ret)
			dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
				set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
	} while (ret && peer && --retries);

	if (ret)
		dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
			set, UIC_GET_ATTR_ID(attr_sel), mib_val,
			UFS_UIC_COMMAND_RETRIES - retries);

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
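/*
 * Usage sketch (illustrative): most callers go through the ufshcd_dme_set()
 * convenience wrapper from ufshcd.h rather than calling this function
 * directly, e.g. the adapt configuration above, or enabling TX termination
 * during a power mode change:
 *
 *	int ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
 */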
/**
 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
 * @hba: per adapter instance
 * @attr_sel: uic command argument1
 * @mib_val: the value of the attribute as returned by the UIC command
 * @peer: indicate whether peer or local
 *
 * Return: 0 on success, non-zero value on failure.
 */
int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
			u32 *mib_val, u8 peer)
{
	struct uic_command uic_cmd = {0};
	static const char *const action[] = {
		"dme-get",
		"dme-peer-get"
	};
	const char *get = action[!!peer];
	int ret;
	int retries = UFS_UIC_COMMAND_RETRIES;
	struct ufs_pa_layer_attr orig_pwr_info;
	struct ufs_pa_layer_attr temp_pwr_info;
	bool pwr_mode_change = false;

	if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
		orig_pwr_info = hba->pwr_info;
		temp_pwr_info = orig_pwr_info;

		if (orig_pwr_info.pwr_tx == FAST_MODE ||
		    orig_pwr_info.pwr_rx == FAST_MODE) {
			temp_pwr_info.pwr_tx = FASTAUTO_MODE;
			temp_pwr_info.pwr_rx = FASTAUTO_MODE;
			pwr_mode_change = true;
		} else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
		    orig_pwr_info.pwr_rx == SLOW_MODE) {
			temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
			temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
			pwr_mode_change = true;
		}
		if (pwr_mode_change) {
			ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
			if (ret)
				goto out;
		}
	}

	uic_cmd.command = peer ?
		UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
	uic_cmd.argument1 = attr_sel;

	do {
		/* for peer attributes we retry upon failure */
		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
		if (ret)
			dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
				get, UIC_GET_ATTR_ID(attr_sel), ret);
	} while (ret && peer && --retries);

	if (ret)
		dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
			get, UIC_GET_ATTR_ID(attr_sel),
			UFS_UIC_COMMAND_RETRIES - retries);

	if (mib_val && !ret)
		*mib_val = uic_cmd.argument3;

	if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
	    && pwr_mode_change)
		ufshcd_change_power_mode(hba, &orig_pwr_info);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
/**
 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
 * state) and waits for it to take effect.
 *
 * @hba: per adapter instance
 * @cmd: UIC command to execute
 *
 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
 * DME_HIBERNATE_EXIT commands take some time to take its effect on both host
 * and device UniPro link and hence it's final completion would be indicated by
 * dedicated status bits in Interrupt Status register (UPMS, UHES, UHXS) in
 * addition to normal UIC command completion Status (UCCS). This function only
 * returns after the relevant status bits indicate the completion.
 *
 * Return: 0 on success, non-zero value on failure.
 */
static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
{
	DECLARE_COMPLETION_ONSTACK(uic_async_done);
	unsigned long flags;
	u8 status;
	int ret;
	bool reenable_intr = false;

	mutex_lock(&hba->uic_cmd_mutex);
	ufshcd_add_delay_before_dme_cmd(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (ufshcd_is_link_broken(hba)) {
		ret = -ENOLINK;
		goto out_unlock;
	}
	hba->uic_async_done = &uic_async_done;
	if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
		ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
		/*
		 * Make sure UIC command completion interrupt is disabled before
		 * issuing UIC command.
		 */
		wmb();
		reenable_intr = true;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ret = __ufshcd_send_uic_cmd(hba, cmd, false);
	if (ret) {
		dev_err(hba->dev,
			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
			cmd->command, cmd->argument3, ret);
		goto out;
	}

	if (!wait_for_completion_timeout(hba->uic_async_done,
					 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
		dev_err(hba->dev,
			"pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
			cmd->command, cmd->argument3);

		if (!cmd->cmd_active) {
			dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n",
				__func__);
			goto check_upmcrs;
		}

		ret = -ETIMEDOUT;
		goto out;
	}

check_upmcrs:
	status = ufshcd_get_upmcrs(hba);
	if (status != PWR_LOCAL) {
		dev_err(hba->dev,
			"pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
			cmd->command, status);
		ret = (status != PWR_OK) ? status : -1;
	}
out:
	if (ret) {
		ufshcd_print_host_state(hba);
		ufshcd_print_pwr_info(hba);
		ufshcd_print_evt_hist(hba);
	}

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->active_uic_cmd = NULL;
	hba->uic_async_done = NULL;
	if (reenable_intr)
		ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
	if (ret) {
		ufshcd_set_link_broken(hba);
		ufshcd_schedule_eh_work(hba);
	}
out_unlock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	mutex_unlock(&hba->uic_cmd_mutex);

	return ret;
}
/**
 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
 *				using DME_SET primitives.
 * @hba: per adapter instance
 * @mode: power mode value
 *
 * Return: 0 on success, non-zero value on failure.
 */
int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
{
	struct uic_command uic_cmd = {0};
	int ret;

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
		ret = ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
		if (ret) {
			dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
						__func__, ret);
			goto out;
		}
	}

	uic_cmd.command = UIC_CMD_DME_SET;
	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
	uic_cmd.argument3 = mode;
	ufshcd_hold(hba);
	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
	ufshcd_release(hba);

out:
	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_uic_change_pwr_mode);
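/**
 * ufshcd_link_recovery - recover a UniPro link that has been marked broken
 * @hba: per adapter instance
 *
 * Resets the attached device and performs a full host reset and restore
 * with error handling marked as in progress.
 *
 * Return: 0 on success, non-zero value on failure.
 */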
int ufshcd_link_recovery(struct ufs_hba *hba)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->ufshcd_state = UFSHCD_STATE_RESET;
	ufshcd_set_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* Reset the attached device */
	ufshcd_device_reset(hba);

	ret = ufshcd_host_reset_and_restore(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (ret)
		hba->ufshcd_state = UFSHCD_STATE_ERROR;
	ufshcd_clear_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (ret)
		dev_err(hba->dev, "%s: link recovery failed, err %d",
			__func__, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
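/**
 * ufshcd_uic_hibern8_enter - request the UniPro link to enter hibernate state
 * @hba: per adapter instance
 *
 * Return: 0 on success, non-zero value on failure.
 */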
int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
{
	int ret;
	struct uic_command uic_cmd = {0};
	ktime_t start = ktime_get();

	ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);

	uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
	trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
			     ktime_to_us(ktime_sub(ktime_get(), start)), ret);

	if (ret)
		dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
			__func__, ret);
	else
		ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
								POST_CHANGE);

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_enter);
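/**
 * ufshcd_uic_hibern8_exit - request the UniPro link to exit hibernate state
 * @hba: per adapter instance
 *
 * Return: 0 on success, non-zero value on failure.
 */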
int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;
	ktime_t start = ktime_get();

	ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);

	uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
	trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
			     ktime_to_us(ktime_sub(ktime_get(), start)), ret);

	if (ret) {
		dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
			__func__, ret);
	} else {
		ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
								POST_CHANGE);
		hba->ufs_stats.last_hibern8_exit_tstamp = local_clock();
		hba->ufs_stats.hibern8_exit_cnt++;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);
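/**
 * ufshcd_auto_hibern8_update - update the Auto-Hibernate idle timer
 * @hba: per adapter instance
 * @ahit: new value for the AUTO_HIBERNATE_IDLE_TIMER register
 */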
void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
{
	unsigned long flags;
	bool update = false;

	if (!ufshcd_is_auto_hibern8_supported(hba))
		return;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->ahit != ahit) {
		hba->ahit = ahit;
		update = true;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (update &&
	    !pm_runtime_suspended(&hba->ufs_device_wlun->sdev_gendev)) {
		ufshcd_rpm_get_sync(hba);
		ufshcd_hold(hba);
		ufshcd_auto_hibern8_enable(hba);
		ufshcd_release(hba);
		ufshcd_rpm_put_sync(hba);
	}
}
EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
{
	if (!ufshcd_is_auto_hibern8_supported(hba))
		return;

	ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
}
/**
 * ufshcd_init_pwr_info - setting the POR (power on reset)
 * values in hba power info
 * @hba: per-adapter instance
 */
static void ufshcd_init_pwr_info(struct ufs_hba *hba)
{
	hba->pwr_info.gear_rx = UFS_PWM_G1;
	hba->pwr_info.gear_tx = UFS_PWM_G1;
	hba->pwr_info.lane_rx = UFS_LANE_1;
	hba->pwr_info.lane_tx = UFS_LANE_1;
	hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
	hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
	hba->pwr_info.hs_rate = 0;
}
/**
 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
 * @hba: per-adapter instance
 *
 * Return: 0 upon success; < 0 upon failure.
 */
static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
{
	struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;

	if (hba->max_pwr_info.is_valid)
		return 0;

	if (hba->quirks & UFSHCD_QUIRK_HIBERN_FASTAUTO) {
		pwr_info->pwr_tx = FASTAUTO_MODE;
		pwr_info->pwr_rx = FASTAUTO_MODE;
	} else {
		pwr_info->pwr_tx = FAST_MODE;
		pwr_info->pwr_rx = FAST_MODE;
	}
	pwr_info->hs_rate = PA_HS_MODE_B;

	/* Get the connected lane count */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
			&pwr_info->lane_rx);
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
			&pwr_info->lane_tx);

	if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
		dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
				__func__,
				pwr_info->lane_rx,
				pwr_info->lane_tx);
		return -EINVAL;
	}

	/*
	 * First, get the maximum gears of HS speed.
	 * If a zero value, it means there is no HSGEAR capability.
	 * Then, get the maximum gears of PWM speed.
	 */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
	if (!pwr_info->gear_rx) {
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
				&pwr_info->gear_rx);
		if (!pwr_info->gear_rx) {
			dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
				__func__, pwr_info->gear_rx);
			return -EINVAL;
		}
		pwr_info->pwr_rx = SLOW_MODE;
	}

	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
			&pwr_info->gear_tx);
	if (!pwr_info->gear_tx) {
		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
				&pwr_info->gear_tx);
		if (!pwr_info->gear_tx) {
			dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
					__func__, pwr_info->gear_tx);
			return -EINVAL;
		}
		pwr_info->pwr_tx = SLOW_MODE;
	}

	hba->max_pwr_info.is_valid = true;
	return 0;
}
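/**
 * ufshcd_change_power_mode - apply a new power mode to the UFS link
 * @hba: per-adapter instance
 * @pwr_mode: gears, lane counts, power modes and HS rate to configure
 *
 * Return: 0 upon success; non-zero value upon failure.
 */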
static int ufshcd_change_power_mode(struct ufs_hba *hba,
			     struct ufs_pa_layer_attr *pwr_mode)
{
	int ret;

	/* if already configured to the requested pwr_mode */
	if (!hba->force_pmc &&
	    pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
	    pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
	    pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
	    pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
	    pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
	    pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
	    pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
		dev_dbg(hba->dev, "%s: power already configured\n", __func__);
		return 0;
	}

	/*
	 * Configure attributes for power mode change with below.
	 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
	 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
	 * - PA_HSSERIES
	 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
			pwr_mode->lane_rx);
	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
			pwr_mode->pwr_rx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
	else
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), false);

	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
			pwr_mode->lane_tx);
	if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
			pwr_mode->pwr_tx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
	else
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), false);

	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
	    pwr_mode->pwr_tx == FASTAUTO_MODE ||
	    pwr_mode->pwr_rx == FAST_MODE ||
	    pwr_mode->pwr_tx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
						pwr_mode->hs_rate);

	if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) {
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
				DL_FC0ProtectionTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
				DL_TC0ReplayTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
				DL_AFC0ReqTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
				DL_FC1ProtectionTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
				DL_TC1ReplayTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
				DL_AFC1ReqTimeOutVal_Default);

		ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
				DL_FC0ProtectionTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
				DL_TC0ReplayTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
				DL_AFC0ReqTimeOutVal_Default);
	}

	ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
			| pwr_mode->pwr_tx);

	if (ret) {
		dev_err(hba->dev,
			"%s: power mode change failed %d\n", __func__, ret);
	} else {
		ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
								pwr_mode);

		memcpy(&hba->pwr_info, pwr_mode,
			sizeof(struct ufs_pa_layer_attr));
	}

	return ret;
}
/**
 * ufshcd_config_pwr_mode - configure a new power mode
 * @hba: per-adapter instance
 * @desired_pwr_mode: desired power configuration
 *
 * Return: 0 upon success; < 0 upon failure.
 */
int ufshcd_config_pwr_mode(struct ufs_hba *hba,
		struct ufs_pa_layer_attr *desired_pwr_mode)
{
	struct ufs_pa_layer_attr final_params = { 0 };
	int ret;

	ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
					desired_pwr_mode, &final_params);

	if (ret)
		memcpy(&final_params, desired_pwr_mode, sizeof(final_params));

	ret = ufshcd_change_power_mode(hba, &final_params);

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
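/*
 * Usage sketch (illustrative only): once ufshcd_get_max_pwr_mode() has
 * populated hba->max_pwr_info, the probe path can switch the link to the
 * fastest negotiated settings with:
 *
 *	ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
 */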
/**
 * ufshcd_complete_dev_init() - checks device readiness
 * @hba: per-adapter instance
 *
 * Set fDeviceInit flag and poll until device toggles it.
 *
 * Return: 0 upon success; < 0 upon failure.
 */
static int ufshcd_complete_dev_init(struct ufs_hba *hba)
{
	int err;
	bool flag_res = true;
	ktime_t timeout;

	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
		QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
	if (err) {
		dev_err(hba->dev,
			"%s: setting fDeviceInit flag failed with error %d\n",
			__func__, err);
		goto out;
	}

	/* Poll fDeviceInit flag to be cleared */
	timeout = ktime_add_ms(ktime_get(), FDEVICEINIT_COMPL_TIMEOUT);
	do {
		err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
					QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
		if (!flag_res)
			break;
		usleep_range(500, 1000);
	} while (ktime_before(ktime_get(), timeout));

	if (err) {
		dev_err(hba->dev,
			"%s: reading fDeviceInit flag failed with error %d\n",
			__func__, err);
	} else if (flag_res) {
		dev_err(hba->dev,
			"%s: fDeviceInit was not cleared by the device\n",
			__func__);
		err = -EBUSY;
	}
out:
	return err;
}
/**
 * ufshcd_make_hba_operational - Make UFS controller operational
 * @hba: per adapter instance
 *
 * To bring UFS host controller to operational state,
 * 1. Enable required interrupts
 * 2. Configure interrupt aggregation
 * 3. Program UTRL and UTMRL base address
 * 4. Configure run-stop-registers
 *
 * Return: 0 on success, non-zero value on failure.
 */
int ufshcd_make_hba_operational(struct ufs_hba *hba)
{
	int err = 0;
	u32 reg;

	/* Enable required interrupts */
	ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);

	/* Configure interrupt aggregation */
	if (ufshcd_is_intr_aggr_allowed(hba))
		ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
	else
		ufshcd_disable_intr_aggr(hba);

	/* Configure UTRL and UTMRL base address registers */
	ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
			REG_UTP_TRANSFER_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
			REG_UTP_TRANSFER_REQ_LIST_BASE_H);
	ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
			REG_UTP_TASK_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
			REG_UTP_TASK_REQ_LIST_BASE_H);

	/*
	 * Make sure base address and interrupt setup are updated before
	 * enabling the run/stop registers below.
	 */
	wmb();

	/*
	 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
	 */
	reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
	if (!(ufshcd_get_lists_status(reg))) {
		ufshcd_enable_run_stop_reg(hba);
	} else {
		dev_err(hba->dev,
			"Host controller not ready to process requests");
		err = -EIO;
	}

	return err;
}
EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
/**
 * ufshcd_hba_stop - Send controller to reset state
 * @hba: per adapter instance
 */
void ufshcd_hba_stop(struct ufs_hba *hba)
{
	unsigned long flags;
	int err;

	/*
	 * Obtain the host lock to prevent that the controller is disabled
	 * while the UFS interrupt handler is active on another CPU.
	 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
					CONTROLLER_ENABLE, CONTROLLER_DISABLE,
					10, 1);
	if (err)
		dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
}
EXPORT_SYMBOL_GPL(ufshcd_hba_stop);
/**
 * ufshcd_hba_execute_hce - initialize the controller
 * @hba: per adapter instance
 *
 * The controller resets itself and controller firmware initialization
 * sequence kicks off. When controller is ready it will set
 * the Host Controller Enable bit to 1.
 *
 * Return: 0 on success, non-zero value on failure.
 */
static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
{
	int retry_outer = 3;
	int retry_inner;

start:
	if (ufshcd_is_hba_active(hba))
		/* change controller state to "reset state" */
		ufshcd_hba_stop(hba);

	/* UniPro link is disabled at this point */
	ufshcd_set_link_off(hba);

	ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);

	/* start controller initialization sequence */
	ufshcd_hba_start(hba);

	/*
	 * To initialize a UFS host controller HCE bit must be set to 1.
	 * During initialization the HCE bit value changes from 1->0->1.
	 * When the host controller completes initialization sequence
	 * it sets the value of HCE bit to 1. The same HCE bit is read back
	 * to check if the controller has completed initialization sequence.
	 * So without this delay the value HCE = 1, set in the previous
	 * instruction might be read back.
	 * This delay can be changed based on the controller.
	 */
	ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);

	/* wait for the host controller to complete initialization */
	retry_inner = 50;
	while (!ufshcd_is_hba_active(hba)) {
		if (retry_inner) {
			retry_inner--;
		} else {
			dev_err(hba->dev,
				"Controller enable failed\n");
			if (retry_outer) {
				retry_outer--;
				goto start;
			}
			return -EIO;
		}
		usleep_range(1000, 1100);
	}

	/* enable UIC related interrupts */
	ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);

	ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);

	return 0;
}
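/**
 * ufshcd_hba_enable - enable the UFS host controller
 * @hba: per adapter instance
 *
 * With the UFSHCI_QUIRK_BROKEN_HCE quirk set, DME_RESET and DME_ENABLE are
 * used instead of the standard HCE handshake.
 *
 * Return: 0 on success, non-zero value on failure.
 */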
int ufshcd_hba_enable(struct ufs_hba *hba)
{
	int ret;

	if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
		ufshcd_set_link_off(hba);
		ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);

		/* enable UIC related interrupts */
		ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
		ret = ufshcd_dme_reset(hba);
		if (ret) {
			dev_err(hba->dev, "DME_RESET failed\n");
			return ret;
		}

		ret = ufshcd_dme_enable(hba);
		if (ret) {
			dev_err(hba->dev, "Enabling DME failed\n");
			return ret;
		}

		ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
	} else {
		ret = ufshcd_hba_execute_hce(hba);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
{
	int tx_lanes = 0, i, err = 0;

	if (!peer)
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
			       &tx_lanes);
	else
		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
				    &tx_lanes);
	for (i = 0; i < tx_lanes; i++) {
		if (!peer)
			err = ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
					0);
		else
			err = ufshcd_dme_peer_set(hba,
				UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
					0);
		if (err) {
			dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
				__func__, peer, i, err);
			break;
		}
	}

	return err;
}

static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
{
	return ufshcd_disable_tx_lcc(hba, true);
}
void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val)
{
	struct ufs_event_hist *e;

	if (id >= UFS_EVT_CNT)
		return;

	e = &hba->ufs_stats.event[id];
	e->val[e->pos] = val;
	e->tstamp[e->pos] = local_clock();
	e->cnt += 1;
	e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH;

	ufshcd_vops_event_notify(hba, id, &val);
}
EXPORT_SYMBOL_GPL(ufshcd_update_evt_hist);
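/*
 * Usage sketch (illustrative only): callers record notable events through
 * the helper above, e.g.
 *
 *	ufshcd_update_evt_hist(hba, UFS_EVT_LINK_STARTUP_FAIL, (u32)ret);
 *
 * which is how ufshcd_link_startup() below logs failed attempts.
 */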
/**
 * ufshcd_link_startup - Initialize unipro link startup
 * @hba: per adapter instance
 *
 * Return: 0 for success, non-zero in case of failure.
 */
static int ufshcd_link_startup(struct ufs_hba *hba)
{
	int ret;
	int retries = DME_LINKSTARTUP_RETRIES;
	bool link_startup_again = false;

	/*
	 * If the UFS device isn't active then we will have to issue link
	 * startup 2 times to make sure the device state moves to active.
	 */
	if (!ufshcd_is_ufs_dev_active(hba))
		link_startup_again = true;

link_startup:
	do {
		ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);

		ret = ufshcd_dme_link_startup(hba);

		/* check if device is detected by inter-connect layer */
		if (!ret && !ufshcd_is_device_present(hba)) {
			ufshcd_update_evt_hist(hba,
					       UFS_EVT_LINK_STARTUP_FAIL,
					       0);
			dev_err(hba->dev, "%s: Device not present\n", __func__);
			ret = -ENXIO;
			goto out;
		}

		/*
		 * DME link lost indication is only received when link is up,
		 * but we can't be sure if the link is up until link startup
		 * succeeds. So reset the local Uni-Pro and try again.
		 */
		if (ret && retries && ufshcd_hba_enable(hba)) {
			ufshcd_update_evt_hist(hba,
					       UFS_EVT_LINK_STARTUP_FAIL,
					       (u32)ret);
			goto out;
		}
	} while (ret && retries--);

	if (ret) {
		/* failed to get the link up... retire */
		ufshcd_update_evt_hist(hba,
				       UFS_EVT_LINK_STARTUP_FAIL,
				       (u32)ret);
		goto out;
	}

	if (link_startup_again) {
		link_startup_again = false;
		retries = DME_LINKSTARTUP_RETRIES;
		goto link_startup;
	}

	/* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
	ufshcd_init_pwr_info(hba);
	ufshcd_print_pwr_info(hba);

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
		ret = ufshcd_disable_device_tx_lcc(hba);
		if (ret)
			goto out;
	}

	/* Include any host controller configuration via UIC commands */
	ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
	if (ret)
		goto out;

	/* Clear UECPA in case a LINERESET happened during LINK_STARTUP */
	ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
	ret = ufshcd_make_hba_operational(hba);
out:
	if (ret) {
		dev_err(hba->dev, "link startup failed %d\n", ret);
		ufshcd_print_host_state(hba);
		ufshcd_print_pwr_info(hba);
		ufshcd_print_evt_hist(hba);
	}
	return ret;
}
/**
 * ufshcd_verify_dev_init() - Verify device initialization
 * @hba: per-adapter instance
 *
 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
 * device Transport Protocol (UTP) layer is ready after a reset.
 * If the UTP layer at the device side is not initialized, it may
 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
 *
 * Return: 0 upon success; < 0 upon failure.
 */
static int ufshcd_verify_dev_init(struct ufs_hba *hba)
{
	int err = 0;
	int retries;

	ufshcd_hold(hba);
	mutex_lock(&hba->dev_cmd.lock);
	for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
		err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
					  hba->nop_out_timeout);

		if (!err || err == -ETIMEDOUT)
			break;

		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
	}
	mutex_unlock(&hba->dev_cmd.lock);
	ufshcd_release(hba);

	if (err)
		dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
	return err;
}
/**
 * ufshcd_setup_links - associate link b/w device wlun and other luns
 * @sdev: pointer to SCSI device
 * @hba: pointer to ufs hba
 */
static void ufshcd_setup_links(struct ufs_hba *hba, struct scsi_device *sdev)
{
	struct device_link *link;

	/*
	 * Device wlun is the supplier & rest of the luns are consumers.
	 * This ensures that device wlun suspends after all other luns.
	 */
	if (hba->ufs_device_wlun) {
		link = device_link_add(&sdev->sdev_gendev,
				       &hba->ufs_device_wlun->sdev_gendev,
				       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
		if (!link) {
			dev_err(&sdev->sdev_gendev, "Failed establishing link - %s\n",
				dev_name(&hba->ufs_device_wlun->sdev_gendev));
			return;
		}
		hba->luns_avail--;
		/* Ignore REPORT_LUN wlun probing */
		if (hba->luns_avail == 1) {
			ufshcd_rpm_put(hba);
			return;
		}
	} else {
		/*
		 * Device wlun is probed. The assumption is that WLUNs are
		 * scanned before other LUNs.
		 */
		hba->luns_avail--;
	}
}
/**
 * ufshcd_lu_init - Initialize the relevant parameters of the LU
 * @hba: per-adapter instance
 * @sdev: pointer to SCSI device
 */
static void ufshcd_lu_init(struct ufs_hba *hba, struct scsi_device *sdev)
{
	int len = QUERY_DESC_MAX_SIZE;
	u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);
	u8 lun_qdepth = hba->nutrs;
	u8 *desc_buf;
	int ret;

	desc_buf = kzalloc(len, GFP_KERNEL);
	if (!desc_buf)
		goto set_qdepth;

	ret = ufshcd_read_unit_desc_param(hba, lun, 0, desc_buf, len);
	if (ret < 0) {
		if (ret == -EOPNOTSUPP)
			/* If LU doesn't support unit descriptor, its queue depth is set to 1 */
			lun_qdepth = 1;
		kfree(desc_buf);
		goto set_qdepth;
	}

	if (desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH]) {
		/*
		 * In per-LU queueing architecture, bLUQueueDepth will not be 0, then we will
		 * use the smaller between UFSHCI CAP.NUTRS and UFS LU bLUQueueDepth
		 */
		lun_qdepth = min_t(int, desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH], hba->nutrs);
	}
	/*
	 * According to UFS device specification, the write protection mode is only supported by
	 * normal LU, not supported by WLUN.
	 */
	if (hba->dev_info.f_power_on_wp_en && lun < hba->dev_info.max_lu_supported &&
	    !hba->dev_info.is_lu_power_on_wp &&
	    desc_buf[UNIT_DESC_PARAM_LU_WR_PROTECT] == UFS_LU_POWER_ON_WP)
		hba->dev_info.is_lu_power_on_wp = true;

	/* In case of RPMB LU, check if advanced RPMB mode is enabled */
	if (desc_buf[UNIT_DESC_PARAM_UNIT_INDEX] == UFS_UPIU_RPMB_WLUN &&
	    desc_buf[RPMB_UNIT_DESC_PARAM_REGION_EN] & BIT(4))
		hba->dev_info.b_advanced_rpmb_en = true;

	kfree(desc_buf);
set_qdepth:
	/*
	 * For WLUNs that don't support unit descriptor, queue depth is set to 1. For LUs whose
	 * bLUQueueDepth == 0, the queue depth is set to a maximum value that host can queue.
	 */
	dev_dbg(hba->dev, "Set LU %x queue depth %d\n", lun, lun_qdepth);
	scsi_change_queue_depth(sdev, lun_qdepth);
}
/**
 * ufshcd_slave_alloc - handle initial SCSI device configurations
 * @sdev: pointer to SCSI device
 *
 * Return: 0 (success).
 */
static int ufshcd_slave_alloc(struct scsi_device *sdev)
{
	struct ufs_hba *hba;

	hba = shost_priv(sdev->host);

	/* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
	sdev->use_10_for_ms = 1;

	/* DBD field should be set to 1 in mode sense(10) */
	sdev->set_dbd_for_ms = 1;

	/* allow SCSI layer to restart the device in case of errors */
	sdev->allow_restart = 1;

	/* REPORT SUPPORTED OPERATION CODES is not supported */
	sdev->no_report_opcodes = 1;

	/* WRITE_SAME command is not supported */
	sdev->no_write_same = 1;

	ufshcd_lu_init(hba, sdev);

	ufshcd_setup_links(hba, sdev);

	return 0;
}
/**
 * ufshcd_change_queue_depth - change queue depth
 * @sdev: pointer to SCSI device
 * @depth: required depth to set
 *
 * Change queue depth and make sure the max. limits are not crossed.
 *
 * Return: new queue depth.
 */
static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
{
	return scsi_change_queue_depth(sdev, min(depth, sdev->host->can_queue));
}
/**
 * ufshcd_slave_configure - adjust SCSI device configurations
 * @sdev: pointer to SCSI device
 *
 * Return: 0 (success).
 */
static int ufshcd_slave_configure(struct scsi_device *sdev)
{
	struct ufs_hba *hba = shost_priv(sdev->host);
	struct request_queue *q = sdev->request_queue;

	blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
	if (hba->quirks & UFSHCD_QUIRK_4KB_DMA_ALIGNMENT)
		blk_queue_update_dma_alignment(q, SZ_4K - 1);
	/*
	 * Block runtime-pm until all consumers are added.
	 * Refer ufshcd_setup_links().
	 */
	if (is_device_wlun(sdev))
		pm_runtime_get_noresume(&sdev->sdev_gendev);
	else if (ufshcd_is_rpm_autosuspend_allowed(hba))
		sdev->rpm_autosuspend = 1;
	/*
	 * Do not print messages during runtime PM to avoid never-ending cycles
	 * of messages written back to storage by user space causing runtime
	 * resume, causing more messages and so on.
	 */
	sdev->silence_suspend = 1;

	ufshcd_crypto_register(hba, q);

	return 0;
}
/**
 * ufshcd_slave_destroy - remove SCSI device configurations
 * @sdev: pointer to SCSI device
 */
static void ufshcd_slave_destroy(struct scsi_device *sdev)
{
	struct ufs_hba *hba;
	unsigned long flags;

	hba = shost_priv(sdev->host);

	/* Drop the reference as it won't be needed anymore */
	if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
		spin_lock_irqsave(hba->host->host_lock, flags);
		hba->ufs_device_wlun = NULL;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	} else if (hba->ufs_device_wlun) {
		struct device *supplier = NULL;

		/* Ensure UFS Device WLUN exists and does not disappear */
		spin_lock_irqsave(hba->host->host_lock, flags);
		if (hba->ufs_device_wlun) {
			supplier = &hba->ufs_device_wlun->sdev_gendev;
			get_device(supplier);
		}
		spin_unlock_irqrestore(hba->host->host_lock, flags);

		if (supplier) {
			/*
			 * If a LUN fails to probe (e.g. absent BOOT WLUN), the
			 * device will not have been registered but can still
			 * have a device link holding a reference to the device.
			 */
			device_link_remove(&sdev->sdev_gendev, supplier);
			put_device(supplier);
		}
	}
}
/**
 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
 * @lrbp: pointer to local reference block of completed command
 * @scsi_status: SCSI command status
 *
 * Return: value based on SCSI command status.
 */
static inline int
ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
{
	int result = 0;

	switch (scsi_status) {
	case SAM_STAT_CHECK_CONDITION:
		ufshcd_copy_sense_data(lrbp);
		fallthrough;
	case SAM_STAT_GOOD:
		result |= DID_OK << 16 | scsi_status;
		break;
	case SAM_STAT_TASK_SET_FULL:
	case SAM_STAT_BUSY:
	case SAM_STAT_TASK_ABORTED:
		ufshcd_copy_sense_data(lrbp);
		result |= scsi_status;
		break;
	default:
		result |= DID_ERROR << 16;
		break;
	} /* end of switch */

	return result;
}
/**
 * ufshcd_transfer_rsp_status - Get overall status of the response
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block of completed command
 * @cqe: pointer to the completion queue entry
 *
 * Return: result of the command to notify SCSI midlayer.
 */
static inline int
ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
			   struct cq_entry *cqe)
{
	int result = 0;
	int scsi_status;
	enum utp_ocs ocs;
	u8 upiu_flags;
	u32 resid;

	upiu_flags = lrbp->ucd_rsp_ptr->header.flags;
	resid = be32_to_cpu(lrbp->ucd_rsp_ptr->sr.residual_transfer_count);
	/*
	 * Test !overflow instead of underflow to support UFS devices that do
	 * not set either flag.
	 */
	if (resid && !(upiu_flags & UPIU_RSP_FLAG_OVERFLOW))
		scsi_set_resid(lrbp->cmd, resid);

	/* overall command status of utrd */
	ocs = ufshcd_get_tr_ocs(lrbp, cqe);

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) {
		if (lrbp->ucd_rsp_ptr->header.response ||
		    lrbp->ucd_rsp_ptr->header.status)
			ocs = OCS_SUCCESS;
	}

	switch (ocs) {
	case OCS_SUCCESS:
		hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
		switch (ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr)) {
		case UPIU_TRANSACTION_RESPONSE:
			/*
			 * get the result based on SCSI status response
			 * to notify the SCSI midlayer of the command status
			 */
			scsi_status = lrbp->ucd_rsp_ptr->header.status;
			result = ufshcd_scsi_cmd_status(lrbp, scsi_status);

			/*
			 * Currently we are only supporting BKOPs exception
			 * events hence we can ignore BKOPs exception event
			 * during power management callbacks. BKOPs exception
			 * event is not expected to be raised in runtime suspend
			 * callback as it allows the urgent bkops.
			 * During system suspend, we are anyway forcefully
			 * disabling the bkops and if urgent bkops is needed
			 * it will be enabled on system resume. Long term
			 * solution could be to abort the system suspend if
			 * UFS device needs urgent BKOPs.
			 */
			if (!hba->pm_op_in_progress &&
			    !ufshcd_eh_in_progress(hba) &&
			    ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
				/* Flushed in suspend */
				schedule_work(&hba->eeh_work);
			break;
		case UPIU_TRANSACTION_REJECT_UPIU:
			/* TODO: handle Reject UPIU Response */
			result = DID_ERROR << 16;
			dev_err(hba->dev,
				"Reject UPIU not fully implemented\n");
			break;
		default:
			dev_err(hba->dev,
				"Unexpected request response code = %x\n",
				result);
			result = DID_ERROR << 16;
			break;
		}
		break;
	case OCS_ABORTED:
		result |= DID_ABORT << 16;
		break;
	case OCS_INVALID_COMMAND_STATUS:
		result |= DID_REQUEUE << 16;
		break;
	case OCS_INVALID_CMD_TABLE_ATTR:
	case OCS_INVALID_PRDT_ATTR:
	case OCS_MISMATCH_DATA_BUF_SIZE:
	case OCS_MISMATCH_RESP_UPIU_SIZE:
	case OCS_PEER_COMM_FAILURE:
	case OCS_FATAL_ERROR:
	case OCS_DEVICE_FATAL_ERROR:
	case OCS_INVALID_CRYPTO_CONFIG:
	case OCS_GENERAL_CRYPTO_ERROR:
	default:
		result |= DID_ERROR << 16;
		dev_err(hba->dev,
			"OCS error from controller = %x for tag %d\n",
			ocs, lrbp->task_tag);
		ufshcd_print_evt_hist(hba);
		ufshcd_print_host_state(hba);
		break;
	} /* end of switch */

	if ((host_byte(result) != DID_OK) &&
	    (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs)
		ufshcd_print_tr(hba, lrbp->task_tag, true);
	return result;
}
static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
					 u32 intr_mask)
{
	if (!ufshcd_is_auto_hibern8_supported(hba) ||
	    !ufshcd_is_auto_hibern8_enabled(hba))
		return false;

	if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
		return false;

	if (hba->active_uic_cmd &&
	    (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
	    hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
		return false;

	return true;
}
/**
 * ufshcd_uic_cmd_compl - handle completion of uic command
 * @hba: per adapter instance
 * @intr_status: interrupt status generated by the controller
 *
 * Return:
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
{
	irqreturn_t retval = IRQ_NONE;

	spin_lock(hba->host->host_lock);
	if (ufshcd_is_auto_hibern8_error(hba, intr_status))
		hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);

	if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
		hba->active_uic_cmd->argument2 |=
			ufshcd_get_uic_cmd_result(hba);
		hba->active_uic_cmd->argument3 =
			ufshcd_get_dme_attr_val(hba);
		if (!hba->uic_async_done)
			hba->active_uic_cmd->cmd_active = 0;
		complete(&hba->active_uic_cmd->done);
		retval = IRQ_HANDLED;
	}

	if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
		hba->active_uic_cmd->cmd_active = 0;
		complete(hba->uic_async_done);
		retval = IRQ_HANDLED;
	}

	if (retval == IRQ_HANDLED)
		ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd,
					     UFS_CMD_COMP);
	spin_unlock(hba->host->host_lock);
	return retval;
}
/* Release the resources allocated for processing a SCSI command. */
void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
			     struct ufshcd_lrb *lrbp)
{
	struct scsi_cmnd *cmd = lrbp->cmd;

	scsi_dma_unmap(cmd);
	ufshcd_release(hba);
	ufshcd_clk_scaling_update_busy(hba);
}
/**
 * ufshcd_compl_one_cqe - handle a completion queue entry
 * @hba: per adapter instance
 * @task_tag: the task tag of the request to be completed
 * @cqe: pointer to the completion queue entry
 */
void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
			  struct cq_entry *cqe)
{
	struct ufshcd_lrb *lrbp;
	struct scsi_cmnd *cmd;
	enum utp_ocs ocs;

	lrbp = &hba->lrb[task_tag];
	lrbp->compl_time_stamp = ktime_get();
	cmd = lrbp->cmd;
	if (cmd) {
		if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
			ufshcd_update_monitor(hba, lrbp);
		ufshcd_add_command_trace(hba, task_tag, UFS_CMD_COMP);
		cmd->result = ufshcd_transfer_rsp_status(hba, lrbp, cqe);
		ufshcd_release_scsi_cmd(hba, lrbp);
		/* Do not touch lrbp after scsi done */
		scsi_done(cmd);
	} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
		   lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
		if (hba->dev_cmd.complete) {
			if (cqe) {
				ocs = le32_to_cpu(cqe->status) & MASK_OCS;
				lrbp->utr_descriptor_ptr->header.ocs = ocs;
			}
			complete(hba->dev_cmd.complete);
			ufshcd_clk_scaling_update_busy(hba);
		}
	}
}
/**
 * __ufshcd_transfer_req_compl - handle SCSI and query command completion
 * @hba: per adapter instance
 * @completed_reqs: bitmask that indicates which requests to complete
 */
static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
					unsigned long completed_reqs)
{
	int tag;

	for_each_set_bit(tag, &completed_reqs, hba->nutrs)
		ufshcd_compl_one_cqe(hba, tag, NULL);
}

/* Any value that is not an existing queue number is fine for this constant. */
enum {
	UFSHCD_POLL_FROM_INTERRUPT_CONTEXT = -1
};
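/*
 * ufshcd_clear_polled - drop requests submitted with REQ_POLLED from a
 * completion bitmask so that they are completed only from the polling code
 * path and never from interrupt context.
 */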
static void ufshcd_clear_polled(struct ufs_hba *hba,
				unsigned long *completed_reqs)
{
	int tag;

	for_each_set_bit(tag, completed_reqs, hba->nutrs) {
		struct scsi_cmnd *cmd = hba->lrb[tag].cmd;

		if (!cmd)
			continue;
		if (scsi_cmd_to_rq(cmd)->cmd_flags & REQ_POLLED)
			__clear_bit(tag, completed_reqs);
	}
}
5448 * Return: > 0 if one or more commands have been completed or 0 if no
5449 * requests have been completed.
5451 static int ufshcd_poll(struct Scsi_Host
*shost
, unsigned int queue_num
)
5453 struct ufs_hba
*hba
= shost_priv(shost
);
5454 unsigned long completed_reqs
, flags
;
5456 struct ufs_hw_queue
*hwq
;
5458 if (is_mcq_enabled(hba
)) {
5459 hwq
= &hba
->uhq
[queue_num
];
5461 return ufshcd_mcq_poll_cqe_lock(hba
, hwq
);
5464 spin_lock_irqsave(&hba
->outstanding_lock
, flags
);
5465 tr_doorbell
= ufshcd_readl(hba
, REG_UTP_TRANSFER_REQ_DOOR_BELL
);
5466 completed_reqs
= ~tr_doorbell
& hba
->outstanding_reqs
;
5467 WARN_ONCE(completed_reqs
& ~hba
->outstanding_reqs
,
5468 "completed: %#lx; outstanding: %#lx\n", completed_reqs
,
5469 hba
->outstanding_reqs
);
5470 if (queue_num
== UFSHCD_POLL_FROM_INTERRUPT_CONTEXT
) {
5471 /* Do not complete polled requests from interrupt context. */
5472 ufshcd_clear_polled(hba
, &completed_reqs
);
5474 hba
->outstanding_reqs
&= ~completed_reqs
;
5475 spin_unlock_irqrestore(&hba
->outstanding_lock
, flags
);
5478 __ufshcd_transfer_req_compl(hba
, completed_reqs
);
5480 return completed_reqs
!= 0;
/**
 * ufshcd_mcq_compl_pending_transfer - MCQ mode function. It is
 * invoked from the error handler context or ufshcd_host_reset_and_restore()
 * to complete the pending transfers and free the resources associated with
 * the scsi commands.
 *
 * @hba: per adapter instance
 * @force_compl: This flag is set to true when invoked
 * from ufshcd_host_reset_and_restore() in which case it requires special
 * handling because the host controller has been reset by ufshcd_hba_stop().
 */
static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba,
					      bool force_compl)
{
	struct ufs_hw_queue *hwq;
	struct ufshcd_lrb *lrbp;
	struct scsi_cmnd *cmd;
	unsigned long flags;
	u32 hwq_num, utag;
	int tag;

	for (tag = 0; tag < hba->nutrs; tag++) {
		lrbp = &hba->lrb[tag];
		cmd = lrbp->cmd;
		if (!ufshcd_cmd_inflight(cmd) ||
		    test_bit(SCMD_STATE_COMPLETE, &cmd->state))
			continue;

		utag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd));
		hwq_num = blk_mq_unique_tag_to_hwq(utag);
		hwq = &hba->uhq[hwq_num];

		if (force_compl) {
			ufshcd_mcq_compl_all_cqes_lock(hba, hwq);
			/*
			 * For those cmds of which the cqes are not present
			 * in the cq, complete them explicitly.
			 */
			if (cmd && !test_bit(SCMD_STATE_COMPLETE, &cmd->state)) {
				spin_lock_irqsave(&hwq->cq_lock, flags);
				set_host_byte(cmd, DID_REQUEUE);
				ufshcd_release_scsi_cmd(hba, lrbp);
				scsi_done(cmd);
				spin_unlock_irqrestore(&hwq->cq_lock, flags);
			}
		} else {
			ufshcd_mcq_poll_cqe_lock(hba, hwq);
		}
	}
}
/**
 * ufshcd_transfer_req_compl - handle SCSI and query command completion
 * @hba: per adapter instance
 *
 * Return:
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
{
	/* Resetting interrupt aggregation counters first and reading the
	 * DOOR_BELL afterward allows us to handle all the completed requests.
	 * In order to prevent other interrupts starvation the DB is read once
	 * after reset. The down side of this solution is the possibility of
	 * false interrupt if device completes another request after resetting
	 * aggregation and before reading the DB.
	 */
	if (ufshcd_is_intr_aggr_allowed(hba) &&
	    !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
		ufshcd_reset_intr_aggr(hba);

	if (ufs_fail_completion())
		return IRQ_HANDLED;

	/*
	 * Ignore the ufshcd_poll() return value and return IRQ_HANDLED since we
	 * do not want polling to trigger spurious interrupt complaints.
	 */
	ufshcd_poll(hba->host, UFSHCD_POLL_FROM_INTERRUPT_CONTEXT);

	return IRQ_HANDLED;
}
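/*
 * The wExceptionEventControl attribute written below combines two driver
 * masks: hba->ee_drv_mask (events the driver itself consumes) and
 * hba->ee_usr_mask (events requested by user space).
 */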
int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask)
{
	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
				       QUERY_ATTR_IDN_EE_CONTROL, 0, 0,
				       &ee_ctrl_mask);
}

int ufshcd_write_ee_control(struct ufs_hba *hba)
{
	int err;

	mutex_lock(&hba->ee_ctrl_mutex);
	err = __ufshcd_write_ee_control(hba, hba->ee_ctrl_mask);
	mutex_unlock(&hba->ee_ctrl_mutex);
	if (err)
		dev_err(hba->dev, "%s: failed to write ee control %d\n",
			__func__, err);
	return err;
}

int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask,
			     const u16 *other_mask, u16 set, u16 clr)
{
	u16 new_mask, ee_ctrl_mask;
	int err = 0;

	mutex_lock(&hba->ee_ctrl_mutex);
	new_mask = (*mask & ~clr) | set;
	ee_ctrl_mask = new_mask | *other_mask;
	if (ee_ctrl_mask != hba->ee_ctrl_mask)
		err = __ufshcd_write_ee_control(hba, ee_ctrl_mask);
	/* Still need to update 'mask' even if 'ee_ctrl_mask' was unchanged */
	if (!err) {
		hba->ee_ctrl_mask = ee_ctrl_mask;
		*mask = new_mask;
	}
	mutex_unlock(&hba->ee_ctrl_mutex);
	return err;
}
/**
 * ufshcd_disable_ee - disable exception event
 * @hba: per-adapter instance
 * @mask: exception event to disable
 *
 * Disables exception event in the device so that the EVENT_ALERT
 * bit is not set.
 *
 * Return: zero on success, non-zero error value on failure.
 */
static inline int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
{
	return ufshcd_update_ee_drv_mask(hba, 0, mask);
}

/**
 * ufshcd_enable_ee - enable exception event
 * @hba: per-adapter instance
 * @mask: exception event to enable
 *
 * Enable corresponding exception event in the device to allow
 * device to alert host in critical scenarios.
 *
 * Return: zero on success, non-zero error value on failure.
 */
static inline int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
{
	return ufshcd_update_ee_drv_mask(hba, mask, 0);
}
/**
 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
 * @hba: per-adapter instance
 *
 * Allow device to manage background operations on its own. Enabling
 * this might lead to inconsistent latencies during normal data transfers
 * as the device is allowed to manage its own way of handling background
 * operations.
 *
 * Return: zero on success, non-zero on failure.
 */
static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
{
	int err = 0;

	if (hba->auto_bkops_enabled)
		goto out;

	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
			QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
	if (err) {
		dev_err(hba->dev, "%s: failed to enable bkops %d\n",
				__func__, err);
		goto out;
	}

	hba->auto_bkops_enabled = true;
	trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");

	/* No need of URGENT_BKOPS exception from the device */
	err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
	if (err)
		dev_err(hba->dev, "%s: failed to disable exception event %d\n",
				__func__, err);
out:
	return err;
}
/**
 * ufshcd_disable_auto_bkops - prevent the device from doing background
 * operations on its own
 * @hba: per-adapter instance
 *
 * Disabling background operations improves command response latency but
 * has the drawback of the device moving into a critical state where it is
 * not-operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
 * host is idle so that BKOPS are managed effectively without any negative
 * impact.
 *
 * Return: zero on success, non-zero on failure.
 */
static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
{
	int err = 0;

	if (!hba->auto_bkops_enabled)
		goto out;

	/*
	 * If host assisted BKOPs is to be enabled, make sure
	 * urgent bkops exception is allowed.
	 */
	err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
	if (err) {
		dev_err(hba->dev, "%s: failed to enable exception event %d\n",
				__func__, err);
		goto out;
	}

	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
			QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
	if (err) {
		dev_err(hba->dev, "%s: failed to disable bkops %d\n",
				__func__, err);
		ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
		goto out;
	}

	hba->auto_bkops_enabled = false;
	trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
	hba->is_urgent_bkops_lvl_checked = false;
out:
	return err;
}
/**
 * ufshcd_force_reset_auto_bkops - force reset auto bkops state
 * @hba: per adapter instance
 *
 * After a device reset the device may toggle the BKOPS_EN flag
 * to default value. The s/w tracking variables should be updated
 * as well. This function would change the auto-bkops state based on
 * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
 */
static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
{
	if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
		hba->auto_bkops_enabled = false;
		hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
		ufshcd_enable_auto_bkops(hba);
	} else {
		hba->auto_bkops_enabled = true;
		hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
		ufshcd_disable_auto_bkops(hba);
	}
	hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
	hba->is_urgent_bkops_lvl_checked = false;
}
static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
{
	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
}

/**
 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
 * @hba: per-adapter instance
 * @status: bkops_status value
 *
 * Read the bkops_status from the UFS device and Enable fBackgroundOpsEn
 * flag in the device to permit background operations if the device
 * bkops_status is greater than or equal to "status" argument passed to
 * this function, disable otherwise.
 *
 * Return: 0 for success, non-zero in case of failure.
 *
 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
 * to know whether auto bkops is enabled or disabled after this function
 * returns control to it.
 */
static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
			     enum bkops_status status)
{
	int err;
	u32 curr_status = 0;

	err = ufshcd_get_bkops_status(hba, &curr_status);
	if (err) {
		dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
				__func__, err);
		goto out;
	} else if (curr_status > BKOPS_STATUS_MAX) {
		dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
				__func__, curr_status);
		err = -EINVAL;
		goto out;
	}

	if (curr_status >= status)
		err = ufshcd_enable_auto_bkops(hba);
	else
		err = ufshcd_disable_auto_bkops(hba);
out:
	return err;
}
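/*
 * Usage sketch (illustrative only): power management paths can re-evaluate
 * auto-BKOPS with a call such as
 *
 *	ufshcd_bkops_ctrl(hba, BKOPS_STATUS_NON_CRITICAL);
 *
 * keeping it enabled only while the device reports at least that status.
 */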
/**
 * ufshcd_urgent_bkops - handle urgent bkops exception event
 * @hba: per-adapter instance
 *
 * Enable fBackgroundOpsEn flag in the device to permit background
 * operations.
 *
 * If BKOPs is enabled, this function returns 0, 1 if the bkops is not
 * enabled, and negative error value for any other failure.
 *
 * Return: 0 upon success; < 0 upon failure.
 */
static int ufshcd_urgent_bkops(struct ufs_hba *hba)
{
	return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
}

static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
{
	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
}
static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
{
	int err;
	u32 curr_status = 0;

	if (hba->is_urgent_bkops_lvl_checked)
		goto enable_auto_bkops;

	err = ufshcd_get_bkops_status(hba, &curr_status);
	if (err) {
		dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
				__func__, err);
		goto out;
	}

	/*
	 * We are seeing that some devices are raising the urgent bkops
	 * exception events even when BKOPS status doesn't indicate performance
	 * impacted or critical. Handle these devices by determining their
	 * urgent bkops status at runtime.
	 */
	if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
		dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
				__func__, curr_status);
		/* update the current status as the urgent bkops level */
		hba->urgent_bkops_lvl = curr_status;
		hba->is_urgent_bkops_lvl_checked = true;
	}

enable_auto_bkops:
	err = ufshcd_enable_auto_bkops(hba);
out:
	if (err < 0)
		dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
				__func__, err);
}
static void ufshcd_temp_exception_event_handler(struct ufs_hba *hba, u16 status)
{
	u32 value;

	if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				QUERY_ATTR_IDN_CASE_ROUGH_TEMP, 0, 0, &value))
		return;

	dev_info(hba->dev, "exception Tcase %d\n", value - 80);

	ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP);

	/*
	 * A placeholder for the platform vendors to add whatever additional
	 * steps are needed
	 */
}
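/* Toggle a WriteBooster flag; @set selects SET_FLAG versus CLEAR_FLAG. */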
static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn)
{
	u8 index;
	enum query_opcode opcode = set ? UPIU_QUERY_OPCODE_SET_FLAG :
				   UPIU_QUERY_OPCODE_CLEAR_FLAG;

	index = ufshcd_wb_get_query_index(hba);
	return ufshcd_query_flag_retry(hba, opcode, idn, index, NULL);
}
int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable)
{
	int ret;

	if (!ufshcd_is_wb_allowed(hba) ||
	    hba->dev_info.wb_enabled == enable)
		return 0;

	ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_EN);
	if (ret) {
		dev_err(hba->dev, "%s: Write Booster %s failed %d\n",
			__func__, enable ? "enabling" : "disabling", ret);
		return ret;
	}

	hba->dev_info.wb_enabled = enable;
	dev_dbg(hba->dev, "%s: Write Booster %s\n",
			__func__, enable ? "enabled" : "disabled");

	return ret;
}
static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
						 bool enable)
{
	int ret;

	ret = __ufshcd_wb_toggle(hba, enable,
			QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8);
	if (ret) {
		dev_err(hba->dev, "%s: WB-Buf Flush during H8 %s failed %d\n",
			__func__, enable ? "enabling" : "disabling", ret);
		return;
	}
	dev_dbg(hba->dev, "%s: WB-Buf Flush during H8 %s\n",
			__func__, enable ? "enabled" : "disabled");
}

int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable)
{
	int ret;

	if (!ufshcd_is_wb_allowed(hba) ||
	    hba->dev_info.wb_buf_flush_enabled == enable)
		return 0;

	ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN);
	if (ret) {
		dev_err(hba->dev, "%s: WB-Buf Flush %s failed %d\n",
			__func__, enable ? "enabling" : "disabling", ret);
		return ret;
	}

	hba->dev_info.wb_buf_flush_enabled = enable;
	dev_dbg(hba->dev, "%s: WB-Buf Flush %s\n",
			__func__, enable ? "enabled" : "disabled");

	return ret;
}
static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
						u32 avail_buf)
{
	u32 cur_buf;
	int ret;
	u8 index;

	index = ufshcd_wb_get_query_index(hba);
	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				      QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE,
				      index, 0, &cur_buf);
	if (ret) {
		dev_err(hba->dev, "%s: dCurWriteBoosterBufferSize read failed %d\n",
			__func__, ret);
		return false;
	}

	if (!cur_buf) {
		dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n",
			 cur_buf);
		return false;
	}
	/* Let it continue to flush when available buffer exceeds threshold */
	return avail_buf < hba->vps->wb_flush_threshold;
}

static void ufshcd_wb_force_disable(struct ufs_hba *hba)
{
	if (ufshcd_is_wb_buf_flush_allowed(hba))
		ufshcd_wb_toggle_buf_flush(hba, false);

	ufshcd_wb_toggle_buf_flush_during_h8(hba, false);
	ufshcd_wb_toggle(hba, false);
	hba->caps &= ~UFSHCD_CAP_WB_EN;

	dev_info(hba->dev, "%s: WB force disabled\n", __func__);
}
static bool ufshcd_is_wb_buf_lifetime_available(struct ufs_hba *hba)
{
	int ret;
	u32 lifetime;
	u8 index;

	index = ufshcd_wb_get_query_index(hba);
	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				      QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST,
				      index, 0, &lifetime);
	if (ret) {
		dev_err(hba->dev,
			"%s: bWriteBoosterBufferLifeTimeEst read failed %d\n",
			__func__, ret);
		return false;
	}

	if (lifetime == UFS_WB_EXCEED_LIFETIME) {
		dev_err(hba->dev, "%s: WB buf lifetime is exhausted 0x%02X\n",
			__func__, lifetime);
		return false;
	}

	dev_dbg(hba->dev, "%s: WB buf lifetime is 0x%02X\n",
		__func__, lifetime);

	return true;
}
static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
{
	int ret;
	u32 avail_buf;
	u8 index;

	if (!ufshcd_is_wb_allowed(hba))
		return false;

	if (!ufshcd_is_wb_buf_lifetime_available(hba)) {
		ufshcd_wb_force_disable(hba);
		return false;
	}

	/*
	 * The ufs device needs the vcc to be ON to flush.
	 * With user-space reduction enabled, it's enough to enable flush
	 * by checking only the available buffer. The threshold
	 * defined here is > 90% full.
	 * With user-space preserved enabled, the current-buffer
	 * should be checked too because the wb buffer size can reduce
	 * when disk tends to be full. This info is provided by current
	 * buffer (dCurrentWriteBoosterBufferSize). There's no point in
	 * keeping vcc on when current buffer is empty.
	 */
	index = ufshcd_wb_get_query_index(hba);
	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				      QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE,
				      index, 0, &avail_buf);
	if (ret) {
		dev_warn(hba->dev, "%s: dAvailableWriteBoosterBufferSize read failed %d\n",
			 __func__, ret);
		return false;
	}

	if (!hba->dev_info.b_presrv_uspc_en)
		return avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10);

	return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
}
static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(to_delayed_work(work),
					   struct ufs_hba,
					   rpm_dev_flush_recheck_work);
	/*
	 * To prevent unnecessary VCC power drain after device finishes
	 * WriteBooster buffer flush or Auto BKOPs, force runtime resume
	 * after a certain delay to recheck the threshold by next runtime
	 * suspend.
	 */
	ufshcd_rpm_get_sync(hba);
	ufshcd_rpm_put_sync(hba);
}
/**
 * ufshcd_exception_event_handler - handle exceptions raised by device
 * @work: pointer to work data
 *
 * Read bExceptionEventStatus attribute from the device and handle the
 * exception event accordingly.
 */
static void ufshcd_exception_event_handler(struct work_struct *work)
{
	struct ufs_hba *hba;
	int err;
	u32 status = 0;

	hba = container_of(work, struct ufs_hba, eeh_work);

	ufshcd_scsi_block_requests(hba);
	err = ufshcd_get_ee_status(hba, &status);
	if (err) {
		dev_err(hba->dev, "%s: failed to get exception status %d\n",
				__func__, err);
		goto out;
	}

	trace_ufshcd_exception_event(dev_name(hba->dev), status);

	if (status & hba->ee_drv_mask & MASK_EE_URGENT_BKOPS)
		ufshcd_bkops_exception_event_handler(hba);

	if (status & hba->ee_drv_mask & MASK_EE_URGENT_TEMP)
		ufshcd_temp_exception_event_handler(hba, status);

	ufs_debugfs_exception_event(hba, status);
out:
	ufshcd_scsi_unblock_requests(hba);
}
/* Complete requests that have door-bell cleared */
static void ufshcd_complete_requests(struct ufs_hba *hba, bool force_compl)
{
	if (is_mcq_enabled(hba))
		ufshcd_mcq_compl_pending_transfer(hba, force_compl);
	else
		ufshcd_transfer_req_compl(hba);

	ufshcd_tmc_handler(hba);
}
/**
 * ufshcd_quirk_dl_nac_errors - This function checks if error handling is
 * required to recover from the DL NAC errors or not.
 * @hba: per-adapter instance
 *
 * Return: true if error handling is required, false otherwise.
 */
static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
{
	unsigned long flags;
	bool err_handling = true;

	spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around the
	 * device fatal error and/or DL NAC & REPLAY timeout errors.
	 */
	if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
		goto out;

	if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
	    ((hba->saved_err & UIC_ERROR) &&
	     (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
		goto out;

	if ((hba->saved_err & UIC_ERROR) &&
	    (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
		int err;

		/*
		 * wait for 50ms to see if we can get any other errors or not.
		 */
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		msleep(50);
		spin_lock_irqsave(hba->host->host_lock, flags);

		/*
		 * now check if we have got any other severe errors other than
		 * DL NAC error?
		 */
		if ((hba->saved_err & INT_FATAL_ERRORS) ||
		    ((hba->saved_err & UIC_ERROR) &&
		    (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
			goto out;

		/*
		 * As DL NAC is the only error received so far, send out NOP
		 * command to confirm if link is still active or not.
		 *   - If we don't get any response then do error recovery.
		 *   - If we get response then clear the DL NAC error bit.
		 */

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		err = ufshcd_verify_dev_init(hba);
		spin_lock_irqsave(hba->host->host_lock, flags);

		if (err)
			goto out;

		/* Link seems to be alive hence ignore the DL NAC errors */
		if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
			hba->saved_err &= ~UIC_ERROR;
		/* clear NAC error */
		hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
		if (!hba->saved_uic_err)
			err_handling = false;
	}
out:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return err_handling;
}
/* host lock must be held before calling this func */
static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
{
	return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
	       (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
}
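/**
 * ufshcd_schedule_eh_work - schedule error handler work
 * @hba: per adapter instance
 *
 * The SCSI host lock must be held when calling this function.
 */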
void ufshcd_schedule_eh_work(struct ufs_hba *hba)
{
	lockdep_assert_held(hba->host->host_lock);

	/* handle fatal errors only when link is not in error state */
	if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
		if (hba->force_reset || ufshcd_is_link_broken(hba) ||
		    ufshcd_is_saved_err_fatal(hba))
			hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
		else
			hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
		queue_work(hba->eh_wq, &hba->eh_work);
	}
}
static void ufshcd_force_error_recovery(struct ufs_hba *hba)
{
	spin_lock_irq(hba->host->host_lock);
	hba->force_reset = true;
	ufshcd_schedule_eh_work(hba);
	spin_unlock_irq(hba->host->host_lock);
}

static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
{
	mutex_lock(&hba->wb_mutex);
	down_write(&hba->clk_scaling_lock);
	hba->clk_scaling.is_allowed = allow;
	up_write(&hba->clk_scaling_lock);
	mutex_unlock(&hba->wb_mutex);
}

static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
{
	if (suspend) {
		if (hba->clk_scaling.is_enabled)
			ufshcd_suspend_clkscaling(hba);
		ufshcd_clk_scaling_allow(hba, false);
	} else {
		ufshcd_clk_scaling_allow(hba, true);
		if (hba->clk_scaling.is_enabled)
			ufshcd_resume_clkscaling(hba);
	}
}
static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
{
	ufshcd_rpm_get_sync(hba);
	if (pm_runtime_status_suspended(&hba->ufs_device_wlun->sdev_gendev) ||
	    hba->is_sys_suspended) {
		enum ufs_pm_op pm_op;

		/*
		 * Don't assume anything of resume, if
		 * resume fails, irq and clocks can be OFF, and powers
		 * can be OFF or in LPM.
		 */
		ufshcd_setup_hba_vreg(hba, true);
		ufshcd_enable_irq(hba);
		ufshcd_setup_vreg(hba, true);
		ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
		ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
		ufshcd_hold(hba);
		if (!ufshcd_is_clkgating_allowed(hba))
			ufshcd_setup_clocks(hba, true);
		ufshcd_release(hba);
		pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM;
		ufshcd_vops_resume(hba, pm_op);
	} else {
		ufshcd_hold(hba);
		if (ufshcd_is_clkscaling_supported(hba) &&
		    hba->clk_scaling.is_enabled)
			ufshcd_suspend_clkscaling(hba);
		ufshcd_clk_scaling_allow(hba, false);
	}
	ufshcd_scsi_block_requests(hba);
	/* Wait for ongoing ufshcd_queuecommand() calls to finish. */
	blk_mq_wait_quiesce_done(&hba->host->tag_set);
	cancel_work_sync(&hba->eeh_work);
}

static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
{
	ufshcd_scsi_unblock_requests(hba);
	ufshcd_release(hba);
	if (ufshcd_is_clkscaling_supported(hba))
		ufshcd_clk_scaling_suspend(hba, false);
	ufshcd_rpm_put(hba);
}
static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
{
	return (!hba->is_powered || hba->shutting_down ||
		!hba->ufs_device_wlun ||
		hba->ufshcd_state == UFSHCD_STATE_ERROR ||
		(!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
		   ufshcd_is_link_broken(hba))));
}
#ifdef CONFIG_PM
static void ufshcd_recover_pm_error(struct ufs_hba *hba)
{
	struct Scsi_Host *shost = hba->host;
	struct scsi_device *sdev;
	struct request_queue *q;
	int ret;

	hba->is_sys_suspended = false;
	/*
	 * Set RPM status of wlun device to RPM_ACTIVE,
	 * this also clears its runtime error.
	 */
	ret = pm_runtime_set_active(&hba->ufs_device_wlun->sdev_gendev);

	/* hba device might have a runtime error otherwise */
	if (!ret)
		ret = pm_runtime_set_active(hba->dev);
	/*
	 * If wlun device had runtime error, we also need to resume those
	 * consumer scsi devices in case any of them has failed to be
	 * resumed due to supplier runtime resume failure. This is to unblock
	 * blk_queue_enter in case there are bios waiting inside it.
	 */
	if (!ret) {
		shost_for_each_device(sdev, shost) {
			q = sdev->request_queue;
			if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
				       q->rpm_status == RPM_SUSPENDING))
				pm_request_resume(q->dev);
		}
	}
}
#else
static inline void ufshcd_recover_pm_error(struct ufs_hba *hba)
{
}
#endif
static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba)
{
	struct ufs_pa_layer_attr *pwr_info = &hba->pwr_info;
	u32 mode;

	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);

	if (pwr_info->pwr_rx != ((mode >> PWRMODE_RX_OFFSET) & PWRMODE_MASK))
		return true;

	if (pwr_info->pwr_tx != (mode & PWRMODE_MASK))
		return true;

	return false;
}
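
/*
 * PA_PWRMODE packs the TX power mode in the low bits and the RX power mode
 * above it (shifted by PWRMODE_RX_OFFSET), which is why the RX comparison
 * above shifts and masks while the TX comparison only masks.
 */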
static bool ufshcd_abort_one(struct request *rq, void *priv)
{
	int *ret = priv;
	u32 tag = rq->tag;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
	struct scsi_device *sdev = cmd->device;
	struct Scsi_Host *shost = sdev->host;
	struct ufs_hba *hba = shost_priv(shost);

	*ret = ufshcd_try_to_abort_task(hba, tag);
	dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
		hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
		*ret ? "failed" : "succeeded");

	return *ret == 0;
}
/**
 * ufshcd_abort_all - Abort all pending commands.
 * @hba: Host bus adapter pointer.
 *
 * Return: true if and only if the host controller needs to be reset.
 */
static bool ufshcd_abort_all(struct ufs_hba *hba)
{
	int tag, ret = 0;

	blk_mq_tagset_busy_iter(&hba->host->tag_set, ufshcd_abort_one, &ret);
	if (ret)
		goto out;

	/* Clear pending task management requests */
	for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
		ret = ufshcd_clear_tm_cmd(hba, tag);
		if (ret)
			goto out;
	}

out:
	/* Complete the requests that are cleared by s/w */
	ufshcd_complete_requests(hba, false);

	return ret != 0;
}
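
/*
 * A sketch of how the error handler consumes ufshcd_abort_all(): a non-zero
 * result from any individual abort or TM-clear escalates to a full host
 * reset, e.g.:
 *
 *	needs_reset = ufshcd_abort_all(hba);
 *	if (needs_reset)
 *		goto do_reset;
 */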
/**
 * ufshcd_err_handler - handle UFS errors that require s/w attention
 * @work: pointer to work structure
 */
static void ufshcd_err_handler(struct work_struct *work)
{
	int retries = MAX_ERR_HANDLER_RETRIES;
	struct ufs_hba *hba;
	unsigned long flags;
	bool needs_restore;
	bool needs_reset;
	int pmc_err;

	hba = container_of(work, struct ufs_hba, eh_work);

	dev_info(hba->dev,
		 "%s started; HBA state %s; powered %d; shutting down %d; saved_err = %d; saved_uic_err = %d; force_reset = %d%s\n",
		 __func__, ufshcd_state_name[hba->ufshcd_state],
		 hba->is_powered, hba->shutting_down, hba->saved_err,
		 hba->saved_uic_err, hba->force_reset,
		 ufshcd_is_link_broken(hba) ? "; link is broken" : "");

	down(&hba->host_sem);
	spin_lock_irqsave(hba->host->host_lock, flags);
	if (ufshcd_err_handling_should_stop(hba)) {
		if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
			hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		up(&hba->host_sem);
		return;
	}
	ufshcd_set_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_err_handling_prepare(hba);
	/* Complete requests that have door-bell cleared by h/w */
	ufshcd_complete_requests(hba, false);
	spin_lock_irqsave(hba->host->host_lock, flags);
again:
	needs_restore = false;
	needs_reset = false;

	if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
		hba->ufshcd_state = UFSHCD_STATE_RESET;
	/*
	 * A full reset and restore might have happened after preparation
	 * is finished, double check whether we should stop.
	 */
	if (ufshcd_err_handling_should_stop(hba))
		goto skip_err_handling;

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
		bool ret;

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		/* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
		ret = ufshcd_quirk_dl_nac_errors(hba);
		spin_lock_irqsave(hba->host->host_lock, flags);
		if (!ret && ufshcd_err_handling_should_stop(hba))
			goto skip_err_handling;
	}

	if ((hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
	    (hba->saved_uic_err &&
	     (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
		bool pr_prdt = !!(hba->saved_err & SYSTEM_BUS_FATAL_ERROR);

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		ufshcd_print_host_state(hba);
		ufshcd_print_pwr_info(hba);
		ufshcd_print_evt_hist(hba);
		ufshcd_print_tmrs(hba, hba->outstanding_tasks);
		ufshcd_print_trs_all(hba, pr_prdt);
		spin_lock_irqsave(hba->host->host_lock, flags);
	}

	/*
	 * if host reset is required then skip clearing the pending
	 * transfers forcefully because they will get cleared during
	 * host reset and restore
	 */
	if (hba->force_reset || ufshcd_is_link_broken(hba) ||
	    ufshcd_is_saved_err_fatal(hba) ||
	    ((hba->saved_err & UIC_ERROR) &&
	     (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
				    UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))) {
		needs_reset = true;
		goto do_reset;
	}

	/*
	 * If LINERESET was caught, UFS might have been put to PWM mode,
	 * check if power mode restore is needed.
	 */
	if (hba->saved_uic_err & UFSHCD_UIC_PA_GENERIC_ERROR) {
		hba->saved_uic_err &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
		if (!hba->saved_uic_err)
			hba->saved_err &= ~UIC_ERROR;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		if (ufshcd_is_pwr_mode_restore_needed(hba))
			needs_restore = true;
		spin_lock_irqsave(hba->host->host_lock, flags);
		if (!hba->saved_err && !needs_restore)
			goto skip_err_handling;
	}

	hba->silence_err_logs = true;
	/* release lock as clear command might sleep */
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	needs_reset = ufshcd_abort_all(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->silence_err_logs = false;
	if (needs_reset)
		goto do_reset;

	/*
	 * After all reqs and tasks are cleared from doorbell,
	 * now it is safe to restore power mode.
	 */
	if (needs_restore) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		/*
		 * Hold the scaling lock just in case dev cmds
		 * are sent via bsg and/or sysfs.
		 */
		down_write(&hba->clk_scaling_lock);
		hba->force_pmc = true;
		pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
		if (pmc_err) {
			needs_reset = true;
			dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n",
					__func__, pmc_err);
		}
		hba->force_pmc = false;
		ufshcd_print_pwr_info(hba);
		up_write(&hba->clk_scaling_lock);
		spin_lock_irqsave(hba->host->host_lock, flags);
	}

do_reset:
	/* Fatal errors need reset */
	if (needs_reset) {
		int err;

		hba->force_reset = false;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		err = ufshcd_reset_and_restore(hba);
		if (err)
			dev_err(hba->dev, "%s: reset and restore failed with err %d\n",
					__func__, err);
		else
			ufshcd_recover_pm_error(hba);
		spin_lock_irqsave(hba->host->host_lock, flags);
	}

skip_err_handling:
	if (!needs_reset) {
		if (hba->ufshcd_state == UFSHCD_STATE_RESET)
			hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
		if (hba->saved_err || hba->saved_uic_err)
			dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
			    __func__, hba->saved_err, hba->saved_uic_err);
	}
	/* Exit in an operational state or dead */
	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL &&
	    hba->ufshcd_state != UFSHCD_STATE_ERROR) {
		if (--retries)
			goto again;
		hba->ufshcd_state = UFSHCD_STATE_ERROR;
	}
	ufshcd_clear_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_err_handling_unprepare(hba);
	up(&hba->host_sem);

	dev_info(hba->dev, "%s finished; HBA state %s\n", __func__,
		 ufshcd_state_name[hba->ufshcd_state]);
}
/**
 * ufshcd_update_uic_error - check and set fatal UIC error flags.
 * @hba: per-adapter instance
 *
 * Return:
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
{
	u32 reg;
	irqreturn_t retval = IRQ_NONE;

	/* PHY layer error */
	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
	if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
	    (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
		ufshcd_update_evt_hist(hba, UFS_EVT_PA_ERR, reg);
		/*
		 * To know whether this error is fatal or not, DB timeout
		 * must be checked but this error is handled separately.
		 */
		if (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)
			dev_dbg(hba->dev, "%s: UIC Lane error reported\n",
					__func__);

		/* Got a LINERESET indication. */
		if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) {
			struct uic_command *cmd = NULL;

			hba->uic_error |= UFSHCD_UIC_PA_GENERIC_ERROR;
			if (hba->uic_async_done && hba->active_uic_cmd)
				cmd = hba->active_uic_cmd;
			/*
			 * Ignore the LINERESET during power mode change
			 * operation via DME_SET command.
			 */
			if (cmd && (cmd->command == UIC_CMD_DME_SET))
				hba->uic_error &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
		}
		retval |= IRQ_HANDLED;
	}

	/* PA_INIT_ERROR is fatal and needs UIC reset */
	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
	if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
	    (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
		ufshcd_update_evt_hist(hba, UFS_EVT_DL_ERR, reg);

		if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
			hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
		else if (hba->dev_quirks &
				UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
			if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
				hba->uic_error |=
					UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
			else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
				hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
		}
		retval |= IRQ_HANDLED;
	}

	/* UIC NL/TL/DME errors needs software retry */
	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
	if ((reg & UIC_NETWORK_LAYER_ERROR) &&
	    (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
		ufshcd_update_evt_hist(hba, UFS_EVT_NL_ERR, reg);
		hba->uic_error |= UFSHCD_UIC_NL_ERROR;
		retval |= IRQ_HANDLED;
	}

	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
	if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
	    (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
		ufshcd_update_evt_hist(hba, UFS_EVT_TL_ERR, reg);
		hba->uic_error |= UFSHCD_UIC_TL_ERROR;
		retval |= IRQ_HANDLED;
	}

	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
	if ((reg & UIC_DME_ERROR) &&
	    (reg & UIC_DME_ERROR_CODE_MASK)) {
		ufshcd_update_evt_hist(hba, UFS_EVT_DME_ERR, reg);
		hba->uic_error |= UFSHCD_UIC_DME_ERROR;
		retval |= IRQ_HANDLED;
	}

	dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
			__func__, hba->uic_error);
	return retval;
}
/**
 * ufshcd_check_errors - Check for errors that need s/w attention
 * @hba: per-adapter instance
 * @intr_status: interrupt status generated by the controller
 *
 * Return:
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
{
	bool queue_eh_work = false;
	irqreturn_t retval = IRQ_NONE;

	spin_lock(hba->host->host_lock);
	hba->errors |= UFSHCD_ERROR_MASK & intr_status;

	if (hba->errors & INT_FATAL_ERRORS) {
		ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR,
				       hba->errors);
		queue_eh_work = true;
	}

	if (hba->errors & UIC_ERROR) {
		hba->uic_error = 0;
		retval = ufshcd_update_uic_error(hba);
		if (hba->uic_error)
			queue_eh_work = true;
	}

	if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) {
		dev_err(hba->dev,
			"%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
			__func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
			"Enter" : "Exit",
			hba->errors, ufshcd_get_upmcrs(hba));
		ufshcd_update_evt_hist(hba, UFS_EVT_AUTO_HIBERN8_ERR,
				       hba->errors);
		ufshcd_set_link_broken(hba);
		queue_eh_work = true;
	}

	if (queue_eh_work) {
		/*
		 * update the transfer error masks to sticky bits, let's do this
		 * irrespective of current ufshcd_state.
		 */
		hba->saved_err |= hba->errors;
		hba->saved_uic_err |= hba->uic_error;

		/* dump controller state before resetting */
		if ((hba->saved_err &
		     (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
		    (hba->saved_uic_err &&
		     (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
			dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
					__func__, hba->saved_err,
					hba->saved_uic_err);
			ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE,
					 "host_regs: ");
			ufshcd_print_pwr_info(hba);
		}
		ufshcd_schedule_eh_work(hba);
		retval |= IRQ_HANDLED;
	}
	/*
	 * if (!queue_eh_work) -
	 * Other errors are either non-fatal where host recovers
	 * itself without s/w intervention or errors that will be
	 * handled by the SCSI core layer.
	 */
	hba->errors = 0;
	hba->uic_error = 0;
	spin_unlock(hba->host->host_lock);

	return retval;
}
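
/*
 * In the task management completion path below, "issued & ~pending" isolates
 * the TM slots the driver has issued whose doorbell bits the controller has
 * since cleared, i.e. the requests that have actually completed.
 */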
/**
 * ufshcd_tmc_handler - handle task management function completion
 * @hba: per adapter instance
 *
 * Return:
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
{
	unsigned long flags, pending, issued;
	irqreturn_t ret = IRQ_NONE;
	int tag;

	spin_lock_irqsave(hba->host->host_lock, flags);
	pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
	issued = hba->outstanding_tasks & ~pending;
	for_each_set_bit(tag, &issued, hba->nutmrs) {
		struct request *req = hba->tmf_rqs[tag];
		struct completion *c = req->end_io_data;

		complete(c);
		ret = IRQ_HANDLED;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return ret;
}
/**
 * ufshcd_handle_mcq_cq_events - handle MCQ completion queue events
 * @hba: per adapter instance
 *
 * Return: IRQ_HANDLED if interrupt is handled.
 */
static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba)
{
	struct ufs_hw_queue *hwq;
	unsigned long outstanding_cqs;
	unsigned int nr_queues;
	int i, ret;
	u32 events;

	ret = ufshcd_vops_get_outstanding_cqs(hba, &outstanding_cqs);
	if (ret)
		outstanding_cqs = (1U << hba->nr_hw_queues) - 1;

	/* Exclude the poll queues */
	nr_queues = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
	for_each_set_bit(i, &outstanding_cqs, nr_queues) {
		hwq = &hba->uhq[i];

		events = ufshcd_mcq_read_cqis(hba, i);
		if (events)
			ufshcd_mcq_write_cqis(hba, events, i);

		if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
			ufshcd_mcq_poll_cqe_lock(hba, hwq);
	}

	return IRQ_HANDLED;
}
/**
 * ufshcd_sl_intr - Interrupt service routine
 * @hba: per adapter instance
 * @intr_status: contains interrupts generated by the controller
 *
 * Return:
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
{
	irqreturn_t retval = IRQ_NONE;

	if (intr_status & UFSHCD_UIC_MASK)
		retval |= ufshcd_uic_cmd_compl(hba, intr_status);

	if (intr_status & UFSHCD_ERROR_MASK || hba->errors)
		retval |= ufshcd_check_errors(hba, intr_status);

	if (intr_status & UTP_TASK_REQ_COMPL)
		retval |= ufshcd_tmc_handler(hba);

	if (intr_status & UTP_TRANSFER_REQ_COMPL)
		retval |= ufshcd_transfer_req_compl(hba);

	if (intr_status & MCQ_CQ_EVENT_STATUS)
		retval |= ufshcd_handle_mcq_cq_events(hba);

	return retval;
}
/**
 * ufshcd_intr - Main interrupt service routine
 * @irq: irq number
 * @__hba: pointer to adapter instance
 *
 * Return:
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_intr(int irq, void *__hba)
{
	u32 intr_status, enabled_intr_status = 0;
	irqreturn_t retval = IRQ_NONE;
	struct ufs_hba *hba = __hba;
	int retries = hba->nutrs;

	intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
	hba->ufs_stats.last_intr_status = intr_status;
	hba->ufs_stats.last_intr_ts = local_clock();

	/*
	 * There could be max of hba->nutrs reqs in flight and in worst case
	 * if the reqs get finished 1 by 1 after the interrupt status is
	 * read, make sure we handle them by checking the interrupt status
	 * again in a loop until we process all of the reqs before returning.
	 */
	while (intr_status && retries--) {
		enabled_intr_status =
			intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
		ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
		if (enabled_intr_status)
			retval |= ufshcd_sl_intr(hba, enabled_intr_status);

		intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
	}

	if (enabled_intr_status && retval == IRQ_NONE &&
	    (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL) ||
	     hba->outstanding_reqs) && !ufshcd_eh_in_progress(hba)) {
		dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
					__func__,
					intr_status,
					hba->ufs_stats.last_intr_status,
					enabled_intr_status);
		ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
	}

	return retval;
}
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
{
	int err = 0;
	u32 mask = 1 << tag;
	unsigned long flags;

	if (!test_bit(tag, &hba->outstanding_tasks))
		goto out;

	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_utmrl_clear(hba, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* poll for max. 1 sec to clear door bell register by h/w */
	err = ufshcd_wait_for_register(hba,
			REG_UTP_TASK_REQ_DOOR_BELL,
			mask, 0, 1000, 1000);

	dev_err(hba->dev, "Clearing task management function with tag %d %s\n",
		tag, err ? "failed" : "succeeded");

out:
	return err;
}
static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
		struct utp_task_req_desc *treq, u8 tm_function)
{
	struct request_queue *q = hba->tmf_queue;
	struct Scsi_Host *host = hba->host;
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request *req;
	unsigned long flags;
	int task_tag, err;

	/*
	 * blk_mq_alloc_request() is used here only to get a free tag.
	 */
	req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->end_io_data = &wait;
	ufshcd_hold(hba);

	spin_lock_irqsave(host->host_lock, flags);

	task_tag = req->tag;
	WARN_ONCE(task_tag < 0 || task_tag >= hba->nutmrs, "Invalid tag %d\n",
		  task_tag);
	hba->tmf_rqs[req->tag] = req;
	treq->upiu_req.req_header.task_tag = task_tag;

	memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
	ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function);

	/* send command to the controller */
	__set_bit(task_tag, &hba->outstanding_tasks);

	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
	/* Make sure that doorbell is committed immediately */
	wmb();

	spin_unlock_irqrestore(host->host_lock, flags);

	ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_SEND);

	/* wait until the task management command is completed */
	err = wait_for_completion_io_timeout(&wait,
			msecs_to_jiffies(TM_CMD_TIMEOUT));
	if (!err) {
		ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR);
		dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
				__func__, tm_function);
		if (ufshcd_clear_tm_cmd(hba, task_tag))
			dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
					__func__, task_tag);
		err = -ETIMEDOUT;
	} else {
		err = 0;
		memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq));

		ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_COMP);
	}

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->tmf_rqs[req->tag] = NULL;
	__clear_bit(task_tag, &hba->outstanding_tasks);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	ufshcd_release(hba);
	blk_mq_free_request(req);

	return err;
}
/**
 * ufshcd_issue_tm_cmd - issues task management commands to controller
 * @hba: per adapter instance
 * @lun_id: LUN ID to which TM command is sent
 * @task_id: task ID to which the TM command is applicable
 * @tm_function: task management function opcode
 * @tm_response: task management service response return value
 *
 * Return: non-zero value on error, zero on success.
 */
static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
		u8 tm_function, u8 *tm_response)
{
	struct utp_task_req_desc treq = { };
	enum utp_ocs ocs_value;
	int err;

	/* Configure task request descriptor */
	treq.header.interrupt = 1;
	treq.header.ocs = OCS_INVALID_COMMAND_STATUS;

	/* Configure task request UPIU */
	treq.upiu_req.req_header.transaction_code = UPIU_TRANSACTION_TASK_REQ;
	treq.upiu_req.req_header.lun = lun_id;
	treq.upiu_req.req_header.tm_function = tm_function;

	/*
	 * The host shall provide the same value for LUN field in the basic
	 * header and for Input Parameter.
	 */
	treq.upiu_req.input_param1 = cpu_to_be32(lun_id);
	treq.upiu_req.input_param2 = cpu_to_be32(task_id);

	err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
	if (err == -ETIMEDOUT)
		return err;

	ocs_value = treq.header.ocs & MASK_OCS;
	if (ocs_value != OCS_SUCCESS)
		dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
				__func__, ocs_value);
	else if (tm_response)
		*tm_response = be32_to_cpu(treq.upiu_rsp.output_param1) &
				MASK_TM_SERVICE_RESP;
	return err;
}
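
/*
 * Example call, as in ufshcd_eh_device_reset_handler() further below: issue a
 * LOGICAL UNIT RESET to a LUN and check the service response:
 *
 *	u8 resp = 0xF;
 *	int err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
 *
 *	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL)
 *		... treat the reset as failed ...
 */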
/**
 * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
 * @hba:	per-adapter instance
 * @req_upiu:	upiu request
 * @rsp_upiu:	upiu reply
 * @desc_buff:	pointer to descriptor buffer, NULL if NA
 * @buff_len:	descriptor size, 0 if NA
 * @cmd_type:	specifies the type (NOP, Query...)
 * @desc_op:	descriptor operation
 *
 * These types of requests use the UTP Transfer Request Descriptor - utrd.
 * Therefore, they "ride" the device management infrastructure: they use its
 * tag and task work queues.
 *
 * Since there is only one available tag for device management commands,
 * the caller is expected to hold the hba->dev_cmd.lock mutex.
 *
 * Return: 0 upon success; < 0 upon failure.
 */
static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
					struct utp_upiu_req *req_upiu,
					struct utp_upiu_req *rsp_upiu,
					u8 *desc_buff, int *buff_len,
					enum dev_cmd_type cmd_type,
					enum query_opcode desc_op)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	const u32 tag = hba->reserved_slot;
	struct ufshcd_lrb *lrbp;
	int err = 0;
	u8 upiu_flags;

	/* Protects use of hba->reserved_slot. */
	lockdep_assert_held(&hba->dev_cmd.lock);

	down_read(&hba->clk_scaling_lock);

	lrbp = &hba->lrb[tag];
	lrbp->cmd = NULL;
	lrbp->task_tag = tag;
	lrbp->lun = 0;
	lrbp->intr_cmd = true;
	ufshcd_prepare_lrbp_crypto(NULL, lrbp);
	hba->dev_cmd.type = cmd_type;

	if (hba->ufs_version <= ufshci_version(1, 1))
		lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
	else
		lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;

	/* update the task tag in the request upiu */
	req_upiu->header.task_tag = tag;

	ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0);

	/* just copy the upiu request as it is */
	memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
	if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
		/* The Data Segment Area is optional depending upon the query
		 * function value. for WRITE DESCRIPTOR, the data segment
		 * follows right after the tsf.
		 */
		memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
		*buff_len = 0;
	}

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));

	hba->dev_cmd.complete = &wait;

	ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);

	ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
	/*
	 * ignore the returning value here - ufshcd_check_query_response is
	 * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
	 * read the response directly ignoring all errors.
	 */
	ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);

	/* just copy the upiu response as it is */
	memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
	if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
		u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu);
		u16 resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header
					   .data_segment_length);

		if (*buff_len >= resp_len) {
			memcpy(desc_buff, descp, resp_len);
			*buff_len = resp_len;
		} else {
			dev_warn(hba->dev,
				 "%s: rsp size %d is bigger than buffer size %d",
				 __func__, resp_len, *buff_len);
			*buff_len = 0;
			err = -EINVAL;
		}
	}
	ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
				    (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);

	up_read(&hba->clk_scaling_lock);
	return err;
}
/**
 * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
 * @hba:	per-adapter instance
 * @req_upiu:	upiu request
 * @rsp_upiu:	upiu reply - only 8 DW as we do not support scsi commands
 * @msgcode:	message code, one of UPIU Transaction Codes Initiator to Target
 * @desc_buff:	pointer to descriptor buffer, NULL if NA
 * @buff_len:	descriptor size, 0 if NA
 * @desc_op:	descriptor operation
 *
 * Supports UTP Transfer requests (nop and query), and UTP Task
 * Management requests.
 * It is up to the caller to fill the upiu content properly, as it will
 * be copied without any further input validations.
 *
 * Return: 0 upon success; < 0 upon failure.
 */
int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
			     struct utp_upiu_req *req_upiu,
			     struct utp_upiu_req *rsp_upiu,
			     enum upiu_request_transaction msgcode,
			     u8 *desc_buff, int *buff_len,
			     enum query_opcode desc_op)
{
	int err;
	enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
	struct utp_task_req_desc treq = { };
	enum utp_ocs ocs_value;
	u8 tm_f = req_upiu->header.tm_function;

	switch (msgcode) {
	case UPIU_TRANSACTION_NOP_OUT:
		cmd_type = DEV_CMD_TYPE_NOP;
		fallthrough;
	case UPIU_TRANSACTION_QUERY_REQ:
		ufshcd_hold(hba);
		mutex_lock(&hba->dev_cmd.lock);
		err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
						   desc_buff, buff_len,
						   cmd_type, desc_op);
		mutex_unlock(&hba->dev_cmd.lock);
		ufshcd_release(hba);

		break;
	case UPIU_TRANSACTION_TASK_REQ:
		treq.header.interrupt = 1;
		treq.header.ocs = OCS_INVALID_COMMAND_STATUS;

		memcpy(&treq.upiu_req, req_upiu, sizeof(*req_upiu));

		err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
		if (err == -ETIMEDOUT)
			break;

		ocs_value = treq.header.ocs & MASK_OCS;
		if (ocs_value != OCS_SUCCESS) {
			dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
				ocs_value);
			break;
		}

		memcpy(rsp_upiu, &treq.upiu_rsp, sizeof(*rsp_upiu));

		break;
	default:
		err = -EINVAL;

		break;
	}

	return err;
}
/**
 * ufshcd_advanced_rpmb_req_handler - handle advanced RPMB request
 * @hba:	per adapter instance
 * @req_upiu:	upiu request
 * @rsp_upiu:	upiu reply
 * @req_ehs:	EHS field which contains Advanced RPMB Request Message
 * @rsp_ehs:	EHS field which returns Advanced RPMB Response Message
 * @sg_cnt:	The number of sg lists actually used
 * @sg_list:	Pointer to SG list when DATA IN/OUT UPIU is required in ARPMB operation
 * @dir:	DMA direction
 *
 * Return: zero on success, non-zero on failure.
 */
int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu,
			 struct utp_upiu_req *rsp_upiu, struct ufs_ehs *req_ehs,
			 struct ufs_ehs *rsp_ehs, int sg_cnt, struct scatterlist *sg_list,
			 enum dma_data_direction dir)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	const u32 tag = hba->reserved_slot;
	struct ufshcd_lrb *lrbp;
	int err = 0;
	int result;
	u8 upiu_flags;
	u8 *ehs_data;
	u16 ehs_len;

	/* Protects use of hba->reserved_slot. */
	ufshcd_hold(hba);
	mutex_lock(&hba->dev_cmd.lock);
	down_read(&hba->clk_scaling_lock);

	lrbp = &hba->lrb[tag];
	lrbp->cmd = NULL;
	lrbp->task_tag = tag;
	lrbp->lun = UFS_UPIU_RPMB_WLUN;

	lrbp->intr_cmd = true;
	ufshcd_prepare_lrbp_crypto(NULL, lrbp);
	hba->dev_cmd.type = DEV_CMD_TYPE_RPMB;

	/* Advanced RPMB starts from UFS 4.0, so its command type is UTP_CMD_TYPE_UFS_STORAGE */
	lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;

	/*
	 * According to UFSHCI 4.0 specification page 24, if EHSLUTRDS is 0, the host
	 * controller takes the EHS length from the CMD UPIU, and the SW driver uses
	 * the EHS Length field in the CMD UPIU. If it is 1, the HW controller takes
	 * the EHS length from the UTRD.
	 */
	if (hba->capabilities & MASK_EHSLUTRD_SUPPORTED)
		ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 2);
	else
		ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 0);

	/* update the task tag */
	req_upiu->header.task_tag = tag;

	/* copy the UPIU(contains CDB) request as it is */
	memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
	/* Copy EHS, starting with byte32, immediately after the CDB package */
	memcpy(lrbp->ucd_req_ptr + 1, req_ehs, sizeof(*req_ehs));

	if (dir != DMA_NONE && sg_list)
		ufshcd_sgl_to_prdt(hba, lrbp, sg_cnt, sg_list);

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));

	hba->dev_cmd.complete = &wait;

	ufshcd_send_command(hba, tag, hba->dev_cmd_queue);

	err = ufshcd_wait_for_dev_cmd(hba, lrbp, ADVANCED_RPMB_REQ_TIMEOUT);

	if (!err) {
		/* Just copy the upiu response as it is */
		memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
		/* Get the response UPIU result */
		result = (lrbp->ucd_rsp_ptr->header.response << 8) |
			lrbp->ucd_rsp_ptr->header.status;

		ehs_len = lrbp->ucd_rsp_ptr->header.ehs_length;
		/*
		 * Since the bLength in EHS indicates the total size of the EHS Header and EHS Data
		 * in 32 Byte units, the value of the bLength Request/Response for Advanced RPMB
		 * Message is 02h.
		 */
		if (ehs_len == 2 && rsp_ehs) {
			/*
			 * ucd_rsp_ptr points to a buffer with a length of 512 bytes
			 * (ALIGNED_UPIU_SIZE = 512), and the EHS data just starts from byte32
			 */
			ehs_data = (u8 *)lrbp->ucd_rsp_ptr + EHS_OFFSET_IN_RESPONSE;
			memcpy(rsp_ehs, ehs_data, ehs_len * 32);
		}
	}

	up_read(&hba->clk_scaling_lock);
	mutex_unlock(&hba->dev_cmd.lock);
	ufshcd_release(hba);
	return err ? : result;
}
/**
 * ufshcd_eh_device_reset_handler() - Reset a single logical unit.
 * @cmd: SCSI command pointer
 *
 * Return: SUCCESS or FAILED.
 */
static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
	unsigned long flags, pending_reqs = 0, not_cleared = 0;
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	struct ufs_hw_queue *hwq;
	struct ufshcd_lrb *lrbp;
	u32 pos, not_cleared_mask = 0;
	int err;
	u8 resp = 0xF, lun;

	host = cmd->device->host;
	hba = shost_priv(host);

	lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
	err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
		if (!err)
			err = resp;
		goto out;
	}

	if (is_mcq_enabled(hba)) {
		for (pos = 0; pos < hba->nutrs; pos++) {
			lrbp = &hba->lrb[pos];
			if (ufshcd_cmd_inflight(lrbp->cmd) &&
			    lrbp->lun == lun) {
				ufshcd_clear_cmd(hba, pos);
				hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
				ufshcd_mcq_poll_cqe_lock(hba, hwq);
			}
		}
		err = 0;
		goto out;
	}

	/* clear the commands that were pending for corresponding LUN */
	spin_lock_irqsave(&hba->outstanding_lock, flags);
	for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs)
		if (hba->lrb[pos].lun == lun)
			__set_bit(pos, &pending_reqs);
	hba->outstanding_reqs &= ~pending_reqs;
	spin_unlock_irqrestore(&hba->outstanding_lock, flags);

	for_each_set_bit(pos, &pending_reqs, hba->nutrs) {
		if (ufshcd_clear_cmd(hba, pos) < 0) {
			spin_lock_irqsave(&hba->outstanding_lock, flags);
			not_cleared = 1U << pos &
				ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
			hba->outstanding_reqs |= not_cleared;
			not_cleared_mask |= not_cleared;
			spin_unlock_irqrestore(&hba->outstanding_lock, flags);

			dev_err(hba->dev, "%s: failed to clear request %d\n",
				__func__, pos);
		}
	}
	__ufshcd_transfer_req_compl(hba, pending_reqs & ~not_cleared_mask);

out:
	hba->req_abort_count = 0;
	ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, (u32)err);
	if (!err) {
		err = SUCCESS;
	} else {
		dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
		err = FAILED;
	}
	return err;
}
static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
{
	struct ufshcd_lrb *lrbp;
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutrs) {
		lrbp = &hba->lrb[tag];
		lrbp->req_abort_skip = true;
	}
}
/**
 * ufshcd_try_to_abort_task - abort a specific task
 * @hba: Pointer to adapter instance
 * @tag: Task tag/index to be aborted
 *
 * Abort the pending command in device by sending UFS_ABORT_TASK task management
 * command, and in host controller by clearing the door-bell register. There can
 * be race between controller sending the command to the device while abort is
 * issued. To avoid that, first issue UFS_QUERY_TASK to check if the command is
 * really issued and then try to abort it.
 *
 * Return: zero on success, non-zero on failure.
 */
int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
{
	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
	int err = 0;
	int poll_cnt;
	u8 resp = 0xF;
	u32 reg;

	for (poll_cnt = 100; poll_cnt; poll_cnt--) {
		err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
				UFS_QUERY_TASK, &resp);
		if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
			/* cmd pending in the device */
			dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
				__func__, tag);
			break;
		} else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
			/*
			 * cmd not pending in the device, check if it is
			 * in transition.
			 */
			dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
				__func__, tag);
			if (is_mcq_enabled(hba)) {
				/* MCQ mode */
				if (ufshcd_cmd_inflight(lrbp->cmd)) {
					/* sleep for max. 200us same delay as in SDB mode */
					usleep_range(100, 200);
					continue;
				}
				/* command completed already */
				dev_err(hba->dev, "%s: cmd at tag=%d is cleared.\n",
					__func__, tag);
				goto out;
			}

			/* Single Doorbell Mode */
			reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
			if (reg & (1 << tag)) {
				/* sleep for max. 200us to stabilize */
				usleep_range(100, 200);
				continue;
			}
			/* command completed already */
			dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
				__func__, tag);
			goto out;
		} else {
			dev_err(hba->dev,
				"%s: no response from device. tag = %d, err %d\n",
				__func__, tag, err);
			if (!err)
				err = resp; /* service response error */
			goto out;
		}
	}

	if (!poll_cnt) {
		err = -EBUSY;
		goto out;
	}

	err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
			UFS_ABORT_TASK, &resp);
	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
		if (!err) {
			err = resp; /* service response error */
			dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
				__func__, tag, err);
		}
		goto out;
	}

	err = ufshcd_clear_cmd(hba, tag);
	if (err)
		dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
			__func__, tag, err);

out:
	return err;
}
/**
 * ufshcd_abort - scsi host template eh_abort_handler callback
 * @cmd: SCSI command pointer
 *
 * Return: SUCCESS or FAILED.
 */
static int ufshcd_abort(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct ufs_hba *hba = shost_priv(host);
	int tag = scsi_cmd_to_rq(cmd)->tag;
	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
	unsigned long flags;
	int err = FAILED;
	bool outstanding;
	u32 reg;

	WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);

	ufshcd_hold(hba);

	if (!is_mcq_enabled(hba)) {
		reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
		if (!test_bit(tag, &hba->outstanding_reqs)) {
			/* If command is already aborted/completed, return FAILED. */
			dev_err(hba->dev,
				"%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
				__func__, tag, hba->outstanding_reqs, reg);
			goto release;
		}
	}

	/* Print Transfer Request of aborted task */
	dev_info(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);

	/*
	 * Print detailed info about aborted request.
	 * As more than one request might get aborted at the same time,
	 * print full information only for the first aborted request in order
	 * to reduce repeated printouts. For other aborted requests only print
	 * basic details.
	 */
	scsi_print_command(cmd);
	if (!hba->req_abort_count) {
		ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, tag);
		ufshcd_print_evt_hist(hba);
		ufshcd_print_host_state(hba);
		ufshcd_print_pwr_info(hba);
		ufshcd_print_tr(hba, tag, true);
	} else {
		ufshcd_print_tr(hba, tag, false);
	}
	hba->req_abort_count++;

	if (!is_mcq_enabled(hba) && !(reg & (1 << tag))) {
		/* only execute this code in single doorbell mode */
		dev_err(hba->dev,
		"%s: cmd was completed, but without a notifying intr, tag = %d",
		__func__, tag);
		__ufshcd_transfer_req_compl(hba, 1UL << tag);
		goto release;
	}

	/*
	 * Task abort to the device W-LUN is illegal. When this command
	 * will fail, due to spec violation, scsi err handling next step
	 * will be to send LU reset which, again, is a spec violation.
	 * To avoid these unnecessary/illegal steps, first we clean up
	 * the lrb taken by this cmd and re-set it in outstanding_reqs,
	 * then queue the eh_work and bail.
	 */
	if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
		ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);

		spin_lock_irqsave(host->host_lock, flags);
		hba->force_reset = true;
		ufshcd_schedule_eh_work(hba);
		spin_unlock_irqrestore(host->host_lock, flags);
		goto release;
	}

	if (is_mcq_enabled(hba)) {
		/* MCQ mode. Branch off to handle abort for mcq mode */
		err = ufshcd_mcq_abort(cmd);
		goto release;
	}

	/* Skip task abort in case previous aborts failed and report failure */
	if (lrbp->req_abort_skip) {
		dev_err(hba->dev, "%s: skipping abort\n", __func__);
		ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
		goto release;
	}

	err = ufshcd_try_to_abort_task(hba, tag);
	if (err) {
		dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
		ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
		err = FAILED;
		goto release;
	}

	/*
	 * Clear the corresponding bit from outstanding_reqs since the command
	 * has been aborted successfully.
	 */
	spin_lock_irqsave(&hba->outstanding_lock, flags);
	outstanding = __test_and_clear_bit(tag, &hba->outstanding_reqs);
	spin_unlock_irqrestore(&hba->outstanding_lock, flags);

	if (outstanding)
		ufshcd_release_scsi_cmd(hba, lrbp);

	err = SUCCESS;

release:
	/* Matches the ufshcd_hold() call at the start of this function. */
	ufshcd_release(hba);
	return err;
}
/**
 * ufshcd_host_reset_and_restore - reset and restore host controller
 * @hba: per-adapter instance
 *
 * Note that host controller reset may issue DME_RESET to
 * local and remote (device) Uni-Pro stack and the attributes
 * are reset to default state.
 *
 * Return: zero on success, non-zero on failure.
 */
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
{
	int err;

	/*
	 * Stop the host controller and complete the requests
	 * cleared by h/w
	 */
	ufshcd_hba_stop(hba);
	hba->silence_err_logs = true;
	ufshcd_complete_requests(hba, true);
	hba->silence_err_logs = false;

	/* scale up clocks to max frequency before full reinitialization */
	ufshcd_scale_clks(hba, true);

	err = ufshcd_hba_enable(hba);

	/* Establish the link again and restore the device */
	if (!err)
		err = ufshcd_probe_hba(hba, false);

	if (err)
		dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
	ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err);
	return err;
}
/**
 * ufshcd_reset_and_restore - reset and re-initialize host/device
 * @hba: per-adapter instance
 *
 * Reset and recover device, host and re-establish link. This
 * is helpful to recover the communication in fatal error conditions.
 *
 * Return: zero on success, non-zero on failure.
 */
static int ufshcd_reset_and_restore(struct ufs_hba *hba)
{
	u32 saved_err = 0;
	u32 saved_uic_err = 0;
	int err = 0;
	unsigned long flags;
	int retries = MAX_HOST_RESET_RETRIES;

	spin_lock_irqsave(hba->host->host_lock, flags);
	do {
		/*
		 * This is a fresh start, cache and clear saved error first,
		 * in case new error generated during reset and restore.
		 */
		saved_err |= hba->saved_err;
		saved_uic_err |= hba->saved_uic_err;
		hba->saved_err = 0;
		hba->saved_uic_err = 0;
		hba->force_reset = false;
		hba->ufshcd_state = UFSHCD_STATE_RESET;
		spin_unlock_irqrestore(hba->host->host_lock, flags);

		/* Reset the attached device */
		ufshcd_device_reset(hba);

		err = ufshcd_host_reset_and_restore(hba);

		spin_lock_irqsave(hba->host->host_lock, flags);
		if (err)
			continue;
		/* Do not exit unless operational or dead */
		if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL &&
		    hba->ufshcd_state != UFSHCD_STATE_ERROR &&
		    hba->ufshcd_state != UFSHCD_STATE_EH_SCHEDULED_NON_FATAL)
			err = -EAGAIN;
	} while (err && --retries);

	/*
	 * Inform scsi mid-layer that we did reset and allow to handle
	 * Unit Attention properly.
	 */
	scsi_report_bus_reset(hba->host, 0);
	if (err) {
		hba->ufshcd_state = UFSHCD_STATE_ERROR;
		hba->saved_err |= saved_err;
		hba->saved_uic_err |= saved_uic_err;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return err;
}
/**
 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
 * @cmd: SCSI command pointer
 *
 * Return: SUCCESS or FAILED.
 */
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	int err = SUCCESS;
	unsigned long flags;
	struct ufs_hba *hba;

	hba = shost_priv(cmd->device->host);

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->force_reset = true;
	ufshcd_schedule_eh_work(hba);
	dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	flush_work(&hba->eh_work);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->ufshcd_state == UFSHCD_STATE_ERROR)
		err = FAILED;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return err;
}
/**
 * ufshcd_get_max_icc_level - calculate the ICC level
 * @sup_curr_uA: max. current supported by the regulator
 * @start_scan: row at the desc table to start scan from
 * @buff: power descriptor buffer
 *
 * Return: calculated max ICC level for specific regulator.
 */
static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan,
				    const char *buff)
{
	int i;
	int curr_uA;
	u16 data;
	u16 unit;

	for (i = start_scan; i >= 0; i--) {
		data = get_unaligned_be16(&buff[2 * i]);
		unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
						ATTR_ICC_LVL_UNIT_OFFSET;
		curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
		switch (unit) {
		case UFSHCD_NANO_AMP:
			curr_uA = curr_uA / 1000;
			break;
		case UFSHCD_MILI_AMP:
			curr_uA = curr_uA * 1000;
			break;
		case UFSHCD_AMP:
			curr_uA = curr_uA * 1000 * 1000;
			break;
		case UFSHCD_MICRO_AMP:
		default:
			break;
		}
		if (sup_curr_uA >= curr_uA)
			break;
	}
	if (i < 0) {
		i = 0;
		pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
	}

	return (u32)i;
}
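
/*
 * Worked example (values hypothetical): if a scanned descriptor entry decodes
 * to unit == UFSHCD_MILI_AMP with a raw value of 300, the loop above compares
 * 300 * 1000 = 300000 uA against sup_curr_uA; all units are normalized to
 * microamps before the comparison with the regulator's max_uA limit.
 */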
/**
 * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level
 * In case regulators are not initialized we'll return 0
 * @hba: per-adapter instance
 * @desc_buf: power descriptor buffer to extract ICC levels from.
 *
 * Return: calculated ICC level.
 */
static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
						const u8 *desc_buf)
{
	u32 icc_level = 0;

	if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
	    !hba->vreg_info.vccq2) {
		/*
		 * Using dev_dbg to avoid messages during runtime PM to avoid
		 * never-ending cycles of messages written back to storage by
		 * user space causing runtime resume, causing more messages and
		 * so on.
		 */
		dev_dbg(hba->dev,
			"%s: Regulator capability was not set, actvIccLevel=%d",
			__func__, icc_level);
		goto out;
	}

	if (hba->vreg_info.vcc->max_uA)
		icc_level = ufshcd_get_max_icc_level(
				hba->vreg_info.vcc->max_uA,
				POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);

	if (hba->vreg_info.vccq->max_uA)
		icc_level = ufshcd_get_max_icc_level(
				hba->vreg_info.vccq->max_uA,
				icc_level,
				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);

	if (hba->vreg_info.vccq2->max_uA)
		icc_level = ufshcd_get_max_icc_level(
				hba->vreg_info.vccq2->max_uA,
				icc_level,
				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
out:
	return icc_level;
}
static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
{
	int ret;
	u32 icc_level;
	u8 *desc_buf;

	desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
	if (!desc_buf)
		return;

	ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0,
				     desc_buf, QUERY_DESC_MAX_SIZE);
	if (ret) {
		dev_err(hba->dev,
			"%s: Failed reading power descriptor ret = %d",
			__func__, ret);
		goto out;
	}

	icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf);
	dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level);

	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
		QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);

	if (ret)
		dev_err(hba->dev,
			"%s: Failed configuring bActiveICCLevel = %d ret = %d",
			__func__, icc_level, ret);

out:
	kfree(desc_buf);
}
static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev)
{
	scsi_autopm_get_device(sdev);
	blk_pm_runtime_init(sdev->request_queue, &sdev->sdev_gendev);
	if (sdev->rpm_autosuspend)
		pm_runtime_set_autosuspend_delay(&sdev->sdev_gendev,
						 RPM_AUTOSUSPEND_DELAY_MS);
	scsi_autopm_put_device(sdev);
}
/**
 * ufshcd_scsi_add_wlus - Adds required W-LUs
 * @hba: per-adapter instance
 *
 * UFS device specification requires the UFS devices to support 4 well known
 * logical units:
 *	"REPORT_LUNS" (address: 01h)
 *	"UFS Device" (address: 50h)
 *	"RPMB" (address: 44h)
 *	"BOOT" (address: 30h)
 *
 * UFS device's power management needs to be controlled by "POWER CONDITION"
 * field of SSU (START STOP UNIT) command. But this "power condition" field
 * will take effect only when its sent to "UFS device" well known logical unit
 * hence we require the scsi_device instance to represent this logical unit in
 * order for the UFS host driver to send the SSU command for power management.
 *
 * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
 * Block) LU so user space process can control this LU. User space may also
 * want to have access to BOOT LU.
 *
 * This function adds scsi device instances for each of all well known LUs
 * (except "REPORT LUNS" LU).
 *
 * Return: zero on success (all required W-LUs are added successfully),
 * non-zero error value on failure (if failed to add any of the required W-LU).
 */
static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
{
	int ret = 0;
	struct scsi_device *sdev_boot, *sdev_rpmb;

	hba->ufs_device_wlun = __scsi_add_device(hba->host, 0, 0,
		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
	if (IS_ERR(hba->ufs_device_wlun)) {
		ret = PTR_ERR(hba->ufs_device_wlun);
		hba->ufs_device_wlun = NULL;
		goto out;
	}
	scsi_device_put(hba->ufs_device_wlun);

	sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
	if (IS_ERR(sdev_rpmb)) {
		ret = PTR_ERR(sdev_rpmb);
		goto remove_ufs_device_wlun;
	}
	ufshcd_blk_pm_runtime_init(sdev_rpmb);
	scsi_device_put(sdev_rpmb);

	sdev_boot = __scsi_add_device(hba->host, 0, 0,
		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
	if (IS_ERR(sdev_boot)) {
		dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
	} else {
		ufshcd_blk_pm_runtime_init(sdev_boot);
		scsi_device_put(sdev_boot);
	}
	goto out;

remove_ufs_device_wlun:
	scsi_remove_device(hba->ufs_device_wlun);
out:
	return ret;
}
static void ufshcd_wb_probe(struct ufs_hba *hba, const u8 *desc_buf)
{
	struct ufs_dev_info *dev_info = &hba->dev_info;
	u8 lun;
	u32 d_lu_wb_buf_alloc;
	u32 ext_ufs_feature;

	if (!ufshcd_is_wb_allowed(hba))
		return;

	/*
	 * Probe WB only for UFS-2.2 and UFS-3.1 (and later) devices or
	 * UFS devices with quirk UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES
	 * enabled
	 */
	if (!(dev_info->wspecversion >= 0x310 ||
	      dev_info->wspecversion == 0x220 ||
	      (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES)))
		goto wb_disabled;

	ext_ufs_feature = get_unaligned_be32(desc_buf +
					DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);

	if (!(ext_ufs_feature & UFS_DEV_WRITE_BOOSTER_SUP))
		goto wb_disabled;

	/*
	 * WB may be supported but not configured while provisioning. The spec
	 * says, in dedicated wb buffer mode, a max of 1 lun would have wb
	 * buffer configured.
	 */
	dev_info->wb_buffer_type = desc_buf[DEVICE_DESC_PARAM_WB_TYPE];

	dev_info->b_presrv_uspc_en =
		desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];

	if (dev_info->wb_buffer_type == WB_BUF_MODE_SHARED) {
		if (!get_unaligned_be32(desc_buf +
				   DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS))
			goto wb_disabled;
	} else {
		for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) {
			d_lu_wb_buf_alloc = 0;
			ufshcd_read_unit_desc_param(hba,
					lun,
					UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS,
					(u8 *)&d_lu_wb_buf_alloc,
					sizeof(d_lu_wb_buf_alloc));
			if (d_lu_wb_buf_alloc) {
				dev_info->wb_dedicated_lu = lun;
				break;
			}
		}

		if (!d_lu_wb_buf_alloc)
			goto wb_disabled;
	}

	if (!ufshcd_is_wb_buf_lifetime_available(hba))
		goto wb_disabled;

	return;

wb_disabled:
	hba->caps &= ~UFSHCD_CAP_WB_EN;
}
static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf)
{
	struct ufs_dev_info *dev_info = &hba->dev_info;
	u32 ext_ufs_feature;
	u8 mask = 0;

	if (!(hba->caps & UFSHCD_CAP_TEMP_NOTIF) || dev_info->wspecversion < 0x300)
		return;

	ext_ufs_feature = get_unaligned_be32(desc_buf + DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);

	if (ext_ufs_feature & UFS_DEV_LOW_TEMP_NOTIF)
		mask |= MASK_EE_TOO_LOW_TEMP;

	if (ext_ufs_feature & UFS_DEV_HIGH_TEMP_NOTIF)
		mask |= MASK_EE_TOO_HIGH_TEMP;

	if (mask) {
		ufshcd_enable_ee(hba, mask);
		ufs_hwmon_probe(hba, mask);
	}
}
static void ufshcd_ext_iid_probe(struct ufs_hba *hba, u8 *desc_buf)
{
	struct ufs_dev_info *dev_info = &hba->dev_info;
	u32 ext_ufs_feature;
	u32 ext_iid_en = 0;
	int err;

	/* Only UFS-4.0 and above may support EXT_IID */
	if (dev_info->wspecversion < 0x400)
		goto out;

	ext_ufs_feature = get_unaligned_be32(desc_buf +
				     DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
	if (!(ext_ufs_feature & UFS_DEV_EXT_IID_SUP))
		goto out;

	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				      QUERY_ATTR_IDN_EXT_IID_EN, 0, 0, &ext_iid_en);
	if (err)
		dev_err(hba->dev, "failed reading bEXTIIDEn. err = %d\n", err);

out:
	dev_info->b_ext_iid_en = ext_iid_en;
}
void ufshcd_fixup_dev_quirks(struct ufs_hba *hba,
			     const struct ufs_dev_quirk *fixups)
{
	const struct ufs_dev_quirk *f;
	struct ufs_dev_info *dev_info = &hba->dev_info;

	if (!fixups)
		return;

	for (f = fixups; f->quirk; f++) {
		if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
		     f->wmanufacturerid == UFS_ANY_VENDOR) &&
		     ((dev_info->model &&
		       STR_PRFX_EQUAL(f->model, dev_info->model)) ||
		      !strcmp(f->model, UFS_ANY_MODEL)))
			hba->dev_quirks |= f->quirk;
	}
}
EXPORT_SYMBOL_GPL(ufshcd_fixup_dev_quirks);
static void ufs_fixup_device_setup(struct ufs_hba *hba)
{
	/* fix by general quirk table */
	ufshcd_fixup_dev_quirks(hba, ufs_fixups);

	/* allow vendors to fix quirks */
	ufshcd_vops_fixup_dev_quirks(hba);
}
static int ufs_get_device_desc(struct ufs_hba *hba)
{
	int err;
	u8 model_index;
	u8 *desc_buf;
	struct ufs_dev_info *dev_info = &hba->dev_info;

	desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
	if (!desc_buf) {
		err = -ENOMEM;
		goto out;
	}

	err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf,
				     QUERY_DESC_MAX_SIZE);
	if (err) {
		dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
			__func__, err);
		goto out;
	}

	/*
	 * getting vendor (manufacturerID) and Bank Index in big endian
	 * format
	 */
	dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
				     desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];

	/* getting Specification Version in big endian format */
	dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
				      desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
	dev_info->bqueuedepth = desc_buf[DEVICE_DESC_PARAM_Q_DPTH];

	model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];

	err = ufshcd_read_string_desc(hba, model_index,
				      &dev_info->model, SD_ASCII_STD);
	if (err < 0) {
		dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
			__func__, err);
		goto out;
	}

	hba->luns_avail = desc_buf[DEVICE_DESC_PARAM_NUM_LU] +
		desc_buf[DEVICE_DESC_PARAM_NUM_WLU];

	ufs_fixup_device_setup(hba);

	ufshcd_wb_probe(hba, desc_buf);

	ufshcd_temp_notif_probe(hba, desc_buf);

	if (hba->ext_iid_sup)
		ufshcd_ext_iid_probe(hba, desc_buf);

	/*
	 * ufshcd_read_string_desc returns size of the string
	 * reset the error value
	 */
	err = 0;

out:
	kfree(desc_buf);
	return err;
}

static void ufs_put_device_desc(struct ufs_hba *hba)
{
	struct ufs_dev_info *dev_info = &hba->dev_info;

	kfree(dev_info->model);
	dev_info->model = NULL;
}
/**
 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
 * @hba: per-adapter instance
 *
 * PA_TActivate parameter can be tuned manually if UniPro version is less than
 * 1.61. PA_TActivate needs to be greater than or equal to peer M-PHY's
 * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
 * the hibern8 exit latency.
 *
 * Return: zero on success, non-zero error value on failure.
 */
static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
{
	int ret = 0;
	u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;

	ret = ufshcd_dme_peer_get(hba,
				  UIC_ARG_MIB_SEL(
					RX_MIN_ACTIVATETIME_CAPABILITY,
					UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
				  &peer_rx_min_activatetime);
	if (ret)
		goto out;

	/* make sure proper unit conversion is applied */
	tuned_pa_tactivate =
		((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
		 / PA_TACTIVATE_TIME_UNIT_US);
	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
			     tuned_pa_tactivate);

out:
	return ret;
}
/**
 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
 * @hba: per-adapter instance
 *
 * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
 * 1.61. PA_Hibern8Time needs to be the maximum of local M-PHY's
 * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
 * This optimal value can help reduce the hibern8 exit latency.
 *
 * Return: zero on success, non-zero error value on failure.
 */
static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
{
	int ret = 0;
	u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
	u32 max_hibern8_time, tuned_pa_hibern8time;

	ret = ufshcd_dme_get(hba,
			     UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
				  &local_tx_hibern8_time_cap);
	if (ret)
		goto out;

	ret = ufshcd_dme_peer_get(hba,
				  UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
					UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
				  &peer_rx_hibern8_time_cap);
	if (ret)
		goto out;

	max_hibern8_time = max(local_tx_hibern8_time_cap,
			       peer_rx_hibern8_time_cap);
	/* make sure proper unit conversion is applied */
	tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
				/ PA_HIBERN8_TIME_UNIT_US);
	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
			     tuned_pa_hibern8time);
out:
	return ret;
}
/**
 * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
 * less than device PA_TACTIVATE time.
 * @hba: per-adapter instance
 *
 * Some UFS devices require host PA_TACTIVATE to be lower than device
 * PA_TACTIVATE, we need to enable UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
 * for such devices.
 *
 * Return: zero on success, non-zero error value on failure.
 */
static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
{
	int ret = 0;
	u32 granularity, peer_granularity;
	u32 pa_tactivate, peer_pa_tactivate;
	u32 pa_tactivate_us, peer_pa_tactivate_us;
	static const u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
				  &granularity);
	if (ret)
		goto out;

	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
				  &peer_granularity);
	if (ret)
		goto out;

	if ((granularity < PA_GRANULARITY_MIN_VAL) ||
	    (granularity > PA_GRANULARITY_MAX_VAL)) {
		dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
			__func__, granularity);
		return -EINVAL;
	}

	if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
	    (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
		dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
			__func__, peer_granularity);
		return -EINVAL;
	}

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
	if (ret)
		goto out;

	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
				  &peer_pa_tactivate);
	if (ret)
		goto out;

	pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
	peer_pa_tactivate_us = peer_pa_tactivate *
			     gran_to_us_table[peer_granularity - 1];

	if (pa_tactivate_us >= peer_pa_tactivate_us) {
		u32 new_peer_pa_tactivate;

		new_peer_pa_tactivate = pa_tactivate_us /
				      gran_to_us_table[peer_granularity - 1];
		new_peer_pa_tactivate++;
		ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
					  new_peer_pa_tactivate);
	}

out:
	return ret;
}
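
/*
 * Worked example (values hypothetical): with host granularity 3 (8 us per
 * unit, per gran_to_us_table) and PA_TACTIVATE == 2, pa_tactivate_us is
 * 16 us. If the peer uses granularity 1 (1 us per unit) with PA_TACTIVATE
 * == 10, peer_pa_tactivate_us is 10 us, so the peer value is bumped to
 * 16 / 1 + 1 = 17 units to keep the host's PA_TACTIVATE strictly lower.
 */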
static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
{
	if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
		ufshcd_tune_pa_tactivate(hba);
		ufshcd_tune_pa_hibern8time(hba);
	}

	ufshcd_vops_apply_dev_quirks(hba);

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
		/* set 1ms timeout for PA_TACTIVATE */
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
		ufshcd_quirk_tune_host_pa_tactivate(hba);
}
static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
{
	hba->ufs_stats.hibern8_exit_cnt = 0;
	hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
	hba->req_abort_count = 0;
}
static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
{
	int err;
	u8 *desc_buf;

	desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
	if (!desc_buf) {
		err = -ENOMEM;
		goto out;
	}

	err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0,
				     desc_buf, QUERY_DESC_MAX_SIZE);
	if (err) {
		dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
				__func__, err);
		goto out;
	}

	if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1)
		hba->dev_info.max_lu_supported = 32;
	else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
		hba->dev_info.max_lu_supported = 8;

out:
	kfree(desc_buf);
	return err;
}
struct ufs_ref_clk {
	unsigned long freq_hz;
	enum ufs_ref_clk_freq val;
};

static const struct ufs_ref_clk ufs_ref_clk_freqs[] = {
	{19200000, REF_CLK_FREQ_19_2_MHZ},
	{26000000, REF_CLK_FREQ_26_MHZ},
	{38400000, REF_CLK_FREQ_38_4_MHZ},
	{52000000, REF_CLK_FREQ_52_MHZ},
	{0, REF_CLK_FREQ_INVAL},
};
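/*
 * Added note: assuming the enum follows the JEDEC bRefClkFreq encoding
 * (0 = 19.2 MHz, 1 = 26 MHz, 2 = 38.4 MHz, 3 = 52 MHz), the enum values
 * double as indexes into ufs_ref_clk_freqs[]; ufshcd_set_dev_ref_clk()
 * below relies on this when printing ufs_ref_clk_freqs[freq].freq_hz.
 */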
static enum ufs_ref_clk_freq
ufs_get_bref_clk_from_hz(unsigned long freq)
{
	int i;

	for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++)
		if (ufs_ref_clk_freqs[i].freq_hz == freq)
			return ufs_ref_clk_freqs[i].val;

	return REF_CLK_FREQ_INVAL;
}
void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
{
	unsigned long freq;

	freq = clk_get_rate(refclk);

	hba->dev_ref_clk_freq =
		ufs_get_bref_clk_from_hz(freq);

	if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
		dev_err(hba->dev,
			"invalid ref_clk setting = %ld\n", freq);
}
static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
{
	int err;
	u32 ref_clk;
	u32 freq = hba->dev_ref_clk_freq;

	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);
	if (err) {
		dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
			err);
		goto out;
	}

	if (ref_clk == freq)
		goto out; /* nothing to update */

	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
			QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq);
	if (err) {
		dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
			ufs_ref_clk_freqs[freq].freq_hz);
		goto out;
	}

	dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n",
			ufs_ref_clk_freqs[freq].freq_hz);

out:
	return err;
}
static int ufshcd_device_params_init(struct ufs_hba *hba)
{
	bool flag;
	int ret;

	/* Init UFS geometry descriptor related parameters */
	ret = ufshcd_device_geo_params_init(hba);
	if (ret)
		goto out;

	/* Check and apply UFS device quirks */
	ret = ufs_get_device_desc(hba);
	if (ret) {
		dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
			__func__, ret);
		goto out;
	}

	ufshcd_get_ref_clk_gating_wait(hba);

	if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
			QUERY_FLAG_IDN_PWR_ON_WPE, 0, &flag))
		hba->dev_info.f_power_on_wp_en = flag;

	/* Probe maximum power mode co-supported by both UFS host and device */
	if (ufshcd_get_max_pwr_mode(hba))
		dev_err(hba->dev,
			"%s: Failed getting max supported power mode\n",
			__func__);
out:
	return ret;
}
static void ufshcd_set_timestamp_attr(struct ufs_hba *hba)
{
	int err;
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	struct ufs_dev_info *dev_info = &hba->dev_info;
	struct utp_upiu_query_v4_0 *upiu_data;

	if (dev_info->wspecversion < 0x400)
		return;

	ufshcd_hold(hba);

	mutex_lock(&hba->dev_cmd.lock);

	ufshcd_init_query(hba, &request, &response,
			  UPIU_QUERY_OPCODE_WRITE_ATTR,
			  QUERY_ATTR_IDN_TIMESTAMP, 0, 0);

	request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;

	upiu_data = (struct utp_upiu_query_v4_0 *)&request->upiu_req;

	put_unaligned_be64(ktime_get_real_ns(), &upiu_data->osf3);

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
	if (err)
		dev_err(hba->dev, "%s: failed to set timestamp %d\n",
			__func__, err);

	mutex_unlock(&hba->dev_cmd.lock);
	ufshcd_release(hba);
}
/**
 * ufshcd_add_lus - probe and add UFS logical units
 * @hba: per-adapter instance
 *
 * Return: 0 upon success; < 0 upon failure.
 */
static int ufshcd_add_lus(struct ufs_hba *hba)
{
	int ret;

	/* Add required well known logical units to scsi mid layer */
	ret = ufshcd_scsi_add_wlus(hba);
	if (ret)
		goto out;

	/* Initialize devfreq after UFS device is detected */
	if (ufshcd_is_clkscaling_supported(hba)) {
		memcpy(&hba->clk_scaling.saved_pwr_info,
			&hba->pwr_info,
			sizeof(struct ufs_pa_layer_attr));
		hba->clk_scaling.is_allowed = true;

		ret = ufshcd_devfreq_init(hba);
		if (ret)
			goto out;

		hba->clk_scaling.is_enabled = true;
		ufshcd_init_clk_scaling_sysfs(hba);
	}

	ufs_bsg_probe(hba);
	scsi_scan_host(hba->host);
	pm_runtime_put_sync(hba->dev);

out:
	return ret;
}
/* SDB - Single Doorbell */
static void ufshcd_release_sdb_queue(struct ufs_hba *hba, int nutrs)
{
	size_t ucdl_size, utrdl_size;

	ucdl_size = ufshcd_get_ucd_size(hba) * nutrs;
	dmam_free_coherent(hba->dev, ucdl_size, hba->ucdl_base_addr,
			   hba->ucdl_dma_addr);

	utrdl_size = sizeof(struct utp_transfer_req_desc) * nutrs;
	dmam_free_coherent(hba->dev, utrdl_size, hba->utrdl_base_addr,
			   hba->utrdl_dma_addr);

	devm_kfree(hba->dev, hba->lrb);
}
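/*
 * Switch the host from the single-doorbell layout to MCQ: pick a queue
 * depth, let the MCQ core initialize its queues, reallocate the host memory
 * if the tag count changed, and finally allocate the MCQ-specific
 * descriptors. On failure the original nutrs value is restored.
 */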
static int ufshcd_alloc_mcq(struct ufs_hba *hba)
{
	int ret;
	int old_nutrs = hba->nutrs;

	ret = ufshcd_mcq_decide_queue_depth(hba);
	if (ret < 0)
		return ret;

	hba->nutrs = ret;
	ret = ufshcd_mcq_init(hba);
	if (ret)
		goto err;

	/*
	 * Previously allocated memory for nutrs may not be enough in MCQ mode.
	 * Number of supported tags in MCQ mode may be larger than SDB mode.
	 */
	if (hba->nutrs != old_nutrs) {
		ufshcd_release_sdb_queue(hba, old_nutrs);
		ret = ufshcd_memory_alloc(hba);
		if (ret)
			goto err;
		ufshcd_host_memory_configure(hba);
	}

	ret = ufshcd_mcq_memory_alloc(hba);
	if (ret)
		goto err;

	return 0;
err:
	hba->nutrs = old_nutrs;
	return ret;
}
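/*
 * Program the controller for MCQ operation: configure ESI if the vendor
 * driver supports it, enable the MCQ interrupt sources, bring the queues
 * online, set the max-active-commands limit, and flip the MCQ enable bit in
 * REG_UFS_MEM_CFG. Called both at probe time and when reconfiguring after a
 * reset.
 */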
static void ufshcd_config_mcq(struct ufs_hba *hba)
{
	int ret;
	u32 intrs;

	ret = ufshcd_mcq_vops_config_esi(hba);
	dev_info(hba->dev, "ESI %sconfigured\n", ret ? "is not " : "");

	intrs = UFSHCD_ENABLE_MCQ_INTRS;
	if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_INTR)
		intrs &= ~MCQ_CQ_EVENT_STATUS;
	ufshcd_enable_intr(hba, intrs);
	ufshcd_mcq_make_queues_operational(hba);
	ufshcd_mcq_config_mac(hba, hba->nutrs);

	hba->host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
	hba->reserved_slot = hba->nutrs - UFSHCD_NUM_RESERVED;

	/* Select MCQ mode */
	ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x1,
		      REG_UFS_MEM_CFG);
	hba->mcq_enabled = true;

	dev_info(hba->dev, "MCQ configured, nr_queues=%d, io_queues=%d, read_queue=%d, poll_queues=%d, queue_depth=%d\n",
		 hba->nr_hw_queues, hba->nr_queues[HCTX_TYPE_DEFAULT],
		 hba->nr_queues[HCTX_TYPE_READ], hba->nr_queues[HCTX_TYPE_POLL],
		 hba->nutrs);
}
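/**
 * ufshcd_device_init - start the link and (re)initialize the UFS device
 * @hba: per-adapter instance
 * @init_dev_params: whether to (re)read the device descriptors and, on the
 *	first call, allocate MCQ resources and register the SCSI host
 *
 * Return: 0 upon success; < 0 upon failure.
 */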
static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
{
	int ret;
	struct Scsi_Host *host = hba->host;

	hba->ufshcd_state = UFSHCD_STATE_RESET;

	ret = ufshcd_link_startup(hba);
	if (ret)
		return ret;

	if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION)
		return ret;

	/* Debug counters initialization */
	ufshcd_clear_dbg_ufs_stats(hba);

	/* UniPro link is active now */
	ufshcd_set_link_active(hba);

	/* Reconfigure MCQ upon reset */
	if (is_mcq_enabled(hba) && !init_dev_params)
		ufshcd_config_mcq(hba);

	/* Verify device initialization by sending NOP OUT UPIU */
	ret = ufshcd_verify_dev_init(hba);
	if (ret)
		return ret;

	/* Initiate UFS initialization, and wait until completion */
	ret = ufshcd_complete_dev_init(hba);
	if (ret)
		return ret;

	/*
	 * Initialize UFS device parameters used by driver, these
	 * parameters are associated with UFS descriptors.
	 */
	if (init_dev_params) {
		ret = ufshcd_device_params_init(hba);
		if (ret)
			return ret;
		if (is_mcq_supported(hba) && !hba->scsi_host_added) {
			ret = ufshcd_alloc_mcq(hba);
			if (!ret) {
				ufshcd_config_mcq(hba);
			} else {
				/* Continue with SDB mode */
				use_mcq_mode = false;
				dev_err(hba->dev, "MCQ mode is disabled, err=%d\n",
					ret);
			}
			ret = scsi_add_host(host, hba->dev);
			if (ret) {
				dev_err(hba->dev, "scsi_add_host failed\n");
				return ret;
			}
			hba->scsi_host_added = true;
		} else if (is_mcq_supported(hba)) {
			/* UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH is set */
			ufshcd_config_mcq(hba);
		}
	}

	ufshcd_tune_unipro_params(hba);

	/* UFS device is also active now */
	ufshcd_set_ufs_dev_active(hba);
	ufshcd_force_reset_auto_bkops(hba);

	ufshcd_set_timestamp_attr(hba);

	/* Gear up to HS gear if supported */
	if (hba->max_pwr_info.is_valid) {
		/*
		 * Set the right value to bRefClkFreq before attempting to
		 * switch to HS gears.
		 */
		if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
			ufshcd_set_dev_ref_clk(hba);
		ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
		if (ret) {
			dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
				__func__, ret);
			return ret;
		}
	}

	return 0;
}
/**
 * ufshcd_probe_hba - probe hba to detect device and initialize it
 * @hba: per-adapter instance
 * @init_dev_params: whether or not to call ufshcd_device_params_init().
 *
 * Execute link-startup and verify device initialization.
 *
 * Return: 0 upon success; < 0 upon failure.
 */
static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
{
	ktime_t start = ktime_get();
	unsigned long flags;
	int ret;

	ret = ufshcd_device_init(hba, init_dev_params);
	if (ret)
		goto out;

	if (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH) {
		/* Reset the device and controller before doing reinit */
		ufshcd_device_reset(hba);
		ufshcd_hba_stop(hba);
		ufshcd_vops_reinit_notify(hba);
		ret = ufshcd_hba_enable(hba);
		if (ret) {
			dev_err(hba->dev, "Host controller enable failed\n");
			ufshcd_print_evt_hist(hba);
			ufshcd_print_host_state(hba);
			goto out;
		}

		/* Reinit the device */
		ret = ufshcd_device_init(hba, init_dev_params);
		if (ret)
			goto out;
	}

	ufshcd_print_pwr_info(hba);

	/*
	 * bActiveICCLevel is volatile for UFS device (as per latest v2.1 spec)
	 * and for removable UFS card as well, hence always set the parameter.
	 * Note: Error handler may issue the device reset hence resetting
	 * bActiveICCLevel as well so it is always safe to set this here.
	 */
	ufshcd_set_active_icc_lvl(hba);

	/* Enable UFS Write Booster if supported */
	ufshcd_configure_wb(hba);

	if (hba->ee_usr_mask)
		ufshcd_write_ee_control(hba);
	/* Enable Auto-Hibernate if configured */
	ufshcd_auto_hibern8_enable(hba);

out:
	spin_lock_irqsave(hba->host->host_lock, flags);
	if (ret)
		hba->ufshcd_state = UFSHCD_STATE_ERROR;
	else if (hba->ufshcd_state == UFSHCD_STATE_RESET)
		hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	trace_ufshcd_init(dev_name(hba->dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	return ret;
}
/**
 * ufshcd_async_scan - asynchronous execution for probing hba
 * @data: data pointer to pass to this function
 * @cookie: cookie data
 */
static void ufshcd_async_scan(void *data, async_cookie_t cookie)
{
	struct ufs_hba *hba = (struct ufs_hba *)data;
	int ret;

	down(&hba->host_sem);
	/* Initialize hba, detect and initialize UFS device */
	ret = ufshcd_probe_hba(hba, true);
	up(&hba->host_sem);
	if (ret)
		goto out;

	/* Probe and add UFS logical units */
	ret = ufshcd_add_lus(hba);

out:
	/*
	 * If we failed to initialize the device or the device is not
	 * present, turn off the power/clocks etc.
	 */
	if (ret) {
		pm_runtime_put_sync(hba->dev);
		ufshcd_hba_exit(hba);
	}
}
static enum scsi_timeout_action ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
{
	struct ufs_hba *hba = shost_priv(scmd->device->host);

	if (!hba->system_suspending) {
		/* Activate the error handler in the SCSI core. */
		return SCSI_EH_NOT_HANDLED;
	}

	/*
	 * If we get here we know that no TMFs are outstanding and also that
	 * the only pending command is a START STOP UNIT command. Handle the
	 * timeout of that command directly to prevent a deadlock between
	 * ufshcd_set_dev_pwr_mode() and ufshcd_err_handler().
	 */
	ufshcd_link_recovery(hba);
	dev_info(hba->dev, "%s() finished; outstanding_tasks = %#lx.\n",
		 __func__, hba->outstanding_tasks);

	return hba->outstanding_reqs ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE;
}
static const struct attribute_group *ufshcd_driver_groups[] = {
	&ufs_sysfs_unit_descriptor_group,
	&ufs_sysfs_lun_attributes_group,
	NULL,
};

static struct ufs_hba_variant_params ufs_hba_vps = {
	.hba_enable_delay_us		= 1000,
	.wb_flush_threshold		= UFS_WB_BUF_REMAIN_PERCENT(40),
	.devfreq_profile.polling_ms	= 100,
	.devfreq_profile.target		= ufshcd_devfreq_target,
	.devfreq_profile.get_dev_status	= ufshcd_devfreq_get_dev_status,
	.ondemand_data.upthreshold	= 70,
	.ondemand_data.downdifferential	= 5,
};
static const struct scsi_host_template ufshcd_driver_template = {
	.module			= THIS_MODULE,
	.name			= UFSHCD,
	.proc_name		= UFSHCD,
	.map_queues		= ufshcd_map_queues,
	.queuecommand		= ufshcd_queuecommand,
	.mq_poll		= ufshcd_poll,
	.slave_alloc		= ufshcd_slave_alloc,
	.slave_configure	= ufshcd_slave_configure,
	.slave_destroy		= ufshcd_slave_destroy,
	.change_queue_depth	= ufshcd_change_queue_depth,
	.eh_abort_handler	= ufshcd_abort,
	.eh_device_reset_handler = ufshcd_eh_device_reset_handler,
	.eh_host_reset_handler   = ufshcd_eh_host_reset_handler,
	.eh_timed_out		= ufshcd_eh_timed_out,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.cmd_per_lun		= UFSHCD_CMD_PER_LUN,
	.can_queue		= UFSHCD_CAN_QUEUE,
	.max_segment_size	= PRDT_DATA_BYTE_COUNT_MAX,
	.max_sectors		= SZ_1M / SECTOR_SIZE,
	.max_host_blocked	= 1,
	.track_queue_depth	= 1,
	.skip_settle_delay	= 1,
	.sdev_groups		= ufshcd_driver_groups,
	.rpm_autosuspend_delay	= RPM_AUTOSUSPEND_DELAY_MS,
};
static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
				   int ua)
{
	int ret;

	if (!vreg)
		return 0;

	/*
	 * "set_load" operation shall be required on those regulators
	 * which specifically configured current limitation. Otherwise
	 * zero max_uA may cause unexpected behavior when regulator is
	 * enabled or set as high power mode.
	 */
	if (!vreg->max_uA)
		return 0;

	ret = regulator_set_load(vreg->reg, ua);
	if (ret < 0) {
		dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
				__func__, vreg->name, ua, ret);
	}

	return ret;
}
static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
					 struct ufs_vreg *vreg)
{
	return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
}

static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
					 struct ufs_vreg *vreg)
{
	if (!vreg)
		return 0;

	return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
}
static int ufshcd_config_vreg(struct device *dev,
			      struct ufs_vreg *vreg, bool on)
{
	if (regulator_count_voltages(vreg->reg) <= 0)
		return 0;

	return ufshcd_config_vreg_load(dev, vreg, on ? vreg->max_uA : 0);
}
static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (!vreg || vreg->enabled)
		goto out;

	ret = ufshcd_config_vreg(dev, vreg, true);
	if (!ret)
		ret = regulator_enable(vreg->reg);

	if (!ret)
		vreg->enabled = true;
	else
		dev_err(dev, "%s: %s enable failed, err=%d\n",
				__func__, vreg->name, ret);
out:
	return ret;
}
static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (!vreg || !vreg->enabled || vreg->always_on)
		goto out;

	ret = regulator_disable(vreg->reg);
	if (!ret) {
		/* ignore errors on applying disable config */
		ufshcd_config_vreg(dev, vreg, false);
		vreg->enabled = false;
	} else {
		dev_err(dev, "%s: %s disable failed, err=%d\n",
				__func__, vreg->name, ret);
	}
out:
	return ret;
}
static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
{
	int ret = 0;
	struct device *dev = hba->dev;
	struct ufs_vreg_info *info = &hba->vreg_info;

	ret = ufshcd_toggle_vreg(dev, info->vcc, on);
	if (ret)
		goto out;

	ret = ufshcd_toggle_vreg(dev, info->vccq, on);
	if (ret)
		goto out;

	ret = ufshcd_toggle_vreg(dev, info->vccq2, on);

out:
	if (ret) {
		ufshcd_toggle_vreg(dev, info->vccq2, false);
		ufshcd_toggle_vreg(dev, info->vccq, false);
		ufshcd_toggle_vreg(dev, info->vcc, false);
	}
	return ret;
}
static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
{
	struct ufs_vreg_info *info = &hba->vreg_info;

	return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
}
int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (!vreg)
		goto out;

	vreg->reg = devm_regulator_get(dev, vreg->name);
	if (IS_ERR(vreg->reg)) {
		ret = PTR_ERR(vreg->reg);
		dev_err(dev, "%s: %s get failed, err=%d\n",
				__func__, vreg->name, ret);
	}
out:
	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_get_vreg);
static int ufshcd_init_vreg(struct ufs_hba *hba)
{
	int ret = 0;
	struct device *dev = hba->dev;
	struct ufs_vreg_info *info = &hba->vreg_info;

	ret = ufshcd_get_vreg(dev, info->vcc);
	if (ret)
		goto out;

	ret = ufshcd_get_vreg(dev, info->vccq);
	if (!ret)
		ret = ufshcd_get_vreg(dev, info->vccq2);
out:
	return ret;
}
static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
{
	struct ufs_vreg_info *info = &hba->vreg_info;

	return ufshcd_get_vreg(hba->dev, info->vdd_hba);
}
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;
	unsigned long flags;
	ktime_t start = ktime_get();
	bool clk_state_changed = false;

	if (list_empty(head))
		goto out;

	ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
	if (ret)
		return ret;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			/*
			 * Don't disable clocks which are needed
			 * to keep the link active.
			 */
			if (ufshcd_is_link_active(hba) &&
			    clki->keep_link_active)
				continue;

			clk_state_changed = on ^ clki->enabled;
			if (on && !clki->enabled) {
				ret = clk_prepare_enable(clki->clk);
				if (ret) {
					dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
						__func__, clki->name, ret);
					goto out;
				}
			} else if (!on && clki->enabled) {
				clk_disable_unprepare(clki->clk);
			}
			clki->enabled = on;
			dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
					clki->name, on ? "en" : "dis");
		}
	}

	ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
	if (ret)
		return ret;

out:
	if (ret) {
		list_for_each_entry(clki, head, list) {
			if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
				clk_disable_unprepare(clki->clk);
		}
	} else if (!ret && on) {
		spin_lock_irqsave(hba->host->host_lock, flags);
		hba->clk_gating.state = CLKS_ON;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	}

	if (clk_state_changed)
		trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
			(on ? "on" : "off"),
			ktime_to_us(ktime_sub(ktime_get(), start)), ret);
	return ret;
}
static enum ufs_ref_clk_freq ufshcd_parse_ref_clk_property(struct ufs_hba *hba)
{
	u32 freq;
	int ret = device_property_read_u32(hba->dev, "ref-clk-freq", &freq);

	if (ret) {
		dev_dbg(hba->dev, "Cannot query 'ref-clk-freq' property = %d", ret);
		return REF_CLK_FREQ_INVAL;
	}

	return ufs_get_bref_clk_from_hz(freq);
}
static int ufshcd_init_clocks(struct ufs_hba *hba)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct device *dev = hba->dev;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		goto out;

	list_for_each_entry(clki, head, list) {
		if (!clki->name)
			continue;

		clki->clk = devm_clk_get(dev, clki->name);
		if (IS_ERR(clki->clk)) {
			ret = PTR_ERR(clki->clk);
			dev_err(dev, "%s: %s clk get failed, %d\n",
					__func__, clki->name, ret);
			goto out;
		}

		/*
		 * Parse device ref clk freq as per device tree "ref_clk".
		 * Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL
		 * in ufshcd_alloc_host().
		 */
		if (!strcmp(clki->name, "ref_clk"))
			ufshcd_parse_dev_ref_clk_freq(hba, clki->clk);

		if (clki->max_freq) {
			ret = clk_set_rate(clki->clk, clki->max_freq);
			if (ret) {
				dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
					__func__, clki->name,
					clki->max_freq, ret);
				goto out;
			}
			clki->curr_freq = clki->max_freq;
		}
		dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
				clki->name, clk_get_rate(clki->clk));
	}
out:
	return ret;
}
static int ufshcd_variant_hba_init(struct ufs_hba *hba)
{
	int err = 0;

	if (!hba->vops)
		goto out;

	err = ufshcd_vops_init(hba);
	if (err)
		dev_err_probe(hba->dev, err,
			      "%s: variant %s init failed with err %d\n",
			      __func__, ufshcd_get_var_name(hba), err);
out:
	return err;
}

static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
{
	if (!hba->vops)
		return;

	ufshcd_vops_exit(hba);
}
static int ufshcd_hba_init(struct ufs_hba *hba)
{
	int err;

	/*
	 * Handle host controller power separately from the UFS device power
	 * rails as it will help controlling the UFS host controller power
	 * collapse easily which is different than UFS device power collapse.
	 * Also, enable the host controller power before we go ahead with rest
	 * of the initialization here.
	 */
	err = ufshcd_init_hba_vreg(hba);
	if (err)
		goto out;

	err = ufshcd_setup_hba_vreg(hba, true);
	if (err)
		goto out;

	err = ufshcd_init_clocks(hba);
	if (err)
		goto out_disable_hba_vreg;

	if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
		hba->dev_ref_clk_freq = ufshcd_parse_ref_clk_property(hba);

	err = ufshcd_setup_clocks(hba, true);
	if (err)
		goto out_disable_hba_vreg;

	err = ufshcd_init_vreg(hba);
	if (err)
		goto out_disable_clks;

	err = ufshcd_setup_vreg(hba, true);
	if (err)
		goto out_disable_clks;

	err = ufshcd_variant_hba_init(hba);
	if (err)
		goto out_disable_vreg;

	ufs_debugfs_hba_init(hba);

	hba->is_powered = true;
	goto out;

out_disable_vreg:
	ufshcd_setup_vreg(hba, false);
out_disable_clks:
	ufshcd_setup_clocks(hba, false);
out_disable_hba_vreg:
	ufshcd_setup_hba_vreg(hba, false);
out:
	return err;
}
static void ufshcd_hba_exit(struct ufs_hba *hba)
{
	if (hba->is_powered) {
		ufshcd_exit_clk_scaling(hba);
		ufshcd_exit_clk_gating(hba);
		if (hba->eh_wq)
			destroy_workqueue(hba->eh_wq);
		ufs_debugfs_hba_exit(hba);
		ufshcd_variant_hba_exit(hba);
		ufshcd_setup_vreg(hba, false);
		ufshcd_setup_clocks(hba, false);
		ufshcd_setup_hba_vreg(hba, false);
		hba->is_powered = false;
		ufs_put_device_desc(hba);
	}
}
static int ufshcd_execute_start_stop(struct scsi_device *sdev,
				     enum ufs_dev_pwr_mode pwr_mode,
				     struct scsi_sense_hdr *sshdr)
{
	const unsigned char cdb[6] = { START_STOP, 0, 0, 0, pwr_mode << 4, 0 };
	const struct scsi_exec_args args = {
		.sshdr = sshdr,
		.req_flags = BLK_MQ_REQ_PM,
		.scmd_flags = SCMD_FAIL_IF_RECOVERING,
	};

	return scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, /*buffer=*/NULL,
			/*bufflen=*/0, /*timeout=*/10 * HZ, /*retries=*/0,
			&args);
}
/**
 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
 *			     power mode
 * @hba: per adapter instance
 * @pwr_mode: device power mode to set
 *
 * Return: 0 if requested power mode is set successfully;
 *         < 0 if failed to set the requested power mode.
 */
static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
				   enum ufs_dev_pwr_mode pwr_mode)
{
	struct scsi_sense_hdr sshdr;
	struct scsi_device *sdp;
	unsigned long flags;
	int ret, retries;

	spin_lock_irqsave(hba->host->host_lock, flags);
	sdp = hba->ufs_device_wlun;
	if (sdp && scsi_device_online(sdp))
		ret = scsi_device_get(sdp);
	else
		ret = -ENODEV;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (ret)
		return ret;

	/*
	 * If scsi commands fail, the scsi mid-layer schedules scsi error-
	 * handling, which would wait for host to be resumed. Since we know
	 * we are functional while we are here, skip host resume in error
	 * handling context.
	 */
	hba->host->eh_noresume = 1;

	/*
	 * Current function would be generally called from the power management
	 * callbacks hence set the RQF_PM flag so that it doesn't resume the
	 * already suspended children.
	 */
	for (retries = 3; retries > 0; --retries) {
		ret = ufshcd_execute_start_stop(sdp, pwr_mode, &sshdr);
		/*
		 * scsi_execute() only returns a negative value if the request
		 * queue is dying.
		 */
		if (ret <= 0)
			break;
	}
	if (ret) {
		sdev_printk(KERN_WARNING, sdp,
			    "START_STOP failed for power mode: %d, result %x\n",
			    pwr_mode, ret);
		if (ret > 0) {
			if (scsi_sense_valid(&sshdr))
				scsi_print_sense_hdr(sdp, NULL, &sshdr);
			ret = -EIO;
		}
	} else {
		hba->curr_dev_pwr_mode = pwr_mode;
	}

	scsi_device_put(sdp);
	hba->host->eh_noresume = 0;
	return ret;
}
static int ufshcd_link_state_transition(struct ufs_hba *hba,
					enum uic_link_state req_link_state,
					bool check_for_bkops)
{
	int ret = 0;

	if (req_link_state == hba->uic_link_state)
		return 0;

	if (req_link_state == UIC_LINK_HIBERN8_STATE) {
		ret = ufshcd_uic_hibern8_enter(hba);
		if (!ret) {
			ufshcd_set_link_hibern8(hba);
		} else {
			dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
					__func__, ret);
			goto out;
		}
	}
	/*
	 * If autobkops is enabled, link can't be turned off because
	 * turning off the link would also turn off the device, except in the
	 * case of DeepSleep where the device is expected to remain powered.
	 */
	else if ((req_link_state == UIC_LINK_OFF_STATE) &&
		 (!check_for_bkops || !hba->auto_bkops_enabled)) {
		/*
		 * Let's make sure that link is in low power mode, we are doing
		 * this currently by putting the link in Hibern8. Another way to
		 * put the link in low power mode is to send the DME end point
		 * to device and then send the DME reset command to local
		 * unipro. But putting the link in hibern8 is much faster.
		 *
		 * Note also that putting the link in Hibern8 is a requirement
		 * for entering DeepSleep.
		 */
		ret = ufshcd_uic_hibern8_enter(hba);
		if (ret) {
			dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
					__func__, ret);
			goto out;
		}
		/*
		 * Change controller state to "reset state" which
		 * should also put the link in off/reset state
		 */
		ufshcd_hba_stop(hba);
		/*
		 * TODO: Check if we need any delay to make sure that
		 * controller is reset
		 */
		ufshcd_set_link_off(hba);
	}

out:
	return ret;
}
static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
{
	bool vcc_off = false;

	/*
	 * It seems some UFS devices may keep drawing more than sleep current
	 * (at least for 500us) from UFS rails (especially from VCCQ rail).
	 * To avoid this situation, add 2ms delay before putting these UFS
	 * rails in LPM mode.
	 */
	if (!ufshcd_is_link_active(hba) &&
	    hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
		usleep_range(2000, 2100);

	/*
	 * If UFS device is either in UFS_Sleep turn off VCC rail to save some
	 * power.
	 *
	 * If UFS device and link is in OFF state, all power supplies (VCC,
	 * VCCQ, VCCQ2) can be turned off if power on write protect is not
	 * required. If UFS link is inactive (Hibern8 or OFF state) and device
	 * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
	 *
	 * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
	 * in low power state which would save some power.
	 *
	 * If Write Booster is enabled and the device needs to flush the WB
	 * buffer OR if bkops status is urgent for WB, keep Vcc on.
	 */
	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
	    !hba->dev_info.is_lu_power_on_wp) {
		ufshcd_setup_vreg(hba, false);
		vcc_off = true;
	} else if (!ufshcd_is_ufs_dev_active(hba)) {
		ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
		vcc_off = true;
		if (ufshcd_is_link_hibern8(hba) || ufshcd_is_link_off(hba)) {
			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
		}
	}

	/*
	 * Some UFS devices require delay after VCC power rail is turned-off.
	 */
	if (vcc_off && hba->vreg_info.vcc &&
		hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)
		usleep_range(5000, 5100);
}
#ifdef CONFIG_PM
static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
{
	int ret = 0;

	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
	    !hba->dev_info.is_lu_power_on_wp) {
		ret = ufshcd_setup_vreg(hba, true);
	} else if (!ufshcd_is_ufs_dev_active(hba)) {
		if (!ufshcd_is_link_active(hba)) {
			ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
			if (ret)
				goto vcc_disable;
			ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
			if (ret)
				goto vccq_lpm;
		}
		ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
	}
	goto out;

vccq_lpm:
	ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
vcc_disable:
	ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
out:
	return ret;
}
#endif /* CONFIG_PM */
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
{
	if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
		ufshcd_setup_hba_vreg(hba, false);
}

static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
{
	if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
		ufshcd_setup_hba_vreg(hba, true);
}
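/*
 * __ufshcd_wl_suspend - device WLUN suspend helper. Decides the target device
 * power mode and link state from the PM level (or forces powerdown/link-off
 * for shutdown), handles bkops/WB-flush constraints, and walks the device and
 * link into the requested low-power states with rollback on error.
 */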
static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int ret = 0;
	bool check_for_bkops;
	enum ufs_pm_level pm_lvl;
	enum ufs_dev_pwr_mode req_dev_pwr_mode;
	enum uic_link_state req_link_state;

	hba->pm_op_in_progress = true;
	if (pm_op != UFS_SHUTDOWN_PM) {
		pm_lvl = pm_op == UFS_RUNTIME_PM ?
			 hba->rpm_lvl : hba->spm_lvl;
		req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
		req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
	} else {
		req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
		req_link_state = UIC_LINK_OFF_STATE;
	}

	/*
	 * If we can't transition into any of the low power modes
	 * just gate the clocks.
	 */
	ufshcd_hold(hba);
	hba->clk_gating.is_suspended = true;

	if (ufshcd_is_clkscaling_supported(hba))
		ufshcd_clk_scaling_suspend(hba, true);

	if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
	    req_link_state == UIC_LINK_ACTIVE_STATE) {
		goto vops_suspend;
	}

	if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
	    (req_link_state == hba->uic_link_state))
		goto enable_scaling;

	/* UFS device & link must be active before we enter in this function */
	if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
		ret = -EINVAL;
		goto enable_scaling;
	}

	if (pm_op == UFS_RUNTIME_PM) {
		if (ufshcd_can_autobkops_during_suspend(hba)) {
			/*
			 * The device is idle with no requests in the queue,
			 * allow background operations if bkops status shows
			 * that performance might be impacted.
			 */
			ret = ufshcd_urgent_bkops(hba);
			if (ret) {
				/*
				 * If return err in suspend flow, IO will hang.
				 * Trigger error handler and break suspend for
				 * error recovery.
				 */
				ufshcd_force_error_recovery(hba);
				ret = -EBUSY;
				goto enable_scaling;
			}
		} else {
			/* make sure that auto bkops is disabled */
			ufshcd_disable_auto_bkops(hba);
		}
		/*
		 * If device needs to do BKOP or WB buffer flush during
		 * Hibern8, keep device power mode as "active power mode"
		 * and VCC supply.
		 */
		hba->dev_info.b_rpm_dev_flush_capable =
			hba->auto_bkops_enabled ||
			(((req_link_state == UIC_LINK_HIBERN8_STATE) ||
			((req_link_state == UIC_LINK_ACTIVE_STATE) &&
			ufshcd_is_auto_hibern8_enabled(hba))) &&
			ufshcd_wb_need_flush(hba));
	}

	flush_work(&hba->eeh_work);

	ret = ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
	if (ret)
		goto enable_scaling;

	if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
		if (pm_op != UFS_RUNTIME_PM)
			/* ensure that bkops is disabled */
			ufshcd_disable_auto_bkops(hba);

		if (!hba->dev_info.b_rpm_dev_flush_capable) {
			ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
			if (ret && pm_op != UFS_SHUTDOWN_PM) {
				/*
				 * If return err in suspend flow, IO will hang.
				 * Trigger error handler and break suspend for
				 * error recovery.
				 */
				ufshcd_force_error_recovery(hba);
				ret = -EBUSY;
			}
			if (ret)
				goto enable_scaling;
		}
	}

	/*
	 * In the case of DeepSleep, the device is expected to remain powered
	 * with the link off, so do not check for bkops.
	 */
	check_for_bkops = !ufshcd_is_ufs_dev_deepsleep(hba);
	ret = ufshcd_link_state_transition(hba, req_link_state, check_for_bkops);
	if (ret && pm_op != UFS_SHUTDOWN_PM) {
		/*
		 * If return err in suspend flow, IO will hang.
		 * Trigger error handler and break suspend for
		 * error recovery.
		 */
		ufshcd_force_error_recovery(hba);
		ret = -EBUSY;
	}
	if (ret)
		goto set_dev_active;

vops_suspend:
	/*
	 * Call vendor specific suspend callback. As these callbacks may access
	 * vendor specific host controller register space call them before the
	 * host clocks are ON.
	 */
	ret = ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
	if (ret)
		goto set_link_active;
	goto out;

set_link_active:
	/*
	 * Device hardware reset is required to exit DeepSleep. Also, for
	 * DeepSleep, the link is off so host reset and restore will be done
	 * further below.
	 */
	if (ufshcd_is_ufs_dev_deepsleep(hba)) {
		ufshcd_device_reset(hba);
		WARN_ON(!ufshcd_is_link_off(hba));
	}
	if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
		ufshcd_set_link_active(hba);
	else if (ufshcd_is_link_off(hba))
		ufshcd_host_reset_and_restore(hba);
set_dev_active:
	/* Can also get here needing to exit DeepSleep */
	if (ufshcd_is_ufs_dev_deepsleep(hba)) {
		ufshcd_device_reset(hba);
		ufshcd_host_reset_and_restore(hba);
	}
	if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
		ufshcd_disable_auto_bkops(hba);
enable_scaling:
	if (ufshcd_is_clkscaling_supported(hba))
		ufshcd_clk_scaling_suspend(hba, false);

	hba->dev_info.b_rpm_dev_flush_capable = false;
out:
	if (hba->dev_info.b_rpm_dev_flush_capable) {
		schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
			msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS));
	}

	if (ret) {
		ufshcd_update_evt_hist(hba, UFS_EVT_WL_SUSP_ERR, (u32)ret);
		hba->clk_gating.is_suspended = false;
		ufshcd_release(hba);
	}
	hba->pm_op_in_progress = false;
	return ret;
}
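/*
 * __ufshcd_wl_resume - device WLUN resume helper. Reverses
 * __ufshcd_wl_suspend(): runs the vendor resume hook, brings the link back
 * (Hibern8 exit or full reset-and-restore), restores the active device power
 * mode, and re-enables bkops/auto-hibern8 as configured.
 */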
static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int ret;
	enum uic_link_state old_link_state = hba->uic_link_state;

	hba->pm_op_in_progress = true;

	/*
	 * Call vendor specific resume callback. As these callbacks may access
	 * vendor specific host controller register space call them when the
	 * host clocks are ON.
	 */
	ret = ufshcd_vops_resume(hba, pm_op);
	if (ret)
		goto out;

	/* For DeepSleep, the only supported option is to have the link off */
	WARN_ON(ufshcd_is_ufs_dev_deepsleep(hba) && !ufshcd_is_link_off(hba));

	if (ufshcd_is_link_hibern8(hba)) {
		ret = ufshcd_uic_hibern8_exit(hba);
		if (!ret) {
			ufshcd_set_link_active(hba);
		} else {
			dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
					__func__, ret);
			goto vendor_suspend;
		}
	} else if (ufshcd_is_link_off(hba)) {
		/*
		 * A full initialization of the host and the device is
		 * required since the link was put to off during suspend.
		 * Note, in the case of DeepSleep, the device will exit
		 * DeepSleep due to device reset.
		 */
		ret = ufshcd_reset_and_restore(hba);
		/*
		 * ufshcd_reset_and_restore() should have already
		 * set the link state as active
		 */
		if (ret || !ufshcd_is_link_active(hba))
			goto vendor_suspend;
	}

	if (!ufshcd_is_ufs_dev_active(hba)) {
		ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
		if (ret)
			goto set_old_link_state;
		ufshcd_set_timestamp_attr(hba);
	}

	if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
		ufshcd_enable_auto_bkops(hba);
	else
		/*
		 * If BKOPs operations are urgently needed at this moment then
		 * keep auto-bkops enabled or else disable it.
		 */
		ufshcd_urgent_bkops(hba);

	if (hba->ee_usr_mask)
		ufshcd_write_ee_control(hba);

	if (ufshcd_is_clkscaling_supported(hba))
		ufshcd_clk_scaling_suspend(hba, false);

	if (hba->dev_info.b_rpm_dev_flush_capable) {
		hba->dev_info.b_rpm_dev_flush_capable = false;
		cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
	}

	/* Enable Auto-Hibernate if configured */
	ufshcd_auto_hibern8_enable(hba);

	goto out;

set_old_link_state:
	ufshcd_link_state_transition(hba, old_link_state, 0);
vendor_suspend:
	ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
	ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
out:
	if (ret)
		ufshcd_update_evt_hist(hba, UFS_EVT_WL_RES_ERR, (u32)ret);
	hba->clk_gating.is_suspended = false;
	ufshcd_release(hba);
	hba->pm_op_in_progress = false;
	return ret;
}
static int ufshcd_wl_runtime_suspend(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufs_hba *hba;
	int ret;
	ktime_t start = ktime_get();

	hba = shost_priv(sdev->host);

	ret = __ufshcd_wl_suspend(hba, UFS_RUNTIME_PM);
	if (ret)
		dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);

	trace_ufshcd_wl_runtime_suspend(dev_name(dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);

	return ret;
}

static int ufshcd_wl_runtime_resume(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufs_hba *hba;
	int ret = 0;
	ktime_t start = ktime_get();

	hba = shost_priv(sdev->host);

	ret = __ufshcd_wl_resume(hba, UFS_RUNTIME_PM);
	if (ret)
		dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);

	trace_ufshcd_wl_runtime_resume(dev_name(dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);

	return ret;
}
#ifdef CONFIG_PM_SLEEP
static int ufshcd_wl_suspend(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufs_hba *hba;
	int ret = 0;
	ktime_t start = ktime_get();

	hba = shost_priv(sdev->host);
	down(&hba->host_sem);
	hba->system_suspending = true;

	if (pm_runtime_suspended(dev))
		goto out;

	ret = __ufshcd_wl_suspend(hba, UFS_SYSTEM_PM);
	if (ret) {
		dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
		up(&hba->host_sem);
	}

out:
	if (!ret)
		hba->is_sys_suspended = true;
	trace_ufshcd_wl_suspend(dev_name(dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);

	return ret;
}

static int ufshcd_wl_resume(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufs_hba *hba;
	int ret = 0;
	ktime_t start = ktime_get();

	hba = shost_priv(sdev->host);

	if (pm_runtime_suspended(dev))
		goto out;

	ret = __ufshcd_wl_resume(hba, UFS_SYSTEM_PM);
	if (ret)
		dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
out:
	trace_ufshcd_wl_resume(dev_name(dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	if (!ret)
		hba->is_sys_suspended = false;
	hba->system_suspending = false;
	up(&hba->host_sem);
	return ret;
}
#endif
/**
 * ufshcd_suspend - helper function for suspend operations
 * @hba: per adapter instance
 *
 * This function disables irqs, turns off clocks
 * and puts vreg and hba-vreg in lpm mode.
 *
 * Return: 0 upon success; < 0 upon failure.
 */
static int ufshcd_suspend(struct ufs_hba *hba)
{
	int ret;

	if (!hba->is_powered)
		return 0;
	/*
	 * Disable the host irq as host controller as there won't be any
	 * host controller transaction expected till resume.
	 */
	ufshcd_disable_irq(hba);
	ret = ufshcd_setup_clocks(hba, false);
	if (ret) {
		ufshcd_enable_irq(hba);
		return ret;
	}
	if (ufshcd_is_clkgating_allowed(hba)) {
		hba->clk_gating.state = CLKS_OFF;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
	}

	ufshcd_vreg_set_lpm(hba);
	/* Put the host controller in low power mode if possible */
	ufshcd_hba_vreg_set_lpm(hba);
	return ret;
}
#ifdef CONFIG_PM
/**
 * ufshcd_resume - helper function for resume operations
 * @hba: per adapter instance
 *
 * This function basically turns on the regulators, clocks and
 * irqs of the hba.
 *
 * Return: 0 for success and non-zero for failure.
 */
static int ufshcd_resume(struct ufs_hba *hba)
{
	int ret;

	if (!hba->is_powered)
		return 0;

	ufshcd_hba_vreg_set_hpm(hba);
	ret = ufshcd_vreg_set_hpm(hba);
	if (ret)
		goto out;

	/* Make sure clocks are enabled before accessing controller */
	ret = ufshcd_setup_clocks(hba, true);
	if (ret)
		goto disable_vreg;

	/* enable the host irq as host controller would be active soon */
	ufshcd_enable_irq(hba);

	goto out;

disable_vreg:
	ufshcd_vreg_set_lpm(hba);
out:
	if (ret)
		ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret);
	return ret;
}
#endif /* CONFIG_PM */
#ifdef CONFIG_PM_SLEEP
/**
 * ufshcd_system_suspend - system suspend callback
 * @dev: Device associated with the UFS controller.
 *
 * Executed before putting the system into a sleep state in which the contents
 * of main memory are preserved.
 *
 * Return: 0 for success and non-zero for failure.
 */
int ufshcd_system_suspend(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret = 0;
	ktime_t start = ktime_get();

	if (pm_runtime_suspended(hba->dev))
		goto out;

	ret = ufshcd_suspend(hba);
out:
	trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	return ret;
}
EXPORT_SYMBOL(ufshcd_system_suspend);

/**
 * ufshcd_system_resume - system resume callback
 * @dev: Device associated with the UFS controller.
 *
 * Executed after waking the system up from a sleep state in which the contents
 * of main memory were preserved.
 *
 * Return: 0 for success and non-zero for failure.
 */
int ufshcd_system_resume(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	ktime_t start = ktime_get();
	int ret = 0;

	if (pm_runtime_suspended(hba->dev))
		goto out;

	ret = ufshcd_resume(hba);

out:
	trace_ufshcd_system_resume(dev_name(hba->dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);

	return ret;
}
EXPORT_SYMBOL(ufshcd_system_resume);
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
/**
 * ufshcd_runtime_suspend - runtime suspend callback
 * @dev: Device associated with the UFS controller.
 *
 * Check the description of ufshcd_suspend() function for more details.
 *
 * Return: 0 for success and non-zero for failure.
 */
int ufshcd_runtime_suspend(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret;
	ktime_t start = ktime_get();

	ret = ufshcd_suspend(hba);

	trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	return ret;
}
EXPORT_SYMBOL(ufshcd_runtime_suspend);

/**
 * ufshcd_runtime_resume - runtime resume routine
 * @dev: Device associated with the UFS controller.
 *
 * This function basically brings controller
 * to active state. Following operations are done in this function:
 *
 * 1. Turn on all the controller related clocks
 * 2. Turn ON VCC rail
 *
 * Return: 0 upon success; < 0 upon failure.
 */
int ufshcd_runtime_resume(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret;
	ktime_t start = ktime_get();

	ret = ufshcd_resume(hba);

	trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	return ret;
}
EXPORT_SYMBOL(ufshcd_runtime_resume);
#endif /* CONFIG_PM */
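/*
 * Shutdown callback for the UFS device WLUN: quiesce all SCSI devices, send
 * the final START STOP UNIT via __ufshcd_wl_suspend(UFS_SHUTDOWN_PM) and, if
 * the device and link are fully off, power down the controller as well.
 */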
static void ufshcd_wl_shutdown(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufs_hba *hba = shost_priv(sdev->host);

	down(&hba->host_sem);
	hba->shutting_down = true;
	up(&hba->host_sem);

	/* Turn on everything while shutting down */
	ufshcd_rpm_get_sync(hba);
	scsi_device_quiesce(sdev);
	shost_for_each_device(sdev, hba->host) {
		if (sdev == hba->ufs_device_wlun)
			continue;
		scsi_device_quiesce(sdev);
	}
	__ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);

	/*
	 * Next, turn off the UFS controller and the UFS regulators. Disable
	 * compensation for unbalanced regulator enable/disable calls.
	 */
	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
		ufshcd_suspend(hba);

	hba->is_powered = false;
}
/**
 * ufshcd_remove - de-allocate SCSI host and host memory space
 *		data structure memory
 * @hba: per adapter instance
 */
void ufshcd_remove(struct ufs_hba *hba)
{
	if (hba->ufs_device_wlun)
		ufshcd_rpm_get_sync(hba);
	ufs_hwmon_remove(hba);
	ufs_bsg_remove(hba);
	ufs_sysfs_remove_nodes(hba->dev);
	blk_mq_destroy_queue(hba->tmf_queue);
	blk_put_queue(hba->tmf_queue);
	blk_mq_free_tag_set(&hba->tmf_tag_set);
	scsi_remove_host(hba->host);
	/* disable interrupts */
	ufshcd_disable_intr(hba, hba->intr_mask);
	ufshcd_hba_stop(hba);
	ufshcd_hba_exit(hba);
}
EXPORT_SYMBOL_GPL(ufshcd_remove);
#ifdef CONFIG_PM_SLEEP
int ufshcd_system_freeze(struct device *dev)
{
	return ufshcd_system_suspend(dev);
}
EXPORT_SYMBOL_GPL(ufshcd_system_freeze);

int ufshcd_system_restore(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret;

	ret = ufshcd_system_resume(dev);
	if (ret)
		return ret;

	/* Configure UTRL and UTMRL base address registers */
	ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
		      REG_UTP_TRANSFER_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
		      REG_UTP_TRANSFER_REQ_LIST_BASE_H);
	ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
		      REG_UTP_TASK_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
		      REG_UTP_TASK_REQ_LIST_BASE_H);
	/*
	 * Make sure that UTRL and UTMRL base address registers
	 * are updated with the latest queue addresses. Only after
	 * updating these addresses, we can queue the new commands.
	 */
	mb();

	/* Resuming from hibernate, assume that link was OFF */
	ufshcd_set_link_off(hba);

	return 0;
}
EXPORT_SYMBOL_GPL(ufshcd_system_restore);

int ufshcd_system_thaw(struct device *dev)
{
	return ufshcd_system_resume(dev);
}
EXPORT_SYMBOL_GPL(ufshcd_system_thaw);
#endif /* CONFIG_PM_SLEEP */
/**
 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
 * @hba: pointer to Host Bus Adapter (HBA)
 */
void ufshcd_dealloc_host(struct ufs_hba *hba)
{
	scsi_host_put(hba->host);
}
EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);

/**
 * ufshcd_set_dma_mask - Set dma mask based on the controller
 *			 addressing capability
 * @hba: per adapter instance
 *
 * Return: 0 for success, non-zero for failure.
 */
static int ufshcd_set_dma_mask(struct ufs_hba *hba)
{
	if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
		if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
			return 0;
	}
	return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
}
/**
 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
 * @dev: pointer to device handle
 * @hba_handle: driver private handle
 *
 * Return: 0 on success, non-zero value on failure.
 */
int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	int err = 0;

	if (!dev) {
		dev_err(dev,
		"Invalid memory reference for dev is NULL\n");
		err = -ENODEV;
		goto out_error;
	}

	host = scsi_host_alloc(&ufshcd_driver_template,
				sizeof(struct ufs_hba));
	if (!host) {
		dev_err(dev, "scsi_host_alloc failed\n");
		err = -ENOMEM;
		goto out_error;
	}
	host->nr_maps = HCTX_TYPE_POLL + 1;
	hba = shost_priv(host);
	hba->host = host;
	hba->dev = dev;
	hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
	hba->nop_out_timeout = NOP_OUT_TIMEOUT;
	ufshcd_set_sg_entry_size(hba, sizeof(struct ufshcd_sg_entry));
	INIT_LIST_HEAD(&hba->clk_list_head);
	spin_lock_init(&hba->outstanding_lock);

	*hba_handle = hba;

out_error:
	return err;
}
EXPORT_SYMBOL(ufshcd_alloc_host);
/* This function exists because blk_mq_alloc_tag_set() requires this. */
static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
				     const struct blk_mq_queue_data *qd)
{
	WARN_ON_ONCE(true);
	return BLK_STS_NOTSUPP;
}

static const struct blk_mq_ops ufshcd_tmf_ops = {
	.queue_rq = ufshcd_queue_tmf,
};
/**
 * ufshcd_init - Driver initialization routine
 * @hba: per-adapter instance
 * @mmio_base: base register address
 * @irq: Interrupt line of device
 *
 * Return: 0 on success, non-zero value on failure.
 */
int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
{
	int err;
	struct Scsi_Host *host = hba->host;
	struct device *dev = hba->dev;
	char eh_wq_name[sizeof("ufs_eh_wq_00")];

	/*
	 * dev_set_drvdata() must be called before any callbacks are registered
	 * that use dev_get_drvdata() (frequency scaling, clock scaling, hwmon,
	 * sysfs).
	 */
	dev_set_drvdata(dev, hba);

	if (!mmio_base) {
		dev_err(hba->dev,
		"Invalid memory reference for mmio_base is NULL\n");
		err = -ENODEV;
		goto out_error;
	}

	hba->mmio_base = mmio_base;
	hba->irq = irq;
	hba->vps = &ufs_hba_vps;

	err = ufshcd_hba_init(hba);
	if (err)
		goto out_error;

	/* Read capabilities registers */
	err = ufshcd_hba_capabilities(hba);
	if (err)
		goto out_disable;

	/* Get UFS version supported by the controller */
	hba->ufs_version = ufshcd_get_ufs_version(hba);

	/* Get Interrupt bit mask per version */
	hba->intr_mask = ufshcd_get_intr_mask(hba);

	err = ufshcd_set_dma_mask(hba);
	if (err) {
		dev_err(hba->dev, "set dma mask failed\n");
		goto out_disable;
	}

	/* Allocate memory for host memory space */
	err = ufshcd_memory_alloc(hba);
	if (err) {
		dev_err(hba->dev, "Memory allocation failed\n");
		goto out_disable;
	}

	/* Configure LRB */
	ufshcd_host_memory_configure(hba);

	host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
	host->cmd_per_lun = hba->nutrs - UFSHCD_NUM_RESERVED;
	host->max_id = UFSHCD_MAX_ID;
	host->max_lun = UFS_MAX_LUNS;
	host->max_channel = UFSHCD_MAX_CHANNEL;
	host->unique_id = host->host_no;
	host->max_cmd_len = UFS_CDB_SIZE;
	host->queuecommand_may_block = !!(hba->caps & UFSHCD_CAP_CLK_GATING);

	hba->max_pwr_info.is_valid = false;

	/* Initialize work queues */
	snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
		 hba->host->host_no);
	hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
	if (!hba->eh_wq) {
		dev_err(hba->dev, "%s: failed to create eh workqueue\n",
			__func__);
		err = -ENOMEM;
		goto out_disable;
	}
	INIT_WORK(&hba->eh_work, ufshcd_err_handler);
	INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);

	sema_init(&hba->host_sem, 1);

	/* Initialize UIC command mutex */
	mutex_init(&hba->uic_cmd_mutex);

	/* Initialize mutex for device management commands */
	mutex_init(&hba->dev_cmd.lock);

	/* Initialize mutex for exception event control */
	mutex_init(&hba->ee_ctrl_mutex);

	mutex_init(&hba->wb_mutex);
	init_rwsem(&hba->clk_scaling_lock);

	ufshcd_init_clk_gating(hba);

	ufshcd_init_clk_scaling(hba);

	/*
	 * In order to avoid any spurious interrupt immediately after
	 * registering UFS controller interrupt handler, clear any pending UFS
	 * interrupt status and disable all the UFS interrupts.
	 */
	ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
		      REG_INTERRUPT_STATUS);
	ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
	/*
	 * Make sure that UFS interrupts are disabled and any pending interrupt
	 * status is cleared before registering UFS interrupt handler.
	 */
	mb();

	/* IRQ registration */
	err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
	if (err) {
		dev_err(hba->dev, "request irq failed\n");
		goto out_disable;
	} else {
		hba->is_irq_enabled = true;
	}

	if (!is_mcq_supported(hba)) {
		err = scsi_add_host(host, hba->dev);
		if (err) {
			dev_err(hba->dev, "scsi_add_host failed\n");
			goto out_disable;
		}
		hba->scsi_host_added = true;
	}

	hba->tmf_tag_set = (struct blk_mq_tag_set) {
		.nr_hw_queues	= 1,
		.queue_depth	= hba->nutmrs,
		.ops		= &ufshcd_tmf_ops,
		.flags		= BLK_MQ_F_NO_SCHED,
	};
	err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
	if (err < 0)
		goto out_remove_scsi_host;
	hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
	if (IS_ERR(hba->tmf_queue)) {
		err = PTR_ERR(hba->tmf_queue);
		goto free_tmf_tag_set;
	}
	hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs,
				    sizeof(*hba->tmf_rqs), GFP_KERNEL);
	if (!hba->tmf_rqs) {
		err = -ENOMEM;
		goto free_tmf_queue;
	}

	/* Reset the attached device */
	ufshcd_device_reset(hba);

	ufshcd_init_crypto(hba);

	/* Host controller enable */
	err = ufshcd_hba_enable(hba);
	if (err) {
		dev_err(hba->dev, "Host controller enable failed\n");
		ufshcd_print_evt_hist(hba);
		ufshcd_print_host_state(hba);
		goto free_tmf_queue;
	}

	/*
	 * Set the default power management level for runtime and system PM.
	 * Default power saving mode is to keep UFS link in Hibern8 state
	 * and UFS device in sleep state.
	 */
	hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
						UFS_SLEEP_PWR_MODE,
						UIC_LINK_HIBERN8_STATE);
	hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
						UFS_SLEEP_PWR_MODE,
						UIC_LINK_HIBERN8_STATE);

	INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work,
			  ufshcd_rpm_dev_flush_recheck_work);

	/* Set the default auto-hiberate idle timer value to 150 ms */
	if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
		hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
			    FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
	}

	/* Hold auto suspend until async scan completes */
	pm_runtime_get_sync(dev);
	atomic_set(&hba->scsi_block_reqs_cnt, 0);
	/*
	 * We are assuming that device wasn't put in sleep/power-down
	 * state exclusively during the boot stage before kernel.
	 * This assumption helps avoid doing link startup twice during
	 * ufshcd_probe_hba().
	 */
	ufshcd_set_ufs_dev_active(hba);

	async_schedule(ufshcd_async_scan, hba);
	ufs_sysfs_add_nodes(hba->dev);

	device_enable_async_suspend(dev);
	return 0;

free_tmf_queue:
	blk_mq_destroy_queue(hba->tmf_queue);
	blk_put_queue(hba->tmf_queue);
free_tmf_tag_set:
	blk_mq_free_tag_set(&hba->tmf_tag_set);
out_remove_scsi_host:
	scsi_remove_host(hba->host);
out_disable:
	hba->is_irq_enabled = false;
	ufshcd_hba_exit(hba);
out_error:
	return err;
}
EXPORT_SYMBOL_GPL(ufshcd_init);
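/*
 * Undo the runtime-PM reference taken in __ufshcd_suspend_prepare(); the PM
 * core calls this ->complete() hook after a system resume (or an aborted
 * system suspend).
 */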
void ufshcd_resume_complete(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	if (hba->complete_put) {
		ufshcd_rpm_put(hba);
		hba->complete_put = false;
	}
}
EXPORT_SYMBOL_GPL(ufshcd_resume_complete);
static bool ufshcd_rpm_ok_for_spm(struct ufs_hba *hba)
{
	struct device *dev = &hba->ufs_device_wlun->sdev_gendev;
	enum ufs_dev_pwr_mode dev_pwr_mode;
	enum uic_link_state link_state;
	unsigned long flags;
	bool res;

	spin_lock_irqsave(&dev->power.lock, flags);
	dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl);
	link_state = ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl);
	res = pm_runtime_suspended(dev) &&
	      hba->curr_dev_pwr_mode == dev_pwr_mode &&
	      hba->uic_link_state == link_state &&
	      !hba->dev_info.b_rpm_dev_flush_capable;
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return res;
}
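/*
 * Called from the ->prepare() PM phase. If the WLUN is runtime suspended in
 * a state that does not match what system suspend would use, resume it
 * first; a runtime-PM reference is held until ufshcd_resume_complete().
 */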
int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret;

	/*
	 * SCSI assumes that runtime-pm and system-pm for scsi drivers
	 * are same. And it doesn't wake up the device for system-suspend
	 * if it's runtime suspended. But ufs doesn't follow that.
	 * Refer ufshcd_resume_complete()
	 */
	if (hba->ufs_device_wlun) {
		/* Prevent runtime suspend */
		ufshcd_rpm_get_noresume(hba);
		/*
		 * Check if already runtime suspended in same state as system
		 * suspend would be.
		 */
		if (!rpm_ok_for_spm || !ufshcd_rpm_ok_for_spm(hba)) {
			/* RPM state is not ok for SPM, so runtime resume */
			ret = ufshcd_rpm_resume(hba);
			if (ret < 0 && ret != -EACCES) {
				ufshcd_rpm_put(hba);
				return ret;
			}
		}
		hba->complete_put = true;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__ufshcd_suspend_prepare);

int ufshcd_suspend_prepare(struct device *dev)
{
	return __ufshcd_suspend_prepare(dev, true);
}
EXPORT_SYMBOL_GPL(ufshcd_suspend_prepare);
#ifdef CONFIG_PM_SLEEP
static int ufshcd_wl_poweroff(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufs_hba *hba = shost_priv(sdev->host);

	__ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
	return 0;
}
#endif

static int ufshcd_wl_probe(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	if (!is_device_wlun(sdev))
		return -ENODEV;

	blk_pm_runtime_init(sdev->request_queue, dev);
	pm_runtime_set_autosuspend_delay(dev, 0);
	pm_runtime_allow(dev);

	return 0;
}

static int ufshcd_wl_remove(struct device *dev)
{
	pm_runtime_forbid(dev);
	return 0;
}
static const struct dev_pm_ops ufshcd_wl_pm_ops = {
#ifdef CONFIG_PM_SLEEP
	.suspend = ufshcd_wl_suspend,
	.resume = ufshcd_wl_resume,
	.freeze = ufshcd_wl_suspend,
	.thaw = ufshcd_wl_resume,
	.poweroff = ufshcd_wl_poweroff,
	.restore = ufshcd_wl_resume,
#endif
	SET_RUNTIME_PM_OPS(ufshcd_wl_runtime_suspend, ufshcd_wl_runtime_resume, NULL)
};
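/*
 * Compile-time check that the C bitfields in struct request_desc_header and
 * struct utp_upiu_header land on the byte/bit positions mandated by the
 * UFSHCI layout, e.g. (struct request_desc_header){ .ocs = 4 } must place the
 * value in byte 8. Everything folds away at compile time; the function
 * generates no code.
 */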
static void ufshcd_check_header_layout(void)
{
	/*
	 * gcc compilers before version 10 cannot do constant-folding for
	 * sub-byte bitfields. Hence skip the layout checks for gcc 9 and
	 * earlier.
	 */
	if (IS_ENABLED(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 100000)
		return;

	BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
				.cci = 3})[0] != 3);

	BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
				.ehs_length = 2})[1] != 2);

	BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
				.enable_crypto = 1})[2]
		     != 0x80);

	BUILD_BUG_ON((((u8 *)&(struct request_desc_header){
					.command_type = 5,
					.data_direction = 3,
					.interrupt = 1,
				})[3]) != ((5 << 4) | (3 << 1) | 1));

	BUILD_BUG_ON(((__le32 *)&(struct request_desc_header){
				.dunl = cpu_to_le32(0xdeadbeef)})[1] !=
		cpu_to_le32(0xdeadbeef));

	BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
				.ocs = 4})[8] != 4);

	BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
				.cds = 5})[9] != 5);

	BUILD_BUG_ON(((__le32 *)&(struct request_desc_header){
				.dunu = cpu_to_le32(0xbadcafe)})[3] !=
		cpu_to_le32(0xbadcafe));

	BUILD_BUG_ON(((u8 *)&(struct utp_upiu_header){
			     .iid = 0xf })[4] != 0xf0);

	BUILD_BUG_ON(((u8 *)&(struct utp_upiu_header){
			     .command_set_type = 0xf })[4] != 0xf);
}
/**
 * ufs_dev_wlun_template - describes ufs device wlun
 * ufs-device wlun - used to send pm commands
 * All luns are consumers of ufs-device wlun.
 *
 * Currently, no sd driver is present for wluns.
 * Hence no specific pm operations are performed.
 * With ufs design, SSU should be sent to ufs-device wlun.
 * Hence register a scsi driver for ufs wluns only.
 */
static struct scsi_driver ufs_dev_wlun_template = {
	.gendrv = {
		.name = "ufs_device_wlun",
		.owner = THIS_MODULE,
		.probe = ufshcd_wl_probe,
		.remove = ufshcd_wl_remove,
		.pm = &ufshcd_wl_pm_ops,
		.shutdown = ufshcd_wl_shutdown,
	},
};

static int __init ufshcd_core_init(void)
{
	int ret;

	ufshcd_check_header_layout();

	ufs_debugfs_init();

	ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv);
	if (ret)
		ufs_debugfs_exit();
	return ret;
}

static void __exit ufshcd_core_exit(void)
{
	ufs_debugfs_exit();
	scsi_unregister_driver(&ufs_dev_wlun_template.gendrv);
}

module_init(ufshcd_core_init);
module_exit(ufshcd_core_exit);

MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("Generic UFS host controller driver Core");
MODULE_SOFTDEP("pre: governor_simpleondemand");
MODULE_LICENSE("GPL");