// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Universal Flash Storage Host controller driver Core
 * Copyright (C) 2011-2013 Samsung India Software Operations
 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 */

#include <linux/async.h>
#include <linux/devfreq.h>
#include <linux/nls.h>
#include <linux/bitfield.h>
#include <linux/blk-pm.h>
#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_opp.h>
#include <linux/regulator/consumer.h>
#include <linux/sched/clock.h>
#include <linux/iopoll.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include "ufshcd-priv.h"
#include <ufs/ufs_quirks.h>
#include <ufs/unipro.h>
#include "ufs-sysfs.h"
#include "ufs-debugfs.h"
#include "ufs-fault-injection.h"
#include "ufshcd-crypto.h"
#include <asm/unaligned.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>
#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)

#define UFSHCD_ENABLE_MCQ_INTRS	(UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK |\
				 MCQ_CQ_EVENT_STATUS)
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT	500

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES    10
/* Timeout after 50 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT    50 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES 3
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */

/* Advanced RPMB request timeout */
#define ADVANCED_RPMB_REQ_TIMEOUT  3000 /* 3 seconds */

/* Task management command timeout */
#define TM_CMD_TIMEOUT	100 /* msecs */

/* maximum number of retries for a general UIC command */
#define UFS_UIC_COMMAND_RETRIES 3

/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3

/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5

/* Maximum number of error handler retries before giving up */
#define MAX_ERR_HANDLER_RETRIES 5

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO	0x02
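/*
 * Worked example: the aggregation timeout field is in units of 40 us, so the
 * default of 0x02 corresponds to 2 * 40 us = 80 us before an aggregated
 * interrupt is raised.
 */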
/* default delay of autosuspend: 2000 ms */
#define RPM_AUTOSUSPEND_DELAY_MS 2000

/* Default delay of RPM device flush delayed work */
#define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS 5000

/* Default value of wait time before gating device ref clock */
#define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */

/* Polling time to wait for fDeviceInit */
#define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */

/* Default RTC update every 10 seconds */
#define UFS_RTC_UPDATE_INTERVAL_MS (10 * MSEC_PER_SEC)
/* UFSHC 4.0 compliant HC support this mode. */
static bool use_mcq_mode = true;

static bool is_mcq_supported(struct ufs_hba *hba)
{
	return hba->mcq_sup && use_mcq_mode;
}

module_param(use_mcq_mode, bool, 0644);
MODULE_PARM_DESC(use_mcq_mode, "Control MCQ mode for controllers starting from UFSHCI 4.0. 1 - enable MCQ, 0 - disable MCQ. MCQ is enabled by default");
#define ufshcd_toggle_vreg(_dev, _vreg, _on)				\
	({                                                              \
		int _ret;                                               \
		if (_on)                                                \
			_ret = ufshcd_enable_vreg(_dev, _vreg);         \
		else                                                    \
			_ret = ufshcd_disable_vreg(_dev, _vreg);        \
		_ret;                                                   \
	})
#define ufshcd_hex_dump(prefix_str, buf, len) do {                       \
	size_t __len = (len);                                            \
	print_hex_dump(KERN_ERR, prefix_str,                             \
		       __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
		       16, 4, buf, __len, false);                        \
} while (0)
int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
		     const char *prefix)
{
	u32 *regs;
	size_t pos;

	if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
		return -EINVAL;

	regs = kzalloc(len, GFP_ATOMIC);
	if (!regs)
		return -ENOMEM;

	for (pos = 0; pos < len; pos += 4) {
		if (offset == 0 &&
		    pos >= REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER &&
		    pos <= REG_UIC_ERROR_CODE_DME)
			continue;
		regs[pos / 4] = ufshcd_readl(hba, offset + pos);
	}

	ufshcd_hex_dump(prefix, regs, len);
	kfree(regs);

	return 0;
}
EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
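/*
 * Example (illustrative): callers typically dump the whole UFSHCI register
 * window on a serious error, as ufshcd_print_evt_hist() below does with
 *	ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
 */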
enum {
	UFSHCD_MAX_CHANNEL	= 0,
	UFSHCD_MAX_ID		= 1,
	UFSHCD_CMD_PER_LUN	= 32 - UFSHCD_NUM_RESERVED,
	UFSHCD_CAN_QUEUE	= 32 - UFSHCD_NUM_RESERVED,
};
static const char *const ufshcd_state_name[] = {
	[UFSHCD_STATE_RESET]			= "reset",
	[UFSHCD_STATE_OPERATIONAL]		= "operational",
	[UFSHCD_STATE_ERROR]			= "error",
	[UFSHCD_STATE_EH_SCHEDULED_FATAL]	= "eh_fatal",
	[UFSHCD_STATE_EH_SCHEDULED_NON_FATAL]	= "eh_non_fatal",
};
/* UFSHCD error handling flags */
enum {
	UFSHCD_EH_IN_PROGRESS = (1 << 0),
};
/* UFSHCD UIC layer error flags */
enum {
	UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
	UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
	UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
	UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
	UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
	UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
	UFSHCD_UIC_PA_GENERIC_ERROR = (1 << 6), /* Generic PA error */
};
#define ufshcd_set_eh_in_progress(h) \
	((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
	((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
	((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
const struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
	[UFS_PM_LVL_0] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	[UFS_PM_LVL_1] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	[UFS_PM_LVL_2] = {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	[UFS_PM_LVL_3] = {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	[UFS_PM_LVL_4] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	[UFS_PM_LVL_5] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
	/*
	 * For DeepSleep, the link is first put in hibern8 and then off.
	 * Leaving the link in hibern8 is not supported.
	 */
	[UFS_PM_LVL_6] = {UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE},
};
static inline enum ufs_dev_pwr_mode
ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].dev_state;
}

static inline enum uic_link_state
ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].link_state;
}
static inline enum ufs_pm_level
ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
					enum uic_link_state link_state)
{
	enum ufs_pm_level lvl;

	for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
		if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
			(ufs_pm_lvl_states[lvl].link_state == link_state))
			return lvl;
	}

	/* if no match found, return the level 0 */
	return UFS_PM_LVL_0;
}
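/*
 * Example: looking up (UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE) in
 * ufs_pm_lvl_states[] above yields UFS_PM_LVL_3.
 */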
static bool ufshcd_is_ufs_dev_busy(struct ufs_hba *hba)
{
	return (hba->clk_gating.active_reqs || hba->outstanding_reqs || hba->outstanding_tasks ||
		hba->active_uic_cmd || hba->uic_async_done);
}
static const struct ufs_dev_quirk ufs_fixups[] = {
	/* UFS cards deviations table */
	{ .wmanufacturerid = UFS_VENDOR_MICRON,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
	{ .wmanufacturerid = UFS_VENDOR_SAMSUNG,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
		   UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
		   UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS },
	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME },
	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
	  .model = "hB8aL1" /*H28U62301AMR*/,
	  .quirk = UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME },
	{ .wmanufacturerid = UFS_VENDOR_TOSHIBA,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
	{ .wmanufacturerid = UFS_VENDOR_TOSHIBA,
	  .model = "THGLF2G9C8KBADG",
	  .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
	{ .wmanufacturerid = UFS_VENDOR_TOSHIBA,
	  .model = "THGLF2G9D8KBADG",
	  .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
	{}
};
static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
			     bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
			     struct ufs_pa_layer_attr *pwr_mode);
static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
					 struct ufs_vreg *vreg);
static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
						 bool enable);
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
void ufshcd_enable_irq(struct ufs_hba *hba)
{
	if (!hba->is_irq_enabled) {
		enable_irq(hba->irq);
		hba->is_irq_enabled = true;
	}
}
EXPORT_SYMBOL_GPL(ufshcd_enable_irq);

void ufshcd_disable_irq(struct ufs_hba *hba)
{
	if (hba->is_irq_enabled) {
		disable_irq(hba->irq);
		hba->is_irq_enabled = false;
	}
}
EXPORT_SYMBOL_GPL(ufshcd_disable_irq);
static void ufshcd_configure_wb(struct ufs_hba *hba)
{
	if (!ufshcd_is_wb_allowed(hba))
		return;

	ufshcd_wb_toggle(hba, true);

	ufshcd_wb_toggle_buf_flush_during_h8(hba, true);

	if (ufshcd_is_wb_buf_flush_allowed(hba))
		ufshcd_wb_toggle_buf_flush(hba, true);
}
static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
{
	if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
		scsi_unblock_requests(hba->host);
}

static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
{
	if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
		scsi_block_requests(hba->host);
}
static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
				      enum ufs_trace_str_t str_t)
{
	struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
	struct utp_upiu_header *header;

	if (!trace_ufshcd_upiu_enabled())
		return;

	if (str_t == UFS_CMD_SEND)
		header = &rq->header;
	else
		header = &hba->lrb[tag].ucd_rsp_ptr->header;

	trace_ufshcd_upiu(dev_name(hba->dev), str_t, header, &rq->sc.cdb,
			  UFS_TSF_CDB);
}
static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba,
					enum ufs_trace_str_t str_t,
					struct utp_upiu_req *rq_rsp)
{
	if (!trace_ufshcd_upiu_enabled())
		return;

	trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq_rsp->header,
			  &rq_rsp->qr, UFS_TSF_OSF);
}
static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
				     enum ufs_trace_str_t str_t)
{
	struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag];

	if (!trace_ufshcd_upiu_enabled())
		return;

	if (str_t == UFS_TM_SEND)
		trace_ufshcd_upiu(dev_name(hba->dev), str_t,
				  &descp->upiu_req.req_header,
				  &descp->upiu_req.input_param1,
				  UFS_TSF_TM_INPUT);
	else
		trace_ufshcd_upiu(dev_name(hba->dev), str_t,
				  &descp->upiu_rsp.rsp_header,
				  &descp->upiu_rsp.output_param1,
				  UFS_TSF_TM_OUTPUT);
}
static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
					 const struct uic_command *ucmd,
					 enum ufs_trace_str_t str_t)
{
	u32 cmd;

	if (!trace_ufshcd_uic_command_enabled())
		return;

	if (str_t == UFS_CMD_SEND)
		cmd = ucmd->command;
	else
		cmd = ufshcd_readl(hba, REG_UIC_COMMAND);

	trace_ufshcd_uic_command(dev_name(hba->dev), str_t, cmd,
				 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1),
				 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2),
				 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
}
static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
				     enum ufs_trace_str_t str_t)
{
	u64 lba = 0;
	u8 opcode = 0, group_id = 0;
	u32 intr, doorbell = 0;
	int hwq_id = -1;
	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
	struct scsi_cmnd *cmd = lrbp->cmd;
	struct request *rq = scsi_cmd_to_rq(cmd);
	int transfer_len = -1;

	if (!cmd)
		return;

	/* trace UPIU also */
	ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
	if (!trace_ufshcd_command_enabled())
		return;

	opcode = cmd->cmnd[0];

	if (opcode == READ_10 || opcode == WRITE_10) {
		/*
		 * Currently we only fully trace read(10) and write(10) commands
		 */
		transfer_len =
			be32_to_cpu(lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
		lba = scsi_get_lba(cmd);
		if (opcode == WRITE_10)
			group_id = lrbp->cmd->cmnd[6];
	} else if (opcode == UNMAP) {
		/*
		 * The number of Bytes to be unmapped beginning with the lba.
		 */
		transfer_len = blk_rq_bytes(rq);
		lba = scsi_get_lba(cmd);
	}

	intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);

	if (is_mcq_enabled(hba)) {
		struct ufs_hw_queue *hwq = ufshcd_mcq_req_to_hwq(hba, rq);

		hwq_id = hwq->id;
	} else {
		doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	}
	trace_ufshcd_command(cmd->device, str_t, tag, doorbell, hwq_id,
			     transfer_len, intr, lba, opcode, group_id);
}
static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
{
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		return;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
				clki->max_freq)
			dev_err(hba->dev, "clk: %s, rate: %u\n",
					clki->name, clki->curr_freq);
	}
}
static void ufshcd_print_evt(struct ufs_hba *hba, u32 id,
			     const char *err_name)
{
	int i;
	bool found = false;
	const struct ufs_event_hist *e;

	if (id >= UFS_EVT_CNT)
		return;

	e = &hba->ufs_stats.event[id];

	for (i = 0; i < UFS_EVENT_HIST_LENGTH; i++) {
		int p = (i + e->pos) % UFS_EVENT_HIST_LENGTH;

		if (e->tstamp[p] == 0)
			continue;
		dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
			e->val[p], div_u64(e->tstamp[p], 1000));
		found = true;
	}

	if (!found)
		dev_err(hba->dev, "No record of %s\n", err_name);
	else
		dev_err(hba->dev, "%s: total cnt=%llu\n", err_name, e->cnt);
}
static void ufshcd_print_evt_hist(struct ufs_hba *hba)
{
	ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");

	ufshcd_print_evt(hba, UFS_EVT_PA_ERR, "pa_err");
	ufshcd_print_evt(hba, UFS_EVT_DL_ERR, "dl_err");
	ufshcd_print_evt(hba, UFS_EVT_NL_ERR, "nl_err");
	ufshcd_print_evt(hba, UFS_EVT_TL_ERR, "tl_err");
	ufshcd_print_evt(hba, UFS_EVT_DME_ERR, "dme_err");
	ufshcd_print_evt(hba, UFS_EVT_AUTO_HIBERN8_ERR,
			 "auto_hibern8_err");
	ufshcd_print_evt(hba, UFS_EVT_FATAL_ERR, "fatal_err");
	ufshcd_print_evt(hba, UFS_EVT_LINK_STARTUP_FAIL,
			 "link_startup_fail");
	ufshcd_print_evt(hba, UFS_EVT_RESUME_ERR, "resume_fail");
	ufshcd_print_evt(hba, UFS_EVT_SUSPEND_ERR,
			 "suspend_fail");
	ufshcd_print_evt(hba, UFS_EVT_WL_RES_ERR, "wlun resume_fail");
	ufshcd_print_evt(hba, UFS_EVT_WL_SUSP_ERR,
			 "wlun suspend_fail");
	ufshcd_print_evt(hba, UFS_EVT_DEV_RESET, "dev_reset");
	ufshcd_print_evt(hba, UFS_EVT_HOST_RESET, "host_reset");
	ufshcd_print_evt(hba, UFS_EVT_ABORT, "task_abort");

	ufshcd_vops_dbg_register_dump(hba);
}
static void ufshcd_print_tr(struct ufs_hba *hba, int tag, bool pr_prdt)
{
	const struct ufshcd_lrb *lrbp;
	int prdt_length;

	lrbp = &hba->lrb[tag];

	dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
			tag, div_u64(lrbp->issue_time_stamp_local_clock, 1000));
	dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
			tag, div_u64(lrbp->compl_time_stamp_local_clock, 1000));
	dev_err(hba->dev,
		"UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
		tag, (u64)lrbp->utrd_dma_addr);

	ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
			sizeof(struct utp_transfer_req_desc));
	dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
		(u64)lrbp->ucd_req_dma_addr);
	ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
			sizeof(struct utp_upiu_req));
	dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
		(u64)lrbp->ucd_rsp_dma_addr);
	ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
			sizeof(struct utp_upiu_rsp));

	prdt_length = le16_to_cpu(
		lrbp->utr_descriptor_ptr->prd_table_length);
	if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
		prdt_length /= ufshcd_sg_entry_size(hba);

	dev_err(hba->dev,
		"UPIU[%d] - PRDT - %d entries  phys@0x%llx\n",
		tag, prdt_length,
		(u64)lrbp->ucd_prdt_dma_addr);

	if (pr_prdt)
		ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
			ufshcd_sg_entry_size(hba) * prdt_length);
}
static bool ufshcd_print_tr_iter(struct request *req, void *priv)
{
	struct scsi_device *sdev = req->q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	struct ufs_hba *hba = shost_priv(shost);

	ufshcd_print_tr(hba, req->tag, *(bool *)priv);

	return true;
}
/**
 * ufshcd_print_trs_all - print trs for all started requests.
 * @hba: per-adapter instance.
 * @pr_prdt: need to print prdt or not.
 */
static void ufshcd_print_trs_all(struct ufs_hba *hba, bool pr_prdt)
{
	blk_mq_tagset_busy_iter(&hba->host->tag_set, ufshcd_print_tr_iter, &pr_prdt);
}
static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
{
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutmrs) {
		struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];

		dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
		ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
	}
}
static void ufshcd_print_host_state(struct ufs_hba *hba)
{
	const struct scsi_device *sdev_ufs = hba->ufs_device_wlun;

	dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
	dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
		hba->outstanding_reqs, hba->outstanding_tasks);
	dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
		hba->saved_err, hba->saved_uic_err);
	dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
		hba->pm_op_in_progress, hba->is_sys_suspended);
	dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
		hba->auto_bkops_enabled, hba->host->host_self_blocked);
	dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
	dev_err(hba->dev,
		"last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n",
		div_u64(hba->ufs_stats.last_hibern8_exit_tstamp, 1000),
		hba->ufs_stats.hibern8_exit_cnt);
	dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n",
		div_u64(hba->ufs_stats.last_intr_ts, 1000),
		hba->ufs_stats.last_intr_status);
	dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
		hba->eh_flags, hba->req_abort_count);
	dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n",
		hba->ufs_version, hba->capabilities, hba->caps);
	dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
		hba->dev_quirks);
	if (sdev_ufs)
		dev_err(hba->dev, "UFS dev info: %.8s %.16s rev %.4s\n",
			sdev_ufs->vendor, sdev_ufs->model, sdev_ufs->rev);

	ufshcd_print_clk_freqs(hba);
}
/**
 * ufshcd_print_pwr_info - print power params as saved in hba
 * power info
 * @hba: per-adapter instance
 */
static void ufshcd_print_pwr_info(struct ufs_hba *hba)
{
	static const char * const names[] = {
		"INVALID MODE",
		"FAST MODE",
		"SLOW_MODE",
		"INVALID MODE",
		"FASTAUTO_MODE",
		"SLOWAUTO_MODE",
		"INVALID MODE",
	};

	/*
	 * Using dev_dbg to avoid messages during runtime PM to avoid
	 * never-ending cycles of messages written back to storage by user space
	 * causing runtime resume, causing more messages and so on.
	 */
	dev_dbg(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
		 __func__,
		 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
		 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
		 names[hba->pwr_info.pwr_rx],
		 names[hba->pwr_info.pwr_tx],
		 hba->pwr_info.hs_rate);
}
static void ufshcd_device_reset(struct ufs_hba *hba)
{
	int err;

	err = ufshcd_vops_device_reset(hba);

	if (!err) {
		ufshcd_set_ufs_dev_active(hba);
		if (ufshcd_is_wb_allowed(hba)) {
			hba->dev_info.wb_enabled = false;
			hba->dev_info.wb_buf_flush_enabled = false;
		}
		if (hba->dev_info.rtc_type == UFS_RTC_RELATIVE)
			hba->dev_info.rtc_time_baseline = 0;
	}
	if (err != -EOPNOTSUPP)
		ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err);
}
void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
{
	if (!us)
		return;

	if (us < 10)
		udelay(us);
	else
		usleep_range(us, us + tolerance);
}
EXPORT_SYMBOL_GPL(ufshcd_delay_us);
/**
 * ufshcd_wait_for_register - wait for register value to change
 * @hba: per-adapter interface
 * @reg: mmio register offset
 * @mask: mask to apply to the read register value
 * @val: value to wait for
 * @interval_us: polling interval in microseconds
 * @timeout_ms: timeout in milliseconds
 *
 * Return: -ETIMEDOUT on error, zero on success.
 */
static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
				u32 val, unsigned long interval_us,
				unsigned long timeout_ms)
{
	int err = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	/* ignore bits that we don't intend to wait on */
	val = val & mask;

	while ((ufshcd_readl(hba, reg) & mask) != val) {
		usleep_range(interval_us, interval_us + 50);
		if (time_after(jiffies, timeout)) {
			if ((ufshcd_readl(hba, reg) & mask) != val)
				err = -ETIMEDOUT;
			break;
		}
	}

	return err;
}
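/*
 * Illustrative usage (hypothetical values, equivalent to how the clear paths
 * use this helper): poll until a transfer-request doorbell bit drops to 0,
 * sleeping ~100 us between reads and giving up after one second:
 *
 *	err = ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
 *				       1U << tag, 0, 100, 1000);
 */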
/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 * @hba: Pointer to adapter instance
 *
 * Return: interrupt bit mask per version
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
	if (hba->ufs_version == ufshci_version(1, 0))
		return INTERRUPT_MASK_ALL_VER_10;
	if (hba->ufs_version <= ufshci_version(2, 0))
		return INTERRUPT_MASK_ALL_VER_11;

	return INTERRUPT_MASK_ALL_VER_21;
}
/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba: Pointer to adapter instance
 *
 * Return: UFSHCI version supported by the controller
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	u32 ufshci_ver;

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
		ufshci_ver = ufshcd_vops_get_ufs_hci_version(hba);
	else
		ufshci_ver = ufshcd_readl(hba, REG_UFS_VERSION);

	/*
	 * UFSHCI v1.x uses a different version scheme, in order
	 * to allow the use of comparisons with the ufshci_version
	 * function, we convert it to the same scheme as ufs 2.0+.
	 */
	if (ufshci_ver & 0x00010000)
		return ufshci_version(1, ufshci_ver & 0x00000100);

	return ufshci_ver;
}
/**
 * ufshcd_is_device_present - Check if any device connected to
 *			      the host controller
 * @hba: pointer to adapter instance
 *
 * Return: true if device present, false if no device detected
 */
static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & DEVICE_PRESENT;
}
/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @lrbp: pointer to local command reference block
 * @cqe: pointer to the completion queue entry
 *
 * This function is used to get the OCS field from UTRD
 *
 * Return: the OCS field in the UTRD.
 */
static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp,
				      struct cq_entry *cqe)
{
	if (cqe)
		return le32_to_cpu(cqe->status) & MASK_OCS;

	return lrbp->utr_descriptor_ptr->header.ocs & MASK_OCS;
}
/**
 * ufshcd_utrl_clear() - Clear requests from the controller request list.
 * @hba: per adapter instance
 * @mask: mask with one bit set for each request to be cleared
 */
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 mask)
{
	if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
		mask = ~mask;
	/*
	 * From the UFSHCI specification: "UTP Transfer Request List CLear
	 * Register (UTRLCLR): This field is bit significant. Each bit
	 * corresponds to a slot in the UTP Transfer Request List, where bit 0
	 * corresponds to request slot 0. A bit in this field is set to '0'
	 * by host software to indicate to the host controller that a transfer
	 * request slot is cleared. The host controller
	 * shall free up any resources associated to the request slot
	 * immediately, and shall set the associated bit in UTRLDBR to '0'. The
	 * host software indicates no change to request slots by setting the
	 * associated bits in this field to '1'. Bits in this field shall only
	 * be set '1' or '0' by host software when UTRLRSR is set to '1'."
	 */
	ufshcd_writel(hba, ~mask, REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}
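/*
 * Worked example: to clear request slot 5 only, mask = BIT(5), so the value
 * written is ~mask = 0xffffffdf - a '0' in bit 5 ("clear this slot") and '1'
 * ("no change") everywhere else, per the spec text quoted above.
 */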
/**
 * ufshcd_utmrl_clear - Clear a bit in UTMRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
{
	if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
		ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
	else
		ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
}
/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Return: 0 on success; a positive value if failed.
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
	return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
}
/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 * @hba: Pointer to adapter instance
 *
 * This function gets the result of UIC command completion
 *
 * Return: 0 on success; non-zero value on error.
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
	       MASK_UIC_COMMAND_RESULT;
}
/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 * @hba: Pointer to adapter instance
 *
 * This function gets UIC command argument3
 *
 * Return: 0 on success; non-zero value on error.
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}
/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * Return: UPIU type.
 */
static inline enum upiu_response_transaction
ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return ucd_rsp_ptr->header.transaction_code;
}
/**
 * ufshcd_is_exception_event - Check if the device raised an exception event
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * The function checks if the device raised an exception event indicated in
 * the Device Information field of response UPIU.
 *
 * Return: true if exception is raised, false otherwise.
 */
static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return ucd_rsp_ptr->header.device_information & 1;
}
/**
 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 * @hba: per adapter instance
 */
static inline void
ufshcd_reset_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE |
		      INT_AGGR_COUNTER_AND_TIMER_RESET,
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}
/**
 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 * @hba: per adapter instance
 * @cnt: Interrupt aggregation counter threshold
 * @tmout: Interrupt aggregation timeout value
 */
static inline void
ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
		      INT_AGGR_COUNTER_THLD_VAL(cnt) |
		      INT_AGGR_TIMEOUT_VAL(tmout),
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}
/**
 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 * @hba: per adapter instance
 */
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}
/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers,
 *			When run-stop registers are set to 1, it indicates the
 *			host controller that it can process the requests
 * @hba: per adapter instance
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}
/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	u32 val = CONTROLLER_ENABLE;

	if (ufshcd_crypto_enable(hba))
		val |= CRYPTO_GENERAL_ENABLE;

	ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
}
/**
 * ufshcd_is_hba_active - Get controller state
 * @hba: per adapter instance
 *
 * Return: true if and only if the controller is active.
 */
bool ufshcd_is_hba_active(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE;
}
EXPORT_SYMBOL_GPL(ufshcd_is_hba_active);
u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
{
	/* HCI version 1.0 and 1.1 supports UniPro 1.41 */
	if (hba->ufs_version <= ufshci_version(1, 1))
		return UFS_UNIPRO_VER_1_41;
	else
		return UFS_UNIPRO_VER_1_6;
}
EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
{
	/*
	 * If both host and device support UniPro ver1.6 or later, PA layer
	 * parameters tuning happens during link startup itself.
	 *
	 * We can manually tune PA layer parameters if either host or device
	 * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
	 * logic simple, we will only do manual tuning if local unipro version
	 * doesn't support ver1.6 or later.
	 */
	return ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6;
}
/**
 * ufshcd_set_clk_freq - set UFS controller clock frequencies
 * @hba: per adapter instance
 * @scale_up: If True, set max possible frequency otherwise set low frequency
 *
 * Return: 0 if successful; < 0 upon failure.
 */
static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		goto out;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (scale_up && clki->max_freq) {
				if (clki->curr_freq == clki->max_freq)
					continue;

				ret = clk_set_rate(clki->clk, clki->max_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->max_freq, ret);
					break;
				}
				trace_ufshcd_clk_scaling(dev_name(hba->dev),
						"scaled up", clki->name,
						clki->curr_freq,
						clki->max_freq);

				clki->curr_freq = clki->max_freq;

			} else if (!scale_up && clki->min_freq) {
				if (clki->curr_freq == clki->min_freq)
					continue;

				ret = clk_set_rate(clki->clk, clki->min_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->min_freq, ret);
					break;
				}
				trace_ufshcd_clk_scaling(dev_name(hba->dev),
						"scaled down", clki->name,
						clki->curr_freq,
						clki->min_freq);
				clki->curr_freq = clki->min_freq;
			}
		}
		dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
				clki->name, clk_get_rate(clki->clk));
	}

out:
	return ret;
}
int ufshcd_opp_config_clks(struct device *dev, struct opp_table *opp_table,
			   struct dev_pm_opp *opp, void *data,
			   bool scaling_down)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct list_head *head = &hba->clk_list_head;
	struct ufs_clk_info *clki;
	unsigned long freq;
	u8 idx = 0;
	int ret;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			freq = dev_pm_opp_get_freq_indexed(opp, idx++);

			/* Do not set rate for clocks having frequency as 0 */
			if (!freq)
				continue;

			ret = clk_set_rate(clki->clk, freq);
			if (ret) {
				dev_err(dev, "%s: %s clk set rate(%ldHz) failed, %d\n",
					__func__, clki->name, freq, ret);
				return ret;
			}

			trace_ufshcd_clk_scaling(dev_name(dev),
				(scaling_down ? "scaled down" : "scaled up"),
				clki->name, hba->clk_scaling.target_freq, freq);
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ufshcd_opp_config_clks);
static int ufshcd_opp_set_rate(struct ufs_hba *hba, unsigned long freq)
{
	struct dev_pm_opp *opp;
	int ret;

	opp = dev_pm_opp_find_freq_floor_indexed(hba->dev,
						 &freq, 0);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	ret = dev_pm_opp_set_opp(hba->dev, opp);
	dev_pm_opp_put(opp);

	return ret;
}
/**
 * ufshcd_scale_clks - scale up or scale down UFS controller clocks
 * @hba: per adapter instance
 * @freq: frequency to scale
 * @scale_up: True if scaling up and false if scaling down
 *
 * Return: 0 if successful; < 0 upon failure.
 */
static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
			     bool scale_up)
{
	int ret = 0;
	ktime_t start = ktime_get();

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
	if (ret)
		goto out;

	if (hba->use_pm_opp)
		ret = ufshcd_opp_set_rate(hba, freq);
	else
		ret = ufshcd_set_clk_freq(hba, scale_up);
	if (ret)
		goto out;

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
	if (ret) {
		if (hba->use_pm_opp)
			ufshcd_opp_set_rate(hba,
					    hba->devfreq->previous_freq);
		else
			ufshcd_set_clk_freq(hba, !scale_up);
	}

out:
	trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
			(scale_up ? "up" : "down"),
			ktime_to_us(ktime_sub(ktime_get(), start)), ret);
	return ret;
}
/**
 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
 * @hba: per adapter instance
 * @freq: frequency to scale
 * @scale_up: True if scaling up and false if scaling down
 *
 * Return: true if scaling is required, false otherwise.
 */
static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
					       unsigned long freq, bool scale_up)
{
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		return false;

	if (hba->use_pm_opp)
		return freq != hba->clk_scaling.target_freq;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (scale_up && clki->max_freq) {
				if (clki->curr_freq == clki->max_freq)
					continue;
				return true;
			} else if (!scale_up && clki->min_freq) {
				if (clki->curr_freq == clki->min_freq)
					continue;
				return true;
			}
		}
	}

	return false;
}
/*
 * Determine the number of pending commands by counting the bits in the SCSI
 * device budget maps. This approach has been selected because a bit is set in
 * the budget map before scsi_host_queue_ready() checks the host_self_blocked
 * flag. The host_self_blocked flag can be modified by calling
 * scsi_block_requests() or scsi_unblock_requests().
 */
static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
{
	const struct scsi_device *sdev;
	u32 pending = 0;

	lockdep_assert_held(hba->host->host_lock);
	__shost_for_each_device(sdev, hba->host)
		pending += sbitmap_weight(&sdev->budget_map);

	return pending;
}
/*
 * Wait until all pending SCSI commands and TMFs have finished or the timeout
 * has expired.
 *
 * Return: 0 upon success; -EBUSY upon timeout.
 */
static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
					u64 wait_timeout_us)
{
	unsigned long flags;
	int ret = 0;
	u32 tm_doorbell;
	u32 tr_pending;
	bool timeout = false, do_last_check = false;
	ktime_t start;

	ufshcd_hold(hba);
	spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * Wait for all the outstanding tasks/transfer requests.
	 * Verify by checking the doorbell registers are clear.
	 */
	start = ktime_get();
	do {
		if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
			ret = -EBUSY;
			goto out;
		}

		tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
		tr_pending = ufshcd_pending_cmds(hba);
		if (!tm_doorbell && !tr_pending) {
			timeout = false;
			break;
		} else if (do_last_check) {
			break;
		}

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		io_schedule_timeout(msecs_to_jiffies(20));
		if (ktime_to_us(ktime_sub(ktime_get(), start)) >
		    wait_timeout_us) {
			timeout = true;
			/*
			 * We might have scheduled out for long time so make
			 * sure to check if doorbells are cleared by this time
			 * or not.
			 */
			do_last_check = true;
		}
		spin_lock_irqsave(hba->host->host_lock, flags);
	} while (tm_doorbell || tr_pending);

	if (timeout) {
		dev_err(hba->dev,
			"%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
			__func__, tm_doorbell, tr_pending);
		ret = -EBUSY;
	}
out:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_release(hba);
	return ret;
}
/**
 * ufshcd_scale_gear - scale up/down UFS gear
 * @hba: per adapter instance
 * @scale_up: True for scaling up gear and false for scaling down
 *
 * Return: 0 for success; -EBUSY if scaling can't happen at this time;
 * non-zero for any other errors.
 */
static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;
	struct ufs_pa_layer_attr new_pwr_info;

	if (scale_up) {
		memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info,
		       sizeof(struct ufs_pa_layer_attr));
	} else {
		memcpy(&new_pwr_info, &hba->pwr_info,
		       sizeof(struct ufs_pa_layer_attr));

		if (hba->pwr_info.gear_tx > hba->clk_scaling.min_gear ||
		    hba->pwr_info.gear_rx > hba->clk_scaling.min_gear) {
			/* save the current power mode */
			memcpy(&hba->clk_scaling.saved_pwr_info,
			       &hba->pwr_info,
			       sizeof(struct ufs_pa_layer_attr));

			/* scale down gear */
			new_pwr_info.gear_tx = hba->clk_scaling.min_gear;
			new_pwr_info.gear_rx = hba->clk_scaling.min_gear;
		}
	}

	/* check if the power mode needs to be changed or not? */
	ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
	if (ret)
		dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
			__func__, ret,
			hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
			new_pwr_info.gear_tx, new_pwr_info.gear_rx);

	return ret;
}
/*
 * Wait until all pending SCSI commands and TMFs have finished or the timeout
 * has expired.
 *
 * Return: 0 upon success; -EBUSY upon timeout.
 */
static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
{
	int ret = 0;
	/*
	 * make sure that there are no outstanding requests when
	 * clock scaling is in progress
	 */
	ufshcd_scsi_block_requests(hba);
	mutex_lock(&hba->wb_mutex);
	down_write(&hba->clk_scaling_lock);

	if (!hba->clk_scaling.is_allowed ||
	    ufshcd_wait_for_doorbell_clr(hba, timeout_us)) {
		ret = -EBUSY;
		up_write(&hba->clk_scaling_lock);
		mutex_unlock(&hba->wb_mutex);
		ufshcd_scsi_unblock_requests(hba);
		goto out;
	}

	/* let's not get into low power until clock scaling is completed */
	ufshcd_hold(hba);

out:
	return ret;
}
static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool scale_up)
{
	up_write(&hba->clk_scaling_lock);

	/* Enable Write Booster if we have scaled up else disable it */
	if (ufshcd_enable_wb_if_scaling_up(hba) && !err)
		ufshcd_wb_toggle(hba, scale_up);

	mutex_unlock(&hba->wb_mutex);

	ufshcd_scsi_unblock_requests(hba);
	ufshcd_release(hba);
}
/**
 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
 * @hba: per adapter instance
 * @freq: frequency to scale
 * @scale_up: True for scaling up and false for scaling down
 *
 * Return: 0 for success; -EBUSY if scaling can't happen at this time; non-zero
 * for any other errors.
 */
static int ufshcd_devfreq_scale(struct ufs_hba *hba, unsigned long freq,
				bool scale_up)
{
	int ret = 0;

	ret = ufshcd_clock_scaling_prepare(hba, 1 * USEC_PER_SEC);
	if (ret)
		return ret;

	/* scale down the gear before scaling down clocks */
	if (!scale_up) {
		ret = ufshcd_scale_gear(hba, false);
		if (ret)
			goto out_unprepare;
	}

	ret = ufshcd_scale_clks(hba, freq, scale_up);
	if (ret) {
		if (!scale_up)
			ufshcd_scale_gear(hba, true);
		goto out_unprepare;
	}

	/* scale up the gear after scaling up clocks */
	if (scale_up) {
		ret = ufshcd_scale_gear(hba, true);
		if (ret) {
			ufshcd_scale_clks(hba, hba->devfreq->previous_freq,
					  false);
			goto out_unprepare;
		}
	}

out_unprepare:
	ufshcd_clock_scaling_unprepare(hba, ret, scale_up);
	return ret;
}
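/*
 * Note the ordering above: when scaling down, the gear is lowered before the
 * clocks; when scaling up, the clocks are raised before the gear. Either way
 * the link is never asked to run a gear that the current clock rate cannot
 * sustain, and each failure path undoes the step that did succeed.
 */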
static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
					   clk_scaling.suspend_work);
	unsigned long irq_flags;

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return;
	}
	hba->clk_scaling.is_suspended = true;
	hba->clk_scaling.window_start_t = 0;
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	devfreq_suspend_device(hba->devfreq);
}
static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
					   clk_scaling.resume_work);
	unsigned long irq_flags;

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (!hba->clk_scaling.is_suspended) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return;
	}
	hba->clk_scaling.is_suspended = false;
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	devfreq_resume_device(hba->devfreq);
}
static int ufshcd_devfreq_target(struct device *dev,
				unsigned long *freq, u32 flags)
{
	int ret = 0;
	struct ufs_hba *hba = dev_get_drvdata(dev);
	ktime_t start;
	bool scale_up, sched_clk_scaling_suspend_work = false;
	struct list_head *clk_list = &hba->clk_list_head;
	struct ufs_clk_info *clki;
	unsigned long irq_flags;

	if (!ufshcd_is_clkscaling_supported(hba))
		return -EINVAL;

	if (hba->use_pm_opp) {
		struct dev_pm_opp *opp;

		/* Get the recommended frequency from OPP framework */
		opp = devfreq_recommended_opp(dev, freq, flags);
		if (IS_ERR(opp))
			return PTR_ERR(opp);

		dev_pm_opp_put(opp);
	} else {
		/* Override with the closest supported frequency */
		clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info,
					list);
		*freq = (unsigned long) clk_round_rate(clki->clk, *freq);
	}

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (ufshcd_eh_in_progress(hba)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return 0;
	}

	/* Skip scaling clock when clock scaling is suspended */
	if (hba->clk_scaling.is_suspended) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		dev_warn(hba->dev, "clock scaling is suspended, skip");
		return 0;
	}

	if (!hba->clk_scaling.active_reqs)
		sched_clk_scaling_suspend_work = true;

	if (list_empty(clk_list)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		goto out;
	}

	/* Decide based on the target or rounded-off frequency and update */
	if (hba->use_pm_opp)
		scale_up = *freq > hba->clk_scaling.target_freq;
	else
		scale_up = *freq == clki->max_freq;

	if (!hba->use_pm_opp && !scale_up)
		*freq = clki->min_freq;

	/* Update the frequency */
	if (!ufshcd_is_devfreq_scaling_required(hba, *freq, scale_up)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		ret = 0;
		goto out; /* no state change required */
	}
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	start = ktime_get();
	ret = ufshcd_devfreq_scale(hba, *freq, scale_up);
	if (!ret)
		hba->clk_scaling.target_freq = *freq;

	trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
		(scale_up ? "up" : "down"),
		ktime_to_us(ktime_sub(ktime_get(), start)), ret);

out:
	if (sched_clk_scaling_suspend_work && !scale_up)
		queue_work(hba->clk_scaling.workq,
			   &hba->clk_scaling.suspend_work);

	return ret;
}
static int ufshcd_devfreq_get_dev_status(struct device *dev,
		struct devfreq_dev_status *stat)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
	unsigned long flags;
	ktime_t curr_t;

	if (!ufshcd_is_clkscaling_supported(hba))
		return -EINVAL;

	memset(stat, 0, sizeof(*stat));

	spin_lock_irqsave(hba->host->host_lock, flags);
	curr_t = ktime_get();
	if (!scaling->window_start_t)
		goto start_window;

	/*
	 * If current frequency is 0, then the ondemand governor considers
	 * there's no initial frequency set. And it always requests to set
	 * to max. frequency.
	 */
	if (hba->use_pm_opp) {
		stat->current_frequency = hba->clk_scaling.target_freq;
	} else {
		struct list_head *clk_list = &hba->clk_list_head;
		struct ufs_clk_info *clki;

		clki = list_first_entry(clk_list, struct ufs_clk_info, list);
		stat->current_frequency = clki->curr_freq;
	}

	if (scaling->is_busy_started)
		scaling->tot_busy_t += ktime_us_delta(curr_t,
				scaling->busy_start_t);
	stat->total_time = ktime_us_delta(curr_t, scaling->window_start_t);
	stat->busy_time = scaling->tot_busy_t;
start_window:
	scaling->window_start_t = curr_t;
	scaling->tot_busy_t = 0;

	if (scaling->active_reqs) {
		scaling->busy_start_t = curr_t;
		scaling->is_busy_started = true;
	} else {
		scaling->busy_start_t = 0;
		scaling->is_busy_started = false;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return 0;
}
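/*
 * Worked example: if the current window started 100 ms ago and commands were
 * outstanding for 40 ms of it, this reports busy_time = 40000 us out of
 * total_time = 100000 us, i.e. a 40% load for the devfreq governor to act on.
 */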
static int ufshcd_devfreq_init(struct ufs_hba *hba)
{
	struct list_head *clk_list = &hba->clk_list_head;
	struct ufs_clk_info *clki;
	struct devfreq *devfreq;
	int ret;

	/* Skip devfreq if we don't have any clocks in the list */
	if (list_empty(clk_list))
		return 0;

	if (!hba->use_pm_opp) {
		clki = list_first_entry(clk_list, struct ufs_clk_info, list);
		dev_pm_opp_add(hba->dev, clki->min_freq, 0);
		dev_pm_opp_add(hba->dev, clki->max_freq, 0);
	}

	ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile,
					 &hba->vps->ondemand_data);
	devfreq = devfreq_add_device(hba->dev,
			&hba->vps->devfreq_profile,
			DEVFREQ_GOV_SIMPLE_ONDEMAND,
			&hba->vps->ondemand_data);
	if (IS_ERR(devfreq)) {
		ret = PTR_ERR(devfreq);
		dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);

		if (!hba->use_pm_opp) {
			dev_pm_opp_remove(hba->dev, clki->min_freq);
			dev_pm_opp_remove(hba->dev, clki->max_freq);
		}
		return ret;
	}

	hba->devfreq = devfreq;

	return 0;
}
static void ufshcd_devfreq_remove(struct ufs_hba *hba)
{
	struct list_head *clk_list = &hba->clk_list_head;

	if (!hba->devfreq)
		return;

	devfreq_remove_device(hba->devfreq);
	hba->devfreq = NULL;

	if (!hba->use_pm_opp) {
		struct ufs_clk_info *clki;

		clki = list_first_entry(clk_list, struct ufs_clk_info, list);
		dev_pm_opp_remove(hba->dev, clki->min_freq);
		dev_pm_opp_remove(hba->dev, clki->max_freq);
	}
}
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
	unsigned long flags;
	bool suspend = false;

	cancel_work_sync(&hba->clk_scaling.suspend_work);
	cancel_work_sync(&hba->clk_scaling.resume_work);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!hba->clk_scaling.is_suspended) {
		suspend = true;
		hba->clk_scaling.is_suspended = true;
		hba->clk_scaling.window_start_t = 0;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (suspend)
		devfreq_suspend_device(hba->devfreq);
}
static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
{
	unsigned long flags;
	bool resume = false;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_scaling.is_suspended) {
		resume = true;
		hba->clk_scaling.is_suspended = false;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (resume)
		devfreq_resume_device(hba->devfreq);
}
static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", hba->clk_scaling.is_enabled);
}
static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	u32 value;
	int err = 0;

	if (kstrtou32(buf, 0, &value))
		return -EINVAL;

	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		err = -EBUSY;
		goto out;
	}

	value = !!value;
	if (value == hba->clk_scaling.is_enabled)
		goto out;

	ufshcd_rpm_get_sync(hba);
	ufshcd_hold(hba);

	hba->clk_scaling.is_enabled = value;

	if (value) {
		ufshcd_resume_clkscaling(hba);
	} else {
		ufshcd_suspend_clkscaling(hba);
		err = ufshcd_devfreq_scale(hba, ULONG_MAX, true);
		if (err)
			dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
					__func__, err);
	}

	ufshcd_release(hba);
	ufshcd_rpm_put_sync(hba);
out:
	up(&hba->host_sem);
	return err ? err : count;
}
static void ufshcd_init_clk_scaling_sysfs(struct ufs_hba *hba)
{
	hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
	hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
	sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
	hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
	hba->clk_scaling.enable_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
}
static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba)
{
	if (hba->clk_scaling.enable_attr.attr.name)
		device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
}
static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
{
	char wq_name[sizeof("ufs_clkscaling_00")];

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	if (!hba->clk_scaling.min_gear)
		hba->clk_scaling.min_gear = UFS_HS_G1;

	INIT_WORK(&hba->clk_scaling.suspend_work,
		  ufshcd_clk_scaling_suspend_work);
	INIT_WORK(&hba->clk_scaling.resume_work,
		  ufshcd_clk_scaling_resume_work);

	snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
		 hba->host->host_no);
	hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);

	hba->clk_scaling.is_initialized = true;
}
static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
{
	if (!hba->clk_scaling.is_initialized)
		return;

	ufshcd_remove_clk_scaling_sysfs(hba);
	destroy_workqueue(hba->clk_scaling.workq);
	ufshcd_devfreq_remove(hba);
	hba->clk_scaling.is_initialized = false;
}
static void ufshcd_ungate_work(struct work_struct *work)
{
	int ret;
	unsigned long flags;
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.ungate_work);

	cancel_delayed_work_sync(&hba->clk_gating.gate_work);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == CLKS_ON) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		return;
	}

	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_hba_vreg_set_hpm(hba);
	ufshcd_setup_clocks(hba, true);

	ufshcd_enable_irq(hba);

	/* Exit from hibern8 */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		/* Prevent gating in this path */
		hba->clk_gating.is_suspended = true;
		if (ufshcd_is_link_hibern8(hba)) {
			ret = ufshcd_uic_hibern8_exit(hba);
			if (ret)
				dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
					__func__, ret);
			else
				ufshcd_set_link_active(hba);
		}
		hba->clk_gating.is_suspended = false;
	}
}
/**
 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
 * Also, exit from hibern8 mode and set the link as active.
 * @hba: per adapter instance
 */
void ufshcd_hold(struct ufs_hba *hba)
{
	bool flush_result;
	unsigned long flags;

	if (!ufshcd_is_clkgating_allowed(hba) ||
	    !hba->clk_gating.is_initialized)
		return;
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.active_reqs++;

start:
	switch (hba->clk_gating.state) {
	case CLKS_ON:
		/*
		 * Wait for the ungate work to complete if in progress.
		 * Though the clocks may be in ON state, the link could
		 * still be in hibern8 state if hibern8 is allowed
		 * during clock gating.
		 * Make sure we exit hibern8 state also in addition to
		 * clocks being ON.
		 */
		if (ufshcd_can_hibern8_during_gating(hba) &&
		    ufshcd_is_link_hibern8(hba)) {
			spin_unlock_irqrestore(hba->host->host_lock, flags);
			flush_result = flush_work(&hba->clk_gating.ungate_work);
			if (hba->clk_gating.is_suspended && !flush_result)
				return;
			spin_lock_irqsave(hba->host->host_lock, flags);
			goto start;
		}
		break;
	case REQ_CLKS_OFF:
		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
			hba->clk_gating.state = CLKS_ON;
			trace_ufshcd_clk_gating(dev_name(hba->dev),
						hba->clk_gating.state);
			break;
		}
		/*
		 * If we are here, it means gating work is either done or
		 * currently running. Hence, fall through to cancel gating
		 * work and to enable clocks.
		 */
		fallthrough;
	case CLKS_OFF:
		hba->clk_gating.state = REQ_CLKS_ON;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
		queue_work(hba->clk_gating.clk_gating_workq,
			   &hba->clk_gating.ungate_work);
		/*
		 * fall through to check if we should wait for this
		 * work to be done or not.
		 */
		fallthrough;
	case REQ_CLKS_ON:
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		flush_work(&hba->clk_gating.ungate_work);
		/* Make sure state is CLKS_ON before returning */
		spin_lock_irqsave(hba->host->host_lock, flags);
		goto start;
	default:
		dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
			__func__, hba->clk_gating.state);
		break;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_hold);
static void ufshcd_gate_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.gate_work.work);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * In case you are here to cancel this work the gating state
	 * would be marked as REQ_CLKS_ON. In this case save time by
	 * skipping the gating work and exit after changing the clock
	 * state to CLKS_ON.
	 */
	if (hba->clk_gating.is_suspended ||
		(hba->clk_gating.state != REQ_CLKS_OFF)) {
		hba->clk_gating.state = CLKS_ON;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
		goto rel_lock;
	}

	if (ufshcd_is_ufs_dev_busy(hba) || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
		goto rel_lock;

	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* put the link into hibern8 mode before turning off clocks */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		ret = ufshcd_uic_hibern8_enter(hba);
		if (ret) {
			hba->clk_gating.state = CLKS_ON;
			dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
					__func__, ret);
			trace_ufshcd_clk_gating(dev_name(hba->dev),
						hba->clk_gating.state);
			goto out;
		}
		ufshcd_set_link_hibern8(hba);
	}

	ufshcd_disable_irq(hba);

	ufshcd_setup_clocks(hba, false);

	/* Put the host controller in low power mode if possible */
	ufshcd_hba_vreg_set_lpm(hba);
	/*
	 * In case you are here to cancel this work the gating state
	 * would be marked as REQ_CLKS_ON. In this case keep the state
	 * as REQ_CLKS_ON which would anyway imply that clocks are off
	 * and a request to turn them on is pending. By doing this way,
	 * we keep the state machine intact and this would ultimately
	 * prevent from doing cancel work multiple times when there are
	 * new requests arriving before the current cancel work is done.
	 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == REQ_CLKS_OFF) {
		hba->clk_gating.state = CLKS_OFF;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
	}
rel_lock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return;
}
/* host lock must be held before calling this variant */
static void __ufshcd_release(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.active_reqs--;

	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
	    hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
	    hba->outstanding_tasks || !hba->clk_gating.is_initialized ||
	    hba->active_uic_cmd || hba->uic_async_done ||
	    hba->clk_gating.state == CLKS_OFF)
		return;

	hba->clk_gating.state = REQ_CLKS_OFF;
	trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
	queue_delayed_work(hba->clk_gating.clk_gating_workq,
			   &hba->clk_gating.gate_work,
			   msecs_to_jiffies(hba->clk_gating.delay_ms));
}
void ufshcd_release(struct ufs_hba *hba)
{
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	__ufshcd_release(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_release);
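/*
 * Typical pattern (illustrative): a code path that needs the clocks on
 * brackets its work with
 *	ufshcd_hold(hba);
 *	... issue commands ...
 *	ufshcd_release(hba);
 * so gate_work is only queued once active_reqs drops back to zero and the
 * controller has been idle for clk_gating.delay_ms.
 */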
static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", hba->clk_gating.delay_ms);
}

void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.delay_ms = value;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_clkgate_delay_set);
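/*
 * Illustrative usage: the "clkgate_delay_ms" attribute created below routes
 * writes to this helper, so e.g. "echo 200 > .../clkgate_delay_ms" (the sysfs
 * path depends on the platform device) raises the idle time before clocks
 * are gated to 200 ms.
 */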
static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long value;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	ufshcd_clkgate_delay_set(dev, value);
	return count;
}
static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", hba->clk_gating.is_enabled);
}
static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags;
	u32 value;

	if (kstrtou32(buf, 0, &value))
		return -EINVAL;

	value = !!value;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (value == hba->clk_gating.is_enabled)
		goto out;

	if (value)
		__ufshcd_release(hba);
	else
		hba->clk_gating.active_reqs++;

	hba->clk_gating.is_enabled = value;
out:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return count;
}
static void ufshcd_init_clk_gating_sysfs(struct ufs_hba *hba)
{
	hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
	hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
	sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
	hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
	hba->clk_gating.delay_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");

	hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
	hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
	sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
	hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
	hba->clk_gating.enable_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
}
static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba)
{
	if (hba->clk_gating.delay_attr.attr.name)
		device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
	if (hba->clk_gating.enable_attr.attr.name)
		device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
}

static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
	char wq_name[sizeof("ufs_clk_gating_00")];

	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.state = CLKS_ON;

	hba->clk_gating.delay_ms = 150;
	INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
	INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);

	snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
		 hba->host->host_no);
	hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
					WQ_MEM_RECLAIM | WQ_HIGHPRI);

	ufshcd_init_clk_gating_sysfs(hba);

	hba->clk_gating.is_enabled = true;
	hba->clk_gating.is_initialized = true;
}

static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
{
	if (!hba->clk_gating.is_initialized)
		return;

	ufshcd_remove_clk_gating_sysfs(hba);

	/* Ungate the clock if necessary. */
	ufshcd_hold(hba);
	hba->clk_gating.is_initialized = false;
	ufshcd_release(hba);

	destroy_workqueue(hba->clk_gating.clk_gating_workq);
}
static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
{
	bool queue_resume_work = false;
	ktime_t curr_t = ktime_get();
	unsigned long flags;

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!hba->clk_scaling.active_reqs++)
		queue_resume_work = true;

	if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		return;
	}

	if (queue_resume_work)
		queue_work(hba->clk_scaling.workq,
			   &hba->clk_scaling.resume_work);

	if (!hba->clk_scaling.window_start_t) {
		hba->clk_scaling.window_start_t = curr_t;
		hba->clk_scaling.tot_busy_t = 0;
		hba->clk_scaling.is_busy_started = false;
	}

	if (!hba->clk_scaling.is_busy_started) {
		hba->clk_scaling.busy_start_t = curr_t;
		hba->clk_scaling.is_busy_started = true;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}

static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
{
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
	unsigned long flags;

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_scaling.active_reqs--;
	if (!scaling->active_reqs && scaling->is_busy_started) {
		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
					scaling->busy_start_t));
		scaling->busy_start_t = 0;
		scaling->is_busy_started = false;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
static inline int ufshcd_monitor_opcode2dir(u8 opcode)
{
	if (opcode == READ_6 || opcode == READ_10 || opcode == READ_16)
		return READ;
	else if (opcode == WRITE_6 || opcode == WRITE_10 || opcode == WRITE_16)
		return WRITE;
	else
		return -EINVAL;
}

static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba,
						struct ufshcd_lrb *lrbp)
{
	const struct ufs_hba_monitor *m = &hba->monitor;

	return (m->enabled && lrbp && lrbp->cmd &&
		(!m->chunk_size || m->chunk_size == lrbp->cmd->sdb.length) &&
		ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp));
}

static void ufshcd_start_monitor(struct ufs_hba *hba,
				 const struct ufshcd_lrb *lrbp)
{
	int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (dir >= 0 && hba->monitor.nr_queued[dir]++ == 0)
		hba->monitor.busy_start_ts[dir] = ktime_get();
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
static void ufshcd_update_monitor(struct ufs_hba *hba, const struct ufshcd_lrb *lrbp)
{
	int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) {
		const struct request *req = scsi_cmd_to_rq(lrbp->cmd);
		struct ufs_hba_monitor *m = &hba->monitor;
		ktime_t now, inc, lat;

		now = lrbp->compl_time_stamp;
		inc = ktime_sub(now, m->busy_start_ts[dir]);
		m->total_busy[dir] = ktime_add(m->total_busy[dir], inc);
		m->nr_sec_rw[dir] += blk_rq_sectors(req);

		/* Update latencies */
		m->nr_req[dir]++;
		lat = ktime_sub(now, lrbp->issue_time_stamp);
		m->lat_sum[dir] += lat;
		if (m->lat_max[dir] < lat || !m->lat_max[dir])
			m->lat_max[dir] = lat;
		if (m->lat_min[dir] > lat || !m->lat_min[dir])
			m->lat_min[dir] = lat;

		m->nr_queued[dir]--;
		/* Push forward the busy start of monitor */
		m->busy_start_ts[dir] = now;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
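
/*
 * Informative: from the counters maintained above, the average latency per
 * direction can be derived as lat_sum[dir] / nr_req[dir], while
 * total_busy[dir] accumulates the time during which at least one request
 * of that direction was in flight. These counters are exported through the
 * monitor sysfs group (see ufs-sysfs.c).
 */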
/**
 * ufshcd_send_command - Send SCSI or device management commands
 * @hba: per adapter instance
 * @task_tag: Task tag of the command
 * @hwq: pointer to hardware queue instance
 */
void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag,
			 struct ufs_hw_queue *hwq)
{
	struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
	unsigned long flags;

	lrbp->issue_time_stamp = ktime_get();
	lrbp->issue_time_stamp_local_clock = local_clock();
	lrbp->compl_time_stamp = ktime_set(0, 0);
	lrbp->compl_time_stamp_local_clock = 0;
	ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);

	ufshcd_clk_scaling_start_busy(hba);
	if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
		ufshcd_start_monitor(hba, lrbp);

	if (is_mcq_enabled(hba)) {
		int utrd_size = sizeof(struct utp_transfer_req_desc);
		struct utp_transfer_req_desc *src = lrbp->utr_descriptor_ptr;
		struct utp_transfer_req_desc *dest;

		spin_lock(&hwq->sq_lock);
		dest = hwq->sqe_base_addr + hwq->sq_tail_slot;
		memcpy(dest, src, utrd_size);
		ufshcd_inc_sq_tail(hwq);
		spin_unlock(&hwq->sq_lock);
	} else {
		spin_lock_irqsave(&hba->outstanding_lock, flags);
		if (hba->vops && hba->vops->setup_xfer_req)
			hba->vops->setup_xfer_req(hba, lrbp->task_tag,
						  !!lrbp->cmd);
		__set_bit(lrbp->task_tag, &hba->outstanding_reqs);
		ufshcd_writel(hba, 1 << lrbp->task_tag,
			      REG_UTP_TRANSFER_REQ_DOOR_BELL);
		spin_unlock_irqrestore(&hba->outstanding_lock, flags);
	}
}
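
/*
 * Worked example (informative): in legacy (SDB) mode, issuing the command
 * with tag 5 sets bit 5 of outstanding_reqs and writes 1 << 5 = 0x20 to
 * REG_UTP_TRANSFER_REQ_DOOR_BELL. In MCQ mode no doorbell is written: the
 * UTRD is copied into the hardware submission queue and the tail advanced.
 */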
/**
 * ufshcd_copy_sense_data - Copy sense data in case of check condition
 * @lrbp: pointer to local reference block
 */
static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
{
	u8 *const sense_buffer = lrbp->cmd->sense_buffer;
	u16 resp_len;
	int len;

	resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header.data_segment_length);
	if (sense_buffer && resp_len) {
		int len_to_copy;

		len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
		len_to_copy = min_t(int, UFS_SENSE_SIZE, len);

		memcpy(sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
		       len_to_copy);
	}
}
/**
 * ufshcd_copy_query_response() - Copy the Query Response and the data
 * segment.
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 *
 * Return: 0 upon success; < 0 upon failure.
 */
int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);

	/* Get the descriptor */
	if (hba->dev_cmd.query.descriptor &&
	    lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
		u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
				GENERAL_UPIU_REQUEST_SIZE;
		u16 resp_len;
		u16 buf_len;

		/* data segment length */
		resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header
				       .data_segment_length);
		buf_len = be16_to_cpu(
				hba->dev_cmd.query.request.upiu_req.length);
		if (likely(buf_len >= resp_len)) {
			memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
		} else {
			dev_warn(hba->dev,
				 "%s: rsp size %d is bigger than buffer size %d",
				 __func__, resp_len, buf_len);
			return -EINVAL;
		}
	}

	return 0;
}
/**
 * ufshcd_hba_capabilities - Read controller capabilities
 * @hba: per adapter instance
 *
 * Return: 0 on success, negative on error.
 */
static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
{
	int err;

	hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
	if (hba->quirks & UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS)
		hba->capabilities &= ~MASK_64_ADDRESSING_SUPPORT;

	/* nutrs and nutmrs are 0 based values */
	hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
	hba->nutmrs =
	((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
	hba->reserved_slot = hba->nutrs - 1;

	/* Read crypto capabilities */
	err = ufshcd_hba_init_crypto_capabilities(hba);
	if (err) {
		dev_err(hba->dev, "crypto setup failed\n");
		return err;
	}

	hba->mcq_sup = FIELD_GET(MASK_MCQ_SUPPORT, hba->capabilities);
	if (!hba->mcq_sup)
		return 0;

	hba->mcq_capabilities = ufshcd_readl(hba, REG_MCQCAP);
	hba->ext_iid_sup = FIELD_GET(MASK_EXT_IID_SUPPORT,
				     hba->mcq_capabilities);

	return 0;
}
/**
 * ufshcd_ready_for_uic_cmd - Check if controller is ready
 *                            to accept UIC commands
 * @hba: per adapter instance
 *
 * Return: true on success, else false.
 */
static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
{
	u32 val;
	int ret = read_poll_timeout(ufshcd_readl, val, val & UIC_COMMAND_READY,
				    500, UIC_CMD_TIMEOUT * 1000, false, hba,
				    REG_CONTROLLER_STATUS);
	return ret == 0;
}

/**
 * ufshcd_get_upmcrs - Get the power mode change request status
 * @hba: Pointer to adapter instance
 *
 * This function gets the UPMCRS field of HCS register
 *
 * Return: value of UPMCRS field.
 */
static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
}

/**
 * ufshcd_dispatch_uic_cmd - Dispatch an UIC command to the Unipro layer
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 */
static inline void
ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	lockdep_assert_held(&hba->uic_cmd_mutex);

	WARN_ON(hba->active_uic_cmd);

	hba->active_uic_cmd = uic_cmd;

	/* Write Args */
	ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
	ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
	ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);

	ufshcd_add_uic_command_trace(hba, uic_cmd, UFS_CMD_SEND);

	/* Write UIC Cmd */
	ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
		      REG_UIC_COMMAND);
}
/**
 * ufshcd_wait_for_uic_cmd - Wait for completion of an UIC command
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Return: 0 only if success.
 */
static int
ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	lockdep_assert_held(&hba->uic_cmd_mutex);

	if (wait_for_completion_timeout(&uic_cmd->done,
					msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
		ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
	} else {
		ret = -ETIMEDOUT;
		dev_err(hba->dev,
			"uic cmd 0x%x with arg3 0x%x completion timeout\n",
			uic_cmd->command, uic_cmd->argument3);

		if (!uic_cmd->cmd_active) {
			dev_err(hba->dev, "%s: UIC cmd has been completed, return the result\n",
				__func__);
			ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
		}
	}

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->active_uic_cmd = NULL;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return ret;
}
/**
 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 * @completion: initialize the completion only if this is set to true
 *
 * Return: 0 only if success.
 */
static int
__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
		      bool completion)
{
	lockdep_assert_held(&hba->uic_cmd_mutex);

	if (!ufshcd_ready_for_uic_cmd(hba)) {
		dev_err(hba->dev,
			"Controller not ready to accept UIC commands\n");
		return -EIO;
	}

	if (completion)
		init_completion(&uic_cmd->done);

	uic_cmd->cmd_active = 1;
	ufshcd_dispatch_uic_cmd(hba, uic_cmd);

	return 0;
}

/**
 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Return: 0 only if success.
 */
int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
		return 0;

	ufshcd_hold(hba);
	mutex_lock(&hba->uic_cmd_mutex);
	ufshcd_add_delay_before_dme_cmd(hba);

	ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
	if (!ret)
		ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);

	mutex_unlock(&hba->uic_cmd_mutex);

	ufshcd_release(hba);
	return ret;
}
/**
 * ufshcd_sgl_to_prdt - SG list to PRDT (Physical Region Description Table, 4DW format)
 * @hba: per-adapter instance
 * @lrbp: pointer to local reference block
 * @sg_entries: The number of sg lists actually used
 * @sg_list: Pointer to SG list
 */
static void ufshcd_sgl_to_prdt(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int sg_entries,
			       struct scatterlist *sg_list)
{
	struct ufshcd_sg_entry *prd;
	struct scatterlist *sg;
	int i;

	if (sg_entries) {

		if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
			lrbp->utr_descriptor_ptr->prd_table_length =
				cpu_to_le16(sg_entries * ufshcd_sg_entry_size(hba));
		else
			lrbp->utr_descriptor_ptr->prd_table_length = cpu_to_le16(sg_entries);

		prd = lrbp->ucd_prdt_ptr;

		for_each_sg(sg_list, sg, sg_entries, i) {
			const unsigned int len = sg_dma_len(sg);

			/*
			 * From the UFSHCI spec: "Data Byte Count (DBC): A '0'
			 * based value that indicates the length, in bytes, of
			 * the data block. A maximum of length of 256KB may
			 * exist for any entry. Bits 1:0 of this field shall be
			 * 11b to indicate Dword granularity. A value of '3'
			 * indicates 4 bytes, '7' indicates 8 bytes, etc."
			 */
			WARN_ONCE(len > SZ_256K, "len = %#x\n", len);
			prd->size = cpu_to_le32(len - 1);
			prd->addr = cpu_to_le64(sg->dma_address);
			prd->reserved = 0;
			prd = (void *)prd + ufshcd_sg_entry_size(hba);
		}
	} else {
		lrbp->utr_descriptor_ptr->prd_table_length = 0;
	}
}
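
/*
 * Worked example (informative): a 4 KiB DMA segment is encoded as
 * prd->size = cpu_to_le32(4096 - 1) = 0x0FFF. The low two bits of any
 * "multiple of 4 bytes minus one" are 11b, which is exactly the Dword
 * granularity marker required by the spec text quoted above.
 */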
/**
 * ufshcd_map_sg - Map scatter-gather list to prdt
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 *
 * Return: 0 in case of success, non-zero value in case of failure.
 */
static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct scsi_cmnd *cmd = lrbp->cmd;
	int sg_segments = scsi_dma_map(cmd);

	if (sg_segments < 0)
		return sg_segments;

	ufshcd_sgl_to_prdt(hba, lrbp, sg_segments, scsi_sglist(cmd));

	return 0;
}

/**
 * ufshcd_enable_intr - enable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == ufshci_version(1, 0)) {
		u32 rw;
		rw = set & INTERRUPT_MASK_RW_VER_10;
		set = rw | ((set ^ intrs) & intrs);
	} else {
		set |= intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}

/**
 * ufshcd_disable_intr - disable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == ufshci_version(1, 0)) {
		u32 rw;
		rw = (set & INTERRUPT_MASK_RW_VER_10) &
			~(intrs & INTERRUPT_MASK_RW_VER_10);
		set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
	} else {
		set &= ~intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}
/**
 * ufshcd_prepare_req_desc_hdr - Fill the UTP Transfer request descriptor
 * header according to the request
 * @lrbp: pointer to local reference block
 * @upiu_flags: flags required in the header
 * @cmd_dir: requests data direction
 * @ehs_length: Total EHS Length (in 32-byte units of all Extra Header Segments)
 */
static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, u8 *upiu_flags,
					enum dma_data_direction cmd_dir, int ehs_length)
{
	struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
	struct request_desc_header *h = &req_desc->header;
	enum utp_data_direction data_direction;

	*h = (typeof(*h)){ };

	if (cmd_dir == DMA_FROM_DEVICE) {
		data_direction = UTP_DEVICE_TO_HOST;
		*upiu_flags = UPIU_CMD_FLAGS_READ;
	} else if (cmd_dir == DMA_TO_DEVICE) {
		data_direction = UTP_HOST_TO_DEVICE;
		*upiu_flags = UPIU_CMD_FLAGS_WRITE;
	} else {
		data_direction = UTP_NO_DATA_TRANSFER;
		*upiu_flags = UPIU_CMD_FLAGS_NONE;
	}

	h->command_type = lrbp->command_type;
	h->data_direction = data_direction;
	h->ehs_length = ehs_length;

	if (lrbp->intr_cmd)
		h->interrupt = 1;

	/* Prepare crypto related dwords */
	ufshcd_prepare_req_desc_hdr_crypto(lrbp, h);

	/*
	 * Assign an invalid value for the command status. The controller
	 * updates OCS on command completion with the actual status.
	 */
	h->ocs = OCS_INVALID_COMMAND_STATUS;

	req_desc->prd_table_length = 0;
}
/**
 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
 * for scsi commands
 * @lrbp: local reference block pointer
 * @upiu_flags: flags
 */
static
void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
{
	struct scsi_cmnd *cmd = lrbp->cmd;
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
	unsigned short cdb_len;

	ucd_req_ptr->header = (struct utp_upiu_header){
		.transaction_code = UPIU_TRANSACTION_COMMAND,
		.flags = upiu_flags,
		.lun = lrbp->lun,
		.task_tag = lrbp->task_tag,
		.command_set_type = UPIU_COMMAND_SET_TYPE_SCSI,
	};

	WARN_ON_ONCE(ucd_req_ptr->header.task_tag != lrbp->task_tag);

	ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);

	cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
	memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
	memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
/**
 * ufshcd_prepare_utp_query_req_upiu() - fill the utp_transfer_req_desc for query request
 * @hba: UFS hba
 * @lrbp: local reference block pointer
 * @upiu_flags: flags
 */
static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
				struct ufshcd_lrb *lrbp, u8 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
	struct ufs_query *query = &hba->dev_cmd.query;
	u16 len = be16_to_cpu(query->request.upiu_req.length);

	/* Query request header */
	ucd_req_ptr->header = (struct utp_upiu_header){
		.transaction_code = UPIU_TRANSACTION_QUERY_REQ,
		.flags = upiu_flags,
		.lun = lrbp->lun,
		.task_tag = lrbp->task_tag,
		.query_function = query->request.query_func,
		/* The data segment length is only needed for WRITE_DESC */
		.data_segment_length =
			query->request.upiu_req.opcode ==
					UPIU_QUERY_OPCODE_WRITE_DESC ?
				cpu_to_be16(len) :
				0,
	};

	/* Copy the Query Request buffer as is */
	memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
			QUERY_OSF_SIZE);

	/* Copy the Descriptor */
	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
		memcpy(ucd_req_ptr + 1, query->descriptor, len);

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;

	memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));

	ucd_req_ptr->header = (struct utp_upiu_header){
		.transaction_code = UPIU_TRANSACTION_NOP_OUT,
		.task_tag = lrbp->task_tag,
	};

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}

/**
 * ufshcd_compose_devman_upiu - UFS Protocol Information Unit(UPIU)
 *			       for Device Management Purposes
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 *
 * Return: 0 upon success; < 0 upon failure.
 */
static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
				      struct ufshcd_lrb *lrbp)
{
	u8 upiu_flags;
	int ret = 0;

	if (hba->ufs_version <= ufshci_version(1, 1))
		lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
	else
		lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;

	ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0);
	if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
		ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
	else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
		ufshcd_prepare_utp_nop_upiu(lrbp);
	else
		ret = -EINVAL;

	return ret;
}
/**
 * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit(UPIU)
 *			   for SCSI Purposes
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 */
static void ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct request *rq = scsi_cmd_to_rq(lrbp->cmd);
	unsigned int ioprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
	u8 upiu_flags;

	if (hba->ufs_version <= ufshci_version(1, 1))
		lrbp->command_type = UTP_CMD_TYPE_SCSI;
	else
		lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;

	ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
				    lrbp->cmd->sc_data_direction, 0);
	if (ioprio_class == IOPRIO_CLASS_RT)
		upiu_flags |= UPIU_CMD_FLAGS_CP;
	ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
}
/**
 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
 * @upiu_wlun_id: UPIU W-LUN id
 *
 * Return: SCSI W-LUN id.
 */
static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
{
	return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
}

static inline bool is_device_wlun(struct scsi_device *sdev)
{
	return sdev->lun ==
		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN);
}
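
/*
 * Worked example (informative): UFS_UPIU_UFS_DEVICE_WLUN is 0xD0, i.e. the
 * well-known-LU bit UFS_UPIU_WLUN_ID (0x80) plus unit number 0x50. Clearing
 * the W-LUN bit and OR-ing SCSI_W_LUN_BASE (0xc100) yields 0xc150, the LUN
 * under which the device W-LUN is visible to the SCSI midlayer.
 */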
/*
 * Associate the UFS controller queue with the default and poll HCTX types.
 * Initialize the mq_map[] arrays.
 */
static void ufshcd_map_queues(struct Scsi_Host *shost)
{
	struct ufs_hba *hba = shost_priv(shost);
	int i, queue_offset = 0;

	if (!is_mcq_supported(hba)) {
		hba->nr_queues[HCTX_TYPE_DEFAULT] = 1;
		hba->nr_queues[HCTX_TYPE_READ] = 0;
		hba->nr_queues[HCTX_TYPE_POLL] = 1;
		hba->nr_hw_queues = 1;
	}

	for (i = 0; i < shost->nr_maps; i++) {
		struct blk_mq_queue_map *map = &shost->tag_set.map[i];

		map->nr_queues = hba->nr_queues[i];
		if (!map->nr_queues)
			continue;
		map->queue_offset = queue_offset;
		if (i == HCTX_TYPE_POLL && !is_mcq_supported(hba))
			map->queue_offset = 0;

		blk_mq_map_queues(map);
		queue_offset += map->nr_queues;
	}
}

static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
{
	struct utp_transfer_cmd_desc *cmd_descp = (void *)hba->ucdl_base_addr +
		i * ufshcd_get_ucd_size(hba);
	struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
	dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
		i * ufshcd_get_ucd_size(hba);
	u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
				       response_upiu);
	u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);

	lrb->utr_descriptor_ptr = utrdlp + i;
	lrb->utrd_dma_addr = hba->utrdl_dma_addr +
		i * sizeof(struct utp_transfer_req_desc);
	lrb->ucd_req_ptr = (struct utp_upiu_req *)cmd_descp->command_upiu;
	lrb->ucd_req_dma_addr = cmd_desc_element_addr;
	lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp->response_upiu;
	lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
	lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp->prd_table;
	lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
}
/**
 * ufshcd_queuecommand - main entry point for SCSI requests
 * @host: SCSI host pointer
 * @cmd: command from SCSI Midlayer
 *
 * Return: 0 for success, non-zero in case of failure.
 */
static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	struct ufs_hba *hba = shost_priv(host);
	int tag = scsi_cmd_to_rq(cmd)->tag;
	struct ufshcd_lrb *lrbp;
	int err = 0;
	struct ufs_hw_queue *hwq = NULL;

	switch (hba->ufshcd_state) {
	case UFSHCD_STATE_OPERATIONAL:
		break;
	case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
		/*
		 * SCSI error handler can call ->queuecommand() while UFS error
		 * handler is in progress. Error interrupts could change the
		 * state from UFSHCD_STATE_RESET to
		 * UFSHCD_STATE_EH_SCHEDULED_NON_FATAL. Prevent requests
		 * being issued in that case.
		 */
		if (ufshcd_eh_in_progress(hba)) {
			err = SCSI_MLQUEUE_HOST_BUSY;
			goto out;
		}
		break;
	case UFSHCD_STATE_EH_SCHEDULED_FATAL:
		/*
		 * pm_runtime_get_sync() is used at error handling preparation
		 * stage. If a scsi cmd, e.g. the SSU cmd, is sent from hba's
		 * PM ops, it can never be finished if we let SCSI layer keep
		 * retrying it, which gets err handler stuck forever. Neither
		 * can we let the scsi cmd pass through, because UFS is in bad
		 * state, the scsi cmd may eventually time out, which will get
		 * err handler blocked for too long. So, just fail the scsi cmd
		 * sent from PM ops, err handler can recover PM error anyways.
		 */
		if (hba->pm_op_in_progress) {
			hba->force_reset = true;
			set_host_byte(cmd, DID_BAD_TARGET);
			scsi_done(cmd);
			goto out;
		}
		fallthrough;
	case UFSHCD_STATE_RESET:
		err = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	case UFSHCD_STATE_ERROR:
		set_host_byte(cmd, DID_ERROR);
		scsi_done(cmd);
		goto out;
	}

	hba->req_abort_count = 0;

	ufshcd_hold(hba);

	lrbp = &hba->lrb[tag];
	lrbp->cmd = cmd;
	lrbp->task_tag = tag;
	lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
	lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba);

	ufshcd_prepare_lrbp_crypto(scsi_cmd_to_rq(cmd), lrbp);

	lrbp->req_abort_skip = false;

	ufshcd_comp_scsi_upiu(hba, lrbp);

	err = ufshcd_map_sg(hba, lrbp);
	if (err) {
		ufshcd_release(hba);
		goto out;
	}

	if (is_mcq_enabled(hba))
		hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));

	ufshcd_send_command(hba, tag, hwq);

out:
	if (ufs_trigger_eh(hba)) {
		unsigned long flags;

		spin_lock_irqsave(hba->host->host_lock, flags);
		ufshcd_schedule_eh_work(hba);
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	}

	return err;
}
static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
{
	lrbp->cmd = NULL;
	lrbp->task_tag = tag;
	lrbp->lun = 0; /* device management cmd is not specific to any LUN */
	lrbp->intr_cmd = true; /* No interrupt aggregation */
	ufshcd_prepare_lrbp_crypto(NULL, lrbp);
	hba->dev_cmd.type = cmd_type;

	return ufshcd_compose_devman_upiu(hba, lrbp);
}

/*
 * Check with the block layer if the command is inflight
 * @cmd: command to check.
 *
 * Return: true if command is inflight; false if not.
 */
bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd)
{
	struct request *rq;

	if (!cmd)
		return false;

	rq = scsi_cmd_to_rq(cmd);
	if (!blk_mq_request_started(rq))
		return false;

	return true;
}
/*
 * Clear the pending command in the controller and wait until
 * the controller confirms that the command has been cleared.
 * @hba: per adapter instance
 * @task_tag: The tag number of the command to be cleared.
 */
static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag)
{
	u32 mask = 1U << task_tag;
	unsigned long flags;
	int err;

	if (is_mcq_enabled(hba)) {
		/*
		 * MCQ mode. Clean up the MCQ resources similar to
		 * what the ufshcd_utrl_clear() does for SDB mode.
		 */
		err = ufshcd_mcq_sq_cleanup(hba, task_tag);
		if (err) {
			dev_err(hba->dev, "%s: failed tag=%d. err=%d\n",
				__func__, task_tag, err);
			return err;
		}
		return 0;
	}

	/* clear outstanding transaction before retry */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_utrl_clear(hba, mask);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/*
	 * wait for h/w to clear corresponding bit in door-bell.
	 * max. wait is 1 sec.
	 */
	return ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
					mask, ~mask, 1000, 1000);
}
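
/*
 * Worked example (informative): for task_tag 3 the mask is 1U << 3 = 0x8.
 * In SDB mode, ufshcd_utrl_clear() asks the controller to clear that slot
 * and ufshcd_wait_for_register() then polls the doorbell register until
 * (reg & 0x8) == 0, i.e. until the corresponding bit has dropped.
 */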
/**
 * ufshcd_dev_cmd_completion() - handles device management command responses
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 *
 * Return: 0 upon success; < 0 upon failure.
 */
static int
ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	enum upiu_response_transaction resp;
	int err = 0;

	hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
	resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);

	switch (resp) {
	case UPIU_TRANSACTION_NOP_IN:
		if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
			err = -EINVAL;
			dev_err(hba->dev, "%s: unexpected response %x\n",
					__func__, resp);
		}
		break;
	case UPIU_TRANSACTION_QUERY_RSP: {
		u8 response = lrbp->ucd_rsp_ptr->header.response;

		if (response == 0)
			err = ufshcd_copy_query_response(hba, lrbp);
		break;
	}
	case UPIU_TRANSACTION_REJECT_UPIU:
		/* TODO: handle Reject UPIU Response */
		err = -EPERM;
		dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
				__func__);
		break;
	case UPIU_TRANSACTION_RESPONSE:
		if (hba->dev_cmd.type != DEV_CMD_TYPE_RPMB) {
			err = -EINVAL;
			dev_err(hba->dev, "%s: unexpected response %x\n", __func__, resp);
		}
		break;
	default:
		err = -EINVAL;
		dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
				__func__, resp);
		break;
	}

	return err;
}
static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp, int max_timeout)
{
	unsigned long time_left = msecs_to_jiffies(max_timeout);
	unsigned long flags;
	bool pending;
	int err;

retry:
	time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
						time_left);

	if (likely(time_left)) {
		/*
		 * The completion handler called complete() and the caller of
		 * this function still owns the @lrbp tag so the code below does
		 * not trigger any race conditions.
		 */
		hba->dev_cmd.complete = NULL;
		err = ufshcd_get_tr_ocs(lrbp, NULL);
		if (!err)
			err = ufshcd_dev_cmd_completion(hba, lrbp);
	} else {
		err = -ETIMEDOUT;
		dev_dbg(hba->dev, "%s: dev_cmd request timed out, tag %d\n",
			__func__, lrbp->task_tag);

		/* MCQ mode */
		if (is_mcq_enabled(hba)) {
			err = ufshcd_clear_cmd(hba, lrbp->task_tag);
			hba->dev_cmd.complete = NULL;
			return err;
		}

		/* SDB mode */
		if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0) {
			/* successfully cleared the command, retry if needed */
			err = -EAGAIN;
			/*
			 * Since clearing the command succeeded we also need to
			 * clear the task tag bit from the outstanding_reqs
			 * variable.
			 */
			spin_lock_irqsave(&hba->outstanding_lock, flags);
			pending = test_bit(lrbp->task_tag,
					   &hba->outstanding_reqs);
			if (pending) {
				hba->dev_cmd.complete = NULL;
				__clear_bit(lrbp->task_tag,
					    &hba->outstanding_reqs);
			}
			spin_unlock_irqrestore(&hba->outstanding_lock, flags);

			if (!pending) {
				/*
				 * The completion handler ran while we tried to
				 * clear the command.
				 */
				time_left = 1;
				goto retry;
			}
		} else {
			dev_err(hba->dev, "%s: failed to clear tag %d\n",
				__func__, lrbp->task_tag);

			spin_lock_irqsave(&hba->outstanding_lock, flags);
			pending = test_bit(lrbp->task_tag,
					   &hba->outstanding_reqs);
			if (pending)
				hba->dev_cmd.complete = NULL;
			spin_unlock_irqrestore(&hba->outstanding_lock, flags);

			if (!pending) {
				/*
				 * The completion handler ran while we tried to
				 * clear the command.
				 */
				time_left = 1;
				goto retry;
			}
		}
	}

	return err;
}
/**
 * ufshcd_exec_dev_cmd - API for sending device management requests
 * @hba: UFS hba
 * @cmd_type: specifies the type (NOP, Query...)
 * @timeout: timeout in milliseconds
 *
 * Return: 0 upon success; < 0 upon failure.
 *
 * NOTE: Since there is only one available tag for device management commands,
 * it is expected you hold the hba->dev_cmd.lock mutex.
 */
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
		enum dev_cmd_type cmd_type, int timeout)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	const u32 tag = hba->reserved_slot;
	struct ufshcd_lrb *lrbp;
	int err;

	/* Protects use of hba->reserved_slot. */
	lockdep_assert_held(&hba->dev_cmd.lock);

	down_read(&hba->clk_scaling_lock);

	lrbp = &hba->lrb[tag];
	err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
	if (unlikely(err))
		goto out;

	hba->dev_cmd.complete = &wait;

	ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);

	ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
	err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
	ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
				    (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);

out:
	up_read(&hba->clk_scaling_lock);
	return err;
}
/**
 * ufshcd_init_query() - init the query response and request parameters
 * @hba: per-adapter instance
 * @request: address of the request pointer to be initialized
 * @response: address of the response pointer to be initialized
 * @opcode: operation to perform
 * @idn: flag idn to access
 * @index: LU number to access
 * @selector: query/flag/descriptor further identification
 */
static inline void ufshcd_init_query(struct ufs_hba *hba,
		struct ufs_query_req **request, struct ufs_query_res **response,
		enum query_opcode opcode, u8 idn, u8 index, u8 selector)
{
	*request = &hba->dev_cmd.query.request;
	*response = &hba->dev_cmd.query.response;
	memset(*request, 0, sizeof(struct ufs_query_req));
	memset(*response, 0, sizeof(struct ufs_query_res));
	(*request)->upiu_req.opcode = opcode;
	(*request)->upiu_req.idn = idn;
	(*request)->upiu_req.index = index;
	(*request)->upiu_req.selector = selector;
}
static int ufshcd_query_flag_retry(struct ufs_hba *hba,
	enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
{
	int ret;
	int retries;

	for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
		ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res);
		if (ret)
			dev_dbg(hba->dev,
				"%s: failed with error %d, retries %d\n",
				__func__, ret, retries);
		else
			break;
	}

	if (ret)
		dev_err(hba->dev,
			"%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
			__func__, opcode, idn, ret, retries);
	return ret;
}

/**
 * ufshcd_query_flag() - API function for sending flag query requests
 * @hba: per-adapter instance
 * @opcode: flag query to perform
 * @idn: flag idn to access
 * @index: flag index to access
 * @flag_res: the flag value after the query request completes
 *
 * Return: 0 for success, non-zero in case of failure.
 */
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
			enum flag_idn idn, u8 index, bool *flag_res)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err, selector = 0;
	int timeout = QUERY_REQ_TIMEOUT;

	BUG_ON(!hba);

	ufshcd_hold(hba);
	mutex_lock(&hba->dev_cmd.lock);
	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			selector);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_SET_FLAG:
	case UPIU_QUERY_OPCODE_CLEAR_FLAG:
	case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		if (!flag_res) {
			/* No dummy reads */
			dev_err(hba->dev, "%s: Invalid argument for read request\n",
					__func__);
			err = -EINVAL;
			goto out_unlock;
		}
		break;
	default:
		dev_err(hba->dev,
			"%s: Expected query flag opcode but got = %d\n",
			__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);

	if (err) {
		dev_err(hba->dev,
			"%s: Sending flag query for idn %d failed, err = %d\n",
			__func__, idn, err);
		goto out_unlock;
	}

	if (flag_res)
		*flag_res = (be32_to_cpu(response->upiu_res.value) &
				MASK_QUERY_UPIU_FLAG_LOC) & 0x1;

out_unlock:
	mutex_unlock(&hba->dev_cmd.lock);
	ufshcd_release(hba);
	return err;
}
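
/*
 * Illustrative sketch: this is the API the driver itself uses to poll
 * fDeviceInit after device initialization, roughly:
 *
 *	bool flag_res;
 *	int err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *					  QUERY_FLAG_IDN_FDEVICEINIT, 0,
 *					  &flag_res);
 *
 * err == 0 with flag_res == false indicates the device has finished
 * initialization and cleared the flag.
 */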
/**
 * ufshcd_query_attr - API function for sending attribute requests
 * @hba: per-adapter instance
 * @opcode: attribute opcode
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @attr_val: the attribute value after the query request completes
 *
 * Return: 0 for success, non-zero in case of failure.
 */
int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
		      enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err;

	BUG_ON(!hba);

	if (!attr_val) {
		dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
				__func__, opcode);
		return -EINVAL;
	}

	ufshcd_hold(hba);

	mutex_lock(&hba->dev_cmd.lock);
	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			selector);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_WRITE_ATTR:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		request->upiu_req.value = cpu_to_be32(*attr_val);
		break;
	case UPIU_QUERY_OPCODE_READ_ATTR:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		break;
	default:
		dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);

	if (err) {
		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
				__func__, opcode, idn, index, err);
		goto out_unlock;
	}

	*attr_val = be32_to_cpu(response->upiu_res.value);

out_unlock:
	mutex_unlock(&hba->dev_cmd.lock);
	ufshcd_release(hba);
	return err;
}

/**
 * ufshcd_query_attr_retry() - API function for sending query
 * attribute with retries
 * @hba: per-adapter instance
 * @opcode: attribute opcode
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @attr_val: the attribute value after the query request
 * completes
 *
 * Return: 0 for success, non-zero in case of failure.
 */
int ufshcd_query_attr_retry(struct ufs_hba *hba,
	enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
	u32 *attr_val)
{
	int ret = 0;
	u32 retries;

	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
		ret = ufshcd_query_attr(hba, opcode, idn, index,
						selector, attr_val);
		if (ret)
			dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
				__func__, ret, retries);
		else
			break;
	}

	if (ret)
		dev_err(hba->dev,
			"%s: query attribute, idn %d, failed with error %d after %d retries\n",
			__func__, idn, ret, QUERY_REQ_RETRIES);
	return ret;
}
static int __ufshcd_query_descriptor(struct ufs_hba *hba,
			enum query_opcode opcode, enum desc_idn idn, u8 index,
			u8 selector, u8 *desc_buf, int *buf_len)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err;

	BUG_ON(!hba);

	if (!desc_buf) {
		dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
				__func__, opcode);
		return -EINVAL;
	}

	if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
		dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
				__func__, *buf_len);
		return -EINVAL;
	}

	ufshcd_hold(hba);

	mutex_lock(&hba->dev_cmd.lock);
	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			selector);
	hba->dev_cmd.query.descriptor = desc_buf;
	request->upiu_req.length = cpu_to_be16(*buf_len);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_WRITE_DESC:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_DESC:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		break;
	default:
		dev_err(hba->dev,
				"%s: Expected query descriptor opcode but got = 0x%.2x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);

	if (err) {
		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
				__func__, opcode, idn, index, err);
		goto out_unlock;
	}

	*buf_len = be16_to_cpu(response->upiu_res.length);

out_unlock:
	hba->dev_cmd.query.descriptor = NULL;
	mutex_unlock(&hba->dev_cmd.lock);
	ufshcd_release(hba);
	return err;
}

/**
 * ufshcd_query_descriptor_retry - API function for sending descriptor requests
 * @hba: per-adapter instance
 * @opcode: attribute opcode
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @desc_buf: the buffer that contains the descriptor
 * @buf_len: length parameter passed to the device
 *
 * The buf_len parameter will contain, on return, the length parameter
 * received on the response.
 *
 * Return: 0 for success, non-zero in case of failure.
 */
int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
				  enum query_opcode opcode,
				  enum desc_idn idn, u8 index,
				  u8 selector,
				  u8 *desc_buf, int *buf_len)
{
	int err;
	int retries;

	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
		err = __ufshcd_query_descriptor(hba, opcode, idn, index,
						selector, desc_buf, buf_len);
		if (!err || err == -EINVAL)
			break;
	}

	return err;
}
/**
 * ufshcd_read_desc_param - read the specified descriptor parameter
 * @hba: Pointer to adapter instance
 * @desc_id: descriptor idn value
 * @desc_index: descriptor index
 * @param_offset: offset of the parameter to read
 * @param_read_buf: pointer to buffer where parameter would be read
 * @param_size: sizeof(param_read_buf)
 *
 * Return: 0 in case of success, non-zero otherwise.
 */
int ufshcd_read_desc_param(struct ufs_hba *hba,
			   enum desc_idn desc_id,
			   int desc_index,
			   u8 param_offset,
			   u8 *param_read_buf,
			   u8 param_size)
{
	int ret;
	u8 *desc_buf;
	int buff_len = QUERY_DESC_MAX_SIZE;
	bool is_kmalloc = true;

	/* Safety check */
	if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
		return -EINVAL;

	/* Check whether we need temp memory */
	if (param_offset != 0 || param_size < buff_len) {
		desc_buf = kzalloc(buff_len, GFP_KERNEL);
		if (!desc_buf)
			return -ENOMEM;
	} else {
		desc_buf = param_read_buf;
		is_kmalloc = false;
	}

	/* Request for full descriptor */
	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
					    desc_id, desc_index, 0,
					    desc_buf, &buff_len);
	if (ret) {
		dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n",
			__func__, desc_id, desc_index, param_offset, ret);
		goto out;
	}

	/* Update descriptor length */
	buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET];

	if (param_offset >= buff_len) {
		dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n",
			__func__, param_offset, desc_id, buff_len);
		ret = -EINVAL;
		goto out;
	}

	/* Sanity check */
	if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
		dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n",
			__func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
		ret = -EINVAL;
		goto out;
	}

	if (is_kmalloc) {
		/* Make sure we don't copy more data than available */
		if (param_offset >= buff_len)
			ret = -EINVAL;
		else
			memcpy(param_read_buf, &desc_buf[param_offset],
			       min_t(u32, param_size, buff_len - param_offset));
	}
out:
	if (is_kmalloc)
		kfree(desc_buf);
	return ret;
}
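
/*
 * Illustrative sketch (not driver code): reading one byte of the device
 * descriptor, e.g. the number of logical units (the offset constant is
 * assumed to be the DEVICE_DESC_PARAM_NUM_LU value from ufs.h):
 *
 *	u8 num_lu;
 *	int err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0,
 *					 DEVICE_DESC_PARAM_NUM_LU,
 *					 &num_lu, sizeof(num_lu));
 */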
/**
 * struct uc_string_id - unicode string
 *
 * @len: size of this descriptor inclusive
 * @type: descriptor type
 * @uc: unicode string character
 */
struct uc_string_id {
	u8 len;
	u8 type;
	wchar_t uc[];
} __packed;

/* replace non-printable or non-ASCII characters with spaces */
static inline char ufshcd_remove_non_printable(u8 ch)
{
	return (ch >= 0x20 && ch <= 0x7e) ? ch : ' ';
}

/**
 * ufshcd_read_string_desc - read string descriptor
 * @hba: pointer to adapter instance
 * @desc_index: descriptor index
 * @buf: pointer to buffer where descriptor would be read,
 *       the caller should free the memory.
 * @ascii: if true convert from unicode to ascii characters
 *         null terminated string.
 *
 * Return:
 * *      string size on success.
 * *      -ENOMEM: on allocation failure
 * *      -EINVAL: on a wrong parameter
 */
int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
			    u8 **buf, bool ascii)
{
	struct uc_string_id *uc_str;
	u8 *str;
	int ret;

	if (!buf)
		return -EINVAL;

	uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
	if (!uc_str)
		return -ENOMEM;

	ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_STRING, desc_index, 0,
				     (u8 *)uc_str, QUERY_DESC_MAX_SIZE);
	if (ret < 0) {
		dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n",
			QUERY_REQ_RETRIES, ret);
		str = NULL;
		goto out;
	}

	if (uc_str->len <= QUERY_DESC_HDR_SIZE) {
		dev_dbg(hba->dev, "String Desc is of zero length\n");
		str = NULL;
		ret = 0;
		goto out;
	}

	if (ascii) {
		ssize_t ascii_len;
		int i;
		/* remove header and divide by 2 to move from UTF16 to UTF8 */
		ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1;
		str = kzalloc(ascii_len, GFP_KERNEL);
		if (!str) {
			ret = -ENOMEM;
			goto out;
		}

		/*
		 * the descriptor contains string in UTF16 format
		 * we need to convert to utf-8 so it can be displayed
		 */
		ret = utf16s_to_utf8s(uc_str->uc,
				      uc_str->len - QUERY_DESC_HDR_SIZE,
				      UTF16_BIG_ENDIAN, str, ascii_len - 1);

		/* replace non-printable or non-ASCII characters with spaces */
		for (i = 0; i < ret; i++)
			str[i] = ufshcd_remove_non_printable(str[i]);

		str[ret++] = '\0';

	} else {
		str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
		if (!str) {
			ret = -ENOMEM;
			goto out;
		}
		ret = uc_str->len;
	}
out:
	*buf = str;
	kfree(uc_str);
	return ret;
}
/**
 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
 * @hba: Pointer to adapter instance
 * @lun: lun id
 * @param_offset: offset of the parameter to read
 * @param_read_buf: pointer to buffer where parameter would be read
 * @param_size: sizeof(param_read_buf)
 *
 * Return: 0 in case of success, non-zero otherwise.
 */
static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
					      int lun,
					      enum unit_desc_param param_offset,
					      u8 *param_read_buf,
					      u32 param_size)
{
	/*
	 * Unit descriptors are only available for general purpose LUs (LUN id
	 * from 0 to 7) and RPMB Well known LU.
	 */
	if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun))
		return -EOPNOTSUPP;

	return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
				      param_offset, param_read_buf, param_size);
}

static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba)
{
	int err = 0;
	u32 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;

	if (hba->dev_info.wspecversion >= 0x300) {
		err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME, 0, 0,
				&gating_wait);
		if (err)
			dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n",
					 err, gating_wait);

		if (gating_wait == 0) {
			gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
			dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n",
					 gating_wait);
		}

		hba->dev_info.clk_gating_wait_us = gating_wait;
	}

	return err;
}
/**
 * ufshcd_memory_alloc - allocate memory for host memory space data structures
 * @hba: per adapter instance
 *
 * 1. Allocate DMA memory for Command Descriptor array
 *	Each command descriptor consist of Command UPIU, Response UPIU and PRDT
 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
 *	(UTMRDL)
 * 4. Allocate memory for local reference block(lrb).
 *
 * Return: 0 for success, non-zero in case of failure.
 */
static int ufshcd_memory_alloc(struct ufs_hba *hba)
{
	size_t utmrdl_size, utrdl_size, ucdl_size;

	/* Allocate memory for UTP command descriptors */
	ucdl_size = ufshcd_get_ucd_size(hba) * hba->nutrs;
	hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
						  ucdl_size,
						  &hba->ucdl_dma_addr,
						  GFP_KERNEL);

	/*
	 * UFSHCI requires UTP command descriptor to be 128 byte aligned.
	 */
	if (!hba->ucdl_base_addr ||
	    WARN_ON(hba->ucdl_dma_addr & (128 - 1))) {
		dev_err(hba->dev,
			"Command Descriptor Memory allocation failed\n");
		goto out;
	}

	/*
	 * Allocate memory for UTP Transfer descriptors
	 * UFSHCI requires 1KB alignment of UTRD
	 */
	utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
	hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
						   utrdl_size,
						   &hba->utrdl_dma_addr,
						   GFP_KERNEL);
	if (!hba->utrdl_base_addr ||
	    WARN_ON(hba->utrdl_dma_addr & (SZ_1K - 1))) {
		dev_err(hba->dev,
			"Transfer Descriptor Memory allocation failed\n");
		goto out;
	}

	/*
	 * Skip utmrdl allocation; it may have been
	 * allocated during first pass and not released during
	 * MCQ memory allocation.
	 * See ufshcd_release_sdb_queue() and ufshcd_config_mcq()
	 */
	if (hba->utmrdl_base_addr)
		goto skip_utmrdl;
	/*
	 * Allocate memory for UTP Task Management descriptors
	 * UFSHCI requires 1KB alignment of UTMRD
	 */
	utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
	hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
						    utmrdl_size,
						    &hba->utmrdl_dma_addr,
						    GFP_KERNEL);
	if (!hba->utmrdl_base_addr ||
	    WARN_ON(hba->utmrdl_dma_addr & (SZ_1K - 1))) {
		dev_err(hba->dev,
			"Task Management Descriptor Memory allocation failed\n");
		goto out;
	}

skip_utmrdl:
	/* Allocate memory for local reference block */
	hba->lrb = devm_kcalloc(hba->dev,
				hba->nutrs, sizeof(struct ufshcd_lrb),
				GFP_KERNEL);
	if (!hba->lrb) {
		dev_err(hba->dev, "LRB Memory allocation failed\n");
		goto out;
	}
	return 0;
out:
	return -ENOMEM;
}
/**
 * ufshcd_host_memory_configure - configure local reference block with
 *				memory offsets
 * @hba: per adapter instance
 *
 * Configure Host memory space
 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
 *    address.
 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
 *    and PRDT offset.
 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
 *    into local reference block.
 */
static void ufshcd_host_memory_configure(struct ufs_hba *hba)
{
	struct utp_transfer_req_desc *utrdlp;
	dma_addr_t cmd_desc_dma_addr;
	dma_addr_t cmd_desc_element_addr;
	u16 response_offset;
	u16 prdt_offset;
	int cmd_desc_size;
	int i;

	utrdlp = hba->utrdl_base_addr;

	response_offset =
		offsetof(struct utp_transfer_cmd_desc, response_upiu);
	prdt_offset =
		offsetof(struct utp_transfer_cmd_desc, prd_table);

	cmd_desc_size = ufshcd_get_ucd_size(hba);
	cmd_desc_dma_addr = hba->ucdl_dma_addr;

	for (i = 0; i < hba->nutrs; i++) {
		/* Configure UTRD with command descriptor base address */
		cmd_desc_element_addr =
				(cmd_desc_dma_addr + (cmd_desc_size * i));
		utrdlp[i].command_desc_base_addr =
				cpu_to_le64(cmd_desc_element_addr);

		/* Response upiu and prdt offset should be in double words */
		if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
			utrdlp[i].response_upiu_offset =
				cpu_to_le16(response_offset);
			utrdlp[i].prd_table_offset =
				cpu_to_le16(prdt_offset);
			utrdlp[i].response_upiu_length =
				cpu_to_le16(ALIGNED_UPIU_SIZE);
		} else {
			utrdlp[i].response_upiu_offset =
				cpu_to_le16(response_offset >> 2);
			utrdlp[i].prd_table_offset =
				cpu_to_le16(prdt_offset >> 2);
			utrdlp[i].response_upiu_length =
				cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
		}

		ufshcd_init_lrb(hba, &hba->lrb[i], i);
	}
}
/**
 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
 * @hba: per adapter instance
 *
 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
 * in order to initialize the Unipro link startup procedure.
 * Once the Unipro links are up, the device connected to the controller
 * is detected.
 *
 * Return: 0 on success, non-zero value on failure.
 */
static int ufshcd_dme_link_startup(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_dbg(hba->dev,
			"dme-link-startup: error code %d\n", ret);
	return ret;
}

/**
 * ufshcd_dme_reset - UIC command for DME_RESET
 * @hba: per adapter instance
 *
 * DME_RESET command is issued in order to reset UniPro stack.
 * This function now deals with cold reset.
 *
 * Return: 0 on success, non-zero value on failure.
 */
static int ufshcd_dme_reset(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_RESET;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_err(hba->dev,
			"dme-reset: error code %d\n", ret);

	return ret;
}

int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
			       int agreed_gear,
			       int adapt_val)
{
	int ret;

	if (agreed_gear < UFS_HS_G4)
		adapt_val = PA_NO_ADAPT;

	ret = ufshcd_dme_set(hba,
			     UIC_ARG_MIB(PA_TXHSADAPTTYPE),
			     adapt_val);
	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt);

/**
 * ufshcd_dme_enable - UIC command for DME_ENABLE
 * @hba: per adapter instance
 *
 * DME_ENABLE command is issued in order to enable UniPro stack.
 *
 * Return: 0 on success, non-zero value on failure.
 */
static int ufshcd_dme_enable(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_ENABLE;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_err(hba->dev,
			"dme-enable: error code %d\n", ret);

	return ret;
}
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
{
	#define MIN_DELAY_BEFORE_DME_CMDS_US	1000
	unsigned long min_sleep_time_us;

	if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
		return;

	/*
	 * last_dme_cmd_tstamp will be 0 only for 1st call to
	 * this function
	 */
	if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
		min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
	} else {
		unsigned long delta =
			(unsigned long) ktime_to_us(
				ktime_sub(ktime_get(),
				hba->last_dme_cmd_tstamp));

		if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
			min_sleep_time_us =
				MIN_DELAY_BEFORE_DME_CMDS_US - delta;
		else
			return; /* no more delay required */
	}

	/* allow sleep for extra 50us if needed */
	usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
}
/**
 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
 * @hba: per adapter instance
 * @attr_sel: uic command argument1
 * @attr_set: attribute set type as uic command argument2
 * @mib_val: setting value as uic command argument3
 * @peer: indicate whether peer or local
 *
 * Return: 0 on success, non-zero value on failure.
 */
int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
			u8 attr_set, u32 mib_val, u8 peer)
{
	struct uic_command uic_cmd = {0};
	static const char *const action[] = {
		"dme-set",
		"dme-peer-set"
	};
	const char *set = action[!!peer];
	int ret;
	int retries = UFS_UIC_COMMAND_RETRIES;

	uic_cmd.command = peer ?
		UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
	uic_cmd.argument1 = attr_sel;
	uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
	uic_cmd.argument3 = mib_val;

	do {
		/* for peer attributes we retry upon failure */
		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
		if (ret)
			dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
				set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
	} while (ret && peer && --retries);

	if (ret)
		dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
			set, UIC_GET_ATTR_ID(attr_sel), mib_val,
			UFS_UIC_COMMAND_RETRIES - retries);

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
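
/*
 * Illustrative sketch: callers normally use the ufshcd_dme_set()/
 * ufshcd_dme_peer_set() wrappers from ufshcd.h rather than calling
 * ufshcd_dme_set_attr() directly, e.g. to program the TX gear:
 *
 *	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G3);
 *
 * The peer variants address the same MIB attributes on the device side of
 * the link.
 */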
/**
 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
 * @hba: per adapter instance
 * @attr_sel: uic command argument1
 * @mib_val: the value of the attribute as returned by the UIC command
 * @peer: indicate whether peer or local
 *
 * Return: 0 on success, non-zero value on failure.
 */
int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
			u32 *mib_val, u8 peer)
{
	struct uic_command uic_cmd = {0};
	static const char *const action[] = {
		"dme-get",
		"dme-peer-get"
	};
	const char *get = action[!!peer];
	int ret;
	int retries = UFS_UIC_COMMAND_RETRIES;
	struct ufs_pa_layer_attr orig_pwr_info;
	struct ufs_pa_layer_attr temp_pwr_info;
	bool pwr_mode_change = false;

	if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
		orig_pwr_info = hba->pwr_info;
		temp_pwr_info = orig_pwr_info;

		if (orig_pwr_info.pwr_tx == FAST_MODE ||
		    orig_pwr_info.pwr_rx == FAST_MODE) {
			temp_pwr_info.pwr_tx = FASTAUTO_MODE;
			temp_pwr_info.pwr_rx = FASTAUTO_MODE;
			pwr_mode_change = true;
		} else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
		    orig_pwr_info.pwr_rx == SLOW_MODE) {
			temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
			temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
			pwr_mode_change = true;
		}
		if (pwr_mode_change) {
			ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
			if (ret)
				goto out;
		}
	}

	uic_cmd.command = peer ?
		UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
	uic_cmd.argument1 = attr_sel;

	do {
		/* for peer attributes we retry upon failure */
		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
		if (ret)
			dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
				get, UIC_GET_ATTR_ID(attr_sel), ret);
	} while (ret && peer && --retries);

	if (ret)
		dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
			get, UIC_GET_ATTR_ID(attr_sel),
			UFS_UIC_COMMAND_RETRIES - retries);

	if (mib_val && !ret)
		*mib_val = uic_cmd.argument3;

	if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
	    && pwr_mode_change)
		ufshcd_change_power_mode(hba, &orig_pwr_info);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
/**
 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affect the link power
 * state) and waits for it to take effect.
 *
 * @hba: per adapter instance
 * @cmd: UIC command to execute
 *
 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
 * DME_HIBERNATE_EXIT commands take some time to take effect on both the host
 * and device UniPro link, and hence their final completion is indicated by
 * dedicated status bits in the Interrupt Status register (UPMS, UHES, UHXS) in
 * addition to the normal UIC command completion Status (UCCS). This function
 * only returns after the relevant status bits indicate the completion.
 *
 * Return: 0 on success, non-zero value on failure.
 */
static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
{
	DECLARE_COMPLETION_ONSTACK(uic_async_done);
	unsigned long flags;
	u8 status;
	int ret;
	bool reenable_intr = false;

	mutex_lock(&hba->uic_cmd_mutex);
	ufshcd_add_delay_before_dme_cmd(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (ufshcd_is_link_broken(hba)) {
		ret = -ENOLINK;
		goto out_unlock;
	}
	hba->uic_async_done = &uic_async_done;
	if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
		ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
		/*
		 * Make sure UIC command completion interrupt is disabled before
		 * issuing UIC command.
		 */
		wmb();
		reenable_intr = true;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ret = __ufshcd_send_uic_cmd(hba, cmd, false);
	if (ret) {
		dev_err(hba->dev,
			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
			cmd->command, cmd->argument3, ret);
		goto out;
	}

	if (!wait_for_completion_timeout(hba->uic_async_done,
					 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
		dev_err(hba->dev,
			"pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
			cmd->command, cmd->argument3);

		if (!cmd->cmd_active) {
			dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n",
				__func__);
			goto check_upmcrs;
		}

		ret = -ETIMEDOUT;
		goto out;
	}

check_upmcrs:
	status = ufshcd_get_upmcrs(hba);
	if (status != PWR_LOCAL) {
		dev_err(hba->dev,
			"pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
			cmd->command, status);
		ret = (status != PWR_OK) ? status : -1;
	}
out:
	if (ret) {
		ufshcd_print_host_state(hba);
		ufshcd_print_pwr_info(hba);
		ufshcd_print_evt_hist(hba);
	}

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->active_uic_cmd = NULL;
	hba->uic_async_done = NULL;
	if (reenable_intr)
		ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
	if (ret) {
		ufshcd_set_link_broken(hba);
		ufshcd_schedule_eh_work(hba);
	}
out_unlock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	mutex_unlock(&hba->uic_cmd_mutex);

	return ret;
}
/**
 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
 * using DME_SET primitives.
 * @hba: per adapter instance
 * @mode: power mode value
 *
 * Return: 0 on success, non-zero value on failure.
 */
int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
{
	struct uic_command uic_cmd = {0};
	int ret;

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
		ret = ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
		if (ret) {
			dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
						__func__, ret);
			goto out;
		}
	}

	uic_cmd.command = UIC_CMD_DME_SET;
	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
	uic_cmd.argument3 = mode;
	ufshcd_hold(hba);
	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
	ufshcd_release(hba);

out:
	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_uic_change_pwr_mode);
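/*
 * Illustrative sketch (not part of the driver): the @mode byte packs the RX
 * power mode in the upper nibble and the TX power mode in the lower nibble,
 * which is how ufshcd_change_power_mode() below composes it:
 *
 *	int ret;
 *	u8 mode = (FAST_MODE << 4) | FAST_MODE;
 *
 *	ret = ufshcd_uic_change_pwr_mode(hba, mode);
 */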
int ufshcd_link_recovery(struct ufs_hba *hba)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->ufshcd_state = UFSHCD_STATE_RESET;
	ufshcd_set_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* Reset the attached device */
	ufshcd_device_reset(hba);

	ret = ufshcd_host_reset_and_restore(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (ret)
		hba->ufshcd_state = UFSHCD_STATE_ERROR;
	ufshcd_clear_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (ret)
		dev_err(hba->dev, "%s: link recovery failed, err %d",
			__func__, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
{
	int ret;
	struct uic_command uic_cmd = {0};
	ktime_t start = ktime_get();

	ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);

	uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
	trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
			     ktime_to_us(ktime_sub(ktime_get(), start)), ret);

	if (ret)
		dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
			__func__, ret);
	else
		ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
								POST_CHANGE);

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_enter);
int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;
	ktime_t start = ktime_get();

	ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);

	uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
	trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
			     ktime_to_us(ktime_sub(ktime_get(), start)), ret);

	if (ret) {
		dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
			__func__, ret);
	} else {
		ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
								POST_CHANGE);
		hba->ufs_stats.last_hibern8_exit_tstamp = local_clock();
		hba->ufs_stats.hibern8_exit_cnt++;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);
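/*
 * Illustrative sketch (not part of the driver): a manual hibern8 round trip,
 * as done by the suspend/resume paths elsewhere in this file:
 *
 *	ret = ufshcd_uic_hibern8_enter(hba);
 *	if (!ret) {
 *		// link is now in hibern8; wake it back up
 *		ret = ufshcd_uic_hibern8_exit(hba);
 *	}
 */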
static void ufshcd_configure_auto_hibern8(struct ufs_hba *hba)
{
	if (!ufshcd_is_auto_hibern8_supported(hba))
		return;

	ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
}
void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
{
	const u32 cur_ahit = READ_ONCE(hba->ahit);

	if (!ufshcd_is_auto_hibern8_supported(hba) || cur_ahit == ahit)
		return;

	WRITE_ONCE(hba->ahit, ahit);
	if (!pm_runtime_suspended(&hba->ufs_device_wlun->sdev_gendev)) {
		ufshcd_rpm_get_sync(hba);
		ufshcd_hold(hba);
		ufshcd_configure_auto_hibern8(hba);
		ufshcd_release(hba);
		ufshcd_rpm_put_sync(hba);
	}
}
EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
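/*
 * Illustrative sketch (not part of the driver): an AHIT value combines a
 * scaled timer with a scale factor. Assuming the ufshcd_us_to_ahit() helper
 * from ufshcd.h, a caller could request a 150 us auto-hibern8 idle timer
 * with:
 *
 *	ufshcd_auto_hibern8_update(hba, ufshcd_us_to_ahit(150));
 *
 * Passing 0 disables auto-hibern8.
 */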
/**
 * ufshcd_init_pwr_info - setting the POR (power on reset)
 * values in hba power info
 * @hba: per-adapter instance
 */
static void ufshcd_init_pwr_info(struct ufs_hba *hba)
{
	hba->pwr_info.gear_rx = UFS_PWM_G1;
	hba->pwr_info.gear_tx = UFS_PWM_G1;
	hba->pwr_info.lane_rx = UFS_LANE_1;
	hba->pwr_info.lane_tx = UFS_LANE_1;
	hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
	hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
	hba->pwr_info.hs_rate = 0;
}
/**
 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
 * @hba: per-adapter instance
 *
 * Return: 0 upon success; < 0 upon failure.
 */
static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
{
	struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;

	if (hba->max_pwr_info.is_valid)
		return 0;

	if (hba->quirks & UFSHCD_QUIRK_HIBERN_FASTAUTO) {
		pwr_info->pwr_tx = FASTAUTO_MODE;
		pwr_info->pwr_rx = FASTAUTO_MODE;
	} else {
		pwr_info->pwr_tx = FAST_MODE;
		pwr_info->pwr_rx = FAST_MODE;
	}
	pwr_info->hs_rate = PA_HS_MODE_B;

	/* Get the connected lane count */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
			&pwr_info->lane_rx);
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
			&pwr_info->lane_tx);

	if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
		dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
				__func__,
				pwr_info->lane_rx,
				pwr_info->lane_tx);
		return -EINVAL;
	}

	/*
	 * First, get the maximum gears of HS speed.
	 * If a zero value, it means there is no HSGEAR capability.
	 * Then, get the maximum gears of PWM speed.
	 */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
	if (!pwr_info->gear_rx) {
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
				&pwr_info->gear_rx);
		if (!pwr_info->gear_rx) {
			dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
				__func__, pwr_info->gear_rx);
			return -EINVAL;
		}
		pwr_info->pwr_rx = SLOW_MODE;
	}

	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
			&pwr_info->gear_tx);
	if (!pwr_info->gear_tx) {
		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
				&pwr_info->gear_tx);
		if (!pwr_info->gear_tx) {
			dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
				__func__, pwr_info->gear_tx);
			return -EINVAL;
		}
		pwr_info->pwr_tx = SLOW_MODE;
	}

	hba->max_pwr_info.is_valid = true;
	return 0;
}
static int ufshcd_change_power_mode(struct ufs_hba *hba,
			     struct ufs_pa_layer_attr *pwr_mode)
{
	int ret;

	/* if already configured to the requested pwr_mode */
	if (!hba->force_pmc &&
	    pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
	    pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
	    pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
	    pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
	    pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
	    pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
	    pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
		dev_dbg(hba->dev, "%s: power already configured\n", __func__);
		return 0;
	}

	/*
	 * Configure attributes for power mode change with below.
	 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
	 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
	 * - PA_HSSERIES
	 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
			pwr_mode->lane_rx);
	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
			pwr_mode->pwr_rx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
	else
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), false);

	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
			pwr_mode->lane_tx);
	if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
			pwr_mode->pwr_tx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
	else
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), false);

	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
	    pwr_mode->pwr_tx == FASTAUTO_MODE ||
	    pwr_mode->pwr_rx == FAST_MODE ||
	    pwr_mode->pwr_tx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
						pwr_mode->hs_rate);

	if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) {
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
				DL_FC0ProtectionTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
				DL_TC0ReplayTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
				DL_AFC0ReqTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
				DL_FC1ProtectionTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
				DL_TC1ReplayTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
				DL_AFC1ReqTimeOutVal_Default);

		ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
				DL_FC0ProtectionTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
				DL_TC0ReplayTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
				DL_AFC0ReqTimeOutVal_Default);
	}

	ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
			| pwr_mode->pwr_tx);

	if (ret) {
		dev_err(hba->dev,
			"%s: power mode change failed %d\n", __func__, ret);
	} else {
		ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
								pwr_mode);

		memcpy(&hba->pwr_info, pwr_mode,
			sizeof(struct ufs_pa_layer_attr));
	}

	return ret;
}
/**
 * ufshcd_config_pwr_mode - configure a new power mode
 * @hba: per-adapter instance
 * @desired_pwr_mode: desired power configuration
 *
 * Return: 0 upon success; < 0 upon failure.
 */
int ufshcd_config_pwr_mode(struct ufs_hba *hba,
		struct ufs_pa_layer_attr *desired_pwr_mode)
{
	struct ufs_pa_layer_attr final_params = { 0 };
	int ret;

	ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
					desired_pwr_mode, &final_params);

	if (ret)
		memcpy(&final_params, desired_pwr_mode, sizeof(final_params));

	ret = ufshcd_change_power_mode(hba, &final_params);

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
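/*
 * Illustrative sketch (not part of the driver): after probing the maximum
 * capabilities, callers hand a filled ufs_pa_layer_attr to this function.
 * The gear values below are examples only:
 *
 *	struct ufs_pa_layer_attr new_pwr = hba->max_pwr_info.info;
 *
 *	new_pwr.gear_rx = UFS_HS_G3;
 *	new_pwr.gear_tx = UFS_HS_G3;
 *	ret = ufshcd_config_pwr_mode(hba, &new_pwr);
 */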
/**
 * ufshcd_complete_dev_init() - checks device readiness
 * @hba: per-adapter instance
 *
 * Set fDeviceInit flag and poll until device toggles it.
 *
 * Return: 0 upon success; < 0 upon failure.
 */
static int ufshcd_complete_dev_init(struct ufs_hba *hba)
{
	int err;
	bool flag_res = true;
	ktime_t timeout;

	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
		QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
	if (err) {
		dev_err(hba->dev,
			"%s: setting fDeviceInit flag failed with error %d\n",
			__func__, err);
		goto out;
	}

	/* Poll fDeviceInit flag to be cleared */
	timeout = ktime_add_ms(ktime_get(), FDEVICEINIT_COMPL_TIMEOUT);
	do {
		err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
					QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
		if (!flag_res)
			break;
		usleep_range(500, 1000);
	} while (ktime_before(ktime_get(), timeout));

	if (err) {
		dev_err(hba->dev,
			"%s: reading fDeviceInit flag failed with error %d\n",
			__func__, err);
	} else if (flag_res) {
		dev_err(hba->dev,
			"%s: fDeviceInit was not cleared by the device\n",
			__func__);
		err = -EBUSY;
	}
out:
	return err;
}
/**
 * ufshcd_make_hba_operational - Make UFS controller operational
 * @hba: per adapter instance
 *
 * To bring UFS host controller to operational state,
 * 1. Enable required interrupts
 * 2. Configure interrupt aggregation
 * 3. Program UTRL and UTMRL base address
 * 4. Configure run-stop-registers
 *
 * Return: 0 on success, non-zero value on failure.
 */
int ufshcd_make_hba_operational(struct ufs_hba *hba)
{
	int err = 0;
	u32 reg;

	/* Enable required interrupts */
	ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);

	/* Configure interrupt aggregation */
	if (ufshcd_is_intr_aggr_allowed(hba))
		ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
	else
		ufshcd_disable_intr_aggr(hba);

	/* Configure UTRL and UTMRL base address registers */
	ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
			REG_UTP_TRANSFER_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
			REG_UTP_TRANSFER_REQ_LIST_BASE_H);
	ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
			REG_UTP_TASK_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
			REG_UTP_TASK_REQ_LIST_BASE_H);

	/*
	 * Make sure base address and interrupt setup are updated before
	 * enabling the run/stop registers below.
	 */
	wmb();

	/*
	 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
	 */
	reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
	if (!(ufshcd_get_lists_status(reg))) {
		ufshcd_enable_run_stop_reg(hba);
	} else {
		dev_err(hba->dev,
			"Host controller not ready to process requests");
		err = -EIO;
	}

	return err;
}
EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
/**
 * ufshcd_hba_stop - Send controller to reset state
 * @hba: per adapter instance
 */
void ufshcd_hba_stop(struct ufs_hba *hba)
{
	unsigned long flags;
	int err;

	/*
	 * Obtain the host lock to prevent that the controller is disabled
	 * while the UFS interrupt handler is active on another CPU.
	 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
					CONTROLLER_ENABLE, CONTROLLER_DISABLE,
					10, 1);
	if (err)
		dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
}
EXPORT_SYMBOL_GPL(ufshcd_hba_stop);
/**
 * ufshcd_hba_execute_hce - initialize the controller
 * @hba: per adapter instance
 *
 * The controller resets itself and controller firmware initialization
 * sequence kicks off. When controller is ready it will set
 * the Host Controller Enable bit to 1.
 *
 * Return: 0 on success, non-zero value on failure.
 */
static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
{
	int retry_outer = 3;
	int retry_inner;

start:
	if (ufshcd_is_hba_active(hba))
		/* change controller state to "reset state" */
		ufshcd_hba_stop(hba);

	/* UniPro link is disabled at this point */
	ufshcd_set_link_off(hba);

	ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);

	/* start controller initialization sequence */
	ufshcd_hba_start(hba);

	/*
	 * To initialize a UFS host controller HCE bit must be set to 1.
	 * During initialization the HCE bit value changes from 1->0->1.
	 * When the host controller completes initialization sequence
	 * it sets the value of HCE bit to 1. The same HCE bit is read back
	 * to check if the controller has completed initialization sequence.
	 * So without this delay the value HCE = 1, set in the previous
	 * instruction might be read back.
	 * This delay can be changed based on the controller.
	 */
	ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);

	/* wait for the host controller to complete initialization */
	retry_inner = 50;
	while (!ufshcd_is_hba_active(hba)) {
		if (retry_inner) {
			retry_inner--;
		} else {
			dev_err(hba->dev,
				"Controller enable failed\n");
			if (retry_outer) {
				retry_outer--;
				goto start;
			}
			return -EIO;
		}
		usleep_range(1000, 1100);
	}

	/* enable UIC related interrupts */
	ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);

	ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);

	return 0;
}
int ufshcd_hba_enable(struct ufs_hba *hba)
{
	int ret;

	if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
		ufshcd_set_link_off(hba);
		ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);

		/* enable UIC related interrupts */
		ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
		ret = ufshcd_dme_reset(hba);
		if (ret) {
			dev_err(hba->dev, "DME_RESET failed\n");
			return ret;
		}

		ret = ufshcd_dme_enable(hba);
		if (ret) {
			dev_err(hba->dev, "Enabling DME failed\n");
			return ret;
		}

		ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
	} else {
		ret = ufshcd_hba_execute_hce(hba);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
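/*
 * Illustrative sketch (not part of the driver): the reset paths in this file
 * chain controller enable, link startup and the operational transition in
 * this order:
 *
 *	ret = ufshcd_hba_enable(hba);
 *	if (!ret)
 *		ret = ufshcd_link_startup(hba);	// which in turn calls
 *						// ufshcd_make_hba_operational()
 */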
static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
{
	int tx_lanes = 0, i, err = 0;

	if (!peer)
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
			       &tx_lanes);
	else
		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
				    &tx_lanes);
	for (i = 0; i < tx_lanes; i++) {
		if (!peer)
			err = ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
					0);
		else
			err = ufshcd_dme_peer_set(hba,
				UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
					0);
		if (err) {
			dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
				__func__, peer, i, err);
			break;
		}
	}

	return err;
}

static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
{
	return ufshcd_disable_tx_lcc(hba, true);
}
void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val)
{
	struct ufs_event_hist *e;

	if (id >= UFS_EVT_CNT)
		return;

	e = &hba->ufs_stats.event[id];
	e->val[e->pos] = val;
	e->tstamp[e->pos] = local_clock();
	e->cnt += 1;
	e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH;

	ufshcd_vops_event_notify(hba, id, &val);
}
EXPORT_SYMBOL_GPL(ufshcd_update_evt_hist);
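/*
 * Illustrative sketch (not part of the driver): error paths feed this
 * history; for example, the link startup code below records failures with:
 *
 *	ufshcd_update_evt_hist(hba, UFS_EVT_LINK_STARTUP_FAIL, (u32)ret);
 *
 * Vendor drivers may record their own values against any UFS_EVT_* id.
 */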
/**
 * ufshcd_link_startup - Initialize unipro link startup
 * @hba: per adapter instance
 *
 * Return: 0 for success, non-zero in case of failure.
 */
static int ufshcd_link_startup(struct ufs_hba *hba)
{
	int ret;
	int retries = DME_LINKSTARTUP_RETRIES;
	bool link_startup_again = false;

	/*
	 * If UFS device isn't active then we will have to issue link startup
	 * 2 times to make sure the device state move to active.
	 */
	if (!ufshcd_is_ufs_dev_active(hba))
		link_startup_again = true;

link_startup:
	do {
		ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);

		ret = ufshcd_dme_link_startup(hba);

		/* check if device is detected by inter-connect layer */
		if (!ret && !ufshcd_is_device_present(hba)) {
			ufshcd_update_evt_hist(hba,
					       UFS_EVT_LINK_STARTUP_FAIL,
					       0);
			dev_err(hba->dev, "%s: Device not present\n", __func__);
			ret = -ENXIO;
			goto out;
		}

		/*
		 * DME link lost indication is only received when link is up,
		 * but we can't be sure if the link is up until link startup
		 * succeeds. So reset the local Uni-Pro and try again.
		 */
		if (ret && retries && ufshcd_hba_enable(hba)) {
			ufshcd_update_evt_hist(hba,
					       UFS_EVT_LINK_STARTUP_FAIL,
					       (u32)ret);
			goto out;
		}
	} while (ret && retries--);

	if (ret) {
		/* failed to get the link up... retire */
		ufshcd_update_evt_hist(hba,
				       UFS_EVT_LINK_STARTUP_FAIL,
				       (u32)ret);
		goto out;
	}

	if (link_startup_again) {
		link_startup_again = false;
		retries = DME_LINKSTARTUP_RETRIES;
		goto link_startup;
	}

	/* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
	ufshcd_init_pwr_info(hba);
	ufshcd_print_pwr_info(hba);

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
		ret = ufshcd_disable_device_tx_lcc(hba);
		if (ret)
			goto out;
	}

	/* Include any host controller configuration via UIC commands */
	ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
	if (ret)
		goto out;

	/* Clear UECPA once due to LINERESET has happened during LINK_STARTUP */
	ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
	ret = ufshcd_make_hba_operational(hba);
out:
	if (ret) {
		dev_err(hba->dev, "link startup failed %d\n", ret);
		ufshcd_print_host_state(hba);
		ufshcd_print_pwr_info(hba);
		ufshcd_print_evt_hist(hba);
	}
	return ret;
}
/**
 * ufshcd_verify_dev_init() - Verify device initialization
 * @hba: per-adapter instance
 *
 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
 * device Transport Protocol (UTP) layer is ready after a reset.
 * If the UTP layer at the device side is not initialized, it may
 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
 *
 * Return: 0 upon success; < 0 upon failure.
 */
static int ufshcd_verify_dev_init(struct ufs_hba *hba)
{
	int err = 0;
	int retries;

	ufshcd_hold(hba);
	mutex_lock(&hba->dev_cmd.lock);
	for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
		err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
					  hba->nop_out_timeout);

		if (!err || err == -ETIMEDOUT)
			break;

		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
	}
	mutex_unlock(&hba->dev_cmd.lock);
	ufshcd_release(hba);

	if (err)
		dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
	return err;
}
/**
 * ufshcd_setup_links - associate link b/w device wlun and other luns
 * @sdev: pointer to SCSI device
 * @hba: pointer to ufs hba
 */
static void ufshcd_setup_links(struct ufs_hba *hba, struct scsi_device *sdev)
{
	struct device_link *link;

	/*
	 * Device wlun is the supplier & rest of the luns are consumers.
	 * This ensures that device wlun suspends after all other luns.
	 */
	if (hba->ufs_device_wlun) {
		link = device_link_add(&sdev->sdev_gendev,
				       &hba->ufs_device_wlun->sdev_gendev,
				       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
		if (!link) {
			dev_err(&sdev->sdev_gendev, "Failed establishing link - %s\n",
				dev_name(&hba->ufs_device_wlun->sdev_gendev));
			return;
		}
		hba->luns_avail--;
		/* Ignore REPORT_LUN wlun probing */
		if (hba->luns_avail == 1) {
			ufshcd_rpm_put(hba);
			return;
		}
	} else {
		/*
		 * Device wlun is probed. The assumption is that WLUNs are
		 * scanned before other LUNs.
		 */
		hba->luns_avail--;
	}
}
/**
 * ufshcd_lu_init - Initialize the relevant parameters of the LU
 * @hba: per-adapter instance
 * @sdev: pointer to SCSI device
 */
static void ufshcd_lu_init(struct ufs_hba *hba, struct scsi_device *sdev)
{
	int len = QUERY_DESC_MAX_SIZE;
	u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);
	u8 lun_qdepth = hba->nutrs;
	u8 *desc_buf;
	int ret;

	desc_buf = kzalloc(len, GFP_KERNEL);
	if (!desc_buf)
		goto set_qdepth;

	ret = ufshcd_read_unit_desc_param(hba, lun, 0, desc_buf, len);
	if (ret < 0) {
		if (ret == -EOPNOTSUPP)
			/* If LU doesn't support unit descriptor, its queue depth is set to 1 */
			lun_qdepth = 1;
		kfree(desc_buf);
		goto set_qdepth;
	}

	if (desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH]) {
		/*
		 * In per-LU queueing architecture, bLUQueueDepth will not be 0, then we will
		 * use the smaller between UFSHCI CAP.NUTRS and UFS LU bLUQueueDepth
		 */
		lun_qdepth = min_t(int, desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH], hba->nutrs);
	}
	/*
	 * According to UFS device specification, the write protection mode is only supported by
	 * normal LU, not supported by WLUN.
	 */
	if (hba->dev_info.f_power_on_wp_en && lun < hba->dev_info.max_lu_supported &&
	    !hba->dev_info.is_lu_power_on_wp &&
	    desc_buf[UNIT_DESC_PARAM_LU_WR_PROTECT] == UFS_LU_POWER_ON_WP)
		hba->dev_info.is_lu_power_on_wp = true;

	/* In case of RPMB LU, check if advanced RPMB mode is enabled */
	if (desc_buf[UNIT_DESC_PARAM_UNIT_INDEX] == UFS_UPIU_RPMB_WLUN &&
	    desc_buf[RPMB_UNIT_DESC_PARAM_REGION_EN] & BIT(4))
		hba->dev_info.b_advanced_rpmb_en = true;

	kfree(desc_buf);
set_qdepth:
	/*
	 * For WLUNs that don't support unit descriptor, queue depth is set to 1. For LUs whose
	 * bLUQueueDepth == 0, the queue depth is set to a maximum value that host can queue.
	 */
	dev_dbg(hba->dev, "Set LU %x queue depth %d\n", lun, lun_qdepth);
	scsi_change_queue_depth(sdev, lun_qdepth);
}
/**
 * ufshcd_slave_alloc - handle initial SCSI device configurations
 * @sdev: pointer to SCSI device
 *
 * Return: success.
 */
static int ufshcd_slave_alloc(struct scsi_device *sdev)
{
	struct ufs_hba *hba;

	hba = shost_priv(sdev->host);

	/* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
	sdev->use_10_for_ms = 1;

	/* DBD field should be set to 1 in mode sense(10) */
	sdev->set_dbd_for_ms = 1;

	/* allow SCSI layer to restart the device in case of errors */
	sdev->allow_restart = 1;

	/* REPORT SUPPORTED OPERATION CODES is not supported */
	sdev->no_report_opcodes = 1;

	/* WRITE_SAME command is not supported */
	sdev->no_write_same = 1;

	ufshcd_lu_init(hba, sdev);

	ufshcd_setup_links(hba, sdev);

	return 0;
}
/**
 * ufshcd_change_queue_depth - change queue depth
 * @sdev: pointer to SCSI device
 * @depth: required depth to set
 *
 * Change queue depth and make sure the max. limits are not crossed.
 *
 * Return: new queue depth.
 */
static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
{
	return scsi_change_queue_depth(sdev, min(depth, sdev->host->can_queue));
}
/**
 * ufshcd_slave_configure - adjust SCSI device configurations
 * @sdev: pointer to SCSI device
 *
 * Return: 0 (success).
 */
static int ufshcd_slave_configure(struct scsi_device *sdev)
{
	struct ufs_hba *hba = shost_priv(sdev->host);
	struct request_queue *q = sdev->request_queue;

	blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);

	/*
	 * Block runtime-pm until all consumers are added.
	 * Refer ufshcd_setup_links().
	 */
	if (is_device_wlun(sdev))
		pm_runtime_get_noresume(&sdev->sdev_gendev);
	else if (ufshcd_is_rpm_autosuspend_allowed(hba))
		sdev->rpm_autosuspend = 1;
	/*
	 * Do not print messages during runtime PM to avoid never-ending cycles
	 * of messages written back to storage by user space causing runtime
	 * resume, causing more messages and so on.
	 */
	sdev->silence_suspend = 1;

	if (hba->vops && hba->vops->config_scsi_dev)
		hba->vops->config_scsi_dev(sdev);

	ufshcd_crypto_register(hba, q);

	return 0;
}
/**
 * ufshcd_slave_destroy - remove SCSI device configurations
 * @sdev: pointer to SCSI device
 */
static void ufshcd_slave_destroy(struct scsi_device *sdev)
{
	struct ufs_hba *hba;
	unsigned long flags;

	hba = shost_priv(sdev->host);

	/* Drop the reference as it won't be needed anymore */
	if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
		spin_lock_irqsave(hba->host->host_lock, flags);
		hba->ufs_device_wlun = NULL;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	} else if (hba->ufs_device_wlun) {
		struct device *supplier = NULL;

		/* Ensure UFS Device WLUN exists and does not disappear */
		spin_lock_irqsave(hba->host->host_lock, flags);
		if (hba->ufs_device_wlun) {
			supplier = &hba->ufs_device_wlun->sdev_gendev;
			get_device(supplier);
		}
		spin_unlock_irqrestore(hba->host->host_lock, flags);

		if (supplier) {
			/*
			 * If a LUN fails to probe (e.g. absent BOOT WLUN), the
			 * device will not have been registered but can still
			 * have a device link holding a reference to the device.
			 */
			device_link_remove(&sdev->sdev_gendev, supplier);
			put_device(supplier);
		}
	}
}
/**
 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
 * @lrbp: pointer to local reference block of completed command
 * @scsi_status: SCSI command status
 *
 * Return: value base on SCSI command status.
 */
static inline int
ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
{
	int result = 0;

	switch (scsi_status) {
	case SAM_STAT_CHECK_CONDITION:
		ufshcd_copy_sense_data(lrbp);
		fallthrough;
	case SAM_STAT_GOOD:
		result |= DID_OK << 16 | scsi_status;
		break;
	case SAM_STAT_TASK_SET_FULL:
	case SAM_STAT_BUSY:
	case SAM_STAT_TASK_ABORTED:
		ufshcd_copy_sense_data(lrbp);
		result |= scsi_status;
		break;
	default:
		result |= DID_ERROR << 16;
		break;
	} /* end of switch */

	return result;
}
/**
 * ufshcd_transfer_rsp_status - Get overall status of the response
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block of completed command
 * @cqe: pointer to the completion queue entry
 *
 * Return: result of the command to notify SCSI midlayer.
 */
static inline int
ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
			   struct cq_entry *cqe)
{
	int result = 0;
	int scsi_status;
	enum utp_ocs ocs;
	u8 upiu_flags;
	u32 resid;

	upiu_flags = lrbp->ucd_rsp_ptr->header.flags;
	resid = be32_to_cpu(lrbp->ucd_rsp_ptr->sr.residual_transfer_count);
	/*
	 * Test !overflow instead of underflow to support UFS devices that do
	 * not set either flag.
	 */
	if (resid && !(upiu_flags & UPIU_RSP_FLAG_OVERFLOW))
		scsi_set_resid(lrbp->cmd, resid);

	/* overall command status of utrd */
	ocs = ufshcd_get_tr_ocs(lrbp, cqe);

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) {
		if (lrbp->ucd_rsp_ptr->header.response ||
		    lrbp->ucd_rsp_ptr->header.status)
			ocs = OCS_SUCCESS;
	}

	switch (ocs) {
	case OCS_SUCCESS:
		hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
		switch (ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr)) {
		case UPIU_TRANSACTION_RESPONSE:
			/*
			 * get the result based on SCSI status response
			 * to notify the SCSI midlayer of the command status
			 */
			scsi_status = lrbp->ucd_rsp_ptr->header.status;
			result = ufshcd_scsi_cmd_status(lrbp, scsi_status);

			/*
			 * Currently we are only supporting BKOPs exception
			 * events hence we can ignore BKOPs exception event
			 * during power management callbacks. BKOPs exception
			 * event is not expected to be raised in runtime suspend
			 * callback as it allows the urgent bkops.
			 * During system suspend, we are anyway forcefully
			 * disabling the bkops and if urgent bkops is needed
			 * it will be enabled on system resume. Long term
			 * solution could be to abort the system suspend if
			 * UFS device needs urgent BKOPs.
			 */
			if (!hba->pm_op_in_progress &&
			    !ufshcd_eh_in_progress(hba) &&
			    ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
				/* Flushed in suspend */
				schedule_work(&hba->eeh_work);
			break;
		case UPIU_TRANSACTION_REJECT_UPIU:
			/* TODO: handle Reject UPIU Response */
			result = DID_ERROR << 16;
			dev_err(hba->dev,
				"Reject UPIU not fully implemented\n");
			break;
		default:
			dev_err(hba->dev,
				"Unexpected request response code = %x\n",
				result);
			result = DID_ERROR << 16;
			break;
		}
		break;
	case OCS_ABORTED:
		result |= DID_ABORT << 16;
		break;
	case OCS_INVALID_COMMAND_STATUS:
		result |= DID_REQUEUE << 16;
		break;
	case OCS_INVALID_CMD_TABLE_ATTR:
	case OCS_INVALID_PRDT_ATTR:
	case OCS_MISMATCH_DATA_BUF_SIZE:
	case OCS_MISMATCH_RESP_UPIU_SIZE:
	case OCS_PEER_COMM_FAILURE:
	case OCS_FATAL_ERROR:
	case OCS_DEVICE_FATAL_ERROR:
	case OCS_INVALID_CRYPTO_CONFIG:
	case OCS_GENERAL_CRYPTO_ERROR:
	default:
		result |= DID_ERROR << 16;
		dev_err(hba->dev,
			"OCS error from controller = %x for tag %d\n",
			ocs, lrbp->task_tag);
		ufshcd_print_evt_hist(hba);
		ufshcd_print_host_state(hba);
		break;
	} /* end of switch */

	if ((host_byte(result) != DID_OK) &&
	    (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs)
		ufshcd_print_tr(hba, lrbp->task_tag, true);
	return result;
}
static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
					 u32 intr_mask)
{
	if (!ufshcd_is_auto_hibern8_supported(hba) ||
	    !ufshcd_is_auto_hibern8_enabled(hba))
		return false;

	if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
		return false;

	if (hba->active_uic_cmd &&
	    (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
	    hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
		return false;

	return true;
}
/**
 * ufshcd_uic_cmd_compl - handle completion of uic command
 * @hba: per adapter instance
 * @intr_status: interrupt status generated by the controller
 *
 * Return:
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
{
	irqreturn_t retval = IRQ_NONE;

	spin_lock(hba->host->host_lock);
	if (ufshcd_is_auto_hibern8_error(hba, intr_status))
		hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);

	if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
		hba->active_uic_cmd->argument2 |=
			ufshcd_get_uic_cmd_result(hba);
		hba->active_uic_cmd->argument3 =
			ufshcd_get_dme_attr_val(hba);
		if (!hba->uic_async_done)
			hba->active_uic_cmd->cmd_active = 0;
		complete(&hba->active_uic_cmd->done);
		retval = IRQ_HANDLED;
	}

	if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
		hba->active_uic_cmd->cmd_active = 0;
		complete(hba->uic_async_done);
		retval = IRQ_HANDLED;
	}

	if (retval == IRQ_HANDLED)
		ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd,
					     UFS_CMD_COMP);
	spin_unlock(hba->host->host_lock);
	return retval;
}
/* Release the resources allocated for processing a SCSI command. */
void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
			     struct ufshcd_lrb *lrbp)
{
	struct scsi_cmnd *cmd = lrbp->cmd;

	scsi_dma_unmap(cmd);
	ufshcd_release(hba);
	ufshcd_clk_scaling_update_busy(hba);
}
/**
 * ufshcd_compl_one_cqe - handle a completion queue entry
 * @hba: per adapter instance
 * @task_tag: the task tag of the request to be completed
 * @cqe: pointer to the completion queue entry
 */
void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
			  struct cq_entry *cqe)
{
	struct ufshcd_lrb *lrbp;
	struct scsi_cmnd *cmd;
	enum utp_ocs ocs;

	lrbp = &hba->lrb[task_tag];
	lrbp->compl_time_stamp = ktime_get();
	cmd = lrbp->cmd;
	if (cmd) {
		if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
			ufshcd_update_monitor(hba, lrbp);
		ufshcd_add_command_trace(hba, task_tag, UFS_CMD_COMP);
		cmd->result = ufshcd_transfer_rsp_status(hba, lrbp, cqe);
		ufshcd_release_scsi_cmd(hba, lrbp);
		/* Do not touch lrbp after scsi done */
		scsi_done(cmd);
	} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
		   lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
		if (hba->dev_cmd.complete) {
			if (cqe) {
				ocs = le32_to_cpu(cqe->status) & MASK_OCS;
				lrbp->utr_descriptor_ptr->header.ocs = ocs;
			}
			complete(hba->dev_cmd.complete);
		}
	}
}
/**
 * __ufshcd_transfer_req_compl - handle SCSI and query command completion
 * @hba: per adapter instance
 * @completed_reqs: bitmask that indicates which requests to complete
 */
static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
					unsigned long completed_reqs)
{
	int tag;

	for_each_set_bit(tag, &completed_reqs, hba->nutrs)
		ufshcd_compl_one_cqe(hba, tag, NULL);
}

/* Any value that is not an existing queue number is fine for this constant. */
enum {
	UFSHCD_POLL_FROM_INTERRUPT_CONTEXT = -1
};
static void ufshcd_clear_polled(struct ufs_hba *hba,
				unsigned long *completed_reqs)
{
	int tag;

	for_each_set_bit(tag, completed_reqs, hba->nutrs) {
		struct scsi_cmnd *cmd = hba->lrb[tag].cmd;

		if (!cmd)
			continue;
		if (scsi_cmd_to_rq(cmd)->cmd_flags & REQ_POLLED)
			__clear_bit(tag, completed_reqs);
	}
}
/*
 * Return: > 0 if one or more commands have been completed or 0 if no
 * requests have been completed.
 */
static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
{
	struct ufs_hba *hba = shost_priv(shost);
	unsigned long completed_reqs, flags;
	u32 tr_doorbell;
	struct ufs_hw_queue *hwq;

	if (is_mcq_enabled(hba)) {
		hwq = &hba->uhq[queue_num];

		return ufshcd_mcq_poll_cqe_lock(hba, hwq);
	}

	spin_lock_irqsave(&hba->outstanding_lock, flags);
	tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	completed_reqs = ~tr_doorbell & hba->outstanding_reqs;
	WARN_ONCE(completed_reqs & ~hba->outstanding_reqs,
		  "completed: %#lx; outstanding: %#lx\n", completed_reqs,
		  hba->outstanding_reqs);
	if (queue_num == UFSHCD_POLL_FROM_INTERRUPT_CONTEXT) {
		/* Do not complete polled requests from interrupt context. */
		ufshcd_clear_polled(hba, &completed_reqs);
	}
	hba->outstanding_reqs &= ~completed_reqs;
	spin_unlock_irqrestore(&hba->outstanding_lock, flags);

	if (completed_reqs)
		__ufshcd_transfer_req_compl(hba, completed_reqs);

	return completed_reqs != 0;
}
/**
 * ufshcd_mcq_compl_pending_transfer - MCQ mode function. It is
 * invoked from the error handler context or ufshcd_host_reset_and_restore()
 * to complete the pending transfers and free the resources associated with
 * the scsi commands.
 *
 * @hba: per adapter instance
 * @force_compl: This flag is set to true when invoked
 * from ufshcd_host_reset_and_restore() in which case it requires special
 * handling because the host controller has been reset by ufshcd_hba_stop().
 */
static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba,
					      bool force_compl)
{
	struct ufs_hw_queue *hwq;
	struct ufshcd_lrb *lrbp;
	struct scsi_cmnd *cmd;
	unsigned long flags;
	u32 hwq_num, utag;
	int tag;

	for (tag = 0; tag < hba->nutrs; tag++) {
		lrbp = &hba->lrb[tag];
		cmd = lrbp->cmd;
		if (!ufshcd_cmd_inflight(cmd) ||
		    test_bit(SCMD_STATE_COMPLETE, &cmd->state))
			continue;

		utag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd));
		hwq_num = blk_mq_unique_tag_to_hwq(utag);
		hwq = &hba->uhq[hwq_num];

		if (force_compl) {
			ufshcd_mcq_compl_all_cqes_lock(hba, hwq);
			/*
			 * For those cmds of which the cqes are not present
			 * in the cq, complete them explicitly.
			 */
			spin_lock_irqsave(&hwq->cq_lock, flags);
			if (cmd && !test_bit(SCMD_STATE_COMPLETE, &cmd->state)) {
				set_host_byte(cmd, DID_REQUEUE);
				ufshcd_release_scsi_cmd(hba, lrbp);
				scsi_done(cmd);
			}
			spin_unlock_irqrestore(&hwq->cq_lock, flags);
		} else {
			ufshcd_mcq_poll_cqe_lock(hba, hwq);
		}
	}
}
/**
 * ufshcd_transfer_req_compl - handle SCSI and query command completion
 * @hba: per adapter instance
 *
 * Return:
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
{
	/* Resetting interrupt aggregation counters first and reading the
	 * DOOR_BELL afterward allows us to handle all the completed requests.
	 * In order to prevent other interrupts starvation the DB is read once
	 * after reset. The down side of this solution is the possibility of
	 * false interrupt if device completes another request after resetting
	 * aggregation and before reading the DB.
	 */
	if (ufshcd_is_intr_aggr_allowed(hba) &&
	    !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
		ufshcd_reset_intr_aggr(hba);

	if (ufs_fail_completion(hba))
		return IRQ_HANDLED;

	/*
	 * Ignore the ufshcd_poll() return value and return IRQ_HANDLED since we
	 * do not want polling to trigger spurious interrupt complaints.
	 */
	ufshcd_poll(hba->host, UFSHCD_POLL_FROM_INTERRUPT_CONTEXT);

	return IRQ_HANDLED;
}
int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask)
{
	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
				       QUERY_ATTR_IDN_EE_CONTROL, 0, 0,
				       &ee_ctrl_mask);
}

int ufshcd_write_ee_control(struct ufs_hba *hba)
{
	int err;

	mutex_lock(&hba->ee_ctrl_mutex);
	err = __ufshcd_write_ee_control(hba, hba->ee_ctrl_mask);
	mutex_unlock(&hba->ee_ctrl_mutex);
	if (err)
		dev_err(hba->dev, "%s: failed to write ee control %d\n",
			__func__, err);
	return err;
}
int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask,
			     const u16 *other_mask, u16 set, u16 clr)
{
	u16 new_mask, ee_ctrl_mask;
	int err = 0;

	mutex_lock(&hba->ee_ctrl_mutex);
	new_mask = (*mask & ~clr) | set;
	ee_ctrl_mask = new_mask | *other_mask;
	if (ee_ctrl_mask != hba->ee_ctrl_mask)
		err = __ufshcd_write_ee_control(hba, ee_ctrl_mask);
	/* Still need to update 'mask' even if 'ee_ctrl_mask' was unchanged */
	if (!err) {
		hba->ee_ctrl_mask = ee_ctrl_mask;
		*mask = new_mask;
	}
	mutex_unlock(&hba->ee_ctrl_mutex);
	return err;
}
/**
 * ufshcd_disable_ee - disable exception event
 * @hba: per-adapter instance
 * @mask: exception event to disable
 *
 * Disables exception event in the device so that the EVENT_ALERT
 * bit is not set.
 *
 * Return: zero on success, non-zero error value on failure.
 */
static inline int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
{
	return ufshcd_update_ee_drv_mask(hba, 0, mask);
}

/**
 * ufshcd_enable_ee - enable exception event
 * @hba: per-adapter instance
 * @mask: exception event to enable
 *
 * Enable corresponding exception event in the device to allow
 * device to alert host in critical scenarios.
 *
 * Return: zero on success, non-zero error value on failure.
 */
static inline int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
{
	return ufshcd_update_ee_drv_mask(hba, mask, 0);
}
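/*
 * Illustrative sketch (not part of the driver): the BKOPS code below pairs
 * these helpers with the urgent-BKOPS event bit, e.g.:
 *
 *	err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
 *	...
 *	err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
 */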
/**
 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
 * @hba: per-adapter instance
 *
 * Allow device to manage background operations on its own. Enabling
 * this might lead to inconsistent latencies during normal data transfers
 * as the device is allowed to manage its own way of handling background
 * operations.
 *
 * Return: zero on success, non-zero on failure.
 */
static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
{
	int err = 0;

	if (hba->auto_bkops_enabled)
		goto out;

	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
			QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
	if (err) {
		dev_err(hba->dev, "%s: failed to enable bkops %d\n",
				__func__, err);
		goto out;
	}

	hba->auto_bkops_enabled = true;
	trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");

	/* No need of URGENT_BKOPS exception from the device */
	err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
	if (err)
		dev_err(hba->dev, "%s: failed to disable exception event %d\n",
				__func__, err);
out:
	return err;
}
/**
 * ufshcd_disable_auto_bkops - block device from doing background operations
 * @hba: per-adapter instance
 *
 * Disabling background operations improves command response latency but
 * has the drawback of moving the device into a critical state where it is
 * not operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
 * host is idle so that BKOPS are managed effectively without any negative
 * impacts.
 *
 * Return: zero on success, non-zero on failure.
 */
static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
{
	int err = 0;

	if (!hba->auto_bkops_enabled)
		goto out;

	/*
	 * If host assisted BKOPs is to be enabled, make sure
	 * urgent bkops exception is allowed.
	 */
	err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
	if (err) {
		dev_err(hba->dev, "%s: failed to enable exception event %d\n",
				__func__, err);
		goto out;
	}

	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
			QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
	if (err) {
		dev_err(hba->dev, "%s: failed to disable bkops %d\n",
				__func__, err);
		ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
		goto out;
	}

	hba->auto_bkops_enabled = false;
	trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
	hba->is_urgent_bkops_lvl_checked = false;
out:
	return err;
}
/**
 * ufshcd_force_reset_auto_bkops - force reset auto bkops state
 * @hba: per adapter instance
 *
 * After a device reset the device may toggle the BKOPS_EN flag
 * to default value. The s/w tracking variables should be updated
 * as well. This function would change the auto-bkops state based on
 * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
 */
static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
{
	if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
		hba->auto_bkops_enabled = false;
		hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
		ufshcd_enable_auto_bkops(hba);
	} else {
		hba->auto_bkops_enabled = true;
		hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
		ufshcd_disable_auto_bkops(hba);
	}
	hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
	hba->is_urgent_bkops_lvl_checked = false;
}
static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
{
	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
}

/**
 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
 * @hba: per-adapter instance
 * @status: bkops_status value
 *
 * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn
 * flag in the device to permit background operations if the device
 * bkops_status is greater than or equal to the "status" argument passed to
 * this function, disable otherwise.
 *
 * Return: 0 for success, non-zero in case of failure.
 *
 * NOTE: Callers of this function can check the "hba->auto_bkops_enabled" flag
 * to know whether auto bkops is enabled or disabled after this function
 * returns control to it.
 */
static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
			     enum bkops_status status)
{
	int err;
	u32 curr_status = 0;

	err = ufshcd_get_bkops_status(hba, &curr_status);
	if (err) {
		dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
				__func__, err);
		goto out;
	} else if (curr_status > BKOPS_STATUS_MAX) {
		dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
				__func__, curr_status);
		err = -EINVAL;
		goto out;
	}

	if (curr_status >= status)
		err = ufshcd_enable_auto_bkops(hba);
	else
		err = ufshcd_disable_auto_bkops(hba);
out:
	return err;
}
/**
 * ufshcd_urgent_bkops - handle urgent bkops exception event
 * @hba: per-adapter instance
 *
 * Enable fBackgroundOpsEn flag in the device to permit background
 * operations.
 *
 * If BKOPs is enabled, this function returns 0, 1 if bkops is not enabled,
 * and a negative error value for any other failure.
 *
 * Return: 0 upon success; < 0 upon failure.
 */
static int ufshcd_urgent_bkops(struct ufs_hba *hba)
{
	return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
}
static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
{
	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
}
static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
{
	int err;
	u32 curr_status = 0;

	if (hba->is_urgent_bkops_lvl_checked)
		goto enable_auto_bkops;

	err = ufshcd_get_bkops_status(hba, &curr_status);
	if (err) {
		dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
				__func__, err);
		goto out;
	}

	/*
	 * We are seeing that some devices are raising the urgent bkops
	 * exception events even when BKOPS status doesn't indicate performance
	 * impacted or critical. Handle these devices by determining their
	 * urgent bkops status at runtime.
	 */
	if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
		dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
				__func__, curr_status);
		/* update the current status as the urgent bkops level */
		hba->urgent_bkops_lvl = curr_status;
		hba->is_urgent_bkops_lvl_checked = true;
	}

enable_auto_bkops:
	err = ufshcd_enable_auto_bkops(hba);
out:
	if (err < 0)
		dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
				__func__, err);
}
static void ufshcd_temp_exception_event_handler(struct ufs_hba *hba, u16 status)
{
	u32 value;

	if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				QUERY_ATTR_IDN_CASE_ROUGH_TEMP, 0, 0, &value))
		return;

	dev_info(hba->dev, "exception Tcase %d\n", value - 80);

	ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP);

	/*
	 * A placeholder for the platform vendors to add whatever additional
	 * steps are required.
	 */
}
static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn)
{
	u8 index;
	enum query_opcode opcode = set ? UPIU_QUERY_OPCODE_SET_FLAG :
				   UPIU_QUERY_OPCODE_CLEAR_FLAG;

	index = ufshcd_wb_get_query_index(hba);
	return ufshcd_query_flag_retry(hba, opcode, idn, index, NULL);
}
int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable)
{
	int ret;

	if (!ufshcd_is_wb_allowed(hba) ||
	    hba->dev_info.wb_enabled == enable)
		return 0;

	ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_EN);
	if (ret) {
		dev_err(hba->dev, "%s: Write Booster %s failed %d\n",
			__func__, enable ? "enabling" : "disabling", ret);
		return ret;
	}

	hba->dev_info.wb_enabled = enable;
	dev_dbg(hba->dev, "%s: Write Booster %s\n",
			__func__, enable ? "enabled" : "disabled");

	return ret;
}
static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
						 bool enable)
{
	int ret;

	ret = __ufshcd_wb_toggle(hba, enable,
			QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8);
	if (ret) {
		dev_err(hba->dev, "%s: WB-Buf Flush during H8 %s failed %d\n",
			__func__, enable ? "enabling" : "disabling", ret);
		return;
	}
	dev_dbg(hba->dev, "%s: WB-Buf Flush during H8 %s\n",
			__func__, enable ? "enabled" : "disabled");
}
int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable)
{
	int ret;

	if (!ufshcd_is_wb_allowed(hba) ||
	    hba->dev_info.wb_buf_flush_enabled == enable)
		return 0;

	ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN);
	if (ret) {
		dev_err(hba->dev, "%s: WB-Buf Flush %s failed %d\n",
			__func__, enable ? "enabling" : "disabling", ret);
		return ret;
	}

	hba->dev_info.wb_buf_flush_enabled = enable;
	dev_dbg(hba->dev, "%s: WB-Buf Flush %s\n",
			__func__, enable ? "enabled" : "disabled");

	return ret;
}
static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
						u32 avail_buf)
{
	u32 cur_buf;
	int ret;
	u8 index;

	index = ufshcd_wb_get_query_index(hba);
	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				      QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE,
				      index, 0, &cur_buf);
	if (ret) {
		dev_err(hba->dev, "%s: dCurWriteBoosterBufferSize read failed %d\n",
			__func__, ret);
		return false;
	}

	if (!cur_buf) {
		dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n",
			 cur_buf);
		return false;
	}
	/* Let it continue to flush when available buffer exceeds threshold */
	return avail_buf < hba->vps->wb_flush_threshold;
}
static void ufshcd_wb_force_disable(struct ufs_hba *hba)
{
	if (ufshcd_is_wb_buf_flush_allowed(hba))
		ufshcd_wb_toggle_buf_flush(hba, false);

	ufshcd_wb_toggle_buf_flush_during_h8(hba, false);
	ufshcd_wb_toggle(hba, false);
	hba->caps &= ~UFSHCD_CAP_WB_EN;

	dev_info(hba->dev, "%s: WB force disabled\n", __func__);
}
static bool ufshcd_is_wb_buf_lifetime_available(struct ufs_hba *hba)
{
	u32 lifetime;
	int ret;
	u8 index;

	index = ufshcd_wb_get_query_index(hba);
	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				      QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST,
				      index, 0, &lifetime);
	if (ret) {
		dev_err(hba->dev,
			"%s: bWriteBoosterBufferLifeTimeEst read failed %d\n",
			__func__, ret);
		return false;
	}

	if (lifetime == UFS_WB_EXCEED_LIFETIME) {
		dev_err(hba->dev, "%s: WB buf lifetime is exhausted 0x%02X\n",
			__func__, lifetime);
		return false;
	}

	dev_dbg(hba->dev, "%s: WB buf lifetime is 0x%02X\n",
		__func__, lifetime);

	return true;
}
static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
{
	int ret;
	u32 avail_buf;
	u8 index;

	if (!ufshcd_is_wb_allowed(hba))
		return false;

	if (!ufshcd_is_wb_buf_lifetime_available(hba)) {
		ufshcd_wb_force_disable(hba);
		return false;
	}

	/*
	 * The ufs device needs the vcc to be ON to flush.
	 * With user-space reduction enabled, it's enough to enable flush
	 * by checking only the available buffer. The threshold
	 * defined here is > 90% full.
	 * With user-space preserved enabled, the current-buffer
	 * should be checked too because the wb buffer size can reduce
	 * when disk tends to be full. This info is provided by current
	 * buffer (dCurrentWriteBoosterBufferSize). There's no point in
	 * keeping vcc on when current buffer is empty.
	 */
	index = ufshcd_wb_get_query_index(hba);
	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				      QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE,
				      index, 0, &avail_buf);
	if (ret) {
		dev_warn(hba->dev, "%s: dAvailableWriteBoosterBufferSize read failed %d\n",
			 __func__, ret);
		return false;
	}

	if (!hba->dev_info.b_presrv_uspc_en)
		return avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10);

	return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
}
static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(to_delayed_work(work),
					   struct ufs_hba,
					   rpm_dev_flush_recheck_work);
	/*
	 * To prevent unnecessary VCC power drain after device finishes
	 * WriteBooster buffer flush or Auto BKOPs, force runtime resume
	 * after a certain delay to recheck the threshold by next runtime
	 * suspend.
	 */
	ufshcd_rpm_get_sync(hba);
	ufshcd_rpm_put_sync(hba);
}
/**
 * ufshcd_exception_event_handler - handle exceptions raised by device
 * @work: pointer to work data
 *
 * Read bExceptionEventStatus attribute from the device and handle the
 * exception event accordingly.
 */
static void ufshcd_exception_event_handler(struct work_struct *work)
{
	struct ufs_hba *hba;
	int err;
	u32 status = 0;
	hba = container_of(work, struct ufs_hba, eeh_work);

	ufshcd_scsi_block_requests(hba);
	err = ufshcd_get_ee_status(hba, &status);
	if (err) {
		dev_err(hba->dev, "%s: failed to get exception status %d\n",
				__func__, err);
		goto out;
	}

	trace_ufshcd_exception_event(dev_name(hba->dev), status);

	if (status & hba->ee_drv_mask & MASK_EE_URGENT_BKOPS)
		ufshcd_bkops_exception_event_handler(hba);

	if (status & hba->ee_drv_mask & MASK_EE_URGENT_TEMP)
		ufshcd_temp_exception_event_handler(hba, status);

	ufs_debugfs_exception_event(hba, status);
out:
	ufshcd_scsi_unblock_requests(hba);
}
/* Complete requests that have door-bell cleared */
static void ufshcd_complete_requests(struct ufs_hba *hba, bool force_compl)
{
	if (is_mcq_enabled(hba))
		ufshcd_mcq_compl_pending_transfer(hba, force_compl);
	else
		ufshcd_transfer_req_compl(hba);

	ufshcd_tmc_handler(hba);
}
/**
 * ufshcd_quirk_dl_nac_errors - This function checks whether error handling
 * is required to recover from DL NAC errors.
 * @hba: per-adapter instance
 *
 * Return: true if error handling is required, false otherwise.
 */
static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
{
	unsigned long flags;
	bool err_handling = true;

	spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only workaround the
	 * device fatal error and/or DL NAC & REPLAY timeout errors.
	 */
	if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
		goto out;

	if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
	    ((hba->saved_err & UIC_ERROR) &&
	     (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
		goto out;

	if ((hba->saved_err & UIC_ERROR) &&
	    (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
		int err;
		/*
		 * wait for 50ms to see if we can get any other errors or not.
		 */
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		msleep(50);
		spin_lock_irqsave(hba->host->host_lock, flags);

		/*
		 * now check if we have got any other severe errors other than
		 * DL NAC error?
		 */
		if ((hba->saved_err & INT_FATAL_ERRORS) ||
		    ((hba->saved_err & UIC_ERROR) &&
		    (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
			goto out;

		/*
		 * As DL NAC is the only error received so far, send out NOP
		 * command to confirm if link is still active or not.
		 *   - If we don't get any response then do error recovery.
		 *   - If we get response then clear the DL NAC error bit.
		 */

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		err = ufshcd_verify_dev_init(hba);
		spin_lock_irqsave(hba->host->host_lock, flags);

		if (err)
			goto out;

		/* Link seems to be alive hence ignore the DL NAC errors */
		if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
			hba->saved_err &= ~UIC_ERROR;
		/* clear NAC error */
		hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
		if (!hba->saved_uic_err)
			err_handling = false;
	}
out:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return err_handling;
}
/* host lock must be held before calling this func */
static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
{
	return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
	       (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
}
void ufshcd_schedule_eh_work(struct ufs_hba *hba)
{
	lockdep_assert_held(hba->host->host_lock);

	/* handle fatal errors only when link is not in error state */
	if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
		if (hba->force_reset || ufshcd_is_link_broken(hba) ||
		    ufshcd_is_saved_err_fatal(hba))
			hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
		else
			hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
		queue_work(hba->eh_wq, &hba->eh_work);
	}
}
static void ufshcd_force_error_recovery(struct ufs_hba *hba)
{
	spin_lock_irq(hba->host->host_lock);
	hba->force_reset = true;
	ufshcd_schedule_eh_work(hba);
	spin_unlock_irq(hba->host->host_lock);
}
static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
{
	mutex_lock(&hba->wb_mutex);
	down_write(&hba->clk_scaling_lock);
	hba->clk_scaling.is_allowed = allow;
	up_write(&hba->clk_scaling_lock);
	mutex_unlock(&hba->wb_mutex);
}
static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
{
	if (suspend) {
		if (hba->clk_scaling.is_enabled)
			ufshcd_suspend_clkscaling(hba);
		ufshcd_clk_scaling_allow(hba, false);
	} else {
		ufshcd_clk_scaling_allow(hba, true);
		if (hba->clk_scaling.is_enabled)
			ufshcd_resume_clkscaling(hba);
	}
}
static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
{
	ufshcd_rpm_get_sync(hba);
	if (pm_runtime_status_suspended(&hba->ufs_device_wlun->sdev_gendev) ||
	    hba->is_sys_suspended) {
		enum ufs_pm_op pm_op;

		/*
		 * Don't assume anything of resume, if
		 * resume fails, irq and clocks can be OFF, and powers
		 * can be OFF or in LPM.
		 */
		ufshcd_setup_hba_vreg(hba, true);
		ufshcd_enable_irq(hba);
		ufshcd_setup_vreg(hba, true);
		ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
		ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
		ufshcd_hold(hba);
		if (!ufshcd_is_clkgating_allowed(hba))
			ufshcd_setup_clocks(hba, true);
		ufshcd_release(hba);
		pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM;
		ufshcd_vops_resume(hba, pm_op);
	} else {
		ufshcd_hold(hba);
		if (ufshcd_is_clkscaling_supported(hba) &&
		    hba->clk_scaling.is_enabled)
			ufshcd_suspend_clkscaling(hba);
		ufshcd_clk_scaling_allow(hba, false);
	}
	ufshcd_scsi_block_requests(hba);
	/* Wait for ongoing ufshcd_queuecommand() calls to finish. */
	blk_mq_wait_quiesce_done(&hba->host->tag_set);
	cancel_work_sync(&hba->eeh_work);
}
6371 static void ufshcd_err_handling_unprepare(struct ufs_hba
*hba
)
6373 ufshcd_scsi_unblock_requests(hba
);
6374 ufshcd_release(hba
);
6375 if (ufshcd_is_clkscaling_supported(hba
))
6376 ufshcd_clk_scaling_suspend(hba
, false);
6377 ufshcd_rpm_put(hba
);
6380 static inline bool ufshcd_err_handling_should_stop(struct ufs_hba
*hba
)
6382 return (!hba
->is_powered
|| hba
->shutting_down
||
6383 !hba
->ufs_device_wlun
||
6384 hba
->ufshcd_state
== UFSHCD_STATE_ERROR
||
6385 (!(hba
->saved_err
|| hba
->saved_uic_err
|| hba
->force_reset
||
6386 ufshcd_is_link_broken(hba
))));
6390 static void ufshcd_recover_pm_error(struct ufs_hba
*hba
)
6392 struct Scsi_Host
*shost
= hba
->host
;
6393 struct scsi_device
*sdev
;
6394 struct request_queue
*q
;
6397 hba
->is_sys_suspended
= false;
6399 * Set RPM status of wlun device to RPM_ACTIVE,
6400 * this also clears its runtime error.
6402 ret
= pm_runtime_set_active(&hba
->ufs_device_wlun
->sdev_gendev
);
6404 /* hba device might have a runtime error otherwise */
6406 ret
= pm_runtime_set_active(hba
->dev
);
6408 * If wlun device had runtime error, we also need to resume those
6409 * consumer scsi devices in case any of them has failed to be
6410 * resumed due to supplier runtime resume failure. This is to unblock
6411 * blk_queue_enter in case there are bios waiting inside it.
6414 shost_for_each_device(sdev
, shost
) {
6415 q
= sdev
->request_queue
;
6416 if (q
->dev
&& (q
->rpm_status
== RPM_SUSPENDED
||
6417 q
->rpm_status
== RPM_SUSPENDING
))
6418 pm_request_resume(q
->dev
);
6423 static inline void ufshcd_recover_pm_error(struct ufs_hba
*hba
)
6428 static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba
*hba
)
6430 struct ufs_pa_layer_attr
*pwr_info
= &hba
->pwr_info
;
6433 ufshcd_dme_get(hba
, UIC_ARG_MIB(PA_PWRMODE
), &mode
);
6435 if (pwr_info
->pwr_rx
!= ((mode
>> PWRMODE_RX_OFFSET
) & PWRMODE_MASK
))
6438 if (pwr_info
->pwr_tx
!= (mode
& PWRMODE_MASK
))
6444 static bool ufshcd_abort_one(struct request
*rq
, void *priv
)
6448 struct scsi_cmnd
*cmd
= blk_mq_rq_to_pdu(rq
);
6449 struct scsi_device
*sdev
= cmd
->device
;
6450 struct Scsi_Host
*shost
= sdev
->host
;
6451 struct ufs_hba
*hba
= shost_priv(shost
);
6452 struct ufshcd_lrb
*lrbp
= &hba
->lrb
[tag
];
6453 struct ufs_hw_queue
*hwq
;
6454 unsigned long flags
;
6456 *ret
= ufshcd_try_to_abort_task(hba
, tag
);
6457 dev_err(hba
->dev
, "Aborting tag %d / CDB %#02x %s\n", tag
,
6458 hba
->lrb
[tag
].cmd
? hba
->lrb
[tag
].cmd
->cmnd
[0] : -1,
6459 *ret
? "failed" : "succeeded");
6461 /* Release cmd in MCQ mode if abort succeeds */
6462 if (is_mcq_enabled(hba
) && (*ret
== 0)) {
6463 hwq
= ufshcd_mcq_req_to_hwq(hba
, scsi_cmd_to_rq(lrbp
->cmd
));
6464 spin_lock_irqsave(&hwq
->cq_lock
, flags
);
6465 if (ufshcd_cmd_inflight(lrbp
->cmd
))
6466 ufshcd_release_scsi_cmd(hba
, lrbp
);
6467 spin_unlock_irqrestore(&hwq
->cq_lock
, flags
);
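
/*
 * Iterator contract: ufshcd_abort_one() runs under
 * blk_mq_tagset_busy_iter(), whose callback returns a bool "keep going"
 * flag. Returning "*ret == 0" therefore walks the busy tags only while
 * aborts keep succeeding; the first failure stops the iteration and is
 * reported back through the shared @priv cookie.
 */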

/**
 * ufshcd_abort_all - Abort all pending commands.
 * @hba: Host bus adapter pointer.
 *
 * Return: true if and only if the host controller needs to be reset.
 */
static bool ufshcd_abort_all(struct ufs_hba *hba)
{
	int tag, ret = 0;

	blk_mq_tagset_busy_iter(&hba->host->tag_set, ufshcd_abort_one, &ret);
	if (ret)
		goto out;

	/* Clear pending task management requests */
	for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
		ret = ufshcd_clear_tm_cmd(hba, tag);
		if (ret)
			goto out;
	}

out:
	/* Complete the requests that are cleared by s/w */
	ufshcd_complete_requests(hba, false);

	return ret != 0;
}

/**
 * ufshcd_err_handler - handle UFS errors that require s/w attention
 * @work: pointer to work structure
 */
static void ufshcd_err_handler(struct work_struct *work)
{
	int retries = MAX_ERR_HANDLER_RETRIES;
	struct ufs_hba *hba;
	unsigned long flags;
	bool needs_restore;
	bool needs_reset;
	int pmc_err;

	hba = container_of(work, struct ufs_hba, eh_work);

	dev_info(hba->dev,
		 "%s started; HBA state %s; powered %d; shutting down %d; saved_err = %d; saved_uic_err = %d; force_reset = %d%s\n",
		 __func__, ufshcd_state_name[hba->ufshcd_state],
		 hba->is_powered, hba->shutting_down, hba->saved_err,
		 hba->saved_uic_err, hba->force_reset,
		 ufshcd_is_link_broken(hba) ? "; link is broken" : "");

	down(&hba->host_sem);
	spin_lock_irqsave(hba->host->host_lock, flags);
	if (ufshcd_err_handling_should_stop(hba)) {
		if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
			hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		up(&hba->host_sem);
		return;
	}
	ufshcd_set_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_err_handling_prepare(hba);
	/* Complete requests that have door-bell cleared by h/w */
	ufshcd_complete_requests(hba, false);
	spin_lock_irqsave(hba->host->host_lock, flags);
again:
	needs_restore = false;
	needs_reset = false;

	if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
		hba->ufshcd_state = UFSHCD_STATE_RESET;
	/*
	 * A full reset and restore might have happened after preparation
	 * is finished, double check whether we should stop.
	 */
	if (ufshcd_err_handling_should_stop(hba))
		goto skip_err_handling;

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
		bool ret;

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		/* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
		ret = ufshcd_quirk_dl_nac_errors(hba);
		spin_lock_irqsave(hba->host->host_lock, flags);
		if (!ret && ufshcd_err_handling_should_stop(hba))
			goto skip_err_handling;
	}

	if ((hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
	    (hba->saved_uic_err &&
	     (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
		bool pr_prdt = !!(hba->saved_err & SYSTEM_BUS_FATAL_ERROR);

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		ufshcd_print_host_state(hba);
		ufshcd_print_pwr_info(hba);
		ufshcd_print_evt_hist(hba);
		ufshcd_print_tmrs(hba, hba->outstanding_tasks);
		ufshcd_print_trs_all(hba, pr_prdt);
		spin_lock_irqsave(hba->host->host_lock, flags);
	}

	/*
	 * if host reset is required then skip clearing the pending
	 * transfers forcefully because they will get cleared during
	 * host reset and restore
	 */
	if (hba->force_reset || ufshcd_is_link_broken(hba) ||
	    ufshcd_is_saved_err_fatal(hba) ||
	    ((hba->saved_err & UIC_ERROR) &&
	     (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
				    UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))) {
		needs_reset = true;
		goto do_reset;
	}

	/*
	 * If LINERESET was caught, UFS might have been put to PWM mode,
	 * check if power mode restore is needed.
	 */
	if (hba->saved_uic_err & UFSHCD_UIC_PA_GENERIC_ERROR) {
		hba->saved_uic_err &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
		if (!hba->saved_uic_err)
			hba->saved_err &= ~UIC_ERROR;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		if (ufshcd_is_pwr_mode_restore_needed(hba))
			needs_restore = true;
		spin_lock_irqsave(hba->host->host_lock, flags);
		if (!hba->saved_err && !needs_restore)
			goto skip_err_handling;
	}

	hba->silence_err_logs = true;
	/* release lock as clear command might sleep */
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	needs_reset = ufshcd_abort_all(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->silence_err_logs = false;
	if (needs_reset)
		goto do_reset;

	/*
	 * After all reqs and tasks are cleared from doorbell,
	 * now it is safe to restore power mode.
	 */
	if (needs_restore) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		/*
		 * Hold the scaling lock just in case dev cmds
		 * are sent via bsg and/or sysfs.
		 */
		down_write(&hba->clk_scaling_lock);
		hba->force_pmc = true;
		pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
		if (pmc_err) {
			needs_reset = true;
			dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n",
					__func__, pmc_err);
		}
		hba->force_pmc = false;
		ufshcd_print_pwr_info(hba);
		up_write(&hba->clk_scaling_lock);
		spin_lock_irqsave(hba->host->host_lock, flags);
	}

do_reset:
	/* Fatal errors need reset */
	if (needs_reset) {
		int err;

		hba->force_reset = false;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		err = ufshcd_reset_and_restore(hba);
		if (err)
			dev_err(hba->dev, "%s: reset and restore failed with err %d\n",
					__func__, err);
		else
			ufshcd_recover_pm_error(hba);
		spin_lock_irqsave(hba->host->host_lock, flags);
	}

skip_err_handling:
	if (hba->ufshcd_state == UFSHCD_STATE_RESET)
		hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
	if (hba->saved_err || hba->saved_uic_err)
		dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
		    __func__, hba->saved_err, hba->saved_uic_err);

	/* Exit in an operational state or dead */
	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL &&
	    hba->ufshcd_state != UFSHCD_STATE_ERROR) {
		if (--retries)
			goto again;
		hba->ufshcd_state = UFSHCD_STATE_ERROR;
	}
	ufshcd_clear_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_err_handling_unprepare(hba);
	up(&hba->host_sem);

	dev_info(hba->dev, "%s finished; HBA state %s\n", __func__,
		 ufshcd_state_name[hba->ufshcd_state]);
}
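
/*
 * Overall shape of the handler above: prepare (resume/block I/O), clear
 * what h/w already completed, optionally apply the DL NAC quirk, dump
 * state for fatal errors, abort or forcefully clear transfers, restore
 * the power mode after a LINERESET if needed, and reset the host as a
 * last resort. The "again" loop bounds all of this to
 * MAX_ERR_HANDLER_RETRIES passes before declaring the HBA dead.
 */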

/**
 * ufshcd_update_uic_error - check and set fatal UIC error flags.
 * @hba: per-adapter instance
 *
 * Return:
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
{
	u32 reg;
	irqreturn_t retval = IRQ_NONE;

	/* PHY layer error */
	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
	if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
	    (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
		ufshcd_update_evt_hist(hba, UFS_EVT_PA_ERR, reg);
		/*
		 * To know whether this error is fatal or not, DB timeout
		 * must be checked but this error is handled separately.
		 */
		if (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)
			dev_dbg(hba->dev, "%s: UIC Lane error reported\n",
					__func__);

		/* Got a LINERESET indication. */
		if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) {
			struct uic_command *cmd = NULL;

			hba->uic_error |= UFSHCD_UIC_PA_GENERIC_ERROR;
			if (hba->uic_async_done && hba->active_uic_cmd)
				cmd = hba->active_uic_cmd;
			/*
			 * Ignore the LINERESET during power mode change
			 * operation via DME_SET command.
			 */
			if (cmd && (cmd->command == UIC_CMD_DME_SET))
				hba->uic_error &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
		}
		retval |= IRQ_HANDLED;
	}

	/* PA_INIT_ERROR is fatal and needs UIC reset */
	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
	if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
	    (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
		ufshcd_update_evt_hist(hba, UFS_EVT_DL_ERR, reg);

		if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
			hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
		else if (hba->dev_quirks &
				UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
			if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
				hba->uic_error |=
					UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
			else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
				hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
		}
		retval |= IRQ_HANDLED;
	}

	/* UIC NL/TL/DME errors needs software retry */
	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
	if ((reg & UIC_NETWORK_LAYER_ERROR) &&
	    (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
		ufshcd_update_evt_hist(hba, UFS_EVT_NL_ERR, reg);
		hba->uic_error |= UFSHCD_UIC_NL_ERROR;
		retval |= IRQ_HANDLED;
	}

	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
	if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
	    (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
		ufshcd_update_evt_hist(hba, UFS_EVT_TL_ERR, reg);
		hba->uic_error |= UFSHCD_UIC_TL_ERROR;
		retval |= IRQ_HANDLED;
	}

	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
	if ((reg & UIC_DME_ERROR) &&
	    (reg & UIC_DME_ERROR_CODE_MASK)) {
		ufshcd_update_evt_hist(hba, UFS_EVT_DME_ERR, reg);
		hba->uic_error |= UFSHCD_UIC_DME_ERROR;
		retval |= IRQ_HANDLED;
	}

	dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
			__func__, hba->uic_error);
	return retval;
}
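
/*
 * The register walk above mirrors the UniPro stack, bottom to top: the
 * PHY adapter (PA), data link (DL), network (NL), transport (TL) and DME
 * error-code registers are sampled in turn, and the per-layer bits
 * accumulated in hba->uic_error let ufshcd_check_errors() decide whether
 * the error handler must be scheduled.
 */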

/**
 * ufshcd_check_errors - Check for errors that need s/w attention
 * @hba: per-adapter instance
 * @intr_status: interrupt status generated by the controller
 *
 * Return:
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
{
	bool queue_eh_work = false;
	irqreturn_t retval = IRQ_NONE;

	spin_lock(hba->host->host_lock);
	hba->errors |= UFSHCD_ERROR_MASK & intr_status;

	if (hba->errors & INT_FATAL_ERRORS) {
		ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR,
				       hba->errors);
		queue_eh_work = true;
	}

	if (hba->errors & UIC_ERROR) {
		hba->uic_error = 0;
		retval = ufshcd_update_uic_error(hba);
		if (hba->uic_error)
			queue_eh_work = true;
	}

	if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) {
		dev_err(hba->dev,
			"%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
			__func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
			"Enter" : "Exit",
			hba->errors, ufshcd_get_upmcrs(hba));
		ufshcd_update_evt_hist(hba, UFS_EVT_AUTO_HIBERN8_ERR,
				       hba->errors);
		ufshcd_set_link_broken(hba);
		queue_eh_work = true;
	}

	if (queue_eh_work) {
		/*
		 * update the transfer error masks to sticky bits, let's do this
		 * irrespective of current ufshcd_state.
		 */
		hba->saved_err |= hba->errors;
		hba->saved_uic_err |= hba->uic_error;

		/* dump controller state before resetting */
		if ((hba->saved_err &
		     (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
		    (hba->saved_uic_err &&
		     (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
			dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
					__func__, hba->saved_err,
					hba->saved_uic_err);
			ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE,
					 "host_regs: ");
			ufshcd_print_pwr_info(hba);
		}
		ufshcd_schedule_eh_work(hba);
		retval |= IRQ_HANDLED;
	}
	/*
	 * if (!queue_eh_work) -
	 * Other errors are either non-fatal where host recovers
	 * itself without s/w intervention or errors that will be
	 * handled by the SCSI core layer.
	 */
	hba->errors = 0;
	hba->uic_error = 0;
	spin_unlock(hba->host->host_lock);
	return retval;
}

/**
 * ufshcd_tmc_handler - handle task management function completion
 * @hba: per adapter instance
 *
 * Return:
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
{
	unsigned long flags, pending, issued;
	irqreturn_t ret = IRQ_NONE;
	int tag;

	spin_lock_irqsave(hba->host->host_lock, flags);
	pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
	issued = hba->outstanding_tasks & ~pending;
	for_each_set_bit(tag, &issued, hba->nutmrs) {
		struct request *req = hba->tmf_rqs[tag];
		struct completion *c = req->end_io_data;

		complete(c);
		ret = IRQ_HANDLED;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return ret;
}

/**
 * ufshcd_handle_mcq_cq_events - handle MCQ completion queue events
 * @hba: per adapter instance
 *
 * Return: IRQ_HANDLED if interrupt is handled.
 */
static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba)
{
	struct ufs_hw_queue *hwq;
	unsigned long outstanding_cqs;
	unsigned int nr_queues;
	int i, ret;
	u32 events;

	ret = ufshcd_vops_get_outstanding_cqs(hba, &outstanding_cqs);
	if (ret)
		outstanding_cqs = (1U << hba->nr_hw_queues) - 1;

	/* Exclude the poll queues */
	nr_queues = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
	for_each_set_bit(i, &outstanding_cqs, nr_queues) {
		hwq = &hba->uhq[i];

		events = ufshcd_mcq_read_cqis(hba, i);
		if (events)
			ufshcd_mcq_write_cqis(hba, events, i);

		if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
			ufshcd_mcq_poll_cqe_lock(hba, hwq);
	}

	return IRQ_HANDLED;
}

/**
 * ufshcd_sl_intr - Interrupt service routine
 * @hba: per adapter instance
 * @intr_status: contains interrupts generated by the controller
 *
 * Return:
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
{
	irqreturn_t retval = IRQ_NONE;

	if (intr_status & UFSHCD_UIC_MASK)
		retval |= ufshcd_uic_cmd_compl(hba, intr_status);

	if (intr_status & UFSHCD_ERROR_MASK || hba->errors)
		retval |= ufshcd_check_errors(hba, intr_status);

	if (intr_status & UTP_TASK_REQ_COMPL)
		retval |= ufshcd_tmc_handler(hba);

	if (intr_status & UTP_TRANSFER_REQ_COMPL)
		retval |= ufshcd_transfer_req_compl(hba);

	if (intr_status & MCQ_CQ_EVENT_STATUS)
		retval |= ufshcd_handle_mcq_cq_events(hba);

	return retval;
}

/**
 * ufshcd_intr - Main interrupt service routine
 * @irq: irq number
 * @__hba: pointer to adapter instance
 *
 * Return:
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_intr(int irq, void *__hba)
{
	u32 intr_status, enabled_intr_status = 0;
	irqreturn_t retval = IRQ_NONE;
	struct ufs_hba *hba = __hba;
	int retries = hba->nutrs;

	intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
	hba->ufs_stats.last_intr_status = intr_status;
	hba->ufs_stats.last_intr_ts = local_clock();

	/*
	 * There could be max of hba->nutrs reqs in flight and in worst case
	 * if the reqs get finished 1 by 1 after the interrupt status is
	 * read, make sure we handle them by checking the interrupt status
	 * again in a loop until we process all of the reqs before returning.
	 */
	while (intr_status && retries--) {
		enabled_intr_status =
			intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
		ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
		if (enabled_intr_status)
			retval |= ufshcd_sl_intr(hba, enabled_intr_status);

		intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
	}

	if (enabled_intr_status && retval == IRQ_NONE &&
	    (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL) ||
	     hba->outstanding_reqs) && !ufshcd_eh_in_progress(hba)) {
		dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
					__func__,
					intr_status,
					hba->ufs_stats.last_intr_status,
					enabled_intr_status);
		ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
	}

	return retval;
}
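
/*
 * A minimal sketch of how an ISR like this is typically wired up at probe
 * time; the devm form shown here is illustrative rather than a quote of
 * the actual registration code:
 *
 *	err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED,
 *			       UFSHCD, hba);
 *	if (err)
 *		dev_err(dev, "request irq failed\n");
 */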

static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
{
	int err = 0;
	u32 mask = 1 << tag;
	unsigned long flags;

	if (!test_bit(tag, &hba->outstanding_tasks))
		goto out;

	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_utmrl_clear(hba, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* poll for max. 1 sec to clear door bell register by h/w */
	err = ufshcd_wait_for_register(hba,
			REG_UTP_TASK_REQ_DOOR_BELL,
			mask, 0, 1000, 1000);

	dev_err(hba->dev, "Clearing task management function with tag %d %s\n",
		tag, err < 0 ? "failed" : "succeeded");

out:
	return err;
}

static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
		struct utp_task_req_desc *treq, u8 tm_function)
{
	struct request_queue *q = hba->tmf_queue;
	struct Scsi_Host *host = hba->host;
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request *req;
	unsigned long flags;
	int task_tag, err;

	/*
	 * blk_mq_alloc_request() is used here only to get a free tag.
	 */
	req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->end_io_data = &wait;
	ufshcd_hold(hba);

	spin_lock_irqsave(host->host_lock, flags);

	task_tag = req->tag;
	hba->tmf_rqs[req->tag] = req;
	treq->upiu_req.req_header.task_tag = task_tag;

	memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
	ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function);

	/* send command to the controller */
	__set_bit(task_tag, &hba->outstanding_tasks);

	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
	/* Make sure that doorbell is committed immediately */
	wmb();

	spin_unlock_irqrestore(host->host_lock, flags);

	ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_SEND);

	/* wait until the task management command is completed */
	err = wait_for_completion_io_timeout(&wait,
			msecs_to_jiffies(TM_CMD_TIMEOUT));
	if (!err) {
		ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR);
		dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
				__func__, tm_function);
		if (ufshcd_clear_tm_cmd(hba, task_tag))
			dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
					__func__, task_tag);
		err = -ETIMEDOUT;
	} else {
		err = 0;
		memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq));

		ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_COMP);
	}

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->tmf_rqs[req->tag] = NULL;
	__clear_bit(task_tag, &hba->outstanding_tasks);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	ufshcd_release(hba);
	blk_mq_free_request(req);

	return err;
}

/**
 * ufshcd_issue_tm_cmd - issues task management commands to controller
 * @hba: per adapter instance
 * @lun_id: LUN ID to which TM command is sent
 * @task_id: task ID to which the TM command is applicable
 * @tm_function: task management function opcode
 * @tm_response: task management service response return value
 *
 * Return: non-zero value on error, zero on success.
 */
static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
		u8 tm_function, u8 *tm_response)
{
	struct utp_task_req_desc treq = { };
	enum utp_ocs ocs_value;
	int err;

	/* Configure task request descriptor */
	treq.header.interrupt = 1;
	treq.header.ocs = OCS_INVALID_COMMAND_STATUS;

	/* Configure task request UPIU */
	treq.upiu_req.req_header.transaction_code = UPIU_TRANSACTION_TASK_REQ;
	treq.upiu_req.req_header.lun = lun_id;
	treq.upiu_req.req_header.tm_function = tm_function;

	/*
	 * The host shall provide the same value for LUN field in the basic
	 * header and for Input Parameter.
	 */
	treq.upiu_req.input_param1 = cpu_to_be32(lun_id);
	treq.upiu_req.input_param2 = cpu_to_be32(task_id);

	err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
	if (err == -ETIMEDOUT)
		return err;

	ocs_value = treq.header.ocs & MASK_OCS;
	if (ocs_value != OCS_SUCCESS)
		dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
				__func__, ocs_value);
	else if (tm_response)
		*tm_response = be32_to_cpu(treq.upiu_rsp.output_param1) &
				MASK_TM_SERVICE_RESP;
	return err;
}
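
/*
 * Hedged usage sketch (it mirrors the LU reset handler further below;
 * "lun" is whatever ufshcd_scsi_to_upiu_lun() produced for the target):
 *
 *	u8 resp = 0xF;
 *	int err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
 *
 *	if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL)
 *		lu_was_reset = true;	(hypothetical caller-side flag)
 */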

/**
 * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
 * @hba: per-adapter instance
 * @req_upiu: upiu request
 * @rsp_upiu: upiu reply
 * @desc_buff: pointer to descriptor buffer, NULL if NA
 * @buff_len: descriptor size, 0 if NA
 * @cmd_type: specifies the type (NOP, Query...)
 * @desc_op: descriptor operation
 *
 * Those type of requests uses UTP Transfer Request Descriptor - utrd.
 * Therefore, it "rides" the device management infrastructure: uses its tag and
 * tasks work queues.
 *
 * Since there is only one available tag for device management commands,
 * the caller is expected to hold the hba->dev_cmd.lock mutex.
 *
 * Return: 0 upon success; < 0 upon failure.
 */
static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
					struct utp_upiu_req *req_upiu,
					struct utp_upiu_req *rsp_upiu,
					u8 *desc_buff, int *buff_len,
					enum dev_cmd_type cmd_type,
					enum query_opcode desc_op)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	const u32 tag = hba->reserved_slot;
	struct ufshcd_lrb *lrbp;
	int err = 0;
	u8 upiu_flags;

	/* Protects use of hba->reserved_slot. */
	lockdep_assert_held(&hba->dev_cmd.lock);

	down_read(&hba->clk_scaling_lock);

	lrbp = &hba->lrb[tag];
	lrbp->cmd = NULL;
	lrbp->task_tag = tag;
	lrbp->lun = 0;
	lrbp->intr_cmd = true;
	ufshcd_prepare_lrbp_crypto(NULL, lrbp);
	hba->dev_cmd.type = cmd_type;

	if (hba->ufs_version <= ufshci_version(1, 1))
		lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
	else
		lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;

	/* update the task tag in the request upiu */
	req_upiu->header.task_tag = tag;

	ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0);

	/* just copy the upiu request as it is */
	memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
	if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
		/* The Data Segment Area is optional depending upon the query
		 * function value. for WRITE DESCRIPTOR, the data segment
		 * follows right after the tsf.
		 */
		memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
		*buff_len = 0;
	}

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));

	hba->dev_cmd.complete = &wait;

	ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);

	ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
	/*
	 * ignore the returning value here - ufshcd_check_query_response is
	 * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
	 * read the response directly ignoring all errors.
	 */
	ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);

	/* just copy the upiu response as it is */
	memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
	if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
		u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu);
		u16 resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header
					   .data_segment_length);

		if (*buff_len >= resp_len) {
			memcpy(desc_buff, descp, resp_len);
			*buff_len = resp_len;
		} else {
			dev_warn(hba->dev,
				 "%s: rsp size %d is bigger than buffer size %d",
				 __func__, resp_len, *buff_len);
			*buff_len = 0;
			err = -EINVAL;
		}
	}
	ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
				    (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);

	up_read(&hba->clk_scaling_lock);
	return err;
}

/**
 * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
 * @hba: per-adapter instance
 * @req_upiu: upiu request
 * @rsp_upiu: upiu reply - only 8 DW as we do not support scsi commands
 * @msgcode: message code, one of UPIU Transaction Codes Initiator to Target
 * @desc_buff: pointer to descriptor buffer, NULL if NA
 * @buff_len: descriptor size, 0 if NA
 * @desc_op: descriptor operation
 *
 * Supports UTP Transfer requests (nop and query), and UTP Task
 * Management requests.
 * It is up to the caller to fill the upiu content properly, as it will
 * be copied without any further input validations.
 *
 * Return: 0 upon success; < 0 upon failure.
 */
int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
			     struct utp_upiu_req *req_upiu,
			     struct utp_upiu_req *rsp_upiu,
			     enum upiu_request_transaction msgcode,
			     u8 *desc_buff, int *buff_len,
			     enum query_opcode desc_op)
{
	int err;
	enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
	struct utp_task_req_desc treq = { };
	enum utp_ocs ocs_value;
	u8 tm_f = req_upiu->header.tm_function;

	switch (msgcode) {
	case UPIU_TRANSACTION_NOP_OUT:
		cmd_type = DEV_CMD_TYPE_NOP;
		fallthrough;
	case UPIU_TRANSACTION_QUERY_REQ:
		ufshcd_hold(hba);
		mutex_lock(&hba->dev_cmd.lock);
		err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
						   desc_buff, buff_len,
						   cmd_type, desc_op);
		mutex_unlock(&hba->dev_cmd.lock);
		ufshcd_release(hba);

		break;
	case UPIU_TRANSACTION_TASK_REQ:
		treq.header.interrupt = 1;
		treq.header.ocs = OCS_INVALID_COMMAND_STATUS;

		memcpy(&treq.upiu_req, req_upiu, sizeof(*req_upiu));

		err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
		if (err == -ETIMEDOUT)
			break;

		ocs_value = treq.header.ocs & MASK_OCS;
		if (ocs_value != OCS_SUCCESS) {
			dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
				ocs_value);
			break;
		}

		memcpy(rsp_upiu, &treq.upiu_rsp, sizeof(*rsp_upiu));

		break;
	default:
		err = -EINVAL;

		break;
	}

	return err;
}
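
/*
 * Hedged usage sketch (a bsg-style caller; values illustrative): a NOP
 * OUT round trip through this API could look like
 *
 *	struct utp_upiu_req req = {}, rsp = {};
 *	int len = 0;
 *
 *	req.header.transaction_code = UPIU_TRANSACTION_NOP_OUT;
 *	err = ufshcd_exec_raw_upiu_cmd(hba, &req, &rsp,
 *				       UPIU_TRANSACTION_NOP_OUT,
 *				       NULL, &len, UPIU_QUERY_OPCODE_NOP);
 *
 * with the caller responsible for a fully formed UPIU, as noted in the
 * kernel-doc above.
 */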

/**
 * ufshcd_advanced_rpmb_req_handler - handle advanced RPMB request
 * @hba: per adapter instance
 * @req_upiu: upiu request
 * @rsp_upiu: upiu reply
 * @req_ehs: EHS field which contains Advanced RPMB Request Message
 * @rsp_ehs: EHS field which returns Advanced RPMB Response Message
 * @sg_cnt: The number of sg lists actually used
 * @sg_list: Pointer to SG list when DATA IN/OUT UPIU is required in ARPMB operation
 * @dir: DMA direction
 *
 * Return: zero on success, non-zero on failure.
 */
int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu,
			 struct utp_upiu_req *rsp_upiu, struct ufs_ehs *req_ehs,
			 struct ufs_ehs *rsp_ehs, int sg_cnt, struct scatterlist *sg_list,
			 enum dma_data_direction dir)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	const u32 tag = hba->reserved_slot;
	struct ufshcd_lrb *lrbp;
	int err = 0;
	int result;
	u8 upiu_flags;
	u8 *ehs_data;
	u16 ehs_len;

	/* Protects use of hba->reserved_slot. */
	ufshcd_hold(hba);
	mutex_lock(&hba->dev_cmd.lock);
	down_read(&hba->clk_scaling_lock);

	lrbp = &hba->lrb[tag];
	lrbp->cmd = NULL;
	lrbp->task_tag = tag;
	lrbp->lun = UFS_UPIU_RPMB_WLUN;

	lrbp->intr_cmd = true;
	ufshcd_prepare_lrbp_crypto(NULL, lrbp);
	hba->dev_cmd.type = DEV_CMD_TYPE_RPMB;

	/* Advanced RPMB starts from UFS 4.0, so its command type is UTP_CMD_TYPE_UFS_STORAGE */
	lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;

	/*
	 * According to UFSHCI 4.0 specification page 24, if EHSLUTRDS is 0, host controller takes
	 * EHS length from CMD UPIU, and SW driver use EHS Length field in CMD UPIU. if it is 1,
	 * HW controller takes EHS length from UTRD.
	 */
	if (hba->capabilities & MASK_EHSLUTRD_SUPPORTED)
		ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 2);
	else
		ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 0);

	/* update the task tag */
	req_upiu->header.task_tag = tag;

	/* copy the UPIU(contains CDB) request as it is */
	memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
	/* Copy EHS, starting with byte32, immediately after the CDB package */
	memcpy(lrbp->ucd_req_ptr + 1, req_ehs, sizeof(*req_ehs));

	if (dir != DMA_NONE && sg_list)
		ufshcd_sgl_to_prdt(hba, lrbp, sg_cnt, sg_list);

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));

	hba->dev_cmd.complete = &wait;

	ufshcd_send_command(hba, tag, hba->dev_cmd_queue);

	err = ufshcd_wait_for_dev_cmd(hba, lrbp, ADVANCED_RPMB_REQ_TIMEOUT);

	if (!err) {
		/* Just copy the upiu response as it is */
		memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
		/* Get the response UPIU result */
		result = (lrbp->ucd_rsp_ptr->header.response << 8) |
			lrbp->ucd_rsp_ptr->header.status;

		ehs_len = lrbp->ucd_rsp_ptr->header.ehs_length;
		/*
		 * Since the bLength in EHS indicates the total size of the EHS Header and EHS Data
		 * in 32 Byte units, the value of the bLength Request/Response for Advanced RPMB
		 * Message is 02h.
		 */
		if (ehs_len == 2 && rsp_ehs) {
			/*
			 * ucd_rsp_ptr points to a buffer with a length of 512 bytes
			 * (ALIGNED_UPIU_SIZE = 512), and the EHS data just starts from byte32
			 */
			ehs_data = (u8 *)lrbp->ucd_rsp_ptr + EHS_OFFSET_IN_RESPONSE;
			memcpy(rsp_ehs, ehs_data, ehs_len * 32);
		}
	}

	up_read(&hba->clk_scaling_lock);
	mutex_unlock(&hba->dev_cmd.lock);
	ufshcd_release(hba);
	return err ? : result;
}

/**
 * ufshcd_eh_device_reset_handler() - Reset a single logical unit.
 * @cmd: SCSI command pointer
 *
 * Return: SUCCESS or FAILED.
 */
static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
	unsigned long flags, pending_reqs = 0, not_cleared = 0;
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	struct ufs_hw_queue *hwq;
	struct ufshcd_lrb *lrbp;
	u32 pos, not_cleared_mask = 0;
	int err;
	u8 resp = 0xF, lun;

	host = cmd->device->host;
	hba = shost_priv(host);

	lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
	err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
		if (!err)
			err = resp;
		goto out;
	}

	if (is_mcq_enabled(hba)) {
		for (pos = 0; pos < hba->nutrs; pos++) {
			lrbp = &hba->lrb[pos];
			if (ufshcd_cmd_inflight(lrbp->cmd) &&
			    lrbp->lun == lun) {
				ufshcd_clear_cmd(hba, pos);
				hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
				ufshcd_mcq_poll_cqe_lock(hba, hwq);
			}
		}
		err = 0;
		goto out;
	}

	/* clear the commands that were pending for corresponding LUN */
	spin_lock_irqsave(&hba->outstanding_lock, flags);
	for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs)
		if (hba->lrb[pos].lun == lun)
			__set_bit(pos, &pending_reqs);
	hba->outstanding_reqs &= ~pending_reqs;
	spin_unlock_irqrestore(&hba->outstanding_lock, flags);

	for_each_set_bit(pos, &pending_reqs, hba->nutrs) {
		if (ufshcd_clear_cmd(hba, pos) < 0) {
			spin_lock_irqsave(&hba->outstanding_lock, flags);
			not_cleared = 1U << pos &
				ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
			hba->outstanding_reqs |= not_cleared;
			not_cleared_mask |= not_cleared;
			spin_unlock_irqrestore(&hba->outstanding_lock, flags);

			dev_err(hba->dev, "%s: failed to clear request %d\n",
				__func__, pos);
		}
	}
	__ufshcd_transfer_req_compl(hba, pending_reqs & ~not_cleared_mask);

out:
	hba->req_abort_count = 0;
	ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, (u32)err);
	if (!err) {
		err = SUCCESS;
	} else {
		dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
		err = FAILED;
	}
	return err;
}

static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
{
	struct ufshcd_lrb *lrbp;
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutrs) {
		lrbp = &hba->lrb[tag];
		lrbp->req_abort_skip = true;
	}
}

/**
 * ufshcd_try_to_abort_task - abort a specific task
 * @hba: Pointer to adapter instance
 * @tag: Task tag/index to be aborted
 *
 * Abort the pending command in device by sending UFS_ABORT_TASK task management
 * command, and in host controller by clearing the door-bell register. There can
 * be race between controller sending the command to the device while abort is
 * issued. To avoid that, first issue UFS_QUERY_TASK to check if the command is
 * really issued and then try to abort it.
 *
 * Return: zero on success, non-zero on failure.
 */
int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
{
	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
	int err = 0;
	int poll_cnt;
	u8 resp = 0xF;
	u32 reg;

	for (poll_cnt = 100; poll_cnt; poll_cnt--) {
		err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
				UFS_QUERY_TASK, &resp);
		if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
			/* cmd pending in the device */
			dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
				__func__, tag);
			break;
		} else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
			/*
			 * cmd not pending in the device, check if it is
			 * in transition.
			 */
			dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
				__func__, tag);
			if (is_mcq_enabled(hba)) {
				/* MCQ mode */
				if (ufshcd_cmd_inflight(lrbp->cmd)) {
					/* sleep for max. 200us same delay as in SDB mode */
					usleep_range(100, 200);
					continue;
				}
				/* command completed already */
				dev_err(hba->dev, "%s: cmd at tag=%d is cleared.\n",
					__func__, tag);
				goto out;
			}

			/* Single Doorbell Mode */
			reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
			if (reg & (1 << tag)) {
				/* sleep for max. 200us to stabilize */
				usleep_range(100, 200);
				continue;
			}
			/* command completed already */
			dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
				__func__, tag);
			goto out;
		} else {
			dev_err(hba->dev,
				"%s: no response from device. tag = %d, err %d\n",
				__func__, tag, err);
			if (!err)
				err = resp; /* service response error */
			goto out;
		}
	}

	if (!poll_cnt) {
		err = -EBUSY;
		goto out;
	}

	err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
			UFS_ABORT_TASK, &resp);
	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
		if (!err) {
			err = resp; /* service response error */
			dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
				__func__, tag, err);
		}
		goto out;
	}

	err = ufshcd_clear_cmd(hba, tag);
	if (err)
		dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
			__func__, tag, err);

out:
	return err;
}
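
/*
 * Polling budget note: the QUERY_TASK loop above tries at most 100
 * rounds with a 100-200 us sleep between them, so a command stuck "in
 * transition" is given on the order of tens of milliseconds (plus the TM
 * round trips) before the abort gives up.
 */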

/**
 * ufshcd_abort - scsi host template eh_abort_handler callback
 * @cmd: SCSI command pointer
 *
 * Return: SUCCESS or FAILED.
 */
static int ufshcd_abort(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct ufs_hba *hba = shost_priv(host);
	int tag = scsi_cmd_to_rq(cmd)->tag;
	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
	unsigned long flags;
	int err = FAILED;
	bool outstanding;
	u32 reg;

	ufshcd_hold(hba);

	if (!is_mcq_enabled(hba)) {
		reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
		if (!test_bit(tag, &hba->outstanding_reqs)) {
			/* If command is already aborted/completed, return FAILED. */
			dev_err(hba->dev,
				"%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
				__func__, tag, hba->outstanding_reqs, reg);
			goto release;
		}
	}

	/* Print Transfer Request of aborted task */
	dev_info(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);

	/*
	 * Print detailed info about aborted request.
	 * As more than one request might get aborted at the same time,
	 * print full information only for the first aborted request in order
	 * to reduce repeated printouts. For other aborted requests only print
	 * basic details.
	 */
	scsi_print_command(cmd);
	if (!hba->req_abort_count) {
		ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, tag);
		ufshcd_print_evt_hist(hba);
		ufshcd_print_host_state(hba);
		ufshcd_print_pwr_info(hba);
		ufshcd_print_tr(hba, tag, true);
	} else {
		ufshcd_print_tr(hba, tag, false);
	}
	hba->req_abort_count++;

	if (!is_mcq_enabled(hba) && !(reg & (1 << tag))) {
		/* only execute this code in single doorbell mode */
		dev_err(hba->dev,
			"%s: cmd was completed, but without a notifying intr, tag = %d",
			__func__, tag);
		__ufshcd_transfer_req_compl(hba, 1UL << tag);
		goto release;
	}

	/*
	 * Task abort to the device W-LUN is illegal. When this command
	 * will fail, due to spec violation, scsi err handling next step
	 * will be to send LU reset which, again, is a spec violation.
	 * To avoid these unnecessary/illegal steps, first we clean up
	 * the lrb taken by this cmd and re-set it in outstanding_reqs,
	 * then queue the eh_work and bail.
	 */
	if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
		ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);

		spin_lock_irqsave(host->host_lock, flags);
		hba->force_reset = true;
		ufshcd_schedule_eh_work(hba);
		spin_unlock_irqrestore(host->host_lock, flags);
		goto release;
	}

	if (is_mcq_enabled(hba)) {
		/* MCQ mode. Branch off to handle abort for mcq mode */
		err = ufshcd_mcq_abort(cmd);
		goto release;
	}

	/* Skip task abort in case previous aborts failed and report failure */
	if (lrbp->req_abort_skip) {
		dev_err(hba->dev, "%s: skipping abort\n", __func__);
		ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
		goto release;
	}

	err = ufshcd_try_to_abort_task(hba, tag);
	if (err) {
		dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
		ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
		err = FAILED;
		goto release;
	}

	/*
	 * Clear the corresponding bit from outstanding_reqs since the command
	 * has been aborted successfully.
	 */
	spin_lock_irqsave(&hba->outstanding_lock, flags);
	outstanding = __test_and_clear_bit(tag, &hba->outstanding_reqs);
	spin_unlock_irqrestore(&hba->outstanding_lock, flags);

	if (outstanding)
		ufshcd_release_scsi_cmd(hba, lrbp);

	err = SUCCESS;

release:
	/* Matches the ufshcd_hold() call at the start of this function. */
	ufshcd_release(hba);
	return err;
}

/**
 * ufshcd_host_reset_and_restore - reset and restore host controller
 * @hba: per-adapter instance
 *
 * Note that host controller reset may issue DME_RESET to
 * local and remote (device) Uni-Pro stack and the attributes
 * are reset to default state.
 *
 * Return: zero on success, non-zero on failure.
 */
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
{
	int err;

	/*
	 * Stop the host controller and complete the requests
	 * cleared by h/w
	 */
	ufshcd_hba_stop(hba);
	hba->silence_err_logs = true;
	ufshcd_complete_requests(hba, true);
	hba->silence_err_logs = false;

	/* scale up clocks to max frequency before full reinitialization */
	ufshcd_scale_clks(hba, ULONG_MAX, true);

	err = ufshcd_hba_enable(hba);

	/* Establish the link again and restore the device */
	if (!err)
		err = ufshcd_probe_hba(hba, false);

	if (err)
		dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
	ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err);
	return err;
}

/**
 * ufshcd_reset_and_restore - reset and re-initialize host/device
 * @hba: per-adapter instance
 *
 * Reset and recover device, host and re-establish link. This
 * is helpful to recover the communication in fatal error conditions.
 *
 * Return: zero on success, non-zero on failure.
 */
static int ufshcd_reset_and_restore(struct ufs_hba *hba)
{
	u32 saved_err = 0;
	u32 saved_uic_err = 0;
	int err = 0;
	unsigned long flags;
	int retries = MAX_HOST_RESET_RETRIES;

	spin_lock_irqsave(hba->host->host_lock, flags);
	do {
		/*
		 * This is a fresh start, cache and clear saved error first,
		 * in case new error generated during reset and restore.
		 */
		saved_err |= hba->saved_err;
		saved_uic_err |= hba->saved_uic_err;
		hba->saved_err = 0;
		hba->saved_uic_err = 0;
		hba->force_reset = false;
		hba->ufshcd_state = UFSHCD_STATE_RESET;
		spin_unlock_irqrestore(hba->host->host_lock, flags);

		/* Reset the attached device */
		ufshcd_device_reset(hba);

		err = ufshcd_host_reset_and_restore(hba);

		spin_lock_irqsave(hba->host->host_lock, flags);
		if (err)
			continue;
		/* Do not exit unless operational or dead */
		if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL &&
		    hba->ufshcd_state != UFSHCD_STATE_ERROR &&
		    hba->ufshcd_state != UFSHCD_STATE_EH_SCHEDULED_NON_FATAL)
			err = -EAGAIN;
	} while (err && --retries);

	/*
	 * Inform scsi mid-layer that we did reset and allow to handle
	 * Unit Attention properly.
	 */
	scsi_report_bus_reset(hba->host, 0);
	if (err) {
		hba->ufshcd_state = UFSHCD_STATE_ERROR;
		hba->saved_err |= saved_err;
		hba->saved_uic_err |= saved_uic_err;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return err;
}
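
/*
 * Recovery budget note: a full device reset plus host reset/restore is
 * attempted up to MAX_HOST_RESET_RETRIES times. The saved error bits are
 * cached across attempts and ORed back on final failure, so a dead HBA
 * still reports why recovery was required.
 */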

/**
 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
 * @cmd: SCSI command pointer
 *
 * Return: SUCCESS or FAILED.
 */
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	int err = SUCCESS;
	unsigned long flags;
	struct ufs_hba *hba;

	hba = shost_priv(cmd->device->host);

	/*
	 * If runtime PM sent SSU and got a timeout, scsi_error_handler is
	 * stuck in this function waiting for flush_work(&hba->eh_work). And
	 * ufshcd_err_handler(eh_work) is stuck waiting for runtime PM. Do
	 * ufshcd_link_recovery instead of eh_work to prevent deadlock.
	 */
	if (hba->pm_op_in_progress) {
		if (ufshcd_link_recovery(hba))
			err = FAILED;

		return err;
	}

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->force_reset = true;
	ufshcd_schedule_eh_work(hba);
	dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	flush_work(&hba->eh_work);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->ufshcd_state == UFSHCD_STATE_ERROR)
		err = FAILED;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return err;
}

/**
 * ufshcd_get_max_icc_level - calculate the ICC level
 * @sup_curr_uA: max. current supported by the regulator
 * @start_scan: row at the desc table to start scan from
 * @buff: power descriptor buffer
 *
 * Return: calculated max ICC level for specific regulator.
 */
static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan,
				    const char *buff)
{
	int i;
	int curr_uA;
	u16 data;
	u16 unit;

	for (i = start_scan; i >= 0; i--) {
		data = get_unaligned_be16(&buff[2 * i]);
		unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
						ATTR_ICC_LVL_UNIT_OFFSET;
		curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
		switch (unit) {
		case UFSHCD_NANO_AMP:
			curr_uA = curr_uA / 1000;
			break;
		case UFSHCD_MILI_AMP:
			curr_uA = curr_uA * 1000;
			break;
		case UFSHCD_AMP:
			curr_uA = curr_uA * 1000 * 1000;
			break;
		case UFSHCD_MICRO_AMP:
		default:
			break;
		}
		if (sup_curr_uA >= curr_uA)
			break;
	}
	if (i < 0) {
		i = 0;
		pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
	}

	return (u32)i;
}
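
/*
 * Worked example for the scan above (values illustrative): each 16-bit
 * descriptor entry packs a unit code and a current value; an entry whose
 * value decodes to 3000 with a milliamp unit code scales to 3000 * 1000 =
 * 3,000,000 uA. Scanning from @start_scan downward returns the first
 * (i.e. highest) level whose current fits within the regulator's limit.
 */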

/**
 * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level
 * In case regulators are not initialized we'll return 0
 * @hba: per-adapter instance
 * @desc_buf: power descriptor buffer to extract ICC levels from.
 *
 * Return: calculated ICC level.
 */
static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
						const u8 *desc_buf)
{
	u32 icc_level = 0;

	if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
	    !hba->vreg_info.vccq2) {
		/*
		 * Using dev_dbg to avoid messages during runtime PM to avoid
		 * never-ending cycles of messages written back to storage by
		 * user space causing runtime resume, causing more messages and
		 * so on.
		 */
		dev_dbg(hba->dev,
			"%s: Regulator capability was not set, actvIccLevel=%d",
			__func__, icc_level);
		goto out;
	}

	if (hba->vreg_info.vcc->max_uA)
		icc_level = ufshcd_get_max_icc_level(
				hba->vreg_info.vcc->max_uA,
				POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);

	if (hba->vreg_info.vccq->max_uA)
		icc_level = ufshcd_get_max_icc_level(
				hba->vreg_info.vccq->max_uA,
				icc_level,
				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);

	if (hba->vreg_info.vccq2->max_uA)
		icc_level = ufshcd_get_max_icc_level(
				hba->vreg_info.vccq2->max_uA,
				icc_level,
				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
out:
	return icc_level;
}

static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
{
	int ret;
	u8 *desc_buf;
	u32 icc_level;

	desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
	if (!desc_buf)
		return;

	ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0,
				     desc_buf, QUERY_DESC_MAX_SIZE);
	if (ret) {
		dev_err(hba->dev,
			"%s: Failed reading power descriptor ret = %d",
			__func__, ret);
		goto out;
	}

	icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf);
	dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level);

	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
		QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);

	if (ret)
		dev_err(hba->dev,
			"%s: Failed configuring bActiveICCLevel = %d ret = %d",
			__func__, icc_level, ret);

out:
	kfree(desc_buf);
}

static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev)
{
	scsi_autopm_get_device(sdev);
	blk_pm_runtime_init(sdev->request_queue, &sdev->sdev_gendev);
	if (sdev->rpm_autosuspend)
		pm_runtime_set_autosuspend_delay(&sdev->sdev_gendev,
						 RPM_AUTOSUSPEND_DELAY_MS);
	scsi_autopm_put_device(sdev);
}

/**
 * ufshcd_scsi_add_wlus - Adds required W-LUs
 * @hba: per-adapter instance
 *
 * UFS device specification requires the UFS devices to support 4 well known
 * logical units:
 *	"REPORT_LUNS" (address: 01h)
 *	"UFS Device" (address: 50h)
 *	"RPMB" (address: 44h)
 *	"BOOT" (address: 30h)
 *
 * UFS device's power management needs to be controlled by "POWER CONDITION"
 * field of SSU (START STOP UNIT) command. But this "power condition" field
 * will take effect only when its sent to "UFS device" well known logical unit
 * hence we require the scsi_device instance to represent this logical unit in
 * order for the UFS host driver to send the SSU command for power management.
 *
 * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
 * Block) LU so user space process can control this LU. User space may also
 * want to have access to BOOT LU.
 *
 * This function adds scsi device instances for each of all well known LUs
 * (except "REPORT LUNS" LU).
 *
 * Return: zero on success (all required W-LUs are added successfully),
 * non-zero error value on failure (if failed to add any of the required W-LU).
 */
static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
{
	int ret = 0;
	struct scsi_device *sdev_boot, *sdev_rpmb;

	hba->ufs_device_wlun = __scsi_add_device(hba->host, 0, 0,
		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
	if (IS_ERR(hba->ufs_device_wlun)) {
		ret = PTR_ERR(hba->ufs_device_wlun);
		hba->ufs_device_wlun = NULL;
		goto out;
	}
	scsi_device_put(hba->ufs_device_wlun);

	sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
	if (IS_ERR(sdev_rpmb)) {
		ret = PTR_ERR(sdev_rpmb);
		goto remove_ufs_device_wlun;
	}
	ufshcd_blk_pm_runtime_init(sdev_rpmb);
	scsi_device_put(sdev_rpmb);

	sdev_boot = __scsi_add_device(hba->host, 0, 0,
		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
	if (IS_ERR(sdev_boot)) {
		dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
	} else {
		ufshcd_blk_pm_runtime_init(sdev_boot);
		scsi_device_put(sdev_boot);
	}
	goto out;

remove_ufs_device_wlun:
	scsi_remove_device(hba->ufs_device_wlun);
out:
	return ret;
}

static void ufshcd_wb_probe(struct ufs_hba *hba, const u8 *desc_buf)
{
	struct ufs_dev_info *dev_info = &hba->dev_info;
	u8 lun;
	u32 d_lu_wb_buf_alloc;
	u32 ext_ufs_feature;

	if (!ufshcd_is_wb_allowed(hba))
		return;

	/*
	 * Probe WB only for UFS-2.2 and UFS-3.1 (and later) devices or
	 * UFS devices with quirk UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES
	 * enabled
	 */
	if (!(dev_info->wspecversion >= 0x310 ||
	      dev_info->wspecversion == 0x220 ||
	      (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES)))
		goto wb_disabled;

	ext_ufs_feature = get_unaligned_be32(desc_buf +
					DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);

	if (!(ext_ufs_feature & UFS_DEV_WRITE_BOOSTER_SUP))
		goto wb_disabled;

	/*
	 * WB may be supported but not configured while provisioning. The spec
	 * says, in dedicated wb buffer mode, a max of 1 lun would have wb
	 * buffer configured.
	 */
	dev_info->wb_buffer_type = desc_buf[DEVICE_DESC_PARAM_WB_TYPE];

	dev_info->b_presrv_uspc_en =
		desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];

	if (dev_info->wb_buffer_type == WB_BUF_MODE_SHARED) {
		if (!get_unaligned_be32(desc_buf +
				   DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS))
			goto wb_disabled;
	} else {
		for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) {
			d_lu_wb_buf_alloc = 0;
			ufshcd_read_unit_desc_param(hba,
					lun,
					UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS,
					(u8 *)&d_lu_wb_buf_alloc,
					sizeof(d_lu_wb_buf_alloc));
			if (d_lu_wb_buf_alloc) {
				dev_info->wb_dedicated_lu = lun;
				break;
			}
		}

		if (!d_lu_wb_buf_alloc)
			goto wb_disabled;
	}

	if (!ufshcd_is_wb_buf_lifetime_available(hba))
		goto wb_disabled;

	return;

wb_disabled:
	hba->caps &= ~UFSHCD_CAP_WB_EN;
}

static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf)
{
	struct ufs_dev_info *dev_info = &hba->dev_info;
	u32 ext_ufs_feature;
	u8 mask = 0;

	if (!(hba->caps & UFSHCD_CAP_TEMP_NOTIF) || dev_info->wspecversion < 0x300)
		return;

	ext_ufs_feature = get_unaligned_be32(desc_buf + DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);

	if (ext_ufs_feature & UFS_DEV_LOW_TEMP_NOTIF)
		mask |= MASK_EE_TOO_LOW_TEMP;

	if (ext_ufs_feature & UFS_DEV_HIGH_TEMP_NOTIF)
		mask |= MASK_EE_TOO_HIGH_TEMP;

	if (mask) {
		ufshcd_enable_ee(hba, mask);
		ufs_hwmon_probe(hba, mask);
	}
}

static void ufshcd_ext_iid_probe(struct ufs_hba *hba, u8 *desc_buf)
{
	struct ufs_dev_info *dev_info = &hba->dev_info;
	u32 ext_ufs_feature;
	u32 ext_iid_en = 0;
	int err;

	/* Only UFS-4.0 and above may support EXT_IID */
	if (dev_info->wspecversion < 0x400)
		goto out;

	ext_ufs_feature = get_unaligned_be32(desc_buf +
				     DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
	if (!(ext_ufs_feature & UFS_DEV_EXT_IID_SUP))
		goto out;

	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				      QUERY_ATTR_IDN_EXT_IID_EN, 0, 0, &ext_iid_en);
	if (err)
		dev_err(hba->dev, "failed reading bEXTIIDEn. err = %d\n", err);

out:
	dev_info->b_ext_iid_en = ext_iid_en;
}

void ufshcd_fixup_dev_quirks(struct ufs_hba *hba,
			     const struct ufs_dev_quirk *fixups)
{
	const struct ufs_dev_quirk *f;
	struct ufs_dev_info *dev_info = &hba->dev_info;

	if (!fixups)
		return;

	for (f = fixups; f->quirk; f++) {
		if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
		     f->wmanufacturerid == UFS_ANY_VENDOR) &&
		     ((dev_info->model &&
		       STR_PRFX_EQUAL(f->model, dev_info->model)) ||
		      !strcmp(f->model, UFS_ANY_MODEL)))
			hba->dev_quirks |= f->quirk;
	}
}
EXPORT_SYMBOL_GPL(ufshcd_fixup_dev_quirks);

static void ufs_fixup_device_setup(struct ufs_hba *hba)
{
	/* fix by general quirk table */
	ufshcd_fixup_dev_quirks(hba, ufs_fixups);

	/* allow vendors to fix quirks */
	ufshcd_vops_fixup_dev_quirks(hba);
}

static void ufshcd_update_rtc(struct ufs_hba *hba)
{
	struct timespec64 ts64;
	int err;
	u32 val;

	ktime_get_real_ts64(&ts64);

	if (ts64.tv_sec < hba->dev_info.rtc_time_baseline) {
		dev_warn_once(hba->dev, "%s: Current time precedes previous setting!\n", __func__);
		return;
	}

	/*
	 * The Absolute RTC mode has a 136-year limit, spanning from 2010 to 2146. If a time beyond
	 * 2146 is required, it is recommended to choose the relative RTC mode.
	 */
	val = ts64.tv_sec - hba->dev_info.rtc_time_baseline;

	ufshcd_rpm_get_sync(hba);
	err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, QUERY_ATTR_IDN_SECONDS_PASSED,
				0, 0, &val);
	ufshcd_rpm_put_sync(hba);

	if (err)
		dev_err(hba->dev, "%s: Failed to update rtc %d\n", __func__, err);
	else if (hba->dev_info.rtc_type == UFS_RTC_RELATIVE)
		hba->dev_info.rtc_time_baseline = ts64.tv_sec;
}

static void ufshcd_rtc_work(struct work_struct *work)
{
	struct ufs_hba *hba;

	hba = container_of(to_delayed_work(work), struct ufs_hba, ufs_rtc_update_work);

	/* Update RTC only when there are no requests in progress and UFSHCI is operational */
	if (!ufshcd_is_ufs_dev_busy(hba) && hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL)
		ufshcd_update_rtc(hba);

	if (ufshcd_is_ufs_dev_active(hba) && hba->dev_info.rtc_update_period)
		schedule_delayed_work(&hba->ufs_rtc_update_work,
				      msecs_to_jiffies(hba->dev_info.rtc_update_period));
}

static void ufs_init_rtc(struct ufs_hba *hba, u8 *desc_buf)
{
	u16 periodic_rtc_update = get_unaligned_be16(&desc_buf[DEVICE_DESC_PARAM_FRQ_RTC]);
	struct ufs_dev_info *dev_info = &hba->dev_info;

	if (periodic_rtc_update & UFS_RTC_TIME_BASELINE) {
		dev_info->rtc_type = UFS_RTC_ABSOLUTE;

		/*
		 * The concept of measuring time in Linux as the number of seconds elapsed since
		 * 00:00:00 UTC on January 1, 1970, and UFS ABS RTC is elapsed from January 1st
		 * 2010 00:00, here we need to adjust ABS baseline.
		 */
		dev_info->rtc_time_baseline = mktime64(2010, 1, 1, 0, 0, 0) -
							mktime64(1970, 1, 1, 0, 0, 0);
	} else {
		dev_info->rtc_type = UFS_RTC_RELATIVE;
		dev_info->rtc_time_baseline = 0;
	}

	/*
	 * We ignore TIME_PERIOD defined in wPeriodicRTCUpdate because Spec does not clearly state
	 * how to calculate the specific update period for each time unit. And we disable periodic
	 * RTC update work, let user configure by sysfs node according to specific circumstance.
	 */
	dev_info->rtc_update_period = 0;
}
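
/*
 * For reference, the absolute-mode baseline computed above is the fixed
 * offset between the UFS RTC epoch (2010-01-01 00:00 UTC) and the Unix
 * epoch (1970-01-01 00:00 UTC):
 *
 *	mktime64(2010, 1, 1, 0, 0, 0) - mktime64(1970, 1, 1, 0, 0, 0)
 *		== 1262304000 seconds
 */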
static int ufs_get_device_desc(struct ufs_hba *hba)
{
	int err;
	u8 model_index;
	u8 *desc_buf;
	struct ufs_dev_info *dev_info = &hba->dev_info;

	desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
	if (!desc_buf) {
		err = -ENOMEM;
		goto out;
	}

	err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf,
				     QUERY_DESC_MAX_SIZE);
	if (err) {
		dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
			__func__, err);
		goto out;
	}

	/*
	 * getting vendor (manufacturerID) and Bank Index in big endian
	 * format
	 */
	dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
				     desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];

	/* getting Specification Version in big endian format */
	dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
				      desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
	dev_info->bqueuedepth = desc_buf[DEVICE_DESC_PARAM_Q_DPTH];

	model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];

	err = ufshcd_read_string_desc(hba, model_index,
				      &dev_info->model, SD_ASCII_STD);
	if (err < 0) {
		dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
			__func__, err);
		goto out;
	}

	hba->luns_avail = desc_buf[DEVICE_DESC_PARAM_NUM_LU] +
		desc_buf[DEVICE_DESC_PARAM_NUM_WLU];

	ufs_fixup_device_setup(hba);

	ufshcd_wb_probe(hba, desc_buf);

	ufshcd_temp_notif_probe(hba, desc_buf);

	ufs_init_rtc(hba, desc_buf);

	if (hba->ext_iid_sup)
		ufshcd_ext_iid_probe(hba, desc_buf);

	/*
	 * ufshcd_read_string_desc returns size of the string
	 * reset the error value
	 */
	err = 0;

out:
	kfree(desc_buf);
	return err;
}

static void ufs_put_device_desc(struct ufs_hba *hba)
{
	struct ufs_dev_info *dev_info = &hba->dev_info;

	kfree(dev_info->model);
	dev_info->model = NULL;
}
/**
 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
 * @hba: per-adapter instance
 *
 * PA_TActivate parameter can be tuned manually if UniPro version is less than
 * 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's
 * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
 * the hibern8 exit latency.
 *
 * Return: zero on success, non-zero error value on failure.
 */
static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
{
	int ret = 0;
	u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;

	ret = ufshcd_dme_peer_get(hba,
				  UIC_ARG_MIB_SEL(
					RX_MIN_ACTIVATETIME_CAPABILITY,
					UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
				  &peer_rx_min_activatetime);
	if (ret)
		goto out;

	/* make sure proper unit conversion is applied */
	tuned_pa_tactivate =
		((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
		 / PA_TACTIVATE_TIME_UNIT_US);
	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
			     tuned_pa_tactivate);

out:
	return ret;
}
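/*
 * Unit-conversion sketch (illustrative; assumes the usual 100 us capability
 * unit and 10 us PA_TACTIVATE unit from the UniPro headers): a peer reporting
 * RX_MIN_ACTIVATETIME_CAPABILITY == 2 yields tuned_pa_tactivate ==
 * (2 * 100) / 10 == 20, i.e. 200 us expressed in PA_TActivate units.
 */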
/**
 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
 * @hba: per-adapter instance
 *
 * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
 * 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
 * TX_HIBERN8TIME_CAPABILITY and the peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
 * This optimal value can help reduce the hibern8 exit latency.
 *
 * Return: zero on success, non-zero error value on failure.
 */
static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
{
	int ret = 0;
	u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
	u32 max_hibern8_time, tuned_pa_hibern8time;

	ret = ufshcd_dme_get(hba,
			     UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
				  &local_tx_hibern8_time_cap);
	if (ret)
		goto out;

	ret = ufshcd_dme_peer_get(hba,
				  UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
					UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
				  &peer_rx_hibern8_time_cap);
	if (ret)
		goto out;

	max_hibern8_time = max(local_tx_hibern8_time_cap,
			       peer_rx_hibern8_time_cap);
	/* make sure proper unit conversion is applied */
	tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
				/ PA_HIBERN8_TIME_UNIT_US);
	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
			     tuned_pa_hibern8time);
out:
	return ret;
}
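/*
 * Sketch (illustrative, assumed capability values): with a local
 * TX_HIBERN8TIME_CAPABILITY of 3 and a peer RX_HIBERN8TIME_CAPABILITY of 5,
 * max_hibern8_time is 5; the subsequent multiply/divide only rescales that
 * maximum from capability units into PA_Hibern8Time units.
 */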
/**
 * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
 * less than device PA_TACTIVATE time.
 * @hba: per-adapter instance
 *
 * Some UFS devices require host PA_TACTIVATE to be lower than device
 * PA_TACTIVATE; the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk must be enabled
 * for such devices.
 *
 * Return: zero on success, non-zero error value on failure.
 */
static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
{
	int ret = 0;
	u32 granularity, peer_granularity;
	u32 pa_tactivate, peer_pa_tactivate;
	u32 pa_tactivate_us, peer_pa_tactivate_us;
	static const u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
				  &granularity);
	if (ret)
		goto out;

	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
				  &peer_granularity);
	if (ret)
		goto out;

	if ((granularity < PA_GRANULARITY_MIN_VAL) ||
	    (granularity > PA_GRANULARITY_MAX_VAL)) {
		dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
			__func__, granularity);
		return -EINVAL;
	}

	if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
	    (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
		dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
			__func__, peer_granularity);
		return -EINVAL;
	}

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
	if (ret)
		goto out;

	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
				  &peer_pa_tactivate);
	if (ret)
		goto out;

	pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
	peer_pa_tactivate_us = peer_pa_tactivate *
			     gran_to_us_table[peer_granularity - 1];

	if (pa_tactivate_us >= peer_pa_tactivate_us) {
		u32 new_peer_pa_tactivate;

		new_peer_pa_tactivate = pa_tactivate_us /
			gran_to_us_table[peer_granularity - 1];
		new_peer_pa_tactivate++;
		ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
					  new_peer_pa_tactivate);
	}

out:
	return ret;
}
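/*
 * Granularity sketch (illustrative): gran_to_us_table[] maps PA_GRANULARITY
 * values 1..6 to step sizes of 1, 4, 8, 16, 32 and 100 us. With a host at
 * granularity 3 (8 us) and PA_TACTIVATE == 2, pa_tactivate_us is 16 us; a
 * peer at granularity 1 (1 us) is then raised to 16 / 1 + 1 == 17 steps so
 * that the peer value always exceeds the host value.
 */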
static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
{
	if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
		ufshcd_tune_pa_tactivate(hba);
		ufshcd_tune_pa_hibern8time(hba);
	}

	ufshcd_vops_apply_dev_quirks(hba);

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
		/* set 1ms timeout for PA_TACTIVATE */
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
		ufshcd_quirk_tune_host_pa_tactivate(hba);
}
static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
{
	hba->ufs_stats.hibern8_exit_cnt = 0;
	hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
	hba->req_abort_count = 0;
}
static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
{
	int err;
	u8 *desc_buf;

	desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
	if (!desc_buf) {
		err = -ENOMEM;
		goto out;
	}

	err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0,
				     desc_buf, QUERY_DESC_MAX_SIZE);
	if (err) {
		dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
				__func__, err);
		goto out;
	}

	if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1)
		hba->dev_info.max_lu_supported = 32;
	else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
		hba->dev_info.max_lu_supported = 8;

out:
	kfree(desc_buf);
	return err;
}
struct ufs_ref_clk {
	unsigned long freq_hz;
	enum ufs_ref_clk_freq val;
};

static const struct ufs_ref_clk ufs_ref_clk_freqs[] = {
	{19200000, REF_CLK_FREQ_19_2_MHZ},
	{26000000, REF_CLK_FREQ_26_MHZ},
	{38400000, REF_CLK_FREQ_38_4_MHZ},
	{52000000, REF_CLK_FREQ_52_MHZ},
	{0, REF_CLK_FREQ_INVAL},
};

static enum ufs_ref_clk_freq
ufs_get_bref_clk_from_hz(unsigned long freq)
{
	int i;

	for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++)
		if (ufs_ref_clk_freqs[i].freq_hz == freq)
			return ufs_ref_clk_freqs[i].val;

	return REF_CLK_FREQ_INVAL;
}
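/*
 * Lookup sketch (illustrative): ufs_get_bref_clk_from_hz(26000000) returns
 * REF_CLK_FREQ_26_MHZ, while an unlisted rate such as 24000000 stops the
 * walk at the {0, REF_CLK_FREQ_INVAL} sentinel and yields REF_CLK_FREQ_INVAL,
 * which the callers below reject.
 */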
void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
{
	unsigned long freq;

	freq = clk_get_rate(refclk);

	hba->dev_ref_clk_freq =
		ufs_get_bref_clk_from_hz(freq);

	if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
		dev_err(hba->dev,
		"invalid ref_clk setting = %ld\n", freq);
}
static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
{
	int err;
	u32 ref_clk;
	u32 freq = hba->dev_ref_clk_freq;

	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);

	if (err) {
		dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
			err);
		goto out;
	}

	if (ref_clk == freq)
		goto out; /* nothing to update */

	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
			QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq);

	if (err) {
		dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
			ufs_ref_clk_freqs[freq].freq_hz);
		goto out;
	}

	dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n",
			ufs_ref_clk_freqs[freq].freq_hz);

out:
	return err;
}
static int ufshcd_device_params_init(struct ufs_hba *hba)
{
	bool flag;
	int ret;

	/* Init UFS geometry descriptor related parameters */
	ret = ufshcd_device_geo_params_init(hba);
	if (ret)
		goto out;

	/* Check and apply UFS device quirks */
	ret = ufs_get_device_desc(hba);
	if (ret) {
		dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
			__func__, ret);
		goto out;
	}

	ufshcd_get_ref_clk_gating_wait(hba);

	if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
			QUERY_FLAG_IDN_PWR_ON_WPE, 0, &flag))
		hba->dev_info.f_power_on_wp_en = flag;

	/* Probe maximum power mode co-supported by both UFS host and device */
	if (ufshcd_get_max_pwr_mode(hba))
		dev_err(hba->dev,
			"%s: Failed getting max supported power mode\n",
			__func__);
out:
	return ret;
}
static void ufshcd_set_timestamp_attr(struct ufs_hba *hba)
{
	int err;
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	struct ufs_dev_info *dev_info = &hba->dev_info;
	struct utp_upiu_query_v4_0 *upiu_data;

	if (dev_info->wspecversion < 0x400)
		return;

	ufshcd_hold(hba);

	mutex_lock(&hba->dev_cmd.lock);

	ufshcd_init_query(hba, &request, &response,
			  UPIU_QUERY_OPCODE_WRITE_ATTR,
			  QUERY_ATTR_IDN_TIMESTAMP, 0, 0);

	request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;

	upiu_data = (struct utp_upiu_query_v4_0 *)&request->upiu_req;

	put_unaligned_be64(ktime_get_real_ns(), &upiu_data->osf3);

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);

	if (err)
		dev_err(hba->dev, "%s: failed to set timestamp %d\n",
			__func__, err);

	mutex_unlock(&hba->dev_cmd.lock);
	ufshcd_release(hba);
}
/**
 * ufshcd_add_lus - probe and add UFS logical units
 * @hba: per-adapter instance
 *
 * Return: 0 upon success; < 0 upon failure.
 */
static int ufshcd_add_lus(struct ufs_hba *hba)
{
	int ret;

	/* Add required well known logical units to scsi mid layer */
	ret = ufshcd_scsi_add_wlus(hba);
	if (ret)
		goto out;

	/* Initialize devfreq after UFS device is detected */
	if (ufshcd_is_clkscaling_supported(hba)) {
		memcpy(&hba->clk_scaling.saved_pwr_info,
			&hba->pwr_info,
			sizeof(struct ufs_pa_layer_attr));
		hba->clk_scaling.is_allowed = true;

		ret = ufshcd_devfreq_init(hba);
		if (ret)
			goto out;

		hba->clk_scaling.is_enabled = true;
		ufshcd_init_clk_scaling_sysfs(hba);
	}

	ufs_bsg_probe(hba);
	scsi_scan_host(hba->host);
	pm_runtime_put_sync(hba->dev);

out:
	return ret;
}
/* SDB - Single Doorbell */
static void ufshcd_release_sdb_queue(struct ufs_hba *hba, int nutrs)
{
	size_t ucdl_size, utrdl_size;

	ucdl_size = ufshcd_get_ucd_size(hba) * nutrs;
	dmam_free_coherent(hba->dev, ucdl_size, hba->ucdl_base_addr,
			   hba->ucdl_dma_addr);

	utrdl_size = sizeof(struct utp_transfer_req_desc) * nutrs;
	dmam_free_coherent(hba->dev, utrdl_size, hba->utrdl_base_addr,
			   hba->utrdl_dma_addr);

	devm_kfree(hba->dev, hba->lrb);
}
static int ufshcd_alloc_mcq(struct ufs_hba *hba)
{
	int ret;
	int old_nutrs = hba->nutrs;

	ret = ufshcd_mcq_decide_queue_depth(hba);
	if (ret < 0)
		return ret;

	hba->nutrs = ret;
	ret = ufshcd_mcq_init(hba);
	if (ret)
		goto err;

	/*
	 * Previously allocated memory for nutrs may not be enough in MCQ mode.
	 * Number of supported tags in MCQ mode may be larger than SDB mode.
	 */
	if (hba->nutrs != old_nutrs) {
		ufshcd_release_sdb_queue(hba, old_nutrs);
		ret = ufshcd_memory_alloc(hba);
		if (ret)
			goto err;
		ufshcd_host_memory_configure(hba);
	}

	ret = ufshcd_mcq_memory_alloc(hba);
	if (ret)
		goto err;

	return 0;
err:
	hba->nutrs = old_nutrs;
	return ret;
}
static void ufshcd_config_mcq(struct ufs_hba *hba)
{
	int ret;
	u32 intrs;

	ret = ufshcd_mcq_vops_config_esi(hba);
	dev_info(hba->dev, "ESI %sconfigured\n", ret ? "is not " : "");

	intrs = UFSHCD_ENABLE_MCQ_INTRS;
	if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_INTR)
		intrs &= ~MCQ_CQ_EVENT_STATUS;
	ufshcd_enable_intr(hba, intrs);
	ufshcd_mcq_make_queues_operational(hba);
	ufshcd_mcq_config_mac(hba, hba->nutrs);

	hba->host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
	hba->reserved_slot = hba->nutrs - UFSHCD_NUM_RESERVED;

	/* Select MCQ mode */
	ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x1,
		      REG_UFS_MEM_CFG);
	hba->mcq_enabled = true;

	dev_info(hba->dev, "MCQ configured, nr_queues=%d, io_queues=%d, read_queue=%d, poll_queues=%d, queue_depth=%d\n",
		 hba->nr_hw_queues, hba->nr_queues[HCTX_TYPE_DEFAULT],
		 hba->nr_queues[HCTX_TYPE_READ], hba->nr_queues[HCTX_TYPE_POLL],
		 hba->nutrs);
}
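/*
 * Sizing sketch (illustrative, assumed queue depth): with hba->nutrs == 64
 * and UFSHCD_NUM_RESERVED == 1, the SCSI host may queue 63 regular commands
 * while the last tag (reserved_slot == 63) is kept back for device
 * management commands.
 */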
static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
{
	int ret;
	struct Scsi_Host *host = hba->host;

	hba->ufshcd_state = UFSHCD_STATE_RESET;

	ret = ufshcd_link_startup(hba);
	if (ret)
		return ret;

	if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION)
		return ret;

	/* Debug counters initialization */
	ufshcd_clear_dbg_ufs_stats(hba);

	/* UniPro link is active now */
	ufshcd_set_link_active(hba);

	/* Reconfigure MCQ upon reset */
	if (is_mcq_enabled(hba) && !init_dev_params)
		ufshcd_config_mcq(hba);

	/* Verify device initialization by sending NOP OUT UPIU */
	ret = ufshcd_verify_dev_init(hba);
	if (ret)
		return ret;

	/* Initiate UFS initialization, and wait until completion */
	ret = ufshcd_complete_dev_init(hba);
	if (ret)
		return ret;

	/*
	 * Initialize UFS device parameters used by driver, these
	 * parameters are associated with UFS descriptors.
	 */
	if (init_dev_params) {
		ret = ufshcd_device_params_init(hba);
		if (ret)
			return ret;
		if (is_mcq_supported(hba) && !hba->scsi_host_added) {
			ret = ufshcd_alloc_mcq(hba);
			if (!ret) {
				ufshcd_config_mcq(hba);
			} else {
				/* Continue with SDB mode */
				use_mcq_mode = false;
				dev_err(hba->dev, "MCQ mode is disabled, err=%d\n",
					 ret);
			}
			ret = scsi_add_host(host, hba->dev);
			if (ret) {
				dev_err(hba->dev, "scsi_add_host failed\n");
				return ret;
			}
			hba->scsi_host_added = true;
		} else if (is_mcq_supported(hba)) {
			/* UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH is set */
			ufshcd_config_mcq(hba);
		}
	}

	ufshcd_tune_unipro_params(hba);

	/* UFS device is also active now */
	ufshcd_set_ufs_dev_active(hba);
	ufshcd_force_reset_auto_bkops(hba);

	ufshcd_set_timestamp_attr(hba);
	schedule_delayed_work(&hba->ufs_rtc_update_work,
			      msecs_to_jiffies(UFS_RTC_UPDATE_INTERVAL_MS));

	/* Gear up to HS gear if supported */
	if (hba->max_pwr_info.is_valid) {
		/*
		 * Set the right value to bRefClkFreq before attempting to
		 * switch to HS gears.
		 */
		if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
			ufshcd_set_dev_ref_clk(hba);
		ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
		if (ret) {
			dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
					__func__, ret);
			return ret;
		}
	}

	return 0;
}
/**
 * ufshcd_probe_hba - probe hba to detect device and initialize it
 * @hba: per-adapter instance
 * @init_dev_params: whether or not to call ufshcd_device_params_init().
 *
 * Execute link-startup and verify device initialization
 *
 * Return: 0 upon success; < 0 upon failure.
 */
static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
{
	ktime_t start = ktime_get();
	unsigned long flags;
	int ret;

	ret = ufshcd_device_init(hba, init_dev_params);
	if (ret)
		goto out;

	if (!hba->pm_op_in_progress &&
	    (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH)) {
		/* Reset the device and controller before doing reinit */
		ufshcd_device_reset(hba);
		ufshcd_hba_stop(hba);
		ufshcd_vops_reinit_notify(hba);
		ret = ufshcd_hba_enable(hba);
		if (ret) {
			dev_err(hba->dev, "Host controller enable failed\n");
			ufshcd_print_evt_hist(hba);
			ufshcd_print_host_state(hba);
			goto out;
		}

		/* Reinit the device */
		ret = ufshcd_device_init(hba, init_dev_params);
		if (ret)
			goto out;
	}

	ufshcd_print_pwr_info(hba);

	/*
	 * bActiveICCLevel is volatile for UFS device (as per latest v2.1 spec)
	 * and for removable UFS card as well, hence always set the parameter.
	 * Note: Error handler may issue the device reset hence resetting
	 * bActiveICCLevel as well so it is always safe to set this here.
	 */
	ufshcd_set_active_icc_lvl(hba);

	/* Enable UFS Write Booster if supported */
	ufshcd_configure_wb(hba);

	if (hba->ee_usr_mask)
		ufshcd_write_ee_control(hba);
	ufshcd_configure_auto_hibern8(hba);

out:
	spin_lock_irqsave(hba->host->host_lock, flags);
	if (ret)
		hba->ufshcd_state = UFSHCD_STATE_ERROR;
	else if (hba->ufshcd_state == UFSHCD_STATE_RESET)
		hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	trace_ufshcd_init(dev_name(hba->dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	return ret;
}
/**
 * ufshcd_async_scan - asynchronous execution for probing hba
 * @data: data pointer to pass to this function
 * @cookie: cookie data
 */
static void ufshcd_async_scan(void *data, async_cookie_t cookie)
{
	struct ufs_hba *hba = (struct ufs_hba *)data;
	int ret;

	down(&hba->host_sem);
	/* Initialize hba, detect and initialize UFS device */
	ret = ufshcd_probe_hba(hba, true);
	up(&hba->host_sem);
	if (ret)
		goto out;

	/* Probe and add UFS logical units */
	ret = ufshcd_add_lus(hba);

out:
	/*
	 * If we failed to initialize the device or the device is not
	 * present, turn off the power/clocks etc.
	 */
	if (ret) {
		pm_runtime_put_sync(hba->dev);
		ufshcd_hba_exit(hba);
	}
}
static enum scsi_timeout_action ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
{
	struct ufs_hba *hba = shost_priv(scmd->device->host);

	if (!hba->system_suspending) {
		/* Activate the error handler in the SCSI core. */
		return SCSI_EH_NOT_HANDLED;
	}

	/*
	 * If we get here we know that no TMFs are outstanding and also that
	 * the only pending command is a START STOP UNIT command. Handle the
	 * timeout of that command directly to prevent a deadlock between
	 * ufshcd_set_dev_pwr_mode() and ufshcd_err_handler().
	 */
	ufshcd_link_recovery(hba);
	dev_info(hba->dev, "%s() finished; outstanding_tasks = %#lx.\n",
		 __func__, hba->outstanding_tasks);

	return hba->outstanding_reqs ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE;
}
static const struct attribute_group *ufshcd_driver_groups[] = {
	&ufs_sysfs_unit_descriptor_group,
	&ufs_sysfs_lun_attributes_group,
	NULL,
};
static struct ufs_hba_variant_params ufs_hba_vps = {
	.hba_enable_delay_us		= 1000,
	.wb_flush_threshold		= UFS_WB_BUF_REMAIN_PERCENT(40),
	.devfreq_profile.polling_ms	= 100,
	.devfreq_profile.target		= ufshcd_devfreq_target,
	.devfreq_profile.get_dev_status	= ufshcd_devfreq_get_dev_status,
	.ondemand_data.upthreshold	= 70,
	.ondemand_data.downdifferential	= 5,
};
static const struct scsi_host_template ufshcd_driver_template = {
	.module			= THIS_MODULE,
	.name			= UFSHCD,
	.proc_name		= UFSHCD,
	.map_queues		= ufshcd_map_queues,
	.queuecommand		= ufshcd_queuecommand,
	.mq_poll		= ufshcd_poll,
	.slave_alloc		= ufshcd_slave_alloc,
	.slave_configure	= ufshcd_slave_configure,
	.slave_destroy		= ufshcd_slave_destroy,
	.change_queue_depth	= ufshcd_change_queue_depth,
	.eh_abort_handler	= ufshcd_abort,
	.eh_device_reset_handler = ufshcd_eh_device_reset_handler,
	.eh_host_reset_handler   = ufshcd_eh_host_reset_handler,
	.eh_timed_out		= ufshcd_eh_timed_out,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.cmd_per_lun		= UFSHCD_CMD_PER_LUN,
	.can_queue		= UFSHCD_CAN_QUEUE,
	.max_segment_size	= PRDT_DATA_BYTE_COUNT_MAX,
	.max_sectors		= SZ_1M / SECTOR_SIZE,
	.max_host_blocked	= 1,
	.track_queue_depth	= 1,
	.skip_settle_delay	= 1,
	.sdev_groups		= ufshcd_driver_groups,
	.rpm_autosuspend_delay	= RPM_AUTOSUSPEND_DELAY_MS,
};
static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
				   int ua)
{
	int ret;

	if (!vreg)
		return 0;

	/*
	 * "set_load" operation shall be required on those regulators
	 * which specifically configured current limitation. Otherwise
	 * zero max_uA may cause unexpected behavior when regulator is
	 * enabled or set as high power mode.
	 */
	if (!vreg->max_uA)
		return 0;

	ret = regulator_set_load(vreg->reg, ua);
	if (ret < 0) {
		dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
				__func__, vreg->name, ua, ret);
	}

	return ret;
}
static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
					 struct ufs_vreg *vreg)
{
	return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
}

static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
					 struct ufs_vreg *vreg)
{
	if (!vreg)
		return 0;

	return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
}
static int ufshcd_config_vreg(struct device *dev,
		struct ufs_vreg *vreg, bool on)
{
	if (regulator_count_voltages(vreg->reg) <= 0)
		return 0;

	return ufshcd_config_vreg_load(dev, vreg, on ? vreg->max_uA : 0);
}
static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (!vreg || vreg->enabled)
		goto out;

	ret = ufshcd_config_vreg(dev, vreg, true);
	if (!ret)
		ret = regulator_enable(vreg->reg);

	if (!ret)
		vreg->enabled = true;
	else
		dev_err(dev, "%s: %s enable failed, err=%d\n",
				__func__, vreg->name, ret);
out:
	return ret;
}
static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (!vreg || !vreg->enabled || vreg->always_on)
		goto out;

	ret = regulator_disable(vreg->reg);

	if (!ret) {
		/* ignore errors on applying disable config */
		ufshcd_config_vreg(dev, vreg, false);
		vreg->enabled = false;
	} else {
		dev_err(dev, "%s: %s disable failed, err=%d\n",
				__func__, vreg->name, ret);
	}
out:
	return ret;
}
static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
{
	int ret = 0;
	struct device *dev = hba->dev;
	struct ufs_vreg_info *info = &hba->vreg_info;

	ret = ufshcd_toggle_vreg(dev, info->vcc, on);
	if (ret)
		goto out;

	ret = ufshcd_toggle_vreg(dev, info->vccq, on);
	if (ret)
		goto out;

	ret = ufshcd_toggle_vreg(dev, info->vccq2, on);

out:
	if (ret) {
		ufshcd_toggle_vreg(dev, info->vccq2, false);
		ufshcd_toggle_vreg(dev, info->vccq, false);
		ufshcd_toggle_vreg(dev, info->vcc, false);
	}
	return ret;
}
static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
{
	struct ufs_vreg_info *info = &hba->vreg_info;

	return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
}
int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (!vreg)
		goto out;

	vreg->reg = devm_regulator_get(dev, vreg->name);
	if (IS_ERR(vreg->reg)) {
		ret = PTR_ERR(vreg->reg);
		dev_err(dev, "%s: %s get failed, err=%d\n",
				__func__, vreg->name, ret);
	}
out:
	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_get_vreg);
static int ufshcd_init_vreg(struct ufs_hba *hba)
{
	int ret = 0;
	struct device *dev = hba->dev;
	struct ufs_vreg_info *info = &hba->vreg_info;

	ret = ufshcd_get_vreg(dev, info->vcc);
	if (ret)
		goto out;

	ret = ufshcd_get_vreg(dev, info->vccq);
	if (!ret)
		ret = ufshcd_get_vreg(dev, info->vccq2);
out:
	return ret;
}
static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
{
	struct ufs_vreg_info *info = &hba->vreg_info;

	return ufshcd_get_vreg(hba->dev, info->vdd_hba);
}
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;
	unsigned long flags;
	ktime_t start = ktime_get();
	bool clk_state_changed = false;

	if (list_empty(head))
		goto out;

	ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
	if (ret)
		return ret;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			/*
			 * Don't disable clocks which are needed
			 * to keep the link active.
			 */
			if (ufshcd_is_link_active(hba) &&
			    clki->keep_link_active)
				continue;

			clk_state_changed = on ^ clki->enabled;
			if (on && !clki->enabled) {
				ret = clk_prepare_enable(clki->clk);
				if (ret) {
					dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
						__func__, clki->name, ret);
					goto out;
				}
			} else if (!on && clki->enabled) {
				clk_disable_unprepare(clki->clk);
			}
			clki->enabled = on;
			dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
					clki->name, on ? "en" : "dis");
		}
	}

	ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
	if (ret)
		return ret;

out:
	if (ret) {
		list_for_each_entry(clki, head, list) {
			if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
				clk_disable_unprepare(clki->clk);
		}
	} else if (!ret && on) {
		spin_lock_irqsave(hba->host->host_lock, flags);
		hba->clk_gating.state = CLKS_ON;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	}

	if (clk_state_changed)
		trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
			(on ? "on" : "off"),
			ktime_to_us(ktime_sub(ktime_get(), start)), ret);
	return ret;
}
static enum ufs_ref_clk_freq ufshcd_parse_ref_clk_property(struct ufs_hba *hba)
{
	u32 freq;
	int ret = device_property_read_u32(hba->dev, "ref-clk-freq", &freq);

	if (ret) {
		dev_dbg(hba->dev, "Cannot query 'ref-clk-freq' property = %d", ret);
		return REF_CLK_FREQ_INVAL;
	}

	return ufs_get_bref_clk_from_hz(freq);
}
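/*
 * Property sketch (illustrative firmware fragment, not compiled): a platform
 * whose clock list has no "ref_clk" entry but feeds the device a 26 MHz
 * reference clock could describe that with
 *
 *	ref-clk-freq = <26000000>;
 *
 * which this helper maps to REF_CLK_FREQ_26_MHZ.
 */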
static int ufshcd_init_clocks(struct ufs_hba *hba)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct device *dev = hba->dev;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		goto out;

	list_for_each_entry(clki, head, list) {
		if (!clki->name)
			continue;

		clki->clk = devm_clk_get(dev, clki->name);
		if (IS_ERR(clki->clk)) {
			ret = PTR_ERR(clki->clk);
			dev_err(dev, "%s: %s clk get failed, %d\n",
					__func__, clki->name, ret);
			goto out;
		}

		/*
		 * Parse device ref clk freq as per device tree "ref_clk".
		 * Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL
		 * in ufshcd_alloc_host().
		 */
		if (!strcmp(clki->name, "ref_clk"))
			ufshcd_parse_dev_ref_clk_freq(hba, clki->clk);

		if (clki->max_freq) {
			ret = clk_set_rate(clki->clk, clki->max_freq);
			if (ret) {
				dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
					__func__, clki->name,
					clki->max_freq, ret);
				goto out;
			}
			clki->curr_freq = clki->max_freq;
		}
		dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
				clki->name, clk_get_rate(clki->clk));
	}

	/* Set Max. frequency for all clocks */
	if (hba->use_pm_opp) {
		ret = ufshcd_opp_set_rate(hba, ULONG_MAX);
		if (ret) {
			dev_err(hba->dev, "%s: failed to set OPP: %d", __func__,
				ret);
			goto out;
		}
	}

out:
	return ret;
}
static int ufshcd_variant_hba_init(struct ufs_hba *hba)
{
	int err = 0;

	if (!hba->vops)
		goto out;

	err = ufshcd_vops_init(hba);
	if (err)
		dev_err_probe(hba->dev, err,
			      "%s: variant %s init failed with err %d\n",
			      __func__, ufshcd_get_var_name(hba), err);
out:
	return err;
}

static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
{
	if (!hba->vops)
		return;

	ufshcd_vops_exit(hba);
}
static int ufshcd_hba_init(struct ufs_hba *hba)
{
	int err;

	/*
	 * Handle host controller power separately from the UFS device power
	 * rails as it will help controlling the UFS host controller power
	 * collapse easily which is different than UFS device power collapse.
	 * Also, enable the host controller power before we go ahead with rest
	 * of the initialization here.
	 */
	err = ufshcd_init_hba_vreg(hba);
	if (err)
		goto out;

	err = ufshcd_setup_hba_vreg(hba, true);
	if (err)
		goto out;

	err = ufshcd_init_clocks(hba);
	if (err)
		goto out_disable_hba_vreg;

	if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
		hba->dev_ref_clk_freq = ufshcd_parse_ref_clk_property(hba);

	err = ufshcd_setup_clocks(hba, true);
	if (err)
		goto out_disable_hba_vreg;

	err = ufshcd_init_vreg(hba);
	if (err)
		goto out_disable_clks;

	err = ufshcd_setup_vreg(hba, true);
	if (err)
		goto out_disable_clks;

	err = ufshcd_variant_hba_init(hba);
	if (err)
		goto out_disable_vreg;

	ufs_debugfs_hba_init(hba);
	ufs_fault_inject_hba_init(hba);

	hba->is_powered = true;
	goto out;

out_disable_vreg:
	ufshcd_setup_vreg(hba, false);
out_disable_clks:
	ufshcd_setup_clocks(hba, false);
out_disable_hba_vreg:
	ufshcd_setup_hba_vreg(hba, false);
out:
	return err;
}
static void ufshcd_hba_exit(struct ufs_hba *hba)
{
	if (hba->is_powered) {
		ufshcd_exit_clk_scaling(hba);
		ufshcd_exit_clk_gating(hba);
		if (hba->eh_wq)
			destroy_workqueue(hba->eh_wq);
		ufs_debugfs_hba_exit(hba);
		ufshcd_variant_hba_exit(hba);
		ufshcd_setup_vreg(hba, false);
		ufshcd_setup_clocks(hba, false);
		ufshcd_setup_hba_vreg(hba, false);
		hba->is_powered = false;
		ufs_put_device_desc(hba);
	}
}
static int ufshcd_execute_start_stop(struct scsi_device *sdev,
				     enum ufs_dev_pwr_mode pwr_mode,
				     struct scsi_sense_hdr *sshdr)
{
	const unsigned char cdb[6] = { START_STOP, 0, 0, 0, pwr_mode << 4, 0 };
	const struct scsi_exec_args args = {
		.sshdr = sshdr,
		.req_flags = BLK_MQ_REQ_PM,
		.scmd_flags = SCMD_FAIL_IF_RECOVERING,
	};

	return scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, /*buffer=*/NULL,
			/*bufflen=*/0, /*timeout=*/10 * HZ, /*retries=*/0,
			&args);
}
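/*
 * CDB sketch (illustrative): for pwr_mode == UFS_SLEEP_PWR_MODE (2), the
 * six-byte CDB built above is { START_STOP, 0, 0, 0, 0x20, 0 }; the requested
 * power condition always travels in the upper nibble of byte 4.
 */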
/**
 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
 *			     power mode
 * @hba: per adapter instance
 * @pwr_mode: device power mode to set
 *
 * Return: 0 if requested power mode is set successfully;
 *         < 0 if failed to set the requested power mode.
 */
static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
				     enum ufs_dev_pwr_mode pwr_mode)
{
	struct scsi_sense_hdr sshdr;
	struct scsi_device *sdp;
	unsigned long flags;
	int ret, retries;

	spin_lock_irqsave(hba->host->host_lock, flags);
	sdp = hba->ufs_device_wlun;
	if (sdp && scsi_device_online(sdp))
		ret = scsi_device_get(sdp);
	else
		ret = -ENODEV;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (ret)
		return ret;

	/*
	 * If scsi commands fail, the scsi mid-layer schedules scsi error-
	 * handling, which would wait for host to be resumed. Since we know
	 * we are functional while we are here, skip host resume in error
	 * handling context.
	 */
	hba->host->eh_noresume = 1;

	/*
	 * Current function would be generally called from the power management
	 * callbacks hence set the RQF_PM flag so that it doesn't resume the
	 * already suspended children.
	 */
	for (retries = 3; retries > 0; --retries) {
		ret = ufshcd_execute_start_stop(sdp, pwr_mode, &sshdr);
		/*
		 * scsi_execute() only returns a negative value if the request
		 * queue is dying.
		 */
		if (ret <= 0)
			break;
	}
	if (ret) {
		sdev_printk(KERN_WARNING, sdp,
			    "START_STOP failed for power mode: %d, result %x\n",
			    pwr_mode, ret);
		if (ret > 0) {
			if (scsi_sense_valid(&sshdr))
				scsi_print_sense_hdr(sdp, NULL, &sshdr);
			ret = -EIO;
		}
	} else {
		hba->curr_dev_pwr_mode = pwr_mode;
	}

	scsi_device_put(sdp);
	hba->host->eh_noresume = 0;
	return ret;
}
static int ufshcd_link_state_transition(struct ufs_hba *hba,
					enum uic_link_state req_link_state,
					bool check_for_bkops)
{
	int ret = 0;

	if (req_link_state == hba->uic_link_state)
		return 0;

	if (req_link_state == UIC_LINK_HIBERN8_STATE) {
		ret = ufshcd_uic_hibern8_enter(hba);
		if (!ret) {
			ufshcd_set_link_hibern8(hba);
		} else {
			dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
					__func__, ret);
			goto out;
		}
	}
	/*
	 * If autobkops is enabled, link can't be turned off because
	 * turning off the link would also turn off the device, except in the
	 * case of DeepSleep where the device is expected to remain powered.
	 */
	else if ((req_link_state == UIC_LINK_OFF_STATE) &&
		 (!check_for_bkops || !hba->auto_bkops_enabled)) {
		/*
		 * Let's make sure that link is in low power mode, we are doing
		 * this currently by putting the link in Hibern8. Another way
		 * to put the link in low power mode is to send the DME end
		 * point reset request to the device and then send the DME
		 * reset command to the local UniPro. But putting the link in
		 * Hibern8 is much faster.
		 *
		 * Note also that putting the link in Hibern8 is a requirement
		 * for entering DeepSleep.
		 */
		ret = ufshcd_uic_hibern8_enter(hba);
		if (ret) {
			dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
					__func__, ret);
			goto out;
		}
		/*
		 * Change controller state to "reset state" which
		 * should also put the link in off/reset state
		 */
		ufshcd_hba_stop(hba);
		/*
		 * TODO: Check if we need any delay to make sure that
		 * controller is reset
		 */
		ufshcd_set_link_off(hba);
	}

out:
	return ret;
}
static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
{
	bool vcc_off = false;

	/*
	 * It seems some UFS devices may keep drawing more than sleep current
	 * (at least for 500us) from UFS rails (especially from VCCQ rail).
	 * To avoid this situation, add 2ms delay before putting these UFS
	 * rails in LPM mode.
	 */
	if (!ufshcd_is_link_active(hba) &&
	    hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
		usleep_range(2000, 2100);

	/*
	 * If UFS device is in UFS_Sleep, turn off the VCC rail to save some
	 * power.
	 *
	 * If UFS device and link is in OFF state, all power supplies (VCC,
	 * VCCQ, VCCQ2) can be turned off if power on write protect is not
	 * required. If UFS link is inactive (Hibern8 or OFF state) and device
	 * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
	 *
	 * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
	 * in low power state which would save some power.
	 *
	 * If Write Booster is enabled and the device needs to flush the WB
	 * buffer OR if bkops status is urgent for WB, keep Vcc on.
	 */
	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
	    !hba->dev_info.is_lu_power_on_wp) {
		ufshcd_setup_vreg(hba, false);
		vcc_off = true;
	} else if (!ufshcd_is_ufs_dev_active(hba)) {
		ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
		vcc_off = true;
		if (ufshcd_is_link_hibern8(hba) || ufshcd_is_link_off(hba)) {
			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
		}
	}

	/*
	 * Some UFS devices require delay after VCC power rail is turned-off.
	 */
	if (vcc_off && hba->vreg_info.vcc &&
		hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)
		usleep_range(5000, 5100);
}
#ifdef CONFIG_PM
static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
{
	int ret = 0;

	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
	    !hba->dev_info.is_lu_power_on_wp) {
		ret = ufshcd_setup_vreg(hba, true);
	} else if (!ufshcd_is_ufs_dev_active(hba)) {
		if (!ufshcd_is_link_active(hba)) {
			ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
			if (ret)
				goto vcc_disable;
			ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
			if (ret)
				goto vccq_lpm;
		}
		ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
	}
	goto out;

vccq_lpm:
	ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
vcc_disable:
	ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
out:
	return ret;
}
#endif /* CONFIG_PM */
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
{
	if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
		ufshcd_setup_hba_vreg(hba, false);
}

static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
{
	if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
		ufshcd_setup_hba_vreg(hba, true);
}
static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int ret = 0;
	bool check_for_bkops;
	enum ufs_pm_level pm_lvl;
	enum ufs_dev_pwr_mode req_dev_pwr_mode;
	enum uic_link_state req_link_state;

	hba->pm_op_in_progress = true;
	if (pm_op != UFS_SHUTDOWN_PM) {
		pm_lvl = pm_op == UFS_RUNTIME_PM ?
			 hba->rpm_lvl : hba->spm_lvl;
		req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
		req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
	} else {
		req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
		req_link_state = UIC_LINK_OFF_STATE;
	}

	/*
	 * If we can't transition into any of the low power modes
	 * just gate the clocks.
	 */
	ufshcd_hold(hba);
	hba->clk_gating.is_suspended = true;

	if (ufshcd_is_clkscaling_supported(hba))
		ufshcd_clk_scaling_suspend(hba, true);

	if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
			req_link_state == UIC_LINK_ACTIVE_STATE) {
		goto vops_suspend;
	}

	if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
	    (req_link_state == hba->uic_link_state))
		goto enable_scaling;

	/* UFS device & link must be active before we enter in this function */
	if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
		ret = -EINVAL;
		goto enable_scaling;
	}

	if (pm_op == UFS_RUNTIME_PM) {
		if (ufshcd_can_autobkops_during_suspend(hba)) {
			/*
			 * The device is idle with no requests in the queue,
			 * allow background operations if bkops status shows
			 * that performance might be impacted.
			 */
			ret = ufshcd_urgent_bkops(hba);
			if (ret) {
				/*
				 * If return err in suspend flow, IO will hang.
				 * Trigger error handler and break suspend for
				 * error recovery.
				 */
				ufshcd_force_error_recovery(hba);
				ret = -EBUSY;
				goto enable_scaling;
			}
		} else {
			/* make sure that auto bkops is disabled */
			ufshcd_disable_auto_bkops(hba);
		}
		/*
		 * If device needs to do BKOP or WB buffer flush during
		 * Hibern8, keep device power mode as "active power mode"
		 * and VCC supply.
		 */
		hba->dev_info.b_rpm_dev_flush_capable =
			hba->auto_bkops_enabled ||
			(((req_link_state == UIC_LINK_HIBERN8_STATE) ||
			((req_link_state == UIC_LINK_ACTIVE_STATE) &&
			ufshcd_is_auto_hibern8_enabled(hba))) &&
			ufshcd_wb_need_flush(hba));
	}

	flush_work(&hba->eeh_work);

	ret = ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
	if (ret)
		goto enable_scaling;

	if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
		if (pm_op != UFS_RUNTIME_PM)
			/* ensure that bkops is disabled */
			ufshcd_disable_auto_bkops(hba);

		if (!hba->dev_info.b_rpm_dev_flush_capable) {
			ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
			if (ret && pm_op != UFS_SHUTDOWN_PM) {
				/*
				 * If return err in suspend flow, IO will hang.
				 * Trigger error handler and break suspend for
				 * error recovery.
				 */
				ufshcd_force_error_recovery(hba);
				ret = -EBUSY;
			}
			if (ret)
				goto enable_scaling;
		}
	}

	/*
	 * In the case of DeepSleep, the device is expected to remain powered
	 * with the link off, so do not check for bkops.
	 */
	check_for_bkops = !ufshcd_is_ufs_dev_deepsleep(hba);
	ret = ufshcd_link_state_transition(hba, req_link_state, check_for_bkops);
	if (ret && pm_op != UFS_SHUTDOWN_PM) {
		/*
		 * If return err in suspend flow, IO will hang.
		 * Trigger error handler and break suspend for
		 * error recovery.
		 */
		ufshcd_force_error_recovery(hba);
		ret = -EBUSY;
	}
	if (ret)
		goto set_dev_active;

vops_suspend:
	/*
	 * Call vendor specific suspend callback. As these callbacks may access
	 * vendor specific host controller register space call them before the
	 * host clocks are ON.
	 */
	ret = ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
	if (ret)
		goto set_link_active;

	cancel_delayed_work_sync(&hba->ufs_rtc_update_work);
	goto out;

set_link_active:
	/*
	 * Device hardware reset is required to exit DeepSleep. Also, for
	 * DeepSleep, the link is off so host reset and restore will be done
	 * further below.
	 */
	if (ufshcd_is_ufs_dev_deepsleep(hba)) {
		ufshcd_device_reset(hba);
		WARN_ON(!ufshcd_is_link_off(hba));
	}
	if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
		ufshcd_set_link_active(hba);
	else if (ufshcd_is_link_off(hba))
		ufshcd_host_reset_and_restore(hba);
set_dev_active:
	/* Can also get here needing to exit DeepSleep */
	if (ufshcd_is_ufs_dev_deepsleep(hba)) {
		ufshcd_device_reset(hba);
		ufshcd_host_reset_and_restore(hba);
	}
	if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
		ufshcd_disable_auto_bkops(hba);
enable_scaling:
	if (ufshcd_is_clkscaling_supported(hba))
		ufshcd_clk_scaling_suspend(hba, false);

	hba->dev_info.b_rpm_dev_flush_capable = false;
out:
	if (hba->dev_info.b_rpm_dev_flush_capable) {
		schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
			msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS));
	}

	if (ret) {
		ufshcd_update_evt_hist(hba, UFS_EVT_WL_SUSP_ERR, (u32)ret);
		hba->clk_gating.is_suspended = false;
		ufshcd_release(hba);
	}
	hba->pm_op_in_progress = false;
	return ret;
}
#ifdef CONFIG_PM
static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int ret;
	enum uic_link_state old_link_state = hba->uic_link_state;

	hba->pm_op_in_progress = true;

	/*
	 * Call vendor specific resume callback. As these callbacks may access
	 * vendor specific host controller register space call them when the
	 * host clocks are ON.
	 */
	ret = ufshcd_vops_resume(hba, pm_op);
	if (ret)
		goto out;

	/* For DeepSleep, the only supported option is to have the link off */
	WARN_ON(ufshcd_is_ufs_dev_deepsleep(hba) && !ufshcd_is_link_off(hba));

	if (ufshcd_is_link_hibern8(hba)) {
		ret = ufshcd_uic_hibern8_exit(hba);
		if (!ret) {
			ufshcd_set_link_active(hba);
		} else {
			dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
					__func__, ret);
			goto vendor_suspend;
		}
	} else if (ufshcd_is_link_off(hba)) {
		/*
		 * A full initialization of the host and the device is
		 * required since the link was put to off during suspend.
		 * Note, in the case of DeepSleep, the device will exit
		 * DeepSleep due to device reset.
		 */
		ret = ufshcd_reset_and_restore(hba);
		/*
		 * ufshcd_reset_and_restore() should have already
		 * set the link state as active
		 */
		if (ret || !ufshcd_is_link_active(hba))
			goto vendor_suspend;
	}

	if (!ufshcd_is_ufs_dev_active(hba)) {
		ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
		if (ret)
			goto set_old_link_state;
		ufshcd_set_timestamp_attr(hba);
		schedule_delayed_work(&hba->ufs_rtc_update_work,
				      msecs_to_jiffies(UFS_RTC_UPDATE_INTERVAL_MS));
	}

	if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
		ufshcd_enable_auto_bkops(hba);
	else
		/*
		 * If BKOPs operations are urgently needed at this moment then
		 * keep auto-bkops enabled or else disable it.
		 */
		ufshcd_urgent_bkops(hba);

	if (hba->ee_usr_mask)
		ufshcd_write_ee_control(hba);

	if (ufshcd_is_clkscaling_supported(hba))
		ufshcd_clk_scaling_suspend(hba, false);

	if (hba->dev_info.b_rpm_dev_flush_capable) {
		hba->dev_info.b_rpm_dev_flush_capable = false;
		cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
	}

	ufshcd_configure_auto_hibern8(hba);

	goto out;

set_old_link_state:
	ufshcd_link_state_transition(hba, old_link_state, 0);
vendor_suspend:
	ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
	ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
out:
	if (ret)
		ufshcd_update_evt_hist(hba, UFS_EVT_WL_RES_ERR, (u32)ret);
	hba->clk_gating.is_suspended = false;
	ufshcd_release(hba);
	hba->pm_op_in_progress = false;
	return ret;
}
static int ufshcd_wl_runtime_suspend(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufs_hba *hba;
	int ret;
	ktime_t start = ktime_get();

	hba = shost_priv(sdev->host);

	ret = __ufshcd_wl_suspend(hba, UFS_RUNTIME_PM);
	if (ret)
		dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);

	trace_ufshcd_wl_runtime_suspend(dev_name(dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);

	return ret;
}
static int ufshcd_wl_runtime_resume(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufs_hba *hba;
	int ret;
	ktime_t start = ktime_get();

	hba = shost_priv(sdev->host);

	ret = __ufshcd_wl_resume(hba, UFS_RUNTIME_PM);
	if (ret)
		dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);

	trace_ufshcd_wl_runtime_resume(dev_name(dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);

	return ret;
}
#endif /* CONFIG_PM */
#ifdef CONFIG_PM_SLEEP
static int ufshcd_wl_suspend(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufs_hba *hba;
	int ret = 0;
	ktime_t start = ktime_get();

	hba = shost_priv(sdev->host);
	down(&hba->host_sem);
	hba->system_suspending = true;

	if (pm_runtime_suspended(dev))
		goto out;

	ret = __ufshcd_wl_suspend(hba, UFS_SYSTEM_PM);
	if (ret) {
		dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
		up(&hba->host_sem);
	}

out:
	if (!ret)
		hba->is_sys_suspended = true;
	trace_ufshcd_wl_suspend(dev_name(dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);

	return ret;
}
static int ufshcd_wl_resume(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufs_hba *hba;
	int ret = 0;
	ktime_t start = ktime_get();

	hba = shost_priv(sdev->host);

	if (pm_runtime_suspended(dev))
		goto out;

	ret = __ufshcd_wl_resume(hba, UFS_SYSTEM_PM);
	if (ret)
		dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
out:
	trace_ufshcd_wl_resume(dev_name(dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	if (!ret)
		hba->is_sys_suspended = false;
	hba->system_suspending = false;
	up(&hba->host_sem);

	return ret;
}
#endif /* CONFIG_PM_SLEEP */
/**
 * ufshcd_suspend - helper function for suspend operations
 * @hba: per adapter instance
 *
 * This function will disable irqs, turn off clocks
 * and set vreg and hba-vreg in lpm mode.
 *
 * Return: 0 upon success; < 0 upon failure.
 */
static int ufshcd_suspend(struct ufs_hba *hba)
{
	int ret;

	if (!hba->is_powered)
		return 0;
	/*
	 * Disable the host irq as there won't be any
	 * host controller transaction expected till resume.
	 */
	ufshcd_disable_irq(hba);
	ret = ufshcd_setup_clocks(hba, false);
	if (ret) {
		ufshcd_enable_irq(hba);
		return ret;
	}
	if (ufshcd_is_clkgating_allowed(hba)) {
		hba->clk_gating.state = CLKS_OFF;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
	}

	ufshcd_vreg_set_lpm(hba);
	/* Put the host controller in low power mode if possible */
	ufshcd_hba_vreg_set_lpm(hba);
	return ret;
}
#ifdef CONFIG_PM
/**
 * ufshcd_resume - helper function for resume operations
 * @hba: per adapter instance
 *
 * This function basically turns on the regulators, clocks and
 * irqs of the hba.
 *
 * Return: 0 for success and non-zero for failure.
 */
static int ufshcd_resume(struct ufs_hba *hba)
{
	int ret;

	if (!hba->is_powered)
		return 0;

	ufshcd_hba_vreg_set_hpm(hba);
	ret = ufshcd_vreg_set_hpm(hba);
	if (ret)
		goto out;

	/* Make sure clocks are enabled before accessing controller */
	ret = ufshcd_setup_clocks(hba, true);
	if (ret)
		goto disable_vreg;

	/* enable the host irq as host controller would be active soon */
	ufshcd_enable_irq(hba);

	goto out;

disable_vreg:
	ufshcd_vreg_set_lpm(hba);
out:
	if (ret)
		ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret);
	return ret;
}
#endif /* CONFIG_PM */
#ifdef CONFIG_PM_SLEEP
/**
 * ufshcd_system_suspend - system suspend callback
 * @dev: Device associated with the UFS controller.
 *
 * Executed before putting the system into a sleep state in which the contents
 * of main memory are preserved.
 *
 * Return: 0 for success and non-zero for failure.
 */
int ufshcd_system_suspend(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret = 0;
	ktime_t start = ktime_get();

	if (pm_runtime_suspended(hba->dev))
		goto out;

	ret = ufshcd_suspend(hba);
out:
	trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	return ret;
}
EXPORT_SYMBOL(ufshcd_system_suspend);
/**
 * ufshcd_system_resume - system resume callback
 * @dev: Device associated with the UFS controller.
 *
 * Executed after waking the system up from a sleep state in which the contents
 * of main memory were preserved.
 *
 * Return: 0 for success and non-zero for failure.
 */
int ufshcd_system_resume(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	ktime_t start = ktime_get();
	int ret = 0;

	if (pm_runtime_suspended(hba->dev))
		goto out;

	ret = ufshcd_resume(hba);

out:
	trace_ufshcd_system_resume(dev_name(hba->dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);

	return ret;
}
EXPORT_SYMBOL(ufshcd_system_resume);
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
/**
 * ufshcd_runtime_suspend - runtime suspend callback
 * @dev: Device associated with the UFS controller.
 *
 * Check the description of ufshcd_suspend() function for more details.
 *
 * Return: 0 for success and non-zero for failure.
 */
int ufshcd_runtime_suspend(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret;
	ktime_t start = ktime_get();

	ret = ufshcd_suspend(hba);

	trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	return ret;
}
EXPORT_SYMBOL(ufshcd_runtime_suspend);
/**
 * ufshcd_runtime_resume - runtime resume routine
 * @dev: Device associated with the UFS controller.
 *
 * This function basically brings the controller to the active state.
 * The following operations are done in this function:
 *
 * 1. Turn on all the controller related clocks
 * 2. Turn ON VCC rail
 *
 * Return: 0 upon success; < 0 upon failure.
 */
int ufshcd_runtime_resume(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret;
	ktime_t start = ktime_get();

	ret = ufshcd_resume(hba);

	trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	return ret;
}
EXPORT_SYMBOL(ufshcd_runtime_resume);
#endif /* CONFIG_PM */
static void ufshcd_wl_shutdown(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufs_hba *hba = shost_priv(sdev->host);

	down(&hba->host_sem);
	hba->shutting_down = true;
	up(&hba->host_sem);

	/* Turn on everything while shutting down */
	ufshcd_rpm_get_sync(hba);
	scsi_device_quiesce(sdev);
	shost_for_each_device(sdev, hba->host) {
		if (sdev == hba->ufs_device_wlun)
			continue;
		scsi_device_quiesce(sdev);
	}
	__ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);

	/*
	 * Next, turn off the UFS controller and the UFS regulators. Disable
	 * clocks.
	 */
	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
		ufshcd_suspend(hba);

	hba->is_powered = false;
}
/**
 * ufshcd_remove - de-allocate SCSI host and host memory space
 *		data structure memory
 * @hba: per adapter instance
 */
void ufshcd_remove(struct ufs_hba *hba)
{
	if (hba->ufs_device_wlun)
		ufshcd_rpm_get_sync(hba);
	ufs_hwmon_remove(hba);
	ufs_bsg_remove(hba);
	ufs_sysfs_remove_nodes(hba->dev);
	blk_mq_destroy_queue(hba->tmf_queue);
	blk_put_queue(hba->tmf_queue);
	blk_mq_free_tag_set(&hba->tmf_tag_set);
	scsi_remove_host(hba->host);
	/* disable interrupts */
	ufshcd_disable_intr(hba, hba->intr_mask);
	ufshcd_hba_stop(hba);
	ufshcd_hba_exit(hba);
}
EXPORT_SYMBOL_GPL(ufshcd_remove);
#ifdef CONFIG_PM_SLEEP
int ufshcd_system_freeze(struct device *dev)
{
	return ufshcd_system_suspend(dev);
}
EXPORT_SYMBOL_GPL(ufshcd_system_freeze);

int ufshcd_system_restore(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret;

	ret = ufshcd_system_resume(dev);
	if (ret)
		return ret;

	/* Configure UTRL and UTMRL base address registers */
	ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
			REG_UTP_TRANSFER_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
			REG_UTP_TRANSFER_REQ_LIST_BASE_H);
	ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
			REG_UTP_TASK_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
			REG_UTP_TASK_REQ_LIST_BASE_H);

	/*
	 * Make sure that UTRL and UTMRL base address registers
	 * are updated with the latest queue addresses. Only after
	 * updating these addresses, we can queue the new commands.
	 */
	mb();

	/* Resuming from hibernate, assume that link was OFF */
	ufshcd_set_link_off(hba);

	return 0;
}
EXPORT_SYMBOL_GPL(ufshcd_system_restore);

int ufshcd_system_thaw(struct device *dev)
{
	return ufshcd_system_resume(dev);
}
EXPORT_SYMBOL_GPL(ufshcd_system_thaw);
#endif /* CONFIG_PM_SLEEP */
/**
 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
 * @hba: pointer to Host Bus Adapter (HBA)
 */
void ufshcd_dealloc_host(struct ufs_hba *hba)
{
	scsi_host_put(hba->host);
}
EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
/**
 * ufshcd_set_dma_mask - Set dma mask based on the controller
 *			 addressing capability
 * @hba: per adapter instance
 *
 * Return: 0 for success, non-zero for failure.
 */
static int ufshcd_set_dma_mask(struct ufs_hba *hba)
{
	if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
		if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
			return 0;
	}
	return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
}
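/*
 * Fallback sketch (illustrative): a controller advertising 64-bit addressing
 * first tries DMA_BIT_MASK(64); only if the platform rejects that does the
 * driver fall back to the 32-bit mask, which confines the host memory space
 * allocations to the low 4 GiB.
 */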
/**
 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
 * @dev: pointer to device handle
 * @hba_handle: driver private handle
 *
 * Return: 0 on success, non-zero value on failure.
 */
int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	int err = 0;

	if (!dev) {
		dev_err(dev,
		"Invalid memory reference for dev is NULL\n");
		err = -ENODEV;
		goto out_error;
	}

	host = scsi_host_alloc(&ufshcd_driver_template,
				sizeof(struct ufs_hba));
	if (!host) {
		dev_err(dev, "scsi_host_alloc failed\n");
		err = -ENOMEM;
		goto out_error;
	}
	host->nr_maps = HCTX_TYPE_POLL + 1;
	hba = shost_priv(host);
	hba->host = host;
	hba->dev = dev;
	hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
	hba->nop_out_timeout = NOP_OUT_TIMEOUT;
	ufshcd_set_sg_entry_size(hba, sizeof(struct ufshcd_sg_entry));
	INIT_LIST_HEAD(&hba->clk_list_head);
	spin_lock_init(&hba->outstanding_lock);

	*hba_handle = hba;

out_error:
	return err;
}
EXPORT_SYMBOL(ufshcd_alloc_host);
/* This function exists because blk_mq_alloc_tag_set() requires this. */
static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
				     const struct blk_mq_queue_data *qd)
{
	WARN_ON_ONCE(true);
	return BLK_STS_NOTSUPP;
}

static const struct blk_mq_ops ufshcd_tmf_ops = {
	.queue_rq = ufshcd_queue_tmf,
};
10454 * ufshcd_init - Driver initialization routine
10455 * @hba: per-adapter instance
10456 * @mmio_base: base register address
10457 * @irq: Interrupt line of device
10459 * Return: 0 on success, non-zero value on failure.
10461 int ufshcd_init(struct ufs_hba
*hba
, void __iomem
*mmio_base
, unsigned int irq
)
10464 struct Scsi_Host
*host
= hba
->host
;
10465 struct device
*dev
= hba
->dev
;
10466 char eh_wq_name
[sizeof("ufs_eh_wq_00")];
10469 * dev_set_drvdata() must be called before any callbacks are registered
10470 * that use dev_get_drvdata() (frequency scaling, clock scaling, hwmon,
10473 dev_set_drvdata(dev
, hba
);
10477 "Invalid memory reference for mmio_base is NULL\n");
10482 hba
->mmio_base
= mmio_base
;
10484 hba
->vps
= &ufs_hba_vps
;
10486 err
= ufshcd_hba_init(hba
);
10490 /* Read capabilities registers */
10491 err
= ufshcd_hba_capabilities(hba
);
10495 /* Get UFS version supported by the controller */
10496 hba
->ufs_version
= ufshcd_get_ufs_version(hba
);
10498 /* Get Interrupt bit mask per version */
10499 hba
->intr_mask
= ufshcd_get_intr_mask(hba
);
10501 err
= ufshcd_set_dma_mask(hba
);
10503 dev_err(hba
->dev
, "set dma mask failed\n");
10507 /* Allocate memory for host memory space */
10508 err
= ufshcd_memory_alloc(hba
);
10510 dev_err(hba
->dev
, "Memory allocation failed\n");
10514 /* Configure LRB */
10515 ufshcd_host_memory_configure(hba
);
10517 host
->can_queue
= hba
->nutrs
- UFSHCD_NUM_RESERVED
;
10518 host
->cmd_per_lun
= hba
->nutrs
- UFSHCD_NUM_RESERVED
;
10519 host
->max_id
= UFSHCD_MAX_ID
;
10520 host
->max_lun
= UFS_MAX_LUNS
;
10521 host
->max_channel
= UFSHCD_MAX_CHANNEL
;
10522 host
->unique_id
= host
->host_no
;
10523 host
->max_cmd_len
= UFS_CDB_SIZE
;
10524 host
->queuecommand_may_block
= !!(hba
->caps
& UFSHCD_CAP_CLK_GATING
);
10526 hba
->max_pwr_info
.is_valid
= false;
10528 /* Initialize work queues */
10529 snprintf(eh_wq_name
, sizeof(eh_wq_name
), "ufs_eh_wq_%d",
10530 hba
->host
->host_no
);
10531 hba
->eh_wq
= create_singlethread_workqueue(eh_wq_name
);
10533 dev_err(hba
->dev
, "%s: failed to create eh workqueue\n",
10538 INIT_WORK(&hba
->eh_work
, ufshcd_err_handler
);
10539 INIT_WORK(&hba
->eeh_work
, ufshcd_exception_event_handler
);
10541 sema_init(&hba
->host_sem
, 1);
10543 /* Initialize UIC command mutex */
10544 mutex_init(&hba
->uic_cmd_mutex
);
10546 /* Initialize mutex for device management commands */
10547 mutex_init(&hba
->dev_cmd
.lock
);
10549 /* Initialize mutex for exception event control */
10550 mutex_init(&hba
->ee_ctrl_mutex
);
10552 mutex_init(&hba
->wb_mutex
);
10553 init_rwsem(&hba
->clk_scaling_lock
);
10555 ufshcd_init_clk_gating(hba
);
10557 ufshcd_init_clk_scaling(hba
);
10560 * In order to avoid any spurious interrupt immediately after
10561 * registering UFS controller interrupt handler, clear any pending UFS
10562 * interrupt status and disable all the UFS interrupts.
10564 ufshcd_writel(hba
, ufshcd_readl(hba
, REG_INTERRUPT_STATUS
),
10565 REG_INTERRUPT_STATUS
);
10566 ufshcd_writel(hba
, 0, REG_INTERRUPT_ENABLE
);
10568 * Make sure that UFS interrupts are disabled and any pending interrupt
10569 * status is cleared before registering UFS interrupt handler.
10573 /* IRQ registration */
10574 err
= devm_request_irq(dev
, irq
, ufshcd_intr
, IRQF_SHARED
, UFSHCD
, hba
);
10576 dev_err(hba
->dev
, "request irq failed\n");
10579 hba
->is_irq_enabled
= true;
10582 if (!is_mcq_supported(hba
)) {
10583 err
= scsi_add_host(host
, hba
->dev
);
10585 dev_err(hba
->dev
, "scsi_add_host failed\n");
10590 hba
->tmf_tag_set
= (struct blk_mq_tag_set
) {
10592 .queue_depth
= hba
->nutmrs
,
10593 .ops
= &ufshcd_tmf_ops
,
10594 .flags
= BLK_MQ_F_NO_SCHED
,
10596 err
= blk_mq_alloc_tag_set(&hba
->tmf_tag_set
);
10598 goto out_remove_scsi_host
;
10599 hba
->tmf_queue
= blk_mq_init_queue(&hba
->tmf_tag_set
);
10600 if (IS_ERR(hba
->tmf_queue
)) {
10601 err
= PTR_ERR(hba
->tmf_queue
);
10602 goto free_tmf_tag_set
;
10604 hba
->tmf_rqs
= devm_kcalloc(hba
->dev
, hba
->nutmrs
,
10605 sizeof(*hba
->tmf_rqs
), GFP_KERNEL
);
10606 if (!hba
->tmf_rqs
) {
10608 goto free_tmf_queue
;
10611 /* Reset the attached device */
10612 ufshcd_device_reset(hba
);
10614 ufshcd_init_crypto(hba
);
10616 /* Host controller enable */
10617 err
= ufshcd_hba_enable(hba
);
10619 dev_err(hba
->dev
, "Host controller enable failed\n");
10620 ufshcd_print_evt_hist(hba
);
10621 ufshcd_print_host_state(hba
);
10622 goto free_tmf_queue
;
	/*
	 * Set the default power management level for runtime and system PM.
	 * Default power saving mode is to keep UFS link in Hibern8 state
	 * and UFS device in sleep state.
	 */
	hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
						UFS_SLEEP_PWR_MODE,
						UIC_LINK_HIBERN8_STATE);
	hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
						UFS_SLEEP_PWR_MODE,
						UIC_LINK_HIBERN8_STATE);

	INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work,
			  ufshcd_rpm_dev_flush_recheck_work);
	INIT_DELAYED_WORK(&hba->ufs_rtc_update_work, ufshcd_rtc_work);
	/* Set the default auto-hibern8 idle timer value to 150 ms */
	if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
		hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
			    FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
	}
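	/*
	 * An AHIT scale field of 3 selects 1 ms units, so the timer value of
	 * 150 above encodes the 150 ms auto-hibern8 idle timeout.
	 */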
	/* Hold auto suspend until async scan completes */
	pm_runtime_get_sync(dev);
	atomic_set(&hba->scsi_block_reqs_cnt, 0);
	/*
	 * We are assuming that device wasn't put in sleep/power-down
	 * state exclusively during the boot stage before kernel.
	 * This assumption helps avoid doing link startup twice during
	 * ufshcd_probe_hba().
	 */
	ufshcd_set_ufs_dev_active(hba);

	async_schedule(ufshcd_async_scan, hba);
	ufs_sysfs_add_nodes(hba->dev);

	device_enable_async_suspend(dev);
	return 0;
free_tmf_queue:
	blk_mq_destroy_queue(hba->tmf_queue);
	blk_put_queue(hba->tmf_queue);
free_tmf_tag_set:
	blk_mq_free_tag_set(&hba->tmf_tag_set);
out_remove_scsi_host:
	scsi_remove_host(hba->host);
out_disable:
	hba->is_irq_enabled = false;
	ufshcd_hba_exit(hba);
	return err;
}
EXPORT_SYMBOL_GPL(ufshcd_init);
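/*
 * ufshcd_resume_complete - drop the runtime PM reference that was taken in
 * the suspend_prepare path to keep the device awake across system suspend.
 */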
void ufshcd_resume_complete(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	if (hba->complete_put) {
		ufshcd_rpm_put(hba);
		hba->complete_put = false;
	}
}
EXPORT_SYMBOL_GPL(ufshcd_resume_complete);
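/*
 * Check whether the device and link are already runtime suspended in exactly
 * the state that system suspend (spm_lvl) would put them in; if so, system
 * suspend can skip a redundant resume/suspend cycle.
 */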
static bool ufshcd_rpm_ok_for_spm(struct ufs_hba *hba)
{
	struct device *dev = &hba->ufs_device_wlun->sdev_gendev;
	enum ufs_dev_pwr_mode dev_pwr_mode;
	enum uic_link_state link_state;
	unsigned long flags;
	bool res;

	spin_lock_irqsave(&dev->power.lock, flags);
	dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl);
	link_state = ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl);
	res = pm_runtime_suspended(dev) &&
	      hba->curr_dev_pwr_mode == dev_pwr_mode &&
	      hba->uic_link_state == link_state &&
	      !hba->dev_info.b_rpm_dev_flush_capable;
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return res;
}
int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret;

	/*
	 * SCSI assumes that runtime-pm and system-pm for scsi drivers
	 * are the same, and it doesn't wake up the device for system-suspend
	 * if it's runtime suspended. But UFS doesn't follow that.
	 * Refer to ufshcd_resume_complete().
	 */
	if (hba->ufs_device_wlun) {
		/* Prevent runtime suspend */
		ufshcd_rpm_get_noresume(hba);
		/*
		 * Check if already runtime suspended in same state as system
		 * suspend would be.
		 */
		if (!rpm_ok_for_spm || !ufshcd_rpm_ok_for_spm(hba)) {
			/* RPM state is not ok for SPM, so runtime resume */
			ret = ufshcd_rpm_resume(hba);
			if (ret < 0 && ret != -EACCES) {
				ufshcd_rpm_put(hba);
				return ret;
			}
		}
		hba->complete_put = true;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__ufshcd_suspend_prepare);
int ufshcd_suspend_prepare(struct device *dev)
{
	return __ufshcd_suspend_prepare(dev, true);
}
EXPORT_SYMBOL_GPL(ufshcd_suspend_prepare);
#ifdef CONFIG_PM_SLEEP
static int ufshcd_wl_poweroff(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufs_hba *hba = shost_priv(sdev->host);

	__ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
	return 0;
}
#endif
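/*
 * Bind only to the UFS device WLUN and enable runtime PM for it right away;
 * a zero autosuspend delay lets it suspend as soon as it goes idle.
 */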
static int ufshcd_wl_probe(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	if (!is_device_wlun(sdev))
		return -ENODEV;

	blk_pm_runtime_init(sdev->request_queue, dev);
	pm_runtime_set_autosuspend_delay(dev, 0);
	pm_runtime_allow(dev);

	return 0;
}

static int ufshcd_wl_remove(struct device *dev)
{
	pm_runtime_forbid(dev);
	return 0;
}
static const struct dev_pm_ops ufshcd_wl_pm_ops = {
#ifdef CONFIG_PM_SLEEP
	.suspend = ufshcd_wl_suspend,
	.resume = ufshcd_wl_resume,
	.freeze = ufshcd_wl_suspend,
	.thaw = ufshcd_wl_resume,
	.poweroff = ufshcd_wl_poweroff,
	.restore = ufshcd_wl_resume,
#endif
	SET_RUNTIME_PM_OPS(ufshcd_wl_runtime_suspend, ufshcd_wl_runtime_resume, NULL)
};
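/*
 * Compile-time sanity checks: verify that the C bitfield layout of the
 * request descriptor and UPIU headers matches the byte layout mandated by
 * the UFSHCI specification.
 */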
static void ufshcd_check_header_layout(void)
{
	/*
	 * gcc compilers before version 10 cannot do constant-folding for
	 * sub-byte bitfields. Hence skip the layout checks for gcc 9 and
	 * earlier.
	 */
	if (IS_ENABLED(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 100000)
		return;

	BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
				.cci = 3})[0] != 3);

	BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
				.ehs_length = 2})[1] != 2);

	BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
				.enable_crypto = 1})[2]
		     != 0x80);

	BUILD_BUG_ON((((u8 *)&(struct request_desc_header){
					.command_type = 5,
					.data_direction = 3,
					.interrupt = 1,
				})[3]) != ((5 << 4) | (3 << 1) | 1));

	BUILD_BUG_ON(((__le32 *)&(struct request_desc_header){
				.dunl = cpu_to_le32(0xdeadbeef)})[1] !=
		cpu_to_le32(0xdeadbeef));

	BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
				.ocs = 4})[8] != 4);

	BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
				.cds = 5})[9] != 5);

	BUILD_BUG_ON(((__le32 *)&(struct request_desc_header){
				.dunu = cpu_to_le32(0xbadcafe)})[3] !=
		cpu_to_le32(0xbadcafe));

	BUILD_BUG_ON(((u8 *)&(struct utp_upiu_header){
			       .iid = 0xf })[4] != 0xf0);

	BUILD_BUG_ON(((u8 *)&(struct utp_upiu_header){
			       .command_set_type = 0xf })[4] != 0xf);
}
/*
 * ufs_dev_wlun_template - describes ufs device wlun
 * ufs-device wlun - used to send pm commands
 * All luns are consumers of ufs-device wlun.
 *
 * Currently, no sd driver is present for wluns.
 * Hence no specific pm operations are performed.
 * With ufs design, SSU should be sent to ufs-device wlun.
 * Hence register a scsi driver for ufs wluns only.
 */
static struct scsi_driver ufs_dev_wlun_template = {
	.gendrv = {
		.name = "ufs_device_wlun",
		.owner = THIS_MODULE,
		.probe = ufshcd_wl_probe,
		.remove = ufshcd_wl_remove,
		.pm = &ufshcd_wl_pm_ops,
		.shutdown = ufshcd_wl_shutdown,
	},
};
static int __init ufshcd_core_init(void)
{
	int ret;

	ufshcd_check_header_layout();

	ufs_debugfs_init();

	ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv);
	if (ret)
		ufs_debugfs_exit();
	return ret;
}

static void __exit ufshcd_core_exit(void)
{
	ufs_debugfs_exit();
	scsi_unregister_driver(&ufs_dev_wlun_template.gendrv);
}
module_init(ufshcd_core_init);
module_exit(ufshcd_core_exit);

MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("Generic UFS host controller driver Core");
MODULE_SOFTDEP("pre: governor_simpleondemand");
MODULE_LICENSE("GPL");