// SPDX-License-Identifier: GPL-2.0+
/**
 * ufs.c - Universal Flash Subsystem (UFS) driver
 *
 * Taken from Linux Kernel v5.2 (drivers/scsi/ufs/ufshcd.c) and ported
 * to U-Boot.
 *
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
 */

#include <charset.h>
#include <common.h>
#include <dm.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <dm/device-internal.h>
#include <malloc.h>
#include <scsi.h>
#include <asm/dma-mapping.h>

#include "ufs.h"

#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)

/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES		3

/* maximum number of retries for a general UIC command */
#define UFS_UIC_COMMAND_RETRIES		3

/* Query request retries */
#define QUERY_REQ_RETRIES		3
/* Query request timeout */
#define QUERY_REQ_TIMEOUT		1500 /* 1.5 seconds */

/* maximum timeout in ms for a general UIC command */
#define UFS_UIC_CMD_TIMEOUT		1000
/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES			10
/* Timeout after 30 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT			30 /* msecs */

/* Only use one Task Tag for all requests */
#define TASK_TAG			0

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC	0xFF

#define MAX_PRDT_ENTRY			262144

/* maximum bytes per request */
#define UFS_MAX_BYTES			(128 * 256 * 1024)
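/*
 * Note: with MAX_PRDT_ENTRY bytes (256 KiB) per PRDT entry, UFS_MAX_BYTES
 * (128 * 256 KiB = 32 MiB) is simply the largest transfer that 128 such
 * entries can describe, i.e. the cap for a single request built around the
 * one task tag above.
 */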
static inline bool ufshcd_is_hba_active(struct ufs_hba *hba);
static inline void ufshcd_hba_stop(struct ufs_hba *hba);
static int ufshcd_hba_enable(struct ufs_hba *hba);
/**
 * ufshcd_wait_for_register - wait for register value to change
 */
static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
				    u32 val, unsigned long timeout_ms)
{
	int err = 0;
	unsigned long start = get_timer(0);

	/* ignore bits that we don't intend to wait on */
	val = val & mask;

	while ((ufshcd_readl(hba, reg) & mask) != val) {
		if (get_timer(start) > timeout_ms) {
			if ((ufshcd_readl(hba, reg) & mask) != val)
				err = -ETIMEDOUT;
			break;
		}
	}

	return err;
}
/**
 * ufshcd_init_pwr_info - setting the POR (power on reset)
 * values in hba power info
 */
static void ufshcd_init_pwr_info(struct ufs_hba *hba)
{
	hba->pwr_info.gear_rx = UFS_PWM_G1;
	hba->pwr_info.gear_tx = UFS_PWM_G1;
	hba->pwr_info.lane_rx = 1;
	hba->pwr_info.lane_tx = 1;
	hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
	hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
	hba->pwr_info.hs_rate = 0;
}
/**
 * ufshcd_print_pwr_info - print power params as saved in hba
 * power info
 */
static void ufshcd_print_pwr_info(struct ufs_hba *hba)
{
	static const char * const names[] = {
		"INVALID MODE",
		"FAST MODE",
		"SLOW_MODE",
		"INVALID MODE",
		"FASTAUTO_MODE",
		"SLOWAUTO_MODE",
		"INVALID MODE",
	};

	dev_err(hba->dev, "[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
		hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
		hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
		names[hba->pwr_info.pwr_rx],
		names[hba->pwr_info.pwr_tx],
		hba->pwr_info.hs_rate);
}
/**
 * ufshcd_ready_for_uic_cmd - Check if controller is ready
 *                            to accept UIC commands
 */
static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
{
	if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
		return true;
	else
		return false;
}
/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
	       MASK_UIC_COMMAND_RESULT;
}
/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}
/**
 * ufshcd_is_device_present - Check if any device connected to
 *			      the host controller
 */
static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
		DEVICE_PRESENT) ? true : false;
}
/**
 * ufshcd_send_uic_cmd - UFS Interconnect layer command API
 */
static int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	unsigned long start = 0;
	u32 intr_status;
	u32 enabled_intr_status;

	if (!ufshcd_ready_for_uic_cmd(hba)) {
		dev_err(hba->dev,
			"Controller not ready to accept UIC commands\n");
		return -EIO;
	}

	debug("sending uic command:%d\n", uic_cmd->command);

	/* Write Args */
	ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
	ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
	ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);

	/* Write UIC Cmd */
	ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
		      REG_UIC_COMMAND);

	start = get_timer(0);
	do {
		intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
		enabled_intr_status = intr_status & hba->intr_mask;
		ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);

		if (get_timer(start) > UFS_UIC_CMD_TIMEOUT) {
			dev_err(hba->dev,
				"Timed out waiting for UIC response\n");
			return -ETIMEDOUT;
		}

		if (enabled_intr_status & UFSHCD_ERROR_MASK) {
			dev_err(hba->dev, "Error in status:%08x\n",
				enabled_intr_status);
			return -1;
		}
	} while (!(enabled_intr_status & UFSHCD_UIC_MASK));

	uic_cmd->argument2 = ufshcd_get_uic_cmd_result(hba);
	uic_cmd->argument3 = ufshcd_get_dme_attr_val(hba);

	debug("Sent successfully\n");

	return 0;
}
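/*
 * Unlike the Linux driver this port registers no interrupt handler:
 * ufshcd_send_uic_cmd() above (and ufshcd_send_command() further down)
 * poll REG_INTERRUPT_STATUS, write back whatever is pending to clear it,
 * and time out with get_timer(), so command completion is fully
 * synchronous.
 */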
/**
 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
 */
int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, u8 attr_set,
			u32 mib_val, u8 peer)
{
	struct uic_command uic_cmd = {0};
	static const char *const action[] = {
		"dme-set",
		"dme-peer-set"
	};
	const char *set = action[!!peer];
	int ret;
	int retries = UFS_UIC_COMMAND_RETRIES;

	uic_cmd.command = peer ?
		UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
	uic_cmd.argument1 = attr_sel;
	uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
	uic_cmd.argument3 = mib_val;

	do {
		/* for peer attributes we retry upon failure */
		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
		if (ret)
			dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
				set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
	} while (ret && peer && --retries);

	if (ret)
		dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
			set, UIC_GET_ATTR_ID(attr_sel), mib_val,
			UFS_UIC_COMMAND_RETRIES - retries);

	return ret;
}
/**
 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
 */
int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
			u32 *mib_val, u8 peer)
{
	struct uic_command uic_cmd = {0};
	static const char *const action[] = {
		"dme-get",
		"dme-peer-get"
	};
	const char *get = action[!!peer];
	int ret;
	int retries = UFS_UIC_COMMAND_RETRIES;

	uic_cmd.command = peer ?
		UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
	uic_cmd.argument1 = attr_sel;

	do {
		/* for peer attributes we retry upon failure */
		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
		if (ret)
			dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
				get, UIC_GET_ATTR_ID(attr_sel), ret);
	} while (ret && peer && --retries);

	if (ret)
		dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
			get, UIC_GET_ATTR_ID(attr_sel),
			UFS_UIC_COMMAND_RETRIES - retries);

	if (mib_val && !ret)
		*mib_val = uic_cmd.argument3;

	return ret;
}
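/*
 * The ufshcd_dme_set()/ufshcd_dme_get() and *_peer_* helpers used throughout
 * this file are assumed to be thin wrappers (provided by ufs.h) around
 * ufshcd_dme_set_attr()/ufshcd_dme_get_attr() with the attribute-set type and
 * peer flag filled in; for example, the call used later in
 * ufshcd_change_power_mode():
 *
 *	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
 *
 * performs a DME_SET of the PA_TXGEAR attribute on the local UniPro stack.
 */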
static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
{
	u32 tx_lanes, i, err = 0;

	if (!peer)
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
			       &tx_lanes);
	else
		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
				    &tx_lanes);
	for (i = 0; i < tx_lanes; i++) {
		if (!peer)
			err = ufshcd_dme_set(hba,
					     UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
					     UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
					     0);
		else
			err = ufshcd_dme_peer_set(hba,
					UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
					0);
		if (err) {
			dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
				__func__, peer, i, err);
			break;
		}
	}

	return err;
}

static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
{
	return ufshcd_disable_tx_lcc(hba, true);
}
/**
 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
 */
static int ufshcd_dme_link_startup(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_dbg(hba->dev,
			"dme-link-startup: error code %d\n", ret);
	return ret;
}
/**
 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 */
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}
/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
	return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
}
/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers,
 * When run-stop registers are set to 1, it indicates to the
 * host controller that it can process the requests
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}
/**
 * ufshcd_enable_intr - enable interrupts
 */
static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
	u32 rw;

	if (hba->version == UFSHCI_VERSION_10) {
		rw = set & INTERRUPT_MASK_RW_VER_10;
		set = rw | ((set ^ intrs) & intrs);
	} else {
		set |= intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);

	hba->intr_mask = set;
}
/**
 * ufshcd_make_hba_operational - Make UFS controller operational
 *
 * To bring UFS host controller to operational state,
 * 1. Enable required interrupts
 * 2. Configure interrupt aggregation
 * 3. Program UTRL and UTMRL base address
 * 4. Configure run-stop-registers
 */
static int ufshcd_make_hba_operational(struct ufs_hba *hba)
{
	int err = 0;
	u32 reg;

	/* Enable required interrupts */
	ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);

	/* Disable interrupt aggregation */
	ufshcd_disable_intr_aggr(hba);

	/* Configure UTRL and UTMRL base address registers */
	ufshcd_writel(hba, lower_32_bits((dma_addr_t)hba->utrdl),
		      REG_UTP_TRANSFER_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits((dma_addr_t)hba->utrdl),
		      REG_UTP_TRANSFER_REQ_LIST_BASE_H);
	ufshcd_writel(hba, lower_32_bits((dma_addr_t)hba->utmrdl),
		      REG_UTP_TASK_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits((dma_addr_t)hba->utmrdl),
		      REG_UTP_TASK_REQ_LIST_BASE_H);

	/*
	 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
	 */
	reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
	if (!(ufshcd_get_lists_status(reg))) {
		ufshcd_enable_run_stop_reg(hba);
	} else {
		dev_err(hba->dev,
			"Host controller not ready to process requests");
		err = -EIO;
	}

	return err;
}
/**
 * ufshcd_link_startup - Initialize unipro link startup
 */
static int ufshcd_link_startup(struct ufs_hba *hba)
{
	int ret;
	int retries = DME_LINKSTARTUP_RETRIES;
	bool link_startup_again = true;

link_startup:
	do {
		ufshcd_ops_link_startup_notify(hba, PRE_CHANGE);

		ret = ufshcd_dme_link_startup(hba);

		/* check if device is detected by inter-connect layer */
		if (!ret && !ufshcd_is_device_present(hba)) {
			dev_err(hba->dev, "%s: Device not present\n", __func__);
			ret = -ENXIO;
			goto out;
		}

		/*
		 * DME link lost indication is only received when link is up,
		 * but we can't be sure if the link is up until link startup
		 * succeeds. So reset the local Uni-Pro and try again.
		 */
		if (ret && ufshcd_hba_enable(hba))
			goto out;
	} while (ret && retries--);

	if (ret)
		/* failed to get the link up... retire */
		goto out;

	if (link_startup_again) {
		link_startup_again = false;
		retries = DME_LINKSTARTUP_RETRIES;
		goto link_startup;
	}

	/* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
	ufshcd_init_pwr_info(hba);

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
		ret = ufshcd_disable_device_tx_lcc(hba);
		if (ret)
			goto out;
	}

	/* Include any host controller configuration via UIC commands */
	ret = ufshcd_ops_link_startup_notify(hba, POST_CHANGE);
	if (ret)
		goto out;

	ret = ufshcd_make_hba_operational(hba);
out:
	if (ret)
		dev_err(hba->dev, "link startup failed %d\n", ret);

	return ret;
}
/**
 * ufshcd_hba_stop - Send controller to reset state
 */
static inline void ufshcd_hba_stop(struct ufs_hba *hba)
{
	int err;

	ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
	err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
				       CONTROLLER_ENABLE, CONTROLLER_DISABLE,
				       10);
	if (err)
		dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
}
/**
 * ufshcd_is_hba_active - Get controller state
 */
static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
		? false : true;
}
/**
 * ufshcd_hba_start - Start controller initialization sequence
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
}
/**
 * ufshcd_hba_enable - initialize the controller
 */
static int ufshcd_hba_enable(struct ufs_hba *hba)
{
	int retry;

	if (!ufshcd_is_hba_active(hba))
		/* change controller state to "reset state" */
		ufshcd_hba_stop(hba);

	ufshcd_ops_hce_enable_notify(hba, PRE_CHANGE);

	/* start controller initialization sequence */
	ufshcd_hba_start(hba);

	/*
	 * To initialize a UFS host controller HCE bit must be set to 1.
	 * During initialization the HCE bit value changes from 1->0->1.
	 * When the host controller completes initialization sequence
	 * it sets the value of HCE bit to 1. The same HCE bit is read back
	 * to check if the controller has completed initialization sequence.
	 * So without this delay the value HCE = 1, set in the previous
	 * instruction might be read back.
	 * This delay can be changed based on the controller.
	 */
	mdelay(1);

	/* wait for the host controller to complete initialization */
	retry = 10;
	while (ufshcd_is_hba_active(hba)) {
		if (retry) {
			retry--;
		} else {
			dev_err(hba->dev, "Controller enable failed\n");
			return -EIO;
		}
		mdelay(5);
	}

	/* enable UIC related interrupts */
	ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);

	ufshcd_ops_hce_enable_notify(hba, POST_CHANGE);

	return 0;
}
/**
 * ufshcd_host_memory_configure - configure local reference block with
 *				  memory offsets
 */
static void ufshcd_host_memory_configure(struct ufs_hba *hba)
{
	struct utp_transfer_req_desc *utrdlp;
	dma_addr_t cmd_desc_dma_addr;
	u16 response_offset;
	u16 prdt_offset;

	utrdlp = hba->utrdl;
	cmd_desc_dma_addr = (dma_addr_t)hba->ucdl;

	utrdlp->command_desc_base_addr_lo =
		cpu_to_le32(lower_32_bits(cmd_desc_dma_addr));
	utrdlp->command_desc_base_addr_hi =
		cpu_to_le32(upper_32_bits(cmd_desc_dma_addr));

	response_offset = offsetof(struct utp_transfer_cmd_desc, response_upiu);
	prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);

	utrdlp->response_upiu_offset = cpu_to_le16(response_offset >> 2);
	utrdlp->prd_table_offset = cpu_to_le16(prdt_offset >> 2);
	utrdlp->response_upiu_length = cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);

	hba->ucd_req_ptr = (struct utp_upiu_req *)hba->ucdl;
	hba->ucd_rsp_ptr =
		(struct utp_upiu_rsp *)&hba->ucdl->response_upiu;
	hba->ucd_prdt_ptr =
		(struct ufshcd_sg_entry *)&hba->ucdl->prd_table;
}
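/*
 * Layout set up above: hba->ucdl holds a single struct utp_transfer_cmd_desc,
 * i.e. the command UPIU, the response UPIU and the PRDT back to back. The
 * transfer request descriptor stores the response/PRDT offsets and the
 * response length in dword units, hence the ">> 2" shifts.
 */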
/**
 * ufshcd_memory_alloc - allocate memory for host memory space data structures
 */
static int ufshcd_memory_alloc(struct ufs_hba *hba)
{
	/* Allocate one Transfer Request Descriptor
	 * Should be aligned to 1k boundary.
	 */
	hba->utrdl = memalign(1024, sizeof(struct utp_transfer_req_desc));
	if (!hba->utrdl) {
		dev_err(hba->dev, "Transfer Descriptor memory allocation failed\n");
		return -ENOMEM;
	}

	/* Allocate one Command Descriptor
	 * Should be aligned to 1k boundary.
	 */
	hba->ucdl = memalign(1024, sizeof(struct utp_transfer_cmd_desc));
	if (!hba->ucdl) {
		dev_err(hba->dev, "Command descriptor memory allocation failed\n");
		return -ENOMEM;
	}

	return 0;
}
/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
	u32 intr_mask = 0;

	switch (hba->version) {
	case UFSHCI_VERSION_10:
		intr_mask = INTERRUPT_MASK_ALL_VER_10;
		break;
	case UFSHCI_VERSION_11:
	case UFSHCI_VERSION_20:
		intr_mask = INTERRUPT_MASK_ALL_VER_11;
		break;
	case UFSHCI_VERSION_21:
	default:
		intr_mask = INTERRUPT_MASK_ALL_VER_21;
	}

	return intr_mask;
}
/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UFS_VERSION);
}
/**
 * ufshcd_get_upmcrs - Get the power mode change request status
 */
static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
}
/**
 * ufshcd_prepare_req_desc_hdr() - Fills the requests header
 * descriptor according to request
 */
static void ufshcd_prepare_req_desc_hdr(struct utp_transfer_req_desc *req_desc,
					u32 *upiu_flags,
					enum dma_data_direction cmd_dir)
{
	u32 data_direction;
	u32 dword_0;

	if (cmd_dir == DMA_FROM_DEVICE) {
		data_direction = UTP_DEVICE_TO_HOST;
		*upiu_flags = UPIU_CMD_FLAGS_READ;
	} else if (cmd_dir == DMA_TO_DEVICE) {
		data_direction = UTP_HOST_TO_DEVICE;
		*upiu_flags = UPIU_CMD_FLAGS_WRITE;
	} else {
		data_direction = UTP_NO_DATA_TRANSFER;
		*upiu_flags = UPIU_CMD_FLAGS_NONE;
	}

	dword_0 = data_direction | (0x1 << UPIU_COMMAND_TYPE_OFFSET);

	/* Enable Interrupt for command */
	dword_0 |= UTP_REQ_DESC_INT_CMD;

	/* Transfer request descriptor header fields */
	req_desc->header.dword_0 = cpu_to_le32(dword_0);
	/* dword_1 is reserved, hence it is set to 0 */
	req_desc->header.dword_1 = 0;
	/*
	 * assigning invalid value for command status. Controller
	 * updates OCS on command completion, with the command
	 * status
	 */
	req_desc->header.dword_2 =
		cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
	/* dword_3 is reserved, hence it is set to 0 */
	req_desc->header.dword_3 = 0;

	req_desc->prd_table_length = 0;
}
static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
					      u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr;
	struct ufs_query *query = &hba->dev_cmd.query;
	u16 len = be16_to_cpu(query->request.upiu_req.length);

	/* Query request header */
	ucd_req_ptr->header.dword_0 =
		UPIU_HEADER_DWORD(UPIU_TRANSACTION_QUERY_REQ,
				  upiu_flags, 0, TASK_TAG);
	ucd_req_ptr->header.dword_1 =
		UPIU_HEADER_DWORD(0, query->request.query_func,
				  0, 0);

	/* Data segment length is only needed for WRITE_DESC */
	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
		ucd_req_ptr->header.dword_2 =
			UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
	else
		ucd_req_ptr->header.dword_2 = 0;

	/* Copy the Query Request buffer as is */
	memcpy(&ucd_req_ptr->qr, &query->request.upiu_req, QUERY_OSF_SIZE);

	/* Copy the Descriptor */
	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
		memcpy(ucd_req_ptr + 1, query->descriptor, len);

	memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
static inline void ufshcd_prepare_utp_nop_upiu(struct ufs_hba *hba)
{
	struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr;

	memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 =
		UPIU_HEADER_DWORD(UPIU_TRANSACTION_NOP_OUT, 0, 0, 0x1f);
	/* clear rest of the fields of basic header */
	ucd_req_ptr->header.dword_1 = 0;
	ucd_req_ptr->header.dword_2 = 0;

	memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
/**
 * ufshcd_comp_devman_upiu - UFS Protocol Information Unit(UPIU)
 *			     for Device Management Purposes
 */
static int ufshcd_comp_devman_upiu(struct ufs_hba *hba,
				   enum dev_cmd_type cmd_type)
{
	u32 upiu_flags;
	int ret = 0;
	struct utp_transfer_req_desc *req_desc = hba->utrdl;

	hba->dev_cmd.type = cmd_type;

	ufshcd_prepare_req_desc_hdr(req_desc, &upiu_flags, DMA_NONE);
	switch (cmd_type) {
	case DEV_CMD_TYPE_QUERY:
		ufshcd_prepare_utp_query_req_upiu(hba, upiu_flags);
		break;
	case DEV_CMD_TYPE_NOP:
		ufshcd_prepare_utp_nop_upiu(hba);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
static int ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
	unsigned long start;
	u32 intr_status;
	u32 enabled_intr_status;

	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);

	start = get_timer(0);
	do {
		intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
		enabled_intr_status = intr_status & hba->intr_mask;
		ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);

		if (get_timer(start) > QUERY_REQ_TIMEOUT) {
			dev_err(hba->dev,
				"Timed out waiting for UTP response\n");
			return -ETIMEDOUT;
		}

		if (enabled_intr_status & UFSHCD_ERROR_MASK) {
			dev_err(hba->dev, "Error in status:%08x\n",
				enabled_intr_status);
			return -1;
		}
	} while (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL));

	return 0;
}
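/*
 * Because the driver uses a single TASK_TAG, ufshcd_send_command() rings
 * exactly one doorbell bit and then busy-waits for UTP_TRANSFER_REQ_COMPL;
 * there is never more than one request in flight.
 */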
/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 */
static inline int ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}
/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 */
static inline int ufshcd_get_tr_ocs(struct ufs_hba *hba)
{
	return le32_to_cpu(hba->utrdl->header.dword_2) & MASK_OCS;
}
static inline int ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}
static int ufshcd_check_query_response(struct ufs_hba *hba)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	/* Get the UPIU response */
	query_res->response = ufshcd_get_rsp_upiu_result(hba->ucd_rsp_ptr) >>
				UPIU_RSP_CODE_OFFSET;
	return query_res->response;
}
/**
 * ufshcd_copy_query_response() - Copy the Query Response and the data
 * descriptor
 */
static int ufshcd_copy_query_response(struct ufs_hba *hba)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	memcpy(&query_res->upiu_res, &hba->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);

	/* Get the descriptor */
	if (hba->dev_cmd.query.descriptor &&
	    hba->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
		u8 *descp = (u8 *)hba->ucd_rsp_ptr +
				GENERAL_UPIU_REQUEST_SIZE;
		u16 resp_len;
		u16 buf_len;

		/* data segment length */
		resp_len = be32_to_cpu(hba->ucd_rsp_ptr->header.dword_2) &
				MASK_QUERY_DATA_SEG_LEN;
		buf_len =
			be16_to_cpu(hba->dev_cmd.query.request.upiu_req.length);
		if (likely(buf_len >= resp_len)) {
			memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
		} else {
			dev_warn(hba->dev,
				 "%s: Response size is bigger than buffer",
				 __func__);
			return -EINVAL;
		}
	}

	return 0;
}
/**
 * ufshcd_exec_dev_cmd - API for sending device management requests
 */
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, enum dev_cmd_type cmd_type,
			       int timeout)
{
	int err;
	int resp;

	err = ufshcd_comp_devman_upiu(hba, cmd_type);
	if (err)
		return err;

	err = ufshcd_send_command(hba, TASK_TAG);
	if (err)
		return err;

	err = ufshcd_get_tr_ocs(hba);
	if (err) {
		dev_err(hba->dev, "Error in OCS:%d\n", err);
		return -EINVAL;
	}

	resp = ufshcd_get_req_rsp(hba->ucd_rsp_ptr);
	switch (resp) {
	case UPIU_TRANSACTION_NOP_IN:
		break;
	case UPIU_TRANSACTION_QUERY_RSP:
		err = ufshcd_check_query_response(hba);
		if (!err)
			err = ufshcd_copy_query_response(hba);
		break;
	case UPIU_TRANSACTION_REJECT_UPIU:
		/* TODO: handle Reject UPIU Response */
		err = -EPERM;
		dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
			__func__);
		break;
	default:
		err = -EINVAL;
		dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
			__func__, resp);
	}

	return err;
}
/**
 * ufshcd_init_query() - init the query response and request parameters
 */
static inline void ufshcd_init_query(struct ufs_hba *hba,
				     struct ufs_query_req **request,
				     struct ufs_query_res **response,
				     enum query_opcode opcode,
				     u8 idn, u8 index, u8 selector)
{
	*request = &hba->dev_cmd.query.request;
	*response = &hba->dev_cmd.query.response;
	memset(*request, 0, sizeof(struct ufs_query_req));
	memset(*response, 0, sizeof(struct ufs_query_res));
	(*request)->upiu_req.opcode = opcode;
	(*request)->upiu_req.idn = idn;
	(*request)->upiu_req.index = index;
	(*request)->upiu_req.selector = selector;
}
/**
 * ufshcd_query_flag() - API function for sending flag query requests
 */
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
		      enum flag_idn idn, bool *flag_res)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err, index = 0, selector = 0;
	int timeout = QUERY_REQ_TIMEOUT;

	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			  selector);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_SET_FLAG:
	case UPIU_QUERY_OPCODE_CLEAR_FLAG:
	case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		if (!flag_res) {
			/* No dummy reads */
			dev_err(hba->dev, "%s: Invalid argument for read request\n",
				__func__);
			err = -EINVAL;
			goto out;
		}
		break;
	default:
		dev_err(hba->dev,
			"%s: Expected query flag opcode but got = %d\n",
			__func__, opcode);
		err = -EINVAL;
		goto out;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);

	if (err) {
		dev_err(hba->dev,
			"%s: Sending flag query for idn %d failed, err = %d\n",
			__func__, idn, err);
		goto out;
	}

	if (flag_res)
		*flag_res = (be32_to_cpu(response->upiu_res.value) &
			     MASK_QUERY_UPIU_FLAG_LOC) & 0x1;

out:
	return err;
}
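/*
 * Typical use, as in ufshcd_complete_dev_init() below (normally through the
 * ufshcd_query_flag_retry() wrapper):
 *
 *	bool flag_res;
 *
 *	err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *				QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
 */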
static int ufshcd_query_flag_retry(struct ufs_hba *hba,
				   enum query_opcode opcode,
				   enum flag_idn idn, bool *flag_res)
{
	int ret;
	int retries;

	for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
		ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
		if (ret)
			dev_dbg(hba->dev,
				"%s: failed with error %d, retries %d\n",
				__func__, ret, retries);
		else
			break;
	}

	if (ret)
		dev_err(hba->dev,
			"%s: query attribute, opcode %d, idn %d, failed with error %d after %d retries\n",
			__func__, opcode, idn, ret, retries);
	return ret;
}
static int __ufshcd_query_descriptor(struct ufs_hba *hba,
				     enum query_opcode opcode,
				     enum desc_idn idn, u8 index, u8 selector,
				     u8 *desc_buf, int *buf_len)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err;

	if (!desc_buf) {
		dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
			__func__, opcode);
		err = -EINVAL;
		goto out;
	}

	if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
		dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
			__func__, *buf_len);
		err = -EINVAL;
		goto out;
	}

	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			  selector);
	hba->dev_cmd.query.descriptor = desc_buf;
	request->upiu_req.length = cpu_to_be16(*buf_len);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_WRITE_DESC:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_DESC:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		break;
	default:
		dev_err(hba->dev, "%s: Expected query descriptor opcode but got = 0x%.2x\n",
			__func__, opcode);
		err = -EINVAL;
		goto out;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);

	if (err) {
		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
			__func__, opcode, idn, index, err);
		goto out;
	}

	hba->dev_cmd.query.descriptor = NULL;
	*buf_len = be16_to_cpu(response->upiu_res.length);

out:
	return err;
}
/**
 * ufshcd_query_descriptor_retry - API function for sending descriptor requests
 */
int ufshcd_query_descriptor_retry(struct ufs_hba *hba, enum query_opcode opcode,
				  enum desc_idn idn, u8 index, u8 selector,
				  u8 *desc_buf, int *buf_len)
{
	int err;
	int retries;

	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
		err = __ufshcd_query_descriptor(hba, opcode, idn, index,
						selector, desc_buf, buf_len);
		if (!err || err == -EINVAL)
			break;
	}

	return err;
}
/**
 * ufshcd_read_desc_length - read the specified descriptor length from header
 */
static int ufshcd_read_desc_length(struct ufs_hba *hba, enum desc_idn desc_id,
				   int desc_index, int *desc_length)
{
	int ret;
	u8 header[QUERY_DESC_HDR_SIZE];
	int header_len = QUERY_DESC_HDR_SIZE;

	if (desc_id >= QUERY_DESC_IDN_MAX)
		return -EINVAL;

	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
					    desc_id, desc_index, 0, header,
					    &header_len);

	if (ret) {
		dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
			__func__, desc_id);
		return ret;
	} else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
		dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
			 __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
			 desc_id);
		ret = -EINVAL;
	}

	*desc_length = header[QUERY_DESC_LENGTH_OFFSET];

	return ret;
}
static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
{
	int err;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
				      &hba->desc_size.dev_desc);
	if (err)
		hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
				      &hba->desc_size.pwr_desc);
	if (err)
		hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
				      &hba->desc_size.interc_desc);
	if (err)
		hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
				      &hba->desc_size.conf_desc);
	if (err)
		hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
				      &hba->desc_size.unit_desc);
	if (err)
		hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
				      &hba->desc_size.geom_desc);
	if (err)
		hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0,
				      &hba->desc_size.hlth_desc);
	if (err)
		hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
}
/**
 * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
 */
int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
				 int *desc_len)
{
	switch (desc_id) {
	case QUERY_DESC_IDN_DEVICE:
		*desc_len = hba->desc_size.dev_desc;
		break;
	case QUERY_DESC_IDN_POWER:
		*desc_len = hba->desc_size.pwr_desc;
		break;
	case QUERY_DESC_IDN_GEOMETRY:
		*desc_len = hba->desc_size.geom_desc;
		break;
	case QUERY_DESC_IDN_CONFIGURATION:
		*desc_len = hba->desc_size.conf_desc;
		break;
	case QUERY_DESC_IDN_UNIT:
		*desc_len = hba->desc_size.unit_desc;
		break;
	case QUERY_DESC_IDN_INTERCONNECT:
		*desc_len = hba->desc_size.interc_desc;
		break;
	case QUERY_DESC_IDN_STRING:
		*desc_len = QUERY_DESC_MAX_SIZE;
		break;
	case QUERY_DESC_IDN_HEALTH:
		*desc_len = hba->desc_size.hlth_desc;
		break;
	case QUERY_DESC_IDN_RFU_0:
	case QUERY_DESC_IDN_RFU_1:
		*desc_len = 0;
		break;
	default:
		*desc_len = 0;
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
/**
 * ufshcd_read_desc_param - read the specified descriptor parameter
 */
int ufshcd_read_desc_param(struct ufs_hba *hba, enum desc_idn desc_id,
			   int desc_index, u8 param_offset, u8 *param_read_buf,
			   u8 param_size)
{
	int ret;
	u8 *desc_buf;
	int buff_len;
	bool is_kmalloc = true;

	/* Safety check */
	if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
		return -EINVAL;

	/* Get the max length of descriptor from structure filled up at probe
	 * time.
	 */
	ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);

	/* Sanity checks */
	if (ret || !buff_len) {
		dev_err(hba->dev, "%s: Failed to get full descriptor length",
			__func__);
		return ret;
	}

	/* Check whether we need temp memory */
	if (param_offset != 0 || param_size < buff_len) {
		desc_buf = kmalloc(buff_len, GFP_KERNEL);
		if (!desc_buf)
			return -ENOMEM;
	} else {
		desc_buf = param_read_buf;
		is_kmalloc = false;
	}

	/* Request for full descriptor */
	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
					    desc_id, desc_index, 0, desc_buf,
					    &buff_len);

	if (ret) {
		dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
			__func__, desc_id, desc_index, param_offset, ret);
		goto out;
	}

	/* Sanity check */
	if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
		dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
			__func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
		ret = -EINVAL;
		goto out;
	}

	/* Check that we will not copy more data than is available */
	if (is_kmalloc && param_size > buff_len)
		param_size = buff_len;

	if (is_kmalloc)
		memcpy(param_read_buf, &desc_buf[param_offset], param_size);
out:
	if (is_kmalloc)
		kfree(desc_buf);
	return ret;
}
/* replace non-printable or non-ASCII characters with spaces */
static inline void ufshcd_remove_non_printable(uint8_t *val)
{
	if (!val)
		return;

	if (*val < 0x20 || *val > 0x7e)
		*val = ' ';
}
/**
 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
 * state) and waits for it to take effect.
 */
static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
{
	unsigned long start = 0;
	u8 status;
	int ret;

	ret = ufshcd_send_uic_cmd(hba, cmd);
	if (ret) {
		dev_err(hba->dev,
			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
			cmd->command, cmd->argument3, ret);
		return ret;
	}

	start = get_timer(0);
	do {
		status = ufshcd_get_upmcrs(hba);
		if (get_timer(start) > UFS_UIC_CMD_TIMEOUT) {
			dev_err(hba->dev,
				"pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
				cmd->command, status);
			ret = (status != PWR_OK) ? status : -1;
			break;
		}
	} while (status != PWR_LOCAL);

	return ret;
}
/**
 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
 *				using DME_SET primitives.
 */
static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_SET;
	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
	uic_cmd.argument3 = mode;
	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);

	return ret;
}
void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufs_hba *hba,
				      struct scsi_cmd *pccb, u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr;
	unsigned int cdb_len;

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 =
		UPIU_HEADER_DWORD(UPIU_TRANSACTION_COMMAND, upiu_flags,
				  pccb->lun, TASK_TAG);
	ucd_req_ptr->header.dword_1 =
		UPIU_HEADER_DWORD(UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);

	/* Total EHS length and Data segment length will be zero */
	ucd_req_ptr->header.dword_2 = 0;

	ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(pccb->datalen);

	cdb_len = min_t(unsigned short, pccb->cmdlen, UFS_CDB_SIZE);
	memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
	memcpy(ucd_req_ptr->sc.cdb, pccb->cmd, cdb_len);

	memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
static inline void prepare_prdt_desc(struct ufshcd_sg_entry *entry,
				     unsigned char *buf, ulong len)
{
	entry->size = cpu_to_le32(len) | GENMASK(1, 0);
	entry->base_addr = cpu_to_le32(lower_32_bits((unsigned long)buf));
	entry->upper_addr = cpu_to_le32(upper_32_bits((unsigned long)buf));
}
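/*
 * Note on prepare_prdt_desc(): callers pass "len - 1" because the UFSHCI
 * data byte count field is zero-based, and GENMASK(1, 0) forces the two low
 * bits to 1 to keep the count dword-granular as the spec requires.
 */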
static void prepare_prdt_table(struct ufs_hba *hba, struct scsi_cmd *pccb)
{
	struct utp_transfer_req_desc *req_desc = hba->utrdl;
	struct ufshcd_sg_entry *prd_table = hba->ucd_prdt_ptr;
	ulong datalen = pccb->datalen;
	int table_length;
	u8 *buf;
	int i;

	if (!datalen) {
		req_desc->prd_table_length = 0;
		return;
	}

	table_length = DIV_ROUND_UP(pccb->datalen, MAX_PRDT_ENTRY);
	buf = pccb->pdata;
	i = table_length;
	while (--i) {
		prepare_prdt_desc(&prd_table[table_length - i - 1], buf,
				  MAX_PRDT_ENTRY - 1);
		buf += MAX_PRDT_ENTRY;
		datalen -= MAX_PRDT_ENTRY;
	}

	prepare_prdt_desc(&prd_table[table_length - i - 1], buf, datalen - 1);

	req_desc->prd_table_length = table_length;
}
static int ufs_scsi_exec(struct udevice *scsi_dev, struct scsi_cmd *pccb)
{
	struct ufs_hba *hba = dev_get_uclass_priv(scsi_dev->parent);
	struct utp_transfer_req_desc *req_desc = hba->utrdl;
	u32 upiu_flags;
	int ocs, result = 0;
	u8 scsi_status;

	ufshcd_prepare_req_desc_hdr(req_desc, &upiu_flags, pccb->dma_dir);
	ufshcd_prepare_utp_scsi_cmd_upiu(hba, pccb, upiu_flags);
	prepare_prdt_table(hba, pccb);

	ufshcd_send_command(hba, TASK_TAG);

	ocs = ufshcd_get_tr_ocs(hba);
	switch (ocs) {
	case OCS_SUCCESS:
		result = ufshcd_get_req_rsp(hba->ucd_rsp_ptr);
		switch (result) {
		case UPIU_TRANSACTION_RESPONSE:
			result = ufshcd_get_rsp_upiu_result(hba->ucd_rsp_ptr);

			scsi_status = result & MASK_SCSI_STATUS;
			if (scsi_status)
				return -EINVAL;

			break;
		case UPIU_TRANSACTION_REJECT_UPIU:
			/* TODO: handle Reject UPIU Response */
			dev_err(hba->dev,
				"Reject UPIU not fully implemented\n");
			return -EINVAL;
		default:
			dev_err(hba->dev,
				"Unexpected request response code = %x\n",
				result);
			return -EINVAL;
		}
		break;
	default:
		dev_err(hba->dev, "OCS error from controller = %x\n", ocs);
		return -EINVAL;
	}

	return 0;
}
static inline int ufshcd_read_desc(struct ufs_hba *hba, enum desc_idn desc_id,
				   int desc_index, u8 *buf, u32 size)
{
	return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
}
static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
{
	return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
}
/**
 * ufshcd_read_string_desc - read string descriptor
 */
int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
			    u8 *buf, u32 size, bool ascii)
{
	int err = 0;

	err = ufshcd_read_desc(hba, QUERY_DESC_IDN_STRING, desc_index, buf,
			       size);

	if (err) {
		dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
			__func__, QUERY_REQ_RETRIES, err);
		goto out;
	}

	if (ascii) {
		int desc_len;
		int ascii_len;
		int i;
		u8 *buff_ascii;

		desc_len = buf[0];
		/* remove header and divide by 2 to move from UTF16 to UTF8 */
		ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
		if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
			dev_err(hba->dev, "%s: buffer allocated size is too small\n",
				__func__);
			err = -ENOMEM;
			goto out;
		}

		buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
		if (!buff_ascii) {
			err = -ENOMEM;
			goto out;
		}

		/*
		 * the descriptor contains string in UTF16 format
		 * we need to convert to utf-8 so it can be displayed
		 */
		utf16_to_utf8(buff_ascii,
			      (uint16_t *)&buf[QUERY_DESC_HDR_SIZE], ascii_len);

		/* replace non-printable or non-ASCII characters with spaces */
		for (i = 0; i < ascii_len; i++)
			ufshcd_remove_non_printable(&buff_ascii[i]);

		memset(buf + QUERY_DESC_HDR_SIZE, 0,
		       size - QUERY_DESC_HDR_SIZE);
		memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
		buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
		kfree(buff_ascii);
	}
out:
	return err;
}
static int ufs_get_device_desc(struct ufs_hba *hba,
			       struct ufs_dev_desc *dev_desc)
{
	int err;
	size_t buff_len;
	u8 model_index;
	u8 *desc_buf;

	buff_len = max_t(size_t, hba->desc_size.dev_desc,
			 QUERY_DESC_MAX_SIZE + 1);
	desc_buf = kmalloc(buff_len, GFP_KERNEL);
	if (!desc_buf) {
		err = -ENOMEM;
		goto out;
	}

	err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
	if (err) {
		dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
			__func__, err);
		goto out;
	}

	/*
	 * getting vendor (manufacturerID) and Bank Index in big endian
	 * format
	 */
	dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
				    desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];

	model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];

	/* Zero-pad entire buffer for string termination. */
	memset(desc_buf, 0, buff_len);

	err = ufshcd_read_string_desc(hba, model_index, desc_buf,
				      QUERY_DESC_MAX_SIZE, true/*ASCII*/);
	if (err) {
		dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
			__func__, err);
		goto out;
	}

	desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
	strlcpy(dev_desc->model, (char *)(desc_buf + QUERY_DESC_HDR_SIZE),
		min_t(u8, desc_buf[QUERY_DESC_LENGTH_OFFSET],
		      MAX_MODEL_LEN));

	/* Null terminate the model string */
	dev_desc->model[MAX_MODEL_LEN] = '\0';

out:
	kfree(desc_buf);
	return err;
}
/**
 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
 */
static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
{
	struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;

	if (hba->max_pwr_info.is_valid)
		return 0;

	pwr_info->pwr_tx = FAST_MODE;
	pwr_info->pwr_rx = FAST_MODE;
	pwr_info->hs_rate = PA_HS_MODE_B;

	/* Get the connected lane count */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
		       &pwr_info->lane_rx);
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
		       &pwr_info->lane_tx);

	if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
		dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
			__func__, pwr_info->lane_rx, pwr_info->lane_tx);
		return -EINVAL;
	}

	/*
	 * First, get the maximum gears of HS speed.
	 * If a zero value, it means there is no HSGEAR capability.
	 * Then, get the maximum gears of PWM speed.
	 */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
	if (!pwr_info->gear_rx) {
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
			       &pwr_info->gear_rx);
		if (!pwr_info->gear_rx) {
			dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
				__func__, pwr_info->gear_rx);
			return -EINVAL;
		}
		pwr_info->pwr_rx = SLOW_MODE;
	}

	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
			    &pwr_info->gear_tx);
	if (!pwr_info->gear_tx) {
		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
				    &pwr_info->gear_tx);
		if (!pwr_info->gear_tx) {
			dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
				__func__, pwr_info->gear_tx);
			return -EINVAL;
		}
		pwr_info->pwr_tx = SLOW_MODE;
	}

	hba->max_pwr_info.is_valid = true;
	return 0;
}
static int ufshcd_change_power_mode(struct ufs_hba *hba,
				    struct ufs_pa_layer_attr *pwr_mode)
{
	int ret;

	/* if already configured to the requested pwr_mode */
	if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
	    pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
	    pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
	    pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
	    pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
	    pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
	    pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
		dev_dbg(hba->dev, "%s: power already configured\n", __func__);
		return 0;
	}

	/*
	 * Configure attributes for power mode change with below.
	 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
	 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
	 * - PA_HSSERIES
	 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
		       pwr_mode->lane_rx);
	if (pwr_mode->pwr_rx == FASTAUTO_MODE || pwr_mode->pwr_rx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
	else
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);

	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
		       pwr_mode->lane_tx);
	if (pwr_mode->pwr_tx == FASTAUTO_MODE || pwr_mode->pwr_tx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
	else
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);

	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
	    pwr_mode->pwr_tx == FASTAUTO_MODE ||
	    pwr_mode->pwr_rx == FAST_MODE ||
	    pwr_mode->pwr_tx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
			       pwr_mode->hs_rate);

	ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4 |
					 pwr_mode->pwr_tx);

	if (ret) {
		dev_err(hba->dev,
			"%s: power mode change failed %d\n", __func__, ret);
		return ret;
	}

	/* Copy new Power Mode to power info */
	memcpy(&hba->pwr_info, pwr_mode, sizeof(struct ufs_pa_layer_attr));

	return ret;
}
/**
 * ufshcd_verify_dev_init() - Verify device initialization
 */
static int ufshcd_verify_dev_init(struct ufs_hba *hba)
{
	int retries;
	int err;

	for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
		err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
					  NOP_OUT_TIMEOUT);
		if (!err || err == -ETIMEDOUT)
			break;

		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
	}

	if (err)
		dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);

	return err;
}
/**
 * ufshcd_complete_dev_init() - checks device readiness
 */
static int ufshcd_complete_dev_init(struct ufs_hba *hba)
{
	int i;
	int err;
	bool flag_res = 1;

	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
				      QUERY_FLAG_IDN_FDEVICEINIT, NULL);
	if (err) {
		dev_err(hba->dev,
			"%s setting fDeviceInit flag failed with error %d\n",
			__func__, err);
		goto out;
	}

	/* poll for max. 1000 iterations for fDeviceInit flag to clear */
	for (i = 0; i < 1000 && !err && flag_res; i++)
		err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
					      QUERY_FLAG_IDN_FDEVICEINIT,
					      &flag_res);

	if (err)
		dev_err(hba->dev,
			"%s reading fDeviceInit flag failed with error %d\n",
			__func__, err);
	else if (flag_res)
		dev_err(hba->dev,
			"%s fDeviceInit was not cleared by the device\n",
			__func__);

out:
	return err;
}
static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
{
	hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
	hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
	hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
	hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
	hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
	hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
	hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
}
int ufs_start(struct ufs_hba *hba)
{
	struct ufs_dev_desc card = {0};
	int ret;

	ret = ufshcd_link_startup(hba);
	if (ret)
		return ret;

	ret = ufshcd_verify_dev_init(hba);
	if (ret)
		return ret;

	ret = ufshcd_complete_dev_init(hba);
	if (ret)
		return ret;

	/* Init check for device descriptor sizes */
	ufshcd_init_desc_sizes(hba);

	ret = ufs_get_device_desc(hba, &card);
	if (ret) {
		dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
			__func__, ret);

		return ret;
	}

	if (ufshcd_get_max_pwr_mode(hba)) {
		dev_err(hba->dev,
			"%s: Failed getting max supported power mode\n",
			__func__);
	} else {
		ret = ufshcd_change_power_mode(hba, &hba->max_pwr_info.info);
		if (ret) {
			dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
				__func__, ret);

			return ret;
		}
	}

	printf("Device at %s up at:", hba->dev->name);
	ufshcd_print_pwr_info(hba);

	return 0;
}
int ufshcd_probe(struct udevice *ufs_dev, struct ufs_hba_ops *hba_ops)
{
	struct ufs_hba *hba = dev_get_uclass_priv(ufs_dev);
	struct scsi_platdata *scsi_plat;
	struct udevice *scsi_dev;
	int err;

	device_find_first_child(ufs_dev, &scsi_dev);
	if (!scsi_dev)
		return -ENODEV;

	scsi_plat = dev_get_uclass_platdata(scsi_dev);
	scsi_plat->max_id = UFSHCD_MAX_ID;
	scsi_plat->max_lun = UFS_MAX_LUNS;
	scsi_plat->max_bytes_per_req = UFS_MAX_BYTES;

	hba->dev = ufs_dev;
	hba->ops = hba_ops;
	hba->mmio_base = (void *)dev_read_addr(ufs_dev);

	/* Set descriptor lengths to specification defaults */
	ufshcd_def_desc_sizes(hba);

	ufshcd_ops_init(hba);

	/* Read capabilities registers */
	hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);

	/* Get UFS version supported by the controller */
	hba->version = ufshcd_get_ufs_version(hba);
	if (hba->version != UFSHCI_VERSION_10 &&
	    hba->version != UFSHCI_VERSION_11 &&
	    hba->version != UFSHCI_VERSION_20 &&
	    hba->version != UFSHCI_VERSION_21)
		dev_err(hba->dev, "invalid UFS version 0x%x\n",
			hba->version);

	/* Get Interrupt bit mask per version */
	hba->intr_mask = ufshcd_get_intr_mask(hba);

	/* Allocate memory for host memory space */
	err = ufshcd_memory_alloc(hba);
	if (err) {
		dev_err(hba->dev, "Memory allocation failed\n");
		return err;
	}

	/* Configure Local data structures */
	ufshcd_host_memory_configure(hba);

	/*
	 * In order to avoid any spurious interrupt immediately after
	 * registering UFS controller interrupt handler, clear any pending UFS
	 * interrupt status and disable all the UFS interrupts.
	 */
	ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
		      REG_INTERRUPT_STATUS);
	ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);

	err = ufshcd_hba_enable(hba);
	if (err) {
		dev_err(hba->dev, "Host controller enable failed\n");
		return err;
	}

	err = ufs_start(hba);
	if (err)
		return err;

	return 0;
}
int ufs_scsi_bind(struct udevice *ufs_dev, struct udevice **scsi_devp)
{
	int ret = device_bind_driver(ufs_dev, "ufs_scsi", "ufs_scsi",
				     scsi_devp);

	return ret;
}

static struct scsi_ops ufs_ops = {
	.exec		= ufs_scsi_exec,
};
int ufs_probe_dev(int index)
{
	struct udevice *dev;

	return uclass_get_device(UCLASS_UFS, index, &dev);
}

int ufs_probe(void)
{
	struct udevice *dev;
	int ret, i;

	for (i = 0; ; i++) {
		ret = uclass_get_device(UCLASS_UFS, i, &dev);
		if (ret == -ENODEV)
			break;
	}

	return 0;
}

U_BOOT_DRIVER(ufs_scsi) = {
	.id = UCLASS_SCSI,
	.name = "ufs_scsi",
	.ops = &ufs_ops,
};
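/*
 * A host-controller glue driver is expected to call ufshcd_probe() from its
 * own probe routine and bind the SCSI child via ufs_scsi_bind(). A minimal
 * sketch (the glue driver name and its ops members below are illustrative
 * only, not part of this file):
 *
 *	static struct ufs_hba_ops example_hba_ops = {
 *		.link_startup_notify = example_link_startup_notify,
 *	};
 *
 *	static int example_ufs_probe(struct udevice *dev)
 *	{
 *		return ufshcd_probe(dev, &example_hba_ops);
 *	}
 */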