// SPDX-License-Identifier: GPL-2.0+
/**
 * ufs.c - Universal Flash Subsystem (UFS) driver
 *
 * Taken from Linux Kernel v5.2 (drivers/scsi/ufs/ufshcd.c) and ported
 * to u-boot.
 *
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
 */

#include <charset.h>
#include <common.h>
#include <dm.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <dm/lists.h>
#include <dm/device-internal.h>
#include <malloc.h>
#include <hexdump.h>
#include <scsi.h>
#include <asm/io.h>
#include <asm/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>

#include "ufs.h"
#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)

/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3

/* maximum number of retries for a general UIC command */
#define UFS_UIC_COMMAND_RETRIES 3

/* Query request retries */
#define QUERY_REQ_RETRIES 3
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 1500	/* 1.5 seconds */

/* maximum timeout in ms for a general UIC command */
#define UFS_UIC_CMD_TIMEOUT	1000
/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES 10
/* Timeout after 30 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT 30	/* msecs */

/* Only use one Task Tag for all requests */
#define TASK_TAG	0

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

#define MAX_PRDT_ENTRY	262144

/* maximum bytes per request */
#define UFS_MAX_BYTES	(128 * 256 * 1024)
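
/*
 * Note: this port keeps the host controller strictly single-threaded. One
 * transfer request descriptor, one command descriptor and a single task tag
 * (TASK_TAG == 0) are reused for every request, and completion is detected
 * by polling the interrupt status register rather than by an interrupt
 * handler.
 */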

static inline bool ufshcd_is_hba_active(struct ufs_hba *hba);
static inline void ufshcd_hba_stop(struct ufs_hba *hba);
static int ufshcd_hba_enable(struct ufs_hba *hba);

/*
 * ufshcd_wait_for_register - wait for register value to change
 */
static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
				    u32 val, unsigned long timeout_ms)
{
	int err = 0;
	unsigned long start = get_timer(0);

	/* ignore bits that we don't intend to wait on */
	val = val & mask;

	while ((ufshcd_readl(hba, reg) & mask) != val) {
		if (get_timer(start) > timeout_ms) {
			if ((ufshcd_readl(hba, reg) & mask) != val)
				err = -ETIMEDOUT;
			break;
		}
	}

	return err;
}
/**
 * ufshcd_init_pwr_info - setting the POR (power on reset)
 * values in hba power info
 */
static void ufshcd_init_pwr_info(struct ufs_hba *hba)
{
	hba->pwr_info.gear_rx = UFS_PWM_G1;
	hba->pwr_info.gear_tx = UFS_PWM_G1;
	hba->pwr_info.lane_rx = 1;
	hba->pwr_info.lane_tx = 1;
	hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
	hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
	hba->pwr_info.hs_rate = 0;
}

/**
 * ufshcd_print_pwr_info - print power params as saved in hba
 * power info
 */
static void ufshcd_print_pwr_info(struct ufs_hba *hba)
{
	static const char * const names[] = {
		"INVALID MODE",
		"FAST MODE",
		"SLOW_MODE",
		"INVALID MODE",
		"FASTAUTO_MODE",
		"SLOWAUTO_MODE",
		"INVALID MODE",
	};

	dev_err(hba->dev, "[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
		hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
		hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
		names[hba->pwr_info.pwr_rx],
		names[hba->pwr_info.pwr_tx],
		hba->pwr_info.hs_rate);
}
/**
 * ufshcd_ready_for_uic_cmd - Check if controller is ready
 *                            to accept UIC commands
 */
static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
{
	if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
		return true;
	else
		return false;
}

/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
	       MASK_UIC_COMMAND_RESULT;
}

/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}

/**
 * ufshcd_is_device_present - Check if any device connected to
 *			      the host controller
 */
static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
		DEVICE_PRESENT) ? true : false;
}
/**
 * ufshcd_send_uic_cmd - UFS Interconnect layer command API
 */
static int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	unsigned long start = 0;
	u32 intr_status;
	u32 enabled_intr_status;

	if (!ufshcd_ready_for_uic_cmd(hba)) {
		dev_err(hba->dev,
			"Controller not ready to accept UIC commands\n");
		return -EIO;
	}

	debug("sending uic command:%d\n", uic_cmd->command);

	/* Write Args */
	ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
	ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
	ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);

	/* Write UIC Cmd */
	ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
		      REG_UIC_COMMAND);

	start = get_timer(0);
	do {
		intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
		enabled_intr_status = intr_status & hba->intr_mask;
		ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);

		if (get_timer(start) > UFS_UIC_CMD_TIMEOUT) {
			dev_err(hba->dev,
				"Timed out waiting for UIC response\n");
			return -ETIMEDOUT;
		}

		if (enabled_intr_status & UFSHCD_ERROR_MASK) {
			dev_err(hba->dev, "Error in status:%08x\n",
				enabled_intr_status);
			return -1;
		}
	} while (!(enabled_intr_status & UFSHCD_UIC_MASK));

	uic_cmd->argument2 = ufshcd_get_uic_cmd_result(hba);
	uic_cmd->argument3 = ufshcd_get_dme_attr_val(hba);

	debug("Sent successfully\n");

	return 0;
}
/**
 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
 */
int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, u8 attr_set,
			u32 mib_val, u8 peer)
{
	struct uic_command uic_cmd = {0};
	static const char *const action[] = {
		"dme-set",
		"dme-peer-set"
	};
	const char *set = action[!!peer];
	int ret;
	int retries = UFS_UIC_COMMAND_RETRIES;

	uic_cmd.command = peer ?
		UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
	uic_cmd.argument1 = attr_sel;
	uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
	uic_cmd.argument3 = mib_val;

	do {
		/* for peer attributes we retry upon failure */
		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
		if (ret)
			dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
				set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
	} while (ret && peer && --retries);

	if (ret)
		dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
			set, UIC_GET_ATTR_ID(attr_sel), mib_val,
			UFS_UIC_COMMAND_RETRIES - retries);

	return ret;
}

/**
 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
 */
int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
			u32 *mib_val, u8 peer)
{
	struct uic_command uic_cmd = {0};
	static const char *const action[] = {
		"dme-get",
		"dme-peer-get"
	};
	const char *get = action[!!peer];
	int ret;
	int retries = UFS_UIC_COMMAND_RETRIES;

	uic_cmd.command = peer ?
		UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
	uic_cmd.argument1 = attr_sel;

	do {
		/* for peer attributes we retry upon failure */
		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
		if (ret)
			dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
				get, UIC_GET_ATTR_ID(attr_sel), ret);
	} while (ret && peer && --retries);

	if (ret)
		dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
			get, UIC_GET_ATTR_ID(attr_sel),
			UFS_UIC_COMMAND_RETRIES - retries);

	if (mib_val && !ret)
		*mib_val = uic_cmd.argument3;

	return ret;
}
static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
{
	u32 tx_lanes, i, err = 0;

	if (!peer)
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
			       &tx_lanes);
	else
		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
				    &tx_lanes);

	for (i = 0; i < tx_lanes; i++) {
		if (!peer)
			err = ufshcd_dme_set(hba,
					     UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
					     UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
					     0);
		else
			err = ufshcd_dme_peer_set(hba,
					UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
					0);
		if (err) {
			dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
				__func__, peer, i, err);
			break;
		}
	}

	return err;
}

static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
{
	return ufshcd_disable_tx_lcc(hba, true);
}
/**
 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
 */
static int ufshcd_dme_link_startup(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_dbg(hba->dev,
			"dme-link-startup: error code %d\n", ret);

	return ret;
}

/**
 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 */
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
	return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
}
/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers,
 * When run-stop registers are set to 1, it indicates the
 * host controller that it can process the requests
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}

/**
 * ufshcd_enable_intr - enable interrupts
 */
static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
	u32 rw;

	/* UFSHCI 1.0 keeps some read/write bits in this register */
	if (hba->version == UFSHCI_VERSION_10) {
		rw = set & INTERRUPT_MASK_RW_VER_10;
		set = rw | ((set ^ intrs) & intrs);
	} else {
		set |= intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);

	/* Save the mask for the polling loops */
	hba->intr_mask = set;
}
/**
 * ufshcd_make_hba_operational - Make UFS controller operational
 *
 * To bring UFS host controller to operational state,
 * 1. Enable required interrupts
 * 2. Configure interrupt aggregation
 * 3. Program UTRL and UTMRL base address
 * 4. Configure run-stop-registers
 */
static int ufshcd_make_hba_operational(struct ufs_hba *hba)
{
	int err = 0;
	u32 reg;

	/* Enable required interrupts */
	ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);

	/* Disable interrupt aggregation */
	ufshcd_disable_intr_aggr(hba);

	/* Configure UTRL and UTMRL base address registers */
	ufshcd_writel(hba, lower_32_bits((dma_addr_t)hba->utrdl),
		      REG_UTP_TRANSFER_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits((dma_addr_t)hba->utrdl),
		      REG_UTP_TRANSFER_REQ_LIST_BASE_H);
	ufshcd_writel(hba, lower_32_bits((dma_addr_t)hba->utmrdl),
		      REG_UTP_TASK_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits((dma_addr_t)hba->utmrdl),
		      REG_UTP_TASK_REQ_LIST_BASE_H);

	/*
	 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
	 */
	reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
	if (!(ufshcd_get_lists_status(reg))) {
		ufshcd_enable_run_stop_reg(hba);
	} else {
		dev_err(hba->dev,
			"Host controller not ready to process requests");
		err = -EIO;
		goto out;
	}

out:
	return err;
}
/**
 * ufshcd_link_startup - Initialize unipro link startup
 */
static int ufshcd_link_startup(struct ufs_hba *hba)
{
	int ret;
	int retries = DME_LINKSTARTUP_RETRIES;
	bool link_startup_again = true;

link_startup:
	do {
		ufshcd_ops_link_startup_notify(hba, PRE_CHANGE);

		ret = ufshcd_dme_link_startup(hba);

		/* check if device is detected by inter-connect layer */
		if (!ret && !ufshcd_is_device_present(hba)) {
			dev_err(hba->dev, "%s: Device not present\n", __func__);
			ret = -ENXIO;
			goto out;
		}

		/*
		 * DME link lost indication is only received when link is up,
		 * but we can't be sure if the link is up until link startup
		 * succeeds. So reset the local Uni-Pro and try again.
		 */
		if (ret && ufshcd_hba_enable(hba))
			goto out;
	} while (ret && retries--);

	if (ret)
		/* failed to get the link up... retire */
		goto out;

	if (link_startup_again) {
		link_startup_again = false;
		retries = DME_LINKSTARTUP_RETRIES;
		goto link_startup;
	}

	/* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
	ufshcd_init_pwr_info(hba);

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
		ret = ufshcd_disable_device_tx_lcc(hba);
		if (ret)
			goto out;
	}

	/* Include any host controller configuration via UIC commands */
	ret = ufshcd_ops_link_startup_notify(hba, POST_CHANGE);
	if (ret)
		goto out;

	ret = ufshcd_make_hba_operational(hba);
out:
	if (ret)
		dev_err(hba->dev, "link startup failed %d\n", ret);

	return ret;
}
/**
 * ufshcd_hba_stop - Send controller to reset state
 */
static inline void ufshcd_hba_stop(struct ufs_hba *hba)
{
	int err;

	ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
	err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
				       CONTROLLER_ENABLE, CONTROLLER_DISABLE,
				       10);
	if (err)
		dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
}

/**
 * ufshcd_is_hba_active - Get controller state
 */
static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
		? false : true;
}

/**
 * ufshcd_hba_start - Start controller initialization sequence
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
}

/**
 * ufshcd_hba_enable - initialize the controller
 */
static int ufshcd_hba_enable(struct ufs_hba *hba)
{
	int retry;

	if (!ufshcd_is_hba_active(hba))
		/* change controller state to "reset state" */
		ufshcd_hba_stop(hba);

	ufshcd_ops_hce_enable_notify(hba, PRE_CHANGE);

	/* start controller initialization sequence */
	ufshcd_hba_start(hba);

	/*
	 * To initialize a UFS host controller HCE bit must be set to 1.
	 * During initialization the HCE bit value changes from 1->0->1.
	 * When the host controller completes initialization sequence
	 * it sets the value of HCE bit to 1. The same HCE bit is read back
	 * to check if the controller has completed initialization sequence.
	 * So without this delay the value HCE = 1, set in the previous
	 * instruction might be read back.
	 * This delay can be changed based on the controller.
	 */
	mdelay(1);

	/* wait for the host controller to complete initialization */
	retry = 10;
	while (ufshcd_is_hba_active(hba)) {
		if (retry) {
			retry--;
		} else {
			dev_err(hba->dev, "Controller enable failed\n");
			return -EIO;
		}
		mdelay(5);
	}

	/* enable UIC related interrupts */
	ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);

	ufshcd_ops_hce_enable_notify(hba, POST_CHANGE);

	return 0;
}
/**
 * ufshcd_host_memory_configure - configure local reference block with
 *				  memory offsets
 */
static void ufshcd_host_memory_configure(struct ufs_hba *hba)
{
	struct utp_transfer_req_desc *utrdlp;
	dma_addr_t cmd_desc_dma_addr;
	u16 response_offset;
	u16 prdt_offset;

	utrdlp = hba->utrdl;
	cmd_desc_dma_addr = (dma_addr_t)hba->ucdl;

	utrdlp->command_desc_base_addr_lo =
				cpu_to_le32(lower_32_bits(cmd_desc_dma_addr));
	utrdlp->command_desc_base_addr_hi =
				cpu_to_le32(upper_32_bits(cmd_desc_dma_addr));

	response_offset = offsetof(struct utp_transfer_cmd_desc, response_upiu);
	prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);

	utrdlp->response_upiu_offset = cpu_to_le16(response_offset >> 2);
	utrdlp->prd_table_offset = cpu_to_le16(prdt_offset >> 2);
	utrdlp->response_upiu_length = cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);

	hba->ucd_req_ptr = (struct utp_upiu_req *)hba->ucdl;
	hba->ucd_rsp_ptr =
		(struct utp_upiu_rsp *)&hba->ucdl->response_upiu;
	hba->ucd_prdt_ptr =
		(struct ufshcd_sg_entry *)&hba->ucdl->prd_table;
}

/**
 * ufshcd_memory_alloc - allocate memory for host memory space data structures
 */
static int ufshcd_memory_alloc(struct ufs_hba *hba)
{
	/* Allocate one Transfer Request Descriptor
	 * Should be aligned to 1k boundary.
	 */
	hba->utrdl = memalign(1024, sizeof(struct utp_transfer_req_desc));
	if (!hba->utrdl) {
		dev_err(hba->dev, "Transfer Descriptor memory allocation failed\n");
		return -ENOMEM;
	}

	/* Allocate one Command Descriptor
	 * Should be aligned to 1k boundary.
	 */
	hba->ucdl = memalign(1024, sizeof(struct utp_transfer_cmd_desc));
	if (!hba->ucdl) {
		dev_err(hba->dev, "Command descriptor memory allocation failed\n");
		return -ENOMEM;
	}

	return 0;
}

/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
	u32 intr_mask = 0;

	switch (hba->version) {
	case UFSHCI_VERSION_10:
		intr_mask = INTERRUPT_MASK_ALL_VER_10;
		break;
	case UFSHCI_VERSION_11:
	case UFSHCI_VERSION_20:
		intr_mask = INTERRUPT_MASK_ALL_VER_11;
		break;
	case UFSHCI_VERSION_21:
	default:
		intr_mask = INTERRUPT_MASK_ALL_VER_21;
		break;
	}

	return intr_mask;
}

/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UFS_VERSION);
}

/**
 * ufshcd_get_upmcrs - Get the power mode change request status
 */
static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
}
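
/*
 * UPMCRS lives in bits 10:8 of the controller status register; the power
 * mode change path in ufshcd_uic_pwr_ctrl() later in this file polls it
 * until it reads PWR_LOCAL.
 */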

/**
 * ufshcd_prepare_req_desc_hdr() - Fills the requests header
 * descriptor according to request
 */
static void ufshcd_prepare_req_desc_hdr(struct utp_transfer_req_desc *req_desc,
					u32 *upiu_flags,
					enum dma_data_direction cmd_dir)
{
	u32 data_direction;
	u32 dword_0;

	if (cmd_dir == DMA_FROM_DEVICE) {
		data_direction = UTP_DEVICE_TO_HOST;
		*upiu_flags = UPIU_CMD_FLAGS_READ;
	} else if (cmd_dir == DMA_TO_DEVICE) {
		data_direction = UTP_HOST_TO_DEVICE;
		*upiu_flags = UPIU_CMD_FLAGS_WRITE;
	} else {
		data_direction = UTP_NO_DATA_TRANSFER;
		*upiu_flags = UPIU_CMD_FLAGS_NONE;
	}

	dword_0 = data_direction | (0x1 << UPIU_COMMAND_TYPE_OFFSET);

	/* Enable Interrupt for command */
	dword_0 |= UTP_REQ_DESC_INT_CMD;

	/* Transfer request descriptor header fields */
	req_desc->header.dword_0 = cpu_to_le32(dword_0);
	/* dword_1 is reserved, hence it is set to 0 */
	req_desc->header.dword_1 = 0;
	/*
	 * assigning invalid value for command status. Controller
	 * updates OCS on command completion, with the command
	 * status
	 */
	req_desc->header.dword_2 =
		cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
	/* dword_3 is reserved, hence it is set to 0 */
	req_desc->header.dword_3 = 0;

	req_desc->prd_table_length = 0;
}

static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
					      u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr;
	struct ufs_query *query = &hba->dev_cmd.query;
	u16 len = be16_to_cpu(query->request.upiu_req.length);

	/* Query request header */
	ucd_req_ptr->header.dword_0 =
			UPIU_HEADER_DWORD(UPIU_TRANSACTION_QUERY_REQ,
					  upiu_flags, 0, TASK_TAG);
	ucd_req_ptr->header.dword_1 =
			UPIU_HEADER_DWORD(0, query->request.query_func,
					  0, 0);

	/* Data segment length only need for WRITE_DESC */
	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
		ucd_req_ptr->header.dword_2 =
			UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
	else
		ucd_req_ptr->header.dword_2 = 0;

	/* Copy the Query Request buffer as is */
	memcpy(&ucd_req_ptr->qr, &query->request.upiu_req, QUERY_OSF_SIZE);

	/* Copy the Descriptor */
	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
		memcpy(ucd_req_ptr + 1, query->descriptor, len);

	memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}

static inline void ufshcd_prepare_utp_nop_upiu(struct ufs_hba *hba)
{
	struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr;

	memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 =
		UPIU_HEADER_DWORD(UPIU_TRANSACTION_NOP_OUT, 0, 0, 0x1f);
	/* clear rest of the fields of basic header */
	ucd_req_ptr->header.dword_1 = 0;
	ucd_req_ptr->header.dword_2 = 0;

	memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}

/**
 * ufshcd_comp_devman_upiu - UFS Protocol Information Unit(UPIU)
 *			     for Device Management Purposes
 */
static int ufshcd_comp_devman_upiu(struct ufs_hba *hba,
				   enum dev_cmd_type cmd_type)
{
	u32 upiu_flags;
	int ret = 0;
	struct utp_transfer_req_desc *req_desc = hba->utrdl;

	hba->dev_cmd.type = cmd_type;

	ufshcd_prepare_req_desc_hdr(req_desc, &upiu_flags, DMA_NONE);
	switch (cmd_type) {
	case DEV_CMD_TYPE_QUERY:
		ufshcd_prepare_utp_query_req_upiu(hba, upiu_flags);
		break;
	case DEV_CMD_TYPE_NOP:
		ufshcd_prepare_utp_nop_upiu(hba);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
	unsigned long start;
	u32 intr_status;
	u32 enabled_intr_status;

	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);

	start = get_timer(0);
	do {
		intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
		enabled_intr_status = intr_status & hba->intr_mask;
		ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);

		if (get_timer(start) > QUERY_REQ_TIMEOUT) {
			dev_err(hba->dev,
				"Timed out waiting for UTP response\n");
			return -ETIMEDOUT;
		}

		if (enabled_intr_status & UFSHCD_ERROR_MASK) {
			dev_err(hba->dev, "Error in status:%08x\n",
				enabled_intr_status);
			return -1;
		}
	} while (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL));

	return 0;
}
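
/*
 * With a single task tag the doorbell write above always sets bit 0, and
 * the polling loop treats any error bit in the interrupt status as fatal
 * for the request rather than attempting recovery.
 */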

/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 */
static inline int ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}

/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 */
static inline int ufshcd_get_tr_ocs(struct ufs_hba *hba)
{
	return le32_to_cpu(hba->utrdl->header.dword_2) & MASK_OCS;
}

static inline int ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}

static int ufshcd_check_query_response(struct ufs_hba *hba)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	/* Get the UPIU response */
	query_res->response = ufshcd_get_rsp_upiu_result(hba->ucd_rsp_ptr) >>
			      UPIU_RSP_CODE_OFFSET;
	return query_res->response;
}

/**
 * ufshcd_copy_query_response() - Copy the Query Response and the data
 * descriptor
 */
static int ufshcd_copy_query_response(struct ufs_hba *hba)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	memcpy(&query_res->upiu_res, &hba->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);

	/* Get the descriptor */
	if (hba->dev_cmd.query.descriptor &&
	    hba->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
		u8 *descp = (u8 *)hba->ucd_rsp_ptr +
			    GENERAL_UPIU_REQUEST_SIZE;
		u16 resp_len;
		u16 buf_len;

		/* data segment length */
		resp_len = be32_to_cpu(hba->ucd_rsp_ptr->header.dword_2) &
			   MASK_QUERY_DATA_SEG_LEN;
		buf_len =
			be16_to_cpu(hba->dev_cmd.query.request.upiu_req.length);
		if (likely(buf_len >= resp_len)) {
			memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
		} else {
			dev_warn(hba->dev,
				 "%s: Response size is bigger than buffer",
				 __func__);
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * ufshcd_exec_dev_cmd - API for sending device management requests
 */
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, enum dev_cmd_type cmd_type,
			       int timeout)
{
	int err;
	int resp;

	err = ufshcd_comp_devman_upiu(hba, cmd_type);
	if (err)
		return err;

	err = ufshcd_send_command(hba, TASK_TAG);
	if (err)
		return err;

	err = ufshcd_get_tr_ocs(hba);
	if (err) {
		dev_err(hba->dev, "Error in OCS:%d\n", err);
		return -EINVAL;
	}

	resp = ufshcd_get_req_rsp(hba->ucd_rsp_ptr);
	switch (resp) {
	case UPIU_TRANSACTION_NOP_IN:
		break;
	case UPIU_TRANSACTION_QUERY_RSP:
		err = ufshcd_check_query_response(hba);
		if (!err)
			err = ufshcd_copy_query_response(hba);
		break;
	case UPIU_TRANSACTION_REJECT_UPIU:
		/* TODO: handle Reject UPIU Response */
		err = -EPERM;
		dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
			__func__);
		break;
	default:
		err = -EINVAL;
		dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
			__func__, resp);
		break;
	}

	return err;
}

/**
 * ufshcd_init_query() - init the query response and request parameters
 */
static inline void ufshcd_init_query(struct ufs_hba *hba,
				     struct ufs_query_req **request,
				     struct ufs_query_res **response,
				     enum query_opcode opcode,
				     u8 idn, u8 index, u8 selector)
{
	*request = &hba->dev_cmd.query.request;
	*response = &hba->dev_cmd.query.response;
	memset(*request, 0, sizeof(struct ufs_query_req));
	memset(*response, 0, sizeof(struct ufs_query_res));
	(*request)->upiu_req.opcode = opcode;
	(*request)->upiu_req.idn = idn;
	(*request)->upiu_req.index = index;
	(*request)->upiu_req.selector = selector;
}

/**
 * ufshcd_query_flag() - API function for sending flag query requests
 */
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
		      enum flag_idn idn, bool *flag_res)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err, index = 0, selector = 0;
	int timeout = QUERY_REQ_TIMEOUT;

	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			  selector);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_SET_FLAG:
	case UPIU_QUERY_OPCODE_CLEAR_FLAG:
	case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		if (!flag_res) {
			/* No dummy reads */
			dev_err(hba->dev, "%s: Invalid argument for read request\n",
				__func__);
			err = -EINVAL;
			goto out;
		}
		break;
	default:
		dev_err(hba->dev,
			"%s: Expected query flag opcode but got = %d\n",
			__func__, opcode);
		err = -EINVAL;
		goto out;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
	if (err) {
		dev_err(hba->dev,
			"%s: Sending flag query for idn %d failed, err = %d\n",
			__func__, idn, err);
		goto out;
	}

	if (flag_res)
		*flag_res = (be32_to_cpu(response->upiu_res.value) &
			     MASK_QUERY_UPIU_FLAG_LOC) & 0x1;

out:
	return err;
}
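
/*
 * Typical use, as in ufshcd_complete_dev_init() below (a read of the
 * fDeviceInit flag):
 *
 *	bool flag_res;
 *	int err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *					  QUERY_FLAG_IDN_FDEVICEINIT,
 *					  &flag_res);
 */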

static int ufshcd_query_flag_retry(struct ufs_hba *hba,
				   enum query_opcode opcode,
				   enum flag_idn idn, bool *flag_res)
{
	int ret;
	int retries;

	for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
		ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
		if (ret)
			dev_dbg(hba->dev,
				"%s: failed with error %d, retries %d\n",
				__func__, ret, retries);
		else
			break;
	}

	if (ret)
		dev_err(hba->dev,
			"%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
			__func__, opcode, idn, ret, retries);
	return ret;
}

static int __ufshcd_query_descriptor(struct ufs_hba *hba,
				     enum query_opcode opcode,
				     enum desc_idn idn, u8 index, u8 selector,
				     u8 *desc_buf, int *buf_len)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err;

	if (!desc_buf) {
		dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
			__func__, opcode);
		err = -EINVAL;
		goto out;
	}

	if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
		dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
			__func__, *buf_len);
		err = -EINVAL;
		goto out;
	}

	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			  selector);
	hba->dev_cmd.query.descriptor = desc_buf;
	request->upiu_req.length = cpu_to_be16(*buf_len);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_WRITE_DESC:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_DESC:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		break;
	default:
		dev_err(hba->dev, "%s: Expected query descriptor opcode but got = 0x%.2x\n",
			__func__, opcode);
		err = -EINVAL;
		goto out;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
	if (err) {
		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
			__func__, opcode, idn, index, err);
		goto out;
	}

	hba->dev_cmd.query.descriptor = NULL;
	*buf_len = be16_to_cpu(response->upiu_res.length);

out:
	return err;
}

/**
 * ufshcd_query_descriptor_retry - API function for sending descriptor requests
 */
int ufshcd_query_descriptor_retry(struct ufs_hba *hba, enum query_opcode opcode,
				  enum desc_idn idn, u8 index, u8 selector,
				  u8 *desc_buf, int *buf_len)
{
	int err;
	int retries;

	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
		err = __ufshcd_query_descriptor(hba, opcode, idn, index,
						selector, desc_buf, buf_len);
		if (!err || err == -EINVAL)
			break;
	}

	return err;
}
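
/*
 * -EINVAL aborts the retry loop early because it indicates a bad parameter
 * (opcode, buffer or length) that repeating the request cannot fix.
 */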

/**
 * ufshcd_read_desc_length - read the specified descriptor length from header
 */
static int ufshcd_read_desc_length(struct ufs_hba *hba, enum desc_idn desc_id,
				   int desc_index, int *desc_length)
{
	int ret;
	u8 header[QUERY_DESC_HDR_SIZE];
	int header_len = QUERY_DESC_HDR_SIZE;

	if (desc_id >= QUERY_DESC_IDN_MAX)
		return -EINVAL;

	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
					    desc_id, desc_index, 0, header,
					    &header_len);
	if (ret) {
		dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
			__func__, desc_id);
		return ret;
	} else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
		dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
			 __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
			 desc_id);
		ret = -EINVAL;
	}

	*desc_length = header[QUERY_DESC_LENGTH_OFFSET];

	return ret;
}

static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
{
	int err;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
				      &hba->desc_size.dev_desc);
	if (err)
		hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
				      &hba->desc_size.pwr_desc);
	if (err)
		hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
				      &hba->desc_size.interc_desc);
	if (err)
		hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
				      &hba->desc_size.conf_desc);
	if (err)
		hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
				      &hba->desc_size.unit_desc);
	if (err)
		hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
				      &hba->desc_size.geom_desc);
	if (err)
		hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0,
				      &hba->desc_size.hlth_desc);
	if (err)
		hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
}

/**
 * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
 */
int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
				 int *desc_len)
{
	switch (desc_id) {
	case QUERY_DESC_IDN_DEVICE:
		*desc_len = hba->desc_size.dev_desc;
		break;
	case QUERY_DESC_IDN_POWER:
		*desc_len = hba->desc_size.pwr_desc;
		break;
	case QUERY_DESC_IDN_GEOMETRY:
		*desc_len = hba->desc_size.geom_desc;
		break;
	case QUERY_DESC_IDN_CONFIGURATION:
		*desc_len = hba->desc_size.conf_desc;
		break;
	case QUERY_DESC_IDN_UNIT:
		*desc_len = hba->desc_size.unit_desc;
		break;
	case QUERY_DESC_IDN_INTERCONNECT:
		*desc_len = hba->desc_size.interc_desc;
		break;
	case QUERY_DESC_IDN_STRING:
		*desc_len = QUERY_DESC_MAX_SIZE;
		break;
	case QUERY_DESC_IDN_HEALTH:
		*desc_len = hba->desc_size.hlth_desc;
		break;
	case QUERY_DESC_IDN_RFU_0:
	case QUERY_DESC_IDN_RFU_1:
		*desc_len = 0;
		break;
	default:
		*desc_len = 0;
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);

/**
 * ufshcd_read_desc_param - read the specified descriptor parameter
 */
int ufshcd_read_desc_param(struct ufs_hba *hba, enum desc_idn desc_id,
			   int desc_index, u8 param_offset, u8 *param_read_buf,
			   u8 param_size)
{
	int ret;
	u8 *desc_buf;
	int buff_len;
	bool is_kmalloc = true;

	/* Safety check */
	if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
		return -EINVAL;

	/* Get the max length of descriptor from structure filled up at probe
	 * time.
	 */
	ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);

	/* Sanity checks */
	if (ret || !buff_len) {
		dev_err(hba->dev, "%s: Failed to get full descriptor length",
			__func__);
		return ret;
	}

	/* Check whether we need temp memory */
	if (param_offset != 0 || param_size < buff_len) {
		desc_buf = kmalloc(buff_len, GFP_KERNEL);
		if (!desc_buf)
			return -ENOMEM;
	} else {
		desc_buf = param_read_buf;
		is_kmalloc = false;
	}

	/* Request for full descriptor */
	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
					    desc_id, desc_index, 0, desc_buf,
					    &buff_len);
	if (ret) {
		dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
			__func__, desc_id, desc_index, param_offset, ret);
		goto out;
	}

	/* Sanity check */
	if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
		dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
			__func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
		ret = -EINVAL;
		goto out;
	}

	/* Check whether we will not copy more data, than available */
	if (is_kmalloc && param_size > buff_len)
		param_size = buff_len;

	if (is_kmalloc)
		memcpy(param_read_buf, &desc_buf[param_offset], param_size);
out:
	if (is_kmalloc)
		kfree(desc_buf);
	return ret;
}
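
/*
 * Example (hypothetical names, for illustration only): fetching a single
 * byte at a non-zero offset goes through the temporary-buffer path above:
 *
 *	u8 lun_count;
 *	ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0,
 *				     GEOMETRY_DESC_PARAM_MAX_NUM_LUN,
 *				     &lun_count, 1);
 */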

/* replace non-printable or non-ASCII characters with spaces */
static inline void ufshcd_remove_non_printable(uint8_t *val)
{
	if (!val)
		return;

	if (*val < 0x20 || *val > 0x7e)
		*val = ' ';
}

/**
 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
 * state) and waits for it to take effect.
 */
static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
{
	unsigned long start = 0;
	u8 status;
	int ret;

	ret = ufshcd_send_uic_cmd(hba, cmd);
	if (ret) {
		dev_err(hba->dev,
			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
			cmd->command, cmd->argument3, ret);
		return ret;
	}

	start = get_timer(0);
	do {
		status = ufshcd_get_upmcrs(hba);
		if (get_timer(start) > UFS_UIC_CMD_TIMEOUT) {
			dev_err(hba->dev,
				"pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
				cmd->command, status);
			ret = (status != PWR_OK) ? status : -1;
			break;
		}
	} while (status != PWR_LOCAL);

	return ret;
}

/**
 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
 *				using DME_SET primitives.
 */
static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_SET;
	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
	uic_cmd.argument3 = mode;
	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);

	return ret;
}
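
/*
 * The mode byte written to PA_PWRMODE packs the RX power mode in the upper
 * nibble and the TX mode in the lower one; see the
 * "pwr_mode->pwr_rx << 4 | pwr_mode->pwr_tx" caller in
 * ufshcd_change_power_mode() below.
 */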

static void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufs_hba *hba,
					     struct scsi_cmd *pccb,
					     u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr;
	unsigned int cdb_len;

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 =
			UPIU_HEADER_DWORD(UPIU_TRANSACTION_COMMAND, upiu_flags,
					  pccb->lun, TASK_TAG);
	ucd_req_ptr->header.dword_1 =
			UPIU_HEADER_DWORD(UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);

	/* Total EHS length and Data segment length will be zero */
	ucd_req_ptr->header.dword_2 = 0;

	ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(pccb->datalen);

	cdb_len = min_t(unsigned short, pccb->cmdlen, UFS_CDB_SIZE);
	memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
	memcpy(ucd_req_ptr->sc.cdb, pccb->cmd, cdb_len);

	memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}

static inline void prepare_prdt_desc(struct ufshcd_sg_entry *entry,
				     unsigned char *buf, ulong len)
{
	entry->size = cpu_to_le32(len) | GENMASK(1, 0);
	entry->base_addr = cpu_to_le32(lower_32_bits((unsigned long)buf));
	entry->upper_addr = cpu_to_le32(upper_32_bits((unsigned long)buf));
}
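
/*
 * The PRDT "data byte count" field holds the byte count minus one, and the
 * count must be dword-aligned, so its two least significant bits always
 * read as 1: that is why the length is OR'd with GENMASK(1, 0) here and
 * callers pass "len - 1" style values.
 */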

static void prepare_prdt_table(struct ufs_hba *hba, struct scsi_cmd *pccb)
{
	struct utp_transfer_req_desc *req_desc = hba->utrdl;
	struct ufshcd_sg_entry *prd_table = hba->ucd_prdt_ptr;
	ulong datalen = pccb->datalen;
	int table_length;
	u8 *buf;
	int i;

	if (!datalen) {
		req_desc->prd_table_length = 0;
		return;
	}

	table_length = DIV_ROUND_UP(pccb->datalen, MAX_PRDT_ENTRY);
	buf = pccb->pdata;
	i = table_length;
	while (--i) {
		prepare_prdt_desc(&prd_table[table_length - i - 1], buf,
				  MAX_PRDT_ENTRY - 1);
		buf += MAX_PRDT_ENTRY;
		datalen -= MAX_PRDT_ENTRY;
	}

	prepare_prdt_desc(&prd_table[table_length - i - 1], buf, datalen - 1);

	req_desc->prd_table_length = table_length;
}

static int ufs_scsi_exec(struct udevice *scsi_dev, struct scsi_cmd *pccb)
{
	struct ufs_hba *hba = dev_get_uclass_priv(scsi_dev->parent);
	struct utp_transfer_req_desc *req_desc = hba->utrdl;
	u32 upiu_flags;
	int ocs, result = 0;
	u8 scsi_status;

	ufshcd_prepare_req_desc_hdr(req_desc, &upiu_flags, pccb->dma_dir);
	ufshcd_prepare_utp_scsi_cmd_upiu(hba, pccb, upiu_flags);
	prepare_prdt_table(hba, pccb);

	ufshcd_send_command(hba, TASK_TAG);

	ocs = ufshcd_get_tr_ocs(hba);
	switch (ocs) {
	case OCS_SUCCESS:
		result = ufshcd_get_req_rsp(hba->ucd_rsp_ptr);
		switch (result) {
		case UPIU_TRANSACTION_RESPONSE:
			result = ufshcd_get_rsp_upiu_result(hba->ucd_rsp_ptr);

			scsi_status = result & MASK_SCSI_STATUS;
			if (scsi_status)
				return -EINVAL;

			break;
		case UPIU_TRANSACTION_REJECT_UPIU:
			/* TODO: handle Reject UPIU Response */
			dev_err(hba->dev,
				"Reject UPIU not fully implemented\n");
			return -EINVAL;
		default:
			dev_err(hba->dev,
				"Unexpected request response code = %x\n",
				result);
			return -EINVAL;
		}
		break;
	default:
		dev_err(hba->dev, "OCS error from controller = %x\n", ocs);
		return -EINVAL;
	}

	return 0;
}

static inline int ufshcd_read_desc(struct ufs_hba *hba, enum desc_idn desc_id,
				   int desc_index, u8 *buf, u32 size)
{
	return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
}

static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
{
	return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
}

/**
 * ufshcd_read_string_desc - read string descriptor
 */
int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
			    u8 *buf, u32 size, bool ascii)
{
	int err = 0;

	err = ufshcd_read_desc(hba, QUERY_DESC_IDN_STRING, desc_index, buf,
			       size);
	if (err) {
		dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
			__func__, QUERY_REQ_RETRIES, err);
		goto out;
	}

	if (ascii) {
		int desc_len;
		int ascii_len;
		int i;
		u8 *buff_ascii;

		desc_len = buf[0];
		/* remove header and divide by 2 to move from UTF16 to UTF8 */
		ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
		if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
			dev_err(hba->dev, "%s: buffer allocated size is too small\n",
				__func__);
			err = -ENOMEM;
			goto out;
		}

		buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
		if (!buff_ascii) {
			err = -ENOMEM;
			goto out;
		}

		/*
		 * the descriptor contains string in UTF16 format
		 * we need to convert to utf-8 so it can be displayed
		 */
		utf16_to_utf8(buff_ascii,
			      (uint16_t *)&buf[QUERY_DESC_HDR_SIZE], ascii_len);

		/* replace non-printable or non-ASCII characters with spaces */
		for (i = 0; i < ascii_len; i++)
			ufshcd_remove_non_printable(&buff_ascii[i]);

		memset(buf + QUERY_DESC_HDR_SIZE, 0,
		       size - QUERY_DESC_HDR_SIZE);
		memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
		buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
		kfree(buff_ascii);
	}
out:
	return err;
}

static int ufs_get_device_desc(struct ufs_hba *hba,
			       struct ufs_dev_desc *dev_desc)
{
	int err;
	size_t buff_len;
	u8 model_index;
	u8 *desc_buf;

	buff_len = max_t(size_t, hba->desc_size.dev_desc,
			 QUERY_DESC_MAX_SIZE + 1);
	desc_buf = kmalloc(buff_len, GFP_KERNEL);
	if (!desc_buf) {
		err = -ENOMEM;
		goto out;
	}

	err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
	if (err) {
		dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
			__func__, err);
		goto out;
	}

	/*
	 * getting vendor (manufacturerID) and Bank Index in big endian
	 * format
	 */
	dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
				    desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];

	model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];

	/* Zero-pad entire buffer for string termination. */
	memset(desc_buf, 0, buff_len);

	err = ufshcd_read_string_desc(hba, model_index, desc_buf,
				      QUERY_DESC_MAX_SIZE, true/*ASCII*/);
	if (err) {
		dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
			__func__, err);
		goto out;
	}

	desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
	strlcpy(dev_desc->model, (char *)(desc_buf + QUERY_DESC_HDR_SIZE),
		min_t(u8, desc_buf[QUERY_DESC_LENGTH_OFFSET],
		      MAX_MODEL_LEN));

	/* Null terminate the model string */
	dev_desc->model[MAX_MODEL_LEN] = '\0';

out:
	kfree(desc_buf);
	return err;
}

/**
 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
 */
static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
{
	struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;

	if (hba->max_pwr_info.is_valid)
		return 0;

	pwr_info->pwr_tx = FAST_MODE;
	pwr_info->pwr_rx = FAST_MODE;
	pwr_info->hs_rate = PA_HS_MODE_B;

	/* Get the connected lane count */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
		       &pwr_info->lane_rx);
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
		       &pwr_info->lane_tx);

	if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
		dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
			__func__, pwr_info->lane_rx, pwr_info->lane_tx);
		return -EINVAL;
	}

	/*
	 * First, get the maximum gears of HS speed.
	 * If a zero value, it means there is no HSGEAR capability.
	 * Then, get the maximum gears of PWM speed.
	 */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
	if (!pwr_info->gear_rx) {
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
			       &pwr_info->gear_rx);
		if (!pwr_info->gear_rx) {
			dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
				__func__, pwr_info->gear_rx);
			return -EINVAL;
		}
		pwr_info->pwr_rx = SLOW_MODE;
	}

	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
			    &pwr_info->gear_tx);
	if (!pwr_info->gear_tx) {
		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
				    &pwr_info->gear_tx);
		if (!pwr_info->gear_tx) {
			dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
				__func__, pwr_info->gear_tx);
			return -EINVAL;
		}
		pwr_info->pwr_tx = SLOW_MODE;
	}

	hba->max_pwr_info.is_valid = true;
	return 0;
}
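
/*
 * TX capabilities are read with peer DME_GET of the *RX* attributes above:
 * the device's maximum RX gear is, from the host's point of view, the
 * maximum usable TX gear.
 */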

static int ufshcd_change_power_mode(struct ufs_hba *hba,
				    struct ufs_pa_layer_attr *pwr_mode)
{
	int ret;

	/* if already configured to the requested pwr_mode */
	if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
	    pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
	    pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
	    pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
	    pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
	    pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
	    pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
		dev_dbg(hba->dev, "%s: power already configured\n", __func__);
		return 0;
	}

	/*
	 * Configure attributes for power mode change with below.
	 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
	 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
	 * - PA_HSSERIES
	 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
		       pwr_mode->lane_rx);
	if (pwr_mode->pwr_rx == FASTAUTO_MODE || pwr_mode->pwr_rx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
	else
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);

	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
		       pwr_mode->lane_tx);
	if (pwr_mode->pwr_tx == FASTAUTO_MODE || pwr_mode->pwr_tx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
	else
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);

	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
	    pwr_mode->pwr_tx == FASTAUTO_MODE ||
	    pwr_mode->pwr_rx == FAST_MODE ||
	    pwr_mode->pwr_tx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
			       pwr_mode->hs_rate);

	ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4 |
					 pwr_mode->pwr_tx);
	if (ret) {
		dev_err(hba->dev,
			"%s: power mode change failed %d\n", __func__, ret);
		return ret;
	}

	/* Copy new Power Mode to power info */
	memcpy(&hba->pwr_info, pwr_mode, sizeof(struct ufs_pa_layer_attr));

	return ret;
}

/**
 * ufshcd_verify_dev_init() - Verify device initialization
 */
static int ufshcd_verify_dev_init(struct ufs_hba *hba)
{
	int retries;
	int err;

	for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
		err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
					  NOP_OUT_TIMEOUT);
		if (!err || err == -ETIMEDOUT)
			break;

		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
	}

	if (err)
		dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);

	return err;
}

/**
 * ufshcd_complete_dev_init() - checks device readiness
 */
static int ufshcd_complete_dev_init(struct ufs_hba *hba)
{
	int i;
	int err;
	bool flag_res = 1;

	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
				      QUERY_FLAG_IDN_FDEVICEINIT, NULL);
	if (err) {
		dev_err(hba->dev,
			"%s setting fDeviceInit flag failed with error %d\n",
			__func__, err);
		goto out;
	}

	/* poll for max. 1000 iterations for fDeviceInit flag to clear */
	for (i = 0; i < 1000 && !err && flag_res; i++)
		err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
					      QUERY_FLAG_IDN_FDEVICEINIT,
					      &flag_res);

	if (err)
		dev_err(hba->dev,
			"%s reading fDeviceInit flag failed with error %d\n",
			__func__, err);
	else if (flag_res)
		dev_err(hba->dev,
			"%s fDeviceInit was not cleared by the device\n",
			__func__);

out:
	return err;
}

static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
{
	hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
	hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
	hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
	hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
	hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
	hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
	hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
}

int ufs_start(struct ufs_hba *hba)
{
	struct ufs_dev_desc card = {0};
	int ret;

	ret = ufshcd_link_startup(hba);
	if (ret)
		return ret;

	ret = ufshcd_verify_dev_init(hba);
	if (ret)
		return ret;

	ret = ufshcd_complete_dev_init(hba);
	if (ret)
		return ret;

	/* Init check for device descriptor sizes */
	ufshcd_init_desc_sizes(hba);

	ret = ufs_get_device_desc(hba, &card);
	if (ret) {
		dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
			__func__, ret);
		return ret;
	}

	if (ufshcd_get_max_pwr_mode(hba)) {
		dev_err(hba->dev,
			"%s: Failed getting max supported power mode\n",
			__func__);
	} else {
		ret = ufshcd_change_power_mode(hba, &hba->max_pwr_info.info);
		if (ret) {
			dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
				__func__, ret);
			return ret;
		}

		printf("Device at %s up at:", hba->dev->name);
		ufshcd_print_pwr_info(hba);
	}

	return 0;
}

int ufshcd_probe(struct udevice *ufs_dev, struct ufs_hba_ops *hba_ops)
{
	struct ufs_hba *hba = dev_get_uclass_priv(ufs_dev);
	struct scsi_plat *scsi_plat;
	struct udevice *scsi_dev;
	int err;

	device_find_first_child(ufs_dev, &scsi_dev);
	if (!scsi_dev)
		return -ENODEV;

	scsi_plat = dev_get_uclass_plat(scsi_dev);
	scsi_plat->max_id = UFSHCD_MAX_ID;
	scsi_plat->max_lun = UFS_MAX_LUNS;
	scsi_plat->max_bytes_per_req = UFS_MAX_BYTES;

	hba->dev = ufs_dev;
	hba->ops = hba_ops;
	hba->mmio_base = (void *)dev_read_addr(ufs_dev);

	/* Set descriptor lengths to specification defaults */
	ufshcd_def_desc_sizes(hba);

	ufshcd_ops_init(hba);

	/* Read capabilities registers */
	hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);

	/* Get UFS version supported by the controller */
	hba->version = ufshcd_get_ufs_version(hba);
	if (hba->version != UFSHCI_VERSION_10 &&
	    hba->version != UFSHCI_VERSION_11 &&
	    hba->version != UFSHCI_VERSION_20 &&
	    hba->version != UFSHCI_VERSION_21)
		dev_err(hba->dev, "invalid UFS version 0x%x\n",
			hba->version);

	/* Get Interrupt bit mask per version */
	hba->intr_mask = ufshcd_get_intr_mask(hba);

	/* Allocate memory for host memory space */
	err = ufshcd_memory_alloc(hba);
	if (err) {
		dev_err(hba->dev, "Memory allocation failed\n");
		return err;
	}

	/* Configure Local data structures */
	ufshcd_host_memory_configure(hba);

	/*
	 * In order to avoid any spurious interrupt immediately after
	 * registering UFS controller interrupt handler, clear any pending UFS
	 * interrupt status and disable all the UFS interrupts.
	 */
	ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
		      REG_INTERRUPT_STATUS);
	ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);

	err = ufshcd_hba_enable(hba);
	if (err) {
		dev_err(hba->dev, "Host controller enable failed\n");
		return err;
	}

	err = ufs_start(hba);
	if (err)
		return err;

	return 0;
}

int ufs_scsi_bind(struct udevice *ufs_dev, struct udevice **scsi_devp)
{
	int ret = device_bind_driver(ufs_dev, "ufs_scsi", "ufs_scsi",
				     scsi_devp);

	return ret;
}

static struct scsi_ops ufs_ops = {
	.exec		= ufs_scsi_exec,
};

int ufs_probe_dev(int index)
{
	struct udevice *dev;

	return uclass_get_device(UCLASS_UFS, index, &dev);
}

int ufs_probe(void)
{
	struct udevice *dev;
	int i, ret;

	for (i = 0;; i++) {
		ret = uclass_get_device(UCLASS_UFS, i, &dev);
		if (ret == -ENODEV)
			break;
	}

	return 0;
}

U_BOOT_DRIVER(ufs_scsi) = {
	.id = UCLASS_SCSI,
	.name = "ufs_scsi",
	.ops = &ufs_ops,
};