2 * Copyright 2008, Freescale Semiconductor, Inc
5 * Based vaguely on the Linux code
7 * SPDX-License-Identifier: GPL-2.0+
14 #include <dm/device-internal.h>
18 #include <power/regulator.h>
21 #include <linux/list.h>
23 #include "mmc_private.h"
25 static const unsigned int sd_au_size
[] = {
26 0, SZ_16K
/ 512, SZ_32K
/ 512,
27 SZ_64K
/ 512, SZ_128K
/ 512, SZ_256K
/ 512,
28 SZ_512K
/ 512, SZ_1M
/ 512, SZ_2M
/ 512,
29 SZ_4M
/ 512, SZ_8M
/ 512, (SZ_8M
+ SZ_4M
) / 512,
30 SZ_16M
/ 512, (SZ_16M
+ SZ_8M
) / 512, SZ_32M
/ 512, SZ_64M
/ 512,
/*
 * Forward declarations: these static helpers are defined later in this
 * file but are called by routines that appear before them (e.g.
 * mmc_switch_voltage() calls mmc_set_signal_voltage()).
 */
static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
static int mmc_power_cycle(struct mmc *mmc);
static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps);
37 #if CONFIG_IS_ENABLED(MMC_TINY)
38 static struct mmc mmc_static
;
39 struct mmc
*find_mmc_device(int dev_num
)
44 void mmc_do_preinit(void)
46 struct mmc
*m
= &mmc_static
;
47 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
48 mmc_set_preinit(m
, 1);
/*
 * mmc_get_blk_desc() - get the block-device descriptor of an MMC device
 *
 * @mmc:	MMC device
 * @return	pointer to the blk_desc embedded in @mmc
 */
struct blk_desc *mmc_get_blk_desc(struct mmc *mmc)
{
	return &mmc->block_dev;
}
60 #if !CONFIG_IS_ENABLED(DM_MMC)
62 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
63 static int mmc_wait_dat0(struct mmc
*mmc
, int state
, int timeout
)
69 __weak
int board_mmc_getwp(struct mmc
*mmc
)
74 int mmc_getwp(struct mmc
*mmc
)
78 wp
= board_mmc_getwp(mmc
);
81 if (mmc
->cfg
->ops
->getwp
)
82 wp
= mmc
->cfg
->ops
->getwp(mmc
);
90 __weak
int board_mmc_getcd(struct mmc
*mmc
)
96 #ifdef CONFIG_MMC_TRACE
97 void mmmc_trace_before_send(struct mmc
*mmc
, struct mmc_cmd
*cmd
)
99 printf("CMD_SEND:%d\n", cmd
->cmdidx
);
100 printf("\t\tARG\t\t\t 0x%08X\n", cmd
->cmdarg
);
103 void mmmc_trace_after_send(struct mmc
*mmc
, struct mmc_cmd
*cmd
, int ret
)
109 printf("\t\tRET\t\t\t %d\n", ret
);
111 switch (cmd
->resp_type
) {
113 printf("\t\tMMC_RSP_NONE\n");
116 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
120 printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
124 printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
126 printf("\t\t \t\t 0x%08X \n",
128 printf("\t\t \t\t 0x%08X \n",
130 printf("\t\t \t\t 0x%08X \n",
133 printf("\t\t\t\t\tDUMPING DATA\n");
134 for (i
= 0; i
< 4; i
++) {
136 printf("\t\t\t\t\t%03d - ", i
*4);
137 ptr
= (u8
*)&cmd
->response
[i
];
139 for (j
= 0; j
< 4; j
++)
140 printf("%02X ", *ptr
--);
145 printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
149 printf("\t\tERROR MMC rsp not supported\n");
155 void mmc_trace_state(struct mmc
*mmc
, struct mmc_cmd
*cmd
)
159 status
= (cmd
->response
[0] & MMC_STATUS_CURR_STATE
) >> 9;
160 printf("CURR STATE:%d\n", status
);
164 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
165 const char *mmc_mode_name(enum bus_mode mode
)
167 static const char *const names
[] = {
168 [MMC_LEGACY
] = "MMC legacy",
169 [SD_LEGACY
] = "SD Legacy",
170 [MMC_HS
] = "MMC High Speed (26MHz)",
171 [SD_HS
] = "SD High Speed (50MHz)",
172 [UHS_SDR12
] = "UHS SDR12 (25MHz)",
173 [UHS_SDR25
] = "UHS SDR25 (50MHz)",
174 [UHS_SDR50
] = "UHS SDR50 (100MHz)",
175 [UHS_SDR104
] = "UHS SDR104 (208MHz)",
176 [UHS_DDR50
] = "UHS DDR50 (50MHz)",
177 [MMC_HS_52
] = "MMC High Speed (52MHz)",
178 [MMC_DDR_52
] = "MMC DDR52 (52MHz)",
179 [MMC_HS_200
] = "HS200 (200MHz)",
182 if (mode
>= MMC_MODES_END
)
183 return "Unknown mode";
189 static uint
mmc_mode2freq(struct mmc
*mmc
, enum bus_mode mode
)
191 static const int freqs
[] = {
192 [SD_LEGACY
] = 25000000,
195 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
196 [UHS_SDR12
] = 25000000,
197 [UHS_SDR25
] = 50000000,
198 [UHS_SDR50
] = 100000000,
199 [UHS_DDR50
] = 50000000,
200 #ifdef MMC_SUPPORTS_TUNING
201 [UHS_SDR104
] = 208000000,
204 [MMC_HS_52
] = 52000000,
205 [MMC_DDR_52
] = 52000000,
206 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
207 [MMC_HS_200
] = 200000000,
211 if (mode
== MMC_LEGACY
)
212 return mmc
->legacy_speed
;
213 else if (mode
>= MMC_MODES_END
)
/*
 * mmc_select_mode() - record the chosen bus mode on the mmc struct
 *
 * Caches the transfer speed derived via mmc_mode2freq() and the DDR flag
 * derived via mmc_is_mode_ddr(); it does not touch the hardware itself.
 *
 * @mmc:	MMC device
 * @mode:	bus mode to select
 * @return	0
 */
static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
{
	mmc->selected_mode = mode;
	mmc->tran_speed = mmc_mode2freq(mmc, mode);
	mmc->ddr_mode = mmc_is_mode_ddr(mode);
	debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
	      mmc->tran_speed / 1000000);
	/* NOTE(review): trailing "return 0" reconstructed from the int return
	 * type and callers that ignore the result — confirm against original.
	 */
	return 0;
}
229 #if !CONFIG_IS_ENABLED(DM_MMC)
230 int mmc_send_cmd(struct mmc
*mmc
, struct mmc_cmd
*cmd
, struct mmc_data
*data
)
234 mmmc_trace_before_send(mmc
, cmd
);
235 ret
= mmc
->cfg
->ops
->send_cmd(mmc
, cmd
, data
);
236 mmmc_trace_after_send(mmc
, cmd
, ret
);
242 int mmc_send_status(struct mmc
*mmc
, int timeout
)
245 int err
, retries
= 5;
247 cmd
.cmdidx
= MMC_CMD_SEND_STATUS
;
248 cmd
.resp_type
= MMC_RSP_R1
;
249 if (!mmc_host_is_spi(mmc
))
250 cmd
.cmdarg
= mmc
->rca
<< 16;
253 err
= mmc_send_cmd(mmc
, &cmd
, NULL
);
255 if ((cmd
.response
[0] & MMC_STATUS_RDY_FOR_DATA
) &&
256 (cmd
.response
[0] & MMC_STATUS_CURR_STATE
) !=
260 if (cmd
.response
[0] & MMC_STATUS_MASK
) {
261 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
262 pr_err("Status Error: 0x%08X\n",
267 } else if (--retries
< 0)
276 mmc_trace_state(mmc
, &cmd
);
278 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
279 pr_err("Timeout waiting card ready\n");
287 int mmc_set_blocklen(struct mmc
*mmc
, int len
)
295 cmd
.cmdidx
= MMC_CMD_SET_BLOCKLEN
;
296 cmd
.resp_type
= MMC_RSP_R1
;
299 err
= mmc_send_cmd(mmc
, &cmd
, NULL
);
301 #ifdef CONFIG_MMC_QUIRKS
302 if (err
&& (mmc
->quirks
& MMC_QUIRK_RETRY_SET_BLOCKLEN
)) {
	 * It has been seen that SET_BLOCKLEN may fail on the first
	 * attempt, let's try a few more times
309 err
= mmc_send_cmd(mmc
, &cmd
, NULL
);
319 #ifdef MMC_SUPPORTS_TUNING
320 static const u8 tuning_blk_pattern_4bit
[] = {
321 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
322 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
323 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
324 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
325 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
326 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
327 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
328 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
331 static const u8 tuning_blk_pattern_8bit
[] = {
332 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
333 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
334 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
335 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
336 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
337 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
338 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
339 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
340 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
341 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
342 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
343 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
344 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
345 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
346 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
347 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
350 int mmc_send_tuning(struct mmc
*mmc
, u32 opcode
, int *cmd_error
)
353 struct mmc_data data
;
354 const u8
*tuning_block_pattern
;
357 if (mmc
->bus_width
== 8) {
358 tuning_block_pattern
= tuning_blk_pattern_8bit
;
359 size
= sizeof(tuning_blk_pattern_8bit
);
360 } else if (mmc
->bus_width
== 4) {
361 tuning_block_pattern
= tuning_blk_pattern_4bit
;
362 size
= sizeof(tuning_blk_pattern_4bit
);
367 ALLOC_CACHE_ALIGN_BUFFER(u8
, data_buf
, size
);
371 cmd
.resp_type
= MMC_RSP_R1
;
373 data
.dest
= (void *)data_buf
;
375 data
.blocksize
= size
;
376 data
.flags
= MMC_DATA_READ
;
378 err
= mmc_send_cmd(mmc
, &cmd
, &data
);
382 if (memcmp(data_buf
, tuning_block_pattern
, size
))
389 static int mmc_read_blocks(struct mmc
*mmc
, void *dst
, lbaint_t start
,
393 struct mmc_data data
;
396 cmd
.cmdidx
= MMC_CMD_READ_MULTIPLE_BLOCK
;
398 cmd
.cmdidx
= MMC_CMD_READ_SINGLE_BLOCK
;
400 if (mmc
->high_capacity
)
403 cmd
.cmdarg
= start
* mmc
->read_bl_len
;
405 cmd
.resp_type
= MMC_RSP_R1
;
408 data
.blocks
= blkcnt
;
409 data
.blocksize
= mmc
->read_bl_len
;
410 data
.flags
= MMC_DATA_READ
;
412 if (mmc_send_cmd(mmc
, &cmd
, &data
))
416 cmd
.cmdidx
= MMC_CMD_STOP_TRANSMISSION
;
418 cmd
.resp_type
= MMC_RSP_R1b
;
419 if (mmc_send_cmd(mmc
, &cmd
, NULL
)) {
420 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
421 pr_err("mmc fail to send stop cmd\n");
430 #if CONFIG_IS_ENABLED(BLK)
431 ulong
mmc_bread(struct udevice
*dev
, lbaint_t start
, lbaint_t blkcnt
, void *dst
)
433 ulong
mmc_bread(struct blk_desc
*block_dev
, lbaint_t start
, lbaint_t blkcnt
,
437 #if CONFIG_IS_ENABLED(BLK)
438 struct blk_desc
*block_dev
= dev_get_uclass_platdata(dev
);
440 int dev_num
= block_dev
->devnum
;
442 lbaint_t cur
, blocks_todo
= blkcnt
;
447 struct mmc
*mmc
= find_mmc_device(dev_num
);
451 if (CONFIG_IS_ENABLED(MMC_TINY
))
452 err
= mmc_switch_part(mmc
, block_dev
->hwpart
);
454 err
= blk_dselect_hwpart(block_dev
, block_dev
->hwpart
);
459 if ((start
+ blkcnt
) > block_dev
->lba
) {
460 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
461 pr_err("MMC: block number 0x" LBAF
" exceeds max(0x" LBAF
")\n",
462 start
+ blkcnt
, block_dev
->lba
);
467 if (mmc_set_blocklen(mmc
, mmc
->read_bl_len
)) {
468 debug("%s: Failed to set blocklen\n", __func__
);
473 cur
= (blocks_todo
> mmc
->cfg
->b_max
) ?
474 mmc
->cfg
->b_max
: blocks_todo
;
475 if (mmc_read_blocks(mmc
, dst
, start
, cur
) != cur
) {
476 debug("%s: Failed to read blocks\n", __func__
);
481 dst
+= cur
* mmc
->read_bl_len
;
482 } while (blocks_todo
> 0);
487 static int mmc_go_idle(struct mmc
*mmc
)
494 cmd
.cmdidx
= MMC_CMD_GO_IDLE_STATE
;
496 cmd
.resp_type
= MMC_RSP_NONE
;
498 err
= mmc_send_cmd(mmc
, &cmd
, NULL
);
508 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
509 static int mmc_switch_voltage(struct mmc
*mmc
, int signal_voltage
)
515 * Send CMD11 only if the request is to switch the card to
518 if (signal_voltage
== MMC_SIGNAL_VOLTAGE_330
)
519 return mmc_set_signal_voltage(mmc
, signal_voltage
);
521 cmd
.cmdidx
= SD_CMD_SWITCH_UHS18V
;
523 cmd
.resp_type
= MMC_RSP_R1
;
525 err
= mmc_send_cmd(mmc
, &cmd
, NULL
);
529 if (!mmc_host_is_spi(mmc
) && (cmd
.response
[0] & MMC_STATUS_ERROR
))
533 * The card should drive cmd and dat[0:3] low immediately
534 * after the response of cmd11, but wait 100 us to be sure
536 err
= mmc_wait_dat0(mmc
, 0, 100);
543 * During a signal voltage level switch, the clock must be gated
544 * for 5 ms according to the SD spec
546 mmc_set_clock(mmc
, mmc
->clock
, true);
548 err
= mmc_set_signal_voltage(mmc
, signal_voltage
);
552 /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
554 mmc_set_clock(mmc
, mmc
->clock
, false);
557 * Failure to switch is indicated by the card holding
558 * dat[0:3] low. Wait for at least 1 ms according to spec
560 err
= mmc_wait_dat0(mmc
, 1, 1000);
570 static int sd_send_op_cond(struct mmc
*mmc
, bool uhs_en
)
577 cmd
.cmdidx
= MMC_CMD_APP_CMD
;
578 cmd
.resp_type
= MMC_RSP_R1
;
581 err
= mmc_send_cmd(mmc
, &cmd
, NULL
);
586 cmd
.cmdidx
= SD_CMD_APP_SEND_OP_COND
;
587 cmd
.resp_type
= MMC_RSP_R3
;
590 * Most cards do not answer if some reserved bits
591 * in the ocr are set. However, Some controller
592 * can set bit 7 (reserved for low voltages), but
593 * how to manage low voltages SD card is not yet
596 cmd
.cmdarg
= mmc_host_is_spi(mmc
) ? 0 :
597 (mmc
->cfg
->voltages
& 0xff8000);
599 if (mmc
->version
== SD_VERSION_2
)
600 cmd
.cmdarg
|= OCR_HCS
;
603 cmd
.cmdarg
|= OCR_S18R
;
605 err
= mmc_send_cmd(mmc
, &cmd
, NULL
);
610 if (cmd
.response
[0] & OCR_BUSY
)
619 if (mmc
->version
!= SD_VERSION_2
)
620 mmc
->version
= SD_VERSION_1_0
;
622 if (mmc_host_is_spi(mmc
)) { /* read OCR for spi */
623 cmd
.cmdidx
= MMC_CMD_SPI_READ_OCR
;
624 cmd
.resp_type
= MMC_RSP_R3
;
627 err
= mmc_send_cmd(mmc
, &cmd
, NULL
);
633 mmc
->ocr
= cmd
.response
[0];
635 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
636 if (uhs_en
&& !(mmc_host_is_spi(mmc
)) && (cmd
.response
[0] & 0x41000000)
638 err
= mmc_switch_voltage(mmc
, MMC_SIGNAL_VOLTAGE_180
);
644 mmc
->high_capacity
= ((mmc
->ocr
& OCR_HCS
) == OCR_HCS
);
650 static int mmc_send_op_cond_iter(struct mmc
*mmc
, int use_arg
)
655 cmd
.cmdidx
= MMC_CMD_SEND_OP_COND
;
656 cmd
.resp_type
= MMC_RSP_R3
;
658 if (use_arg
&& !mmc_host_is_spi(mmc
))
659 cmd
.cmdarg
= OCR_HCS
|
660 (mmc
->cfg
->voltages
&
661 (mmc
->ocr
& OCR_VOLTAGE_MASK
)) |
662 (mmc
->ocr
& OCR_ACCESS_MODE
);
664 err
= mmc_send_cmd(mmc
, &cmd
, NULL
);
667 mmc
->ocr
= cmd
.response
[0];
671 static int mmc_send_op_cond(struct mmc
*mmc
)
675 /* Some cards seem to need this */
678 /* Asking to the card its capabilities */
679 for (i
= 0; i
< 2; i
++) {
680 err
= mmc_send_op_cond_iter(mmc
, i
!= 0);
684 /* exit if not busy (flag seems to be inverted) */
685 if (mmc
->ocr
& OCR_BUSY
)
688 mmc
->op_cond_pending
= 1;
692 static int mmc_complete_op_cond(struct mmc
*mmc
)
699 mmc
->op_cond_pending
= 0;
700 if (!(mmc
->ocr
& OCR_BUSY
)) {
701 /* Some cards seem to need this */
704 start
= get_timer(0);
706 err
= mmc_send_op_cond_iter(mmc
, 1);
709 if (mmc
->ocr
& OCR_BUSY
)
711 if (get_timer(start
) > timeout
)
717 if (mmc_host_is_spi(mmc
)) { /* read OCR for spi */
718 cmd
.cmdidx
= MMC_CMD_SPI_READ_OCR
;
719 cmd
.resp_type
= MMC_RSP_R3
;
722 err
= mmc_send_cmd(mmc
, &cmd
, NULL
);
727 mmc
->ocr
= cmd
.response
[0];
730 mmc
->version
= MMC_VERSION_UNKNOWN
;
732 mmc
->high_capacity
= ((mmc
->ocr
& OCR_HCS
) == OCR_HCS
);
739 static int mmc_send_ext_csd(struct mmc
*mmc
, u8
*ext_csd
)
742 struct mmc_data data
;
745 /* Get the Card Status Register */
746 cmd
.cmdidx
= MMC_CMD_SEND_EXT_CSD
;
747 cmd
.resp_type
= MMC_RSP_R1
;
750 data
.dest
= (char *)ext_csd
;
752 data
.blocksize
= MMC_MAX_BLOCK_LEN
;
753 data
.flags
= MMC_DATA_READ
;
755 err
= mmc_send_cmd(mmc
, &cmd
, &data
);
760 int mmc_switch(struct mmc
*mmc
, u8 set
, u8 index
, u8 value
)
767 cmd
.cmdidx
= MMC_CMD_SWITCH
;
768 cmd
.resp_type
= MMC_RSP_R1b
;
769 cmd
.cmdarg
= (MMC_SWITCH_MODE_WRITE_BYTE
<< 24) |
773 while (retries
> 0) {
774 ret
= mmc_send_cmd(mmc
, &cmd
, NULL
);
776 /* Waiting for the ready status */
778 ret
= mmc_send_status(mmc
, timeout
);
789 static int mmc_set_card_speed(struct mmc
*mmc
, enum bus_mode mode
)
794 ALLOC_CACHE_ALIGN_BUFFER(u8
, test_csd
, MMC_MAX_BLOCK_LEN
);
800 speed_bits
= EXT_CSD_TIMING_HS
;
803 speed_bits
= EXT_CSD_TIMING_HS200
;
806 speed_bits
= EXT_CSD_TIMING_LEGACY
;
811 err
= mmc_switch(mmc
, EXT_CSD_CMD_SET_NORMAL
, EXT_CSD_HS_TIMING
,
816 if ((mode
== MMC_HS
) || (mode
== MMC_HS_52
)) {
817 /* Now check to see that it worked */
818 err
= mmc_send_ext_csd(mmc
, test_csd
);
822 /* No high-speed support */
823 if (!test_csd
[EXT_CSD_HS_TIMING
])
830 static int mmc_get_capabilities(struct mmc
*mmc
)
832 u8
*ext_csd
= mmc
->ext_csd
;
835 mmc
->card_caps
= MMC_MODE_1BIT
| MMC_CAP(MMC_LEGACY
);
837 if (mmc_host_is_spi(mmc
))
840 /* Only version 4 supports high-speed */
841 if (mmc
->version
< MMC_VERSION_4
)
		pr_err("No ext_csd found!\n"); /* this should never happen */
849 mmc
->card_caps
|= MMC_MODE_4BIT
| MMC_MODE_8BIT
;
851 cardtype
= ext_csd
[EXT_CSD_CARD_TYPE
] & 0x3f;
852 mmc
->cardtype
= cardtype
;
854 if (cardtype
& (EXT_CSD_CARD_TYPE_HS200_1_2V
|
855 EXT_CSD_CARD_TYPE_HS200_1_8V
)) {
856 mmc
->card_caps
|= MMC_MODE_HS200
;
858 if (cardtype
& EXT_CSD_CARD_TYPE_52
) {
859 if (cardtype
& EXT_CSD_CARD_TYPE_DDR_52
)
860 mmc
->card_caps
|= MMC_MODE_DDR_52MHz
;
861 mmc
->card_caps
|= MMC_MODE_HS_52MHz
;
863 if (cardtype
& EXT_CSD_CARD_TYPE_26
)
864 mmc
->card_caps
|= MMC_MODE_HS
;
869 static int mmc_set_capacity(struct mmc
*mmc
, int part_num
)
873 mmc
->capacity
= mmc
->capacity_user
;
877 mmc
->capacity
= mmc
->capacity_boot
;
880 mmc
->capacity
= mmc
->capacity_rpmb
;
886 mmc
->capacity
= mmc
->capacity_gp
[part_num
- 4];
892 mmc_get_blk_desc(mmc
)->lba
= lldiv(mmc
->capacity
, mmc
->read_bl_len
);
897 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
898 static int mmc_boot_part_access_chk(struct mmc
*mmc
, unsigned int part_num
)
903 if (part_num
& PART_ACCESS_MASK
)
904 forbidden
= MMC_CAP(MMC_HS_200
);
906 if (MMC_CAP(mmc
->selected_mode
) & forbidden
) {
907 debug("selected mode (%s) is forbidden for part %d\n",
908 mmc_mode_name(mmc
->selected_mode
), part_num
);
910 } else if (mmc
->selected_mode
!= mmc
->best_mode
) {
911 debug("selected mode is not optimal\n");
916 return mmc_select_mode_and_width(mmc
,
917 mmc
->card_caps
& ~forbidden
);
922 static inline int mmc_boot_part_access_chk(struct mmc
*mmc
,
923 unsigned int part_num
)
929 int mmc_switch_part(struct mmc
*mmc
, unsigned int part_num
)
933 ret
= mmc_boot_part_access_chk(mmc
, part_num
);
937 ret
= mmc_switch(mmc
, EXT_CSD_CMD_SET_NORMAL
, EXT_CSD_PART_CONF
,
938 (mmc
->part_config
& ~PART_ACCESS_MASK
)
939 | (part_num
& PART_ACCESS_MASK
));
942 * Set the capacity if the switch succeeded or was intended
943 * to return to representing the raw device.
945 if ((ret
== 0) || ((ret
== -ENODEV
) && (part_num
== 0))) {
946 ret
= mmc_set_capacity(mmc
, part_num
);
947 mmc_get_blk_desc(mmc
)->hwpart
= part_num
;
953 int mmc_hwpart_config(struct mmc
*mmc
,
954 const struct mmc_hwpart_conf
*conf
,
955 enum mmc_hwpart_conf_mode mode
)
961 u32 max_enh_size_mult
;
962 u32 tot_enh_size_mult
= 0;
965 ALLOC_CACHE_ALIGN_BUFFER(u8
, ext_csd
, MMC_MAX_BLOCK_LEN
);
967 if (mode
< MMC_HWPART_CONF_CHECK
|| mode
> MMC_HWPART_CONF_COMPLETE
)
970 if (IS_SD(mmc
) || (mmc
->version
< MMC_VERSION_4_41
)) {
971 pr_err("eMMC >= 4.4 required for enhanced user data area\n");
975 if (!(mmc
->part_support
& PART_SUPPORT
)) {
976 pr_err("Card does not support partitioning\n");
980 if (!mmc
->hc_wp_grp_size
) {
981 pr_err("Card does not define HC WP group size\n");
985 /* check partition alignment and total enhanced size */
986 if (conf
->user
.enh_size
) {
987 if (conf
->user
.enh_size
% mmc
->hc_wp_grp_size
||
988 conf
->user
.enh_start
% mmc
->hc_wp_grp_size
) {
989 pr_err("User data enhanced area not HC WP group "
993 part_attrs
|= EXT_CSD_ENH_USR
;
994 enh_size_mult
= conf
->user
.enh_size
/ mmc
->hc_wp_grp_size
;
995 if (mmc
->high_capacity
) {
996 enh_start_addr
= conf
->user
.enh_start
;
998 enh_start_addr
= (conf
->user
.enh_start
<< 9);
1004 tot_enh_size_mult
+= enh_size_mult
;
1006 for (pidx
= 0; pidx
< 4; pidx
++) {
1007 if (conf
->gp_part
[pidx
].size
% mmc
->hc_wp_grp_size
) {
1008 pr_err("GP%i partition not HC WP group size "
1009 "aligned\n", pidx
+1);
1012 gp_size_mult
[pidx
] = conf
->gp_part
[pidx
].size
/ mmc
->hc_wp_grp_size
;
1013 if (conf
->gp_part
[pidx
].size
&& conf
->gp_part
[pidx
].enhanced
) {
1014 part_attrs
|= EXT_CSD_ENH_GP(pidx
);
1015 tot_enh_size_mult
+= gp_size_mult
[pidx
];
1019 if (part_attrs
&& ! (mmc
->part_support
& ENHNCD_SUPPORT
)) {
1020 pr_err("Card does not support enhanced attribute\n");
1021 return -EMEDIUMTYPE
;
1024 err
= mmc_send_ext_csd(mmc
, ext_csd
);
1029 (ext_csd
[EXT_CSD_MAX_ENH_SIZE_MULT
+2] << 16) +
1030 (ext_csd
[EXT_CSD_MAX_ENH_SIZE_MULT
+1] << 8) +
1031 ext_csd
[EXT_CSD_MAX_ENH_SIZE_MULT
];
1032 if (tot_enh_size_mult
> max_enh_size_mult
) {
1033 pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
1034 tot_enh_size_mult
, max_enh_size_mult
);
1035 return -EMEDIUMTYPE
;
1038 /* The default value of EXT_CSD_WR_REL_SET is device
1039 * dependent, the values can only be changed if the
1040 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
1041 * changed only once and before partitioning is completed. */
1042 wr_rel_set
= ext_csd
[EXT_CSD_WR_REL_SET
];
1043 if (conf
->user
.wr_rel_change
) {
1044 if (conf
->user
.wr_rel_set
)
1045 wr_rel_set
|= EXT_CSD_WR_DATA_REL_USR
;
1047 wr_rel_set
&= ~EXT_CSD_WR_DATA_REL_USR
;
1049 for (pidx
= 0; pidx
< 4; pidx
++) {
1050 if (conf
->gp_part
[pidx
].wr_rel_change
) {
1051 if (conf
->gp_part
[pidx
].wr_rel_set
)
1052 wr_rel_set
|= EXT_CSD_WR_DATA_REL_GP(pidx
);
1054 wr_rel_set
&= ~EXT_CSD_WR_DATA_REL_GP(pidx
);
1058 if (wr_rel_set
!= ext_csd
[EXT_CSD_WR_REL_SET
] &&
1059 !(ext_csd
[EXT_CSD_WR_REL_PARAM
] & EXT_CSD_HS_CTRL_REL
)) {
1060 puts("Card does not support host controlled partition write "
1061 "reliability settings\n");
1062 return -EMEDIUMTYPE
;
1065 if (ext_csd
[EXT_CSD_PARTITION_SETTING
] &
1066 EXT_CSD_PARTITION_SETTING_COMPLETED
) {
1067 pr_err("Card already partitioned\n");
1071 if (mode
== MMC_HWPART_CONF_CHECK
)
1074 /* Partitioning requires high-capacity size definitions */
1075 if (!(ext_csd
[EXT_CSD_ERASE_GROUP_DEF
] & 0x01)) {
1076 err
= mmc_switch(mmc
, EXT_CSD_CMD_SET_NORMAL
,
1077 EXT_CSD_ERASE_GROUP_DEF
, 1);
1082 ext_csd
[EXT_CSD_ERASE_GROUP_DEF
] = 1;
1084 /* update erase group size to be high-capacity */
1085 mmc
->erase_grp_size
=
1086 ext_csd
[EXT_CSD_HC_ERASE_GRP_SIZE
] * 1024;
1090 /* all OK, write the configuration */
1091 for (i
= 0; i
< 4; i
++) {
1092 err
= mmc_switch(mmc
, EXT_CSD_CMD_SET_NORMAL
,
1093 EXT_CSD_ENH_START_ADDR
+i
,
1094 (enh_start_addr
>> (i
*8)) & 0xFF);
1098 for (i
= 0; i
< 3; i
++) {
1099 err
= mmc_switch(mmc
, EXT_CSD_CMD_SET_NORMAL
,
1100 EXT_CSD_ENH_SIZE_MULT
+i
,
1101 (enh_size_mult
>> (i
*8)) & 0xFF);
1105 for (pidx
= 0; pidx
< 4; pidx
++) {
1106 for (i
= 0; i
< 3; i
++) {
1107 err
= mmc_switch(mmc
, EXT_CSD_CMD_SET_NORMAL
,
1108 EXT_CSD_GP_SIZE_MULT
+pidx
*3+i
,
1109 (gp_size_mult
[pidx
] >> (i
*8)) & 0xFF);
1114 err
= mmc_switch(mmc
, EXT_CSD_CMD_SET_NORMAL
,
1115 EXT_CSD_PARTITIONS_ATTRIBUTE
, part_attrs
);
1119 if (mode
== MMC_HWPART_CONF_SET
)
1122 /* The WR_REL_SET is a write-once register but shall be
1123 * written before setting PART_SETTING_COMPLETED. As it is
1124 * write-once we can only write it when completing the
1126 if (wr_rel_set
!= ext_csd
[EXT_CSD_WR_REL_SET
]) {
1127 err
= mmc_switch(mmc
, EXT_CSD_CMD_SET_NORMAL
,
1128 EXT_CSD_WR_REL_SET
, wr_rel_set
);
1133 /* Setting PART_SETTING_COMPLETED confirms the partition
1134 * configuration but it only becomes effective after power
1135 * cycle, so we do not adjust the partition related settings
1136 * in the mmc struct. */
1138 err
= mmc_switch(mmc
, EXT_CSD_CMD_SET_NORMAL
,
1139 EXT_CSD_PARTITION_SETTING
,
1140 EXT_CSD_PARTITION_SETTING_COMPLETED
);
1147 #if !CONFIG_IS_ENABLED(DM_MMC)
1148 int mmc_getcd(struct mmc
*mmc
)
1152 cd
= board_mmc_getcd(mmc
);
1155 if (mmc
->cfg
->ops
->getcd
)
1156 cd
= mmc
->cfg
->ops
->getcd(mmc
);
1165 static int sd_switch(struct mmc
*mmc
, int mode
, int group
, u8 value
, u8
*resp
)
1168 struct mmc_data data
;
1170 /* Switch the frequency */
1171 cmd
.cmdidx
= SD_CMD_SWITCH_FUNC
;
1172 cmd
.resp_type
= MMC_RSP_R1
;
1173 cmd
.cmdarg
= (mode
<< 31) | 0xffffff;
1174 cmd
.cmdarg
&= ~(0xf << (group
* 4));
1175 cmd
.cmdarg
|= value
<< (group
* 4);
1177 data
.dest
= (char *)resp
;
1178 data
.blocksize
= 64;
1180 data
.flags
= MMC_DATA_READ
;
1182 return mmc_send_cmd(mmc
, &cmd
, &data
);
1186 static int sd_get_capabilities(struct mmc
*mmc
)
1190 ALLOC_CACHE_ALIGN_BUFFER(__be32
, scr
, 2);
1191 ALLOC_CACHE_ALIGN_BUFFER(__be32
, switch_status
, 16);
1192 struct mmc_data data
;
1194 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1198 mmc
->card_caps
= MMC_MODE_1BIT
| MMC_CAP(SD_LEGACY
);
1200 if (mmc_host_is_spi(mmc
))
1203 /* Read the SCR to find out if this card supports higher speeds */
1204 cmd
.cmdidx
= MMC_CMD_APP_CMD
;
1205 cmd
.resp_type
= MMC_RSP_R1
;
1206 cmd
.cmdarg
= mmc
->rca
<< 16;
1208 err
= mmc_send_cmd(mmc
, &cmd
, NULL
);
1213 cmd
.cmdidx
= SD_CMD_APP_SEND_SCR
;
1214 cmd
.resp_type
= MMC_RSP_R1
;
1220 data
.dest
= (char *)scr
;
1223 data
.flags
= MMC_DATA_READ
;
1225 err
= mmc_send_cmd(mmc
, &cmd
, &data
);
1234 mmc
->scr
[0] = __be32_to_cpu(scr
[0]);
1235 mmc
->scr
[1] = __be32_to_cpu(scr
[1]);
1237 switch ((mmc
->scr
[0] >> 24) & 0xf) {
1239 mmc
->version
= SD_VERSION_1_0
;
1242 mmc
->version
= SD_VERSION_1_10
;
1245 mmc
->version
= SD_VERSION_2
;
1246 if ((mmc
->scr
[0] >> 15) & 0x1)
1247 mmc
->version
= SD_VERSION_3
;
1250 mmc
->version
= SD_VERSION_1_0
;
1254 if (mmc
->scr
[0] & SD_DATA_4BIT
)
1255 mmc
->card_caps
|= MMC_MODE_4BIT
;
1257 /* Version 1.0 doesn't support switching */
1258 if (mmc
->version
== SD_VERSION_1_0
)
1263 err
= sd_switch(mmc
, SD_SWITCH_CHECK
, 0, 1,
1264 (u8
*)switch_status
);
1269 /* The high-speed function is busy. Try again */
1270 if (!(__be32_to_cpu(switch_status
[7]) & SD_HIGHSPEED_BUSY
))
1274 /* If high-speed isn't supported, we return */
1275 if (__be32_to_cpu(switch_status
[3]) & SD_HIGHSPEED_SUPPORTED
)
1276 mmc
->card_caps
|= MMC_CAP(SD_HS
);
1278 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1279 /* Version before 3.0 don't support UHS modes */
1280 if (mmc
->version
< SD_VERSION_3
)
1283 sd3_bus_mode
= __be32_to_cpu(switch_status
[3]) >> 16 & 0x1f;
1284 if (sd3_bus_mode
& SD_MODE_UHS_SDR104
)
1285 mmc
->card_caps
|= MMC_CAP(UHS_SDR104
);
1286 if (sd3_bus_mode
& SD_MODE_UHS_SDR50
)
1287 mmc
->card_caps
|= MMC_CAP(UHS_SDR50
);
1288 if (sd3_bus_mode
& SD_MODE_UHS_SDR25
)
1289 mmc
->card_caps
|= MMC_CAP(UHS_SDR25
);
1290 if (sd3_bus_mode
& SD_MODE_UHS_SDR12
)
1291 mmc
->card_caps
|= MMC_CAP(UHS_SDR12
);
1292 if (sd3_bus_mode
& SD_MODE_UHS_DDR50
)
1293 mmc
->card_caps
|= MMC_CAP(UHS_DDR50
);
1299 static int sd_set_card_speed(struct mmc
*mmc
, enum bus_mode mode
)
1303 ALLOC_CACHE_ALIGN_BUFFER(uint
, switch_status
, 16);
1309 speed
= UHS_SDR12_BUS_SPEED
;
1313 speed
= UHS_SDR25_BUS_SPEED
;
1316 speed
= UHS_SDR50_BUS_SPEED
;
1319 speed
= UHS_DDR50_BUS_SPEED
;
1322 speed
= UHS_SDR104_BUS_SPEED
;
1328 err
= sd_switch(mmc
, SD_SWITCH_SWITCH
, 0, speed
, (u8
*)switch_status
);
1332 if ((__be32_to_cpu(switch_status
[4]) >> 24) != speed
)
1338 int sd_select_bus_width(struct mmc
*mmc
, int w
)
1343 if ((w
!= 4) && (w
!= 1))
1346 cmd
.cmdidx
= MMC_CMD_APP_CMD
;
1347 cmd
.resp_type
= MMC_RSP_R1
;
1348 cmd
.cmdarg
= mmc
->rca
<< 16;
1350 err
= mmc_send_cmd(mmc
, &cmd
, NULL
);
1354 cmd
.cmdidx
= SD_CMD_APP_SET_BUS_WIDTH
;
1355 cmd
.resp_type
= MMC_RSP_R1
;
1360 err
= mmc_send_cmd(mmc
, &cmd
, NULL
);
1367 static int sd_read_ssr(struct mmc
*mmc
)
1371 ALLOC_CACHE_ALIGN_BUFFER(uint
, ssr
, 16);
1372 struct mmc_data data
;
1374 unsigned int au
, eo
, et
, es
;
1376 cmd
.cmdidx
= MMC_CMD_APP_CMD
;
1377 cmd
.resp_type
= MMC_RSP_R1
;
1378 cmd
.cmdarg
= mmc
->rca
<< 16;
1380 err
= mmc_send_cmd(mmc
, &cmd
, NULL
);
1384 cmd
.cmdidx
= SD_CMD_APP_SD_STATUS
;
1385 cmd
.resp_type
= MMC_RSP_R1
;
1389 data
.dest
= (char *)ssr
;
1390 data
.blocksize
= 64;
1392 data
.flags
= MMC_DATA_READ
;
1394 err
= mmc_send_cmd(mmc
, &cmd
, &data
);
1402 for (i
= 0; i
< 16; i
++)
1403 ssr
[i
] = be32_to_cpu(ssr
[i
]);
1405 au
= (ssr
[2] >> 12) & 0xF;
1406 if ((au
<= 9) || (mmc
->version
== SD_VERSION_3
)) {
1407 mmc
->ssr
.au
= sd_au_size
[au
];
1408 es
= (ssr
[3] >> 24) & 0xFF;
1409 es
|= (ssr
[2] & 0xFF) << 8;
1410 et
= (ssr
[3] >> 18) & 0x3F;
1412 eo
= (ssr
[3] >> 16) & 0x3;
1413 mmc
->ssr
.erase_timeout
= (et
* 1000) / es
;
1414 mmc
->ssr
.erase_offset
= eo
* 1000;
1417 debug("Invalid Allocation Unit Size.\n");
1423 /* frequency bases */
1424 /* divided by 10 to be nice to platforms without floating point */
1425 static const int fbase
[] = {
1432 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
1433 * to platforms without floating point.
1435 static const u8 multipliers
[] = {
1454 static inline int bus_width(uint cap
)
1456 if (cap
== MMC_MODE_8BIT
)
1458 if (cap
== MMC_MODE_4BIT
)
1460 if (cap
== MMC_MODE_1BIT
)
1462 pr_warn("invalid bus witdh capability 0x%x\n", cap
);
1466 #if !CONFIG_IS_ENABLED(DM_MMC)
1467 #ifdef MMC_SUPPORTS_TUNING
1468 static int mmc_execute_tuning(struct mmc
*mmc
, uint opcode
)
1474 static void mmc_send_init_stream(struct mmc
*mmc
)
1478 static int mmc_set_ios(struct mmc
*mmc
)
1482 if (mmc
->cfg
->ops
->set_ios
)
1483 ret
= mmc
->cfg
->ops
->set_ios(mmc
);
1489 int mmc_set_clock(struct mmc
*mmc
, uint clock
, bool disable
)
1491 if (clock
> mmc
->cfg
->f_max
)
1492 clock
= mmc
->cfg
->f_max
;
1494 if (clock
< mmc
->cfg
->f_min
)
1495 clock
= mmc
->cfg
->f_min
;
1498 mmc
->clk_disable
= disable
;
1500 return mmc_set_ios(mmc
);
/*
 * mmc_set_bus_width() - set the host bus width and push it to the driver
 *
 * Stores @width in mmc->bus_width and applies it through mmc_set_ios().
 *
 * @mmc:	MMC device
 * @width:	bus width (number of data lines)
 * @return	result of mmc_set_ios()
 */
static int mmc_set_bus_width(struct mmc *mmc, uint width)
{
	mmc->bus_width = width;

	return mmc_set_ios(mmc);
}
1510 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1512 * helper function to display the capabilities in a human
1513 * friendly manner. The capabilities include bus width and
1516 void mmc_dump_capabilities(const char *text
, uint caps
)
1520 printf("%s: widths [", text
);
1521 if (caps
& MMC_MODE_8BIT
)
1523 if (caps
& MMC_MODE_4BIT
)
1525 if (caps
& MMC_MODE_1BIT
)
1527 printf("\b\b] modes [");
1528 for (mode
= MMC_LEGACY
; mode
< MMC_MODES_END
; mode
++)
1529 if (MMC_CAP(mode
) & caps
)
1530 printf("%s, ", mmc_mode_name(mode
));
1535 struct mode_width_tuning
{
1538 #ifdef MMC_SUPPORTS_TUNING
1543 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
1544 int mmc_voltage_to_mv(enum mmc_voltage voltage
)
1547 case MMC_SIGNAL_VOLTAGE_000
: return 0;
1548 case MMC_SIGNAL_VOLTAGE_330
: return 3300;
1549 case MMC_SIGNAL_VOLTAGE_180
: return 1800;
1550 case MMC_SIGNAL_VOLTAGE_120
: return 1200;
1555 static int mmc_set_signal_voltage(struct mmc
*mmc
, uint signal_voltage
)
1559 if (mmc
->signal_voltage
== signal_voltage
)
1562 mmc
->signal_voltage
= signal_voltage
;
1563 err
= mmc_set_ios(mmc
);
1565 debug("unable to set voltage (err %d)\n", err
);
1570 static inline int mmc_set_signal_voltage(struct mmc
*mmc
, uint signal_voltage
)
1576 static const struct mode_width_tuning sd_modes_by_pref
[] = {
1577 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1578 #ifdef MMC_SUPPORTS_TUNING
1581 .widths
= MMC_MODE_4BIT
| MMC_MODE_1BIT
,
1582 .tuning
= MMC_CMD_SEND_TUNING_BLOCK
1587 .widths
= MMC_MODE_4BIT
| MMC_MODE_1BIT
,
1591 .widths
= MMC_MODE_4BIT
| MMC_MODE_1BIT
,
1595 .widths
= MMC_MODE_4BIT
| MMC_MODE_1BIT
,
1600 .widths
= MMC_MODE_4BIT
| MMC_MODE_1BIT
,
1602 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1605 .widths
= MMC_MODE_4BIT
| MMC_MODE_1BIT
,
1610 .widths
= MMC_MODE_4BIT
| MMC_MODE_1BIT
,
1614 #define for_each_sd_mode_by_pref(caps, mwt) \
1615 for (mwt = sd_modes_by_pref;\
1616 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1618 if (caps & MMC_CAP(mwt->mode))
1620 static int sd_select_mode_and_width(struct mmc
*mmc
, uint card_caps
)
1623 uint widths
[] = {MMC_MODE_4BIT
, MMC_MODE_1BIT
};
1624 const struct mode_width_tuning
*mwt
;
1625 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1626 bool uhs_en
= (mmc
->ocr
& OCR_S18R
) ? true : false;
1628 bool uhs_en
= false;
1633 mmc_dump_capabilities("sd card", card_caps
);
1634 mmc_dump_capabilities("host", mmc
->host_caps
);
1637 /* Restrict card's capabilities by what the host can do */
1638 caps
= card_caps
& mmc
->host_caps
;
1643 for_each_sd_mode_by_pref(caps
, mwt
) {
1646 for (w
= widths
; w
< widths
+ ARRAY_SIZE(widths
); w
++) {
1647 if (*w
& caps
& mwt
->widths
) {
1648 debug("trying mode %s width %d (at %d MHz)\n",
1649 mmc_mode_name(mwt
->mode
),
1651 mmc_mode2freq(mmc
, mwt
->mode
) / 1000000);
1653 /* configure the bus width (card + host) */
1654 err
= sd_select_bus_width(mmc
, bus_width(*w
));
1657 mmc_set_bus_width(mmc
, bus_width(*w
));
1659 /* configure the bus mode (card) */
1660 err
= sd_set_card_speed(mmc
, mwt
->mode
);
1664 /* configure the bus mode (host) */
1665 mmc_select_mode(mmc
, mwt
->mode
);
1666 mmc_set_clock(mmc
, mmc
->tran_speed
, false);
1668 #ifdef MMC_SUPPORTS_TUNING
1669 /* execute tuning if needed */
1670 if (mwt
->tuning
&& !mmc_host_is_spi(mmc
)) {
1671 err
= mmc_execute_tuning(mmc
,
1674 debug("tuning failed\n");
1680 err
= sd_read_ssr(mmc
);
1684 pr_warn("bad ssr\n");
1687 /* revert to a safer bus speed */
1688 mmc_select_mode(mmc
, SD_LEGACY
);
1689 mmc_set_clock(mmc
, mmc
->tran_speed
, false);
1694 printf("unable to select a mode\n");
1699 * read the compare the part of ext csd that is constant.
1700 * This can be used to check that the transfer is working
1703 static int mmc_read_and_compare_ext_csd(struct mmc
*mmc
)
1706 const u8
*ext_csd
= mmc
->ext_csd
;
1707 ALLOC_CACHE_ALIGN_BUFFER(u8
, test_csd
, MMC_MAX_BLOCK_LEN
);
1709 if (mmc
->version
< MMC_VERSION_4
)
1712 err
= mmc_send_ext_csd(mmc
, test_csd
);
1716 /* Only compare read only fields */
1717 if (ext_csd
[EXT_CSD_PARTITIONING_SUPPORT
]
1718 == test_csd
[EXT_CSD_PARTITIONING_SUPPORT
] &&
1719 ext_csd
[EXT_CSD_HC_WP_GRP_SIZE
]
1720 == test_csd
[EXT_CSD_HC_WP_GRP_SIZE
] &&
1721 ext_csd
[EXT_CSD_REV
]
1722 == test_csd
[EXT_CSD_REV
] &&
1723 ext_csd
[EXT_CSD_HC_ERASE_GRP_SIZE
]
1724 == test_csd
[EXT_CSD_HC_ERASE_GRP_SIZE
] &&
1725 memcmp(&ext_csd
[EXT_CSD_SEC_CNT
],
1726 &test_csd
[EXT_CSD_SEC_CNT
], 4) == 0)
1732 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
1733 static int mmc_set_lowest_voltage(struct mmc
*mmc
, enum bus_mode mode
,
1734 uint32_t allowed_mask
)
1740 if (mmc
->cardtype
& EXT_CSD_CARD_TYPE_HS200_1_8V
)
1741 card_mask
|= MMC_SIGNAL_VOLTAGE_180
;
1742 if (mmc
->cardtype
& EXT_CSD_CARD_TYPE_HS200_1_2V
)
1743 card_mask
|= MMC_SIGNAL_VOLTAGE_120
;
1746 if (mmc
->cardtype
& EXT_CSD_CARD_TYPE_DDR_1_8V
)
1747 card_mask
|= MMC_SIGNAL_VOLTAGE_330
|
1748 MMC_SIGNAL_VOLTAGE_180
;
1749 if (mmc
->cardtype
& EXT_CSD_CARD_TYPE_DDR_1_2V
)
1750 card_mask
|= MMC_SIGNAL_VOLTAGE_120
;
1753 card_mask
|= MMC_SIGNAL_VOLTAGE_330
;
1757 while (card_mask
& allowed_mask
) {
1758 enum mmc_voltage best_match
;
1760 best_match
= 1 << (ffs(card_mask
& allowed_mask
) - 1);
1761 if (!mmc_set_signal_voltage(mmc
, best_match
))
1764 allowed_mask
&= ~best_match
;
1770 static inline int mmc_set_lowest_voltage(struct mmc
*mmc
, enum bus_mode mode
,
1771 uint32_t allowed_mask
)
1777 static const struct mode_width_tuning mmc_modes_by_pref
[] = {
1778 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
1781 .widths
= MMC_MODE_8BIT
| MMC_MODE_4BIT
,
1782 .tuning
= MMC_CMD_SEND_TUNING_BLOCK_HS200
1787 .widths
= MMC_MODE_8BIT
| MMC_MODE_4BIT
,
1791 .widths
= MMC_MODE_8BIT
| MMC_MODE_4BIT
| MMC_MODE_1BIT
,
1795 .widths
= MMC_MODE_8BIT
| MMC_MODE_4BIT
| MMC_MODE_1BIT
,
1799 .widths
= MMC_MODE_8BIT
| MMC_MODE_4BIT
| MMC_MODE_1BIT
,
1803 #define for_each_mmc_mode_by_pref(caps, mwt) \
1804 for (mwt = mmc_modes_by_pref;\
1805 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1807 if (caps & MMC_CAP(mwt->mode))
1809 static const struct ext_csd_bus_width
{
1813 } ext_csd_bus_width
[] = {
1814 {MMC_MODE_8BIT
, true, EXT_CSD_DDR_BUS_WIDTH_8
},
1815 {MMC_MODE_4BIT
, true, EXT_CSD_DDR_BUS_WIDTH_4
},
1816 {MMC_MODE_8BIT
, false, EXT_CSD_BUS_WIDTH_8
},
1817 {MMC_MODE_4BIT
, false, EXT_CSD_BUS_WIDTH_4
},
1818 {MMC_MODE_1BIT
, false, EXT_CSD_BUS_WIDTH_1
},
1821 #define for_each_supported_width(caps, ddr, ecbv) \
1822 for (ecbv = ext_csd_bus_width;\
1823 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
1825 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
1827 static int mmc_select_mode_and_width(struct mmc
*mmc
, uint card_caps
)
1830 const struct mode_width_tuning
*mwt
;
1831 const struct ext_csd_bus_width
*ecbw
;
1834 mmc_dump_capabilities("mmc", card_caps
);
1835 mmc_dump_capabilities("host", mmc
->host_caps
);
1838 /* Restrict card's capabilities by what the host can do */
1839 card_caps
&= mmc
->host_caps
;
1841 /* Only version 4 of MMC supports wider bus widths */
1842 if (mmc
->version
< MMC_VERSION_4
)
1845 if (!mmc
->ext_csd
) {
1846 debug("No ext_csd found!\n"); /* this should enver happen */
1850 mmc_set_clock(mmc
, mmc
->legacy_speed
, false);
1852 for_each_mmc_mode_by_pref(card_caps
, mwt
) {
1853 for_each_supported_width(card_caps
& mwt
->widths
,
1854 mmc_is_mode_ddr(mwt
->mode
), ecbw
) {
1855 enum mmc_voltage old_voltage
;
1856 debug("trying mode %s width %d (at %d MHz)\n",
1857 mmc_mode_name(mwt
->mode
),
1858 bus_width(ecbw
->cap
),
1859 mmc_mode2freq(mmc
, mwt
->mode
) / 1000000);
1860 old_voltage
= mmc
->signal_voltage
;
1861 err
= mmc_set_lowest_voltage(mmc
, mwt
->mode
,
1862 MMC_ALL_SIGNAL_VOLTAGE
);
1866 /* configure the bus width (card + host) */
1867 err
= mmc_switch(mmc
, EXT_CSD_CMD_SET_NORMAL
,
1869 ecbw
->ext_csd_bits
& ~EXT_CSD_DDR_FLAG
);
1872 mmc_set_bus_width(mmc
, bus_width(ecbw
->cap
));
1874 /* configure the bus speed (card) */
1875 err
= mmc_set_card_speed(mmc
, mwt
->mode
);
1880 * configure the bus width AND the ddr mode (card)
1881 * The host side will be taken care of in the next step
1883 if (ecbw
->ext_csd_bits
& EXT_CSD_DDR_FLAG
) {
1884 err
= mmc_switch(mmc
, EXT_CSD_CMD_SET_NORMAL
,
1886 ecbw
->ext_csd_bits
);
1891 /* configure the bus mode (host) */
1892 mmc_select_mode(mmc
, mwt
->mode
);
1893 mmc_set_clock(mmc
, mmc
->tran_speed
, false);
1894 #ifdef MMC_SUPPORTS_TUNING
1896 /* execute tuning if needed */
1898 err
= mmc_execute_tuning(mmc
, mwt
->tuning
);
1900 debug("tuning failed\n");
1906 /* do a transfer to check the configuration */
1907 err
= mmc_read_and_compare_ext_csd(mmc
);
1911 mmc_set_signal_voltage(mmc
, old_voltage
);
1912 /* if an error occured, revert to a safer bus mode */
1913 mmc_switch(mmc
, EXT_CSD_CMD_SET_NORMAL
,
1914 EXT_CSD_BUS_WIDTH
, EXT_CSD_BUS_WIDTH_1
);
1915 mmc_select_mode(mmc
, MMC_LEGACY
);
1916 mmc_set_bus_width(mmc
, 1);
1920 pr_err("unable to select a mode\n");
1925 static int mmc_startup_v4(struct mmc
*mmc
)
1929 bool has_parts
= false;
1930 bool part_completed
;
1931 ALLOC_CACHE_ALIGN_BUFFER(u8
, ext_csd
, MMC_MAX_BLOCK_LEN
);
1933 if (IS_SD(mmc
) || (mmc
->version
< MMC_VERSION_4
))
1936 /* check ext_csd version and capacity */
1937 err
= mmc_send_ext_csd(mmc
, ext_csd
);
1941 /* store the ext csd for future reference */
1943 mmc
->ext_csd
= malloc(MMC_MAX_BLOCK_LEN
);
1946 memcpy(mmc
->ext_csd
, ext_csd
, MMC_MAX_BLOCK_LEN
);
1948 if (ext_csd
[EXT_CSD_REV
] >= 2) {
1950 * According to the JEDEC Standard, the value of
1951 * ext_csd's capacity is valid if the value is more
1954 capacity
= ext_csd
[EXT_CSD_SEC_CNT
] << 0
1955 | ext_csd
[EXT_CSD_SEC_CNT
+ 1] << 8
1956 | ext_csd
[EXT_CSD_SEC_CNT
+ 2] << 16
1957 | ext_csd
[EXT_CSD_SEC_CNT
+ 3] << 24;
1958 capacity
*= MMC_MAX_BLOCK_LEN
;
1959 if ((capacity
>> 20) > 2 * 1024)
1960 mmc
->capacity_user
= capacity
;
1963 switch (ext_csd
[EXT_CSD_REV
]) {
1965 mmc
->version
= MMC_VERSION_4_1
;
1968 mmc
->version
= MMC_VERSION_4_2
;
1971 mmc
->version
= MMC_VERSION_4_3
;
1974 mmc
->version
= MMC_VERSION_4_41
;
1977 mmc
->version
= MMC_VERSION_4_5
;
1980 mmc
->version
= MMC_VERSION_5_0
;
1983 mmc
->version
= MMC_VERSION_5_1
;
1987 /* The partition data may be non-zero but it is only
1988 * effective if PARTITION_SETTING_COMPLETED is set in
1989 * EXT_CSD, so ignore any data if this bit is not set,
1990 * except for enabling the high-capacity group size
1991 * definition (see below).
1993 part_completed
= !!(ext_csd
[EXT_CSD_PARTITION_SETTING
] &
1994 EXT_CSD_PARTITION_SETTING_COMPLETED
);
1996 /* store the partition info of emmc */
1997 mmc
->part_support
= ext_csd
[EXT_CSD_PARTITIONING_SUPPORT
];
1998 if ((ext_csd
[EXT_CSD_PARTITIONING_SUPPORT
] & PART_SUPPORT
) ||
1999 ext_csd
[EXT_CSD_BOOT_MULT
])
2000 mmc
->part_config
= ext_csd
[EXT_CSD_PART_CONF
];
2001 if (part_completed
&&
2002 (ext_csd
[EXT_CSD_PARTITIONING_SUPPORT
] & ENHNCD_SUPPORT
))
2003 mmc
->part_attr
= ext_csd
[EXT_CSD_PARTITIONS_ATTRIBUTE
];
2005 mmc
->capacity_boot
= ext_csd
[EXT_CSD_BOOT_MULT
] << 17;
2007 mmc
->capacity_rpmb
= ext_csd
[EXT_CSD_RPMB_MULT
] << 17;
2009 for (i
= 0; i
< 4; i
++) {
2010 int idx
= EXT_CSD_GP_SIZE_MULT
+ i
* 3;
2011 uint mult
= (ext_csd
[idx
+ 2] << 16) +
2012 (ext_csd
[idx
+ 1] << 8) + ext_csd
[idx
];
2015 if (!part_completed
)
2017 mmc
->capacity_gp
[i
] = mult
;
2018 mmc
->capacity_gp
[i
] *=
2019 ext_csd
[EXT_CSD_HC_ERASE_GRP_SIZE
];
2020 mmc
->capacity_gp
[i
] *= ext_csd
[EXT_CSD_HC_WP_GRP_SIZE
];
2021 mmc
->capacity_gp
[i
] <<= 19;
2024 if (part_completed
) {
2025 mmc
->enh_user_size
=
2026 (ext_csd
[EXT_CSD_ENH_SIZE_MULT
+ 2] << 16) +
2027 (ext_csd
[EXT_CSD_ENH_SIZE_MULT
+ 1] << 8) +
2028 ext_csd
[EXT_CSD_ENH_SIZE_MULT
];
2029 mmc
->enh_user_size
*= ext_csd
[EXT_CSD_HC_ERASE_GRP_SIZE
];
2030 mmc
->enh_user_size
*= ext_csd
[EXT_CSD_HC_WP_GRP_SIZE
];
2031 mmc
->enh_user_size
<<= 19;
2032 mmc
->enh_user_start
=
2033 (ext_csd
[EXT_CSD_ENH_START_ADDR
+ 3] << 24) +
2034 (ext_csd
[EXT_CSD_ENH_START_ADDR
+ 2] << 16) +
2035 (ext_csd
[EXT_CSD_ENH_START_ADDR
+ 1] << 8) +
2036 ext_csd
[EXT_CSD_ENH_START_ADDR
];
2037 if (mmc
->high_capacity
)
2038 mmc
->enh_user_start
<<= 9;
2042 * Host needs to enable ERASE_GRP_DEF bit if device is
2043 * partitioned. This bit will be lost every time after a reset
2044 * or power off. This will affect erase size.
2048 if ((ext_csd
[EXT_CSD_PARTITIONING_SUPPORT
] & PART_SUPPORT
) &&
2049 (ext_csd
[EXT_CSD_PARTITIONS_ATTRIBUTE
] & PART_ENH_ATTRIB
))
2052 err
= mmc_switch(mmc
, EXT_CSD_CMD_SET_NORMAL
,
2053 EXT_CSD_ERASE_GROUP_DEF
, 1);
2058 ext_csd
[EXT_CSD_ERASE_GROUP_DEF
] = 1;
2061 if (ext_csd
[EXT_CSD_ERASE_GROUP_DEF
] & 0x01) {
2062 /* Read out group size from ext_csd */
2063 mmc
->erase_grp_size
=
2064 ext_csd
[EXT_CSD_HC_ERASE_GRP_SIZE
] * 1024;
2066 * if high capacity and partition setting completed
2067 * SEC_COUNT is valid even if it is smaller than 2 GiB
2068 * JEDEC Standard JESD84-B45, 6.2.4
2070 if (mmc
->high_capacity
&& part_completed
) {
2071 capacity
= (ext_csd
[EXT_CSD_SEC_CNT
]) |
2072 (ext_csd
[EXT_CSD_SEC_CNT
+ 1] << 8) |
2073 (ext_csd
[EXT_CSD_SEC_CNT
+ 2] << 16) |
2074 (ext_csd
[EXT_CSD_SEC_CNT
+ 3] << 24);
2075 capacity
*= MMC_MAX_BLOCK_LEN
;
2076 mmc
->capacity_user
= capacity
;
2079 /* Calculate the group size from the csd value. */
2080 int erase_gsz
, erase_gmul
;
2082 erase_gsz
= (mmc
->csd
[2] & 0x00007c00) >> 10;
2083 erase_gmul
= (mmc
->csd
[2] & 0x000003e0) >> 5;
2084 mmc
->erase_grp_size
= (erase_gsz
+ 1)
2088 mmc
->hc_wp_grp_size
= 1024
2089 * ext_csd
[EXT_CSD_HC_ERASE_GRP_SIZE
]
2090 * ext_csd
[EXT_CSD_HC_WP_GRP_SIZE
];
2092 mmc
->wr_rel_set
= ext_csd
[EXT_CSD_WR_REL_SET
];
2098 mmc
->ext_csd
= NULL
;
2103 static int mmc_startup(struct mmc
*mmc
)
2109 struct blk_desc
*bdesc
;
2111 #ifdef CONFIG_MMC_SPI_CRC_ON
2112 if (mmc_host_is_spi(mmc
)) { /* enable CRC check for spi */
2113 cmd
.cmdidx
= MMC_CMD_SPI_CRC_ON_OFF
;
2114 cmd
.resp_type
= MMC_RSP_R1
;
2116 err
= mmc_send_cmd(mmc
, &cmd
, NULL
);
2122 /* Put the Card in Identify Mode */
2123 cmd
.cmdidx
= mmc_host_is_spi(mmc
) ? MMC_CMD_SEND_CID
:
2124 MMC_CMD_ALL_SEND_CID
; /* cmd not supported in spi */
2125 cmd
.resp_type
= MMC_RSP_R2
;
2128 err
= mmc_send_cmd(mmc
, &cmd
, NULL
);
2130 #ifdef CONFIG_MMC_QUIRKS
2131 if (err
&& (mmc
->quirks
& MMC_QUIRK_RETRY_SEND_CID
)) {
2134 * It has been seen that SEND_CID may fail on the first
2135 * attempt, let's try a few more time
2138 err
= mmc_send_cmd(mmc
, &cmd
, NULL
);
2141 } while (retries
--);
2148 memcpy(mmc
->cid
, cmd
.response
, 16);
2151 * For MMC cards, set the Relative Address.
2152 * For SD cards, get the Relatvie Address.
2153 * This also puts the cards into Standby State
2155 if (!mmc_host_is_spi(mmc
)) { /* cmd not supported in spi */
2156 cmd
.cmdidx
= SD_CMD_SEND_RELATIVE_ADDR
;
2157 cmd
.cmdarg
= mmc
->rca
<< 16;
2158 cmd
.resp_type
= MMC_RSP_R6
;
2160 err
= mmc_send_cmd(mmc
, &cmd
, NULL
);
2166 mmc
->rca
= (cmd
.response
[0] >> 16) & 0xffff;
2169 /* Get the Card-Specific Data */
2170 cmd
.cmdidx
= MMC_CMD_SEND_CSD
;
2171 cmd
.resp_type
= MMC_RSP_R2
;
2172 cmd
.cmdarg
= mmc
->rca
<< 16;
2174 err
= mmc_send_cmd(mmc
, &cmd
, NULL
);
2179 mmc
->csd
[0] = cmd
.response
[0];
2180 mmc
->csd
[1] = cmd
.response
[1];
2181 mmc
->csd
[2] = cmd
.response
[2];
2182 mmc
->csd
[3] = cmd
.response
[3];
2184 if (mmc
->version
== MMC_VERSION_UNKNOWN
) {
2185 int version
= (cmd
.response
[0] >> 26) & 0xf;
2189 mmc
->version
= MMC_VERSION_1_2
;
2192 mmc
->version
= MMC_VERSION_1_4
;
2195 mmc
->version
= MMC_VERSION_2_2
;
2198 mmc
->version
= MMC_VERSION_3
;
2201 mmc
->version
= MMC_VERSION_4
;
2204 mmc
->version
= MMC_VERSION_1_2
;
2209 /* divide frequency by 10, since the mults are 10x bigger */
2210 freq
= fbase
[(cmd
.response
[0] & 0x7)];
2211 mult
= multipliers
[((cmd
.response
[0] >> 3) & 0xf)];
2213 mmc
->legacy_speed
= freq
* mult
;
2214 mmc_select_mode(mmc
, MMC_LEGACY
);
2216 mmc
->dsr_imp
= ((cmd
.response
[1] >> 12) & 0x1);
2217 mmc
->read_bl_len
= 1 << ((cmd
.response
[1] >> 16) & 0xf);
2220 mmc
->write_bl_len
= mmc
->read_bl_len
;
2222 mmc
->write_bl_len
= 1 << ((cmd
.response
[3] >> 22) & 0xf);
2224 if (mmc
->high_capacity
) {
2225 csize
= (mmc
->csd
[1] & 0x3f) << 16
2226 | (mmc
->csd
[2] & 0xffff0000) >> 16;
2229 csize
= (mmc
->csd
[1] & 0x3ff) << 2
2230 | (mmc
->csd
[2] & 0xc0000000) >> 30;
2231 cmult
= (mmc
->csd
[2] & 0x00038000) >> 15;
2234 mmc
->capacity_user
= (csize
+ 1) << (cmult
+ 2);
2235 mmc
->capacity_user
*= mmc
->read_bl_len
;
2236 mmc
->capacity_boot
= 0;
2237 mmc
->capacity_rpmb
= 0;
2238 for (i
= 0; i
< 4; i
++)
2239 mmc
->capacity_gp
[i
] = 0;
2241 if (mmc
->read_bl_len
> MMC_MAX_BLOCK_LEN
)
2242 mmc
->read_bl_len
= MMC_MAX_BLOCK_LEN
;
2244 if (mmc
->write_bl_len
> MMC_MAX_BLOCK_LEN
)
2245 mmc
->write_bl_len
= MMC_MAX_BLOCK_LEN
;
2247 if ((mmc
->dsr_imp
) && (0xffffffff != mmc
->dsr
)) {
2248 cmd
.cmdidx
= MMC_CMD_SET_DSR
;
2249 cmd
.cmdarg
= (mmc
->dsr
& 0xffff) << 16;
2250 cmd
.resp_type
= MMC_RSP_NONE
;
2251 if (mmc_send_cmd(mmc
, &cmd
, NULL
))
2252 pr_warn("MMC: SET_DSR failed\n");
2255 /* Select the card, and put it into Transfer Mode */
2256 if (!mmc_host_is_spi(mmc
)) { /* cmd not supported in spi */
2257 cmd
.cmdidx
= MMC_CMD_SELECT_CARD
;
2258 cmd
.resp_type
= MMC_RSP_R1
;
2259 cmd
.cmdarg
= mmc
->rca
<< 16;
2260 err
= mmc_send_cmd(mmc
, &cmd
, NULL
);
2267 * For SD, its erase group is always one sector
2269 mmc
->erase_grp_size
= 1;
2270 mmc
->part_config
= MMCPART_NOAVAILABLE
;
2272 err
= mmc_startup_v4(mmc
);
2276 err
= mmc_set_capacity(mmc
, mmc_get_blk_desc(mmc
)->hwpart
);
2281 err
= sd_get_capabilities(mmc
);
2284 err
= sd_select_mode_and_width(mmc
, mmc
->card_caps
);
2286 err
= mmc_get_capabilities(mmc
);
2289 mmc_select_mode_and_width(mmc
, mmc
->card_caps
);
2295 mmc
->best_mode
= mmc
->selected_mode
;
2297 /* Fix the block length for DDR mode */
2298 if (mmc
->ddr_mode
) {
2299 mmc
->read_bl_len
= MMC_MAX_BLOCK_LEN
;
2300 mmc
->write_bl_len
= MMC_MAX_BLOCK_LEN
;
2303 /* fill in device description */
2304 bdesc
= mmc_get_blk_desc(mmc
);
2308 bdesc
->blksz
= mmc
->read_bl_len
;
2309 bdesc
->log2blksz
= LOG2(bdesc
->blksz
);
2310 bdesc
->lba
= lldiv(mmc
->capacity
, mmc
->read_bl_len
);
2311 #if !defined(CONFIG_SPL_BUILD) || \
2312 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2313 !defined(CONFIG_USE_TINY_PRINTF))
2314 sprintf(bdesc
->vendor
, "Man %06x Snr %04x%04x",
2315 mmc
->cid
[0] >> 24, (mmc
->cid
[2] & 0xffff),
2316 (mmc
->cid
[3] >> 16) & 0xffff);
2317 sprintf(bdesc
->product
, "%c%c%c%c%c%c", mmc
->cid
[0] & 0xff,
2318 (mmc
->cid
[1] >> 24), (mmc
->cid
[1] >> 16) & 0xff,
2319 (mmc
->cid
[1] >> 8) & 0xff, mmc
->cid
[1] & 0xff,
2320 (mmc
->cid
[2] >> 24) & 0xff);
2321 sprintf(bdesc
->revision
, "%d.%d", (mmc
->cid
[2] >> 20) & 0xf,
2322 (mmc
->cid
[2] >> 16) & 0xf);
2324 bdesc
->vendor
[0] = 0;
2325 bdesc
->product
[0] = 0;
2326 bdesc
->revision
[0] = 0;
2328 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
2335 static int mmc_send_if_cond(struct mmc
*mmc
)
2340 cmd
.cmdidx
= SD_CMD_SEND_IF_COND
;
2341 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2342 cmd
.cmdarg
= ((mmc
->cfg
->voltages
& 0xff8000) != 0) << 8 | 0xaa;
2343 cmd
.resp_type
= MMC_RSP_R7
;
2345 err
= mmc_send_cmd(mmc
, &cmd
, NULL
);
2350 if ((cmd
.response
[0] & 0xff) != 0xaa)
2353 mmc
->version
= SD_VERSION_2
;
2358 #if !CONFIG_IS_ENABLED(DM_MMC)
2359 /* board-specific MMC power initializations. */
2360 __weak
void board_mmc_power_init(void)
2365 static int mmc_power_init(struct mmc
*mmc
)
2367 #if CONFIG_IS_ENABLED(DM_MMC)
2368 #if CONFIG_IS_ENABLED(DM_REGULATOR)
2371 ret
= device_get_supply_regulator(mmc
->dev
, "vmmc-supply",
2374 debug("%s: No vmmc supply\n", mmc
->dev
->name
);
2376 ret
= device_get_supply_regulator(mmc
->dev
, "vqmmc-supply",
2377 &mmc
->vqmmc_supply
);
2379 debug("%s: No vqmmc supply\n", mmc
->dev
->name
);
2381 #else /* !CONFIG_DM_MMC */
2383 * Driver model should use a regulator, as above, rather than calling
2384 * out to board code.
2386 board_mmc_power_init();
2392 * put the host in the initial state:
2393 * - turn on Vdd (card power supply)
2394 * - configure the bus width and clock to minimal values
2396 static void mmc_set_initial_state(struct mmc
*mmc
)
2400 /* First try to set 3.3V. If it fails set to 1.8V */
2401 err
= mmc_set_signal_voltage(mmc
, MMC_SIGNAL_VOLTAGE_330
);
2403 err
= mmc_set_signal_voltage(mmc
, MMC_SIGNAL_VOLTAGE_180
);
2405 pr_warn("mmc: failed to set signal voltage\n");
2407 mmc_select_mode(mmc
, MMC_LEGACY
);
2408 mmc_set_bus_width(mmc
, 1);
2409 mmc_set_clock(mmc
, 0, false);
2412 static int mmc_power_on(struct mmc
*mmc
)
2414 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2415 if (mmc
->vmmc_supply
) {
2416 int ret
= regulator_set_enable(mmc
->vmmc_supply
, true);
2419 puts("Error enabling VMMC supply\n");
2427 static int mmc_power_off(struct mmc
*mmc
)
2429 mmc_set_clock(mmc
, 1, true);
2430 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2431 if (mmc
->vmmc_supply
) {
2432 int ret
= regulator_set_enable(mmc
->vmmc_supply
, false);
2435 debug("Error disabling VMMC supply\n");
/* Power the card off, wait, and power it back on (full power cycle). */
static int mmc_power_cycle(struct mmc *mmc)
{
	int ret;

	ret = mmc_power_off(mmc);
	if (ret)
		return ret;
	/*
	 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
	 * to be on the safer side.
	 */
	udelay(2000);
	return mmc_power_on(mmc);
}
2458 int mmc_start_init(struct mmc
*mmc
)
2461 bool uhs_en
= supports_uhs(mmc
->cfg
->host_caps
);
2465 * all hosts are capable of 1 bit bus-width and able to use the legacy
2468 mmc
->host_caps
= mmc
->cfg
->host_caps
| MMC_CAP(SD_LEGACY
) |
2469 MMC_CAP(MMC_LEGACY
) | MMC_MODE_1BIT
;
2471 /* we pretend there's no card when init is NULL */
2472 no_card
= mmc_getcd(mmc
) == 0;
2473 #if !CONFIG_IS_ENABLED(DM_MMC)
2474 no_card
= no_card
|| (mmc
->cfg
->ops
->init
== NULL
);
2478 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2479 printf("MMC: no card present\n");
2487 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
2488 mmc_adapter_card_type_ident();
2490 err
= mmc_power_init(mmc
);
2494 #ifdef CONFIG_MMC_QUIRKS
2495 mmc
->quirks
= MMC_QUIRK_RETRY_SET_BLOCKLEN
|
2496 MMC_QUIRK_RETRY_SEND_CID
;
2499 err
= mmc_power_cycle(mmc
);
2502 * if power cycling is not supported, we should not try
2503 * to use the UHS modes, because we wouldn't be able to
2504 * recover from an error during the UHS initialization.
2506 debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
2508 mmc
->host_caps
&= ~UHS_CAPS
;
2509 err
= mmc_power_on(mmc
);
2514 #if CONFIG_IS_ENABLED(DM_MMC)
2515 /* The device has already been probed ready for use */
2517 /* made sure it's not NULL earlier */
2518 err
= mmc
->cfg
->ops
->init(mmc
);
2525 mmc_set_initial_state(mmc
);
2526 mmc_send_init_stream(mmc
);
2528 /* Reset the Card */
2529 err
= mmc_go_idle(mmc
);
2534 /* The internal partition reset to user partition(0) at every CMD0*/
2535 mmc_get_blk_desc(mmc
)->hwpart
= 0;
2537 /* Test for SD version 2 */
2538 err
= mmc_send_if_cond(mmc
);
2540 /* Now try to get the SD card's operating condition */
2541 err
= sd_send_op_cond(mmc
, uhs_en
);
2542 if (err
&& uhs_en
) {
2544 mmc_power_cycle(mmc
);
2548 /* If the command timed out, we check for an MMC card */
2549 if (err
== -ETIMEDOUT
) {
2550 err
= mmc_send_op_cond(mmc
);
2553 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2554 pr_err("Card did not respond to voltage select!\n");
2561 mmc
->init_in_progress
= 1;
2566 static int mmc_complete_init(struct mmc
*mmc
)
2570 mmc
->init_in_progress
= 0;
2571 if (mmc
->op_cond_pending
)
2572 err
= mmc_complete_op_cond(mmc
);
2575 err
= mmc_startup(mmc
);
2583 int mmc_init(struct mmc
*mmc
)
2586 __maybe_unused
unsigned start
;
2587 #if CONFIG_IS_ENABLED(DM_MMC)
2588 struct mmc_uclass_priv
*upriv
= dev_get_uclass_priv(mmc
->dev
);
2595 start
= get_timer(0);
2597 if (!mmc
->init_in_progress
)
2598 err
= mmc_start_init(mmc
);
2601 err
= mmc_complete_init(mmc
);
2603 printf("%s: %d, time %lu\n", __func__
, err
, get_timer(start
));
2608 int mmc_set_dsr(struct mmc
*mmc
, u16 val
)
2614 /* CPU-specific MMC initializations */
2615 __weak
int cpu_mmc_init(bd_t
*bis
)
2620 /* board-specific MMC initializations. */
2621 __weak
int board_mmc_init(bd_t
*bis
)
2626 void mmc_set_preinit(struct mmc
*mmc
, int preinit
)
2628 mmc
->preinit
= preinit
;
2631 #if CONFIG_IS_ENABLED(DM_MMC) && defined(CONFIG_SPL_BUILD)
2632 static int mmc_probe(bd_t
*bis
)
2636 #elif CONFIG_IS_ENABLED(DM_MMC)
2637 static int mmc_probe(bd_t
*bis
)
2641 struct udevice
*dev
;
2643 ret
= uclass_get(UCLASS_MMC
, &uc
);
2648 * Try to add them in sequence order. Really with driver model we
2649 * should allow holes, but the current MMC list does not allow that.
2650 * So if we request 0, 1, 3 we will get 0, 1, 2.
2652 for (i
= 0; ; i
++) {
2653 ret
= uclass_get_device_by_seq(UCLASS_MMC
, i
, &dev
);
2657 uclass_foreach_dev(dev
, uc
) {
2658 ret
= device_probe(dev
);
2660 pr_err("%s - probe failed: %d\n", dev
->name
, ret
);
2666 static int mmc_probe(bd_t
*bis
)
2668 if (board_mmc_init(bis
) < 0)
2675 int mmc_initialize(bd_t
*bis
)
2677 static int initialized
= 0;
2679 if (initialized
) /* Avoid initializing mmc multiple times */
2683 #if !CONFIG_IS_ENABLED(BLK)
2684 #if !CONFIG_IS_ENABLED(MMC_TINY)
2688 ret
= mmc_probe(bis
);
2692 #ifndef CONFIG_SPL_BUILD
2693 print_mmc_devices(',');
#ifdef CONFIG_CMD_BKOPS_ENABLE
/*
 * Permanently enable manual background operations (BKOPS_EN) on an
 * eMMC device, after checking that the device supports them and that
 * they are not already enabled. Note: BKOPS_EN is a one-time
 * programmable EXT_CSD field. Returns 0 on success.
 */
int mmc_set_bkops_enable(struct mmc *mmc)
{
	int err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err) {
		puts("Could not get ext_csd register values\n");
		return err;
	}

	if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
		puts("Background operations not supported on device\n");
		return -EMEDIUMTYPE;
	}

	if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
		puts("Background operations already enabled\n");
		return 0;
	}

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
	if (err) {
		puts("Failed to enable manual background operations\n");
		return err;
	}

	puts("Enabled manual background operations\n");

	return 0;
}
#endif