drivers/mmc/mmc.c
1 /*
2 * Copyright 2008, Freescale Semiconductor, Inc
3 * Andy Fleming
4 *
5 * Based vaguely on the Linux code
6 *
7 * SPDX-License-Identifier: GPL-2.0+
8 */
9
10 #include <config.h>
11 #include <common.h>
12 #include <command.h>
13 #include <dm.h>
14 #include <dm/device-internal.h>
15 #include <errno.h>
16 #include <mmc.h>
17 #include <part.h>
18 #include <power/regulator.h>
19 #include <malloc.h>
20 #include <memalign.h>
21 #include <linux/list.h>
22 #include <div64.h>
23 #include "mmc_private.h"
24
25 static const unsigned int sd_au_size[] = {
26 0, SZ_16K / 512, SZ_32K / 512,
27 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
28 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
29 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
30 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512, SZ_64M / 512,
31 };
32
33 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
34 static int mmc_power_cycle(struct mmc *mmc);
35 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps);
36
37 #if CONFIG_IS_ENABLED(MMC_TINY)
38 static struct mmc mmc_static;
39 struct mmc *find_mmc_device(int dev_num)
40 {
41 return &mmc_static;
42 }
43
44 void mmc_do_preinit(void)
45 {
46 struct mmc *m = &mmc_static;
47 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
48 mmc_set_preinit(m, 1);
49 #endif
50 if (m->preinit)
51 mmc_start_init(m);
52 }
53
54 struct blk_desc *mmc_get_blk_desc(struct mmc *mmc)
55 {
56 return &mmc->block_dev;
57 }
58 #endif
59
60 #if !CONFIG_IS_ENABLED(DM_MMC)
61
62 static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout)
63 {
64 return -ENOSYS;
65 }
66
67 __weak int board_mmc_getwp(struct mmc *mmc)
68 {
69 return -1;
70 }
71
72 int mmc_getwp(struct mmc *mmc)
73 {
74 int wp;
75
76 wp = board_mmc_getwp(mmc);
77
78 if (wp < 0) {
79 if (mmc->cfg->ops->getwp)
80 wp = mmc->cfg->ops->getwp(mmc);
81 else
82 wp = 0;
83 }
84
85 return wp;
86 }
87
88 __weak int board_mmc_getcd(struct mmc *mmc)
89 {
90 return -1;
91 }
92 #endif
93
94 #ifdef CONFIG_MMC_TRACE
95 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
96 {
97 printf("CMD_SEND:%d\n", cmd->cmdidx);
98 printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
99 }
100
101 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
102 {
103 int i;
104 u8 *ptr;
105
106 if (ret) {
107 printf("\t\tRET\t\t\t %d\n", ret);
108 } else {
109 switch (cmd->resp_type) {
110 case MMC_RSP_NONE:
111 printf("\t\tMMC_RSP_NONE\n");
112 break;
113 case MMC_RSP_R1:
114 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
115 cmd->response[0]);
116 break;
117 case MMC_RSP_R1b:
118 printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
119 cmd->response[0]);
120 break;
121 case MMC_RSP_R2:
122 printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
123 cmd->response[0]);
124 printf("\t\t \t\t 0x%08X \n",
125 cmd->response[1]);
126 printf("\t\t \t\t 0x%08X \n",
127 cmd->response[2]);
128 printf("\t\t \t\t 0x%08X \n",
129 cmd->response[3]);
130 printf("\n");
131 printf("\t\t\t\t\tDUMPING DATA\n");
132 for (i = 0; i < 4; i++) {
133 int j;
134 printf("\t\t\t\t\t%03d - ", i*4);
135 ptr = (u8 *)&cmd->response[i];
136 ptr += 3;
137 for (j = 0; j < 4; j++)
138 printf("%02X ", *ptr--);
139 printf("\n");
140 }
141 break;
142 case MMC_RSP_R3:
143 printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
144 cmd->response[0]);
145 break;
146 default:
147 printf("\t\tERROR MMC rsp not supported\n");
148 break;
149 }
150 }
151 }
152
153 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
154 {
155 int status;
156
157 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
158 printf("CURR STATE:%d\n", status);
159 }
160 #endif
161
162 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
163 const char *mmc_mode_name(enum bus_mode mode)
164 {
165 static const char *const names[] = {
166 [MMC_LEGACY] = "MMC legacy",
167 [SD_LEGACY] = "SD Legacy",
168 [MMC_HS] = "MMC High Speed (26MHz)",
169 [SD_HS] = "SD High Speed (50MHz)",
170 [UHS_SDR12] = "UHS SDR12 (25MHz)",
171 [UHS_SDR25] = "UHS SDR25 (50MHz)",
172 [UHS_SDR50] = "UHS SDR50 (100MHz)",
173 [UHS_SDR104] = "UHS SDR104 (208MHz)",
174 [UHS_DDR50] = "UHS DDR50 (50MHz)",
175 [MMC_HS_52] = "MMC High Speed (52MHz)",
176 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
177 [MMC_HS_200] = "HS200 (200MHz)",
178 };
179
180 if (mode >= MMC_MODES_END)
181 return "Unknown mode";
182 else
183 return names[mode];
184 }
185 #endif
186
187 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
188 {
189 static const int freqs[] = {
190 [SD_LEGACY] = 25000000,
191 [MMC_HS] = 26000000,
192 [SD_HS] = 50000000,
193 [UHS_SDR12] = 25000000,
194 [UHS_SDR25] = 50000000,
195 [UHS_SDR50] = 100000000,
196 [UHS_SDR104] = 208000000,
197 [UHS_DDR50] = 50000000,
198 [MMC_HS_52] = 52000000,
199 [MMC_DDR_52] = 52000000,
200 [MMC_HS_200] = 200000000,
201 };
202
203 if (mode == MMC_LEGACY)
204 return mmc->legacy_speed;
205 else if (mode >= MMC_MODES_END)
206 return 0;
207 else
208 return freqs[mode];
209 }
210
211 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
212 {
213 mmc->selected_mode = mode;
214 mmc->tran_speed = mmc_mode2freq(mmc, mode);
215 mmc->ddr_mode = mmc_is_mode_ddr(mode);
216 debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
217 mmc->tran_speed / 1000000);
218 return 0;
219 }
220
221 #if !CONFIG_IS_ENABLED(DM_MMC)
222 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
223 {
224 int ret;
225
226 mmmc_trace_before_send(mmc, cmd);
227 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
228 mmmc_trace_after_send(mmc, cmd, ret);
229
230 return ret;
231 }
232 #endif
233
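/*
 * Poll the card status with CMD13 until the card reports it is ready for
 * data and has left the programming state. The command itself is retried
 * up to five times on error, and the poll gives up after 'timeout' ms.
 */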
234 int mmc_send_status(struct mmc *mmc, int timeout)
235 {
236 struct mmc_cmd cmd;
237 int err, retries = 5;
238
239 cmd.cmdidx = MMC_CMD_SEND_STATUS;
240 cmd.resp_type = MMC_RSP_R1;
241 if (!mmc_host_is_spi(mmc))
242 cmd.cmdarg = mmc->rca << 16;
243
244 while (1) {
245 err = mmc_send_cmd(mmc, &cmd, NULL);
246 if (!err) {
247 if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
248 (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
249 MMC_STATE_PRG)
250 break;
251
252 if (cmd.response[0] & MMC_STATUS_MASK) {
253 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
254 printf("Status Error: 0x%08X\n",
255 cmd.response[0]);
256 #endif
257 return -ECOMM;
258 }
259 } else if (--retries < 0)
260 return err;
261
262 if (timeout-- <= 0)
263 break;
264
265 udelay(1000);
266 }
267
268 mmc_trace_state(mmc, &cmd);
269 if (timeout <= 0) {
270 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
271 printf("Timeout waiting card ready\n");
272 #endif
273 return -ETIMEDOUT;
274 }
275
276 return 0;
277 }
278
279 int mmc_set_blocklen(struct mmc *mmc, int len)
280 {
281 struct mmc_cmd cmd;
282 int err;
283
284 if (mmc->ddr_mode)
285 return 0;
286
287 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
288 cmd.resp_type = MMC_RSP_R1;
289 cmd.cmdarg = len;
290
291 err = mmc_send_cmd(mmc, &cmd, NULL);
292
293 #ifdef CONFIG_MMC_QUIRKS
294 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
295 int retries = 4;
296 /*
297 * It has been seen that SET_BLOCKLEN may fail on the first
298 * attempt; let's try a few more times
299 */
300 do {
301 err = mmc_send_cmd(mmc, &cmd, NULL);
302 if (!err)
303 break;
304 } while (retries--);
305 }
306 #endif
307
308 return err;
309 }
310
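/*
 * Read 'blkcnt' blocks starting at 'start' using CMD17/CMD18, sending
 * CMD12 (STOP_TRANSMISSION) after a multiple-block read. Returns the
 * number of blocks read, or 0 on any error.
 */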
311 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
312 lbaint_t blkcnt)
313 {
314 struct mmc_cmd cmd;
315 struct mmc_data data;
316
317 if (blkcnt > 1)
318 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
319 else
320 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
321
322 if (mmc->high_capacity)
323 cmd.cmdarg = start;
324 else
325 cmd.cmdarg = start * mmc->read_bl_len;
326
327 cmd.resp_type = MMC_RSP_R1;
328
329 data.dest = dst;
330 data.blocks = blkcnt;
331 data.blocksize = mmc->read_bl_len;
332 data.flags = MMC_DATA_READ;
333
334 if (mmc_send_cmd(mmc, &cmd, &data))
335 return 0;
336
337 if (blkcnt > 1) {
338 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
339 cmd.cmdarg = 0;
340 cmd.resp_type = MMC_RSP_R1b;
341 if (mmc_send_cmd(mmc, &cmd, NULL)) {
342 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
343 printf("mmc fail to send stop cmd\n");
344 #endif
345 return 0;
346 }
347 }
348
349 return blkcnt;
350 }
351
352 #if CONFIG_IS_ENABLED(BLK)
353 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
354 #else
355 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
356 void *dst)
357 #endif
358 {
359 #if CONFIG_IS_ENABLED(BLK)
360 struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
361 #endif
362 int dev_num = block_dev->devnum;
363 int err;
364 lbaint_t cur, blocks_todo = blkcnt;
365
366 if (blkcnt == 0)
367 return 0;
368
369 struct mmc *mmc = find_mmc_device(dev_num);
370 if (!mmc)
371 return 0;
372
373 if (CONFIG_IS_ENABLED(MMC_TINY))
374 err = mmc_switch_part(mmc, block_dev->hwpart);
375 else
376 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
377
378 if (err < 0)
379 return 0;
380
381 if ((start + blkcnt) > block_dev->lba) {
382 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
383 printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
384 start + blkcnt, block_dev->lba);
385 #endif
386 return 0;
387 }
388
389 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
390 debug("%s: Failed to set blocklen\n", __func__);
391 return 0;
392 }
393
394 do {
395 cur = (blocks_todo > mmc->cfg->b_max) ?
396 mmc->cfg->b_max : blocks_todo;
397 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
398 debug("%s: Failed to read blocks\n", __func__);
399 return 0;
400 }
401 blocks_todo -= cur;
402 start += cur;
403 dst += cur * mmc->read_bl_len;
404 } while (blocks_todo > 0);
405
406 return blkcnt;
407 }
408
409 static int mmc_go_idle(struct mmc *mmc)
410 {
411 struct mmc_cmd cmd;
412 int err;
413
414 udelay(1000);
415
416 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
417 cmd.cmdarg = 0;
418 cmd.resp_type = MMC_RSP_NONE;
419
420 err = mmc_send_cmd(mmc, &cmd, NULL);
421
422 if (err)
423 return err;
424
425 udelay(2000);
426
427 return 0;
428 }
429
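/*
 * Switch an SD card to 1.8V signalling with CMD11: after the command is
 * accepted the clock is gated, the host signal voltage is changed, and
 * the card is expected to release dat[0:3] once the switch has succeeded.
 */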
430 static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
431 {
432 struct mmc_cmd cmd;
433 int err = 0;
434
435 /*
436 * Send CMD11 only if the request is to switch the card to
437 * 1.8V signalling.
438 */
439 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
440 return mmc_set_signal_voltage(mmc, signal_voltage);
441
442 cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
443 cmd.cmdarg = 0;
444 cmd.resp_type = MMC_RSP_R1;
445
446 err = mmc_send_cmd(mmc, &cmd, NULL);
447 if (err)
448 return err;
449
450 if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
451 return -EIO;
452
453 /*
454 * The card should drive cmd and dat[0:3] low immediately
455 * after the response of cmd11, but wait 100 us to be sure
456 */
457 err = mmc_wait_dat0(mmc, 0, 100);
458 if (err == -ENOSYS)
459 udelay(100);
460 else if (err)
461 return -ETIMEDOUT;
462
463 /*
464 * During a signal voltage level switch, the clock must be gated
465 * for 5 ms according to the SD spec
466 */
467 mmc_set_clock(mmc, mmc->clock, true);
468
469 err = mmc_set_signal_voltage(mmc, signal_voltage);
470 if (err)
471 return err;
472
473 /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
474 mdelay(10);
475 mmc_set_clock(mmc, mmc->clock, false);
476
477 /*
478 * Failure to switch is indicated by the card holding
479 * dat[0:3] low. Wait for at least 1 ms according to spec
480 */
481 err = mmc_wait_dat0(mmc, 1, 1000);
482 if (err == -ENOSYS)
483 udelay(1000);
484 else if (err)
485 return -ETIMEDOUT;
486
487 return 0;
488 }
489
490 static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
491 {
492 int timeout = 1000;
493 int err;
494 struct mmc_cmd cmd;
495
496 while (1) {
497 cmd.cmdidx = MMC_CMD_APP_CMD;
498 cmd.resp_type = MMC_RSP_R1;
499 cmd.cmdarg = 0;
500
501 err = mmc_send_cmd(mmc, &cmd, NULL);
502
503 if (err)
504 return err;
505
506 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
507 cmd.resp_type = MMC_RSP_R3;
508
509 /*
510 * Most cards do not answer if some reserved bits
511 * in the OCR are set. However, some controllers
512 * can set bit 7 (reserved for low voltages), but
513 * how to manage low-voltage SD cards is not yet
514 * specified.
515 */
516 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
517 (mmc->cfg->voltages & 0xff8000);
518
519 if (mmc->version == SD_VERSION_2)
520 cmd.cmdarg |= OCR_HCS;
521
522 if (uhs_en)
523 cmd.cmdarg |= OCR_S18R;
524
525 err = mmc_send_cmd(mmc, &cmd, NULL);
526
527 if (err)
528 return err;
529
530 if (cmd.response[0] & OCR_BUSY)
531 break;
532
533 if (timeout-- <= 0)
534 return -EOPNOTSUPP;
535
536 udelay(1000);
537 }
538
539 if (mmc->version != SD_VERSION_2)
540 mmc->version = SD_VERSION_1_0;
541
542 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
543 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
544 cmd.resp_type = MMC_RSP_R3;
545 cmd.cmdarg = 0;
546
547 err = mmc_send_cmd(mmc, &cmd, NULL);
548
549 if (err)
550 return err;
551 }
552
553 mmc->ocr = cmd.response[0];
554
555 if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
556 == 0x41000000) {
557 err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
558 if (err)
559 return err;
560 }
561
562 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
563 mmc->rca = 0;
564
565 return 0;
566 }
567
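/* Send one CMD1 (SEND_OP_COND) iteration and store the returned OCR. */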
568 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
569 {
570 struct mmc_cmd cmd;
571 int err;
572
573 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
574 cmd.resp_type = MMC_RSP_R3;
575 cmd.cmdarg = 0;
576 if (use_arg && !mmc_host_is_spi(mmc))
577 cmd.cmdarg = OCR_HCS |
578 (mmc->cfg->voltages &
579 (mmc->ocr & OCR_VOLTAGE_MASK)) |
580 (mmc->ocr & OCR_ACCESS_MODE);
581
582 err = mmc_send_cmd(mmc, &cmd, NULL);
583 if (err)
584 return err;
585 mmc->ocr = cmd.response[0];
586 return 0;
587 }
588
589 static int mmc_send_op_cond(struct mmc *mmc)
590 {
591 int err, i;
592
593 /* Some cards seem to need this */
594 mmc_go_idle(mmc);
595
596 /* Ask the card for its capabilities */
597 for (i = 0; i < 2; i++) {
598 err = mmc_send_op_cond_iter(mmc, i != 0);
599 if (err)
600 return err;
601
602 /* exit if not busy (flag seems to be inverted) */
603 if (mmc->ocr & OCR_BUSY)
604 break;
605 }
606 mmc->op_cond_pending = 1;
607 return 0;
608 }
609
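/*
 * Keep issuing CMD1 until the card leaves the busy state, then (for SPI
 * hosts) read the OCR and record the card's high-capacity status and RCA.
 */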
610 static int mmc_complete_op_cond(struct mmc *mmc)
611 {
612 struct mmc_cmd cmd;
613 int timeout = 1000;
614 uint start;
615 int err;
616
617 mmc->op_cond_pending = 0;
618 if (!(mmc->ocr & OCR_BUSY)) {
619 /* Some cards seem to need this */
620 mmc_go_idle(mmc);
621
622 start = get_timer(0);
623 while (1) {
624 err = mmc_send_op_cond_iter(mmc, 1);
625 if (err)
626 return err;
627 if (mmc->ocr & OCR_BUSY)
628 break;
629 if (get_timer(start) > timeout)
630 return -EOPNOTSUPP;
631 udelay(100);
632 }
633 }
634
635 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
636 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
637 cmd.resp_type = MMC_RSP_R3;
638 cmd.cmdarg = 0;
639
640 err = mmc_send_cmd(mmc, &cmd, NULL);
641
642 if (err)
643 return err;
644
645 mmc->ocr = cmd.response[0];
646 }
647
648 mmc->version = MMC_VERSION_UNKNOWN;
649
650 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
651 mmc->rca = 1;
652
653 return 0;
654 }
655
656
657 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
658 {
659 struct mmc_cmd cmd;
660 struct mmc_data data;
661 int err;
662
663 /* Read the EXT_CSD register */
664 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
665 cmd.resp_type = MMC_RSP_R1;
666 cmd.cmdarg = 0;
667
668 data.dest = (char *)ext_csd;
669 data.blocks = 1;
670 data.blocksize = MMC_MAX_BLOCK_LEN;
671 data.flags = MMC_DATA_READ;
672
673 err = mmc_send_cmd(mmc, &cmd, &data);
674
675 return err;
676 }
677
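/*
 * Write one byte of the EXT_CSD register with CMD6 (SWITCH), retrying the
 * command up to three times, then poll the card status until it is ready.
 */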
678 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
679 {
680 struct mmc_cmd cmd;
681 int timeout = 1000;
682 int retries = 3;
683 int ret;
684
685 cmd.cmdidx = MMC_CMD_SWITCH;
686 cmd.resp_type = MMC_RSP_R1b;
687 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
688 (index << 16) |
689 (value << 8);
690
691 while (retries > 0) {
692 ret = mmc_send_cmd(mmc, &cmd, NULL);
693
694 /* Waiting for the ready status */
695 if (!ret) {
696 ret = mmc_send_status(mmc, timeout);
697 return ret;
698 }
699
700 retries--;
701 }
702
703 return ret;
704
705 }
706
707 static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode)
708 {
709 int err;
710 int speed_bits;
711
712 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
713
714 switch (mode) {
715 case MMC_HS:
716 case MMC_HS_52:
717 case MMC_DDR_52:
718 speed_bits = EXT_CSD_TIMING_HS;
719 break;
720 case MMC_HS_200:
721 speed_bits = EXT_CSD_TIMING_HS200;
722 break;
723 case MMC_LEGACY:
724 speed_bits = EXT_CSD_TIMING_LEGACY;
725 break;
726 default:
727 return -EINVAL;
728 }
729 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
730 speed_bits);
731 if (err)
732 return err;
733
734 if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
735 /* Now check to see that it worked */
736 err = mmc_send_ext_csd(mmc, test_csd);
737 if (err)
738 return err;
739
740 /* No high-speed support */
741 if (!test_csd[EXT_CSD_HS_TIMING])
742 return -ENOTSUPP;
743 }
744
745 return 0;
746 }
747
748 static int mmc_get_capabilities(struct mmc *mmc)
749 {
750 u8 *ext_csd = mmc->ext_csd;
751 char cardtype;
752
753 mmc->card_caps = MMC_MODE_1BIT;
754
755 if (mmc_host_is_spi(mmc))
756 return 0;
757
758 /* Only version 4 supports high-speed */
759 if (mmc->version < MMC_VERSION_4)
760 return 0;
761
762 if (!ext_csd) {
763 printf("No ext_csd found!\n"); /* this should enver happen */
764 return -ENOTSUPP;
765 }
766
767 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
768
769 cardtype = ext_csd[EXT_CSD_CARD_TYPE] & 0x3f;
770
771 if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
772 EXT_CSD_CARD_TYPE_HS200_1_8V)) {
773 mmc->card_caps |= MMC_MODE_HS200;
774 }
775 if (cardtype & EXT_CSD_CARD_TYPE_52) {
776 if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
777 mmc->card_caps |= MMC_MODE_DDR_52MHz;
778 mmc->card_caps |= MMC_MODE_HS_52MHz;
779 }
780 if (cardtype & EXT_CSD_CARD_TYPE_26)
781 mmc->card_caps |= MMC_MODE_HS;
782
783 return 0;
784 }
785
786 static int mmc_set_capacity(struct mmc *mmc, int part_num)
787 {
788 switch (part_num) {
789 case 0:
790 mmc->capacity = mmc->capacity_user;
791 break;
792 case 1:
793 case 2:
794 mmc->capacity = mmc->capacity_boot;
795 break;
796 case 3:
797 mmc->capacity = mmc->capacity_rpmb;
798 break;
799 case 4:
800 case 5:
801 case 6:
802 case 7:
803 mmc->capacity = mmc->capacity_gp[part_num - 4];
804 break;
805 default:
806 return -1;
807 }
808
809 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
810
811 return 0;
812 }
813
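/*
 * HS200 must not be used while a hardware partition other than the user
 * area is selected. If the currently selected mode is forbidden for the
 * requested partition, or is not the best mode available, re-negotiate
 * the mode and bus width without the forbidden capabilities.
 */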
814 static int mmc_boot_part_access_chk(struct mmc *mmc, unsigned int part_num)
815 {
816 int forbidden = 0;
817 bool change = false;
818
819 if (part_num & PART_ACCESS_MASK)
820 forbidden = MMC_CAP(MMC_HS_200);
821
822 if (MMC_CAP(mmc->selected_mode) & forbidden) {
823 debug("selected mode (%s) is forbidden for part %d\n",
824 mmc_mode_name(mmc->selected_mode), part_num);
825 change = true;
826 } else if (mmc->selected_mode != mmc->best_mode) {
827 debug("selected mode is not optimal\n");
828 change = true;
829 }
830
831 if (change)
832 return mmc_select_mode_and_width(mmc,
833 mmc->card_caps & ~forbidden);
834
835 return 0;
836 }
837
838 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
839 {
840 int ret;
841
842 ret = mmc_boot_part_access_chk(mmc, part_num);
843 if (ret)
844 return ret;
845
846 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
847 (mmc->part_config & ~PART_ACCESS_MASK)
848 | (part_num & PART_ACCESS_MASK));
849
850 /*
851 * Set the capacity if the switch succeeded or was intended
852 * to return to representing the raw device.
853 */
854 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
855 ret = mmc_set_capacity(mmc, part_num);
856 mmc_get_blk_desc(mmc)->hwpart = part_num;
857 }
858
859 return ret;
860 }
861
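/*
 * Validate and program an eMMC hardware partition layout (enhanced user
 * area and GP partitions). Depending on 'mode', the configuration is only
 * checked, written to EXT_CSD, or written and marked as completed, which
 * takes effect after the next power cycle.
 */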
862 int mmc_hwpart_config(struct mmc *mmc,
863 const struct mmc_hwpart_conf *conf,
864 enum mmc_hwpart_conf_mode mode)
865 {
866 u8 part_attrs = 0;
867 u32 enh_size_mult;
868 u32 enh_start_addr;
869 u32 gp_size_mult[4];
870 u32 max_enh_size_mult;
871 u32 tot_enh_size_mult = 0;
872 u8 wr_rel_set;
873 int i, pidx, err;
874 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
875
876 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
877 return -EINVAL;
878
879 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
880 printf("eMMC >= 4.4 required for enhanced user data area\n");
881 return -EMEDIUMTYPE;
882 }
883
884 if (!(mmc->part_support & PART_SUPPORT)) {
885 printf("Card does not support partitioning\n");
886 return -EMEDIUMTYPE;
887 }
888
889 if (!mmc->hc_wp_grp_size) {
890 printf("Card does not define HC WP group size\n");
891 return -EMEDIUMTYPE;
892 }
893
894 /* check partition alignment and total enhanced size */
895 if (conf->user.enh_size) {
896 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
897 conf->user.enh_start % mmc->hc_wp_grp_size) {
898 printf("User data enhanced area not HC WP group "
899 "size aligned\n");
900 return -EINVAL;
901 }
902 part_attrs |= EXT_CSD_ENH_USR;
903 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
904 if (mmc->high_capacity) {
905 enh_start_addr = conf->user.enh_start;
906 } else {
907 enh_start_addr = (conf->user.enh_start << 9);
908 }
909 } else {
910 enh_size_mult = 0;
911 enh_start_addr = 0;
912 }
913 tot_enh_size_mult += enh_size_mult;
914
915 for (pidx = 0; pidx < 4; pidx++) {
916 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
917 printf("GP%i partition not HC WP group size "
918 "aligned\n", pidx+1);
919 return -EINVAL;
920 }
921 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
922 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
923 part_attrs |= EXT_CSD_ENH_GP(pidx);
924 tot_enh_size_mult += gp_size_mult[pidx];
925 }
926 }
927
928 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
929 printf("Card does not support enhanced attribute\n");
930 return -EMEDIUMTYPE;
931 }
932
933 err = mmc_send_ext_csd(mmc, ext_csd);
934 if (err)
935 return err;
936
937 max_enh_size_mult =
938 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
939 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
940 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
941 if (tot_enh_size_mult > max_enh_size_mult) {
942 printf("Total enhanced size exceeds maximum (%u > %u)\n",
943 tot_enh_size_mult, max_enh_size_mult);
944 return -EMEDIUMTYPE;
945 }
946
947 /* The default value of EXT_CSD_WR_REL_SET is device
948 * dependent; the values can only be changed if the
949 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
950 * changed only once and before partitioning is completed. */
951 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
952 if (conf->user.wr_rel_change) {
953 if (conf->user.wr_rel_set)
954 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
955 else
956 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
957 }
958 for (pidx = 0; pidx < 4; pidx++) {
959 if (conf->gp_part[pidx].wr_rel_change) {
960 if (conf->gp_part[pidx].wr_rel_set)
961 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
962 else
963 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
964 }
965 }
966
967 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
968 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
969 puts("Card does not support host controlled partition write "
970 "reliability settings\n");
971 return -EMEDIUMTYPE;
972 }
973
974 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
975 EXT_CSD_PARTITION_SETTING_COMPLETED) {
976 printf("Card already partitioned\n");
977 return -EPERM;
978 }
979
980 if (mode == MMC_HWPART_CONF_CHECK)
981 return 0;
982
983 /* Partitioning requires high-capacity size definitions */
984 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
985 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
986 EXT_CSD_ERASE_GROUP_DEF, 1);
987
988 if (err)
989 return err;
990
991 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
992
993 /* update erase group size to be high-capacity */
994 mmc->erase_grp_size =
995 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
996
997 }
998
999 /* all OK, write the configuration */
1000 for (i = 0; i < 4; i++) {
1001 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1002 EXT_CSD_ENH_START_ADDR+i,
1003 (enh_start_addr >> (i*8)) & 0xFF);
1004 if (err)
1005 return err;
1006 }
1007 for (i = 0; i < 3; i++) {
1008 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1009 EXT_CSD_ENH_SIZE_MULT+i,
1010 (enh_size_mult >> (i*8)) & 0xFF);
1011 if (err)
1012 return err;
1013 }
1014 for (pidx = 0; pidx < 4; pidx++) {
1015 for (i = 0; i < 3; i++) {
1016 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1017 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1018 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1019 if (err)
1020 return err;
1021 }
1022 }
1023 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1024 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1025 if (err)
1026 return err;
1027
1028 if (mode == MMC_HWPART_CONF_SET)
1029 return 0;
1030
1031 /* The WR_REL_SET is a write-once register but shall be
1032 * written before setting PART_SETTING_COMPLETED. As it is
1033 * write-once, we can only write it when completing the
1034 * partitioning. */
1035 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1036 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1037 EXT_CSD_WR_REL_SET, wr_rel_set);
1038 if (err)
1039 return err;
1040 }
1041
1042 /* Setting PART_SETTING_COMPLETED confirms the partition
1043 * configuration but it only becomes effective after power
1044 * cycle, so we do not adjust the partition related settings
1045 * in the mmc struct. */
1046
1047 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1048 EXT_CSD_PARTITION_SETTING,
1049 EXT_CSD_PARTITION_SETTING_COMPLETED);
1050 if (err)
1051 return err;
1052
1053 return 0;
1054 }
1055
1056 #if !CONFIG_IS_ENABLED(DM_MMC)
1057 int mmc_getcd(struct mmc *mmc)
1058 {
1059 int cd;
1060
1061 cd = board_mmc_getcd(mmc);
1062
1063 if (cd < 0) {
1064 if (mmc->cfg->ops->getcd)
1065 cd = mmc->cfg->ops->getcd(mmc);
1066 else
1067 cd = 1;
1068 }
1069
1070 return cd;
1071 }
1072 #endif
1073
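/*
 * Issue CMD6 (SWITCH_FUNC) to check or set the value of one SD function
 * group and read back the 64-byte switch status block into 'resp'.
 */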
1074 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1075 {
1076 struct mmc_cmd cmd;
1077 struct mmc_data data;
1078
1079 /* Switch the frequency */
1080 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1081 cmd.resp_type = MMC_RSP_R1;
1082 cmd.cmdarg = (mode << 31) | 0xffffff;
1083 cmd.cmdarg &= ~(0xf << (group * 4));
1084 cmd.cmdarg |= value << (group * 4);
1085
1086 data.dest = (char *)resp;
1087 data.blocksize = 64;
1088 data.blocks = 1;
1089 data.flags = MMC_DATA_READ;
1090
1091 return mmc_send_cmd(mmc, &cmd, &data);
1092 }
1093
1094
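/*
 * Read the SCR and the switch function status to determine the SD card
 * version, its bus-width support and the high-speed/UHS modes it offers.
 */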
1095 static int sd_get_capabilities(struct mmc *mmc)
1096 {
1097 int err;
1098 struct mmc_cmd cmd;
1099 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
1100 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
1101 struct mmc_data data;
1102 int timeout;
1103 u32 sd3_bus_mode;
1104
1105 mmc->card_caps = MMC_MODE_1BIT;
1106
1107 if (mmc_host_is_spi(mmc))
1108 return 0;
1109
1110 /* Read the SCR to find out if this card supports higher speeds */
1111 cmd.cmdidx = MMC_CMD_APP_CMD;
1112 cmd.resp_type = MMC_RSP_R1;
1113 cmd.cmdarg = mmc->rca << 16;
1114
1115 err = mmc_send_cmd(mmc, &cmd, NULL);
1116
1117 if (err)
1118 return err;
1119
1120 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1121 cmd.resp_type = MMC_RSP_R1;
1122 cmd.cmdarg = 0;
1123
1124 timeout = 3;
1125
1126 retry_scr:
1127 data.dest = (char *)scr;
1128 data.blocksize = 8;
1129 data.blocks = 1;
1130 data.flags = MMC_DATA_READ;
1131
1132 err = mmc_send_cmd(mmc, &cmd, &data);
1133
1134 if (err) {
1135 if (timeout--)
1136 goto retry_scr;
1137
1138 return err;
1139 }
1140
1141 mmc->scr[0] = __be32_to_cpu(scr[0]);
1142 mmc->scr[1] = __be32_to_cpu(scr[1]);
1143
1144 switch ((mmc->scr[0] >> 24) & 0xf) {
1145 case 0:
1146 mmc->version = SD_VERSION_1_0;
1147 break;
1148 case 1:
1149 mmc->version = SD_VERSION_1_10;
1150 break;
1151 case 2:
1152 mmc->version = SD_VERSION_2;
1153 if ((mmc->scr[0] >> 15) & 0x1)
1154 mmc->version = SD_VERSION_3;
1155 break;
1156 default:
1157 mmc->version = SD_VERSION_1_0;
1158 break;
1159 }
1160
1161 if (mmc->scr[0] & SD_DATA_4BIT)
1162 mmc->card_caps |= MMC_MODE_4BIT;
1163
1164 /* Version 1.0 doesn't support switching */
1165 if (mmc->version == SD_VERSION_1_0)
1166 return 0;
1167
1168 timeout = 4;
1169 while (timeout--) {
1170 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1171 (u8 *)switch_status);
1172
1173 if (err)
1174 return err;
1175
1176 /* The high-speed function is busy. Try again */
1177 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1178 break;
1179 }
1180
1181 /* If high-speed is supported, record the capability */
1182 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1183 mmc->card_caps |= MMC_CAP(SD_HS);
1184
1185 /* Versions before 3.0 don't support UHS modes */
1186 if (mmc->version < SD_VERSION_3)
1187 return 0;
1188
1189 sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
1190 if (sd3_bus_mode & SD_MODE_UHS_SDR104)
1191 mmc->card_caps |= MMC_CAP(UHS_SDR104);
1192 if (sd3_bus_mode & SD_MODE_UHS_SDR50)
1193 mmc->card_caps |= MMC_CAP(UHS_SDR50);
1194 if (sd3_bus_mode & SD_MODE_UHS_SDR25)
1195 mmc->card_caps |= MMC_CAP(UHS_SDR25);
1196 if (sd3_bus_mode & SD_MODE_UHS_SDR12)
1197 mmc->card_caps |= MMC_CAP(UHS_SDR12);
1198 if (sd3_bus_mode & SD_MODE_UHS_DDR50)
1199 mmc->card_caps |= MMC_CAP(UHS_DDR50);
1200
1201 return 0;
1202 }
1203
1204 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1205 {
1206 int err;
1207
1208 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1209 int speed;
1210
1211 switch (mode) {
1212 case SD_LEGACY:
1213 case UHS_SDR12:
1214 speed = UHS_SDR12_BUS_SPEED;
1215 break;
1216 case SD_HS:
1217 case UHS_SDR25:
1218 speed = UHS_SDR25_BUS_SPEED;
1219 break;
1220 case UHS_SDR50:
1221 speed = UHS_SDR50_BUS_SPEED;
1222 break;
1223 case UHS_DDR50:
1224 speed = UHS_DDR50_BUS_SPEED;
1225 break;
1226 case UHS_SDR104:
1227 speed = UHS_SDR104_BUS_SPEED;
1228 break;
1229 default:
1230 return -EINVAL;
1231 }
1232
1233 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
1234 if (err)
1235 return err;
1236
1237 if ((__be32_to_cpu(switch_status[4]) >> 24) != speed)
1238 return -ENOTSUPP;
1239
1240 return 0;
1241 }
1242
1243 int sd_select_bus_width(struct mmc *mmc, int w)
1244 {
1245 int err;
1246 struct mmc_cmd cmd;
1247
1248 if ((w != 4) && (w != 1))
1249 return -EINVAL;
1250
1251 cmd.cmdidx = MMC_CMD_APP_CMD;
1252 cmd.resp_type = MMC_RSP_R1;
1253 cmd.cmdarg = mmc->rca << 16;
1254
1255 err = mmc_send_cmd(mmc, &cmd, NULL);
1256 if (err)
1257 return err;
1258
1259 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1260 cmd.resp_type = MMC_RSP_R1;
1261 if (w == 4)
1262 cmd.cmdarg = 2;
1263 else if (w == 1)
1264 cmd.cmdarg = 0;
1265 err = mmc_send_cmd(mmc, &cmd, NULL);
1266 if (err)
1267 return err;
1268
1269 return 0;
1270 }
1271
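/*
 * Read the 64-byte SD Status register (ACMD13) and extract the allocation
 * unit size and the erase timeout/offset, retrying the transfer on error.
 */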
1272 static int sd_read_ssr(struct mmc *mmc)
1273 {
1274 int err, i;
1275 struct mmc_cmd cmd;
1276 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1277 struct mmc_data data;
1278 int timeout = 3;
1279 unsigned int au, eo, et, es;
1280
1281 cmd.cmdidx = MMC_CMD_APP_CMD;
1282 cmd.resp_type = MMC_RSP_R1;
1283 cmd.cmdarg = mmc->rca << 16;
1284
1285 err = mmc_send_cmd(mmc, &cmd, NULL);
1286 if (err)
1287 return err;
1288
1289 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1290 cmd.resp_type = MMC_RSP_R1;
1291 cmd.cmdarg = 0;
1292
1293 retry_ssr:
1294 data.dest = (char *)ssr;
1295 data.blocksize = 64;
1296 data.blocks = 1;
1297 data.flags = MMC_DATA_READ;
1298
1299 err = mmc_send_cmd(mmc, &cmd, &data);
1300 if (err) {
1301 if (timeout--)
1302 goto retry_ssr;
1303
1304 return err;
1305 }
1306
1307 for (i = 0; i < 16; i++)
1308 ssr[i] = be32_to_cpu(ssr[i]);
1309
1310 au = (ssr[2] >> 12) & 0xF;
1311 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1312 mmc->ssr.au = sd_au_size[au];
1313 es = (ssr[3] >> 24) & 0xFF;
1314 es |= (ssr[2] & 0xFF) << 8;
1315 et = (ssr[3] >> 18) & 0x3F;
1316 if (es && et) {
1317 eo = (ssr[3] >> 16) & 0x3;
1318 mmc->ssr.erase_timeout = (et * 1000) / es;
1319 mmc->ssr.erase_offset = eo * 1000;
1320 }
1321 } else {
1322 debug("Invalid Allocation Unit Size.\n");
1323 }
1324
1325 return 0;
1326 }
1327
1328 /* frequency bases */
1329 /* divided by 10 to be nice to platforms without floating point */
1330 static const int fbase[] = {
1331 10000,
1332 100000,
1333 1000000,
1334 10000000,
1335 };
1336
1337 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
1338 * to platforms without floating point.
1339 */
1340 static const u8 multipliers[] = {
1341 0, /* reserved */
1342 10,
1343 12,
1344 13,
1345 15,
1346 20,
1347 25,
1348 30,
1349 35,
1350 40,
1351 45,
1352 50,
1353 55,
1354 60,
1355 70,
1356 80,
1357 };
1358
1359 static inline int bus_width(uint cap)
1360 {
1361 if (cap == MMC_MODE_8BIT)
1362 return 8;
1363 if (cap == MMC_MODE_4BIT)
1364 return 4;
1365 if (cap == MMC_MODE_1BIT)
1366 return 1;
1367 printf("invalid bus witdh capability 0x%x\n", cap);
1368 return 0;
1369 }
1370
1371 #if !CONFIG_IS_ENABLED(DM_MMC)
1372 static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
1373 {
1374 return -ENOTSUPP;
1375 }
1376
1377 static void mmc_send_init_stream(struct mmc *mmc)
1378 {
1379 }
1380
1381 static int mmc_set_ios(struct mmc *mmc)
1382 {
1383 int ret = 0;
1384
1385 if (mmc->cfg->ops->set_ios)
1386 ret = mmc->cfg->ops->set_ios(mmc);
1387
1388 return ret;
1389 }
1390 #endif
1391
1392 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1393 {
1394 if (clock > mmc->cfg->f_max)
1395 clock = mmc->cfg->f_max;
1396
1397 if (clock < mmc->cfg->f_min)
1398 clock = mmc->cfg->f_min;
1399
1400 mmc->clock = clock;
1401 mmc->clk_disable = disable;
1402
1403 return mmc_set_ios(mmc);
1404 }
1405
1406 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1407 {
1408 mmc->bus_width = width;
1409
1410 return mmc_set_ios(mmc);
1411 }
1412
1413 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1414 /*
1415 * helper function to display the capabilities in a human
1416 * friendly manner. The capabilities include bus width and
1417 * supported modes.
1418 */
1419 void mmc_dump_capabilities(const char *text, uint caps)
1420 {
1421 enum bus_mode mode;
1422
1423 printf("%s: widths [", text);
1424 if (caps & MMC_MODE_8BIT)
1425 printf("8, ");
1426 if (caps & MMC_MODE_4BIT)
1427 printf("4, ");
1428 if (caps & MMC_MODE_1BIT)
1429 printf("1, ");
1430 printf("\b\b] modes [");
1431 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1432 if (MMC_CAP(mode) & caps)
1433 printf("%s, ", mmc_mode_name(mode));
1434 printf("\b\b]\n");
1435 }
1436 #endif
1437
1438 struct mode_width_tuning {
1439 enum bus_mode mode;
1440 uint widths;
1441 uint tuning;
1442 };
1443
1444 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1445 {
1446 mmc->signal_voltage = signal_voltage;
1447 return mmc_set_ios(mmc);
1448 }
1449
1450 static const struct mode_width_tuning sd_modes_by_pref[] = {
1451 {
1452 .mode = UHS_SDR104,
1453 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1454 .tuning = MMC_CMD_SEND_TUNING_BLOCK
1455 },
1456 {
1457 .mode = UHS_SDR50,
1458 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1459 },
1460 {
1461 .mode = UHS_DDR50,
1462 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1463 },
1464 {
1465 .mode = UHS_SDR25,
1466 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1467 },
1468 {
1469 .mode = SD_HS,
1470 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1471 },
1472 {
1473 .mode = UHS_SDR12,
1474 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1475 },
1476 {
1477 .mode = SD_LEGACY,
1478 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1479 }
1480 };
1481
1482 #define for_each_sd_mode_by_pref(caps, mwt) \
1483 for (mwt = sd_modes_by_pref;\
1484 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1485 mwt++) \
1486 if (caps & MMC_CAP(mwt->mode))
1487
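/*
 * Try the SD bus modes in order of preference, configuring the bus width
 * on card and host, switching the card speed and running tuning where
 * required; fall back to SD legacy mode whenever a step fails.
 */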
1488 static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
1489 {
1490 int err;
1491 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1492 const struct mode_width_tuning *mwt;
1493 bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1494 uint caps;
1495
1496
1497 /* Restrict card's capabilities by what the host can do */
1498 caps = card_caps & (mmc->host_caps | MMC_MODE_1BIT);
1499
1500 if (!uhs_en)
1501 caps &= ~UHS_CAPS;
1502
1503 for_each_sd_mode_by_pref(caps, mwt) {
1504 uint *w;
1505
1506 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1507 if (*w & caps & mwt->widths) {
1508 debug("trying mode %s width %d (at %d MHz)\n",
1509 mmc_mode_name(mwt->mode),
1510 bus_width(*w),
1511 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1512
1513 /* configure the bus width (card + host) */
1514 err = sd_select_bus_width(mmc, bus_width(*w));
1515 if (err)
1516 goto error;
1517 mmc_set_bus_width(mmc, bus_width(*w));
1518
1519 /* configure the bus mode (card) */
1520 err = sd_set_card_speed(mmc, mwt->mode);
1521 if (err)
1522 goto error;
1523
1524 /* configure the bus mode (host) */
1525 mmc_select_mode(mmc, mwt->mode);
1526 mmc_set_clock(mmc, mmc->tran_speed, false);
1527
1528 /* execute tuning if needed */
1529 if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1530 err = mmc_execute_tuning(mmc,
1531 mwt->tuning);
1532 if (err) {
1533 debug("tuning failed\n");
1534 goto error;
1535 }
1536 }
1537
1538 err = sd_read_ssr(mmc);
1539 if (!err)
1540 return 0;
1541
1542 printf("bad ssr\n");
1543
1544 error:
1545 /* revert to a safer bus speed */
1546 mmc_select_mode(mmc, SD_LEGACY);
1547 mmc_set_clock(mmc, mmc->tran_speed, false);
1548 }
1549 }
1550 }
1551
1552 printf("unable to select a mode\n");
1553 return -ENOTSUPP;
1554 }
1555
1556 /*
1557 * Read and compare the part of the ext_csd that is constant.
1558 * This can be used to check that the transfer is working
1559 * as expected.
1560 */
1561 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1562 {
1563 int err;
1564 const u8 *ext_csd = mmc->ext_csd;
1565 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1566
1567 err = mmc_send_ext_csd(mmc, test_csd);
1568 if (err)
1569 return err;
1570
1571 /* Only compare read only fields */
1572 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1573 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1574 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1575 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1576 ext_csd[EXT_CSD_REV]
1577 == test_csd[EXT_CSD_REV] &&
1578 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1579 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1580 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1581 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1582 return 0;
1583
1584 return -EBADMSG;
1585 }
1586
1587 static const struct mode_width_tuning mmc_modes_by_pref[] = {
1588 {
1589 .mode = MMC_HS_200,
1590 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1591 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1592 },
1593 {
1594 .mode = MMC_DDR_52,
1595 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1596 },
1597 {
1598 .mode = MMC_HS_52,
1599 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1600 },
1601 {
1602 .mode = MMC_HS,
1603 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1604 },
1605 {
1606 .mode = MMC_LEGACY,
1607 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1608 }
1609 };
1610
1611 #define for_each_mmc_mode_by_pref(caps, mwt) \
1612 for (mwt = mmc_modes_by_pref;\
1613 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1614 mwt++) \
1615 if (caps & MMC_CAP(mwt->mode))
1616
1617 static const struct ext_csd_bus_width {
1618 uint cap;
1619 bool is_ddr;
1620 uint ext_csd_bits;
1621 } ext_csd_bus_width[] = {
1622 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1623 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1624 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1625 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1626 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
1627 };
1628
1629 #define for_each_supported_width(caps, ddr, ecbv) \
1630 for (ecbv = ext_csd_bus_width;\
1631 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
1632 ecbv++) \
1633 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
1634
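/*
 * Try the eMMC bus modes in order of preference, programming the bus
 * width and timing in EXT_CSD and verifying each configuration with an
 * EXT_CSD read-back; revert to 1-bit legacy mode if a step fails.
 */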
1635 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
1636 {
1637 int err;
1638 const struct mode_width_tuning *mwt;
1639 const struct ext_csd_bus_width *ecbw;
1640
1641 /* Restrict card's capabilities by what the host can do */
1642 card_caps &= (mmc->host_caps | MMC_MODE_1BIT);
1643
1644 /* Only version 4 of MMC supports wider bus widths */
1645 if (mmc->version < MMC_VERSION_4)
1646 return 0;
1647
1648 if (!mmc->ext_csd) {
1649 debug("No ext_csd found!\n"); /* this should enver happen */
1650 return -ENOTSUPP;
1651 }
1652
1653 mmc_set_clock(mmc, mmc->legacy_speed, false);
1654
1655 for_each_mmc_mode_by_pref(card_caps, mwt) {
1656 for_each_supported_width(card_caps & mwt->widths,
1657 mmc_is_mode_ddr(mwt->mode), ecbw) {
1658 debug("trying mode %s width %d (at %d MHz)\n",
1659 mmc_mode_name(mwt->mode),
1660 bus_width(ecbw->cap),
1661 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1662 /* configure the bus width (card + host) */
1663 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1664 EXT_CSD_BUS_WIDTH,
1665 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
1666 if (err)
1667 goto error;
1668 mmc_set_bus_width(mmc, bus_width(ecbw->cap));
1669
1670 /* configure the bus speed (card) */
1671 err = mmc_set_card_speed(mmc, mwt->mode);
1672 if (err)
1673 goto error;
1674
1675 /*
1676 * configure the bus width AND the ddr mode (card)
1677 * The host side will be taken care of in the next step
1678 */
1679 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
1680 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1681 EXT_CSD_BUS_WIDTH,
1682 ecbw->ext_csd_bits);
1683 if (err)
1684 goto error;
1685 }
1686
1687 /* configure the bus mode (host) */
1688 mmc_select_mode(mmc, mwt->mode);
1689 mmc_set_clock(mmc, mmc->tran_speed, false);
1690
1691 /* execute tuning if needed */
1692 if (mwt->tuning) {
1693 err = mmc_execute_tuning(mmc, mwt->tuning);
1694 if (err) {
1695 debug("tuning failed\n");
1696 goto error;
1697 }
1698 }
1699
1700 /* do a transfer to check the configuration */
1701 err = mmc_read_and_compare_ext_csd(mmc);
1702 if (!err)
1703 return 0;
1704 error:
1705 /* if an error occurred, revert to a safer bus mode */
1706 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1707 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
1708 mmc_select_mode(mmc, MMC_LEGACY);
1709 mmc_set_bus_width(mmc, 1);
1710 }
1711 }
1712
1713 printf("unable to select a mode\n");
1714
1715 return -ENOTSUPP;
1716 }
1717
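/*
 * For eMMC v4 and later, read the EXT_CSD and derive the exact version,
 * the sector-count based capacity, the boot/RPMB/GP partition sizes and
 * the high-capacity erase and write-protect group sizes.
 */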
1718 static int mmc_startup_v4(struct mmc *mmc)
1719 {
1720 int err, i;
1721 u64 capacity;
1722 bool has_parts = false;
1723 bool part_completed;
1724 u8 *ext_csd;
1725
1726 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
1727 return 0;
1728
1729 ext_csd = malloc_cache_aligned(MMC_MAX_BLOCK_LEN);
1730 if (!ext_csd)
1731 return -ENOMEM;
1732
1733 mmc->ext_csd = ext_csd;
1734
1735 /* check ext_csd version and capacity */
1736 err = mmc_send_ext_csd(mmc, ext_csd);
1737 if (err)
1738 return err;
1739 if (ext_csd[EXT_CSD_REV] >= 2) {
1740 /*
1741 * According to the JEDEC Standard, the value of
1742 * ext_csd's capacity is valid if the value is more
1743 * than 2GB
1744 */
1745 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
1746 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
1747 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
1748 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
1749 capacity *= MMC_MAX_BLOCK_LEN;
1750 if ((capacity >> 20) > 2 * 1024)
1751 mmc->capacity_user = capacity;
1752 }
1753
1754 switch (ext_csd[EXT_CSD_REV]) {
1755 case 1:
1756 mmc->version = MMC_VERSION_4_1;
1757 break;
1758 case 2:
1759 mmc->version = MMC_VERSION_4_2;
1760 break;
1761 case 3:
1762 mmc->version = MMC_VERSION_4_3;
1763 break;
1764 case 5:
1765 mmc->version = MMC_VERSION_4_41;
1766 break;
1767 case 6:
1768 mmc->version = MMC_VERSION_4_5;
1769 break;
1770 case 7:
1771 mmc->version = MMC_VERSION_5_0;
1772 break;
1773 case 8:
1774 mmc->version = MMC_VERSION_5_1;
1775 break;
1776 }
1777
1778 /* The partition data may be non-zero but it is only
1779 * effective if PARTITION_SETTING_COMPLETED is set in
1780 * EXT_CSD, so ignore any data if this bit is not set,
1781 * except for enabling the high-capacity group size
1782 * definition (see below).
1783 */
1784 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
1785 EXT_CSD_PARTITION_SETTING_COMPLETED);
1786
1787 /* store the partition info of emmc */
1788 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
1789 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
1790 ext_csd[EXT_CSD_BOOT_MULT])
1791 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
1792 if (part_completed &&
1793 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
1794 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
1795
1796 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
1797
1798 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
1799
1800 for (i = 0; i < 4; i++) {
1801 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
1802 uint mult = (ext_csd[idx + 2] << 16) +
1803 (ext_csd[idx + 1] << 8) + ext_csd[idx];
1804 if (mult)
1805 has_parts = true;
1806 if (!part_completed)
1807 continue;
1808 mmc->capacity_gp[i] = mult;
1809 mmc->capacity_gp[i] *=
1810 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
1811 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1812 mmc->capacity_gp[i] <<= 19;
1813 }
1814
1815 if (part_completed) {
1816 mmc->enh_user_size =
1817 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
1818 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
1819 ext_csd[EXT_CSD_ENH_SIZE_MULT];
1820 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
1821 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1822 mmc->enh_user_size <<= 19;
1823 mmc->enh_user_start =
1824 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
1825 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
1826 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
1827 ext_csd[EXT_CSD_ENH_START_ADDR];
1828 if (mmc->high_capacity)
1829 mmc->enh_user_start <<= 9;
1830 }
1831
1832 /*
1833 * Host needs to enable ERASE_GRP_DEF bit if device is
1834 * partitioned. This bit will be lost every time after a reset
1835 * or power off. This will affect erase size.
1836 */
1837 if (part_completed)
1838 has_parts = true;
1839 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
1840 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
1841 has_parts = true;
1842 if (has_parts) {
1843 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1844 EXT_CSD_ERASE_GROUP_DEF, 1);
1845
1846 if (err)
1847 return err;
1848
1849 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1850 }
1851
1852 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
1853 /* Read out group size from ext_csd */
1854 mmc->erase_grp_size =
1855 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1856 /*
1857 * if high capacity and partition setting completed
1858 * SEC_COUNT is valid even if it is smaller than 2 GiB
1859 * JEDEC Standard JESD84-B45, 6.2.4
1860 */
1861 if (mmc->high_capacity && part_completed) {
1862 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
1863 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
1864 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
1865 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
1866 capacity *= MMC_MAX_BLOCK_LEN;
1867 mmc->capacity_user = capacity;
1868 }
1869 } else {
1870 /* Calculate the group size from the csd value. */
1871 int erase_gsz, erase_gmul;
1872
1873 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
1874 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
1875 mmc->erase_grp_size = (erase_gsz + 1)
1876 * (erase_gmul + 1);
1877 }
1878
1879 mmc->hc_wp_grp_size = 1024
1880 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1881 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1882
1883 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1884
1885 return 0;
1886 }
1887
1888 static int mmc_startup(struct mmc *mmc)
1889 {
1890 int err, i;
1891 uint mult, freq;
1892 u64 cmult, csize;
1893 struct mmc_cmd cmd;
1894 struct blk_desc *bdesc;
1895
1896 #ifdef CONFIG_MMC_SPI_CRC_ON
1897 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
1898 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
1899 cmd.resp_type = MMC_RSP_R1;
1900 cmd.cmdarg = 1;
1901 err = mmc_send_cmd(mmc, &cmd, NULL);
1902 if (err)
1903 return err;
1904 }
1905 #endif
1906
1907 /* Put the Card in Identify Mode */
1908 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
1909 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
1910 cmd.resp_type = MMC_RSP_R2;
1911 cmd.cmdarg = 0;
1912
1913 err = mmc_send_cmd(mmc, &cmd, NULL);
1914
1915 #ifdef CONFIG_MMC_QUIRKS
1916 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
1917 int retries = 4;
1918 /*
1919 * It has been seen that SEND_CID may fail on the first
1920 * attempt; let's try a few more times
1921 */
1922 do {
1923 err = mmc_send_cmd(mmc, &cmd, NULL);
1924 if (!err)
1925 break;
1926 } while (retries--);
1927 }
1928 #endif
1929
1930 if (err)
1931 return err;
1932
1933 memcpy(mmc->cid, cmd.response, 16);
1934
1935 /*
1936 * For MMC cards, set the Relative Address.
1937 * For SD cards, get the Relative Address.
1938 * This also puts the cards into Standby State
1939 */
1940 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
1941 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
1942 cmd.cmdarg = mmc->rca << 16;
1943 cmd.resp_type = MMC_RSP_R6;
1944
1945 err = mmc_send_cmd(mmc, &cmd, NULL);
1946
1947 if (err)
1948 return err;
1949
1950 if (IS_SD(mmc))
1951 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
1952 }
1953
1954 /* Get the Card-Specific Data */
1955 cmd.cmdidx = MMC_CMD_SEND_CSD;
1956 cmd.resp_type = MMC_RSP_R2;
1957 cmd.cmdarg = mmc->rca << 16;
1958
1959 err = mmc_send_cmd(mmc, &cmd, NULL);
1960
1961 if (err)
1962 return err;
1963
1964 mmc->csd[0] = cmd.response[0];
1965 mmc->csd[1] = cmd.response[1];
1966 mmc->csd[2] = cmd.response[2];
1967 mmc->csd[3] = cmd.response[3];
1968
1969 if (mmc->version == MMC_VERSION_UNKNOWN) {
1970 int version = (cmd.response[0] >> 26) & 0xf;
1971
1972 switch (version) {
1973 case 0:
1974 mmc->version = MMC_VERSION_1_2;
1975 break;
1976 case 1:
1977 mmc->version = MMC_VERSION_1_4;
1978 break;
1979 case 2:
1980 mmc->version = MMC_VERSION_2_2;
1981 break;
1982 case 3:
1983 mmc->version = MMC_VERSION_3;
1984 break;
1985 case 4:
1986 mmc->version = MMC_VERSION_4;
1987 break;
1988 default:
1989 mmc->version = MMC_VERSION_1_2;
1990 break;
1991 }
1992 }
1993
1994 /* divide frequency by 10, since the mults are 10x bigger */
1995 freq = fbase[(cmd.response[0] & 0x7)];
1996 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
1997
1998 mmc->legacy_speed = freq * mult;
1999 mmc_select_mode(mmc, MMC_LEGACY);
2000
2001 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2002 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2003
2004 if (IS_SD(mmc))
2005 mmc->write_bl_len = mmc->read_bl_len;
2006 else
2007 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
2008
2009 if (mmc->high_capacity) {
2010 csize = (mmc->csd[1] & 0x3f) << 16
2011 | (mmc->csd[2] & 0xffff0000) >> 16;
2012 cmult = 8;
2013 } else {
2014 csize = (mmc->csd[1] & 0x3ff) << 2
2015 | (mmc->csd[2] & 0xc0000000) >> 30;
2016 cmult = (mmc->csd[2] & 0x00038000) >> 15;
2017 }
2018
2019 mmc->capacity_user = (csize + 1) << (cmult + 2);
2020 mmc->capacity_user *= mmc->read_bl_len;
2021 mmc->capacity_boot = 0;
2022 mmc->capacity_rpmb = 0;
2023 for (i = 0; i < 4; i++)
2024 mmc->capacity_gp[i] = 0;
2025
2026 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2027 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2028
2029 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2030 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2031
2032 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2033 cmd.cmdidx = MMC_CMD_SET_DSR;
2034 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2035 cmd.resp_type = MMC_RSP_NONE;
2036 if (mmc_send_cmd(mmc, &cmd, NULL))
2037 printf("MMC: SET_DSR failed\n");
2038 }
2039
2040 /* Select the card, and put it into Transfer Mode */
2041 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2042 cmd.cmdidx = MMC_CMD_SELECT_CARD;
2043 cmd.resp_type = MMC_RSP_R1;
2044 cmd.cmdarg = mmc->rca << 16;
2045 err = mmc_send_cmd(mmc, &cmd, NULL);
2046
2047 if (err)
2048 return err;
2049 }
2050
2051 /*
2052 * For SD, its erase group is always one sector
2053 */
2054 mmc->erase_grp_size = 1;
2055 mmc->part_config = MMCPART_NOAVAILABLE;
2056
2057 err = mmc_startup_v4(mmc);
2058 if (err)
2059 return err;
2060
2061 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2062 if (err)
2063 return err;
2064
2065 if (IS_SD(mmc)) {
2066 err = sd_get_capabilities(mmc);
2067 if (err)
2068 return err;
2069 err = sd_select_mode_and_width(mmc, mmc->card_caps);
2070 } else {
2071 err = mmc_get_capabilities(mmc);
2072 if (err)
2073 return err;
2074 mmc_select_mode_and_width(mmc, mmc->card_caps);
2075 }
2076
2077 if (err)
2078 return err;
2079
2080 mmc->best_mode = mmc->selected_mode;
2081
2082 /* Fix the block length for DDR mode */
2083 if (mmc->ddr_mode) {
2084 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2085 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2086 }
2087
2088 /* fill in device description */
2089 bdesc = mmc_get_blk_desc(mmc);
2090 bdesc->lun = 0;
2091 bdesc->hwpart = 0;
2092 bdesc->type = 0;
2093 bdesc->blksz = mmc->read_bl_len;
2094 bdesc->log2blksz = LOG2(bdesc->blksz);
2095 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2096 #if !defined(CONFIG_SPL_BUILD) || \
2097 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2098 !defined(CONFIG_USE_TINY_PRINTF))
2099 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2100 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2101 (mmc->cid[3] >> 16) & 0xffff);
2102 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2103 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2104 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2105 (mmc->cid[2] >> 24) & 0xff);
2106 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2107 (mmc->cid[2] >> 16) & 0xf);
2108 #else
2109 bdesc->vendor[0] = 0;
2110 bdesc->product[0] = 0;
2111 bdesc->revision[0] = 0;
2112 #endif
2113 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
2114 part_init(bdesc);
2115 #endif
2116
2117 return 0;
2118 }
2119
2120 static int mmc_send_if_cond(struct mmc *mmc)
2121 {
2122 struct mmc_cmd cmd;
2123 int err;
2124
2125 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2126 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2127 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2128 cmd.resp_type = MMC_RSP_R7;
2129
2130 err = mmc_send_cmd(mmc, &cmd, NULL);
2131
2132 if (err)
2133 return err;
2134
2135 if ((cmd.response[0] & 0xff) != 0xaa)
2136 return -EOPNOTSUPP;
2137 else
2138 mmc->version = SD_VERSION_2;
2139
2140 return 0;
2141 }
2142
2143 #if !CONFIG_IS_ENABLED(DM_MMC)
2144 /* board-specific MMC power initializations. */
2145 __weak void board_mmc_power_init(void)
2146 {
2147 }
2148 #endif
2149
2150 static int mmc_power_init(struct mmc *mmc)
2151 {
2152 #if CONFIG_IS_ENABLED(DM_MMC)
2153 #if CONFIG_IS_ENABLED(DM_REGULATOR)
2154 int ret;
2155
2156 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
2157 &mmc->vmmc_supply);
2158 if (ret)
2159 debug("%s: No vmmc supply\n", mmc->dev->name);
2160
2161 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
2162 &mmc->vqmmc_supply);
2163 if (ret)
2164 debug("%s: No vqmmc supply\n", mmc->dev->name);
2165 #endif
2166 #else /* !CONFIG_DM_MMC */
2167 /*
2168 * Driver model should use a regulator, as above, rather than calling
2169 * out to board code.
2170 */
2171 board_mmc_power_init();
2172 #endif
2173 return 0;
2174 }
2175
2176 /*
2177 * put the host in the initial state:
2178 * - turn on Vdd (card power supply)
2179 * - configure the bus width and clock to minimal values
2180 */
2181 static void mmc_set_initial_state(struct mmc *mmc)
2182 {
2183 int err;
2184
2185 /* First try to set 3.3V. If it fails set to 1.8V */
2186 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2187 if (err != 0)
2188 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2189 if (err != 0)
2190 printf("mmc: failed to set signal voltage\n");
2191
2192 mmc_select_mode(mmc, MMC_LEGACY);
2193 mmc_set_bus_width(mmc, 1);
2194 mmc_set_clock(mmc, 0, false);
2195 }
2196
2197 static int mmc_power_on(struct mmc *mmc)
2198 {
2199 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2200 if (mmc->vmmc_supply) {
2201 int ret = regulator_set_enable(mmc->vmmc_supply, true);
2202
2203 if (ret) {
2204 puts("Error enabling VMMC supply\n");
2205 return ret;
2206 }
2207 }
2208 #endif
2209 return 0;
2210 }
2211
2212 static int mmc_power_off(struct mmc *mmc)
2213 {
2214 mmc_set_clock(mmc, 1, true);
2215 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2216 if (mmc->vmmc_supply) {
2217 int ret = regulator_set_enable(mmc->vmmc_supply, false);
2218
2219 if (ret) {
2220 debug("Error disabling VMMC supply\n");
2221 return ret;
2222 }
2223 }
2224 #endif
2225 return 0;
2226 }
2227
2228 static int mmc_power_cycle(struct mmc *mmc)
2229 {
2230 int ret;
2231
2232 ret = mmc_power_off(mmc);
2233 if (ret)
2234 return ret;
2235 /*
2236 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
2237 * to be on the safer side.
2238 */
2239 udelay(2000);
2240 return mmc_power_on(mmc);
2241 }
2242
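/*
 * First phase of card initialization: check card detect, power the card
 * up (with a full power cycle when possible, so that UHS modes can be
 * used safely), reset it with CMD0, then probe for an SD card (CMD8 and
 * ACMD41) before falling back to CMD1 for eMMC.
 */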
2243 int mmc_start_init(struct mmc *mmc)
2244 {
2245 bool no_card;
2246 bool uhs_en = supports_uhs(mmc->cfg->host_caps);
2247 int err;
2248
2249 mmc->host_caps = mmc->cfg->host_caps;
2250
2251 /* we pretend there's no card when init is NULL */
2252 no_card = mmc_getcd(mmc) == 0;
2253 #if !CONFIG_IS_ENABLED(DM_MMC)
2254 no_card = no_card || (mmc->cfg->ops->init == NULL);
2255 #endif
2256 if (no_card) {
2257 mmc->has_init = 0;
2258 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2259 printf("MMC: no card present\n");
2260 #endif
2261 return -ENOMEDIUM;
2262 }
2263
2264 if (mmc->has_init)
2265 return 0;
2266
2267 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
2268 mmc_adapter_card_type_ident();
2269 #endif
2270 err = mmc_power_init(mmc);
2271 if (err)
2272 return err;
2273
2274 #ifdef CONFIG_MMC_QUIRKS
2275 mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
2276 MMC_QUIRK_RETRY_SEND_CID;
2277 #endif
2278
2279 err = mmc_power_cycle(mmc);
2280 if (err) {
2281 /*
2282 * if power cycling is not supported, we should not try
2283 * to use the UHS modes, because we wouldn't be able to
2284 * recover from an error during the UHS initialization.
2285 */
2286 debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
2287 uhs_en = false;
2288 mmc->host_caps &= ~UHS_CAPS;
2289 err = mmc_power_on(mmc);
2290 }
2291 if (err)
2292 return err;
2293
2294 #if CONFIG_IS_ENABLED(DM_MMC)
2295 /* The device has already been probed ready for use */
2296 #else
2297 /* made sure it's not NULL earlier */
2298 err = mmc->cfg->ops->init(mmc);
2299 if (err)
2300 return err;
2301 #endif
2302 mmc->ddr_mode = 0;
2303
2304 retry:
2305 mmc_set_initial_state(mmc);
2306 mmc_send_init_stream(mmc);
2307
2308 /* Reset the Card */
2309 err = mmc_go_idle(mmc);
2310
2311 if (err)
2312 return err;
2313
2314 /* The internal partition is reset to the user partition (0) at every CMD0 */
2315 mmc_get_blk_desc(mmc)->hwpart = 0;
2316
2317 /* Test for SD version 2 */
2318 err = mmc_send_if_cond(mmc);
2319
2320 /* Now try to get the SD card's operating condition */
2321 err = sd_send_op_cond(mmc, uhs_en);
2322 if (err && uhs_en) {
2323 uhs_en = false;
2324 mmc_power_cycle(mmc);
2325 goto retry;
2326 }
2327
2328 /* If the command timed out, we check for an MMC card */
2329 if (err == -ETIMEDOUT) {
2330 err = mmc_send_op_cond(mmc);
2331
2332 if (err) {
2333 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2334 printf("Card did not respond to voltage select!\n");
2335 #endif
2336 return -EOPNOTSUPP;
2337 }
2338 }
2339
2340 if (!err)
2341 mmc->init_in_progress = 1;
2342
2343 return err;
2344 }
2345
2346 static int mmc_complete_init(struct mmc *mmc)
2347 {
2348 int err = 0;
2349
2350 mmc->init_in_progress = 0;
2351 if (mmc->op_cond_pending)
2352 err = mmc_complete_op_cond(mmc);
2353
2354 if (!err)
2355 err = mmc_startup(mmc);
2356 if (err)
2357 mmc->has_init = 0;
2358 else
2359 mmc->has_init = 1;
2360 return err;
2361 }
2362
2363 int mmc_init(struct mmc *mmc)
2364 {
2365 int err = 0;
2366 __maybe_unused unsigned start;
2367 #if CONFIG_IS_ENABLED(DM_MMC)
2368 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2369
2370 upriv->mmc = mmc;
2371 #endif
2372 if (mmc->has_init)
2373 return 0;
2374
2375 start = get_timer(0);
2376
2377 if (!mmc->init_in_progress)
2378 err = mmc_start_init(mmc);
2379
2380 if (!err)
2381 err = mmc_complete_init(mmc);
2382 if (err)
2383 printf("%s: %d, time %lu\n", __func__, err, get_timer(start));
2384
2385 return err;
2386 }
2387
2388 int mmc_set_dsr(struct mmc *mmc, u16 val)
2389 {
2390 mmc->dsr = val;
2391 return 0;
2392 }
2393
2394 /* CPU-specific MMC initializations */
2395 __weak int cpu_mmc_init(bd_t *bis)
2396 {
2397 return -1;
2398 }
2399
2400 /* board-specific MMC initializations. */
2401 __weak int board_mmc_init(bd_t *bis)
2402 {
2403 return -1;
2404 }
2405
2406 void mmc_set_preinit(struct mmc *mmc, int preinit)
2407 {
2408 mmc->preinit = preinit;
2409 }
2410
2411 #if CONFIG_IS_ENABLED(DM_MMC) && defined(CONFIG_SPL_BUILD)
2412 static int mmc_probe(bd_t *bis)
2413 {
2414 return 0;
2415 }
2416 #elif CONFIG_IS_ENABLED(DM_MMC)
2417 static int mmc_probe(bd_t *bis)
2418 {
2419 int ret, i;
2420 struct uclass *uc;
2421 struct udevice *dev;
2422
2423 ret = uclass_get(UCLASS_MMC, &uc);
2424 if (ret)
2425 return ret;
2426
2427 /*
2428 * Try to add them in sequence order. Really with driver model we
2429 * should allow holes, but the current MMC list does not allow that.
2430 * So if we request 0, 1, 3 we will get 0, 1, 2.
2431 */
2432 for (i = 0; ; i++) {
2433 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
2434 if (ret == -ENODEV)
2435 break;
2436 }
2437 uclass_foreach_dev(dev, uc) {
2438 ret = device_probe(dev);
2439 if (ret)
2440 printf("%s - probe failed: %d\n", dev->name, ret);
2441 }
2442
2443 return 0;
2444 }
2445 #else
2446 static int mmc_probe(bd_t *bis)
2447 {
2448 if (board_mmc_init(bis) < 0)
2449 cpu_mmc_init(bis);
2450
2451 return 0;
2452 }
2453 #endif
2454
2455 int mmc_initialize(bd_t *bis)
2456 {
2457 static int initialized = 0;
2458 int ret;
2459 if (initialized) /* Avoid initializing mmc multiple times */
2460 return 0;
2461 initialized = 1;
2462
2463 #if !CONFIG_IS_ENABLED(BLK)
2464 #if !CONFIG_IS_ENABLED(MMC_TINY)
2465 mmc_list_init();
2466 #endif
2467 #endif
2468 ret = mmc_probe(bis);
2469 if (ret)
2470 return ret;
2471
2472 #ifndef CONFIG_SPL_BUILD
2473 print_mmc_devices(',');
2474 #endif
2475
2476 mmc_do_preinit();
2477 return 0;
2478 }
2479
2480 #ifdef CONFIG_CMD_BKOPS_ENABLE
2481 int mmc_set_bkops_enable(struct mmc *mmc)
2482 {
2483 int err;
2484 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2485
2486 err = mmc_send_ext_csd(mmc, ext_csd);
2487 if (err) {
2488 puts("Could not get ext_csd register values\n");
2489 return err;
2490 }
2491
2492 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
2493 puts("Background operations not supported on device\n");
2494 return -EMEDIUMTYPE;
2495 }
2496
2497 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
2498 puts("Background operations already enabled\n");
2499 return 0;
2500 }
2501
2502 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
2503 if (err) {
2504 puts("Failed to enable manual background operations\n");
2505 return err;
2506 }
2507
2508 puts("Enabled manual background operations\n");
2509
2510 return 0;
2511 }
2512 #endif