mmc: use the right voltage level for MMC DDR and HS200 modes
[people/ms/u-boot.git] / drivers / mmc / mmc.c
1 /*
2 * Copyright 2008, Freescale Semiconductor, Inc
3 * Andy Fleming
4 *
5 * Based vaguely on the Linux code
6 *
7 * SPDX-License-Identifier: GPL-2.0+
8 */
9
10 #include <config.h>
11 #include <common.h>
12 #include <command.h>
13 #include <dm.h>
14 #include <dm/device-internal.h>
15 #include <errno.h>
16 #include <mmc.h>
17 #include <part.h>
18 #include <power/regulator.h>
19 #include <malloc.h>
20 #include <memalign.h>
21 #include <linux/list.h>
22 #include <div64.h>
23 #include "mmc_private.h"
24
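/*
 * sd_au_size[] maps the 4-bit AU_SIZE field of the SD Status register
 * (consumed by sd_read_ssr() below) to an allocation unit size in
 * 512-byte sectors. For example, AU_SIZE = 7 selects SZ_1M / 512 =
 * 2048 sectors, i.e. a 1 MiB allocation unit.
 */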
25 static const unsigned int sd_au_size[] = {
26 0, SZ_16K / 512, SZ_32K / 512,
27 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
28 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
29 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
30 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512, SZ_64M / 512,
31 };
32
33 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
34 static int mmc_power_cycle(struct mmc *mmc);
35 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps);
36
37 #if CONFIG_IS_ENABLED(MMC_TINY)
38 static struct mmc mmc_static;
39 struct mmc *find_mmc_device(int dev_num)
40 {
41 return &mmc_static;
42 }
43
44 void mmc_do_preinit(void)
45 {
46 struct mmc *m = &mmc_static;
47 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
48 mmc_set_preinit(m, 1);
49 #endif
50 if (m->preinit)
51 mmc_start_init(m);
52 }
53
54 struct blk_desc *mmc_get_blk_desc(struct mmc *mmc)
55 {
56 return &mmc->block_dev;
57 }
58 #endif
59
60 #if !CONFIG_IS_ENABLED(DM_MMC)
61
62 static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout)
63 {
64 return -ENOSYS;
65 }
66
67 __weak int board_mmc_getwp(struct mmc *mmc)
68 {
69 return -1;
70 }
71
72 int mmc_getwp(struct mmc *mmc)
73 {
74 int wp;
75
76 wp = board_mmc_getwp(mmc);
77
78 if (wp < 0) {
79 if (mmc->cfg->ops->getwp)
80 wp = mmc->cfg->ops->getwp(mmc);
81 else
82 wp = 0;
83 }
84
85 return wp;
86 }
87
88 __weak int board_mmc_getcd(struct mmc *mmc)
89 {
90 return -1;
91 }
92 #endif
93
94 #ifdef CONFIG_MMC_TRACE
95 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
96 {
97 printf("CMD_SEND:%d\n", cmd->cmdidx);
98 printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
99 }
100
101 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
102 {
103 int i;
104 u8 *ptr;
105
106 if (ret) {
107 printf("\t\tRET\t\t\t %d\n", ret);
108 } else {
109 switch (cmd->resp_type) {
110 case MMC_RSP_NONE:
111 printf("\t\tMMC_RSP_NONE\n");
112 break;
113 case MMC_RSP_R1:
114 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
115 cmd->response[0]);
116 break;
117 case MMC_RSP_R1b:
118 printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
119 cmd->response[0]);
120 break;
121 case MMC_RSP_R2:
122 printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
123 cmd->response[0]);
124 printf("\t\t \t\t 0x%08X \n",
125 cmd->response[1]);
126 printf("\t\t \t\t 0x%08X \n",
127 cmd->response[2]);
128 printf("\t\t \t\t 0x%08X \n",
129 cmd->response[3]);
130 printf("\n");
131 printf("\t\t\t\t\tDUMPING DATA\n");
132 for (i = 0; i < 4; i++) {
133 int j;
134 printf("\t\t\t\t\t%03d - ", i*4);
135 ptr = (u8 *)&cmd->response[i];
136 ptr += 3;
137 for (j = 0; j < 4; j++)
138 printf("%02X ", *ptr--);
139 printf("\n");
140 }
141 break;
142 case MMC_RSP_R3:
143 printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
144 cmd->response[0]);
145 break;
146 default:
147 printf("\t\tERROR MMC rsp not supported\n");
148 break;
149 }
150 }
151 }
152
153 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
154 {
155 int status;
156
157 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
158 printf("CURR STATE:%d\n", status);
159 }
160 #endif
161
162 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
163 const char *mmc_mode_name(enum bus_mode mode)
164 {
165 static const char *const names[] = {
166 [MMC_LEGACY] = "MMC legacy",
167 [SD_LEGACY] = "SD Legacy",
168 [MMC_HS] = "MMC High Speed (26MHz)",
169 [SD_HS] = "SD High Speed (50MHz)",
170 [UHS_SDR12] = "UHS SDR12 (25MHz)",
171 [UHS_SDR25] = "UHS SDR25 (50MHz)",
172 [UHS_SDR50] = "UHS SDR50 (100MHz)",
173 [UHS_SDR104] = "UHS SDR104 (208MHz)",
174 [UHS_DDR50] = "UHS DDR50 (50MHz)",
175 [MMC_HS_52] = "MMC High Speed (52MHz)",
176 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
177 [MMC_HS_200] = "HS200 (200MHz)",
178 };
179
180 if (mode >= MMC_MODES_END)
181 return "Unknown mode";
182 else
183 return names[mode];
184 }
185 #endif
186
187 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
188 {
189 static const int freqs[] = {
190 [SD_LEGACY] = 25000000,
191 [MMC_HS] = 26000000,
192 [SD_HS] = 50000000,
193 [UHS_SDR12] = 25000000,
194 [UHS_SDR25] = 50000000,
195 [UHS_SDR50] = 100000000,
196 [UHS_SDR104] = 208000000,
197 [UHS_DDR50] = 50000000,
198 [MMC_HS_52] = 52000000,
199 [MMC_DDR_52] = 52000000,
200 [MMC_HS_200] = 200000000,
201 };
202
203 if (mode == MMC_LEGACY)
204 return mmc->legacy_speed;
205 else if (mode >= MMC_MODES_END)
206 return 0;
207 else
208 return freqs[mode];
209 }
210
211 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
212 {
213 mmc->selected_mode = mode;
214 mmc->tran_speed = mmc_mode2freq(mmc, mode);
215 mmc->ddr_mode = mmc_is_mode_ddr(mode);
216 debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
217 mmc->tran_speed / 1000000);
218 return 0;
219 }
220
221 #if !CONFIG_IS_ENABLED(DM_MMC)
222 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
223 {
224 int ret;
225
226 mmmc_trace_before_send(mmc, cmd);
227 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
228 mmmc_trace_after_send(mmc, cmd, ret);
229
230 return ret;
231 }
232 #endif
233
234 int mmc_send_status(struct mmc *mmc, int timeout)
235 {
236 struct mmc_cmd cmd;
237 int err, retries = 5;
238
239 cmd.cmdidx = MMC_CMD_SEND_STATUS;
240 cmd.resp_type = MMC_RSP_R1;
241 if (!mmc_host_is_spi(mmc))
242 cmd.cmdarg = mmc->rca << 16;
243
244 while (1) {
245 err = mmc_send_cmd(mmc, &cmd, NULL);
246 if (!err) {
247 if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
248 (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
249 MMC_STATE_PRG)
250 break;
251
252 if (cmd.response[0] & MMC_STATUS_MASK) {
253 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
254 printf("Status Error: 0x%08X\n",
255 cmd.response[0]);
256 #endif
257 return -ECOMM;
258 }
259 } else if (--retries < 0)
260 return err;
261
262 if (timeout-- <= 0)
263 break;
264
265 udelay(1000);
266 }
267
268 mmc_trace_state(mmc, &cmd);
269 if (timeout <= 0) {
270 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
271 printf("Timeout waiting card ready\n");
272 #endif
273 return -ETIMEDOUT;
274 }
275
276 return 0;
277 }
278
279 int mmc_set_blocklen(struct mmc *mmc, int len)
280 {
281 struct mmc_cmd cmd;
282 int err;
283
284 if (mmc->ddr_mode)
285 return 0;
286
287 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
288 cmd.resp_type = MMC_RSP_R1;
289 cmd.cmdarg = len;
290
291 err = mmc_send_cmd(mmc, &cmd, NULL);
292
293 #ifdef CONFIG_MMC_QUIRKS
294 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
295 int retries = 4;
296 /*
297 * It has been seen that SET_BLOCKLEN may fail on the first
298 		 * attempt, let's try a few more times
299 */
300 do {
301 err = mmc_send_cmd(mmc, &cmd, NULL);
302 if (!err)
303 break;
304 } while (retries--);
305 }
306 #endif
307
308 return err;
309 }
310
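/*
 * Read 'blkcnt' blocks starting at block 'start' into 'dst'. Note the
 * addressing difference handled below: high-capacity cards take a block
 * number as the command argument, while standard-capacity cards take a
 * byte offset, e.g. block 100 with read_bl_len = 512 gives
 * cmdarg = 100 * 512 = 51200.
 */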
311 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
312 lbaint_t blkcnt)
313 {
314 struct mmc_cmd cmd;
315 struct mmc_data data;
316
317 if (blkcnt > 1)
318 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
319 else
320 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
321
322 if (mmc->high_capacity)
323 cmd.cmdarg = start;
324 else
325 cmd.cmdarg = start * mmc->read_bl_len;
326
327 cmd.resp_type = MMC_RSP_R1;
328
329 data.dest = dst;
330 data.blocks = blkcnt;
331 data.blocksize = mmc->read_bl_len;
332 data.flags = MMC_DATA_READ;
333
334 if (mmc_send_cmd(mmc, &cmd, &data))
335 return 0;
336
337 if (blkcnt > 1) {
338 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
339 cmd.cmdarg = 0;
340 cmd.resp_type = MMC_RSP_R1b;
341 if (mmc_send_cmd(mmc, &cmd, NULL)) {
342 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
343 printf("mmc fail to send stop cmd\n");
344 #endif
345 return 0;
346 }
347 }
348
349 return blkcnt;
350 }
351
352 #if CONFIG_IS_ENABLED(BLK)
353 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
354 #else
355 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
356 void *dst)
357 #endif
358 {
359 #if CONFIG_IS_ENABLED(BLK)
360 struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
361 #endif
362 int dev_num = block_dev->devnum;
363 int err;
364 lbaint_t cur, blocks_todo = blkcnt;
365
366 if (blkcnt == 0)
367 return 0;
368
369 struct mmc *mmc = find_mmc_device(dev_num);
370 if (!mmc)
371 return 0;
372
373 if (CONFIG_IS_ENABLED(MMC_TINY))
374 err = mmc_switch_part(mmc, block_dev->hwpart);
375 else
376 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
377
378 if (err < 0)
379 return 0;
380
381 if ((start + blkcnt) > block_dev->lba) {
382 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
383 printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
384 start + blkcnt, block_dev->lba);
385 #endif
386 return 0;
387 }
388
389 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
390 debug("%s: Failed to set blocklen\n", __func__);
391 return 0;
392 }
393
394 do {
395 cur = (blocks_todo > mmc->cfg->b_max) ?
396 mmc->cfg->b_max : blocks_todo;
397 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
398 debug("%s: Failed to read blocks\n", __func__);
399 return 0;
400 }
401 blocks_todo -= cur;
402 start += cur;
403 dst += cur * mmc->read_bl_len;
404 } while (blocks_todo > 0);
405
406 return blkcnt;
407 }
408
409 static int mmc_go_idle(struct mmc *mmc)
410 {
411 struct mmc_cmd cmd;
412 int err;
413
414 udelay(1000);
415
416 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
417 cmd.cmdarg = 0;
418 cmd.resp_type = MMC_RSP_NONE;
419
420 err = mmc_send_cmd(mmc, &cmd, NULL);
421
422 if (err)
423 return err;
424
425 udelay(2000);
426
427 return 0;
428 }
429
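/*
 * Switch the SD card signalling level with CMD11. The sequence below
 * follows the SD spec: send CMD11, wait for the card to pull DAT[0:3]
 * low, gate the clock while the new voltage is applied, then ungate the
 * clock and wait for the card to release DAT[0:3] again; the card holding
 * DAT[0:3] low at that point indicates a failed switch.
 */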
430 static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
431 {
432 struct mmc_cmd cmd;
433 int err = 0;
434
435 /*
436 * Send CMD11 only if the request is to switch the card to
437 * 1.8V signalling.
438 */
439 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
440 return mmc_set_signal_voltage(mmc, signal_voltage);
441
442 cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
443 cmd.cmdarg = 0;
444 cmd.resp_type = MMC_RSP_R1;
445
446 err = mmc_send_cmd(mmc, &cmd, NULL);
447 if (err)
448 return err;
449
450 if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
451 return -EIO;
452
453 /*
454 * The card should drive cmd and dat[0:3] low immediately
455 * after the response of cmd11, but wait 100 us to be sure
456 */
457 err = mmc_wait_dat0(mmc, 0, 100);
458 if (err == -ENOSYS)
459 udelay(100);
460 else if (err)
461 return -ETIMEDOUT;
462
463 /*
464 * During a signal voltage level switch, the clock must be gated
465 * for 5 ms according to the SD spec
466 */
467 mmc_set_clock(mmc, mmc->clock, true);
468
469 err = mmc_set_signal_voltage(mmc, signal_voltage);
470 if (err)
471 return err;
472
473 /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
474 mdelay(10);
475 mmc_set_clock(mmc, mmc->clock, false);
476
477 /*
478 * Failure to switch is indicated by the card holding
479 * dat[0:3] low. Wait for at least 1 ms according to spec
480 */
481 err = mmc_wait_dat0(mmc, 1, 1000);
482 if (err == -ENOSYS)
483 udelay(1000);
484 else if (err)
485 return -ETIMEDOUT;
486
487 return 0;
488 }
489
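/*
 * Run the ACMD41 (SD_SEND_OP_COND) handshake until the card clears its
 * busy flag. OCR_HCS advertises host support for high-capacity
 * (SDHC/SDXC) cards and OCR_S18R requests a switch to 1.8 V signalling
 * for the UHS modes.
 */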
490 static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
491 {
492 int timeout = 1000;
493 int err;
494 struct mmc_cmd cmd;
495
496 while (1) {
497 cmd.cmdidx = MMC_CMD_APP_CMD;
498 cmd.resp_type = MMC_RSP_R1;
499 cmd.cmdarg = 0;
500
501 err = mmc_send_cmd(mmc, &cmd, NULL);
502
503 if (err)
504 return err;
505
506 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
507 cmd.resp_type = MMC_RSP_R3;
508
509 /*
510 * Most cards do not answer if some reserved bits
511 		 * in the OCR are set. However, some controllers
512 		 * can set bit 7 (reserved for low voltages), but
513 		 * how to manage low-voltage SD cards is not yet
514 * specified.
515 */
516 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
517 (mmc->cfg->voltages & 0xff8000);
518
519 if (mmc->version == SD_VERSION_2)
520 cmd.cmdarg |= OCR_HCS;
521
522 if (uhs_en)
523 cmd.cmdarg |= OCR_S18R;
524
525 err = mmc_send_cmd(mmc, &cmd, NULL);
526
527 if (err)
528 return err;
529
530 if (cmd.response[0] & OCR_BUSY)
531 break;
532
533 if (timeout-- <= 0)
534 return -EOPNOTSUPP;
535
536 udelay(1000);
537 }
538
539 if (mmc->version != SD_VERSION_2)
540 mmc->version = SD_VERSION_1_0;
541
542 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
543 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
544 cmd.resp_type = MMC_RSP_R3;
545 cmd.cmdarg = 0;
546
547 err = mmc_send_cmd(mmc, &cmd, NULL);
548
549 if (err)
550 return err;
551 }
552
553 mmc->ocr = cmd.response[0];
554
555 if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
556 == 0x41000000) {
557 err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
558 if (err)
559 return err;
560 }
561
562 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
563 mmc->rca = 0;
564
565 return 0;
566 }
567
568 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
569 {
570 struct mmc_cmd cmd;
571 int err;
572
573 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
574 cmd.resp_type = MMC_RSP_R3;
575 cmd.cmdarg = 0;
576 if (use_arg && !mmc_host_is_spi(mmc))
577 cmd.cmdarg = OCR_HCS |
578 (mmc->cfg->voltages &
579 (mmc->ocr & OCR_VOLTAGE_MASK)) |
580 (mmc->ocr & OCR_ACCESS_MODE);
581
582 err = mmc_send_cmd(mmc, &cmd, NULL);
583 if (err)
584 return err;
585 mmc->ocr = cmd.response[0];
586 return 0;
587 }
588
589 static int mmc_send_op_cond(struct mmc *mmc)
590 {
591 int err, i;
592
593 /* Some cards seem to need this */
594 mmc_go_idle(mmc);
595
596 	/* Ask the card for its capabilities */
597 for (i = 0; i < 2; i++) {
598 err = mmc_send_op_cond_iter(mmc, i != 0);
599 if (err)
600 return err;
601
602 /* exit if not busy (flag seems to be inverted) */
603 if (mmc->ocr & OCR_BUSY)
604 break;
605 }
606 mmc->op_cond_pending = 1;
607 return 0;
608 }
609
610 static int mmc_complete_op_cond(struct mmc *mmc)
611 {
612 struct mmc_cmd cmd;
613 int timeout = 1000;
614 uint start;
615 int err;
616
617 mmc->op_cond_pending = 0;
618 if (!(mmc->ocr & OCR_BUSY)) {
619 /* Some cards seem to need this */
620 mmc_go_idle(mmc);
621
622 start = get_timer(0);
623 while (1) {
624 err = mmc_send_op_cond_iter(mmc, 1);
625 if (err)
626 return err;
627 if (mmc->ocr & OCR_BUSY)
628 break;
629 if (get_timer(start) > timeout)
630 return -EOPNOTSUPP;
631 udelay(100);
632 }
633 }
634
635 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
636 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
637 cmd.resp_type = MMC_RSP_R3;
638 cmd.cmdarg = 0;
639
640 err = mmc_send_cmd(mmc, &cmd, NULL);
641
642 if (err)
643 return err;
644
645 mmc->ocr = cmd.response[0];
646 }
647
648 mmc->version = MMC_VERSION_UNKNOWN;
649
650 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
651 mmc->rca = 1;
652
653 return 0;
654 }
655
656
657 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
658 {
659 struct mmc_cmd cmd;
660 struct mmc_data data;
661 int err;
662
663 	/* Get the Extended CSD register */
664 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
665 cmd.resp_type = MMC_RSP_R1;
666 cmd.cmdarg = 0;
667
668 data.dest = (char *)ext_csd;
669 data.blocks = 1;
670 data.blocksize = MMC_MAX_BLOCK_LEN;
671 data.flags = MMC_DATA_READ;
672
673 err = mmc_send_cmd(mmc, &cmd, &data);
674
675 return err;
676 }
677
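/*
 * Write one byte of the EXT_CSD with CMD6 (SWITCH) and poll the card
 * status until it leaves the programming state. For example, the
 * high-speed timing is selected elsewhere in this file with:
 *
 *	mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
 *		   EXT_CSD_TIMING_HS);
 */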
678 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
679 {
680 struct mmc_cmd cmd;
681 int timeout = 1000;
682 int retries = 3;
683 int ret;
684
685 cmd.cmdidx = MMC_CMD_SWITCH;
686 cmd.resp_type = MMC_RSP_R1b;
687 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
688 (index << 16) |
689 (value << 8);
690
691 while (retries > 0) {
692 ret = mmc_send_cmd(mmc, &cmd, NULL);
693
694 /* Waiting for the ready status */
695 if (!ret) {
696 ret = mmc_send_status(mmc, timeout);
697 return ret;
698 }
699
700 retries--;
701 }
702
703 return ret;
704
705 }
706
707 static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode)
708 {
709 int err;
710 int speed_bits;
711
712 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
713
714 switch (mode) {
715 case MMC_HS:
716 case MMC_HS_52:
717 case MMC_DDR_52:
718 speed_bits = EXT_CSD_TIMING_HS;
719 break;
720 case MMC_HS_200:
721 speed_bits = EXT_CSD_TIMING_HS200;
722 break;
723 case MMC_LEGACY:
724 speed_bits = EXT_CSD_TIMING_LEGACY;
725 break;
726 default:
727 return -EINVAL;
728 }
729 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
730 speed_bits);
731 if (err)
732 return err;
733
734 if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
735 /* Now check to see that it worked */
736 err = mmc_send_ext_csd(mmc, test_csd);
737 if (err)
738 return err;
739
740 /* No high-speed support */
741 if (!test_csd[EXT_CSD_HS_TIMING])
742 return -ENOTSUPP;
743 }
744
745 return 0;
746 }
747
748 static int mmc_get_capabilities(struct mmc *mmc)
749 {
750 u8 *ext_csd = mmc->ext_csd;
751 char cardtype;
752
753 mmc->card_caps = MMC_MODE_1BIT;
754
755 if (mmc_host_is_spi(mmc))
756 return 0;
757
758 /* Only version 4 supports high-speed */
759 if (mmc->version < MMC_VERSION_4)
760 return 0;
761
762 if (!ext_csd) {
763 		printf("No ext_csd found!\n"); /* this should never happen */
764 return -ENOTSUPP;
765 }
766
767 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
768
769 cardtype = ext_csd[EXT_CSD_CARD_TYPE] & 0x3f;
770 mmc->cardtype = cardtype;
771
772 if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
773 EXT_CSD_CARD_TYPE_HS200_1_8V)) {
774 mmc->card_caps |= MMC_MODE_HS200;
775 }
776 if (cardtype & EXT_CSD_CARD_TYPE_52) {
777 if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
778 mmc->card_caps |= MMC_MODE_DDR_52MHz;
779 mmc->card_caps |= MMC_MODE_HS_52MHz;
780 }
781 if (cardtype & EXT_CSD_CARD_TYPE_26)
782 mmc->card_caps |= MMC_MODE_HS;
783
784 return 0;
785 }
786
787 static int mmc_set_capacity(struct mmc *mmc, int part_num)
788 {
789 switch (part_num) {
790 case 0:
791 mmc->capacity = mmc->capacity_user;
792 break;
793 case 1:
794 case 2:
795 mmc->capacity = mmc->capacity_boot;
796 break;
797 case 3:
798 mmc->capacity = mmc->capacity_rpmb;
799 break;
800 case 4:
801 case 5:
802 case 6:
803 case 7:
804 mmc->capacity = mmc->capacity_gp[part_num - 4];
805 break;
806 default:
807 return -1;
808 }
809
810 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
811
812 return 0;
813 }
814
815 static int mmc_boot_part_access_chk(struct mmc *mmc, unsigned int part_num)
816 {
817 int forbidden = 0;
818 bool change = false;
819
820 if (part_num & PART_ACCESS_MASK)
821 forbidden = MMC_CAP(MMC_HS_200);
822
823 if (MMC_CAP(mmc->selected_mode) & forbidden) {
824 debug("selected mode (%s) is forbidden for part %d\n",
825 mmc_mode_name(mmc->selected_mode), part_num);
826 change = true;
827 } else if (mmc->selected_mode != mmc->best_mode) {
828 debug("selected mode is not optimal\n");
829 change = true;
830 }
831
832 if (change)
833 return mmc_select_mode_and_width(mmc,
834 mmc->card_caps & ~forbidden);
835
836 return 0;
837 }
838
839 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
840 {
841 int ret;
842
843 ret = mmc_boot_part_access_chk(mmc, part_num);
844 if (ret)
845 return ret;
846
847 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
848 (mmc->part_config & ~PART_ACCESS_MASK)
849 | (part_num & PART_ACCESS_MASK));
850
851 /*
852 * Set the capacity if the switch succeeded or was intended
853 * to return to representing the raw device.
854 */
855 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
856 ret = mmc_set_capacity(mmc, part_num);
857 mmc_get_blk_desc(mmc)->hwpart = part_num;
858 }
859
860 return ret;
861 }
862
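/*
 * Configure the eMMC hardware partitions described by 'conf'. Callers
 * normally run the same configuration in MMC_HWPART_CONF_CHECK mode first
 * and, once it validates, repeat it with MMC_HWPART_CONF_COMPLETE to make
 * it permanent. Illustrative sketch (the sizes are examples and must be
 * multiples of mmc->hc_wp_grp_size):
 *
 *	struct mmc_hwpart_conf conf = { 0 };
 *
 *	conf.user.enh_start = 0;
 *	conf.user.enh_size = mmc->hc_wp_grp_size;
 *	conf.gp_part[0].size = mmc->hc_wp_grp_size;
 *	conf.gp_part[0].enhanced = 1;
 *	if (!mmc_hwpart_config(mmc, &conf, MMC_HWPART_CONF_CHECK))
 *		mmc_hwpart_config(mmc, &conf, MMC_HWPART_CONF_COMPLETE);
 *
 * PARTITION_SETTING_COMPLETED is a one-time setting and only takes effect
 * after a power cycle.
 */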
863 int mmc_hwpart_config(struct mmc *mmc,
864 const struct mmc_hwpart_conf *conf,
865 enum mmc_hwpart_conf_mode mode)
866 {
867 u8 part_attrs = 0;
868 u32 enh_size_mult;
869 u32 enh_start_addr;
870 u32 gp_size_mult[4];
871 u32 max_enh_size_mult;
872 u32 tot_enh_size_mult = 0;
873 u8 wr_rel_set;
874 int i, pidx, err;
875 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
876
877 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
878 return -EINVAL;
879
880 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
881 printf("eMMC >= 4.4 required for enhanced user data area\n");
882 return -EMEDIUMTYPE;
883 }
884
885 if (!(mmc->part_support & PART_SUPPORT)) {
886 printf("Card does not support partitioning\n");
887 return -EMEDIUMTYPE;
888 }
889
890 if (!mmc->hc_wp_grp_size) {
891 printf("Card does not define HC WP group size\n");
892 return -EMEDIUMTYPE;
893 }
894
895 /* check partition alignment and total enhanced size */
896 if (conf->user.enh_size) {
897 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
898 conf->user.enh_start % mmc->hc_wp_grp_size) {
899 printf("User data enhanced area not HC WP group "
900 "size aligned\n");
901 return -EINVAL;
902 }
903 part_attrs |= EXT_CSD_ENH_USR;
904 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
905 if (mmc->high_capacity) {
906 enh_start_addr = conf->user.enh_start;
907 } else {
908 enh_start_addr = (conf->user.enh_start << 9);
909 }
910 } else {
911 enh_size_mult = 0;
912 enh_start_addr = 0;
913 }
914 tot_enh_size_mult += enh_size_mult;
915
916 for (pidx = 0; pidx < 4; pidx++) {
917 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
918 printf("GP%i partition not HC WP group size "
919 "aligned\n", pidx+1);
920 return -EINVAL;
921 }
922 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
923 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
924 part_attrs |= EXT_CSD_ENH_GP(pidx);
925 tot_enh_size_mult += gp_size_mult[pidx];
926 }
927 }
928
929 	if (part_attrs && !(mmc->part_support & ENHNCD_SUPPORT)) {
930 printf("Card does not support enhanced attribute\n");
931 return -EMEDIUMTYPE;
932 }
933
934 err = mmc_send_ext_csd(mmc, ext_csd);
935 if (err)
936 return err;
937
938 max_enh_size_mult =
939 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
940 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
941 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
942 if (tot_enh_size_mult > max_enh_size_mult) {
943 printf("Total enhanced size exceeds maximum (%u > %u)\n",
944 tot_enh_size_mult, max_enh_size_mult);
945 return -EMEDIUMTYPE;
946 }
947
948 /* The default value of EXT_CSD_WR_REL_SET is device
949 	 * dependent; the values can only be changed if the
950 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
951 * changed only once and before partitioning is completed. */
952 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
953 if (conf->user.wr_rel_change) {
954 if (conf->user.wr_rel_set)
955 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
956 else
957 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
958 }
959 for (pidx = 0; pidx < 4; pidx++) {
960 if (conf->gp_part[pidx].wr_rel_change) {
961 if (conf->gp_part[pidx].wr_rel_set)
962 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
963 else
964 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
965 }
966 }
967
968 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
969 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
970 puts("Card does not support host controlled partition write "
971 "reliability settings\n");
972 return -EMEDIUMTYPE;
973 }
974
975 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
976 EXT_CSD_PARTITION_SETTING_COMPLETED) {
977 printf("Card already partitioned\n");
978 return -EPERM;
979 }
980
981 if (mode == MMC_HWPART_CONF_CHECK)
982 return 0;
983
984 /* Partitioning requires high-capacity size definitions */
985 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
986 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
987 EXT_CSD_ERASE_GROUP_DEF, 1);
988
989 if (err)
990 return err;
991
992 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
993
994 /* update erase group size to be high-capacity */
995 mmc->erase_grp_size =
996 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
997
998 }
999
1000 /* all OK, write the configuration */
1001 for (i = 0; i < 4; i++) {
1002 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1003 EXT_CSD_ENH_START_ADDR+i,
1004 (enh_start_addr >> (i*8)) & 0xFF);
1005 if (err)
1006 return err;
1007 }
1008 for (i = 0; i < 3; i++) {
1009 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1010 EXT_CSD_ENH_SIZE_MULT+i,
1011 (enh_size_mult >> (i*8)) & 0xFF);
1012 if (err)
1013 return err;
1014 }
1015 for (pidx = 0; pidx < 4; pidx++) {
1016 for (i = 0; i < 3; i++) {
1017 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1018 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1019 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1020 if (err)
1021 return err;
1022 }
1023 }
1024 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1025 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1026 if (err)
1027 return err;
1028
1029 if (mode == MMC_HWPART_CONF_SET)
1030 return 0;
1031
1032 /* The WR_REL_SET is a write-once register but shall be
1033 * written before setting PART_SETTING_COMPLETED. As it is
1034 * write-once we can only write it when completing the
1035 * partitioning. */
1036 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1037 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1038 EXT_CSD_WR_REL_SET, wr_rel_set);
1039 if (err)
1040 return err;
1041 }
1042
1043 /* Setting PART_SETTING_COMPLETED confirms the partition
1044 * configuration but it only becomes effective after power
1045 * cycle, so we do not adjust the partition related settings
1046 * in the mmc struct. */
1047
1048 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1049 EXT_CSD_PARTITION_SETTING,
1050 EXT_CSD_PARTITION_SETTING_COMPLETED);
1051 if (err)
1052 return err;
1053
1054 return 0;
1055 }
1056
1057 #if !CONFIG_IS_ENABLED(DM_MMC)
1058 int mmc_getcd(struct mmc *mmc)
1059 {
1060 int cd;
1061
1062 cd = board_mmc_getcd(mmc);
1063
1064 if (cd < 0) {
1065 if (mmc->cfg->ops->getcd)
1066 cd = mmc->cfg->ops->getcd(mmc);
1067 else
1068 cd = 1;
1069 }
1070
1071 return cd;
1072 }
1073 #endif
1074
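/*
 * Issue CMD6 (SWITCH_FUNC). Bit 31 of the argument selects check (0) or
 * switch (1) mode and each 4-bit nibble addresses one function group
 * (the 'group' parameter is zero-based). Switching the bus-speed group
 * (group 0 here) to function 1, high speed, therefore sends
 * cmdarg = 0x80fffff1; the 64-byte status block returned on the data
 * lines is stored in 'resp'.
 */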
1075 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1076 {
1077 struct mmc_cmd cmd;
1078 struct mmc_data data;
1079
1080 /* Switch the frequency */
1081 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1082 cmd.resp_type = MMC_RSP_R1;
1083 cmd.cmdarg = (mode << 31) | 0xffffff;
1084 cmd.cmdarg &= ~(0xf << (group * 4));
1085 cmd.cmdarg |= value << (group * 4);
1086
1087 data.dest = (char *)resp;
1088 data.blocksize = 64;
1089 data.blocks = 1;
1090 data.flags = MMC_DATA_READ;
1091
1092 return mmc_send_cmd(mmc, &cmd, &data);
1093 }
1094
1095
1096 static int sd_get_capabilities(struct mmc *mmc)
1097 {
1098 int err;
1099 struct mmc_cmd cmd;
1100 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
1101 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
1102 struct mmc_data data;
1103 int timeout;
1104 u32 sd3_bus_mode;
1105
1106 mmc->card_caps = MMC_MODE_1BIT;
1107
1108 if (mmc_host_is_spi(mmc))
1109 return 0;
1110
1111 /* Read the SCR to find out if this card supports higher speeds */
1112 cmd.cmdidx = MMC_CMD_APP_CMD;
1113 cmd.resp_type = MMC_RSP_R1;
1114 cmd.cmdarg = mmc->rca << 16;
1115
1116 err = mmc_send_cmd(mmc, &cmd, NULL);
1117
1118 if (err)
1119 return err;
1120
1121 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1122 cmd.resp_type = MMC_RSP_R1;
1123 cmd.cmdarg = 0;
1124
1125 timeout = 3;
1126
1127 retry_scr:
1128 data.dest = (char *)scr;
1129 data.blocksize = 8;
1130 data.blocks = 1;
1131 data.flags = MMC_DATA_READ;
1132
1133 err = mmc_send_cmd(mmc, &cmd, &data);
1134
1135 if (err) {
1136 if (timeout--)
1137 goto retry_scr;
1138
1139 return err;
1140 }
1141
1142 mmc->scr[0] = __be32_to_cpu(scr[0]);
1143 mmc->scr[1] = __be32_to_cpu(scr[1]);
1144
1145 switch ((mmc->scr[0] >> 24) & 0xf) {
1146 case 0:
1147 mmc->version = SD_VERSION_1_0;
1148 break;
1149 case 1:
1150 mmc->version = SD_VERSION_1_10;
1151 break;
1152 case 2:
1153 mmc->version = SD_VERSION_2;
1154 if ((mmc->scr[0] >> 15) & 0x1)
1155 mmc->version = SD_VERSION_3;
1156 break;
1157 default:
1158 mmc->version = SD_VERSION_1_0;
1159 break;
1160 }
1161
1162 if (mmc->scr[0] & SD_DATA_4BIT)
1163 mmc->card_caps |= MMC_MODE_4BIT;
1164
1165 /* Version 1.0 doesn't support switching */
1166 if (mmc->version == SD_VERSION_1_0)
1167 return 0;
1168
1169 timeout = 4;
1170 while (timeout--) {
1171 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1172 (u8 *)switch_status);
1173
1174 if (err)
1175 return err;
1176
1177 /* The high-speed function is busy. Try again */
1178 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1179 break;
1180 }
1181
1182 	/* Advertise the high-speed capability if the card supports it */
1183 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1184 mmc->card_caps |= MMC_CAP(SD_HS);
1185
1186 	/* Versions before 3.0 don't support UHS modes */
1187 if (mmc->version < SD_VERSION_3)
1188 return 0;
1189
1190 sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
1191 if (sd3_bus_mode & SD_MODE_UHS_SDR104)
1192 mmc->card_caps |= MMC_CAP(UHS_SDR104);
1193 if (sd3_bus_mode & SD_MODE_UHS_SDR50)
1194 mmc->card_caps |= MMC_CAP(UHS_SDR50);
1195 if (sd3_bus_mode & SD_MODE_UHS_SDR25)
1196 mmc->card_caps |= MMC_CAP(UHS_SDR25);
1197 if (sd3_bus_mode & SD_MODE_UHS_SDR12)
1198 mmc->card_caps |= MMC_CAP(UHS_SDR12);
1199 if (sd3_bus_mode & SD_MODE_UHS_DDR50)
1200 mmc->card_caps |= MMC_CAP(UHS_DDR50);
1201
1202 return 0;
1203 }
1204
1205 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1206 {
1207 int err;
1208
1209 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1210 int speed;
1211
1212 switch (mode) {
1213 case SD_LEGACY:
1214 case UHS_SDR12:
1215 speed = UHS_SDR12_BUS_SPEED;
1216 break;
1217 case SD_HS:
1218 case UHS_SDR25:
1219 speed = UHS_SDR25_BUS_SPEED;
1220 break;
1221 case UHS_SDR50:
1222 speed = UHS_SDR50_BUS_SPEED;
1223 break;
1224 case UHS_DDR50:
1225 speed = UHS_DDR50_BUS_SPEED;
1226 break;
1227 case UHS_SDR104:
1228 speed = UHS_SDR104_BUS_SPEED;
1229 break;
1230 default:
1231 return -EINVAL;
1232 }
1233
1234 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
1235 if (err)
1236 return err;
1237
1238 if ((__be32_to_cpu(switch_status[4]) >> 24) != speed)
1239 return -ENOTSUPP;
1240
1241 return 0;
1242 }
1243
1244 int sd_select_bus_width(struct mmc *mmc, int w)
1245 {
1246 int err;
1247 struct mmc_cmd cmd;
1248
1249 if ((w != 4) && (w != 1))
1250 return -EINVAL;
1251
1252 cmd.cmdidx = MMC_CMD_APP_CMD;
1253 cmd.resp_type = MMC_RSP_R1;
1254 cmd.cmdarg = mmc->rca << 16;
1255
1256 err = mmc_send_cmd(mmc, &cmd, NULL);
1257 if (err)
1258 return err;
1259
1260 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1261 cmd.resp_type = MMC_RSP_R1;
1262 if (w == 4)
1263 cmd.cmdarg = 2;
1264 else if (w == 1)
1265 cmd.cmdarg = 0;
1266 err = mmc_send_cmd(mmc, &cmd, NULL);
1267 if (err)
1268 return err;
1269
1270 return 0;
1271 }
1272
1273 static int sd_read_ssr(struct mmc *mmc)
1274 {
1275 int err, i;
1276 struct mmc_cmd cmd;
1277 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1278 struct mmc_data data;
1279 int timeout = 3;
1280 unsigned int au, eo, et, es;
1281
1282 cmd.cmdidx = MMC_CMD_APP_CMD;
1283 cmd.resp_type = MMC_RSP_R1;
1284 cmd.cmdarg = mmc->rca << 16;
1285
1286 err = mmc_send_cmd(mmc, &cmd, NULL);
1287 if (err)
1288 return err;
1289
1290 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1291 cmd.resp_type = MMC_RSP_R1;
1292 cmd.cmdarg = 0;
1293
1294 retry_ssr:
1295 data.dest = (char *)ssr;
1296 data.blocksize = 64;
1297 data.blocks = 1;
1298 data.flags = MMC_DATA_READ;
1299
1300 err = mmc_send_cmd(mmc, &cmd, &data);
1301 if (err) {
1302 if (timeout--)
1303 goto retry_ssr;
1304
1305 return err;
1306 }
1307
1308 for (i = 0; i < 16; i++)
1309 ssr[i] = be32_to_cpu(ssr[i]);
1310
1311 au = (ssr[2] >> 12) & 0xF;
1312 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1313 mmc->ssr.au = sd_au_size[au];
1314 es = (ssr[3] >> 24) & 0xFF;
1315 es |= (ssr[2] & 0xFF) << 8;
1316 et = (ssr[3] >> 18) & 0x3F;
1317 if (es && et) {
1318 eo = (ssr[3] >> 16) & 0x3;
1319 mmc->ssr.erase_timeout = (et * 1000) / es;
1320 mmc->ssr.erase_offset = eo * 1000;
1321 }
1322 } else {
1323 debug("Invalid Allocation Unit Size.\n");
1324 }
1325
1326 return 0;
1327 }
1328
1329 /* frequency bases */
1330 /* divided by 10 to be nice to platforms without floating point */
1331 static const int fbase[] = {
1332 10000,
1333 100000,
1334 1000000,
1335 10000000,
1336 };
1337
1338 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
1339 * to platforms without floating point.
1340 */
1341 static const u8 multipliers[] = {
1342 0, /* reserved */
1343 10,
1344 12,
1345 13,
1346 15,
1347 20,
1348 25,
1349 30,
1350 35,
1351 40,
1352 45,
1353 50,
1354 55,
1355 60,
1356 70,
1357 80,
1358 };
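/*
 * The CSD TRAN_SPEED field is decoded in mmc_startup() as
 * fbase[bits 2:0] * multipliers[bits 6:3]; since fbase[] is stored
 * divided by 10 and multipliers[] multiplied by 10, the product is the
 * clock in Hz. A typical SD value of 0x32 (unit 2, multiplier code 6)
 * gives 1000000 * 25 = 25 MHz.
 */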
1359
1360 static inline int bus_width(uint cap)
1361 {
1362 if (cap == MMC_MODE_8BIT)
1363 return 8;
1364 if (cap == MMC_MODE_4BIT)
1365 return 4;
1366 if (cap == MMC_MODE_1BIT)
1367 return 1;
1368 	printf("invalid bus width capability 0x%x\n", cap);
1369 return 0;
1370 }
1371
1372 #if !CONFIG_IS_ENABLED(DM_MMC)
1373 static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
1374 {
1375 return -ENOTSUPP;
1376 }
1377
1378 static void mmc_send_init_stream(struct mmc *mmc)
1379 {
1380 }
1381
1382 static int mmc_set_ios(struct mmc *mmc)
1383 {
1384 int ret = 0;
1385
1386 if (mmc->cfg->ops->set_ios)
1387 ret = mmc->cfg->ops->set_ios(mmc);
1388
1389 return ret;
1390 }
1391 #endif
1392
1393 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1394 {
1395 if (clock > mmc->cfg->f_max)
1396 clock = mmc->cfg->f_max;
1397
1398 if (clock < mmc->cfg->f_min)
1399 clock = mmc->cfg->f_min;
1400
1401 mmc->clock = clock;
1402 mmc->clk_disable = disable;
1403
1404 return mmc_set_ios(mmc);
1405 }
1406
1407 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1408 {
1409 mmc->bus_width = width;
1410
1411 return mmc_set_ios(mmc);
1412 }
1413
1414 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1415 /*
1416 * helper function to display the capabilities in a human
1417 * friendly manner. The capabilities include bus width and
1418 * supported modes.
1419 */
1420 void mmc_dump_capabilities(const char *text, uint caps)
1421 {
1422 enum bus_mode mode;
1423
1424 printf("%s: widths [", text);
1425 if (caps & MMC_MODE_8BIT)
1426 printf("8, ");
1427 if (caps & MMC_MODE_4BIT)
1428 printf("4, ");
1429 if (caps & MMC_MODE_1BIT)
1430 printf("1, ");
1431 printf("\b\b] modes [");
1432 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1433 if (MMC_CAP(mode) & caps)
1434 printf("%s, ", mmc_mode_name(mode));
1435 printf("\b\b]\n");
1436 }
1437 #endif
1438
1439 struct mode_width_tuning {
1440 enum bus_mode mode;
1441 uint widths;
1442 uint tuning;
1443 };
1444
1445 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1446 {
1447 switch (voltage) {
1448 case MMC_SIGNAL_VOLTAGE_000: return 0;
1449 case MMC_SIGNAL_VOLTAGE_330: return 3300;
1450 case MMC_SIGNAL_VOLTAGE_180: return 1800;
1451 case MMC_SIGNAL_VOLTAGE_120: return 1200;
1452 }
1453 return -EINVAL;
1454 }
1455
1456 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1457 {
1458 int err;
1459
1460 if (mmc->signal_voltage == signal_voltage)
1461 return 0;
1462
1463 mmc->signal_voltage = signal_voltage;
1464 err = mmc_set_ios(mmc);
1465 if (err)
1466 debug("unable to set voltage (err %d)\n", err);
1467
1468 return err;
1469 }
1470
1471 static const struct mode_width_tuning sd_modes_by_pref[] = {
1472 {
1473 .mode = UHS_SDR104,
1474 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1475 .tuning = MMC_CMD_SEND_TUNING_BLOCK
1476 },
1477 {
1478 .mode = UHS_SDR50,
1479 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1480 },
1481 {
1482 .mode = UHS_DDR50,
1483 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1484 },
1485 {
1486 .mode = UHS_SDR25,
1487 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1488 },
1489 {
1490 .mode = SD_HS,
1491 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1492 },
1493 {
1494 .mode = UHS_SDR12,
1495 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1496 },
1497 {
1498 .mode = SD_LEGACY,
1499 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1500 }
1501 };
1502
1503 #define for_each_sd_mode_by_pref(caps, mwt) \
1504 for (mwt = sd_modes_by_pref;\
1505 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1506 mwt++) \
1507 if (caps & MMC_CAP(mwt->mode))
1508
1509 static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
1510 {
1511 int err;
1512 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1513 const struct mode_width_tuning *mwt;
1514 bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1515 uint caps;
1516
1517
1518 /* Restrict card's capabilities by what the host can do */
1519 caps = card_caps & (mmc->host_caps | MMC_MODE_1BIT);
1520
1521 if (!uhs_en)
1522 caps &= ~UHS_CAPS;
1523
1524 for_each_sd_mode_by_pref(caps, mwt) {
1525 uint *w;
1526
1527 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1528 if (*w & caps & mwt->widths) {
1529 debug("trying mode %s width %d (at %d MHz)\n",
1530 mmc_mode_name(mwt->mode),
1531 bus_width(*w),
1532 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1533
1534 /* configure the bus width (card + host) */
1535 err = sd_select_bus_width(mmc, bus_width(*w));
1536 if (err)
1537 goto error;
1538 mmc_set_bus_width(mmc, bus_width(*w));
1539
1540 /* configure the bus mode (card) */
1541 err = sd_set_card_speed(mmc, mwt->mode);
1542 if (err)
1543 goto error;
1544
1545 /* configure the bus mode (host) */
1546 mmc_select_mode(mmc, mwt->mode);
1547 mmc_set_clock(mmc, mmc->tran_speed, false);
1548
1549 /* execute tuning if needed */
1550 if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1551 err = mmc_execute_tuning(mmc,
1552 mwt->tuning);
1553 if (err) {
1554 debug("tuning failed\n");
1555 goto error;
1556 }
1557 }
1558
1559 err = sd_read_ssr(mmc);
1560 if (!err)
1561 return 0;
1562
1563 printf("bad ssr\n");
1564
1565 error:
1566 /* revert to a safer bus speed */
1567 mmc_select_mode(mmc, SD_LEGACY);
1568 mmc_set_clock(mmc, mmc->tran_speed, false);
1569 }
1570 }
1571 }
1572
1573 printf("unable to select a mode\n");
1574 return -ENOTSUPP;
1575 }
1576
1577 /*
1578  * Read and compare the part of the ext_csd that is constant.
1579 * This can be used to check that the transfer is working
1580 * as expected.
1581 */
1582 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1583 {
1584 int err;
1585 const u8 *ext_csd = mmc->ext_csd;
1586 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1587
1588 err = mmc_send_ext_csd(mmc, test_csd);
1589 if (err)
1590 return err;
1591
1592 /* Only compare read only fields */
1593 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1594 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1595 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1596 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1597 ext_csd[EXT_CSD_REV]
1598 == test_csd[EXT_CSD_REV] &&
1599 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1600 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1601 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1602 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1603 return 0;
1604
1605 return -EBADMSG;
1606 }
1607
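/*
 * Pick a signalling voltage that both the card (per its EXT_CSD card type
 * bits for the requested mode) and the host allow. The ffs() walk below
 * assumes the mmc_voltage flags encode lower voltages as lower-order
 * bits, so the first match tried is the lowest usable voltage.
 */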
1608 static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1609 uint32_t allowed_mask)
1610 {
1611 u32 card_mask = 0;
1612
1613 switch (mode) {
1614 case MMC_HS_200:
1615 if (mmc->cardtype & EXT_CSD_CARD_TYPE_HS200_1_8V)
1616 card_mask |= MMC_SIGNAL_VOLTAGE_180;
1617 if (mmc->cardtype & EXT_CSD_CARD_TYPE_HS200_1_2V)
1618 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1619 break;
1620 case MMC_DDR_52:
1621 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
1622 card_mask |= MMC_SIGNAL_VOLTAGE_330 |
1623 MMC_SIGNAL_VOLTAGE_180;
1624 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
1625 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1626 break;
1627 default:
1628 card_mask |= MMC_SIGNAL_VOLTAGE_330;
1629 break;
1630 }
1631
1632 while (card_mask & allowed_mask) {
1633 enum mmc_voltage best_match;
1634
1635 best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
1636 if (!mmc_set_signal_voltage(mmc, best_match))
1637 return 0;
1638
1639 allowed_mask &= ~best_match;
1640 }
1641
1642 return -ENOTSUPP;
1643 }
1644
1645 static const struct mode_width_tuning mmc_modes_by_pref[] = {
1646 {
1647 .mode = MMC_HS_200,
1648 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1649 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1650 },
1651 {
1652 .mode = MMC_DDR_52,
1653 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1654 },
1655 {
1656 .mode = MMC_HS_52,
1657 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1658 },
1659 {
1660 .mode = MMC_HS,
1661 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1662 },
1663 {
1664 .mode = MMC_LEGACY,
1665 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1666 }
1667 };
1668
1669 #define for_each_mmc_mode_by_pref(caps, mwt) \
1670 for (mwt = mmc_modes_by_pref;\
1671 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1672 mwt++) \
1673 if (caps & MMC_CAP(mwt->mode))
1674
1675 static const struct ext_csd_bus_width {
1676 uint cap;
1677 bool is_ddr;
1678 uint ext_csd_bits;
1679 } ext_csd_bus_width[] = {
1680 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1681 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1682 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1683 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1684 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
1685 };
1686
1687 #define for_each_supported_width(caps, ddr, ecbv) \
1688 for (ecbv = ext_csd_bus_width;\
1689 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
1690 ecbv++) \
1691 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
1692
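/*
 * Try the (e)MMC bus modes in order of preference and, for each mode, the
 * supported bus widths, until one configuration survives an EXT_CSD
 * read-back check: set the signalling voltage, program the bus width and
 * timing in the card, mirror the settings on the host, run tuning if the
 * mode requires it, and fall back to 1-bit legacy mode on any error.
 */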
1693 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
1694 {
1695 int err;
1696 const struct mode_width_tuning *mwt;
1697 const struct ext_csd_bus_width *ecbw;
1698
1699 /* Restrict card's capabilities by what the host can do */
1700 card_caps &= (mmc->host_caps | MMC_MODE_1BIT);
1701
1702 /* Only version 4 of MMC supports wider bus widths */
1703 if (mmc->version < MMC_VERSION_4)
1704 return 0;
1705
1706 if (!mmc->ext_csd) {
1707 		debug("No ext_csd found!\n"); /* this should never happen */
1708 return -ENOTSUPP;
1709 }
1710
1711 mmc_set_clock(mmc, mmc->legacy_speed, false);
1712
1713 for_each_mmc_mode_by_pref(card_caps, mwt) {
1714 for_each_supported_width(card_caps & mwt->widths,
1715 mmc_is_mode_ddr(mwt->mode), ecbw) {
1716 enum mmc_voltage old_voltage;
1717 debug("trying mode %s width %d (at %d MHz)\n",
1718 mmc_mode_name(mwt->mode),
1719 bus_width(ecbw->cap),
1720 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1721 old_voltage = mmc->signal_voltage;
1722 err = mmc_set_lowest_voltage(mmc, mwt->mode,
1723 MMC_ALL_SIGNAL_VOLTAGE);
1724 if (err)
1725 continue;
1726
1727 /* configure the bus width (card + host) */
1728 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1729 EXT_CSD_BUS_WIDTH,
1730 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
1731 if (err)
1732 goto error;
1733 mmc_set_bus_width(mmc, bus_width(ecbw->cap));
1734
1735 /* configure the bus speed (card) */
1736 err = mmc_set_card_speed(mmc, mwt->mode);
1737 if (err)
1738 goto error;
1739
1740 /*
1741 * configure the bus width AND the ddr mode (card)
1742 * The host side will be taken care of in the next step
1743 */
1744 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
1745 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1746 EXT_CSD_BUS_WIDTH,
1747 ecbw->ext_csd_bits);
1748 if (err)
1749 goto error;
1750 }
1751
1752 /* configure the bus mode (host) */
1753 mmc_select_mode(mmc, mwt->mode);
1754 mmc_set_clock(mmc, mmc->tran_speed, false);
1755
1756 /* execute tuning if needed */
1757 if (mwt->tuning) {
1758 err = mmc_execute_tuning(mmc, mwt->tuning);
1759 if (err) {
1760 debug("tuning failed\n");
1761 goto error;
1762 }
1763 }
1764
1765 /* do a transfer to check the configuration */
1766 err = mmc_read_and_compare_ext_csd(mmc);
1767 if (!err)
1768 return 0;
1769 error:
1770 mmc_set_signal_voltage(mmc, old_voltage);
1771 			/* if an error occurred, revert to a safer bus mode */
1772 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1773 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
1774 mmc_select_mode(mmc, MMC_LEGACY);
1775 mmc_set_bus_width(mmc, 1);
1776 }
1777 }
1778
1779 printf("unable to select a mode\n");
1780
1781 return -ENOTSUPP;
1782 }
1783
1784 static int mmc_startup_v4(struct mmc *mmc)
1785 {
1786 int err, i;
1787 u64 capacity;
1788 bool has_parts = false;
1789 bool part_completed;
1790 u8 *ext_csd;
1791
1792 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
1793 return 0;
1794
1795 ext_csd = malloc_cache_aligned(MMC_MAX_BLOCK_LEN);
1796 if (!ext_csd)
1797 return -ENOMEM;
1798
1799 mmc->ext_csd = ext_csd;
1800
1801 /* check ext_csd version and capacity */
1802 err = mmc_send_ext_csd(mmc, ext_csd);
1803 if (err)
1804 return err;
1805 if (ext_csd[EXT_CSD_REV] >= 2) {
1806 /*
1807 * According to the JEDEC Standard, the value of
1808 * ext_csd's capacity is valid if the value is more
1809 * than 2GB
1810 */
1811 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
1812 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
1813 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
1814 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
1815 capacity *= MMC_MAX_BLOCK_LEN;
1816 if ((capacity >> 20) > 2 * 1024)
1817 mmc->capacity_user = capacity;
1818 }
1819
1820 switch (ext_csd[EXT_CSD_REV]) {
1821 case 1:
1822 mmc->version = MMC_VERSION_4_1;
1823 break;
1824 case 2:
1825 mmc->version = MMC_VERSION_4_2;
1826 break;
1827 case 3:
1828 mmc->version = MMC_VERSION_4_3;
1829 break;
1830 case 5:
1831 mmc->version = MMC_VERSION_4_41;
1832 break;
1833 case 6:
1834 mmc->version = MMC_VERSION_4_5;
1835 break;
1836 case 7:
1837 mmc->version = MMC_VERSION_5_0;
1838 break;
1839 case 8:
1840 mmc->version = MMC_VERSION_5_1;
1841 break;
1842 }
1843
1844 /* The partition data may be non-zero but it is only
1845 * effective if PARTITION_SETTING_COMPLETED is set in
1846 * EXT_CSD, so ignore any data if this bit is not set,
1847 * except for enabling the high-capacity group size
1848 * definition (see below).
1849 */
1850 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
1851 EXT_CSD_PARTITION_SETTING_COMPLETED);
1852
1853 /* store the partition info of emmc */
1854 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
1855 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
1856 ext_csd[EXT_CSD_BOOT_MULT])
1857 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
1858 if (part_completed &&
1859 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
1860 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
1861
1862 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
1863
1864 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
1865
1866 for (i = 0; i < 4; i++) {
1867 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
1868 uint mult = (ext_csd[idx + 2] << 16) +
1869 (ext_csd[idx + 1] << 8) + ext_csd[idx];
1870 if (mult)
1871 has_parts = true;
1872 if (!part_completed)
1873 continue;
1874 mmc->capacity_gp[i] = mult;
1875 mmc->capacity_gp[i] *=
1876 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
1877 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1878 mmc->capacity_gp[i] <<= 19;
1879 }
1880
1881 if (part_completed) {
1882 mmc->enh_user_size =
1883 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
1884 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
1885 ext_csd[EXT_CSD_ENH_SIZE_MULT];
1886 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
1887 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1888 mmc->enh_user_size <<= 19;
1889 mmc->enh_user_start =
1890 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
1891 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
1892 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
1893 ext_csd[EXT_CSD_ENH_START_ADDR];
1894 if (mmc->high_capacity)
1895 mmc->enh_user_start <<= 9;
1896 }
1897
1898 /*
1899 * Host needs to enable ERASE_GRP_DEF bit if device is
1900 * partitioned. This bit will be lost every time after a reset
1901 * or power off. This will affect erase size.
1902 */
1903 if (part_completed)
1904 has_parts = true;
1905 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
1906 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
1907 has_parts = true;
1908 if (has_parts) {
1909 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1910 EXT_CSD_ERASE_GROUP_DEF, 1);
1911
1912 if (err)
1913 return err;
1914
1915 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1916 }
1917
1918 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
1919 /* Read out group size from ext_csd */
1920 mmc->erase_grp_size =
1921 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1922 /*
1923 * if high capacity and partition setting completed
1924 * SEC_COUNT is valid even if it is smaller than 2 GiB
1925 * JEDEC Standard JESD84-B45, 6.2.4
1926 */
1927 if (mmc->high_capacity && part_completed) {
1928 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
1929 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
1930 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
1931 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
1932 capacity *= MMC_MAX_BLOCK_LEN;
1933 mmc->capacity_user = capacity;
1934 }
1935 } else {
1936 /* Calculate the group size from the csd value. */
1937 int erase_gsz, erase_gmul;
1938
1939 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
1940 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
1941 mmc->erase_grp_size = (erase_gsz + 1)
1942 * (erase_gmul + 1);
1943 }
1944
1945 mmc->hc_wp_grp_size = 1024
1946 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1947 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1948
1949 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1950
1951 return 0;
1952 }
1953
1954 static int mmc_startup(struct mmc *mmc)
1955 {
1956 int err, i;
1957 uint mult, freq;
1958 u64 cmult, csize;
1959 struct mmc_cmd cmd;
1960 struct blk_desc *bdesc;
1961
1962 #ifdef CONFIG_MMC_SPI_CRC_ON
1963 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
1964 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
1965 cmd.resp_type = MMC_RSP_R1;
1966 cmd.cmdarg = 1;
1967 err = mmc_send_cmd(mmc, &cmd, NULL);
1968 if (err)
1969 return err;
1970 }
1971 #endif
1972
1973 /* Put the Card in Identify Mode */
1974 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
1975 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
1976 cmd.resp_type = MMC_RSP_R2;
1977 cmd.cmdarg = 0;
1978
1979 err = mmc_send_cmd(mmc, &cmd, NULL);
1980
1981 #ifdef CONFIG_MMC_QUIRKS
1982 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
1983 int retries = 4;
1984 /*
1985 * It has been seen that SEND_CID may fail on the first
1986 		 * attempt, let's try a few more times
1987 */
1988 do {
1989 err = mmc_send_cmd(mmc, &cmd, NULL);
1990 if (!err)
1991 break;
1992 } while (retries--);
1993 }
1994 #endif
1995
1996 if (err)
1997 return err;
1998
1999 memcpy(mmc->cid, cmd.response, 16);
2000
2001 /*
2002 * For MMC cards, set the Relative Address.
2003 	 * For SD cards, get the Relative Address.
2004 * This also puts the cards into Standby State
2005 */
2006 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2007 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2008 cmd.cmdarg = mmc->rca << 16;
2009 cmd.resp_type = MMC_RSP_R6;
2010
2011 err = mmc_send_cmd(mmc, &cmd, NULL);
2012
2013 if (err)
2014 return err;
2015
2016 if (IS_SD(mmc))
2017 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2018 }
2019
2020 /* Get the Card-Specific Data */
2021 cmd.cmdidx = MMC_CMD_SEND_CSD;
2022 cmd.resp_type = MMC_RSP_R2;
2023 cmd.cmdarg = mmc->rca << 16;
2024
2025 err = mmc_send_cmd(mmc, &cmd, NULL);
2026
2027 if (err)
2028 return err;
2029
2030 mmc->csd[0] = cmd.response[0];
2031 mmc->csd[1] = cmd.response[1];
2032 mmc->csd[2] = cmd.response[2];
2033 mmc->csd[3] = cmd.response[3];
2034
2035 if (mmc->version == MMC_VERSION_UNKNOWN) {
2036 int version = (cmd.response[0] >> 26) & 0xf;
2037
2038 switch (version) {
2039 case 0:
2040 mmc->version = MMC_VERSION_1_2;
2041 break;
2042 case 1:
2043 mmc->version = MMC_VERSION_1_4;
2044 break;
2045 case 2:
2046 mmc->version = MMC_VERSION_2_2;
2047 break;
2048 case 3:
2049 mmc->version = MMC_VERSION_3;
2050 break;
2051 case 4:
2052 mmc->version = MMC_VERSION_4;
2053 break;
2054 default:
2055 mmc->version = MMC_VERSION_1_2;
2056 break;
2057 }
2058 }
2059
2060 /* divide frequency by 10, since the mults are 10x bigger */
2061 freq = fbase[(cmd.response[0] & 0x7)];
2062 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2063
2064 mmc->legacy_speed = freq * mult;
2065 mmc_select_mode(mmc, MMC_LEGACY);
2066
2067 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2068 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2069
2070 if (IS_SD(mmc))
2071 mmc->write_bl_len = mmc->read_bl_len;
2072 else
2073 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
2074
2075 if (mmc->high_capacity) {
2076 csize = (mmc->csd[1] & 0x3f) << 16
2077 | (mmc->csd[2] & 0xffff0000) >> 16;
2078 cmult = 8;
2079 } else {
2080 csize = (mmc->csd[1] & 0x3ff) << 2
2081 | (mmc->csd[2] & 0xc0000000) >> 30;
2082 cmult = (mmc->csd[2] & 0x00038000) >> 15;
2083 }
2084
2085 mmc->capacity_user = (csize + 1) << (cmult + 2);
2086 mmc->capacity_user *= mmc->read_bl_len;
2087 mmc->capacity_boot = 0;
2088 mmc->capacity_rpmb = 0;
2089 for (i = 0; i < 4; i++)
2090 mmc->capacity_gp[i] = 0;
2091
2092 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2093 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2094
2095 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2096 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2097
2098 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2099 cmd.cmdidx = MMC_CMD_SET_DSR;
2100 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2101 cmd.resp_type = MMC_RSP_NONE;
2102 if (mmc_send_cmd(mmc, &cmd, NULL))
2103 printf("MMC: SET_DSR failed\n");
2104 }
2105
2106 /* Select the card, and put it into Transfer Mode */
2107 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2108 cmd.cmdidx = MMC_CMD_SELECT_CARD;
2109 cmd.resp_type = MMC_RSP_R1;
2110 cmd.cmdarg = mmc->rca << 16;
2111 err = mmc_send_cmd(mmc, &cmd, NULL);
2112
2113 if (err)
2114 return err;
2115 }
2116
2117 /*
2118 * For SD, its erase group is always one sector
2119 */
2120 mmc->erase_grp_size = 1;
2121 mmc->part_config = MMCPART_NOAVAILABLE;
2122
2123 err = mmc_startup_v4(mmc);
2124 if (err)
2125 return err;
2126
2127 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2128 if (err)
2129 return err;
2130
2131 if (IS_SD(mmc)) {
2132 err = sd_get_capabilities(mmc);
2133 if (err)
2134 return err;
2135 err = sd_select_mode_and_width(mmc, mmc->card_caps);
2136 } else {
2137 err = mmc_get_capabilities(mmc);
2138 if (err)
2139 return err;
2140 mmc_select_mode_and_width(mmc, mmc->card_caps);
2141 }
2142
2143 if (err)
2144 return err;
2145
2146 mmc->best_mode = mmc->selected_mode;
2147
2148 /* Fix the block length for DDR mode */
2149 if (mmc->ddr_mode) {
2150 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2151 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2152 }
2153
2154 /* fill in device description */
2155 bdesc = mmc_get_blk_desc(mmc);
2156 bdesc->lun = 0;
2157 bdesc->hwpart = 0;
2158 bdesc->type = 0;
2159 bdesc->blksz = mmc->read_bl_len;
2160 bdesc->log2blksz = LOG2(bdesc->blksz);
2161 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2162 #if !defined(CONFIG_SPL_BUILD) || \
2163 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2164 !defined(CONFIG_USE_TINY_PRINTF))
2165 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2166 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2167 (mmc->cid[3] >> 16) & 0xffff);
2168 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2169 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2170 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2171 (mmc->cid[2] >> 24) & 0xff);
2172 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2173 (mmc->cid[2] >> 16) & 0xf);
2174 #else
2175 bdesc->vendor[0] = 0;
2176 bdesc->product[0] = 0;
2177 bdesc->revision[0] = 0;
2178 #endif
2179 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
2180 part_init(bdesc);
2181 #endif
2182
2183 return 0;
2184 }
2185
2186 static int mmc_send_if_cond(struct mmc *mmc)
2187 {
2188 struct mmc_cmd cmd;
2189 int err;
2190
2191 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2192 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2193 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2194 cmd.resp_type = MMC_RSP_R7;
2195
2196 err = mmc_send_cmd(mmc, &cmd, NULL);
2197
2198 if (err)
2199 return err;
2200
2201 if ((cmd.response[0] & 0xff) != 0xaa)
2202 return -EOPNOTSUPP;
2203 else
2204 mmc->version = SD_VERSION_2;
2205
2206 return 0;
2207 }
2208
2209 #if !CONFIG_IS_ENABLED(DM_MMC)
2210 /* board-specific MMC power initializations. */
2211 __weak void board_mmc_power_init(void)
2212 {
2213 }
2214 #endif
2215
2216 static int mmc_power_init(struct mmc *mmc)
2217 {
2218 #if CONFIG_IS_ENABLED(DM_MMC)
2219 #if CONFIG_IS_ENABLED(DM_REGULATOR)
2220 int ret;
2221
2222 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
2223 &mmc->vmmc_supply);
2224 if (ret)
2225 debug("%s: No vmmc supply\n", mmc->dev->name);
2226
2227 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
2228 &mmc->vqmmc_supply);
2229 if (ret)
2230 debug("%s: No vqmmc supply\n", mmc->dev->name);
2231 #endif
2232 #else /* !CONFIG_DM_MMC */
2233 /*
2234 * Driver model should use a regulator, as above, rather than calling
2235 * out to board code.
2236 */
2237 board_mmc_power_init();
2238 #endif
2239 return 0;
2240 }
2241
2242 /*
2243 * put the host in the initial state:
2244 * - turn on Vdd (card power supply)
2245 * - configure the bus width and clock to minimal values
2246 */
2247 static void mmc_set_initial_state(struct mmc *mmc)
2248 {
2249 int err;
2250
2251 /* First try to set 3.3V. If it fails set to 1.8V */
2252 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2253 if (err != 0)
2254 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2255 if (err != 0)
2256 printf("mmc: failed to set signal voltage\n");
2257
2258 mmc_select_mode(mmc, MMC_LEGACY);
2259 mmc_set_bus_width(mmc, 1);
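/* A requested clock of 0 is clamped up to cfg->f_min by mmc_set_clock(), i.e. the slow identification clock. */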
2260 mmc_set_clock(mmc, 0, false);
2261 }
2262
2263 static int mmc_power_on(struct mmc *mmc)
2264 {
2265 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2266 if (mmc->vmmc_supply) {
2267 int ret = regulator_set_enable(mmc->vmmc_supply, true);
2268
2269 if (ret) {
2270 puts("Error enabling VMMC supply\n");
2271 return ret;
2272 }
2273 }
2274 #endif
2275 return 0;
2276 }
2277
2278 static int mmc_power_off(struct mmc *mmc)
2279 {
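/* Request the lowest clock and gate it (disable == true) before the supply is cut. */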
2280 mmc_set_clock(mmc, 1, true);
2281 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2282 if (mmc->vmmc_supply) {
2283 int ret = regulator_set_enable(mmc->vmmc_supply, false);
2284
2285 if (ret) {
2286 debug("Error disabling VMMC supply\n");
2287 return ret;
2288 }
2289 }
2290 #endif
2291 return 0;
2292 }
2293
2294 static int mmc_power_cycle(struct mmc *mmc)
2295 {
2296 int ret;
2297
2298 ret = mmc_power_off(mmc);
2299 if (ret)
2300 return ret;
2301 /*
2302 * The SD spec recommends at least 1 ms of delay; wait 2 ms to be
2303 * on the safe side.
2304 */
2305 udelay(2000);
2306 return mmc_power_on(mmc);
2307 }
2308
2309 int mmc_start_init(struct mmc *mmc)
2310 {
2311 bool no_card;
2312 bool uhs_en = supports_uhs(mmc->cfg->host_caps);
2313 int err;
2314
2315 mmc->host_caps = mmc->cfg->host_caps;
2316
2317 /* we pretend there's no card when init is NULL */
2318 no_card = mmc_getcd(mmc) == 0;
2319 #if !CONFIG_IS_ENABLED(DM_MMC)
2320 no_card = no_card || (mmc->cfg->ops->init == NULL);
2321 #endif
2322 if (no_card) {
2323 mmc->has_init = 0;
2324 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2325 printf("MMC: no card present\n");
2326 #endif
2327 return -ENOMEDIUM;
2328 }
2329
2330 if (mmc->has_init)
2331 return 0;
2332
2333 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
2334 mmc_adapter_card_type_ident();
2335 #endif
2336 err = mmc_power_init(mmc);
2337 if (err)
2338 return err;
2339
2340 #ifdef CONFIG_MMC_QUIRKS
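/* Retry quirks: some cards intermittently fail SET_BLOCKLEN and SEND_CID, so allow those commands a few bounded retries. */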
2341 mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
2342 MMC_QUIRK_RETRY_SEND_CID;
2343 #endif
2344
2345 err = mmc_power_cycle(mmc);
2346 if (err) {
2347 /*
2348 * if power cycling is not supported, we should not try
2349 * to use the UHS modes, because we wouldn't be able to
2350 * recover from an error during the UHS initialization.
2351 */
2352 debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
2353 uhs_en = false;
2354 mmc->host_caps &= ~UHS_CAPS;
2355 err = mmc_power_on(mmc);
2356 }
2357 if (err)
2358 return err;
2359
2360 #if CONFIG_IS_ENABLED(DM_MMC)
2361 /* The device has already been probed and is ready for use */
2362 #else
2363 /* made sure it's not NULL earlier */
2364 err = mmc->cfg->ops->init(mmc);
2365 if (err)
2366 return err;
2367 #endif
2368 mmc->ddr_mode = 0;
2369
2370 retry:
2371 mmc_set_initial_state(mmc);
2372 mmc_send_init_stream(mmc);
2373
2374 /* Reset the Card */
2375 err = mmc_go_idle(mmc);
2376
2377 if (err)
2378 return err;
2379
2380 /* The internal partition is reset to the user partition (0) on every CMD0 */
2381 mmc_get_blk_desc(mmc)->hwpart = 0;
2382
2383 /* Test for SD version 2 */
2384 err = mmc_send_if_cond(mmc);
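/*
* The return value is deliberately not checked: SD 1.x cards do not
* answer CMD8, in which case mmc->version simply keeps its default and
* the ACMD41/CMD1 probing below proceeds anyway.
*/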
2385
2386 /* Now try to get the SD card's operating condition */
2387 err = sd_send_op_cond(mmc, uhs_en);
2388 if (err && uhs_en) {
2389 uhs_en = false;
2390 mmc_power_cycle(mmc);
2391 goto retry;
2392 }
2393
2394 /* If the command timed out, we check for an MMC card */
2395 if (err == -ETIMEDOUT) {
2396 err = mmc_send_op_cond(mmc);
2397
2398 if (err) {
2399 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2400 printf("Card did not respond to voltage select!\n");
2401 #endif
2402 return -EOPNOTSUPP;
2403 }
2404 }
2405
2406 if (!err)
2407 mmc->init_in_progress = 1;
2408
2409 return err;
2410 }
2411
2412 static int mmc_complete_init(struct mmc *mmc)
2413 {
2414 int err = 0;
2415
2416 mmc->init_in_progress = 0;
2417 if (mmc->op_cond_pending)
2418 err = mmc_complete_op_cond(mmc);
2419
2420 if (!err)
2421 err = mmc_startup(mmc);
2422 if (err)
2423 mmc->has_init = 0;
2424 else
2425 mmc->has_init = 1;
2426 return err;
2427 }
2428
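/*
* Illustrative caller sketch (not part of the driver), assuming device 0
* exists and 'buf' holds at least one block (MMC_MAX_BLOCK_LEN bytes):
*
*	struct mmc *mmc = find_mmc_device(0);
*
*	if (mmc && !mmc_init(mmc))
*		blk_dread(mmc_get_blk_desc(mmc), 0, 1, buf);
*/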
2429 int mmc_init(struct mmc *mmc)
2430 {
2431 int err = 0;
2432 __maybe_unused unsigned start;
2433 #if CONFIG_IS_ENABLED(DM_MMC)
2434 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2435
2436 upriv->mmc = mmc;
2437 #endif
2438 if (mmc->has_init)
2439 return 0;
2440
2441 start = get_timer(0);
2442
2443 if (!mmc->init_in_progress)
2444 err = mmc_start_init(mmc);
2445
2446 if (!err)
2447 err = mmc_complete_init(mmc);
2448 if (err)
2449 printf("%s: %d, time %lu\n", __func__, err, get_timer(start));
2450
2451 return err;
2452 }
2453
2454 int mmc_set_dsr(struct mmc *mmc, u16 val)
2455 {
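/* Only latches the value; it is programmed into the card with CMD4 (SET_DSR) later, during mmc_startup(). */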
2456 mmc->dsr = val;
2457 return 0;
2458 }
2459
2460 /* CPU-specific MMC initializations */
2461 __weak int cpu_mmc_init(bd_t *bis)
2462 {
2463 return -1;
2464 }
2465
2466 /* board-specific MMC initializations. */
2467 __weak int board_mmc_init(bd_t *bis)
2468 {
2469 return -1;
2470 }
2471
2472 void mmc_set_preinit(struct mmc *mmc, int preinit)
2473 {
2474 mmc->preinit = preinit;
2475 }
2476
2477 #if CONFIG_IS_ENABLED(DM_MMC) && defined(CONFIG_SPL_BUILD)
2478 static int mmc_probe(bd_t *bis)
2479 {
2480 return 0;
2481 }
2482 #elif CONFIG_IS_ENABLED(DM_MMC)
2483 static int mmc_probe(bd_t *bis)
2484 {
2485 int ret, i;
2486 struct uclass *uc;
2487 struct udevice *dev;
2488
2489 ret = uclass_get(UCLASS_MMC, &uc);
2490 if (ret)
2491 return ret;
2492
2493 /*
2494 * Try to add them in sequence order. Really with driver model we
2495 * should allow holes, but the current MMC list does not allow that.
2496 * So if we request 0, 1, 3 we will get 0, 1, 2.
2497 */
2498 for (i = 0; ; i++) {
2499 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
2500 if (ret == -ENODEV)
2501 break;
2502 }
2503 uclass_foreach_dev(dev, uc) {
2504 ret = device_probe(dev);
2505 if (ret)
2506 printf("%s - probe failed: %d\n", dev->name, ret);
2507 }
2508
2509 return 0;
2510 }
2511 #else
2512 static int mmc_probe(bd_t *bis)
2513 {
2514 if (board_mmc_init(bis) < 0)
2515 cpu_mmc_init(bis);
2516
2517 return 0;
2518 }
2519 #endif
2520
2521 int mmc_initialize(bd_t *bis)
2522 {
2523 static int initialized = 0;
2524 int ret;
2525 if (initialized) /* Avoid initializing mmc multiple times */
2526 return 0;
2527 initialized = 1;
2528
2529 #if !CONFIG_IS_ENABLED(BLK)
2530 #if !CONFIG_IS_ENABLED(MMC_TINY)
2531 mmc_list_init();
2532 #endif
2533 #endif
2534 ret = mmc_probe(bis);
2535 if (ret)
2536 return ret;
2537
2538 #ifndef CONFIG_SPL_BUILD
2539 print_mmc_devices(',');
2540 #endif
2541
2542 mmc_do_preinit();
2543 return 0;
2544 }
2545
2546 #ifdef CONFIG_CMD_BKOPS_ENABLE
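/*
* Read the EXT_CSD register, check that the card advertises
* BKOPS_SUPPORT, and set BKOPS_EN so manual background operations can
* be triggered on the card.
*/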
2547 int mmc_set_bkops_enable(struct mmc *mmc)
2548 {
2549 int err;
2550 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2551
2552 err = mmc_send_ext_csd(mmc, ext_csd);
2553 if (err) {
2554 puts("Could not get ext_csd register values\n");
2555 return err;
2556 }
2557
2558 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
2559 puts("Background operations not supported on device\n");
2560 return -EMEDIUMTYPE;
2561 }
2562
2563 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
2564 puts("Background operations already enabled\n");
2565 return 0;
2566 }
2567
2568 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
2569 if (err) {
2570 puts("Failed to enable manual background operations\n");
2571 return err;
2572 }
2573
2574 puts("Enabled manual background operations\n");
2575
2576 return 0;
2577 }
2578 #endif