1 /*
2 * Copyright 2008, Freescale Semiconductor, Inc
3 * Andy Fleming
4 *
5 * Based vaguely on the Linux code
6 *
7 * SPDX-License-Identifier: GPL-2.0+
8 */
9
10 #include <config.h>
11 #include <common.h>
12 #include <command.h>
13 #include <dm.h>
14 #include <dm/device-internal.h>
15 #include <errno.h>
16 #include <mmc.h>
17 #include <part.h>
18 #include <power/regulator.h>
19 #include <malloc.h>
20 #include <memalign.h>
21 #include <linux/list.h>
22 #include <div64.h>
23 #include "mmc_private.h"
24
25 static const unsigned int sd_au_size[] = {
26 0, SZ_16K / 512, SZ_32K / 512,
27 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
28 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
29 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
30 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512, SZ_64M / 512,
31 };
32
33 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
34 static int mmc_power_cycle(struct mmc *mmc);
35 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps);
36
37 #if CONFIG_IS_ENABLED(MMC_TINY)
38 static struct mmc mmc_static;
39 struct mmc *find_mmc_device(int dev_num)
40 {
41 return &mmc_static;
42 }
43
44 void mmc_do_preinit(void)
45 {
46 struct mmc *m = &mmc_static;
47 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
48 mmc_set_preinit(m, 1);
49 #endif
50 if (m->preinit)
51 mmc_start_init(m);
52 }
53
54 struct blk_desc *mmc_get_blk_desc(struct mmc *mmc)
55 {
56 return &mmc->block_dev;
57 }
58 #endif
59
60 #if !CONFIG_IS_ENABLED(DM_MMC)
61
62 static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout)
63 {
64 return -ENOSYS;
65 }
66
67 __weak int board_mmc_getwp(struct mmc *mmc)
68 {
69 return -1;
70 }
71
72 int mmc_getwp(struct mmc *mmc)
73 {
74 int wp;
75
76 wp = board_mmc_getwp(mmc);
77
78 if (wp < 0) {
79 if (mmc->cfg->ops->getwp)
80 wp = mmc->cfg->ops->getwp(mmc);
81 else
82 wp = 0;
83 }
84
85 return wp;
86 }
87
88 __weak int board_mmc_getcd(struct mmc *mmc)
89 {
90 return -1;
91 }
92 #endif
93
94 #ifdef CONFIG_MMC_TRACE
95 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
96 {
97 printf("CMD_SEND:%d\n", cmd->cmdidx);
98 printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
99 }
100
101 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
102 {
103 int i;
104 u8 *ptr;
105
106 if (ret) {
107 printf("\t\tRET\t\t\t %d\n", ret);
108 } else {
109 switch (cmd->resp_type) {
110 case MMC_RSP_NONE:
111 printf("\t\tMMC_RSP_NONE\n");
112 break;
113 case MMC_RSP_R1:
114 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
115 cmd->response[0]);
116 break;
117 case MMC_RSP_R1b:
118 printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
119 cmd->response[0]);
120 break;
121 case MMC_RSP_R2:
122 printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
123 cmd->response[0]);
124 printf("\t\t \t\t 0x%08X \n",
125 cmd->response[1]);
126 printf("\t\t \t\t 0x%08X \n",
127 cmd->response[2]);
128 printf("\t\t \t\t 0x%08X \n",
129 cmd->response[3]);
130 printf("\n");
131 printf("\t\t\t\t\tDUMPING DATA\n");
132 for (i = 0; i < 4; i++) {
133 int j;
134 printf("\t\t\t\t\t%03d - ", i*4);
135 ptr = (u8 *)&cmd->response[i];
136 ptr += 3;
137 for (j = 0; j < 4; j++)
138 printf("%02X ", *ptr--);
139 printf("\n");
140 }
141 break;
142 case MMC_RSP_R3:
143 printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
144 cmd->response[0]);
145 break;
146 default:
147 printf("\t\tERROR MMC rsp not supported\n");
148 break;
149 }
150 }
151 }
152
153 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
154 {
155 int status;
156
157 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
158 printf("CURR STATE:%d\n", status);
159 }
160 #endif
161
162 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
163 const char *mmc_mode_name(enum bus_mode mode)
164 {
165 static const char *const names[] = {
166 [MMC_LEGACY] = "MMC legacy",
167 [SD_LEGACY] = "SD Legacy",
168 [MMC_HS] = "MMC High Speed (26MHz)",
169 [SD_HS] = "SD High Speed (50MHz)",
170 [UHS_SDR12] = "UHS SDR12 (25MHz)",
171 [UHS_SDR25] = "UHS SDR25 (50MHz)",
172 [UHS_SDR50] = "UHS SDR50 (100MHz)",
173 [UHS_SDR104] = "UHS SDR104 (208MHz)",
174 [UHS_DDR50] = "UHS DDR50 (50MHz)",
175 [MMC_HS_52] = "MMC High Speed (52MHz)",
176 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
177 [MMC_HS_200] = "HS200 (200MHz)",
178 };
179
180 if (mode >= MMC_MODES_END)
181 return "Unknown mode";
182 else
183 return names[mode];
184 }
185 #endif
186
187 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
188 {
189 static const int freqs[] = {
190 [SD_LEGACY] = 25000000,
191 [MMC_HS] = 26000000,
192 [SD_HS] = 50000000,
193 [UHS_SDR12] = 25000000,
194 [UHS_SDR25] = 50000000,
195 [UHS_SDR50] = 100000000,
196 [UHS_SDR104] = 208000000,
197 [UHS_DDR50] = 50000000,
198 [MMC_HS_52] = 52000000,
199 [MMC_DDR_52] = 52000000,
200 [MMC_HS_200] = 200000000,
201 };
202
203 if (mode == MMC_LEGACY)
204 return mmc->legacy_speed;
205 else if (mode >= MMC_MODES_END)
206 return 0;
207 else
208 return freqs[mode];
209 }
210
211 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
212 {
213 mmc->selected_mode = mode;
214 mmc->tran_speed = mmc_mode2freq(mmc, mode);
215 mmc->ddr_mode = mmc_is_mode_ddr(mode);
216 debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
217 mmc->tran_speed / 1000000);
218 return 0;
219 }
220
221 #if !CONFIG_IS_ENABLED(DM_MMC)
222 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
223 {
224 int ret;
225
226 mmmc_trace_before_send(mmc, cmd);
227 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
228 mmmc_trace_after_send(mmc, cmd, ret);
229
230 return ret;
231 }
232 #endif
233
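/*
 * Poll the card with CMD13 (SEND_STATUS) until it reports ready-for-data
 * and has left the programming state, or until the timeout (in ms) expires.
 * Returns 0 on success, -ECOMM on a status error and -ETIMEDOUT on timeout.
 */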
234 int mmc_send_status(struct mmc *mmc, int timeout)
235 {
236 struct mmc_cmd cmd;
237 int err, retries = 5;
238
239 cmd.cmdidx = MMC_CMD_SEND_STATUS;
240 cmd.resp_type = MMC_RSP_R1;
241 if (!mmc_host_is_spi(mmc))
242 cmd.cmdarg = mmc->rca << 16;
243
244 while (1) {
245 err = mmc_send_cmd(mmc, &cmd, NULL);
246 if (!err) {
247 if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
248 (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
249 MMC_STATE_PRG)
250 break;
251
252 if (cmd.response[0] & MMC_STATUS_MASK) {
253 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
254 printf("Status Error: 0x%08X\n",
255 cmd.response[0]);
256 #endif
257 return -ECOMM;
258 }
259 } else if (--retries < 0)
260 return err;
261
262 if (timeout-- <= 0)
263 break;
264
265 udelay(1000);
266 }
267
268 mmc_trace_state(mmc, &cmd);
269 if (timeout <= 0) {
270 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
271 printf("Timeout waiting card ready\n");
272 #endif
273 return -ETIMEDOUT;
274 }
275
276 return 0;
277 }
278
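/*
 * Set the block length with CMD16. This is skipped in DDR mode, where the
 * block length is fixed and may not be changed.
 */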
279 int mmc_set_blocklen(struct mmc *mmc, int len)
280 {
281 struct mmc_cmd cmd;
282
283 if (mmc->ddr_mode)
284 return 0;
285
286 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
287 cmd.resp_type = MMC_RSP_R1;
288 cmd.cmdarg = len;
289
290 return mmc_send_cmd(mmc, &cmd, NULL);
291 }
292
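/*
 * Read 'blkcnt' blocks starting at 'start' using CMD17/CMD18. The start
 * address is a block number for high-capacity cards and a byte offset
 * otherwise. Multiple-block reads are terminated with CMD12. Returns the
 * number of blocks read, or 0 on error.
 */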
293 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
294 lbaint_t blkcnt)
295 {
296 struct mmc_cmd cmd;
297 struct mmc_data data;
298
299 if (blkcnt > 1)
300 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
301 else
302 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
303
304 if (mmc->high_capacity)
305 cmd.cmdarg = start;
306 else
307 cmd.cmdarg = start * mmc->read_bl_len;
308
309 cmd.resp_type = MMC_RSP_R1;
310
311 data.dest = dst;
312 data.blocks = blkcnt;
313 data.blocksize = mmc->read_bl_len;
314 data.flags = MMC_DATA_READ;
315
316 if (mmc_send_cmd(mmc, &cmd, &data))
317 return 0;
318
319 if (blkcnt > 1) {
320 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
321 cmd.cmdarg = 0;
322 cmd.resp_type = MMC_RSP_R1b;
323 if (mmc_send_cmd(mmc, &cmd, NULL)) {
324 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
325 printf("mmc fail to send stop cmd\n");
326 #endif
327 return 0;
328 }
329 }
330
331 return blkcnt;
332 }
333
334 #if CONFIG_IS_ENABLED(BLK)
335 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
336 #else
337 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
338 void *dst)
339 #endif
340 {
341 #if CONFIG_IS_ENABLED(BLK)
342 struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
343 #endif
344 int dev_num = block_dev->devnum;
345 int err;
346 lbaint_t cur, blocks_todo = blkcnt;
347
348 if (blkcnt == 0)
349 return 0;
350
351 struct mmc *mmc = find_mmc_device(dev_num);
352 if (!mmc)
353 return 0;
354
355 if (CONFIG_IS_ENABLED(MMC_TINY))
356 err = mmc_switch_part(mmc, block_dev->hwpart);
357 else
358 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
359
360 if (err < 0)
361 return 0;
362
363 if ((start + blkcnt) > block_dev->lba) {
364 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
365 printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
366 start + blkcnt, block_dev->lba);
367 #endif
368 return 0;
369 }
370
371 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
372 debug("%s: Failed to set blocklen\n", __func__);
373 return 0;
374 }
375
376 do {
377 cur = (blocks_todo > mmc->cfg->b_max) ?
378 mmc->cfg->b_max : blocks_todo;
379 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
380 debug("%s: Failed to read blocks\n", __func__);
381 return 0;
382 }
383 blocks_todo -= cur;
384 start += cur;
385 dst += cur * mmc->read_bl_len;
386 } while (blocks_todo > 0);
387
388 return blkcnt;
389 }
390
391 static int mmc_go_idle(struct mmc *mmc)
392 {
393 struct mmc_cmd cmd;
394 int err;
395
396 udelay(1000);
397
398 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
399 cmd.cmdarg = 0;
400 cmd.resp_type = MMC_RSP_NONE;
401
402 err = mmc_send_cmd(mmc, &cmd, NULL);
403
404 if (err)
405 return err;
406
407 udelay(2000);
408
409 return 0;
410 }
411
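/*
 * Switch the card signalling to the requested voltage. For 1.8V this
 * follows the CMD11 sequence: send CMD11, wait for the card to drive
 * dat[0:3] low, gate the clock, change the host signal voltage, then
 * re-enable the clock and wait for dat[0:3] to go high again.
 */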
412 static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
413 {
414 struct mmc_cmd cmd;
415 int err = 0;
416
417 /*
418 * Send CMD11 only if the request is to switch the card to
419 * 1.8V signalling.
420 */
421 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
422 return mmc_set_signal_voltage(mmc, signal_voltage);
423
424 cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
425 cmd.cmdarg = 0;
426 cmd.resp_type = MMC_RSP_R1;
427
428 err = mmc_send_cmd(mmc, &cmd, NULL);
429 if (err)
430 return err;
431
432 if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
433 return -EIO;
434
435 /*
436 * The card should drive cmd and dat[0:3] low immediately
437 * after the response of cmd11, but wait 100 us to be sure
438 */
439 err = mmc_wait_dat0(mmc, 0, 100);
440 if (err == -ENOSYS)
441 udelay(100);
442 else if (err)
443 return -ETIMEDOUT;
444
445 /*
446 * During a signal voltage level switch, the clock must be gated
447 * for 5 ms according to the SD spec
448 */
449 mmc_set_clock(mmc, mmc->clock, true);
450
451 err = mmc_set_signal_voltage(mmc, signal_voltage);
452 if (err)
453 return err;
454
455 /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
456 mdelay(10);
457 mmc_set_clock(mmc, mmc->clock, false);
458
459 /*
460 * Failure to switch is indicated by the card holding
461 * dat[0:3] low. Wait for at least 1 ms according to spec
462 */
463 err = mmc_wait_dat0(mmc, 1, 1000);
464 if (err == -ENOSYS)
465 udelay(1000);
466 else if (err)
467 return -ETIMEDOUT;
468
469 return 0;
470 }
471
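/*
 * Send ACMD41 in a loop until the card reports that its power-up routine
 * has finished (OCR_BUSY set). High capacity (HCS) is requested for SD 2.0
 * cards and 1.8V signalling (S18R) when UHS is enabled; if the card accepts
 * 1.8V, the voltage switch sequence is started here.
 */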
472 static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
473 {
474 int timeout = 1000;
475 int err;
476 struct mmc_cmd cmd;
477
478 while (1) {
479 cmd.cmdidx = MMC_CMD_APP_CMD;
480 cmd.resp_type = MMC_RSP_R1;
481 cmd.cmdarg = 0;
482
483 err = mmc_send_cmd(mmc, &cmd, NULL);
484
485 if (err)
486 return err;
487
488 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
489 cmd.resp_type = MMC_RSP_R3;
490
491 /*
492 * Most cards do not answer if some reserved bits
493 * in the ocr are set. However, some controllers
494 * can set bit 7 (reserved for low voltages), but
495 * how to manage low-voltage SD cards is not yet
496 * specified.
497 */
498 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
499 (mmc->cfg->voltages & 0xff8000);
500
501 if (mmc->version == SD_VERSION_2)
502 cmd.cmdarg |= OCR_HCS;
503
504 if (uhs_en)
505 cmd.cmdarg |= OCR_S18R;
506
507 err = mmc_send_cmd(mmc, &cmd, NULL);
508
509 if (err)
510 return err;
511
512 if (cmd.response[0] & OCR_BUSY)
513 break;
514
515 if (timeout-- <= 0)
516 return -EOPNOTSUPP;
517
518 udelay(1000);
519 }
520
521 if (mmc->version != SD_VERSION_2)
522 mmc->version = SD_VERSION_1_0;
523
524 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
525 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
526 cmd.resp_type = MMC_RSP_R3;
527 cmd.cmdarg = 0;
528
529 err = mmc_send_cmd(mmc, &cmd, NULL);
530
531 if (err)
532 return err;
533 }
534
535 mmc->ocr = cmd.response[0];
536
537 if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
538 == 0x41000000) {
539 err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
540 if (err)
541 return err;
542 }
543
544 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
545 mmc->rca = 0;
546
547 return 0;
548 }
549
550 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
551 {
552 struct mmc_cmd cmd;
553 int err;
554
555 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
556 cmd.resp_type = MMC_RSP_R3;
557 cmd.cmdarg = 0;
558 if (use_arg && !mmc_host_is_spi(mmc))
559 cmd.cmdarg = OCR_HCS |
560 (mmc->cfg->voltages &
561 (mmc->ocr & OCR_VOLTAGE_MASK)) |
562 (mmc->ocr & OCR_ACCESS_MODE);
563
564 err = mmc_send_cmd(mmc, &cmd, NULL);
565 if (err)
566 return err;
567 mmc->ocr = cmd.response[0];
568 return 0;
569 }
570
571 static int mmc_send_op_cond(struct mmc *mmc)
572 {
573 int err, i;
574
575 /* Some cards seem to need this */
576 mmc_go_idle(mmc);
577
578 /* Ask the card for its capabilities */
579 for (i = 0; i < 2; i++) {
580 err = mmc_send_op_cond_iter(mmc, i != 0);
581 if (err)
582 return err;
583
584 /* exit if not busy (flag seems to be inverted) */
585 if (mmc->ocr & OCR_BUSY)
586 break;
587 }
588 mmc->op_cond_pending = 1;
589 return 0;
590 }
591
592 static int mmc_complete_op_cond(struct mmc *mmc)
593 {
594 struct mmc_cmd cmd;
595 int timeout = 1000;
596 uint start;
597 int err;
598
599 mmc->op_cond_pending = 0;
600 if (!(mmc->ocr & OCR_BUSY)) {
601 /* Some cards seem to need this */
602 mmc_go_idle(mmc);
603
604 start = get_timer(0);
605 while (1) {
606 err = mmc_send_op_cond_iter(mmc, 1);
607 if (err)
608 return err;
609 if (mmc->ocr & OCR_BUSY)
610 break;
611 if (get_timer(start) > timeout)
612 return -EOPNOTSUPP;
613 udelay(100);
614 }
615 }
616
617 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
618 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
619 cmd.resp_type = MMC_RSP_R3;
620 cmd.cmdarg = 0;
621
622 err = mmc_send_cmd(mmc, &cmd, NULL);
623
624 if (err)
625 return err;
626
627 mmc->ocr = cmd.response[0];
628 }
629
630 mmc->version = MMC_VERSION_UNKNOWN;
631
632 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
633 mmc->rca = 1;
634
635 return 0;
636 }
637
638
639 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
640 {
641 struct mmc_cmd cmd;
642 struct mmc_data data;
643 int err;
644
645 /* Get the Extended CSD register */
646 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
647 cmd.resp_type = MMC_RSP_R1;
648 cmd.cmdarg = 0;
649
650 data.dest = (char *)ext_csd;
651 data.blocks = 1;
652 data.blocksize = MMC_MAX_BLOCK_LEN;
653 data.flags = MMC_DATA_READ;
654
655 err = mmc_send_cmd(mmc, &cmd, &data);
656
657 return err;
658 }
659
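/*
 * Write a single byte of the EXT_CSD register with CMD6 and wait for the
 * card to leave the programming state. The command is retried up to three
 * times before giving up.
 */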
660 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
661 {
662 struct mmc_cmd cmd;
663 int timeout = 1000;
664 int retries = 3;
665 int ret;
666
667 cmd.cmdidx = MMC_CMD_SWITCH;
668 cmd.resp_type = MMC_RSP_R1b;
669 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
670 (index << 16) |
671 (value << 8);
672
673 while (retries > 0) {
674 ret = mmc_send_cmd(mmc, &cmd, NULL);
675
676 /* Waiting for the ready status */
677 if (!ret) {
678 ret = mmc_send_status(mmc, timeout);
679 return ret;
680 }
681
682 retries--;
683 }
684
685 return ret;
686
687 }
688
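/*
 * Program the card-side timing (EXT_CSD HS_TIMING) for the requested bus
 * mode. For MMC_HS and MMC_HS_52 the EXT_CSD is read back to confirm that
 * the switch actually took effect.
 */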
689 static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode)
690 {
691 int err;
692 int speed_bits;
693
694 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
695
696 switch (mode) {
697 case MMC_HS:
698 case MMC_HS_52:
699 case MMC_DDR_52:
700 speed_bits = EXT_CSD_TIMING_HS;
701 break;
702 case MMC_HS_200:
703 speed_bits = EXT_CSD_TIMING_HS200;
704 break;
705 case MMC_LEGACY:
706 speed_bits = EXT_CSD_TIMING_LEGACY;
707 break;
708 default:
709 return -EINVAL;
710 }
711 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
712 speed_bits);
713 if (err)
714 return err;
715
716 if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
717 /* Now check to see that it worked */
718 err = mmc_send_ext_csd(mmc, test_csd);
719 if (err)
720 return err;
721
722 /* No high-speed support */
723 if (!test_csd[EXT_CSD_HS_TIMING])
724 return -ENOTSUPP;
725 }
726
727 return 0;
728 }
729
730 static int mmc_get_capabilities(struct mmc *mmc)
731 {
732 u8 *ext_csd = mmc->ext_csd;
733 char cardtype;
734
735 mmc->card_caps = MMC_MODE_1BIT;
736
737 if (mmc_host_is_spi(mmc))
738 return 0;
739
740 /* Only version 4 supports high-speed */
741 if (mmc->version < MMC_VERSION_4)
742 return 0;
743
744 if (!ext_csd) {
745 printf("No ext_csd found!\n"); /* this should enver happen */
746 return -ENOTSUPP;
747 }
748
749 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
750
751 cardtype = ext_csd[EXT_CSD_CARD_TYPE] & 0x3f;
752
753 if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
754 EXT_CSD_CARD_TYPE_HS200_1_8V)) {
755 mmc->card_caps |= MMC_MODE_HS200;
756 }
757 if (cardtype & EXT_CSD_CARD_TYPE_52) {
758 if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
759 mmc->card_caps |= MMC_MODE_DDR_52MHz;
760 mmc->card_caps |= MMC_MODE_HS_52MHz;
761 }
762 if (cardtype & EXT_CSD_CARD_TYPE_26)
763 mmc->card_caps |= MMC_MODE_HS;
764
765 return 0;
766 }
767
768 static int mmc_set_capacity(struct mmc *mmc, int part_num)
769 {
770 switch (part_num) {
771 case 0:
772 mmc->capacity = mmc->capacity_user;
773 break;
774 case 1:
775 case 2:
776 mmc->capacity = mmc->capacity_boot;
777 break;
778 case 3:
779 mmc->capacity = mmc->capacity_rpmb;
780 break;
781 case 4:
782 case 5:
783 case 6:
784 case 7:
785 mmc->capacity = mmc->capacity_gp[part_num - 4];
786 break;
787 default:
788 return -1;
789 }
790
791 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
792
793 return 0;
794 }
795
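/*
 * HS200 is treated as forbidden while a boot or RPMB partition (any
 * non-user partition) is selected, so before switching to such a partition
 * fall back to a mode the partition supports. When switching back, return
 * to the best mode the card and host are capable of.
 */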
796 static int mmc_boot_part_access_chk(struct mmc *mmc, unsigned int part_num)
797 {
798 int forbidden = 0;
799 bool change = false;
800
801 if (part_num & PART_ACCESS_MASK)
802 forbidden = MMC_CAP(MMC_HS_200);
803
804 if (MMC_CAP(mmc->selected_mode) & forbidden) {
805 debug("selected mode (%s) is forbidden for part %d\n",
806 mmc_mode_name(mmc->selected_mode), part_num);
807 change = true;
808 } else if (mmc->selected_mode != mmc->best_mode) {
809 debug("selected mode is not optimal\n");
810 change = true;
811 }
812
813 if (change)
814 return mmc_select_mode_and_width(mmc,
815 mmc->card_caps & ~forbidden);
816
817 return 0;
818 }
819
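/*
 * Select the hardware partition given by part_num (0 = user area, 1/2 =
 * boot partitions, 3 = RPMB, 4-7 = general purpose) by updating
 * PARTITION_CONFIG, adjusting the bus mode first if the current one is not
 * suitable for that partition, then updating the reported capacity and
 * block device descriptor.
 */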
820 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
821 {
822 int ret;
823
824 ret = mmc_boot_part_access_chk(mmc, part_num);
825 if (ret)
826 return ret;
827
828 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
829 (mmc->part_config & ~PART_ACCESS_MASK)
830 | (part_num & PART_ACCESS_MASK));
831
832 /*
833 * Set the capacity if the switch succeeded or was intended
834 * to return to representing the raw device.
835 */
836 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
837 ret = mmc_set_capacity(mmc, part_num);
838 mmc_get_blk_desc(mmc)->hwpart = part_num;
839 }
840
841 return ret;
842 }
843
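/*
 * Configure the eMMC hardware partitions (enhanced user area and the four
 * general purpose partitions) described by 'conf'. Depending on 'mode'
 * this only validates the layout, writes it to the EXT_CSD, or also marks
 * partitioning as completed; the new layout only takes effect after a
 * power cycle.
 */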
844 int mmc_hwpart_config(struct mmc *mmc,
845 const struct mmc_hwpart_conf *conf,
846 enum mmc_hwpart_conf_mode mode)
847 {
848 u8 part_attrs = 0;
849 u32 enh_size_mult;
850 u32 enh_start_addr;
851 u32 gp_size_mult[4];
852 u32 max_enh_size_mult;
853 u32 tot_enh_size_mult = 0;
854 u8 wr_rel_set;
855 int i, pidx, err;
856 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
857
858 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
859 return -EINVAL;
860
861 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
862 printf("eMMC >= 4.4 required for enhanced user data area\n");
863 return -EMEDIUMTYPE;
864 }
865
866 if (!(mmc->part_support & PART_SUPPORT)) {
867 printf("Card does not support partitioning\n");
868 return -EMEDIUMTYPE;
869 }
870
871 if (!mmc->hc_wp_grp_size) {
872 printf("Card does not define HC WP group size\n");
873 return -EMEDIUMTYPE;
874 }
875
876 /* check partition alignment and total enhanced size */
877 if (conf->user.enh_size) {
878 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
879 conf->user.enh_start % mmc->hc_wp_grp_size) {
880 printf("User data enhanced area not HC WP group "
881 "size aligned\n");
882 return -EINVAL;
883 }
884 part_attrs |= EXT_CSD_ENH_USR;
885 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
886 if (mmc->high_capacity) {
887 enh_start_addr = conf->user.enh_start;
888 } else {
889 enh_start_addr = (conf->user.enh_start << 9);
890 }
891 } else {
892 enh_size_mult = 0;
893 enh_start_addr = 0;
894 }
895 tot_enh_size_mult += enh_size_mult;
896
897 for (pidx = 0; pidx < 4; pidx++) {
898 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
899 printf("GP%i partition not HC WP group size "
900 "aligned\n", pidx+1);
901 return -EINVAL;
902 }
903 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
904 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
905 part_attrs |= EXT_CSD_ENH_GP(pidx);
906 tot_enh_size_mult += gp_size_mult[pidx];
907 }
908 }
909
910 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
911 printf("Card does not support enhanced attribute\n");
912 return -EMEDIUMTYPE;
913 }
914
915 err = mmc_send_ext_csd(mmc, ext_csd);
916 if (err)
917 return err;
918
919 max_enh_size_mult =
920 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
921 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
922 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
923 if (tot_enh_size_mult > max_enh_size_mult) {
924 printf("Total enhanced size exceeds maximum (%u > %u)\n",
925 tot_enh_size_mult, max_enh_size_mult);
926 return -EMEDIUMTYPE;
927 }
928
929 /* The default value of EXT_CSD_WR_REL_SET is device
930 * dependent; the values can only be changed if the
931 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
932 * changed only once and before partitioning is completed. */
933 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
934 if (conf->user.wr_rel_change) {
935 if (conf->user.wr_rel_set)
936 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
937 else
938 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
939 }
940 for (pidx = 0; pidx < 4; pidx++) {
941 if (conf->gp_part[pidx].wr_rel_change) {
942 if (conf->gp_part[pidx].wr_rel_set)
943 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
944 else
945 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
946 }
947 }
948
949 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
950 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
951 puts("Card does not support host controlled partition write "
952 "reliability settings\n");
953 return -EMEDIUMTYPE;
954 }
955
956 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
957 EXT_CSD_PARTITION_SETTING_COMPLETED) {
958 printf("Card already partitioned\n");
959 return -EPERM;
960 }
961
962 if (mode == MMC_HWPART_CONF_CHECK)
963 return 0;
964
965 /* Partitioning requires high-capacity size definitions */
966 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
967 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
968 EXT_CSD_ERASE_GROUP_DEF, 1);
969
970 if (err)
971 return err;
972
973 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
974
975 /* update erase group size to be high-capacity */
976 mmc->erase_grp_size =
977 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
978
979 }
980
981 /* all OK, write the configuration */
982 for (i = 0; i < 4; i++) {
983 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
984 EXT_CSD_ENH_START_ADDR+i,
985 (enh_start_addr >> (i*8)) & 0xFF);
986 if (err)
987 return err;
988 }
989 for (i = 0; i < 3; i++) {
990 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
991 EXT_CSD_ENH_SIZE_MULT+i,
992 (enh_size_mult >> (i*8)) & 0xFF);
993 if (err)
994 return err;
995 }
996 for (pidx = 0; pidx < 4; pidx++) {
997 for (i = 0; i < 3; i++) {
998 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
999 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1000 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1001 if (err)
1002 return err;
1003 }
1004 }
1005 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1006 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1007 if (err)
1008 return err;
1009
1010 if (mode == MMC_HWPART_CONF_SET)
1011 return 0;
1012
1013 /* The WR_REL_SET is a write-once register but shall be
1014 * written before setting PART_SETTING_COMPLETED. As it is
1015 * write-once we can only write it when completing the
1016 * partitioning. */
1017 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1018 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1019 EXT_CSD_WR_REL_SET, wr_rel_set);
1020 if (err)
1021 return err;
1022 }
1023
1024 /* Setting PART_SETTING_COMPLETED confirms the partition
1025 * configuration but it only becomes effective after power
1026 * cycle, so we do not adjust the partition related settings
1027 * in the mmc struct. */
1028
1029 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1030 EXT_CSD_PARTITION_SETTING,
1031 EXT_CSD_PARTITION_SETTING_COMPLETED);
1032 if (err)
1033 return err;
1034
1035 return 0;
1036 }
1037
1038 #if !CONFIG_IS_ENABLED(DM_MMC)
1039 int mmc_getcd(struct mmc *mmc)
1040 {
1041 int cd;
1042
1043 cd = board_mmc_getcd(mmc);
1044
1045 if (cd < 0) {
1046 if (mmc->cfg->ops->getcd)
1047 cd = mmc->cfg->ops->getcd(mmc);
1048 else
1049 cd = 1;
1050 }
1051
1052 return cd;
1053 }
1054 #endif
1055
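/*
 * Issue CMD6 (SWITCH_FUNC) for the given function group and value. 'mode'
 * selects between checking (SD_SWITCH_CHECK) and setting (SD_SWITCH_SWITCH)
 * the function; the 64-byte switch status is read back into 'resp'.
 */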
1056 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1057 {
1058 struct mmc_cmd cmd;
1059 struct mmc_data data;
1060
1061 /* Switch the frequency */
1062 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1063 cmd.resp_type = MMC_RSP_R1;
1064 cmd.cmdarg = (mode << 31) | 0xffffff;
1065 cmd.cmdarg &= ~(0xf << (group * 4));
1066 cmd.cmdarg |= value << (group * 4);
1067
1068 data.dest = (char *)resp;
1069 data.blocksize = 64;
1070 data.blocks = 1;
1071 data.flags = MMC_DATA_READ;
1072
1073 return mmc_send_cmd(mmc, &cmd, &data);
1074 }
1075
1076
1077 static int sd_get_capabilities(struct mmc *mmc)
1078 {
1079 int err;
1080 struct mmc_cmd cmd;
1081 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
1082 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
1083 struct mmc_data data;
1084 int timeout;
1085 u32 sd3_bus_mode;
1086
1087 mmc->card_caps = MMC_MODE_1BIT;
1088
1089 if (mmc_host_is_spi(mmc))
1090 return 0;
1091
1092 /* Read the SCR to find out if this card supports higher speeds */
1093 cmd.cmdidx = MMC_CMD_APP_CMD;
1094 cmd.resp_type = MMC_RSP_R1;
1095 cmd.cmdarg = mmc->rca << 16;
1096
1097 err = mmc_send_cmd(mmc, &cmd, NULL);
1098
1099 if (err)
1100 return err;
1101
1102 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1103 cmd.resp_type = MMC_RSP_R1;
1104 cmd.cmdarg = 0;
1105
1106 timeout = 3;
1107
1108 retry_scr:
1109 data.dest = (char *)scr;
1110 data.blocksize = 8;
1111 data.blocks = 1;
1112 data.flags = MMC_DATA_READ;
1113
1114 err = mmc_send_cmd(mmc, &cmd, &data);
1115
1116 if (err) {
1117 if (timeout--)
1118 goto retry_scr;
1119
1120 return err;
1121 }
1122
1123 mmc->scr[0] = __be32_to_cpu(scr[0]);
1124 mmc->scr[1] = __be32_to_cpu(scr[1]);
1125
1126 switch ((mmc->scr[0] >> 24) & 0xf) {
1127 case 0:
1128 mmc->version = SD_VERSION_1_0;
1129 break;
1130 case 1:
1131 mmc->version = SD_VERSION_1_10;
1132 break;
1133 case 2:
1134 mmc->version = SD_VERSION_2;
1135 if ((mmc->scr[0] >> 15) & 0x1)
1136 mmc->version = SD_VERSION_3;
1137 break;
1138 default:
1139 mmc->version = SD_VERSION_1_0;
1140 break;
1141 }
1142
1143 if (mmc->scr[0] & SD_DATA_4BIT)
1144 mmc->card_caps |= MMC_MODE_4BIT;
1145
1146 /* Version 1.0 doesn't support switching */
1147 if (mmc->version == SD_VERSION_1_0)
1148 return 0;
1149
1150 timeout = 4;
1151 while (timeout--) {
1152 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1153 (u8 *)switch_status);
1154
1155 if (err)
1156 return err;
1157
1158 /* The high-speed function is busy. Try again */
1159 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1160 break;
1161 }
1162
1163 /* If high-speed is supported, add the corresponding capability */
1164 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1165 mmc->card_caps |= MMC_CAP(SD_HS);
1166
1167 /* Versions before 3.0 don't support UHS modes */
1168 if (mmc->version < SD_VERSION_3)
1169 return 0;
1170
1171 sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
1172 if (sd3_bus_mode & SD_MODE_UHS_SDR104)
1173 mmc->card_caps |= MMC_CAP(UHS_SDR104);
1174 if (sd3_bus_mode & SD_MODE_UHS_SDR50)
1175 mmc->card_caps |= MMC_CAP(UHS_SDR50);
1176 if (sd3_bus_mode & SD_MODE_UHS_SDR25)
1177 mmc->card_caps |= MMC_CAP(UHS_SDR25);
1178 if (sd3_bus_mode & SD_MODE_UHS_SDR12)
1179 mmc->card_caps |= MMC_CAP(UHS_SDR12);
1180 if (sd3_bus_mode & SD_MODE_UHS_DDR50)
1181 mmc->card_caps |= MMC_CAP(UHS_DDR50);
1182
1183 return 0;
1184 }
1185
1186 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1187 {
1188 int err;
1189
1190 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1191 int speed;
1192
1193 switch (mode) {
1194 case SD_LEGACY:
1195 case UHS_SDR12:
1196 speed = UHS_SDR12_BUS_SPEED;
1197 break;
1198 case SD_HS:
1199 case UHS_SDR25:
1200 speed = UHS_SDR25_BUS_SPEED;
1201 break;
1202 case UHS_SDR50:
1203 speed = UHS_SDR50_BUS_SPEED;
1204 break;
1205 case UHS_DDR50:
1206 speed = UHS_DDR50_BUS_SPEED;
1207 break;
1208 case UHS_SDR104:
1209 speed = UHS_SDR104_BUS_SPEED;
1210 break;
1211 default:
1212 return -EINVAL;
1213 }
1214
1215 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
1216 if (err)
1217 return err;
1218
1219 if ((__be32_to_cpu(switch_status[4]) >> 24) != speed)
1220 return -ENOTSUPP;
1221
1222 return 0;
1223 }
1224
1225 int sd_select_bus_width(struct mmc *mmc, int w)
1226 {
1227 int err;
1228 struct mmc_cmd cmd;
1229
1230 if ((w != 4) && (w != 1))
1231 return -EINVAL;
1232
1233 cmd.cmdidx = MMC_CMD_APP_CMD;
1234 cmd.resp_type = MMC_RSP_R1;
1235 cmd.cmdarg = mmc->rca << 16;
1236
1237 err = mmc_send_cmd(mmc, &cmd, NULL);
1238 if (err)
1239 return err;
1240
1241 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1242 cmd.resp_type = MMC_RSP_R1;
1243 if (w == 4)
1244 cmd.cmdarg = 2;
1245 else if (w == 1)
1246 cmd.cmdarg = 0;
1247 err = mmc_send_cmd(mmc, &cmd, NULL);
1248 if (err)
1249 return err;
1250
1251 return 0;
1252 }
1253
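/*
 * Read the 64-byte SD Status register (ACMD13) and extract the allocation
 * unit size plus the erase timeout/offset used to estimate erase times.
 */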
1254 static int sd_read_ssr(struct mmc *mmc)
1255 {
1256 int err, i;
1257 struct mmc_cmd cmd;
1258 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1259 struct mmc_data data;
1260 int timeout = 3;
1261 unsigned int au, eo, et, es;
1262
1263 cmd.cmdidx = MMC_CMD_APP_CMD;
1264 cmd.resp_type = MMC_RSP_R1;
1265 cmd.cmdarg = mmc->rca << 16;
1266
1267 err = mmc_send_cmd(mmc, &cmd, NULL);
1268 if (err)
1269 return err;
1270
1271 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1272 cmd.resp_type = MMC_RSP_R1;
1273 cmd.cmdarg = 0;
1274
1275 retry_ssr:
1276 data.dest = (char *)ssr;
1277 data.blocksize = 64;
1278 data.blocks = 1;
1279 data.flags = MMC_DATA_READ;
1280
1281 err = mmc_send_cmd(mmc, &cmd, &data);
1282 if (err) {
1283 if (timeout--)
1284 goto retry_ssr;
1285
1286 return err;
1287 }
1288
1289 for (i = 0; i < 16; i++)
1290 ssr[i] = be32_to_cpu(ssr[i]);
1291
1292 au = (ssr[2] >> 12) & 0xF;
1293 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1294 mmc->ssr.au = sd_au_size[au];
1295 es = (ssr[3] >> 24) & 0xFF;
1296 es |= (ssr[2] & 0xFF) << 8;
1297 et = (ssr[3] >> 18) & 0x3F;
1298 if (es && et) {
1299 eo = (ssr[3] >> 16) & 0x3;
1300 mmc->ssr.erase_timeout = (et * 1000) / es;
1301 mmc->ssr.erase_offset = eo * 1000;
1302 }
1303 } else {
1304 debug("Invalid Allocation Unit Size.\n");
1305 }
1306
1307 return 0;
1308 }
1309
1310 /* frequency bases */
1311 /* divided by 10 to be nice to platforms without floating point */
1312 static const int fbase[] = {
1313 10000,
1314 100000,
1315 1000000,
1316 10000000,
1317 };
1318
1319 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
1320 * to platforms without floating point.
1321 */
1322 static const u8 multipliers[] = {
1323 0, /* reserved */
1324 10,
1325 12,
1326 13,
1327 15,
1328 20,
1329 25,
1330 30,
1331 35,
1332 40,
1333 45,
1334 50,
1335 55,
1336 60,
1337 70,
1338 80,
1339 };
1340
1341 static inline int bus_width(uint cap)
1342 {
1343 if (cap == MMC_MODE_8BIT)
1344 return 8;
1345 if (cap == MMC_MODE_4BIT)
1346 return 4;
1347 if (cap == MMC_MODE_1BIT)
1348 return 1;
1349 printf("invalid bus witdh capability 0x%x\n", cap);
1350 return 0;
1351 }
1352
1353 #if !CONFIG_IS_ENABLED(DM_MMC)
1354 static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
1355 {
1356 return -ENOTSUPP;
1357 }
1358
1359 static void mmc_send_init_stream(struct mmc *mmc)
1360 {
1361 }
1362
1363 static int mmc_set_ios(struct mmc *mmc)
1364 {
1365 int ret = 0;
1366
1367 if (mmc->cfg->ops->set_ios)
1368 ret = mmc->cfg->ops->set_ios(mmc);
1369
1370 return ret;
1371 }
1372 #endif
1373
1374 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1375 {
1376 if (clock > mmc->cfg->f_max)
1377 clock = mmc->cfg->f_max;
1378
1379 if (clock < mmc->cfg->f_min)
1380 clock = mmc->cfg->f_min;
1381
1382 mmc->clock = clock;
1383 mmc->clk_disable = disable;
1384
1385 return mmc_set_ios(mmc);
1386 }
1387
1388 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1389 {
1390 mmc->bus_width = width;
1391
1392 return mmc_set_ios(mmc);
1393 }
1394
1395 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1396 /*
1397 * Helper function to display the capabilities in a human-friendly
1398 * manner. The capabilities include bus width and
1399 * supported modes.
1400 */
1401 void mmc_dump_capabilities(const char *text, uint caps)
1402 {
1403 enum bus_mode mode;
1404
1405 printf("%s: widths [", text);
1406 if (caps & MMC_MODE_8BIT)
1407 printf("8, ");
1408 if (caps & MMC_MODE_4BIT)
1409 printf("4, ");
1410 if (caps & MMC_MODE_1BIT)
1411 printf("1, ");
1412 printf("\b\b] modes [");
1413 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1414 if (MMC_CAP(mode) & caps)
1415 printf("%s, ", mmc_mode_name(mode));
1416 printf("\b\b]\n");
1417 }
1418 #endif
1419
1420 struct mode_width_tuning {
1421 enum bus_mode mode;
1422 uint widths;
1423 uint tuning;
1424 };
1425
1426 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1427 {
1428 mmc->signal_voltage = signal_voltage;
1429 return mmc_set_ios(mmc);
1430 }
1431
1432 static const struct mode_width_tuning sd_modes_by_pref[] = {
1433 {
1434 .mode = UHS_SDR104,
1435 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1436 .tuning = MMC_CMD_SEND_TUNING_BLOCK
1437 },
1438 {
1439 .mode = UHS_SDR50,
1440 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1441 },
1442 {
1443 .mode = UHS_DDR50,
1444 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1445 },
1446 {
1447 .mode = UHS_SDR25,
1448 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1449 },
1450 {
1451 .mode = SD_HS,
1452 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1453 },
1454 {
1455 .mode = UHS_SDR12,
1456 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1457 },
1458 {
1459 .mode = SD_LEGACY,
1460 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1461 }
1462 };
1463
1464 #define for_each_sd_mode_by_pref(caps, mwt) \
1465 for (mwt = sd_modes_by_pref;\
1466 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1467 mwt++) \
1468 if (caps & MMC_CAP(mwt->mode))
1469
1470 static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
1471 {
1472 int err;
1473 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1474 const struct mode_width_tuning *mwt;
1475 bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1476 uint caps;
1477
1478
1479 /* Restrict card's capabilities by what the host can do */
1480 caps = card_caps & (mmc->host_caps | MMC_MODE_1BIT);
1481
1482 if (!uhs_en)
1483 caps &= ~UHS_CAPS;
1484
1485 for_each_sd_mode_by_pref(caps, mwt) {
1486 uint *w;
1487
1488 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1489 if (*w & caps & mwt->widths) {
1490 debug("trying mode %s width %d (at %d MHz)\n",
1491 mmc_mode_name(mwt->mode),
1492 bus_width(*w),
1493 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1494
1495 /* configure the bus width (card + host) */
1496 err = sd_select_bus_width(mmc, bus_width(*w));
1497 if (err)
1498 goto error;
1499 mmc_set_bus_width(mmc, bus_width(*w));
1500
1501 /* configure the bus mode (card) */
1502 err = sd_set_card_speed(mmc, mwt->mode);
1503 if (err)
1504 goto error;
1505
1506 /* configure the bus mode (host) */
1507 mmc_select_mode(mmc, mwt->mode);
1508 mmc_set_clock(mmc, mmc->tran_speed, false);
1509
1510 /* execute tuning if needed */
1511 if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1512 err = mmc_execute_tuning(mmc,
1513 mwt->tuning);
1514 if (err) {
1515 debug("tuning failed\n");
1516 goto error;
1517 }
1518 }
1519
1520 err = sd_read_ssr(mmc);
1521 if (!err)
1522 return 0;
1523
1524 printf("bad ssr\n");
1525
1526 error:
1527 /* revert to a safer bus speed */
1528 mmc_select_mode(mmc, SD_LEGACY);
1529 mmc_set_clock(mmc, mmc->tran_speed, false);
1530 }
1531 }
1532 }
1533
1534 printf("unable to select a mode\n");
1535 return -ENOTSUPP;
1536 }
1537
1538 /*
1539 * Read and compare the part of the ext csd that is constant.
1540 * This can be used to check that the transfer is working
1541 * as expected.
1542 */
1543 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1544 {
1545 int err;
1546 const u8 *ext_csd = mmc->ext_csd;
1547 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1548
1549 err = mmc_send_ext_csd(mmc, test_csd);
1550 if (err)
1551 return err;
1552
1553 /* Only compare read only fields */
1554 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1555 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1556 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1557 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1558 ext_csd[EXT_CSD_REV]
1559 == test_csd[EXT_CSD_REV] &&
1560 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1561 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1562 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1563 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1564 return 0;
1565
1566 return -EBADMSG;
1567 }
1568
1569 static const struct mode_width_tuning mmc_modes_by_pref[] = {
1570 {
1571 .mode = MMC_HS_200,
1572 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1573 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1574 },
1575 {
1576 .mode = MMC_DDR_52,
1577 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1578 },
1579 {
1580 .mode = MMC_HS_52,
1581 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1582 },
1583 {
1584 .mode = MMC_HS,
1585 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1586 },
1587 {
1588 .mode = MMC_LEGACY,
1589 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1590 }
1591 };
1592
1593 #define for_each_mmc_mode_by_pref(caps, mwt) \
1594 for (mwt = mmc_modes_by_pref;\
1595 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1596 mwt++) \
1597 if (caps & MMC_CAP(mwt->mode))
1598
1599 static const struct ext_csd_bus_width {
1600 uint cap;
1601 bool is_ddr;
1602 uint ext_csd_bits;
1603 } ext_csd_bus_width[] = {
1604 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1605 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1606 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1607 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1608 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
1609 };
1610
1611 #define for_each_supported_width(caps, ddr, ecbv) \
1612 for (ecbv = ext_csd_bus_width;\
1613 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
1614 ecbv++) \
1615 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
1616
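/*
 * Try the (e)MMC bus modes in order of preference and, for each mode, the
 * supported bus widths from widest to narrowest. For every candidate the
 * card and host are reconfigured and the EXT_CSD is re-read and compared
 * against the cached copy to validate the transfer; on failure the bus is
 * dropped back to 1-bit legacy mode and the next candidate is tried.
 */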
1617 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
1618 {
1619 int err;
1620 const struct mode_width_tuning *mwt;
1621 const struct ext_csd_bus_width *ecbw;
1622
1623 /* Restrict card's capabilities by what the host can do */
1624 card_caps &= (mmc->host_caps | MMC_MODE_1BIT);
1625
1626 /* Only version 4 of MMC supports wider bus widths */
1627 if (mmc->version < MMC_VERSION_4)
1628 return 0;
1629
1630 if (!mmc->ext_csd) {
1631 debug("No ext_csd found!\n"); /* this should enver happen */
1632 return -ENOTSUPP;
1633 }
1634
1635 mmc_set_clock(mmc, mmc->legacy_speed, false);
1636
1637 for_each_mmc_mode_by_pref(card_caps, mwt) {
1638 for_each_supported_width(card_caps & mwt->widths,
1639 mmc_is_mode_ddr(mwt->mode), ecbw) {
1640 debug("trying mode %s width %d (at %d MHz)\n",
1641 mmc_mode_name(mwt->mode),
1642 bus_width(ecbw->cap),
1643 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1644 /* configure the bus width (card + host) */
1645 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1646 EXT_CSD_BUS_WIDTH,
1647 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
1648 if (err)
1649 goto error;
1650 mmc_set_bus_width(mmc, bus_width(ecbw->cap));
1651
1652 /* configure the bus speed (card) */
1653 err = mmc_set_card_speed(mmc, mwt->mode);
1654 if (err)
1655 goto error;
1656
1657 /*
1658 * configure the bus width AND the ddr mode (card)
1659 * The host side will be taken care of in the next step
1660 */
1661 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
1662 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1663 EXT_CSD_BUS_WIDTH,
1664 ecbw->ext_csd_bits);
1665 if (err)
1666 goto error;
1667 }
1668
1669 /* configure the bus mode (host) */
1670 mmc_select_mode(mmc, mwt->mode);
1671 mmc_set_clock(mmc, mmc->tran_speed, false);
1672
1673 /* execute tuning if needed */
1674 if (mwt->tuning) {
1675 err = mmc_execute_tuning(mmc, mwt->tuning);
1676 if (err) {
1677 debug("tuning failed\n");
1678 goto error;
1679 }
1680 }
1681
1682 /* do a transfer to check the configuration */
1683 err = mmc_read_and_compare_ext_csd(mmc);
1684 if (!err)
1685 return 0;
1686 error:
1687 /* if an error occurred, revert to a safer bus mode */
1688 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1689 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
1690 mmc_select_mode(mmc, MMC_LEGACY);
1691 mmc_set_bus_width(mmc, 1);
1692 }
1693 }
1694
1695 printf("unable to select a mode\n");
1696
1697 return -ENOTSUPP;
1698 }
1699
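/*
 * For version 4+ (e)MMC devices, read the EXT_CSD and use it to refine the
 * version, the capacities (user, boot, RPMB and GP partitions), partition
 * support flags, erase group size and write reliability settings.
 */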
1700 static int mmc_startup_v4(struct mmc *mmc)
1701 {
1702 int err, i;
1703 u64 capacity;
1704 bool has_parts = false;
1705 bool part_completed;
1706 u8 *ext_csd;
1707
1708 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
1709 return 0;
1710
1711 ext_csd = malloc_cache_aligned(MMC_MAX_BLOCK_LEN);
1712 if (!ext_csd)
1713 return -ENOMEM;
1714
1715 mmc->ext_csd = ext_csd;
1716
1717 /* check ext_csd version and capacity */
1718 err = mmc_send_ext_csd(mmc, ext_csd);
1719 if (err)
1720 return err;
1721 if (ext_csd[EXT_CSD_REV] >= 2) {
1722 /*
1723 * According to the JEDEC Standard, the value of
1724 * ext_csd's capacity is valid if the value is more
1725 * than 2GB
1726 */
1727 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
1728 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
1729 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
1730 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
1731 capacity *= MMC_MAX_BLOCK_LEN;
1732 if ((capacity >> 20) > 2 * 1024)
1733 mmc->capacity_user = capacity;
1734 }
1735
1736 switch (ext_csd[EXT_CSD_REV]) {
1737 case 1:
1738 mmc->version = MMC_VERSION_4_1;
1739 break;
1740 case 2:
1741 mmc->version = MMC_VERSION_4_2;
1742 break;
1743 case 3:
1744 mmc->version = MMC_VERSION_4_3;
1745 break;
1746 case 5:
1747 mmc->version = MMC_VERSION_4_41;
1748 break;
1749 case 6:
1750 mmc->version = MMC_VERSION_4_5;
1751 break;
1752 case 7:
1753 mmc->version = MMC_VERSION_5_0;
1754 break;
1755 case 8:
1756 mmc->version = MMC_VERSION_5_1;
1757 break;
1758 }
1759
1760 /* The partition data may be non-zero but it is only
1761 * effective if PARTITION_SETTING_COMPLETED is set in
1762 * EXT_CSD, so ignore any data if this bit is not set,
1763 * except for enabling the high-capacity group size
1764 * definition (see below).
1765 */
1766 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
1767 EXT_CSD_PARTITION_SETTING_COMPLETED);
1768
1769 /* store the partition info of emmc */
1770 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
1771 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
1772 ext_csd[EXT_CSD_BOOT_MULT])
1773 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
1774 if (part_completed &&
1775 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
1776 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
1777
1778 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
1779
1780 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
1781
1782 for (i = 0; i < 4; i++) {
1783 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
1784 uint mult = (ext_csd[idx + 2] << 16) +
1785 (ext_csd[idx + 1] << 8) + ext_csd[idx];
1786 if (mult)
1787 has_parts = true;
1788 if (!part_completed)
1789 continue;
1790 mmc->capacity_gp[i] = mult;
1791 mmc->capacity_gp[i] *=
1792 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
1793 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1794 mmc->capacity_gp[i] <<= 19;
1795 }
1796
1797 if (part_completed) {
1798 mmc->enh_user_size =
1799 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
1800 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
1801 ext_csd[EXT_CSD_ENH_SIZE_MULT];
1802 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
1803 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1804 mmc->enh_user_size <<= 19;
1805 mmc->enh_user_start =
1806 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
1807 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
1808 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
1809 ext_csd[EXT_CSD_ENH_START_ADDR];
1810 if (mmc->high_capacity)
1811 mmc->enh_user_start <<= 9;
1812 }
1813
1814 /*
1815 * Host needs to enable ERASE_GRP_DEF bit if device is
1816 * partitioned. This bit will be lost every time after a reset
1817 * or power off. This will affect erase size.
1818 */
1819 if (part_completed)
1820 has_parts = true;
1821 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
1822 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
1823 has_parts = true;
1824 if (has_parts) {
1825 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1826 EXT_CSD_ERASE_GROUP_DEF, 1);
1827
1828 if (err)
1829 return err;
1830
1831 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1832 }
1833
1834 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
1835 /* Read out group size from ext_csd */
1836 mmc->erase_grp_size =
1837 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1838 /*
1839 * if high capacity and partition setting completed
1840 * SEC_COUNT is valid even if it is smaller than 2 GiB
1841 * JEDEC Standard JESD84-B45, 6.2.4
1842 */
1843 if (mmc->high_capacity && part_completed) {
1844 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
1845 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
1846 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
1847 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
1848 capacity *= MMC_MAX_BLOCK_LEN;
1849 mmc->capacity_user = capacity;
1850 }
1851 } else {
1852 /* Calculate the group size from the csd value. */
1853 int erase_gsz, erase_gmul;
1854
1855 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
1856 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
1857 mmc->erase_grp_size = (erase_gsz + 1)
1858 * (erase_gmul + 1);
1859 }
1860
1861 mmc->hc_wp_grp_size = 1024
1862 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1863 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1864
1865 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1866
1867 return 0;
1868 }
1869
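/*
 * Card identification and configuration: read the CID and CSD, move the
 * card to the transfer state, then query its capabilities and pick the
 * fastest supported bus mode and width.
 */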
1870 static int mmc_startup(struct mmc *mmc)
1871 {
1872 int err, i;
1873 uint mult, freq;
1874 u64 cmult, csize;
1875 struct mmc_cmd cmd;
1876 struct blk_desc *bdesc;
1877
1878 #ifdef CONFIG_MMC_SPI_CRC_ON
1879 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
1880 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
1881 cmd.resp_type = MMC_RSP_R1;
1882 cmd.cmdarg = 1;
1883 err = mmc_send_cmd(mmc, &cmd, NULL);
1884
1885 if (err)
1886 return err;
1887 }
1888 #endif
1889
1890 /* Put the Card in Identify Mode */
1891 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
1892 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
1893 cmd.resp_type = MMC_RSP_R2;
1894 cmd.cmdarg = 0;
1895
1896 err = mmc_send_cmd(mmc, &cmd, NULL);
1897
1898 if (err)
1899 return err;
1900
1901 memcpy(mmc->cid, cmd.response, 16);
1902
1903 /*
1904 * For MMC cards, set the Relative Address.
1905 * For SD cards, get the Relative Address.
1906 * This also puts the cards into Standby State
1907 */
1908 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
1909 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
1910 cmd.cmdarg = mmc->rca << 16;
1911 cmd.resp_type = MMC_RSP_R6;
1912
1913 err = mmc_send_cmd(mmc, &cmd, NULL);
1914
1915 if (err)
1916 return err;
1917
1918 if (IS_SD(mmc))
1919 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
1920 }
1921
1922 /* Get the Card-Specific Data */
1923 cmd.cmdidx = MMC_CMD_SEND_CSD;
1924 cmd.resp_type = MMC_RSP_R2;
1925 cmd.cmdarg = mmc->rca << 16;
1926
1927 err = mmc_send_cmd(mmc, &cmd, NULL);
1928
1929 if (err)
1930 return err;
1931
1932 mmc->csd[0] = cmd.response[0];
1933 mmc->csd[1] = cmd.response[1];
1934 mmc->csd[2] = cmd.response[2];
1935 mmc->csd[3] = cmd.response[3];
1936
1937 if (mmc->version == MMC_VERSION_UNKNOWN) {
1938 int version = (cmd.response[0] >> 26) & 0xf;
1939
1940 switch (version) {
1941 case 0:
1942 mmc->version = MMC_VERSION_1_2;
1943 break;
1944 case 1:
1945 mmc->version = MMC_VERSION_1_4;
1946 break;
1947 case 2:
1948 mmc->version = MMC_VERSION_2_2;
1949 break;
1950 case 3:
1951 mmc->version = MMC_VERSION_3;
1952 break;
1953 case 4:
1954 mmc->version = MMC_VERSION_4;
1955 break;
1956 default:
1957 mmc->version = MMC_VERSION_1_2;
1958 break;
1959 }
1960 }
1961
1962 /* divide frequency by 10, since the mults are 10x bigger */
1963 freq = fbase[(cmd.response[0] & 0x7)];
1964 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
1965
1966 mmc->legacy_speed = freq * mult;
1967 mmc_select_mode(mmc, MMC_LEGACY);
1968
1969 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
1970 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
1971
1972 if (IS_SD(mmc))
1973 mmc->write_bl_len = mmc->read_bl_len;
1974 else
1975 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
1976
1977 if (mmc->high_capacity) {
1978 csize = (mmc->csd[1] & 0x3f) << 16
1979 | (mmc->csd[2] & 0xffff0000) >> 16;
1980 cmult = 8;
1981 } else {
1982 csize = (mmc->csd[1] & 0x3ff) << 2
1983 | (mmc->csd[2] & 0xc0000000) >> 30;
1984 cmult = (mmc->csd[2] & 0x00038000) >> 15;
1985 }
1986
1987 mmc->capacity_user = (csize + 1) << (cmult + 2);
1988 mmc->capacity_user *= mmc->read_bl_len;
1989 mmc->capacity_boot = 0;
1990 mmc->capacity_rpmb = 0;
1991 for (i = 0; i < 4; i++)
1992 mmc->capacity_gp[i] = 0;
1993
1994 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
1995 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
1996
1997 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
1998 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
1999
2000 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2001 cmd.cmdidx = MMC_CMD_SET_DSR;
2002 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2003 cmd.resp_type = MMC_RSP_NONE;
2004 if (mmc_send_cmd(mmc, &cmd, NULL))
2005 printf("MMC: SET_DSR failed\n");
2006 }
2007
2008 /* Select the card, and put it into Transfer Mode */
2009 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2010 cmd.cmdidx = MMC_CMD_SELECT_CARD;
2011 cmd.resp_type = MMC_RSP_R1;
2012 cmd.cmdarg = mmc->rca << 16;
2013 err = mmc_send_cmd(mmc, &cmd, NULL);
2014
2015 if (err)
2016 return err;
2017 }
2018
2019 /*
2020 * For SD, the erase group is always one sector
2021 */
2022 mmc->erase_grp_size = 1;
2023 mmc->part_config = MMCPART_NOAVAILABLE;
2024
2025 err = mmc_startup_v4(mmc);
2026 if (err)
2027 return err;
2028
2029 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2030 if (err)
2031 return err;
2032
2033 if (IS_SD(mmc)) {
2034 err = sd_get_capabilities(mmc);
2035 if (err)
2036 return err;
2037 err = sd_select_mode_and_width(mmc, mmc->card_caps);
2038 } else {
2039 err = mmc_get_capabilities(mmc);
2040 if (err)
2041 return err;
2042 err = mmc_select_mode_and_width(mmc, mmc->card_caps);
2043 }
2044
2045 if (err)
2046 return err;
2047
2048 mmc->best_mode = mmc->selected_mode;
2049
2050 /* Fix the block length for DDR mode */
2051 if (mmc->ddr_mode) {
2052 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2053 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2054 }
2055
2056 /* fill in device description */
2057 bdesc = mmc_get_blk_desc(mmc);
2058 bdesc->lun = 0;
2059 bdesc->hwpart = 0;
2060 bdesc->type = 0;
2061 bdesc->blksz = mmc->read_bl_len;
2062 bdesc->log2blksz = LOG2(bdesc->blksz);
2063 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2064 #if !defined(CONFIG_SPL_BUILD) || \
2065 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2066 !defined(CONFIG_USE_TINY_PRINTF))
2067 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2068 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2069 (mmc->cid[3] >> 16) & 0xffff);
2070 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2071 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2072 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2073 (mmc->cid[2] >> 24) & 0xff);
2074 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2075 (mmc->cid[2] >> 16) & 0xf);
2076 #else
2077 bdesc->vendor[0] = 0;
2078 bdesc->product[0] = 0;
2079 bdesc->revision[0] = 0;
2080 #endif
2081 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
2082 part_init(bdesc);
2083 #endif
2084
2085 return 0;
2086 }
2087
2088 static int mmc_send_if_cond(struct mmc *mmc)
2089 {
2090 struct mmc_cmd cmd;
2091 int err;
2092
2093 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2094 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2095 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2096 cmd.resp_type = MMC_RSP_R7;
2097
2098 err = mmc_send_cmd(mmc, &cmd, NULL);
2099
2100 if (err)
2101 return err;
2102
2103 if ((cmd.response[0] & 0xff) != 0xaa)
2104 return -EOPNOTSUPP;
2105 else
2106 mmc->version = SD_VERSION_2;
2107
2108 return 0;
2109 }
2110
2111 #if !CONFIG_IS_ENABLED(DM_MMC)
2112 /* board-specific MMC power initializations. */
2113 __weak void board_mmc_power_init(void)
2114 {
2115 }
2116 #endif
2117
2118 static int mmc_power_init(struct mmc *mmc)
2119 {
2120 #if CONFIG_IS_ENABLED(DM_MMC)
2121 #if CONFIG_IS_ENABLED(DM_REGULATOR)
2122 int ret;
2123
2124 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
2125 &mmc->vmmc_supply);
2126 if (ret)
2127 debug("%s: No vmmc supply\n", mmc->dev->name);
2128
2129 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
2130 &mmc->vqmmc_supply);
2131 if (ret)
2132 debug("%s: No vqmmc supply\n", mmc->dev->name);
2133 #endif
2134 #else /* !CONFIG_DM_MMC */
2135 /*
2136 * Driver model should use a regulator, as above, rather than calling
2137 * out to board code.
2138 */
2139 board_mmc_power_init();
2140 #endif
2141 return 0;
2142 }
2143
2144 /*
2145 * put the host in the initial state:
2146 * - turn on Vdd (card power supply)
2147 * - configure the bus width and clock to minimal values
2148 */
2149 static void mmc_set_initial_state(struct mmc *mmc)
2150 {
2151 int err;
2152
2153 /* First try to set 3.3V. If it fails set to 1.8V */
2154 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2155 if (err != 0)
2156 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2157 if (err != 0)
2158 printf("mmc: failed to set signal voltage\n");
2159
2160 mmc_select_mode(mmc, MMC_LEGACY);
2161 mmc_set_bus_width(mmc, 1);
2162 mmc_set_clock(mmc, 0, false);
2163 }
2164
2165 static int mmc_power_on(struct mmc *mmc)
2166 {
2167 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2168 if (mmc->vmmc_supply) {
2169 int ret = regulator_set_enable(mmc->vmmc_supply, true);
2170
2171 if (ret) {
2172 puts("Error enabling VMMC supply\n");
2173 return ret;
2174 }
2175 }
2176 #endif
2177 return 0;
2178 }
2179
2180 static int mmc_power_off(struct mmc *mmc)
2181 {
2182 mmc_set_clock(mmc, 1, true);
2183 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2184 if (mmc->vmmc_supply) {
2185 int ret = regulator_set_enable(mmc->vmmc_supply, false);
2186
2187 if (ret) {
2188 debug("Error disabling VMMC supply\n");
2189 return ret;
2190 }
2191 }
2192 #endif
2193 return 0;
2194 }
2195
2196 static int mmc_power_cycle(struct mmc *mmc)
2197 {
2198 int ret;
2199
2200 ret = mmc_power_off(mmc);
2201 if (ret)
2202 return ret;
2203 /*
2204 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
2205 * to be on the safer side.
2206 */
2207 udelay(2000);
2208 return mmc_power_on(mmc);
2209 }
2210
2211 int mmc_start_init(struct mmc *mmc)
2212 {
2213 bool no_card;
2214 bool uhs_en = supports_uhs(mmc->cfg->host_caps);
2215 int err;
2216
2217 mmc->host_caps = mmc->cfg->host_caps;
2218
2219 /* we pretend there's no card when init is NULL */
2220 no_card = mmc_getcd(mmc) == 0;
2221 #if !CONFIG_IS_ENABLED(DM_MMC)
2222 no_card = no_card || (mmc->cfg->ops->init == NULL);
2223 #endif
2224 if (no_card) {
2225 mmc->has_init = 0;
2226 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2227 printf("MMC: no card present\n");
2228 #endif
2229 return -ENOMEDIUM;
2230 }
2231
2232 if (mmc->has_init)
2233 return 0;
2234
2235 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
2236 mmc_adapter_card_type_ident();
2237 #endif
2238 err = mmc_power_init(mmc);
2239 if (err)
2240 return err;
2241
2242 err = mmc_power_cycle(mmc);
2243 if (err) {
2244 /*
2245 * if power cycling is not supported, we should not try
2246 * to use the UHS modes, because we wouldn't be able to
2247 * recover from an error during the UHS initialization.
2248 */
2249 debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
2250 uhs_en = false;
2251 mmc->host_caps &= ~UHS_CAPS;
2252 err = mmc_power_on(mmc);
2253 }
2254 if (err)
2255 return err;
2256
2257 #if CONFIG_IS_ENABLED(DM_MMC)
2258 /* The device has already been probed ready for use */
2259 #else
2260 /* made sure it's not NULL earlier */
2261 err = mmc->cfg->ops->init(mmc);
2262 if (err)
2263 return err;
2264 #endif
2265 mmc->ddr_mode = 0;
2266
2267 retry:
2268 mmc_set_initial_state(mmc);
2269 mmc_send_init_stream(mmc);
2270
2271 /* Reset the Card */
2272 err = mmc_go_idle(mmc);
2273
2274 if (err)
2275 return err;
2276
2277 /* The internal partition resets to user partition (0) at every CMD0 */
2278 mmc_get_blk_desc(mmc)->hwpart = 0;
2279
2280 /* Test for SD version 2 */
2281 err = mmc_send_if_cond(mmc);
2282
2283 /* Now try to get the SD card's operating condition */
2284 err = sd_send_op_cond(mmc, uhs_en);
2285 if (err && uhs_en) {
2286 uhs_en = false;
2287 mmc_power_cycle(mmc);
2288 goto retry;
2289 }
2290
2291 /* If the command timed out, we check for an MMC card */
2292 if (err == -ETIMEDOUT) {
2293 err = mmc_send_op_cond(mmc);
2294
2295 if (err) {
2296 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2297 printf("Card did not respond to voltage select!\n");
2298 #endif
2299 return -EOPNOTSUPP;
2300 }
2301 }
2302
2303 if (!err)
2304 mmc->init_in_progress = 1;
2305
2306 return err;
2307 }
2308
2309 static int mmc_complete_init(struct mmc *mmc)
2310 {
2311 int err = 0;
2312
2313 mmc->init_in_progress = 0;
2314 if (mmc->op_cond_pending)
2315 err = mmc_complete_op_cond(mmc);
2316
2317 if (!err)
2318 err = mmc_startup(mmc);
2319 if (err)
2320 mmc->has_init = 0;
2321 else
2322 mmc->has_init = 1;
2323 return err;
2324 }
2325
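/*
 * Run the full init sequence (mmc_start_init() followed by
 * mmc_complete_init()) unless initialization has already been done or is
 * in progress. A typical caller (sketch, not taken from this file) might
 * look like:
 *
 *	struct mmc *mmc = find_mmc_device(0);
 *	if (mmc && !mmc_init(mmc))
 *		printf("capacity: %llu bytes\n", mmc->capacity);
 */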
2326 int mmc_init(struct mmc *mmc)
2327 {
2328 int err = 0;
2329 __maybe_unused unsigned start;
2330 #if CONFIG_IS_ENABLED(DM_MMC)
2331 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2332
2333 upriv->mmc = mmc;
2334 #endif
2335 if (mmc->has_init)
2336 return 0;
2337
2338 start = get_timer(0);
2339
2340 if (!mmc->init_in_progress)
2341 err = mmc_start_init(mmc);
2342
2343 if (!err)
2344 err = mmc_complete_init(mmc);
2345 if (err)
2346 printf("%s: %d, time %lu\n", __func__, err, get_timer(start));
2347
2348 return err;
2349 }
2350
2351 int mmc_set_dsr(struct mmc *mmc, u16 val)
2352 {
2353 mmc->dsr = val;
2354 return 0;
2355 }
2356
2357 /* CPU-specific MMC initializations */
2358 __weak int cpu_mmc_init(bd_t *bis)
2359 {
2360 return -1;
2361 }
2362
2363 /* board-specific MMC initializations. */
2364 __weak int board_mmc_init(bd_t *bis)
2365 {
2366 return -1;
2367 }
2368
2369 void mmc_set_preinit(struct mmc *mmc, int preinit)
2370 {
2371 mmc->preinit = preinit;
2372 }
2373
2374 #if CONFIG_IS_ENABLED(DM_MMC) && defined(CONFIG_SPL_BUILD)
2375 static int mmc_probe(bd_t *bis)
2376 {
2377 return 0;
2378 }
2379 #elif CONFIG_IS_ENABLED(DM_MMC)
2380 static int mmc_probe(bd_t *bis)
2381 {
2382 int ret, i;
2383 struct uclass *uc;
2384 struct udevice *dev;
2385
2386 ret = uclass_get(UCLASS_MMC, &uc);
2387 if (ret)
2388 return ret;
2389
2390 /*
2391 * Try to add them in sequence order. Really with driver model we
2392 * should allow holes, but the current MMC list does not allow that.
2393 * So if we request 0, 1, 3 we will get 0, 1, 2.
2394 */
2395 for (i = 0; ; i++) {
2396 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
2397 if (ret == -ENODEV)
2398 break;
2399 }
2400 uclass_foreach_dev(dev, uc) {
2401 ret = device_probe(dev);
2402 if (ret)
2403 printf("%s - probe failed: %d\n", dev->name, ret);
2404 }
2405
2406 return 0;
2407 }
2408 #else
2409 static int mmc_probe(bd_t *bis)
2410 {
2411 if (board_mmc_init(bis) < 0)
2412 cpu_mmc_init(bis);
2413
2414 return 0;
2415 }
2416 #endif
2417
2418 int mmc_initialize(bd_t *bis)
2419 {
2420 static int initialized = 0;
2421 int ret;
2422 if (initialized) /* Avoid initializing mmc multiple times */
2423 return 0;
2424 initialized = 1;
2425
2426 #if !CONFIG_IS_ENABLED(BLK)
2427 #if !CONFIG_IS_ENABLED(MMC_TINY)
2428 mmc_list_init();
2429 #endif
2430 #endif
2431 ret = mmc_probe(bis);
2432 if (ret)
2433 return ret;
2434
2435 #ifndef CONFIG_SPL_BUILD
2436 print_mmc_devices(',');
2437 #endif
2438
2439 mmc_do_preinit();
2440 return 0;
2441 }
2442
2443 #ifdef CONFIG_CMD_BKOPS_ENABLE
2444 int mmc_set_bkops_enable(struct mmc *mmc)
2445 {
2446 int err;
2447 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2448
2449 err = mmc_send_ext_csd(mmc, ext_csd);
2450 if (err) {
2451 puts("Could not get ext_csd register values\n");
2452 return err;
2453 }
2454
2455 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
2456 puts("Background operations not supported on device\n");
2457 return -EMEDIUMTYPE;
2458 }
2459
2460 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
2461 puts("Background operations already enabled\n");
2462 return 0;
2463 }
2464
2465 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
2466 if (err) {
2467 puts("Failed to enable manual background operations\n");
2468 return err;
2469 }
2470
2471 puts("Enabled manual background operations\n");
2472
2473 return 0;
2474 }
2475 #endif