1 /*
2 * Copyright 2008, Freescale Semiconductor, Inc
3 * Andy Fleming
4 *
5 * Based vaguely on the Linux code
6 *
7 * SPDX-License-Identifier: GPL-2.0+
8 */
9
10 #include <config.h>
11 #include <common.h>
12 #include <command.h>
13 #include <dm.h>
14 #include <dm/device-internal.h>
15 #include <errno.h>
16 #include <mmc.h>
17 #include <part.h>
18 #include <power/regulator.h>
19 #include <malloc.h>
20 #include <memalign.h>
21 #include <linux/list.h>
22 #include <div64.h>
23 #include "mmc_private.h"
24
25 static const unsigned int sd_au_size[] = {
26 0, SZ_16K / 512, SZ_32K / 512,
27 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
28 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
29 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
30 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512, SZ_64M / 512,
31 };
32
33 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
34 static int mmc_power_cycle(struct mmc *mmc);
35
36 #if CONFIG_IS_ENABLED(MMC_TINY)
37 static struct mmc mmc_static;
38 struct mmc *find_mmc_device(int dev_num)
39 {
40 return &mmc_static;
41 }
42
43 void mmc_do_preinit(void)
44 {
45 struct mmc *m = &mmc_static;
46 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
47 mmc_set_preinit(m, 1);
48 #endif
49 if (m->preinit)
50 mmc_start_init(m);
51 }
52
53 struct blk_desc *mmc_get_blk_desc(struct mmc *mmc)
54 {
55 return &mmc->block_dev;
56 }
57 #endif
58
59 #if !CONFIG_IS_ENABLED(DM_MMC)
60 __weak int board_mmc_getwp(struct mmc *mmc)
61 {
62 return -1;
63 }
64
65 int mmc_getwp(struct mmc *mmc)
66 {
67 int wp;
68
69 wp = board_mmc_getwp(mmc);
70
71 if (wp < 0) {
72 if (mmc->cfg->ops->getwp)
73 wp = mmc->cfg->ops->getwp(mmc);
74 else
75 wp = 0;
76 }
77
78 return wp;
79 }
80
81 __weak int board_mmc_getcd(struct mmc *mmc)
82 {
83 return -1;
84 }
85 #endif
86
87 #ifdef CONFIG_MMC_TRACE
88 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
89 {
90 printf("CMD_SEND:%d\n", cmd->cmdidx);
91 printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
92 }
93
94 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
95 {
96 int i;
97 u8 *ptr;
98
99 if (ret) {
100 printf("\t\tRET\t\t\t %d\n", ret);
101 } else {
102 switch (cmd->resp_type) {
103 case MMC_RSP_NONE:
104 printf("\t\tMMC_RSP_NONE\n");
105 break;
106 case MMC_RSP_R1:
107 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
108 cmd->response[0]);
109 break;
110 case MMC_RSP_R1b:
111 printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
112 cmd->response[0]);
113 break;
114 case MMC_RSP_R2:
115 printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
116 cmd->response[0]);
117 printf("\t\t \t\t 0x%08X \n",
118 cmd->response[1]);
119 printf("\t\t \t\t 0x%08X \n",
120 cmd->response[2]);
121 printf("\t\t \t\t 0x%08X \n",
122 cmd->response[3]);
123 printf("\n");
124 printf("\t\t\t\t\tDUMPING DATA\n");
125 for (i = 0; i < 4; i++) {
126 int j;
127 printf("\t\t\t\t\t%03d - ", i*4);
128 ptr = (u8 *)&cmd->response[i];
129 ptr += 3;
130 for (j = 0; j < 4; j++)
131 printf("%02X ", *ptr--);
132 printf("\n");
133 }
134 break;
135 case MMC_RSP_R3:
136 printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
137 cmd->response[0]);
138 break;
139 default:
140 printf("\t\tERROR MMC rsp not supported\n");
141 break;
142 }
143 }
144 }
145
146 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
147 {
148 int status;
149
150 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
151 printf("CURR STATE:%d\n", status);
152 }
153 #endif
154
155 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
156 const char *mmc_mode_name(enum bus_mode mode)
157 {
158 static const char *const names[] = {
159 [MMC_LEGACY] = "MMC legacy",
160 [SD_LEGACY] = "SD Legacy",
161 [MMC_HS] = "MMC High Speed (26MHz)",
162 [SD_HS] = "SD High Speed (50MHz)",
163 [UHS_SDR12] = "UHS SDR12 (25MHz)",
164 [UHS_SDR25] = "UHS SDR25 (50MHz)",
165 [UHS_SDR50] = "UHS SDR50 (100MHz)",
166 [UHS_SDR104] = "UHS SDR104 (208MHz)",
167 [UHS_DDR50] = "UHS DDR50 (50MHz)",
168 [MMC_HS_52] = "MMC High Speed (52MHz)",
169 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
170 [MMC_HS_200] = "HS200 (200MHz)",
171 };
172
173 if (mode >= MMC_MODES_END)
174 return "Unknown mode";
175 else
176 return names[mode];
177 }
178 #endif
179
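/*
 * Return the nominal bus clock in Hz for the given bus mode.
 * MMC_LEGACY uses the legacy speed read from the card's CSD.
 */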
180 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
181 {
182 static const int freqs[] = {
183 [SD_LEGACY] = 25000000,
184 [MMC_HS] = 26000000,
185 [SD_HS] = 50000000,
186 [UHS_SDR12] = 25000000,
187 [UHS_SDR25] = 50000000,
188 [UHS_SDR50] = 100000000,
189 [UHS_SDR104] = 208000000,
190 [UHS_DDR50] = 50000000,
191 [MMC_HS_52] = 52000000,
192 [MMC_DDR_52] = 52000000,
193 [MMC_HS_200] = 200000000,
194 };
195
196 if (mode == MMC_LEGACY)
197 return mmc->legacy_speed;
198 else if (mode >= MMC_MODES_END)
199 return 0;
200 else
201 return freqs[mode];
202 }
203
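/*
 * Record the selected bus mode and derive the transfer clock and
 * DDR flag from it; the host is reconfigured separately.
 */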
204 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
205 {
206 mmc->selected_mode = mode;
207 mmc->tran_speed = mmc_mode2freq(mmc, mode);
208 mmc->ddr_mode = mmc_is_mode_ddr(mode);
209 debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
210 mmc->tran_speed / 1000000);
211 return 0;
212 }
213
214 #if !CONFIG_IS_ENABLED(DM_MMC)
215 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
216 {
217 int ret;
218
219 mmmc_trace_before_send(mmc, cmd);
220 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
221 mmmc_trace_after_send(mmc, cmd, ret);
222
223 return ret;
224 }
225 #endif
226
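/*
 * Poll the card with CMD13 (SEND_STATUS) until it reports ready-for-data
 * and has left the programming state, or until 'timeout' ms have elapsed.
 */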
227 int mmc_send_status(struct mmc *mmc, int timeout)
228 {
229 struct mmc_cmd cmd;
230 int err, retries = 5;
231
232 cmd.cmdidx = MMC_CMD_SEND_STATUS;
233 cmd.resp_type = MMC_RSP_R1;
234 if (!mmc_host_is_spi(mmc))
235 cmd.cmdarg = mmc->rca << 16;
236
237 while (1) {
238 err = mmc_send_cmd(mmc, &cmd, NULL);
239 if (!err) {
240 if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
241 (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
242 MMC_STATE_PRG)
243 break;
244
245 if (cmd.response[0] & MMC_STATUS_MASK) {
246 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
247 printf("Status Error: 0x%08X\n",
248 cmd.response[0]);
249 #endif
250 return -ECOMM;
251 }
252 } else if (--retries < 0)
253 return err;
254
255 if (timeout-- <= 0)
256 break;
257
258 udelay(1000);
259 }
260
261 mmc_trace_state(mmc, &cmd);
262 if (timeout <= 0) {
263 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
264 printf("Timeout waiting for card to become ready\n");
265 #endif
266 return -ETIMEDOUT;
267 }
268
269 return 0;
270 }
271
272 int mmc_set_blocklen(struct mmc *mmc, int len)
273 {
274 struct mmc_cmd cmd;
275
276 if (mmc->ddr_mode)
277 return 0;
278
279 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
280 cmd.resp_type = MMC_RSP_R1;
281 cmd.cmdarg = len;
282
283 return mmc_send_cmd(mmc, &cmd, NULL);
284 }
285
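/*
 * Read 'blkcnt' blocks starting at 'start' using CMD17/CMD18, issuing
 * CMD12 (STOP_TRANSMISSION) after a multiple-block read. Returns the
 * number of blocks read, or 0 on error.
 */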
286 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
287 lbaint_t blkcnt)
288 {
289 struct mmc_cmd cmd;
290 struct mmc_data data;
291
292 if (blkcnt > 1)
293 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
294 else
295 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
296
297 if (mmc->high_capacity)
298 cmd.cmdarg = start;
299 else
300 cmd.cmdarg = start * mmc->read_bl_len;
301
302 cmd.resp_type = MMC_RSP_R1;
303
304 data.dest = dst;
305 data.blocks = blkcnt;
306 data.blocksize = mmc->read_bl_len;
307 data.flags = MMC_DATA_READ;
308
309 if (mmc_send_cmd(mmc, &cmd, &data))
310 return 0;
311
312 if (blkcnt > 1) {
313 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
314 cmd.cmdarg = 0;
315 cmd.resp_type = MMC_RSP_R1b;
316 if (mmc_send_cmd(mmc, &cmd, NULL)) {
317 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
318 printf("mmc failed to send stop cmd\n");
319 #endif
320 return 0;
321 }
322 }
323
324 return blkcnt;
325 }
326
327 #if CONFIG_IS_ENABLED(BLK)
328 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
329 #else
330 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
331 void *dst)
332 #endif
333 {
334 #if CONFIG_IS_ENABLED(BLK)
335 struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
336 #endif
337 int dev_num = block_dev->devnum;
338 int err;
339 lbaint_t cur, blocks_todo = blkcnt;
340
341 if (blkcnt == 0)
342 return 0;
343
344 struct mmc *mmc = find_mmc_device(dev_num);
345 if (!mmc)
346 return 0;
347
348 if (CONFIG_IS_ENABLED(MMC_TINY))
349 err = mmc_switch_part(mmc, block_dev->hwpart);
350 else
351 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
352
353 if (err < 0)
354 return 0;
355
356 if ((start + blkcnt) > block_dev->lba) {
357 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
358 printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
359 start + blkcnt, block_dev->lba);
360 #endif
361 return 0;
362 }
363
364 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
365 debug("%s: Failed to set blocklen\n", __func__);
366 return 0;
367 }
368
369 do {
370 cur = (blocks_todo > mmc->cfg->b_max) ?
371 mmc->cfg->b_max : blocks_todo;
372 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
373 debug("%s: Failed to read blocks\n", __func__);
374 return 0;
375 }
376 blocks_todo -= cur;
377 start += cur;
378 dst += cur * mmc->read_bl_len;
379 } while (blocks_todo > 0);
380
381 return blkcnt;
382 }
383
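/* Reset the card to the idle state with CMD0 (GO_IDLE_STATE). */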
384 static int mmc_go_idle(struct mmc *mmc)
385 {
386 struct mmc_cmd cmd;
387 int err;
388
389 udelay(1000);
390
391 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
392 cmd.cmdarg = 0;
393 cmd.resp_type = MMC_RSP_NONE;
394
395 err = mmc_send_cmd(mmc, &cmd, NULL);
396
397 if (err)
398 return err;
399
400 udelay(2000);
401
402 return 0;
403 }
404
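/*
 * Negotiate the operating voltage with an SD card by looping on
 * CMD55 + ACMD41 until the card leaves the busy state; also detects
 * high-capacity (SDHC/SDXC) cards via the OCR HCS bit.
 */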
405 static int sd_send_op_cond(struct mmc *mmc)
406 {
407 int timeout = 1000;
408 int err;
409 struct mmc_cmd cmd;
410
411 while (1) {
412 cmd.cmdidx = MMC_CMD_APP_CMD;
413 cmd.resp_type = MMC_RSP_R1;
414 cmd.cmdarg = 0;
415
416 err = mmc_send_cmd(mmc, &cmd, NULL);
417
418 if (err)
419 return err;
420
421 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
422 cmd.resp_type = MMC_RSP_R3;
423
424 /*
425 * Most cards do not answer if some reserved bits
426 * in the OCR are set. However, some controllers
427 * can set bit 7 (reserved for low voltages), but
428 * how to manage low-voltage SD cards is not yet
429 * specified.
430 */
431 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
432 (mmc->cfg->voltages & 0xff8000);
433
434 if (mmc->version == SD_VERSION_2)
435 cmd.cmdarg |= OCR_HCS;
436
437 err = mmc_send_cmd(mmc, &cmd, NULL);
438
439 if (err)
440 return err;
441
442 if (cmd.response[0] & OCR_BUSY)
443 break;
444
445 if (timeout-- <= 0)
446 return -EOPNOTSUPP;
447
448 udelay(1000);
449 }
450
451 if (mmc->version != SD_VERSION_2)
452 mmc->version = SD_VERSION_1_0;
453
454 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
455 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
456 cmd.resp_type = MMC_RSP_R3;
457 cmd.cmdarg = 0;
458
459 err = mmc_send_cmd(mmc, &cmd, NULL);
460
461 if (err)
462 return err;
463 }
464
465 mmc->ocr = cmd.response[0];
466
467 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
468 mmc->rca = 0;
469
470 return 0;
471 }
472
473 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
474 {
475 struct mmc_cmd cmd;
476 int err;
477
478 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
479 cmd.resp_type = MMC_RSP_R3;
480 cmd.cmdarg = 0;
481 if (use_arg && !mmc_host_is_spi(mmc))
482 cmd.cmdarg = OCR_HCS |
483 (mmc->cfg->voltages &
484 (mmc->ocr & OCR_VOLTAGE_MASK)) |
485 (mmc->ocr & OCR_ACCESS_MODE);
486
487 err = mmc_send_cmd(mmc, &cmd, NULL);
488 if (err)
489 return err;
490 mmc->ocr = cmd.response[0];
491 return 0;
492 }
493
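/*
 * Start the eMMC voltage negotiation with CMD1. Only the first
 * iterations are done here; the busy polling is deferred to
 * mmc_complete_op_cond() via the op_cond_pending flag.
 */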
494 static int mmc_send_op_cond(struct mmc *mmc)
495 {
496 int err, i;
497
498 /* Some cards seem to need this */
499 mmc_go_idle(mmc);
500
501 /* Ask the card for its capabilities */
502 for (i = 0; i < 2; i++) {
503 err = mmc_send_op_cond_iter(mmc, i != 0);
504 if (err)
505 return err;
506
507 /* exit if not busy (flag seems to be inverted) */
508 if (mmc->ocr & OCR_BUSY)
509 break;
510 }
511 mmc->op_cond_pending = 1;
512 return 0;
513 }
514
515 static int mmc_complete_op_cond(struct mmc *mmc)
516 {
517 struct mmc_cmd cmd;
518 int timeout = 1000;
519 uint start;
520 int err;
521
522 mmc->op_cond_pending = 0;
523 if (!(mmc->ocr & OCR_BUSY)) {
524 /* Some cards seem to need this */
525 mmc_go_idle(mmc);
526
527 start = get_timer(0);
528 while (1) {
529 err = mmc_send_op_cond_iter(mmc, 1);
530 if (err)
531 return err;
532 if (mmc->ocr & OCR_BUSY)
533 break;
534 if (get_timer(start) > timeout)
535 return -EOPNOTSUPP;
536 udelay(100);
537 }
538 }
539
540 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
541 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
542 cmd.resp_type = MMC_RSP_R3;
543 cmd.cmdarg = 0;
544
545 err = mmc_send_cmd(mmc, &cmd, NULL);
546
547 if (err)
548 return err;
549
550 mmc->ocr = cmd.response[0];
551 }
552
553 mmc->version = MMC_VERSION_UNKNOWN;
554
555 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
556 mmc->rca = 1;
557
558 return 0;
559 }
560
561
562 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
563 {
564 struct mmc_cmd cmd;
565 struct mmc_data data;
566 int err;
567
568 /* Read the EXT_CSD register as a single block of data */
569 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
570 cmd.resp_type = MMC_RSP_R1;
571 cmd.cmdarg = 0;
572
573 data.dest = (char *)ext_csd;
574 data.blocks = 1;
575 data.blocksize = MMC_MAX_BLOCK_LEN;
576 data.flags = MMC_DATA_READ;
577
578 err = mmc_send_cmd(mmc, &cmd, &data);
579
580 return err;
581 }
582
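/*
 * Write one byte of the EXT_CSD with CMD6 (SWITCH), retrying up to
 * three times, then wait for the card to return to the ready state.
 */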
583 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
584 {
585 struct mmc_cmd cmd;
586 int timeout = 1000;
587 int retries = 3;
588 int ret;
589
590 cmd.cmdidx = MMC_CMD_SWITCH;
591 cmd.resp_type = MMC_RSP_R1b;
592 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
593 (index << 16) |
594 (value << 8);
595
596 while (retries > 0) {
597 ret = mmc_send_cmd(mmc, &cmd, NULL);
598
599 /* Waiting for the ready status */
600 if (!ret) {
601 ret = mmc_send_status(mmc, timeout);
602 return ret;
603 }
604
605 retries--;
606 }
607
608 return ret;
609
610 }
611
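/*
 * Program the EXT_CSD HS_TIMING field for the requested bus mode.
 * For HS/HS_52 the EXT_CSD is read back to confirm that the card
 * accepted the high-speed timing.
 */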
612 static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode)
613 {
614 int err;
615 int speed_bits;
616
617 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
618
619 switch (mode) {
620 case MMC_HS:
621 case MMC_HS_52:
622 case MMC_DDR_52:
623 speed_bits = EXT_CSD_TIMING_HS;
624 break;
625 case MMC_HS_200:
626 speed_bits = EXT_CSD_TIMING_HS200;
627 break;
628 case MMC_LEGACY:
629 speed_bits = EXT_CSD_TIMING_LEGACY;
630 break;
631 default:
632 return -EINVAL;
633 }
634 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
635 speed_bits);
636 if (err)
637 return err;
638
639 if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
640 /* Now check to see that it worked */
641 err = mmc_send_ext_csd(mmc, test_csd);
642 if (err)
643 return err;
644
645 /* No high-speed support */
646 if (!test_csd[EXT_CSD_HS_TIMING])
647 return -ENOTSUPP;
648 }
649
650 return 0;
651 }
652
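/* Derive the card's supported bus widths and speed modes from the EXT_CSD. */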
653 static int mmc_get_capabilities(struct mmc *mmc)
654 {
655 u8 *ext_csd = mmc->ext_csd;
656 char cardtype;
657
658 mmc->card_caps = MMC_MODE_1BIT;
659
660 if (mmc_host_is_spi(mmc))
661 return 0;
662
663 /* Only version 4 supports high-speed */
664 if (mmc->version < MMC_VERSION_4)
665 return 0;
666
667 if (!ext_csd) {
668 printf("No ext_csd found!\n"); /* this should never happen */
669 return -ENOTSUPP;
670 }
671
672 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
673
674 cardtype = ext_csd[EXT_CSD_CARD_TYPE] & 0x3f;
675
676 if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
677 EXT_CSD_CARD_TYPE_HS200_1_8V)) {
678 mmc->card_caps |= MMC_MODE_HS200;
679 }
680 if (cardtype & EXT_CSD_CARD_TYPE_52) {
681 if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
682 mmc->card_caps |= MMC_MODE_DDR_52MHz;
683 mmc->card_caps |= MMC_MODE_HS_52MHz;
684 }
685 if (cardtype & EXT_CSD_CARD_TYPE_26)
686 mmc->card_caps |= MMC_MODE_HS;
687
688 return 0;
689 }
690
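/*
 * Select which capacity to expose (user, boot, RPMB or GP area)
 * based on the hardware partition number and update the block
 * device size accordingly.
 */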
691 static int mmc_set_capacity(struct mmc *mmc, int part_num)
692 {
693 switch (part_num) {
694 case 0:
695 mmc->capacity = mmc->capacity_user;
696 break;
697 case 1:
698 case 2:
699 mmc->capacity = mmc->capacity_boot;
700 break;
701 case 3:
702 mmc->capacity = mmc->capacity_rpmb;
703 break;
704 case 4:
705 case 5:
706 case 6:
707 case 7:
708 mmc->capacity = mmc->capacity_gp[part_num - 4];
709 break;
710 default:
711 return -1;
712 }
713
714 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
715
716 return 0;
717 }
718
719 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
720 {
721 int ret;
722
723 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
724 (mmc->part_config & ~PART_ACCESS_MASK)
725 | (part_num & PART_ACCESS_MASK));
726
727 /*
728 * Set the capacity if the switch succeeded or was intended
729 * to return to representing the raw device.
730 */
731 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
732 ret = mmc_set_capacity(mmc, part_num);
733 mmc_get_blk_desc(mmc)->hwpart = part_num;
734 }
735
736 return ret;
737 }
738
739 int mmc_hwpart_config(struct mmc *mmc,
740 const struct mmc_hwpart_conf *conf,
741 enum mmc_hwpart_conf_mode mode)
742 {
743 u8 part_attrs = 0;
744 u32 enh_size_mult;
745 u32 enh_start_addr;
746 u32 gp_size_mult[4];
747 u32 max_enh_size_mult;
748 u32 tot_enh_size_mult = 0;
749 u8 wr_rel_set;
750 int i, pidx, err;
751 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
752
753 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
754 return -EINVAL;
755
756 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
757 printf("eMMC >= 4.4 required for enhanced user data area\n");
758 return -EMEDIUMTYPE;
759 }
760
761 if (!(mmc->part_support & PART_SUPPORT)) {
762 printf("Card does not support partitioning\n");
763 return -EMEDIUMTYPE;
764 }
765
766 if (!mmc->hc_wp_grp_size) {
767 printf("Card does not define HC WP group size\n");
768 return -EMEDIUMTYPE;
769 }
770
771 /* check partition alignment and total enhanced size */
772 if (conf->user.enh_size) {
773 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
774 conf->user.enh_start % mmc->hc_wp_grp_size) {
775 printf("User data enhanced area not HC WP group "
776 "size aligned\n");
777 return -EINVAL;
778 }
779 part_attrs |= EXT_CSD_ENH_USR;
780 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
781 if (mmc->high_capacity) {
782 enh_start_addr = conf->user.enh_start;
783 } else {
784 enh_start_addr = (conf->user.enh_start << 9);
785 }
786 } else {
787 enh_size_mult = 0;
788 enh_start_addr = 0;
789 }
790 tot_enh_size_mult += enh_size_mult;
791
792 for (pidx = 0; pidx < 4; pidx++) {
793 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
794 printf("GP%i partition not HC WP group size "
795 "aligned\n", pidx+1);
796 return -EINVAL;
797 }
798 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
799 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
800 part_attrs |= EXT_CSD_ENH_GP(pidx);
801 tot_enh_size_mult += gp_size_mult[pidx];
802 }
803 }
804
805 if (part_attrs && !(mmc->part_support & ENHNCD_SUPPORT)) {
806 printf("Card does not support enhanced attribute\n");
807 return -EMEDIUMTYPE;
808 }
809
810 err = mmc_send_ext_csd(mmc, ext_csd);
811 if (err)
812 return err;
813
814 max_enh_size_mult =
815 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
816 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
817 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
818 if (tot_enh_size_mult > max_enh_size_mult) {
819 printf("Total enhanced size exceeds maximum (%u > %u)\n",
820 tot_enh_size_mult, max_enh_size_mult);
821 return -EMEDIUMTYPE;
822 }
823
824 /* The default value of EXT_CSD_WR_REL_SET is device
825 * dependent; the values can only be changed if the
826 * EXT_CSD_HS_CTRL_REL bit is set, and only once,
827 * before partitioning is completed. */
828 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
829 if (conf->user.wr_rel_change) {
830 if (conf->user.wr_rel_set)
831 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
832 else
833 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
834 }
835 for (pidx = 0; pidx < 4; pidx++) {
836 if (conf->gp_part[pidx].wr_rel_change) {
837 if (conf->gp_part[pidx].wr_rel_set)
838 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
839 else
840 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
841 }
842 }
843
844 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
845 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
846 puts("Card does not support host controlled partition write "
847 "reliability settings\n");
848 return -EMEDIUMTYPE;
849 }
850
851 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
852 EXT_CSD_PARTITION_SETTING_COMPLETED) {
853 printf("Card already partitioned\n");
854 return -EPERM;
855 }
856
857 if (mode == MMC_HWPART_CONF_CHECK)
858 return 0;
859
860 /* Partitioning requires high-capacity size definitions */
861 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
862 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
863 EXT_CSD_ERASE_GROUP_DEF, 1);
864
865 if (err)
866 return err;
867
868 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
869
870 /* update erase group size to be high-capacity */
871 mmc->erase_grp_size =
872 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
873
874 }
875
876 /* all OK, write the configuration */
877 for (i = 0; i < 4; i++) {
878 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
879 EXT_CSD_ENH_START_ADDR+i,
880 (enh_start_addr >> (i*8)) & 0xFF);
881 if (err)
882 return err;
883 }
884 for (i = 0; i < 3; i++) {
885 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
886 EXT_CSD_ENH_SIZE_MULT+i,
887 (enh_size_mult >> (i*8)) & 0xFF);
888 if (err)
889 return err;
890 }
891 for (pidx = 0; pidx < 4; pidx++) {
892 for (i = 0; i < 3; i++) {
893 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
894 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
895 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
896 if (err)
897 return err;
898 }
899 }
900 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
901 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
902 if (err)
903 return err;
904
905 if (mode == MMC_HWPART_CONF_SET)
906 return 0;
907
908 /* The WR_REL_SET is a write-once register but shall be
909 * written before setting PART_SETTING_COMPLETED. As it is
910 * write-once we can only write it when completing the
911 * partitioning. */
912 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
913 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
914 EXT_CSD_WR_REL_SET, wr_rel_set);
915 if (err)
916 return err;
917 }
918
919 /* Setting PART_SETTING_COMPLETED confirms the partition
920 * configuration but it only becomes effective after power
921 * cycle, so we do not adjust the partition related settings
922 * in the mmc struct. */
923
924 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
925 EXT_CSD_PARTITION_SETTING,
926 EXT_CSD_PARTITION_SETTING_COMPLETED);
927 if (err)
928 return err;
929
930 return 0;
931 }
932
933 #if !CONFIG_IS_ENABLED(DM_MMC)
934 int mmc_getcd(struct mmc *mmc)
935 {
936 int cd;
937
938 cd = board_mmc_getcd(mmc);
939
940 if (cd < 0) {
941 if (mmc->cfg->ops->getcd)
942 cd = mmc->cfg->ops->getcd(mmc);
943 else
944 cd = 1;
945 }
946
947 return cd;
948 }
949 #endif
950
951 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
952 {
953 struct mmc_cmd cmd;
954 struct mmc_data data;
955
956 /* Switch the frequency */
957 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
958 cmd.resp_type = MMC_RSP_R1;
959 cmd.cmdarg = (mode << 31) | 0xffffff;
960 cmd.cmdarg &= ~(0xf << (group * 4));
961 cmd.cmdarg |= value << (group * 4);
962
963 data.dest = (char *)resp;
964 data.blocksize = 64;
965 data.blocks = 1;
966 data.flags = MMC_DATA_READ;
967
968 return mmc_send_cmd(mmc, &cmd, &data);
969 }
970
971
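/*
 * Read the SCR (ACMD51) to determine the SD version and 4-bit support,
 * then use CMD6 in check mode to find out whether the card supports
 * high-speed operation.
 */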
972 static int sd_get_capabilities(struct mmc *mmc)
973 {
974 int err;
975 struct mmc_cmd cmd;
976 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
977 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
978 struct mmc_data data;
979 int timeout;
980
981 mmc->card_caps = MMC_MODE_1BIT;
982
983 if (mmc_host_is_spi(mmc))
984 return 0;
985
986 /* Read the SCR to find out if this card supports higher speeds */
987 cmd.cmdidx = MMC_CMD_APP_CMD;
988 cmd.resp_type = MMC_RSP_R1;
989 cmd.cmdarg = mmc->rca << 16;
990
991 err = mmc_send_cmd(mmc, &cmd, NULL);
992
993 if (err)
994 return err;
995
996 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
997 cmd.resp_type = MMC_RSP_R1;
998 cmd.cmdarg = 0;
999
1000 timeout = 3;
1001
1002 retry_scr:
1003 data.dest = (char *)scr;
1004 data.blocksize = 8;
1005 data.blocks = 1;
1006 data.flags = MMC_DATA_READ;
1007
1008 err = mmc_send_cmd(mmc, &cmd, &data);
1009
1010 if (err) {
1011 if (timeout--)
1012 goto retry_scr;
1013
1014 return err;
1015 }
1016
1017 mmc->scr[0] = __be32_to_cpu(scr[0]);
1018 mmc->scr[1] = __be32_to_cpu(scr[1]);
1019
1020 switch ((mmc->scr[0] >> 24) & 0xf) {
1021 case 0:
1022 mmc->version = SD_VERSION_1_0;
1023 break;
1024 case 1:
1025 mmc->version = SD_VERSION_1_10;
1026 break;
1027 case 2:
1028 mmc->version = SD_VERSION_2;
1029 if ((mmc->scr[0] >> 15) & 0x1)
1030 mmc->version = SD_VERSION_3;
1031 break;
1032 default:
1033 mmc->version = SD_VERSION_1_0;
1034 break;
1035 }
1036
1037 if (mmc->scr[0] & SD_DATA_4BIT)
1038 mmc->card_caps |= MMC_MODE_4BIT;
1039
1040 /* Version 1.0 doesn't support switching */
1041 if (mmc->version == SD_VERSION_1_0)
1042 return 0;
1043
1044 timeout = 4;
1045 while (timeout--) {
1046 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1047 (u8 *)switch_status);
1048
1049 if (err)
1050 return err;
1051
1052 /* The high-speed function is busy. Try again */
1053 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1054 break;
1055 }
1056
1057 /* Record high-speed support if the card reports it */
1058 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1059 mmc->card_caps |= MMC_CAP(SD_HS);
1060
1061 return 0;
1062 }
1063
1064 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1065 {
1066 int err;
1067
1068 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1069
1070 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, 1, (u8 *)switch_status);
1071 if (err)
1072 return err;
1073
1074 if ((__be32_to_cpu(switch_status[4]) & 0x0f000000) != 0x01000000)
1075 return -ENOTSUPP;
1076
1077 return 0;
1078 }
1079
1080 int sd_select_bus_width(struct mmc *mmc, int w)
1081 {
1082 int err;
1083 struct mmc_cmd cmd;
1084
1085 if ((w != 4) && (w != 1))
1086 return -EINVAL;
1087
1088 cmd.cmdidx = MMC_CMD_APP_CMD;
1089 cmd.resp_type = MMC_RSP_R1;
1090 cmd.cmdarg = mmc->rca << 16;
1091
1092 err = mmc_send_cmd(mmc, &cmd, NULL);
1093 if (err)
1094 return err;
1095
1096 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1097 cmd.resp_type = MMC_RSP_R1;
1098 if (w == 4)
1099 cmd.cmdarg = 2;
1100 else if (w == 1)
1101 cmd.cmdarg = 0;
1102 err = mmc_send_cmd(mmc, &cmd, NULL);
1103 if (err)
1104 return err;
1105
1106 return 0;
1107 }
1108
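/*
 * Read the SD Status register (ACMD13) and extract the allocation
 * unit size and erase timing parameters.
 */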
1109 static int sd_read_ssr(struct mmc *mmc)
1110 {
1111 int err, i;
1112 struct mmc_cmd cmd;
1113 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1114 struct mmc_data data;
1115 int timeout = 3;
1116 unsigned int au, eo, et, es;
1117
1118 cmd.cmdidx = MMC_CMD_APP_CMD;
1119 cmd.resp_type = MMC_RSP_R1;
1120 cmd.cmdarg = mmc->rca << 16;
1121
1122 err = mmc_send_cmd(mmc, &cmd, NULL);
1123 if (err)
1124 return err;
1125
1126 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1127 cmd.resp_type = MMC_RSP_R1;
1128 cmd.cmdarg = 0;
1129
1130 retry_ssr:
1131 data.dest = (char *)ssr;
1132 data.blocksize = 64;
1133 data.blocks = 1;
1134 data.flags = MMC_DATA_READ;
1135
1136 err = mmc_send_cmd(mmc, &cmd, &data);
1137 if (err) {
1138 if (timeout--)
1139 goto retry_ssr;
1140
1141 return err;
1142 }
1143
1144 for (i = 0; i < 16; i++)
1145 ssr[i] = be32_to_cpu(ssr[i]);
1146
1147 au = (ssr[2] >> 12) & 0xF;
1148 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1149 mmc->ssr.au = sd_au_size[au];
1150 es = (ssr[3] >> 24) & 0xFF;
1151 es |= (ssr[2] & 0xFF) << 8;
1152 et = (ssr[3] >> 18) & 0x3F;
1153 if (es && et) {
1154 eo = (ssr[3] >> 16) & 0x3;
1155 mmc->ssr.erase_timeout = (et * 1000) / es;
1156 mmc->ssr.erase_offset = eo * 1000;
1157 }
1158 } else {
1159 debug("Invalid Allocation Unit Size.\n");
1160 }
1161
1162 return 0;
1163 }
1164
1165 /* frequency bases */
1166 /* divided by 10 to be nice to platforms without floating point */
1167 static const int fbase[] = {
1168 10000,
1169 100000,
1170 1000000,
1171 10000000,
1172 };
1173
1174 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
1175 * to platforms without floating point.
1176 */
1177 static const u8 multipliers[] = {
1178 0, /* reserved */
1179 10,
1180 12,
1181 13,
1182 15,
1183 20,
1184 25,
1185 30,
1186 35,
1187 40,
1188 45,
1189 50,
1190 55,
1191 60,
1192 70,
1193 80,
1194 };
1195
1196 static inline int bus_width(uint cap)
1197 {
1198 if (cap == MMC_MODE_8BIT)
1199 return 8;
1200 if (cap == MMC_MODE_4BIT)
1201 return 4;
1202 if (cap == MMC_MODE_1BIT)
1203 return 1;
1204 printf("invalid bus width capability 0x%x\n", cap);
1205 return 0;
1206 }
1207
1208 #if !CONFIG_IS_ENABLED(DM_MMC)
1209 static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
1210 {
1211 return -ENOTSUPP;
1212 }
1213
1214 static void mmc_send_init_stream(struct mmc *mmc)
1215 {
1216 }
1217
1218 static int mmc_set_ios(struct mmc *mmc)
1219 {
1220 int ret = 0;
1221
1222 if (mmc->cfg->ops->set_ios)
1223 ret = mmc->cfg->ops->set_ios(mmc);
1224
1225 return ret;
1226 }
1227 #endif
1228
1229 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1230 {
1231 if (clock > mmc->cfg->f_max)
1232 clock = mmc->cfg->f_max;
1233
1234 if (clock < mmc->cfg->f_min)
1235 clock = mmc->cfg->f_min;
1236
1237 mmc->clock = clock;
1238 mmc->clk_disable = disable;
1239
1240 return mmc_set_ios(mmc);
1241 }
1242
1243 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1244 {
1245 mmc->bus_width = width;
1246
1247 return mmc_set_ios(mmc);
1248 }
1249
1250 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1251 /*
1252 * helper function to display the capabilities in a human
1253 * friendly manner. The capabilities include bus width and
1254 * supported modes.
1255 */
1256 void mmc_dump_capabilities(const char *text, uint caps)
1257 {
1258 enum bus_mode mode;
1259
1260 printf("%s: widths [", text);
1261 if (caps & MMC_MODE_8BIT)
1262 printf("8, ");
1263 if (caps & MMC_MODE_4BIT)
1264 printf("4, ");
1265 if (caps & MMC_MODE_1BIT)
1266 printf("1, ");
1267 printf("\b\b] modes [");
1268 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1269 if (MMC_CAP(mode) & caps)
1270 printf("%s, ", mmc_mode_name(mode));
1271 printf("\b\b]\n");
1272 }
1273 #endif
1274
1275 struct mode_width_tuning {
1276 enum bus_mode mode;
1277 uint widths;
1278 uint tuning;
1279 };
1280
1281 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1282 {
1283 mmc->signal_voltage = signal_voltage;
1284 return mmc_set_ios(mmc);
1285 }
1286
1287 static const struct mode_width_tuning sd_modes_by_pref[] = {
1288 {
1289 .mode = SD_HS,
1290 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1291 },
1292 {
1293 .mode = SD_LEGACY,
1294 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1295 }
1296 };
1297
1298 #define for_each_sd_mode_by_pref(caps, mwt) \
1299 for (mwt = sd_modes_by_pref;\
1300 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1301 mwt++) \
1302 if (caps & MMC_CAP(mwt->mode))
1303
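/*
 * Try the preferred SD modes and bus widths in order and keep the
 * first combination supported by both card and host that also passes
 * an SSR read; fall back to SD_LEGACY on failure.
 */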
1304 static int sd_select_mode_and_width(struct mmc *mmc)
1305 {
1306 int err;
1307 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1308 const struct mode_width_tuning *mwt;
1309
1310 err = sd_get_capabilities(mmc);
1311 if (err)
1312 return err;
1313 /* Restrict card's capabilities by what the host can do */
1314 mmc->card_caps &= (mmc->cfg->host_caps | MMC_MODE_1BIT);
1315
1316 for_each_sd_mode_by_pref(mmc->card_caps, mwt) {
1317 uint *w;
1318
1319 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1320 if (*w & mmc->card_caps & mwt->widths) {
1321 debug("trying mode %s width %d (at %d MHz)\n",
1322 mmc_mode_name(mwt->mode),
1323 bus_width(*w),
1324 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1325
1326 /* configure the bus width (card + host) */
1327 err = sd_select_bus_width(mmc, bus_width(*w));
1328 if (err)
1329 goto error;
1330 mmc_set_bus_width(mmc, bus_width(*w));
1331
1332 /* configure the bus mode (card) */
1333 err = sd_set_card_speed(mmc, mwt->mode);
1334 if (err)
1335 goto error;
1336
1337 /* configure the bus mode (host) */
1338 mmc_select_mode(mmc, mwt->mode);
1339 mmc_set_clock(mmc, mmc->tran_speed, false);
1340
1341 err = sd_read_ssr(mmc);
1342 if (!err)
1343 return 0;
1344
1345 printf("bad ssr\n");
1346
1347 error:
1348 /* revert to a safer bus speed */
1349 mmc_select_mode(mmc, SD_LEGACY);
1350 mmc_set_clock(mmc, mmc->tran_speed, false);
1351 }
1352 }
1353 }
1354
1355 printf("unable to select a mode\n");
1356 return -ENOTSUPP;
1357 }
1358
1359 /*
1360 * Read and compare the part of the ext_csd that is constant.
1361 * This can be used to check that the transfer is working
1362 * as expected.
1363 */
1364 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1365 {
1366 int err;
1367 const u8 *ext_csd = mmc->ext_csd;
1368 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1369
1370 err = mmc_send_ext_csd(mmc, test_csd);
1371 if (err)
1372 return err;
1373
1374 /* Only compare read only fields */
1375 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1376 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1377 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1378 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1379 ext_csd[EXT_CSD_REV]
1380 == test_csd[EXT_CSD_REV] &&
1381 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1382 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1383 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1384 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1385 return 0;
1386
1387 return -EBADMSG;
1388 }
1389
1390 static const struct mode_width_tuning mmc_modes_by_pref[] = {
1391 {
1392 .mode = MMC_HS_200,
1393 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1394 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1395 },
1396 {
1397 .mode = MMC_DDR_52,
1398 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1399 },
1400 {
1401 .mode = MMC_HS_52,
1402 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1403 },
1404 {
1405 .mode = MMC_HS,
1406 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1407 },
1408 {
1409 .mode = MMC_LEGACY,
1410 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1411 }
1412 };
1413
1414 #define for_each_mmc_mode_by_pref(caps, mwt) \
1415 for (mwt = mmc_modes_by_pref;\
1416 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1417 mwt++) \
1418 if (caps & MMC_CAP(mwt->mode))
1419
1420 static const struct ext_csd_bus_width {
1421 uint cap;
1422 bool is_ddr;
1423 uint ext_csd_bits;
1424 } ext_csd_bus_width[] = {
1425 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1426 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1427 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1428 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1429 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
1430 };
1431
1432 #define for_each_supported_width(caps, ddr, ecbv) \
1433 for (ecbv = ext_csd_bus_width;\
1434 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
1435 ecbv++) \
1436 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
1437
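/*
 * Try the preferred eMMC modes and bus widths in order: switch the
 * EXT_CSD bus width and timing, run tuning when the mode requires it
 * (HS200) and validate the link with an EXT_CSD read-back. On failure,
 * revert to 1-bit legacy mode and try the next combination.
 */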
1438 static int mmc_select_mode_and_width(struct mmc *mmc)
1439 {
1440 int err;
1441 const struct mode_width_tuning *mwt;
1442 const struct ext_csd_bus_width *ecbw;
1443
1444 err = mmc_get_capabilities(mmc);
1445 if (err)
1446 return err;
1447
1448 /* Restrict card's capabilities by what the host can do */
1449 mmc->card_caps &= (mmc->cfg->host_caps | MMC_MODE_1BIT);
1450
1451 /* Only version 4 of MMC supports wider bus widths */
1452 if (mmc->version < MMC_VERSION_4)
1453 return 0;
1454
1455 if (!mmc->ext_csd) {
1456 debug("No ext_csd found!\n"); /* this should never happen */
1457 return -ENOTSUPP;
1458 }
1459
1460 for_each_mmc_mode_by_pref(mmc->card_caps, mwt) {
1461 for_each_supported_width(mmc->card_caps & mwt->widths,
1462 mmc_is_mode_ddr(mwt->mode), ecbw) {
1463 debug("trying mode %s width %d (at %d MHz)\n",
1464 mmc_mode_name(mwt->mode),
1465 bus_width(ecbw->cap),
1466 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1467 /* configure the bus width (card + host) */
1468 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1469 EXT_CSD_BUS_WIDTH,
1470 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
1471 if (err)
1472 goto error;
1473 mmc_set_bus_width(mmc, bus_width(ecbw->cap));
1474
1475 /* configure the bus speed (card) */
1476 err = mmc_set_card_speed(mmc, mwt->mode);
1477 if (err)
1478 goto error;
1479
1480 /*
1481 * configure the bus width AND the ddr mode (card)
1482 * The host side will be taken care of in the next step
1483 */
1484 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
1485 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1486 EXT_CSD_BUS_WIDTH,
1487 ecbw->ext_csd_bits);
1488 if (err)
1489 goto error;
1490 }
1491
1492 /* configure the bus mode (host) */
1493 mmc_select_mode(mmc, mwt->mode);
1494 mmc_set_clock(mmc, mmc->tran_speed, false);
1495
1496 /* execute tuning if needed */
1497 if (mwt->tuning) {
1498 err = mmc_execute_tuning(mmc, mwt->tuning);
1499 if (err) {
1500 debug("tuning failed\n");
1501 goto error;
1502 }
1503 }
1504
1505 /* do a transfer to check the configuration */
1506 err = mmc_read_and_compare_ext_csd(mmc);
1507 if (!err)
1508 return 0;
1509 error:
1510 /* if an error occurred, revert to a safer bus mode */
1511 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1512 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
1513 mmc_select_mode(mmc, MMC_LEGACY);
1514 mmc_set_bus_width(mmc, 1);
1515 }
1516 }
1517
1518 printf("unable to select a mode\n");
1519
1520 return -ENOTSUPP;
1521 }
1522
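/*
 * Parse the EXT_CSD of a v4+ eMMC device: standard version, user/boot/
 * RPMB/GP capacities, partition configuration and erase/WP group sizes.
 */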
1523 static int mmc_startup_v4(struct mmc *mmc)
1524 {
1525 int err, i;
1526 u64 capacity;
1527 bool has_parts = false;
1528 bool part_completed;
1529 u8 *ext_csd;
1530
1531 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
1532 return 0;
1533
1534 ext_csd = malloc_cache_aligned(MMC_MAX_BLOCK_LEN);
1535 if (!ext_csd)
1536 return -ENOMEM;
1537
1538 mmc->ext_csd = ext_csd;
1539
1540 /* check ext_csd version and capacity */
1541 err = mmc_send_ext_csd(mmc, ext_csd);
1542 if (err)
1543 return err;
1544 if (ext_csd[EXT_CSD_REV] >= 2) {
1545 /*
1546 * According to the JEDEC Standard, the value of
1547 * ext_csd's capacity is valid if the value is more
1548 * than 2GB
1549 */
1550 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
1551 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
1552 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
1553 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
1554 capacity *= MMC_MAX_BLOCK_LEN;
1555 if ((capacity >> 20) > 2 * 1024)
1556 mmc->capacity_user = capacity;
1557 }
1558
1559 switch (ext_csd[EXT_CSD_REV]) {
1560 case 1:
1561 mmc->version = MMC_VERSION_4_1;
1562 break;
1563 case 2:
1564 mmc->version = MMC_VERSION_4_2;
1565 break;
1566 case 3:
1567 mmc->version = MMC_VERSION_4_3;
1568 break;
1569 case 5:
1570 mmc->version = MMC_VERSION_4_41;
1571 break;
1572 case 6:
1573 mmc->version = MMC_VERSION_4_5;
1574 break;
1575 case 7:
1576 mmc->version = MMC_VERSION_5_0;
1577 break;
1578 case 8:
1579 mmc->version = MMC_VERSION_5_1;
1580 break;
1581 }
1582
1583 /* The partition data may be non-zero but it is only
1584 * effective if PARTITION_SETTING_COMPLETED is set in
1585 * EXT_CSD, so ignore any data if this bit is not set,
1586 * except for enabling the high-capacity group size
1587 * definition (see below).
1588 */
1589 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
1590 EXT_CSD_PARTITION_SETTING_COMPLETED);
1591
1592 /* store the eMMC partition info */
1593 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
1594 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
1595 ext_csd[EXT_CSD_BOOT_MULT])
1596 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
1597 if (part_completed &&
1598 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
1599 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
1600
1601 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
1602
1603 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
1604
1605 for (i = 0; i < 4; i++) {
1606 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
1607 uint mult = (ext_csd[idx + 2] << 16) +
1608 (ext_csd[idx + 1] << 8) + ext_csd[idx];
1609 if (mult)
1610 has_parts = true;
1611 if (!part_completed)
1612 continue;
1613 mmc->capacity_gp[i] = mult;
1614 mmc->capacity_gp[i] *=
1615 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
1616 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1617 mmc->capacity_gp[i] <<= 19;
1618 }
1619
1620 if (part_completed) {
1621 mmc->enh_user_size =
1622 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
1623 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
1624 ext_csd[EXT_CSD_ENH_SIZE_MULT];
1625 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
1626 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1627 mmc->enh_user_size <<= 19;
1628 mmc->enh_user_start =
1629 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
1630 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
1631 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
1632 ext_csd[EXT_CSD_ENH_START_ADDR];
1633 if (mmc->high_capacity)
1634 mmc->enh_user_start <<= 9;
1635 }
1636
1637 /*
1638 * Host needs to enable ERASE_GRP_DEF bit if device is
1639 * partitioned. This bit will be lost every time after a reset
1640 * or power off. This will affect erase size.
1641 */
1642 if (part_completed)
1643 has_parts = true;
1644 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
1645 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
1646 has_parts = true;
1647 if (has_parts) {
1648 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1649 EXT_CSD_ERASE_GROUP_DEF, 1);
1650
1651 if (err)
1652 return err;
1653
1654 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1655 }
1656
1657 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
1658 /* Read out group size from ext_csd */
1659 mmc->erase_grp_size =
1660 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1661 /*
1662 * if high capacity and partition setting completed
1663 * SEC_COUNT is valid even if it is smaller than 2 GiB
1664 * JEDEC Standard JESD84-B45, 6.2.4
1665 */
1666 if (mmc->high_capacity && part_completed) {
1667 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
1668 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
1669 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
1670 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
1671 capacity *= MMC_MAX_BLOCK_LEN;
1672 mmc->capacity_user = capacity;
1673 }
1674 } else {
1675 /* Calculate the group size from the csd value. */
1676 int erase_gsz, erase_gmul;
1677
1678 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
1679 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
1680 mmc->erase_grp_size = (erase_gsz + 1)
1681 * (erase_gmul + 1);
1682 }
1683
1684 mmc->hc_wp_grp_size = 1024
1685 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1686 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1687
1688 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1689
1690 return 0;
1691 }
1692
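/*
 * Identify the card and bring it to the transfer state: read the CID
 * and CSD, assign/read the RCA, select the card, parse the EXT_CSD for
 * eMMC and finally pick the best bus mode and width. Also fills in the
 * block device descriptor.
 */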
1693 static int mmc_startup(struct mmc *mmc)
1694 {
1695 int err, i;
1696 uint mult, freq;
1697 u64 cmult, csize;
1698 struct mmc_cmd cmd;
1699 struct blk_desc *bdesc;
1700
1701 #ifdef CONFIG_MMC_SPI_CRC_ON
1702 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
1703 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
1704 cmd.resp_type = MMC_RSP_R1;
1705 cmd.cmdarg = 1;
1706 err = mmc_send_cmd(mmc, &cmd, NULL);
1707
1708 if (err)
1709 return err;
1710 }
1711 #endif
1712
1713 /* Put the Card in Identify Mode */
1714 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
1715 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
1716 cmd.resp_type = MMC_RSP_R2;
1717 cmd.cmdarg = 0;
1718
1719 err = mmc_send_cmd(mmc, &cmd, NULL);
1720
1721 if (err)
1722 return err;
1723
1724 memcpy(mmc->cid, cmd.response, 16);
1725
1726 /*
1727 * For MMC cards, set the Relative Address.
1728 * For SD cards, get the Relative Address.
1729 * This also puts the cards into Standby State
1730 */
1731 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
1732 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
1733 cmd.cmdarg = mmc->rca << 16;
1734 cmd.resp_type = MMC_RSP_R6;
1735
1736 err = mmc_send_cmd(mmc, &cmd, NULL);
1737
1738 if (err)
1739 return err;
1740
1741 if (IS_SD(mmc))
1742 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
1743 }
1744
1745 /* Get the Card-Specific Data */
1746 cmd.cmdidx = MMC_CMD_SEND_CSD;
1747 cmd.resp_type = MMC_RSP_R2;
1748 cmd.cmdarg = mmc->rca << 16;
1749
1750 err = mmc_send_cmd(mmc, &cmd, NULL);
1751
1752 if (err)
1753 return err;
1754
1755 mmc->csd[0] = cmd.response[0];
1756 mmc->csd[1] = cmd.response[1];
1757 mmc->csd[2] = cmd.response[2];
1758 mmc->csd[3] = cmd.response[3];
1759
1760 if (mmc->version == MMC_VERSION_UNKNOWN) {
1761 int version = (cmd.response[0] >> 26) & 0xf;
1762
1763 switch (version) {
1764 case 0:
1765 mmc->version = MMC_VERSION_1_2;
1766 break;
1767 case 1:
1768 mmc->version = MMC_VERSION_1_4;
1769 break;
1770 case 2:
1771 mmc->version = MMC_VERSION_2_2;
1772 break;
1773 case 3:
1774 mmc->version = MMC_VERSION_3;
1775 break;
1776 case 4:
1777 mmc->version = MMC_VERSION_4;
1778 break;
1779 default:
1780 mmc->version = MMC_VERSION_1_2;
1781 break;
1782 }
1783 }
1784
1785 /* divide frequency by 10, since the mults are 10x bigger */
1786 freq = fbase[(cmd.response[0] & 0x7)];
1787 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
1788
1789 mmc->legacy_speed = freq * mult;
1790 mmc_select_mode(mmc, MMC_LEGACY);
1791
1792 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
1793 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
1794
1795 if (IS_SD(mmc))
1796 mmc->write_bl_len = mmc->read_bl_len;
1797 else
1798 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
1799
1800 if (mmc->high_capacity) {
1801 csize = (mmc->csd[1] & 0x3f) << 16
1802 | (mmc->csd[2] & 0xffff0000) >> 16;
1803 cmult = 8;
1804 } else {
1805 csize = (mmc->csd[1] & 0x3ff) << 2
1806 | (mmc->csd[2] & 0xc0000000) >> 30;
1807 cmult = (mmc->csd[2] & 0x00038000) >> 15;
1808 }
1809
1810 mmc->capacity_user = (csize + 1) << (cmult + 2);
1811 mmc->capacity_user *= mmc->read_bl_len;
1812 mmc->capacity_boot = 0;
1813 mmc->capacity_rpmb = 0;
1814 for (i = 0; i < 4; i++)
1815 mmc->capacity_gp[i] = 0;
1816
1817 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
1818 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
1819
1820 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
1821 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
1822
1823 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
1824 cmd.cmdidx = MMC_CMD_SET_DSR;
1825 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
1826 cmd.resp_type = MMC_RSP_NONE;
1827 if (mmc_send_cmd(mmc, &cmd, NULL))
1828 printf("MMC: SET_DSR failed\n");
1829 }
1830
1831 /* Select the card, and put it into Transfer Mode */
1832 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
1833 cmd.cmdidx = MMC_CMD_SELECT_CARD;
1834 cmd.resp_type = MMC_RSP_R1;
1835 cmd.cmdarg = mmc->rca << 16;
1836 err = mmc_send_cmd(mmc, &cmd, NULL);
1837
1838 if (err)
1839 return err;
1840 }
1841
1842 /*
1843 * For SD, its erase group is always one sector
1844 */
1845 mmc->erase_grp_size = 1;
1846 mmc->part_config = MMCPART_NOAVAILABLE;
1847
1848 err = mmc_startup_v4(mmc);
1849 if (err)
1850 return err;
1851
1852 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
1853 if (err)
1854 return err;
1855
1856 if (IS_SD(mmc))
1857 err = sd_select_mode_and_width(mmc);
1858 else
1859 err = mmc_select_mode_and_width(mmc);
1860
1861 if (err)
1862 return err;
1863
1864
1865 /* Fix the block length for DDR mode */
1866 if (mmc->ddr_mode) {
1867 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
1868 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
1869 }
1870
1871 /* fill in device description */
1872 bdesc = mmc_get_blk_desc(mmc);
1873 bdesc->lun = 0;
1874 bdesc->hwpart = 0;
1875 bdesc->type = 0;
1876 bdesc->blksz = mmc->read_bl_len;
1877 bdesc->log2blksz = LOG2(bdesc->blksz);
1878 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
1879 #if !defined(CONFIG_SPL_BUILD) || \
1880 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
1881 !defined(CONFIG_USE_TINY_PRINTF))
1882 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
1883 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
1884 (mmc->cid[3] >> 16) & 0xffff);
1885 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
1886 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
1887 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
1888 (mmc->cid[2] >> 24) & 0xff);
1889 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
1890 (mmc->cid[2] >> 16) & 0xf);
1891 #else
1892 bdesc->vendor[0] = 0;
1893 bdesc->product[0] = 0;
1894 bdesc->revision[0] = 0;
1895 #endif
1896 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
1897 part_init(bdesc);
1898 #endif
1899
1900 return 0;
1901 }
1902
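/*
 * Send SD CMD8 (SEND_IF_COND) with the 0xaa test pattern; a matching
 * echo identifies the card as SD version 2.00 or later.
 */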
1903 static int mmc_send_if_cond(struct mmc *mmc)
1904 {
1905 struct mmc_cmd cmd;
1906 int err;
1907
1908 cmd.cmdidx = SD_CMD_SEND_IF_COND;
1909 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
1910 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
1911 cmd.resp_type = MMC_RSP_R7;
1912
1913 err = mmc_send_cmd(mmc, &cmd, NULL);
1914
1915 if (err)
1916 return err;
1917
1918 if ((cmd.response[0] & 0xff) != 0xaa)
1919 return -EOPNOTSUPP;
1920 else
1921 mmc->version = SD_VERSION_2;
1922
1923 return 0;
1924 }
1925
1926 #if !CONFIG_IS_ENABLED(DM_MMC)
1927 /* board-specific MMC power initializations. */
1928 __weak void board_mmc_power_init(void)
1929 {
1930 }
1931 #endif
1932
1933 static int mmc_power_init(struct mmc *mmc)
1934 {
1935 #if CONFIG_IS_ENABLED(DM_MMC)
1936 #if CONFIG_IS_ENABLED(DM_REGULATOR)
1937 int ret;
1938
1939 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
1940 &mmc->vmmc_supply);
1941 if (ret)
1942 debug("%s: No vmmc supply\n", mmc->dev->name);
1943
1944 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
1945 &mmc->vqmmc_supply);
1946 if (ret)
1947 debug("%s: No vqmmc supply\n", mmc->dev->name);
1948 #endif
1949 #else /* !CONFIG_DM_MMC */
1950 /*
1951 * Driver model should use a regulator, as above, rather than calling
1952 * out to board code.
1953 */
1954 board_mmc_power_init();
1955 #endif
1956 return 0;
1957 }
1958
1959 /*
1960 * put the host in the initial state:
1961 * - turn on Vdd (card power supply)
1962 * - configure the bus width and clock to minimal values
1963 */
1964 static void mmc_set_initial_state(struct mmc *mmc)
1965 {
1966 int err;
1967
1968 /* First try to set 3.3V. If it fails set to 1.8V */
1969 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
1970 if (err != 0)
1971 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
1972 if (err != 0)
1973 printf("mmc: failed to set signal voltage\n");
1974
1975 mmc_select_mode(mmc, MMC_LEGACY);
1976 mmc_set_bus_width(mmc, 1);
1977 mmc_set_clock(mmc, 0, false);
1978 }
1979
1980 static int mmc_power_on(struct mmc *mmc)
1981 {
1982 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
1983 if (mmc->vmmc_supply) {
1984 int ret = regulator_set_enable(mmc->vmmc_supply, true);
1985
1986 if (ret) {
1987 puts("Error enabling VMMC supply\n");
1988 return ret;
1989 }
1990 }
1991 #endif
1992 return 0;
1993 }
1994
1995 static int mmc_power_off(struct mmc *mmc)
1996 {
1997 mmc_set_clock(mmc, 1, true);
1998 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
1999 if (mmc->vmmc_supply) {
2000 int ret = regulator_set_enable(mmc->vmmc_supply, false);
2001
2002 if (ret) {
2003 puts("Error disabling VMMC supply\n");
2004 return ret;
2005 }
2006 }
2007 #endif
2008 return 0;
2009 }
2010
2011 static int mmc_power_cycle(struct mmc *mmc)
2012 {
2013 int ret;
2014
2015 ret = mmc_power_off(mmc);
2016 if (ret)
2017 return ret;
2018 /*
2019 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
2020 * to be on the safer side.
2021 */
2022 udelay(2000);
2023 return mmc_power_on(mmc);
2024 }
2025
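/*
 * First phase of card initialization: check card detect, power up the
 * card, reset it with CMD0 and start the operating-condition
 * negotiation (CMD8/ACMD41 for SD, CMD1 for eMMC). The remaining
 * startup is finished later via mmc_init()/mmc_complete_init().
 */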
2026 int mmc_start_init(struct mmc *mmc)
2027 {
2028 bool no_card;
2029 int err;
2030
2031 /* we pretend there's no card when init is NULL */
2032 no_card = mmc_getcd(mmc) == 0;
2033 #if !CONFIG_IS_ENABLED(DM_MMC)
2034 no_card = no_card || (mmc->cfg->ops->init == NULL);
2035 #endif
2036 if (no_card) {
2037 mmc->has_init = 0;
2038 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2039 printf("MMC: no card present\n");
2040 #endif
2041 return -ENOMEDIUM;
2042 }
2043
2044 if (mmc->has_init)
2045 return 0;
2046
2047 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
2048 mmc_adapter_card_type_ident();
2049 #endif
2050 err = mmc_power_init(mmc);
2051 if (err)
2052 return err;
2053
2054 err = mmc_power_on(mmc);
2055 if (err)
2056 return err;
2057
2058 #if CONFIG_IS_ENABLED(DM_MMC)
2059 /* The device has already been probed ready for use */
2060 #else
2061 /* made sure it's not NULL earlier */
2062 err = mmc->cfg->ops->init(mmc);
2063 if (err)
2064 return err;
2065 #endif
2066 mmc->ddr_mode = 0;
2067
2068 mmc_set_initial_state(mmc);
2069 mmc_send_init_stream(mmc);
2070
2071 /* Reset the Card */
2072 err = mmc_go_idle(mmc);
2073
2074 if (err)
2075 return err;
2076
2077 /* The internal partition is reset to the user partition (0) on every CMD0 */
2078 mmc_get_blk_desc(mmc)->hwpart = 0;
2079
2080 /* Test for SD version 2 */
2081 err = mmc_send_if_cond(mmc);
2082
2083 /* Now try to get the SD card's operating condition */
2084 err = sd_send_op_cond(mmc);
2085
2086 /* If the command timed out, we check for an MMC card */
2087 if (err == -ETIMEDOUT) {
2088 err = mmc_send_op_cond(mmc);
2089
2090 if (err) {
2091 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2092 printf("Card did not respond to voltage select!\n");
2093 #endif
2094 return -EOPNOTSUPP;
2095 }
2096 }
2097
2098 if (!err)
2099 mmc->init_in_progress = 1;
2100
2101 return err;
2102 }
2103
2104 static int mmc_complete_init(struct mmc *mmc)
2105 {
2106 int err = 0;
2107
2108 mmc->init_in_progress = 0;
2109 if (mmc->op_cond_pending)
2110 err = mmc_complete_op_cond(mmc);
2111
2112 if (!err)
2113 err = mmc_startup(mmc);
2114 if (err)
2115 mmc->has_init = 0;
2116 else
2117 mmc->has_init = 1;
2118 return err;
2119 }
2120
2121 int mmc_init(struct mmc *mmc)
2122 {
2123 int err = 0;
2124 __maybe_unused unsigned start;
2125 #if CONFIG_IS_ENABLED(DM_MMC)
2126 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2127
2128 upriv->mmc = mmc;
2129 #endif
2130 if (mmc->has_init)
2131 return 0;
2132
2133 start = get_timer(0);
2134
2135 if (!mmc->init_in_progress)
2136 err = mmc_start_init(mmc);
2137
2138 if (!err)
2139 err = mmc_complete_init(mmc);
2140 if (err)
2141 printf("%s: %d, time %lu\n", __func__, err, get_timer(start));
2142
2143 return err;
2144 }
2145
2146 int mmc_set_dsr(struct mmc *mmc, u16 val)
2147 {
2148 mmc->dsr = val;
2149 return 0;
2150 }
2151
2152 /* CPU-specific MMC initializations */
2153 __weak int cpu_mmc_init(bd_t *bis)
2154 {
2155 return -1;
2156 }
2157
2158 /* board-specific MMC initializations. */
2159 __weak int board_mmc_init(bd_t *bis)
2160 {
2161 return -1;
2162 }
2163
2164 void mmc_set_preinit(struct mmc *mmc, int preinit)
2165 {
2166 mmc->preinit = preinit;
2167 }
2168
2169 #if CONFIG_IS_ENABLED(DM_MMC) && defined(CONFIG_SPL_BUILD)
2170 static int mmc_probe(bd_t *bis)
2171 {
2172 return 0;
2173 }
2174 #elif CONFIG_IS_ENABLED(DM_MMC)
2175 static int mmc_probe(bd_t *bis)
2176 {
2177 int ret, i;
2178 struct uclass *uc;
2179 struct udevice *dev;
2180
2181 ret = uclass_get(UCLASS_MMC, &uc);
2182 if (ret)
2183 return ret;
2184
2185 /*
2186 * Try to add them in sequence order. Really with driver model we
2187 * should allow holes, but the current MMC list does not allow that.
2188 * So if we request 0, 1, 3 we will get 0, 1, 2.
2189 */
2190 for (i = 0; ; i++) {
2191 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
2192 if (ret == -ENODEV)
2193 break;
2194 }
2195 uclass_foreach_dev(dev, uc) {
2196 ret = device_probe(dev);
2197 if (ret)
2198 printf("%s - probe failed: %d\n", dev->name, ret);
2199 }
2200
2201 return 0;
2202 }
2203 #else
2204 static int mmc_probe(bd_t *bis)
2205 {
2206 if (board_mmc_init(bis) < 0)
2207 cpu_mmc_init(bis);
2208
2209 return 0;
2210 }
2211 #endif
2212
2213 int mmc_initialize(bd_t *bis)
2214 {
2215 static int initialized = 0;
2216 int ret;
2217 if (initialized) /* Avoid initializing mmc multiple times */
2218 return 0;
2219 initialized = 1;
2220
2221 #if !CONFIG_IS_ENABLED(BLK)
2222 #if !CONFIG_IS_ENABLED(MMC_TINY)
2223 mmc_list_init();
2224 #endif
2225 #endif
2226 ret = mmc_probe(bis);
2227 if (ret)
2228 return ret;
2229
2230 #ifndef CONFIG_SPL_BUILD
2231 print_mmc_devices(',');
2232 #endif
2233
2234 mmc_do_preinit();
2235 return 0;
2236 }
2237
2238 #ifdef CONFIG_CMD_BKOPS_ENABLE
2239 int mmc_set_bkops_enable(struct mmc *mmc)
2240 {
2241 int err;
2242 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2243
2244 err = mmc_send_ext_csd(mmc, ext_csd);
2245 if (err) {
2246 puts("Could not get ext_csd register values\n");
2247 return err;
2248 }
2249
2250 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
2251 puts("Background operations not supported on device\n");
2252 return -EMEDIUMTYPE;
2253 }
2254
2255 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
2256 puts("Background operations already enabled\n");
2257 return 0;
2258 }
2259
2260 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
2261 if (err) {
2262 puts("Failed to enable manual background operations\n");
2263 return err;
2264 }
2265
2266 puts("Enabled manual background operations\n");
2267
2268 return 0;
2269 }
2270 #endif