[people/ms/u-boot.git] / drivers / mmc / mmc.c
commit: mmc: disable the mmc clock during power off
1 /*
2 * Copyright 2008, Freescale Semiconductor, Inc
3 * Andy Fleming
4 *
5 * Based vaguely on the Linux code
6 *
7 * SPDX-License-Identifier: GPL-2.0+
8 */
9
10 #include <config.h>
11 #include <common.h>
12 #include <command.h>
13 #include <dm.h>
14 #include <dm/device-internal.h>
15 #include <errno.h>
16 #include <mmc.h>
17 #include <part.h>
18 #include <power/regulator.h>
19 #include <malloc.h>
20 #include <memalign.h>
21 #include <linux/list.h>
22 #include <div64.h>
23 #include "mmc_private.h"
24
25 static const unsigned int sd_au_size[] = {
26 0, SZ_16K / 512, SZ_32K / 512,
27 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
28 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
29 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
30 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512, SZ_64M / 512,
31 };
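/*
 * Worked example (added for reference): an SD_STATUS AU_SIZE code of 1
 * indexes SZ_16K / 512 = 32, i.e. an allocation unit of 32 sectors
 * (16 KiB); sd_read_ssr() below stores this value in mmc->ssr.au.
 */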
32
33 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
34 static int mmc_power_cycle(struct mmc *mmc);
35
36 #if CONFIG_IS_ENABLED(MMC_TINY)
37 static struct mmc mmc_static;
38 struct mmc *find_mmc_device(int dev_num)
39 {
40 return &mmc_static;
41 }
42
43 void mmc_do_preinit(void)
44 {
45 struct mmc *m = &mmc_static;
46 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
47 mmc_set_preinit(m, 1);
48 #endif
49 if (m->preinit)
50 mmc_start_init(m);
51 }
52
53 struct blk_desc *mmc_get_blk_desc(struct mmc *mmc)
54 {
55 return &mmc->block_dev;
56 }
57 #endif
58
59 #if !CONFIG_IS_ENABLED(DM_MMC)
60 __weak int board_mmc_getwp(struct mmc *mmc)
61 {
62 return -1;
63 }
64
65 int mmc_getwp(struct mmc *mmc)
66 {
67 int wp;
68
69 wp = board_mmc_getwp(mmc);
70
71 if (wp < 0) {
72 if (mmc->cfg->ops->getwp)
73 wp = mmc->cfg->ops->getwp(mmc);
74 else
75 wp = 0;
76 }
77
78 return wp;
79 }
80
81 __weak int board_mmc_getcd(struct mmc *mmc)
82 {
83 return -1;
84 }
85 #endif
86
87 #ifdef CONFIG_MMC_TRACE
88 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
89 {
90 printf("CMD_SEND:%d\n", cmd->cmdidx);
91 printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
92 }
93
94 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
95 {
96 int i;
97 u8 *ptr;
98
99 if (ret) {
100 printf("\t\tRET\t\t\t %d\n", ret);
101 } else {
102 switch (cmd->resp_type) {
103 case MMC_RSP_NONE:
104 printf("\t\tMMC_RSP_NONE\n");
105 break;
106 case MMC_RSP_R1:
107 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
108 cmd->response[0]);
109 break;
110 case MMC_RSP_R1b:
111 printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
112 cmd->response[0]);
113 break;
114 case MMC_RSP_R2:
115 printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
116 cmd->response[0]);
117 printf("\t\t \t\t 0x%08X \n",
118 cmd->response[1]);
119 printf("\t\t \t\t 0x%08X \n",
120 cmd->response[2]);
121 printf("\t\t \t\t 0x%08X \n",
122 cmd->response[3]);
123 printf("\n");
124 printf("\t\t\t\t\tDUMPING DATA\n");
125 for (i = 0; i < 4; i++) {
126 int j;
127 printf("\t\t\t\t\t%03d - ", i*4);
128 ptr = (u8 *)&cmd->response[i];
129 ptr += 3;
130 for (j = 0; j < 4; j++)
131 printf("%02X ", *ptr--);
132 printf("\n");
133 }
134 break;
135 case MMC_RSP_R3:
136 printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
137 cmd->response[0]);
138 break;
139 default:
140 printf("\t\tERROR MMC rsp not supported\n");
141 break;
142 }
143 }
144 }
145
146 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
147 {
148 int status;
149
150 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
151 printf("CURR STATE:%d\n", status);
152 }
153 #endif
154
155 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
156 const char *mmc_mode_name(enum bus_mode mode)
157 {
158 static const char *const names[] = {
159 [MMC_LEGACY] = "MMC legacy",
160 [SD_LEGACY] = "SD Legacy",
161 [MMC_HS] = "MMC High Speed (26MHz)",
162 [SD_HS] = "SD High Speed (50MHz)",
163 [UHS_SDR12] = "UHS SDR12 (25MHz)",
164 [UHS_SDR25] = "UHS SDR25 (50MHz)",
165 [UHS_SDR50] = "UHS SDR50 (100MHz)",
166 [UHS_SDR104] = "UHS SDR104 (208MHz)",
167 [UHS_DDR50] = "UHS DDR50 (50MHz)",
168 [MMC_HS_52] = "MMC High Speed (52MHz)",
169 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
170 [MMC_HS_200] = "HS200 (200MHz)",
171 };
172
173 if (mode >= MMC_MODES_END)
174 return "Unknown mode";
175 else
176 return names[mode];
177 }
178 #endif
179
180 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
181 {
182 static const int freqs[] = {
183 [SD_LEGACY] = 25000000,
184 [MMC_HS] = 26000000,
185 [SD_HS] = 50000000,
186 [UHS_SDR12] = 25000000,
187 [UHS_SDR25] = 50000000,
188 [UHS_SDR50] = 100000000,
189 [UHS_SDR104] = 208000000,
190 [UHS_DDR50] = 50000000,
191 [MMC_HS_52] = 52000000,
192 [MMC_DDR_52] = 52000000,
193 [MMC_HS_200] = 200000000,
194 };
195
196 if (mode == MMC_LEGACY)
197 return mmc->legacy_speed;
198 else if (mode >= MMC_MODES_END)
199 return 0;
200 else
201 return freqs[mode];
202 }
203
204 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
205 {
206 mmc->selected_mode = mode;
207 mmc->tran_speed = mmc_mode2freq(mmc, mode);
208 mmc->ddr_mode = mmc_is_mode_ddr(mode);
209 debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
210 mmc->tran_speed / 1000000);
211 return 0;
212 }
213
214 #if !CONFIG_IS_ENABLED(DM_MMC)
215 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
216 {
217 int ret;
218
219 mmmc_trace_before_send(mmc, cmd);
220 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
221 mmmc_trace_after_send(mmc, cmd, ret);
222
223 return ret;
224 }
225 #endif
226
227 int mmc_send_status(struct mmc *mmc, int timeout)
228 {
229 struct mmc_cmd cmd;
230 int err, retries = 5;
231
232 cmd.cmdidx = MMC_CMD_SEND_STATUS;
233 cmd.resp_type = MMC_RSP_R1;
234 if (!mmc_host_is_spi(mmc))
235 cmd.cmdarg = mmc->rca << 16;
236
237 while (1) {
238 err = mmc_send_cmd(mmc, &cmd, NULL);
239 if (!err) {
240 if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
241 (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
242 MMC_STATE_PRG)
243 break;
244
245 if (cmd.response[0] & MMC_STATUS_MASK) {
246 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
247 printf("Status Error: 0x%08X\n",
248 cmd.response[0]);
249 #endif
250 return -ECOMM;
251 }
252 } else if (--retries < 0)
253 return err;
254
255 if (timeout-- <= 0)
256 break;
257
258 udelay(1000);
259 }
260
261 mmc_trace_state(mmc, &cmd);
262 if (timeout <= 0) {
263 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
264 printf("Timeout waiting card ready\n");
265 #endif
266 return -ETIMEDOUT;
267 }
268
269 return 0;
270 }
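/*
 * Usage sketch (illustrative, not part of the driver): callers that issue
 * a command which leaves the card busy (a block write, CMD6, ...) poll
 * with mmc_send_status() until the card leaves the programming state.
 * The 1000 ms timeout below is an assumption for the example only.
 *
 *	err = mmc_send_cmd(mmc, &cmd, &data);
 *	if (!err)
 *		err = mmc_send_status(mmc, 1000);
 *	if (err)
 *		return err;
 */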
271
272 int mmc_set_blocklen(struct mmc *mmc, int len)
273 {
274 struct mmc_cmd cmd;
275
276 if (mmc->ddr_mode)
277 return 0;
278
279 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
280 cmd.resp_type = MMC_RSP_R1;
281 cmd.cmdarg = len;
282
283 return mmc_send_cmd(mmc, &cmd, NULL);
284 }
285
286 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
287 lbaint_t blkcnt)
288 {
289 struct mmc_cmd cmd;
290 struct mmc_data data;
291
292 if (blkcnt > 1)
293 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
294 else
295 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
296
297 if (mmc->high_capacity)
298 cmd.cmdarg = start;
299 else
300 cmd.cmdarg = start * mmc->read_bl_len;
301
302 cmd.resp_type = MMC_RSP_R1;
303
304 data.dest = dst;
305 data.blocks = blkcnt;
306 data.blocksize = mmc->read_bl_len;
307 data.flags = MMC_DATA_READ;
308
309 if (mmc_send_cmd(mmc, &cmd, &data))
310 return 0;
311
312 if (blkcnt > 1) {
313 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
314 cmd.cmdarg = 0;
315 cmd.resp_type = MMC_RSP_R1b;
316 if (mmc_send_cmd(mmc, &cmd, NULL)) {
317 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
318 printf("mmc fail to send stop cmd\n");
319 #endif
320 return 0;
321 }
322 }
323
324 return blkcnt;
325 }
326
327 #if CONFIG_IS_ENABLED(BLK)
328 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
329 #else
330 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
331 void *dst)
332 #endif
333 {
334 #if CONFIG_IS_ENABLED(BLK)
335 struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
336 #endif
337 int dev_num = block_dev->devnum;
338 int err;
339 lbaint_t cur, blocks_todo = blkcnt;
340
341 if (blkcnt == 0)
342 return 0;
343
344 struct mmc *mmc = find_mmc_device(dev_num);
345 if (!mmc)
346 return 0;
347
348 if (CONFIG_IS_ENABLED(MMC_TINY))
349 err = mmc_switch_part(mmc, block_dev->hwpart);
350 else
351 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
352
353 if (err < 0)
354 return 0;
355
356 if ((start + blkcnt) > block_dev->lba) {
357 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
358 printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
359 start + blkcnt, block_dev->lba);
360 #endif
361 return 0;
362 }
363
364 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
365 debug("%s: Failed to set blocklen\n", __func__);
366 return 0;
367 }
368
369 do {
370 cur = (blocks_todo > mmc->cfg->b_max) ?
371 mmc->cfg->b_max : blocks_todo;
372 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
373 debug("%s: Failed to read blocks\n", __func__);
374 return 0;
375 }
376 blocks_todo -= cur;
377 start += cur;
378 dst += cur * mmc->read_bl_len;
379 } while (blocks_todo > 0);
380
381 return blkcnt;
382 }
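/*
 * Example (sketch only, assuming the non-BLK build in which mmc_bread()
 * takes a struct blk_desc directly): read the first 8 blocks of device 0
 * into a cache-aligned buffer. The device number and buffer are
 * assumptions for illustration; with CONFIG_BLK the call normally goes
 * through blk_dread() instead.
 *
 *	struct mmc *mmc = find_mmc_device(0);
 *	ALLOC_CACHE_ALIGN_BUFFER(u8, buf, 8 * 512);
 *
 *	if (mmc && !mmc_init(mmc))
 *		mmc_bread(mmc_get_blk_desc(mmc), 0, 8, buf);
 */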
383
384 static int mmc_go_idle(struct mmc *mmc)
385 {
386 struct mmc_cmd cmd;
387 int err;
388
389 udelay(1000);
390
391 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
392 cmd.cmdarg = 0;
393 cmd.resp_type = MMC_RSP_NONE;
394
395 err = mmc_send_cmd(mmc, &cmd, NULL);
396
397 if (err)
398 return err;
399
400 udelay(2000);
401
402 return 0;
403 }
404
405 static int sd_send_op_cond(struct mmc *mmc)
406 {
407 int timeout = 1000;
408 int err;
409 struct mmc_cmd cmd;
410
411 while (1) {
412 cmd.cmdidx = MMC_CMD_APP_CMD;
413 cmd.resp_type = MMC_RSP_R1;
414 cmd.cmdarg = 0;
415
416 err = mmc_send_cmd(mmc, &cmd, NULL);
417
418 if (err)
419 return err;
420
421 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
422 cmd.resp_type = MMC_RSP_R3;
423
424 /*
425 * Most cards do not answer if some reserved bits
426 * in the OCR are set. However, some controllers
427 * can set bit 7 (reserved for low voltages), but
428 * how to manage low-voltage SD cards is not yet
429 * specified.
430 */
431 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
432 (mmc->cfg->voltages & 0xff8000);
433
434 if (mmc->version == SD_VERSION_2)
435 cmd.cmdarg |= OCR_HCS;
436
437 err = mmc_send_cmd(mmc, &cmd, NULL);
438
439 if (err)
440 return err;
441
442 if (cmd.response[0] & OCR_BUSY)
443 break;
444
445 if (timeout-- <= 0)
446 return -EOPNOTSUPP;
447
448 udelay(1000);
449 }
450
451 if (mmc->version != SD_VERSION_2)
452 mmc->version = SD_VERSION_1_0;
453
454 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
455 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
456 cmd.resp_type = MMC_RSP_R3;
457 cmd.cmdarg = 0;
458
459 err = mmc_send_cmd(mmc, &cmd, NULL);
460
461 if (err)
462 return err;
463 }
464
465 mmc->ocr = cmd.response[0];
466
467 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
468 mmc->rca = 0;
469
470 return 0;
471 }
472
473 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
474 {
475 struct mmc_cmd cmd;
476 int err;
477
478 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
479 cmd.resp_type = MMC_RSP_R3;
480 cmd.cmdarg = 0;
481 if (use_arg && !mmc_host_is_spi(mmc))
482 cmd.cmdarg = OCR_HCS |
483 (mmc->cfg->voltages &
484 (mmc->ocr & OCR_VOLTAGE_MASK)) |
485 (mmc->ocr & OCR_ACCESS_MODE);
486
487 err = mmc_send_cmd(mmc, &cmd, NULL);
488 if (err)
489 return err;
490 mmc->ocr = cmd.response[0];
491 return 0;
492 }
493
494 static int mmc_send_op_cond(struct mmc *mmc)
495 {
496 int err, i;
497
498 /* Some cards seem to need this */
499 mmc_go_idle(mmc);
500
501 /* Ask the card for its capabilities */
502 for (i = 0; i < 2; i++) {
503 err = mmc_send_op_cond_iter(mmc, i != 0);
504 if (err)
505 return err;
506
507 /* exit if not busy (flag seems to be inverted) */
508 if (mmc->ocr & OCR_BUSY)
509 break;
510 }
511 mmc->op_cond_pending = 1;
512 return 0;
513 }
514
515 static int mmc_complete_op_cond(struct mmc *mmc)
516 {
517 struct mmc_cmd cmd;
518 int timeout = 1000;
519 uint start;
520 int err;
521
522 mmc->op_cond_pending = 0;
523 if (!(mmc->ocr & OCR_BUSY)) {
524 /* Some cards seem to need this */
525 mmc_go_idle(mmc);
526
527 start = get_timer(0);
528 while (1) {
529 err = mmc_send_op_cond_iter(mmc, 1);
530 if (err)
531 return err;
532 if (mmc->ocr & OCR_BUSY)
533 break;
534 if (get_timer(start) > timeout)
535 return -EOPNOTSUPP;
536 udelay(100);
537 }
538 }
539
540 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
541 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
542 cmd.resp_type = MMC_RSP_R3;
543 cmd.cmdarg = 0;
544
545 err = mmc_send_cmd(mmc, &cmd, NULL);
546
547 if (err)
548 return err;
549
550 mmc->ocr = cmd.response[0];
551 }
552
553 mmc->version = MMC_VERSION_UNKNOWN;
554
555 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
556 mmc->rca = 1;
557
558 return 0;
559 }
560
561
562 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
563 {
564 struct mmc_cmd cmd;
565 struct mmc_data data;
566 int err;
567
568 /* Get the EXT_CSD register */
569 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
570 cmd.resp_type = MMC_RSP_R1;
571 cmd.cmdarg = 0;
572
573 data.dest = (char *)ext_csd;
574 data.blocks = 1;
575 data.blocksize = MMC_MAX_BLOCK_LEN;
576 data.flags = MMC_DATA_READ;
577
578 err = mmc_send_cmd(mmc, &cmd, &data);
579
580 return err;
581 }
582
583 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
584 {
585 struct mmc_cmd cmd;
586 int timeout = 1000;
587 int retries = 3;
588 int ret;
589
590 cmd.cmdidx = MMC_CMD_SWITCH;
591 cmd.resp_type = MMC_RSP_R1b;
592 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
593 (index << 16) |
594 (value << 8);
595
596 while (retries > 0) {
597 ret = mmc_send_cmd(mmc, &cmd, NULL);
598
599 /* Waiting for the ready status */
600 if (!ret) {
601 ret = mmc_send_status(mmc, timeout);
602 return ret;
603 }
604
605 retries--;
606 }
607
608 return ret;
609
610 }
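/*
 * Example (illustrative sketch): mmc_switch() is the generic CMD6 helper
 * used throughout this file to update a single EXT_CSD byte. Enabling the
 * high-speed timing, as mmc_set_card_speed() below does, looks like:
 *
 *	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
 *			 EXT_CSD_TIMING_HS);
 *	if (err)
 *		return err;
 */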
611
612 static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode)
613 {
614 int err;
615 int speed_bits;
616
617 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
618
619 switch (mode) {
620 case MMC_HS:
621 case MMC_HS_52:
622 case MMC_DDR_52:
623 speed_bits = EXT_CSD_TIMING_HS;
break;
624 case MMC_LEGACY:
625 speed_bits = EXT_CSD_TIMING_LEGACY;
626 break;
627 default:
628 return -EINVAL;
629 }
630 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
631 speed_bits);
632 if (err)
633 return err;
634
635 if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
636 /* Now check to see that it worked */
637 err = mmc_send_ext_csd(mmc, test_csd);
638 if (err)
639 return err;
640
641 /* No high-speed support */
642 if (!test_csd[EXT_CSD_HS_TIMING])
643 return -ENOTSUPP;
644 }
645
646 return 0;
647 }
648
649 static int mmc_get_capabilities(struct mmc *mmc)
650 {
651 u8 *ext_csd = mmc->ext_csd;
652 char cardtype;
653
654 mmc->card_caps = MMC_MODE_1BIT;
655
656 if (mmc_host_is_spi(mmc))
657 return 0;
658
659 /* Only version 4 supports high-speed */
660 if (mmc->version < MMC_VERSION_4)
661 return 0;
662
663 if (!ext_csd) {
664 printf("No ext_csd found!\n"); /* this should enver happen */
665 return -ENOTSUPP;
666 }
667
668 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
669
670 cardtype = ext_csd[EXT_CSD_CARD_TYPE] & 0xf;
671
672 /* High Speed is set, there are two types: 52MHz and 26MHz */
673 if (cardtype & EXT_CSD_CARD_TYPE_52) {
674 if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
675 mmc->card_caps |= MMC_MODE_DDR_52MHz;
676 mmc->card_caps |= MMC_MODE_HS_52MHz;
677 }
678 if (cardtype & EXT_CSD_CARD_TYPE_26)
679 mmc->card_caps |= MMC_MODE_HS;
680
681 return 0;
682 }
683
684 static int mmc_set_capacity(struct mmc *mmc, int part_num)
685 {
686 switch (part_num) {
687 case 0:
688 mmc->capacity = mmc->capacity_user;
689 break;
690 case 1:
691 case 2:
692 mmc->capacity = mmc->capacity_boot;
693 break;
694 case 3:
695 mmc->capacity = mmc->capacity_rpmb;
696 break;
697 case 4:
698 case 5:
699 case 6:
700 case 7:
701 mmc->capacity = mmc->capacity_gp[part_num - 4];
702 break;
703 default:
704 return -1;
705 }
706
707 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
708
709 return 0;
710 }
711
712 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
713 {
714 int ret;
715
716 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
717 (mmc->part_config & ~PART_ACCESS_MASK)
718 | (part_num & PART_ACCESS_MASK));
719
720 /*
721 * Set the capacity if the switch succeeded or was intended
722 * to return to representing the raw device.
723 */
724 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
725 ret = mmc_set_capacity(mmc, part_num);
726 mmc_get_blk_desc(mmc)->hwpart = part_num;
727 }
728
729 return ret;
730 }
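/*
 * Example (sketch, not part of the driver): selecting the first eMMC boot
 * partition for access and then returning to the user area. Partition
 * numbers follow the PART_ACCESS encoding used by mmc_set_capacity()
 * above (1/2 = boot, 3 = RPMB, 4-7 = GP, 0 = user area).
 *
 *	err = mmc_switch_part(mmc, 1);	// access boot partition 1
 *	...				// read/write the boot area
 *	err = mmc_switch_part(mmc, 0);	// back to the user area
 */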
731
732 int mmc_hwpart_config(struct mmc *mmc,
733 const struct mmc_hwpart_conf *conf,
734 enum mmc_hwpart_conf_mode mode)
735 {
736 u8 part_attrs = 0;
737 u32 enh_size_mult;
738 u32 enh_start_addr;
739 u32 gp_size_mult[4];
740 u32 max_enh_size_mult;
741 u32 tot_enh_size_mult = 0;
742 u8 wr_rel_set;
743 int i, pidx, err;
744 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
745
746 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
747 return -EINVAL;
748
749 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
750 printf("eMMC >= 4.4 required for enhanced user data area\n");
751 return -EMEDIUMTYPE;
752 }
753
754 if (!(mmc->part_support & PART_SUPPORT)) {
755 printf("Card does not support partitioning\n");
756 return -EMEDIUMTYPE;
757 }
758
759 if (!mmc->hc_wp_grp_size) {
760 printf("Card does not define HC WP group size\n");
761 return -EMEDIUMTYPE;
762 }
763
764 /* check partition alignment and total enhanced size */
765 if (conf->user.enh_size) {
766 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
767 conf->user.enh_start % mmc->hc_wp_grp_size) {
768 printf("User data enhanced area not HC WP group "
769 "size aligned\n");
770 return -EINVAL;
771 }
772 part_attrs |= EXT_CSD_ENH_USR;
773 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
774 if (mmc->high_capacity) {
775 enh_start_addr = conf->user.enh_start;
776 } else {
777 enh_start_addr = (conf->user.enh_start << 9);
778 }
779 } else {
780 enh_size_mult = 0;
781 enh_start_addr = 0;
782 }
783 tot_enh_size_mult += enh_size_mult;
784
785 for (pidx = 0; pidx < 4; pidx++) {
786 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
787 printf("GP%i partition not HC WP group size "
788 "aligned\n", pidx+1);
789 return -EINVAL;
790 }
791 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
792 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
793 part_attrs |= EXT_CSD_ENH_GP(pidx);
794 tot_enh_size_mult += gp_size_mult[pidx];
795 }
796 }
797
798 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
799 printf("Card does not support enhanced attribute\n");
800 return -EMEDIUMTYPE;
801 }
802
803 err = mmc_send_ext_csd(mmc, ext_csd);
804 if (err)
805 return err;
806
807 max_enh_size_mult =
808 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
809 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
810 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
811 if (tot_enh_size_mult > max_enh_size_mult) {
812 printf("Total enhanced size exceeds maximum (%u > %u)\n",
813 tot_enh_size_mult, max_enh_size_mult);
814 return -EMEDIUMTYPE;
815 }
816
817 /* The default value of EXT_CSD_WR_REL_SET is device
818 * dependent; the values can only be changed if the
819 * EXT_CSD_HS_CTRL_REL bit is set, and only once,
820 * before partitioning is completed. */
821 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
822 if (conf->user.wr_rel_change) {
823 if (conf->user.wr_rel_set)
824 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
825 else
826 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
827 }
828 for (pidx = 0; pidx < 4; pidx++) {
829 if (conf->gp_part[pidx].wr_rel_change) {
830 if (conf->gp_part[pidx].wr_rel_set)
831 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
832 else
833 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
834 }
835 }
836
837 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
838 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
839 puts("Card does not support host controlled partition write "
840 "reliability settings\n");
841 return -EMEDIUMTYPE;
842 }
843
844 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
845 EXT_CSD_PARTITION_SETTING_COMPLETED) {
846 printf("Card already partitioned\n");
847 return -EPERM;
848 }
849
850 if (mode == MMC_HWPART_CONF_CHECK)
851 return 0;
852
853 /* Partitioning requires high-capacity size definitions */
854 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
855 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
856 EXT_CSD_ERASE_GROUP_DEF, 1);
857
858 if (err)
859 return err;
860
861 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
862
863 /* update erase group size to be high-capacity */
864 mmc->erase_grp_size =
865 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
866
867 }
868
869 /* all OK, write the configuration */
870 for (i = 0; i < 4; i++) {
871 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
872 EXT_CSD_ENH_START_ADDR+i,
873 (enh_start_addr >> (i*8)) & 0xFF);
874 if (err)
875 return err;
876 }
877 for (i = 0; i < 3; i++) {
878 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
879 EXT_CSD_ENH_SIZE_MULT+i,
880 (enh_size_mult >> (i*8)) & 0xFF);
881 if (err)
882 return err;
883 }
884 for (pidx = 0; pidx < 4; pidx++) {
885 for (i = 0; i < 3; i++) {
886 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
887 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
888 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
889 if (err)
890 return err;
891 }
892 }
893 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
894 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
895 if (err)
896 return err;
897
898 if (mode == MMC_HWPART_CONF_SET)
899 return 0;
900
901 /* The WR_REL_SET is a write-once register but shall be
902 * written before setting PART_SETTING_COMPLETED. As it is
903 * write-once we can only write it when completing the
904 * partitioning. */
905 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
906 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
907 EXT_CSD_WR_REL_SET, wr_rel_set);
908 if (err)
909 return err;
910 }
911
912 /* Setting PART_SETTING_COMPLETED confirms the partition
913 * configuration but it only becomes effective after power
914 * cycle, so we do not adjust the partition related settings
915 * in the mmc struct. */
916
917 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
918 EXT_CSD_PARTITION_SETTING,
919 EXT_CSD_PARTITION_SETTING_COMPLETED);
920 if (err)
921 return err;
922
923 return 0;
924 }
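/*
 * Example (illustrative sketch): a minimal mmc_hwpart_conf that marks the
 * first 16 MiB of the user area as enhanced and leaves the GP partitions
 * untouched. Sizes are in 512-byte sectors and must be HC WP group
 * aligned, as checked above; the concrete values here are assumptions for
 * the example only.
 *
 *	struct mmc_hwpart_conf conf = { 0 };
 *
 *	conf.user.enh_start = 0;
 *	conf.user.enh_size = SZ_16M / 512;
 *
 *	err = mmc_hwpart_config(mmc, &conf, MMC_HWPART_CONF_CHECK);
 *	if (!err)
 *		err = mmc_hwpart_config(mmc, &conf, MMC_HWPART_CONF_COMPLETE);
 */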
925
926 #if !CONFIG_IS_ENABLED(DM_MMC)
927 int mmc_getcd(struct mmc *mmc)
928 {
929 int cd;
930
931 cd = board_mmc_getcd(mmc);
932
933 if (cd < 0) {
934 if (mmc->cfg->ops->getcd)
935 cd = mmc->cfg->ops->getcd(mmc);
936 else
937 cd = 1;
938 }
939
940 return cd;
941 }
942 #endif
943
944 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
945 {
946 struct mmc_cmd cmd;
947 struct mmc_data data;
948
949 /* Switch the frequency */
950 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
951 cmd.resp_type = MMC_RSP_R1;
952 cmd.cmdarg = (mode << 31) | 0xffffff;
953 cmd.cmdarg &= ~(0xf << (group * 4));
954 cmd.cmdarg |= value << (group * 4);
955
956 data.dest = (char *)resp;
957 data.blocksize = 64;
958 data.blocks = 1;
959 data.flags = MMC_DATA_READ;
960
961 return mmc_send_cmd(mmc, &cmd, &data);
962 }
963
964
965 static int sd_get_capabilities(struct mmc *mmc)
966 {
967 int err;
968 struct mmc_cmd cmd;
969 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
970 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
971 struct mmc_data data;
972 int timeout;
973
974 mmc->card_caps = MMC_MODE_1BIT;
975
976 if (mmc_host_is_spi(mmc))
977 return 0;
978
979 /* Read the SCR to find out if this card supports higher speeds */
980 cmd.cmdidx = MMC_CMD_APP_CMD;
981 cmd.resp_type = MMC_RSP_R1;
982 cmd.cmdarg = mmc->rca << 16;
983
984 err = mmc_send_cmd(mmc, &cmd, NULL);
985
986 if (err)
987 return err;
988
989 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
990 cmd.resp_type = MMC_RSP_R1;
991 cmd.cmdarg = 0;
992
993 timeout = 3;
994
995 retry_scr:
996 data.dest = (char *)scr;
997 data.blocksize = 8;
998 data.blocks = 1;
999 data.flags = MMC_DATA_READ;
1000
1001 err = mmc_send_cmd(mmc, &cmd, &data);
1002
1003 if (err) {
1004 if (timeout--)
1005 goto retry_scr;
1006
1007 return err;
1008 }
1009
1010 mmc->scr[0] = __be32_to_cpu(scr[0]);
1011 mmc->scr[1] = __be32_to_cpu(scr[1]);
1012
1013 switch ((mmc->scr[0] >> 24) & 0xf) {
1014 case 0:
1015 mmc->version = SD_VERSION_1_0;
1016 break;
1017 case 1:
1018 mmc->version = SD_VERSION_1_10;
1019 break;
1020 case 2:
1021 mmc->version = SD_VERSION_2;
1022 if ((mmc->scr[0] >> 15) & 0x1)
1023 mmc->version = SD_VERSION_3;
1024 break;
1025 default:
1026 mmc->version = SD_VERSION_1_0;
1027 break;
1028 }
1029
1030 if (mmc->scr[0] & SD_DATA_4BIT)
1031 mmc->card_caps |= MMC_MODE_4BIT;
1032
1033 /* Version 1.0 doesn't support switching */
1034 if (mmc->version == SD_VERSION_1_0)
1035 return 0;
1036
1037 timeout = 4;
1038 while (timeout--) {
1039 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1040 (u8 *)switch_status);
1041
1042 if (err)
1043 return err;
1044
1045 /* The high-speed function is busy. Try again */
1046 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1047 break;
1048 }
1049
1050 /* If high-speed is supported, add it to the card capabilities */
1051 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1052 mmc->card_caps |= MMC_CAP(SD_HS);
1053
1054 return 0;
1055 }
1056
1057 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1058 {
1059 int err;
1060
1061 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1062
1063 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, 1, (u8 *)switch_status);
1064 if (err)
1065 return err;
1066
1067 if ((__be32_to_cpu(switch_status[4]) & 0x0f000000) != 0x01000000)
1068 return -ENOTSUPP;
1069
1070 return 0;
1071 }
1072
1073 int sd_select_bus_width(struct mmc *mmc, int w)
1074 {
1075 int err;
1076 struct mmc_cmd cmd;
1077
1078 if ((w != 4) && (w != 1))
1079 return -EINVAL;
1080
1081 cmd.cmdidx = MMC_CMD_APP_CMD;
1082 cmd.resp_type = MMC_RSP_R1;
1083 cmd.cmdarg = mmc->rca << 16;
1084
1085 err = mmc_send_cmd(mmc, &cmd, NULL);
1086 if (err)
1087 return err;
1088
1089 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1090 cmd.resp_type = MMC_RSP_R1;
1091 if (w == 4)
1092 cmd.cmdarg = 2;
1093 else if (w == 1)
1094 cmd.cmdarg = 0;
1095 err = mmc_send_cmd(mmc, &cmd, NULL);
1096 if (err)
1097 return err;
1098
1099 return 0;
1100 }
1101
1102 static int sd_read_ssr(struct mmc *mmc)
1103 {
1104 int err, i;
1105 struct mmc_cmd cmd;
1106 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1107 struct mmc_data data;
1108 int timeout = 3;
1109 unsigned int au, eo, et, es;
1110
1111 cmd.cmdidx = MMC_CMD_APP_CMD;
1112 cmd.resp_type = MMC_RSP_R1;
1113 cmd.cmdarg = mmc->rca << 16;
1114
1115 err = mmc_send_cmd(mmc, &cmd, NULL);
1116 if (err)
1117 return err;
1118
1119 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1120 cmd.resp_type = MMC_RSP_R1;
1121 cmd.cmdarg = 0;
1122
1123 retry_ssr:
1124 data.dest = (char *)ssr;
1125 data.blocksize = 64;
1126 data.blocks = 1;
1127 data.flags = MMC_DATA_READ;
1128
1129 err = mmc_send_cmd(mmc, &cmd, &data);
1130 if (err) {
1131 if (timeout--)
1132 goto retry_ssr;
1133
1134 return err;
1135 }
1136
1137 for (i = 0; i < 16; i++)
1138 ssr[i] = be32_to_cpu(ssr[i]);
1139
1140 au = (ssr[2] >> 12) & 0xF;
1141 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1142 mmc->ssr.au = sd_au_size[au];
1143 es = (ssr[3] >> 24) & 0xFF;
1144 es |= (ssr[2] & 0xFF) << 8;
1145 et = (ssr[3] >> 18) & 0x3F;
1146 if (es && et) {
1147 eo = (ssr[3] >> 16) & 0x3;
1148 mmc->ssr.erase_timeout = (et * 1000) / es;
1149 mmc->ssr.erase_offset = eo * 1000;
1150 }
1151 } else {
1152 debug("Invalid Allocation Unit Size.\n");
1153 }
1154
1155 return 0;
1156 }
1157
1158 /* frequency bases */
1159 /* divided by 10 to be nice to platforms without floating point */
1160 static const int fbase[] = {
1161 10000,
1162 100000,
1163 1000000,
1164 10000000,
1165 };
1166
1167 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
1168 * to platforms without floating point.
1169 */
1170 static const u8 multipliers[] = {
1171 0, /* reserved */
1172 10,
1173 12,
1174 13,
1175 15,
1176 20,
1177 25,
1178 30,
1179 35,
1180 40,
1181 45,
1182 50,
1183 55,
1184 60,
1185 70,
1186 80,
1187 };
1188
1189 static inline int bus_width(uint cap)
1190 {
1191 if (cap == MMC_MODE_8BIT)
1192 return 8;
1193 if (cap == MMC_MODE_4BIT)
1194 return 4;
1195 if (cap == MMC_MODE_1BIT)
1196 return 1;
1197 printf("invalid bus witdh capability 0x%x\n", cap);
1198 return 0;
1199 }
1200
1201 #if !CONFIG_IS_ENABLED(DM_MMC)
1202 static void mmc_send_init_stream(struct mmc *mmc)
1203 {
1204 }
1205
1206 static int mmc_set_ios(struct mmc *mmc)
1207 {
1208 int ret = 0;
1209
1210 if (mmc->cfg->ops->set_ios)
1211 ret = mmc->cfg->ops->set_ios(mmc);
1212
1213 return ret;
1214 }
1215 #endif
1216
1217 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1218 {
1219 if (clock > mmc->cfg->f_max)
1220 clock = mmc->cfg->f_max;
1221
1222 if (clock < mmc->cfg->f_min)
1223 clock = mmc->cfg->f_min;
1224
1225 mmc->clock = clock;
1226 mmc->clk_disable = disable;
1227
1228 return mmc_set_ios(mmc);
1229 }
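/*
 * Note on the 'disable' argument (the subject of this commit): passing
 * true asks the host driver to gate the card clock while recording the
 * requested frequency, which mmc_power_off() below uses to stop the clock
 * before cutting VMMC. A sketch of the two typical calls:
 *
 *	mmc_set_clock(mmc, mmc->tran_speed, false);	// run the bus at full speed
 *	mmc_set_clock(mmc, 1, true);			// gate the clock (power-off path)
 */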
1230
1231 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1232 {
1233 mmc->bus_width = width;
1234
1235 return mmc_set_ios(mmc);
1236 }
1237
1238 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1239 /*
1240 * helper function to display the capabilities in a human
1241 * friendly manner. The capabilities include bus width and
1242 * supported modes.
1243 */
1244 void mmc_dump_capabilities(const char *text, uint caps)
1245 {
1246 enum bus_mode mode;
1247
1248 printf("%s: widths [", text);
1249 if (caps & MMC_MODE_8BIT)
1250 printf("8, ");
1251 if (caps & MMC_MODE_4BIT)
1252 printf("4, ");
1253 if (caps & MMC_MODE_1BIT)
1254 printf("1, ");
1255 printf("\b\b] modes [");
1256 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1257 if (MMC_CAP(mode) & caps)
1258 printf("%s, ", mmc_mode_name(mode));
1259 printf("\b\b]\n");
1260 }
1261 #endif
1262
1263 struct mode_width_tuning {
1264 enum bus_mode mode;
1265 uint widths;
1266 };
1267
1268 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1269 {
1270 mmc->signal_voltage = signal_voltage;
1271 return mmc_set_ios(mmc);
1272 }
1273
1274 static const struct mode_width_tuning sd_modes_by_pref[] = {
1275 {
1276 .mode = SD_HS,
1277 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1278 },
1279 {
1280 .mode = SD_LEGACY,
1281 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1282 }
1283 };
1284
1285 #define for_each_sd_mode_by_pref(caps, mwt) \
1286 for (mwt = sd_modes_by_pref;\
1287 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1288 mwt++) \
1289 if (caps & MMC_CAP(mwt->mode))
1290
1291 static int sd_select_mode_and_width(struct mmc *mmc)
1292 {
1293 int err;
1294 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1295 const struct mode_width_tuning *mwt;
1296
1297 err = sd_get_capabilities(mmc);
1298 if (err)
1299 return err;
1300 /* Restrict card's capabilities by what the host can do */
1301 mmc->card_caps &= (mmc->cfg->host_caps | MMC_MODE_1BIT);
1302
1303 for_each_sd_mode_by_pref(mmc->card_caps, mwt) {
1304 uint *w;
1305
1306 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1307 if (*w & mmc->card_caps & mwt->widths) {
1308 debug("trying mode %s width %d (at %d MHz)\n",
1309 mmc_mode_name(mwt->mode),
1310 bus_width(*w),
1311 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1312
1313 /* configure the bus width (card + host) */
1314 err = sd_select_bus_width(mmc, bus_width(*w));
1315 if (err)
1316 goto error;
1317 mmc_set_bus_width(mmc, bus_width(*w));
1318
1319 /* configure the bus mode (card) */
1320 err = sd_set_card_speed(mmc, mwt->mode);
1321 if (err)
1322 goto error;
1323
1324 /* configure the bus mode (host) */
1325 mmc_select_mode(mmc, mwt->mode);
1326 mmc_set_clock(mmc, mmc->tran_speed, false);
1327
1328 err = sd_read_ssr(mmc);
1329 if (!err)
1330 return 0;
1331
1332 printf("bad ssr\n");
1333
1334 error:
1335 /* revert to a safer bus speed */
1336 mmc_select_mode(mmc, SD_LEGACY);
1337 mmc_set_clock(mmc, mmc->tran_speed, false);
1338 }
1339 }
1340 }
1341
1342 printf("unable to select a mode\n");
1343 return -ENOTSUPP;
1344 }
1345
1346 /*
1347 * Read and compare the part of the ext_csd that is constant.
1348 * This can be used to check that the transfer is working
1349 * as expected.
1350 */
1351 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1352 {
1353 int err;
1354 const u8 *ext_csd = mmc->ext_csd;
1355 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1356
1357 err = mmc_send_ext_csd(mmc, test_csd);
1358 if (err)
1359 return err;
1360
1361 /* Only compare read only fields */
1362 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1363 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1364 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1365 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1366 ext_csd[EXT_CSD_REV]
1367 == test_csd[EXT_CSD_REV] &&
1368 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1369 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1370 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1371 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1372 return 0;
1373
1374 return -EBADMSG;
1375 }
1376
1377 static const struct mode_width_tuning mmc_modes_by_pref[] = {
1378 {
1379 .mode = MMC_HS_200,
1380 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1381 },
1382 {
1383 .mode = MMC_DDR_52,
1384 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1385 },
1386 {
1387 .mode = MMC_HS_52,
1388 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1389 },
1390 {
1391 .mode = MMC_HS,
1392 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1393 },
1394 {
1395 .mode = MMC_LEGACY,
1396 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1397 }
1398 };
1399
1400 #define for_each_mmc_mode_by_pref(caps, mwt) \
1401 for (mwt = mmc_modes_by_pref;\
1402 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1403 mwt++) \
1404 if (caps & MMC_CAP(mwt->mode))
1405
1406 static const struct ext_csd_bus_width {
1407 uint cap;
1408 bool is_ddr;
1409 uint ext_csd_bits;
1410 } ext_csd_bus_width[] = {
1411 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1412 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1413 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1414 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1415 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
1416 };
1417
1418 #define for_each_supported_width(caps, ddr, ecbv) \
1419 for (ecbv = ext_csd_bus_width;\
1420 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
1421 ecbv++) \
1422 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
1423
1424 static int mmc_select_mode_and_width(struct mmc *mmc)
1425 {
1426 int err;
1427 const struct mode_width_tuning *mwt;
1428 const struct ext_csd_bus_width *ecbw;
1429
1430 err = mmc_get_capabilities(mmc);
1431 if (err)
1432 return err;
1433
1434 /* Restrict card's capabilities by what the host can do */
1435 mmc->card_caps &= (mmc->cfg->host_caps | MMC_MODE_1BIT);
1436
1437 /* Only version 4 of MMC supports wider bus widths */
1438 if (mmc->version < MMC_VERSION_4)
1439 return 0;
1440
1441 if (!mmc->ext_csd) {
1442 debug("No ext_csd found!\n"); /* this should enver happen */
1443 return -ENOTSUPP;
1444 }
1445
1446 for_each_mmc_mode_by_pref(mmc->card_caps, mwt) {
1447 for_each_supported_width(mmc->card_caps & mwt->widths,
1448 mmc_is_mode_ddr(mwt->mode), ecbw) {
1449 debug("trying mode %s width %d (at %d MHz)\n",
1450 mmc_mode_name(mwt->mode),
1451 bus_width(ecbw->cap),
1452 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1453 /* configure the bus width (card + host) */
1454 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1455 EXT_CSD_BUS_WIDTH,
1456 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
1457 if (err)
1458 goto error;
1459 mmc_set_bus_width(mmc, bus_width(ecbw->cap));
1460
1461 /* configure the bus speed (card) */
1462 err = mmc_set_card_speed(mmc, mwt->mode);
1463 if (err)
1464 goto error;
1465
1466 /*
1467 * configure the bus width AND the ddr mode (card)
1468 * The host side will be taken care of in the next step
1469 */
1470 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
1471 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1472 EXT_CSD_BUS_WIDTH,
1473 ecbw->ext_csd_bits);
1474 if (err)
1475 goto error;
1476 }
1477
1478 /* configure the bus mode (host) */
1479 mmc_select_mode(mmc, mwt->mode);
1480 mmc_set_clock(mmc, mmc->tran_speed, false);
1481
1482 /* do a transfer to check the configuration */
1483 err = mmc_read_and_compare_ext_csd(mmc);
1484 if (!err)
1485 return 0;
1486 error:
1487 /* if an error occurred, revert to a safer bus mode */
1488 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1489 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
1490 mmc_select_mode(mmc, MMC_LEGACY);
1491 mmc_set_bus_width(mmc, 1);
1492 }
1493 }
1494
1495 printf("unable to select a mode\n");
1496
1497 return -ENOTSUPP;
1498 }
1499
1500 static int mmc_startup_v4(struct mmc *mmc)
1501 {
1502 int err, i;
1503 u64 capacity;
1504 bool has_parts = false;
1505 bool part_completed;
1506 u8 *ext_csd;
1507
1508 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
1509 return 0;
1510
1511 ext_csd = malloc_cache_aligned(MMC_MAX_BLOCK_LEN);
1512 if (!ext_csd)
1513 return -ENOMEM;
1514
1515 mmc->ext_csd = ext_csd;
1516
1517 /* check ext_csd version and capacity */
1518 err = mmc_send_ext_csd(mmc, ext_csd);
1519 if (err)
1520 return err;
1521 if (ext_csd[EXT_CSD_REV] >= 2) {
1522 /*
1523 * According to the JEDEC Standard, the value of
1524 * ext_csd's capacity is valid if the value is more
1525 * than 2GB
1526 */
1527 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
1528 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
1529 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
1530 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
1531 capacity *= MMC_MAX_BLOCK_LEN;
1532 if ((capacity >> 20) > 2 * 1024)
1533 mmc->capacity_user = capacity;
1534 }
1535
1536 switch (ext_csd[EXT_CSD_REV]) {
1537 case 1:
1538 mmc->version = MMC_VERSION_4_1;
1539 break;
1540 case 2:
1541 mmc->version = MMC_VERSION_4_2;
1542 break;
1543 case 3:
1544 mmc->version = MMC_VERSION_4_3;
1545 break;
1546 case 5:
1547 mmc->version = MMC_VERSION_4_41;
1548 break;
1549 case 6:
1550 mmc->version = MMC_VERSION_4_5;
1551 break;
1552 case 7:
1553 mmc->version = MMC_VERSION_5_0;
1554 break;
1555 case 8:
1556 mmc->version = MMC_VERSION_5_1;
1557 break;
1558 }
1559
1560 /* The partition data may be non-zero but it is only
1561 * effective if PARTITION_SETTING_COMPLETED is set in
1562 * EXT_CSD, so ignore any data if this bit is not set,
1563 * except for enabling the high-capacity group size
1564 * definition (see below).
1565 */
1566 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
1567 EXT_CSD_PARTITION_SETTING_COMPLETED);
1568
1569 /* store the partition info of emmc */
1570 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
1571 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
1572 ext_csd[EXT_CSD_BOOT_MULT])
1573 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
1574 if (part_completed &&
1575 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
1576 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
1577
1578 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
1579
1580 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
1581
1582 for (i = 0; i < 4; i++) {
1583 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
1584 uint mult = (ext_csd[idx + 2] << 16) +
1585 (ext_csd[idx + 1] << 8) + ext_csd[idx];
1586 if (mult)
1587 has_parts = true;
1588 if (!part_completed)
1589 continue;
1590 mmc->capacity_gp[i] = mult;
1591 mmc->capacity_gp[i] *=
1592 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
1593 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1594 mmc->capacity_gp[i] <<= 19;
1595 }
1596
1597 if (part_completed) {
1598 mmc->enh_user_size =
1599 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
1600 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
1601 ext_csd[EXT_CSD_ENH_SIZE_MULT];
1602 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
1603 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1604 mmc->enh_user_size <<= 19;
1605 mmc->enh_user_start =
1606 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
1607 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
1608 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
1609 ext_csd[EXT_CSD_ENH_START_ADDR];
1610 if (mmc->high_capacity)
1611 mmc->enh_user_start <<= 9;
1612 }
1613
1614 /*
1615 * The host needs to enable the ERASE_GRP_DEF bit if the device is
1616 * partitioned. This bit is lost after every reset or power-off,
1617 * which affects the erase size.
1618 */
1619 if (part_completed)
1620 has_parts = true;
1621 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
1622 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
1623 has_parts = true;
1624 if (has_parts) {
1625 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1626 EXT_CSD_ERASE_GROUP_DEF, 1);
1627
1628 if (err)
1629 return err;
1630
1631 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1632 }
1633
1634 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
1635 /* Read out group size from ext_csd */
1636 mmc->erase_grp_size =
1637 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1638 /*
1639 * if high capacity and partition setting completed
1640 * SEC_COUNT is valid even if it is smaller than 2 GiB
1641 * JEDEC Standard JESD84-B45, 6.2.4
1642 */
1643 if (mmc->high_capacity && part_completed) {
1644 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
1645 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
1646 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
1647 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
1648 capacity *= MMC_MAX_BLOCK_LEN;
1649 mmc->capacity_user = capacity;
1650 }
1651 } else {
1652 /* Calculate the group size from the csd value. */
1653 int erase_gsz, erase_gmul;
1654
1655 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
1656 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
1657 mmc->erase_grp_size = (erase_gsz + 1)
1658 * (erase_gmul + 1);
1659 }
1660
1661 mmc->hc_wp_grp_size = 1024
1662 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1663 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1664
1665 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1666
1667 return 0;
1668 }
1669
1670 static int mmc_startup(struct mmc *mmc)
1671 {
1672 int err, i;
1673 uint mult, freq;
1674 u64 cmult, csize;
1675 struct mmc_cmd cmd;
1676 struct blk_desc *bdesc;
1677
1678 #ifdef CONFIG_MMC_SPI_CRC_ON
1679 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
1680 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
1681 cmd.resp_type = MMC_RSP_R1;
1682 cmd.cmdarg = 1;
1683 err = mmc_send_cmd(mmc, &cmd, NULL);
1684
1685 if (err)
1686 return err;
1687 }
1688 #endif
1689
1690 /* Put the Card in Identify Mode */
1691 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
1692 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
1693 cmd.resp_type = MMC_RSP_R2;
1694 cmd.cmdarg = 0;
1695
1696 err = mmc_send_cmd(mmc, &cmd, NULL);
1697
1698 if (err)
1699 return err;
1700
1701 memcpy(mmc->cid, cmd.response, 16);
1702
1703 /*
1704 * For MMC cards, set the Relative Address.
1705 * For SD cards, get the Relative Address.
1706 * This also puts the cards into Standby State
1707 */
1708 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
1709 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
1710 cmd.cmdarg = mmc->rca << 16;
1711 cmd.resp_type = MMC_RSP_R6;
1712
1713 err = mmc_send_cmd(mmc, &cmd, NULL);
1714
1715 if (err)
1716 return err;
1717
1718 if (IS_SD(mmc))
1719 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
1720 }
1721
1722 /* Get the Card-Specific Data */
1723 cmd.cmdidx = MMC_CMD_SEND_CSD;
1724 cmd.resp_type = MMC_RSP_R2;
1725 cmd.cmdarg = mmc->rca << 16;
1726
1727 err = mmc_send_cmd(mmc, &cmd, NULL);
1728
1729 if (err)
1730 return err;
1731
1732 mmc->csd[0] = cmd.response[0];
1733 mmc->csd[1] = cmd.response[1];
1734 mmc->csd[2] = cmd.response[2];
1735 mmc->csd[3] = cmd.response[3];
1736
1737 if (mmc->version == MMC_VERSION_UNKNOWN) {
1738 int version = (cmd.response[0] >> 26) & 0xf;
1739
1740 switch (version) {
1741 case 0:
1742 mmc->version = MMC_VERSION_1_2;
1743 break;
1744 case 1:
1745 mmc->version = MMC_VERSION_1_4;
1746 break;
1747 case 2:
1748 mmc->version = MMC_VERSION_2_2;
1749 break;
1750 case 3:
1751 mmc->version = MMC_VERSION_3;
1752 break;
1753 case 4:
1754 mmc->version = MMC_VERSION_4;
1755 break;
1756 default:
1757 mmc->version = MMC_VERSION_1_2;
1758 break;
1759 }
1760 }
1761
1762 /* divide frequency by 10, since the mults are 10x bigger */
1763 freq = fbase[(cmd.response[0] & 0x7)];
1764 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
1765
1766 mmc->legacy_speed = freq * mult;
1767 mmc_select_mode(mmc, MMC_LEGACY);
1768
1769 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
1770 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
1771
1772 if (IS_SD(mmc))
1773 mmc->write_bl_len = mmc->read_bl_len;
1774 else
1775 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
1776
1777 if (mmc->high_capacity) {
1778 csize = (mmc->csd[1] & 0x3f) << 16
1779 | (mmc->csd[2] & 0xffff0000) >> 16;
1780 cmult = 8;
1781 } else {
1782 csize = (mmc->csd[1] & 0x3ff) << 2
1783 | (mmc->csd[2] & 0xc0000000) >> 30;
1784 cmult = (mmc->csd[2] & 0x00038000) >> 15;
1785 }
1786
1787 mmc->capacity_user = (csize + 1) << (cmult + 2);
1788 mmc->capacity_user *= mmc->read_bl_len;
1789 mmc->capacity_boot = 0;
1790 mmc->capacity_rpmb = 0;
1791 for (i = 0; i < 4; i++)
1792 mmc->capacity_gp[i] = 0;
1793
1794 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
1795 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
1796
1797 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
1798 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
1799
1800 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
1801 cmd.cmdidx = MMC_CMD_SET_DSR;
1802 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
1803 cmd.resp_type = MMC_RSP_NONE;
1804 if (mmc_send_cmd(mmc, &cmd, NULL))
1805 printf("MMC: SET_DSR failed\n");
1806 }
1807
1808 /* Select the card, and put it into Transfer Mode */
1809 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
1810 cmd.cmdidx = MMC_CMD_SELECT_CARD;
1811 cmd.resp_type = MMC_RSP_R1;
1812 cmd.cmdarg = mmc->rca << 16;
1813 err = mmc_send_cmd(mmc, &cmd, NULL);
1814
1815 if (err)
1816 return err;
1817 }
1818
1819 /*
1820 * For SD, its erase group is always one sector
1821 */
1822 mmc->erase_grp_size = 1;
1823 mmc->part_config = MMCPART_NOAVAILABLE;
1824
1825 err = mmc_startup_v4(mmc);
1826 if (err)
1827 return err;
1828
1829 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
1830 if (err)
1831 return err;
1832
1833 if (IS_SD(mmc))
1834 err = sd_select_mode_and_width(mmc);
1835 else
1836 err = mmc_select_mode_and_width(mmc);
1837
1838 if (err)
1839 return err;
1840
1841
1842 /* Fix the block length for DDR mode */
1843 if (mmc->ddr_mode) {
1844 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
1845 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
1846 }
1847
1848 /* fill in device description */
1849 bdesc = mmc_get_blk_desc(mmc);
1850 bdesc->lun = 0;
1851 bdesc->hwpart = 0;
1852 bdesc->type = 0;
1853 bdesc->blksz = mmc->read_bl_len;
1854 bdesc->log2blksz = LOG2(bdesc->blksz);
1855 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
1856 #if !defined(CONFIG_SPL_BUILD) || \
1857 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
1858 !defined(CONFIG_USE_TINY_PRINTF))
1859 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
1860 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
1861 (mmc->cid[3] >> 16) & 0xffff);
1862 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
1863 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
1864 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
1865 (mmc->cid[2] >> 24) & 0xff);
1866 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
1867 (mmc->cid[2] >> 16) & 0xf);
1868 #else
1869 bdesc->vendor[0] = 0;
1870 bdesc->product[0] = 0;
1871 bdesc->revision[0] = 0;
1872 #endif
1873 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
1874 part_init(bdesc);
1875 #endif
1876
1877 return 0;
1878 }
1879
1880 static int mmc_send_if_cond(struct mmc *mmc)
1881 {
1882 struct mmc_cmd cmd;
1883 int err;
1884
1885 cmd.cmdidx = SD_CMD_SEND_IF_COND;
1886 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
1887 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
1888 cmd.resp_type = MMC_RSP_R7;
1889
1890 err = mmc_send_cmd(mmc, &cmd, NULL);
1891
1892 if (err)
1893 return err;
1894
1895 if ((cmd.response[0] & 0xff) != 0xaa)
1896 return -EOPNOTSUPP;
1897 else
1898 mmc->version = SD_VERSION_2;
1899
1900 return 0;
1901 }
1902
1903 #if !CONFIG_IS_ENABLED(DM_MMC)
1904 /* board-specific MMC power initializations. */
1905 __weak void board_mmc_power_init(void)
1906 {
1907 }
1908 #endif
1909
1910 static int mmc_power_init(struct mmc *mmc)
1911 {
1912 #if CONFIG_IS_ENABLED(DM_MMC)
1913 #if CONFIG_IS_ENABLED(DM_REGULATOR)
1914 int ret;
1915
1916 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
1917 &mmc->vmmc_supply);
1918 if (ret)
1919 debug("%s: No vmmc supply\n", mmc->dev->name);
1920
1921 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
1922 &mmc->vqmmc_supply);
1923 if (ret)
1924 debug("%s: No vqmmc supply\n", mmc->dev->name);
1925 #endif
1926 #else /* !CONFIG_DM_MMC */
1927 /*
1928 * Driver model should use a regulator, as above, rather than calling
1929 * out to board code.
1930 */
1931 board_mmc_power_init();
1932 #endif
1933 return 0;
1934 }
1935
1936 /*
1937 * put the host in the initial state:
1938 * - turn on Vdd (card power supply)
1939 * - configure the bus width and clock to minimal values
1940 */
1941 static void mmc_set_initial_state(struct mmc *mmc)
1942 {
1943 int err;
1944
1945 /* First try to set 3.3V. If it fails set to 1.8V */
1946 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
1947 if (err != 0)
1948 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
1949 if (err != 0)
1950 printf("mmc: failed to set signal voltage\n");
1951
1952 mmc_select_mode(mmc, MMC_LEGACY);
1953 mmc_set_bus_width(mmc, 1);
1954 mmc_set_clock(mmc, 0, false);
1955 }
1956
1957 static int mmc_power_on(struct mmc *mmc)
1958 {
1959 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
1960 if (mmc->vmmc_supply) {
1961 int ret = regulator_set_enable(mmc->vmmc_supply, true);
1962
1963 if (ret) {
1964 puts("Error enabling VMMC supply\n");
1965 return ret;
1966 }
1967 }
1968 #endif
1969 return 0;
1970 }
1971
1972 static int mmc_power_off(struct mmc *mmc)
1973 {
1974 mmc_set_clock(mmc, 1, true);
1975 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
1976 if (mmc->vmmc_supply) {
1977 int ret = regulator_set_enable(mmc->vmmc_supply, false);
1978
1979 if (ret) {
1980 puts("Error disabling VMMC supply\n");
1981 return ret;
1982 }
1983 }
1984 #endif
1985 return 0;
1986 }
1987
1988 static int mmc_power_cycle(struct mmc *mmc)
1989 {
1990 int ret;
1991
1992 ret = mmc_power_off(mmc);
1993 if (ret)
1994 return ret;
1995 /*
1996 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
1997 * to be on the safer side.
1998 */
1999 udelay(2000);
2000 return mmc_power_on(mmc);
2001 }
2002
2003 int mmc_start_init(struct mmc *mmc)
2004 {
2005 bool no_card;
2006 int err;
2007
2008 /* we pretend there's no card when init is NULL */
2009 no_card = mmc_getcd(mmc) == 0;
2010 #if !CONFIG_IS_ENABLED(DM_MMC)
2011 no_card = no_card || (mmc->cfg->ops->init == NULL);
2012 #endif
2013 if (no_card) {
2014 mmc->has_init = 0;
2015 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2016 printf("MMC: no card present\n");
2017 #endif
2018 return -ENOMEDIUM;
2019 }
2020
2021 if (mmc->has_init)
2022 return 0;
2023
2024 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
2025 mmc_adapter_card_type_ident();
2026 #endif
2027 err = mmc_power_init(mmc);
2028 if (err)
2029 return err;
2030
2031 err = mmc_power_on(mmc);
2032 if (err)
2033 return err;
2034
2035 #if CONFIG_IS_ENABLED(DM_MMC)
2036 /* The device has already been probed ready for use */
2037 #else
2038 /* made sure it's not NULL earlier */
2039 err = mmc->cfg->ops->init(mmc);
2040 if (err)
2041 return err;
2042 #endif
2043 mmc->ddr_mode = 0;
2044
2045 mmc_set_initial_state(mmc);
2046 mmc_send_init_stream(mmc);
2047
2048 /* Reset the Card */
2049 err = mmc_go_idle(mmc);
2050
2051 if (err)
2052 return err;
2053
2054 /* The internal partition resets to the user partition (0) on every CMD0 */
2055 mmc_get_blk_desc(mmc)->hwpart = 0;
2056
2057 /* Test for SD version 2 */
2058 err = mmc_send_if_cond(mmc);
2059
2060 /* Now try to get the SD card's operating condition */
2061 err = sd_send_op_cond(mmc);
2062
2063 /* If the command timed out, we check for an MMC card */
2064 if (err == -ETIMEDOUT) {
2065 err = mmc_send_op_cond(mmc);
2066
2067 if (err) {
2068 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2069 printf("Card did not respond to voltage select!\n");
2070 #endif
2071 return -EOPNOTSUPP;
2072 }
2073 }
2074
2075 if (!err)
2076 mmc->init_in_progress = 1;
2077
2078 return err;
2079 }
2080
2081 static int mmc_complete_init(struct mmc *mmc)
2082 {
2083 int err = 0;
2084
2085 mmc->init_in_progress = 0;
2086 if (mmc->op_cond_pending)
2087 err = mmc_complete_op_cond(mmc);
2088
2089 if (!err)
2090 err = mmc_startup(mmc);
2091 if (err)
2092 mmc->has_init = 0;
2093 else
2094 mmc->has_init = 1;
2095 return err;
2096 }
2097
2098 int mmc_init(struct mmc *mmc)
2099 {
2100 int err = 0;
2101 __maybe_unused unsigned start;
2102 #if CONFIG_IS_ENABLED(DM_MMC)
2103 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2104
2105 upriv->mmc = mmc;
2106 #endif
2107 if (mmc->has_init)
2108 return 0;
2109
2110 start = get_timer(0);
2111
2112 if (!mmc->init_in_progress)
2113 err = mmc_start_init(mmc);
2114
2115 if (!err)
2116 err = mmc_complete_init(mmc);
2117 if (err)
2118 printf("%s: %d, time %lu\n", __func__, err, get_timer(start));
2119
2120 return err;
2121 }
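/*
 * Typical usage from board or command code (sketch only; error handling
 * trimmed). The device number 0 is an assumption for the example.
 *
 *	struct mmc *mmc = find_mmc_device(0);
 *
 *	if (!mmc || mmc_init(mmc))
 *		return -ENODEV;
 *	printf("capacity: %llu bytes\n", mmc->capacity);
 */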
2122
2123 int mmc_set_dsr(struct mmc *mmc, u16 val)
2124 {
2125 mmc->dsr = val;
2126 return 0;
2127 }
2128
2129 /* CPU-specific MMC initializations */
2130 __weak int cpu_mmc_init(bd_t *bis)
2131 {
2132 return -1;
2133 }
2134
2135 /* board-specific MMC initializations. */
2136 __weak int board_mmc_init(bd_t *bis)
2137 {
2138 return -1;
2139 }
2140
2141 void mmc_set_preinit(struct mmc *mmc, int preinit)
2142 {
2143 mmc->preinit = preinit;
2144 }
2145
2146 #if CONFIG_IS_ENABLED(DM_MMC) && defined(CONFIG_SPL_BUILD)
2147 static int mmc_probe(bd_t *bis)
2148 {
2149 return 0;
2150 }
2151 #elif CONFIG_IS_ENABLED(DM_MMC)
2152 static int mmc_probe(bd_t *bis)
2153 {
2154 int ret, i;
2155 struct uclass *uc;
2156 struct udevice *dev;
2157
2158 ret = uclass_get(UCLASS_MMC, &uc);
2159 if (ret)
2160 return ret;
2161
2162 /*
2163 * Try to add them in sequence order. Really with driver model we
2164 * should allow holes, but the current MMC list does not allow that.
2165 * So if we request 0, 1, 3 we will get 0, 1, 2.
2166 */
2167 for (i = 0; ; i++) {
2168 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
2169 if (ret == -ENODEV)
2170 break;
2171 }
2172 uclass_foreach_dev(dev, uc) {
2173 ret = device_probe(dev);
2174 if (ret)
2175 printf("%s - probe failed: %d\n", dev->name, ret);
2176 }
2177
2178 return 0;
2179 }
2180 #else
2181 static int mmc_probe(bd_t *bis)
2182 {
2183 if (board_mmc_init(bis) < 0)
2184 cpu_mmc_init(bis);
2185
2186 return 0;
2187 }
2188 #endif
2189
2190 int mmc_initialize(bd_t *bis)
2191 {
2192 static int initialized = 0;
2193 int ret;
2194 if (initialized) /* Avoid initializing mmc multiple times */
2195 return 0;
2196 initialized = 1;
2197
2198 #if !CONFIG_IS_ENABLED(BLK)
2199 #if !CONFIG_IS_ENABLED(MMC_TINY)
2200 mmc_list_init();
2201 #endif
2202 #endif
2203 ret = mmc_probe(bis);
2204 if (ret)
2205 return ret;
2206
2207 #ifndef CONFIG_SPL_BUILD
2208 print_mmc_devices(',');
2209 #endif
2210
2211 mmc_do_preinit();
2212 return 0;
2213 }
2214
2215 #ifdef CONFIG_CMD_BKOPS_ENABLE
2216 int mmc_set_bkops_enable(struct mmc *mmc)
2217 {
2218 int err;
2219 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2220
2221 err = mmc_send_ext_csd(mmc, ext_csd);
2222 if (err) {
2223 puts("Could not get ext_csd register values\n");
2224 return err;
2225 }
2226
2227 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
2228 puts("Background operations not supported on device\n");
2229 return -EMEDIUMTYPE;
2230 }
2231
2232 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
2233 puts("Background operations already enabled\n");
2234 return 0;
2235 }
2236
2237 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
2238 if (err) {
2239 puts("Failed to enable manual background operations\n");
2240 return err;
2241 }
2242
2243 puts("Enabled manual background operations\n");
2244
2245 return 0;
2246 }
2247 #endif