git.ipfire.org Git - people/ms/u-boot.git - drivers/mmc/mmc.c
mmc: Enable signal voltage to be selected from mmc core
1 /*
2 * Copyright 2008, Freescale Semiconductor, Inc
3 * Andy Fleming
4 *
5 * Based vaguely on the Linux code
6 *
7 * SPDX-License-Identifier: GPL-2.0+
8 */
9
10 #include <config.h>
11 #include <common.h>
12 #include <command.h>
13 #include <dm.h>
14 #include <dm/device-internal.h>
15 #include <errno.h>
16 #include <mmc.h>
17 #include <part.h>
18 #include <power/regulator.h>
19 #include <malloc.h>
20 #include <memalign.h>
21 #include <linux/list.h>
22 #include <div64.h>
23 #include "mmc_private.h"
24
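/*
 * SD allocation unit (AU) sizes in 512-byte sectors, indexed by the
 * AU_SIZE field of the SD Status register (see sd_read_ssr()).
 */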
25 static const unsigned int sd_au_size[] = {
26 0, SZ_16K / 512, SZ_32K / 512,
27 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
28 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
29 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
30 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512, SZ_64M / 512,
31 };
32
33 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
34
35 #if CONFIG_IS_ENABLED(MMC_TINY)
36 static struct mmc mmc_static;
37 struct mmc *find_mmc_device(int dev_num)
38 {
39 return &mmc_static;
40 }
41
42 void mmc_do_preinit(void)
43 {
44 struct mmc *m = &mmc_static;
45 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
46 mmc_set_preinit(m, 1);
47 #endif
48 if (m->preinit)
49 mmc_start_init(m);
50 }
51
52 struct blk_desc *mmc_get_blk_desc(struct mmc *mmc)
53 {
54 return &mmc->block_dev;
55 }
56 #endif
57
58 #if !CONFIG_IS_ENABLED(DM_MMC)
59 __weak int board_mmc_getwp(struct mmc *mmc)
60 {
61 return -1;
62 }
63
64 int mmc_getwp(struct mmc *mmc)
65 {
66 int wp;
67
68 wp = board_mmc_getwp(mmc);
69
70 if (wp < 0) {
71 if (mmc->cfg->ops->getwp)
72 wp = mmc->cfg->ops->getwp(mmc);
73 else
74 wp = 0;
75 }
76
77 return wp;
78 }
79
80 __weak int board_mmc_getcd(struct mmc *mmc)
81 {
82 return -1;
83 }
84 #endif
85
86 #ifdef CONFIG_MMC_TRACE
87 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
88 {
89 printf("CMD_SEND:%d\n", cmd->cmdidx);
90 printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
91 }
92
93 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
94 {
95 int i;
96 u8 *ptr;
97
98 if (ret) {
99 printf("\t\tRET\t\t\t %d\n", ret);
100 } else {
101 switch (cmd->resp_type) {
102 case MMC_RSP_NONE:
103 printf("\t\tMMC_RSP_NONE\n");
104 break;
105 case MMC_RSP_R1:
106 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
107 cmd->response[0]);
108 break;
109 case MMC_RSP_R1b:
110 printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
111 cmd->response[0]);
112 break;
113 case MMC_RSP_R2:
114 printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
115 cmd->response[0]);
116 printf("\t\t \t\t 0x%08X \n",
117 cmd->response[1]);
118 printf("\t\t \t\t 0x%08X \n",
119 cmd->response[2]);
120 printf("\t\t \t\t 0x%08X \n",
121 cmd->response[3]);
122 printf("\n");
123 printf("\t\t\t\t\tDUMPING DATA\n");
124 for (i = 0; i < 4; i++) {
125 int j;
126 printf("\t\t\t\t\t%03d - ", i*4);
127 ptr = (u8 *)&cmd->response[i];
128 ptr += 3;
129 for (j = 0; j < 4; j++)
130 printf("%02X ", *ptr--);
131 printf("\n");
132 }
133 break;
134 case MMC_RSP_R3:
135 printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
136 cmd->response[0]);
137 break;
138 default:
139 printf("\t\tERROR MMC rsp not supported\n");
140 break;
141 }
142 }
143 }
144
145 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
146 {
147 int status;
148
149 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
150 printf("CURR STATE:%d\n", status);
151 }
152 #endif
153
154 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
155 const char *mmc_mode_name(enum bus_mode mode)
156 {
157 static const char *const names[] = {
158 [MMC_LEGACY] = "MMC legacy",
159 [SD_LEGACY] = "SD Legacy",
160 [MMC_HS] = "MMC High Speed (26MHz)",
161 [SD_HS] = "SD High Speed (50MHz)",
162 [UHS_SDR12] = "UHS SDR12 (25MHz)",
163 [UHS_SDR25] = "UHS SDR25 (50MHz)",
164 [UHS_SDR50] = "UHS SDR50 (100MHz)",
165 [UHS_SDR104] = "UHS SDR104 (208MHz)",
166 [UHS_DDR50] = "UHS DDR50 (50MHz)",
167 [MMC_HS_52] = "MMC High Speed (52MHz)",
168 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
169 [MMC_HS_200] = "HS200 (200MHz)",
170 };
171
172 if (mode >= MMC_MODES_END)
173 return "Unknown mode";
174 else
175 return names[mode];
176 }
177 #endif
178
179 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
180 {
181 static const int freqs[] = {
182 [SD_LEGACY] = 25000000,
183 [MMC_HS] = 26000000,
184 [SD_HS] = 50000000,
185 [UHS_SDR12] = 25000000,
186 [UHS_SDR25] = 50000000,
187 [UHS_SDR50] = 100000000,
188 [UHS_SDR104] = 208000000,
189 [UHS_DDR50] = 50000000,
190 [MMC_HS_52] = 52000000,
191 [MMC_DDR_52] = 52000000,
192 [MMC_HS_200] = 200000000,
193 };
194
195 if (mode == MMC_LEGACY)
196 return mmc->legacy_speed;
197 else if (mode >= MMC_MODES_END)
198 return 0;
199 else
200 return freqs[mode];
201 }
202
203 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
204 {
205 mmc->selected_mode = mode;
206 mmc->tran_speed = mmc_mode2freq(mmc, mode);
207 mmc->ddr_mode = mmc_is_mode_ddr(mode);
208 debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
209 mmc->tran_speed / 1000000);
210 return 0;
211 }
212
213 #if !CONFIG_IS_ENABLED(DM_MMC)
214 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
215 {
216 int ret;
217
218 mmmc_trace_before_send(mmc, cmd);
219 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
220 mmmc_trace_after_send(mmc, cmd, ret);
221
222 return ret;
223 }
224 #endif
225
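/*
 * Poll the card with CMD13 (SEND_STATUS) until it reports ready for data
 * and has left the programming state, or the timeout (in ms) expires.
 */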
226 int mmc_send_status(struct mmc *mmc, int timeout)
227 {
228 struct mmc_cmd cmd;
229 int err, retries = 5;
230
231 cmd.cmdidx = MMC_CMD_SEND_STATUS;
232 cmd.resp_type = MMC_RSP_R1;
233 if (!mmc_host_is_spi(mmc))
234 cmd.cmdarg = mmc->rca << 16;
235
236 while (1) {
237 err = mmc_send_cmd(mmc, &cmd, NULL);
238 if (!err) {
239 if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
240 (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
241 MMC_STATE_PRG)
242 break;
243
244 if (cmd.response[0] & MMC_STATUS_MASK) {
245 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
246 printf("Status Error: 0x%08X\n",
247 cmd.response[0]);
248 #endif
249 return -ECOMM;
250 }
251 } else if (--retries < 0)
252 return err;
253
254 if (timeout-- <= 0)
255 break;
256
257 udelay(1000);
258 }
259
260 mmc_trace_state(mmc, &cmd);
261 if (timeout <= 0) {
262 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
263 printf("Timeout waiting card ready\n");
264 #endif
265 return -ETIMEDOUT;
266 }
267
268 return 0;
269 }
270
271 int mmc_set_blocklen(struct mmc *mmc, int len)
272 {
273 struct mmc_cmd cmd;
274
275 if (mmc->ddr_mode)
276 return 0;
277
278 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
279 cmd.resp_type = MMC_RSP_R1;
280 cmd.cmdarg = len;
281
282 return mmc_send_cmd(mmc, &cmd, NULL);
283 }
284
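/*
 * Read 'blkcnt' blocks starting at 'start' using CMD17/CMD18. Standard-
 * capacity cards are byte addressed, high-capacity cards block addressed.
 * Multi-block reads are terminated with CMD12 (STOP_TRANSMISSION).
 * Returns the number of blocks read, or 0 on error.
 */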
285 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
286 lbaint_t blkcnt)
287 {
288 struct mmc_cmd cmd;
289 struct mmc_data data;
290
291 if (blkcnt > 1)
292 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
293 else
294 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
295
296 if (mmc->high_capacity)
297 cmd.cmdarg = start;
298 else
299 cmd.cmdarg = start * mmc->read_bl_len;
300
301 cmd.resp_type = MMC_RSP_R1;
302
303 data.dest = dst;
304 data.blocks = blkcnt;
305 data.blocksize = mmc->read_bl_len;
306 data.flags = MMC_DATA_READ;
307
308 if (mmc_send_cmd(mmc, &cmd, &data))
309 return 0;
310
311 if (blkcnt > 1) {
312 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
313 cmd.cmdarg = 0;
314 cmd.resp_type = MMC_RSP_R1b;
315 if (mmc_send_cmd(mmc, &cmd, NULL)) {
316 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
317 printf("mmc fail to send stop cmd\n");
318 #endif
319 return 0;
320 }
321 }
322
323 return blkcnt;
324 }
325
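/*
 * Block-device read entry point: select the hardware partition, set the
 * block length and read in chunks of at most cfg->b_max blocks.
 */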
326 #if CONFIG_IS_ENABLED(BLK)
327 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
328 #else
329 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
330 void *dst)
331 #endif
332 {
333 #if CONFIG_IS_ENABLED(BLK)
334 struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
335 #endif
336 int dev_num = block_dev->devnum;
337 int err;
338 lbaint_t cur, blocks_todo = blkcnt;
339
340 if (blkcnt == 0)
341 return 0;
342
343 struct mmc *mmc = find_mmc_device(dev_num);
344 if (!mmc)
345 return 0;
346
347 if (CONFIG_IS_ENABLED(MMC_TINY))
348 err = mmc_switch_part(mmc, block_dev->hwpart);
349 else
350 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
351
352 if (err < 0)
353 return 0;
354
355 if ((start + blkcnt) > block_dev->lba) {
356 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
357 printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
358 start + blkcnt, block_dev->lba);
359 #endif
360 return 0;
361 }
362
363 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
364 debug("%s: Failed to set blocklen\n", __func__);
365 return 0;
366 }
367
368 do {
369 cur = (blocks_todo > mmc->cfg->b_max) ?
370 mmc->cfg->b_max : blocks_todo;
371 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
372 debug("%s: Failed to read blocks\n", __func__);
373 return 0;
374 }
375 blocks_todo -= cur;
376 start += cur;
377 dst += cur * mmc->read_bl_len;
378 } while (blocks_todo > 0);
379
380 return blkcnt;
381 }
382
383 static int mmc_go_idle(struct mmc *mmc)
384 {
385 struct mmc_cmd cmd;
386 int err;
387
388 udelay(1000);
389
390 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
391 cmd.cmdarg = 0;
392 cmd.resp_type = MMC_RSP_NONE;
393
394 err = mmc_send_cmd(mmc, &cmd, NULL);
395
396 if (err)
397 return err;
398
399 udelay(2000);
400
401 return 0;
402 }
403
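/*
 * Negotiate the operating conditions of an SD card by repeating ACMD41
 * until the card clears its busy bit. OCR_HCS is advertised for SD
 * version 2 cards so that SDHC/SDXC cards can report high capacity.
 */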
404 static int sd_send_op_cond(struct mmc *mmc)
405 {
406 int timeout = 1000;
407 int err;
408 struct mmc_cmd cmd;
409
410 while (1) {
411 cmd.cmdidx = MMC_CMD_APP_CMD;
412 cmd.resp_type = MMC_RSP_R1;
413 cmd.cmdarg = 0;
414
415 err = mmc_send_cmd(mmc, &cmd, NULL);
416
417 if (err)
418 return err;
419
420 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
421 cmd.resp_type = MMC_RSP_R3;
422
423 /*
424 * Most cards do not answer if some reserved bits
425 * in the OCR are set. However, some controllers
426 * can set bit 7 (reserved for low voltages), but
427 * how to manage low-voltage SD cards is not yet
428 * specified.
429 */
430 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
431 (mmc->cfg->voltages & 0xff8000);
432
433 if (mmc->version == SD_VERSION_2)
434 cmd.cmdarg |= OCR_HCS;
435
436 err = mmc_send_cmd(mmc, &cmd, NULL);
437
438 if (err)
439 return err;
440
441 if (cmd.response[0] & OCR_BUSY)
442 break;
443
444 if (timeout-- <= 0)
445 return -EOPNOTSUPP;
446
447 udelay(1000);
448 }
449
450 if (mmc->version != SD_VERSION_2)
451 mmc->version = SD_VERSION_1_0;
452
453 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
454 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
455 cmd.resp_type = MMC_RSP_R3;
456 cmd.cmdarg = 0;
457
458 err = mmc_send_cmd(mmc, &cmd, NULL);
459
460 if (err)
461 return err;
462 }
463
464 mmc->ocr = cmd.response[0];
465
466 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
467 mmc->rca = 0;
468
469 return 0;
470 }
471
472 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
473 {
474 struct mmc_cmd cmd;
475 int err;
476
477 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
478 cmd.resp_type = MMC_RSP_R3;
479 cmd.cmdarg = 0;
480 if (use_arg && !mmc_host_is_spi(mmc))
481 cmd.cmdarg = OCR_HCS |
482 (mmc->cfg->voltages &
483 (mmc->ocr & OCR_VOLTAGE_MASK)) |
484 (mmc->ocr & OCR_ACCESS_MODE);
485
486 err = mmc_send_cmd(mmc, &cmd, NULL);
487 if (err)
488 return err;
489 mmc->ocr = cmd.response[0];
490 return 0;
491 }
492
493 static int mmc_send_op_cond(struct mmc *mmc)
494 {
495 int err, i;
496
497 /* Some cards seem to need this */
498 mmc_go_idle(mmc);
499
500 /* Ask the card for its capabilities */
501 for (i = 0; i < 2; i++) {
502 err = mmc_send_op_cond_iter(mmc, i != 0);
503 if (err)
504 return err;
505
506 /* exit if not busy (flag seems to be inverted) */
507 if (mmc->ocr & OCR_BUSY)
508 break;
509 }
510 mmc->op_cond_pending = 1;
511 return 0;
512 }
513
514 static int mmc_complete_op_cond(struct mmc *mmc)
515 {
516 struct mmc_cmd cmd;
517 int timeout = 1000;
518 uint start;
519 int err;
520
521 mmc->op_cond_pending = 0;
522 if (!(mmc->ocr & OCR_BUSY)) {
523 /* Some cards seem to need this */
524 mmc_go_idle(mmc);
525
526 start = get_timer(0);
527 while (1) {
528 err = mmc_send_op_cond_iter(mmc, 1);
529 if (err)
530 return err;
531 if (mmc->ocr & OCR_BUSY)
532 break;
533 if (get_timer(start) > timeout)
534 return -EOPNOTSUPP;
535 udelay(100);
536 }
537 }
538
539 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
540 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
541 cmd.resp_type = MMC_RSP_R3;
542 cmd.cmdarg = 0;
543
544 err = mmc_send_cmd(mmc, &cmd, NULL);
545
546 if (err)
547 return err;
548
549 mmc->ocr = cmd.response[0];
550 }
551
552 mmc->version = MMC_VERSION_UNKNOWN;
553
554 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
555 mmc->rca = 1;
556
557 return 0;
558 }
559
560
561 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
562 {
563 struct mmc_cmd cmd;
564 struct mmc_data data;
565 int err;
566
567 /* Get the Card Status Register */
568 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
569 cmd.resp_type = MMC_RSP_R1;
570 cmd.cmdarg = 0;
571
572 data.dest = (char *)ext_csd;
573 data.blocks = 1;
574 data.blocksize = MMC_MAX_BLOCK_LEN;
575 data.flags = MMC_DATA_READ;
576
577 err = mmc_send_cmd(mmc, &cmd, &data);
578
579 return err;
580 }
581
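/*
 * Write a single EXT_CSD byte with CMD6 (SWITCH) and wait for the card to
 * become ready again. The command is retried up to three times on failure.
 */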
582 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
583 {
584 struct mmc_cmd cmd;
585 int timeout = 1000;
586 int retries = 3;
587 int ret;
588
589 cmd.cmdidx = MMC_CMD_SWITCH;
590 cmd.resp_type = MMC_RSP_R1b;
591 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
592 (index << 16) |
593 (value << 8);
594
595 while (retries > 0) {
596 ret = mmc_send_cmd(mmc, &cmd, NULL);
597
598 /* Waiting for the ready status */
599 if (!ret) {
600 ret = mmc_send_status(mmc, timeout);
601 return ret;
602 }
603
604 retries--;
605 }
606
607 return ret;
608
609 }
610
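/*
 * Program the EXT_CSD HS_TIMING field for the requested bus mode and, for
 * the high-speed modes, read back EXT_CSD to confirm the card accepted it.
 */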
611 static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode)
612 {
613 int err;
614 int speed_bits;
615
616 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
617
618 switch (mode) {
619 case MMC_HS:
620 case MMC_HS_52:
621 case MMC_DDR_52:
622 speed_bits = EXT_CSD_TIMING_HS;
break;
623 case MMC_LEGACY:
624 speed_bits = EXT_CSD_TIMING_LEGACY;
625 break;
626 default:
627 return -EINVAL;
628 }
629 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
630 speed_bits);
631 if (err)
632 return err;
633
634 if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
635 /* Now check to see that it worked */
636 err = mmc_send_ext_csd(mmc, test_csd);
637 if (err)
638 return err;
639
640 /* No high-speed support */
641 if (!test_csd[EXT_CSD_HS_TIMING])
642 return -ENOTSUPP;
643 }
644
645 return 0;
646 }
647
648 static int mmc_get_capabilities(struct mmc *mmc)
649 {
650 u8 *ext_csd = mmc->ext_csd;
651 char cardtype;
652
653 mmc->card_caps = MMC_MODE_1BIT;
654
655 if (mmc_host_is_spi(mmc))
656 return 0;
657
658 /* Only version 4 supports high-speed */
659 if (mmc->version < MMC_VERSION_4)
660 return 0;
661
662 if (!ext_csd) {
663 printf("No ext_csd found!\n"); /* this should enver happen */
664 return -ENOTSUPP;
665 }
666
667 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
668
669 cardtype = ext_csd[EXT_CSD_CARD_TYPE] & 0xf;
670
671 /* High Speed is set, there are two types: 52MHz and 26MHz */
672 if (cardtype & EXT_CSD_CARD_TYPE_52) {
673 if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
674 mmc->card_caps |= MMC_MODE_DDR_52MHz;
675 mmc->card_caps |= MMC_MODE_HS_52MHz;
676 }
677 if (cardtype & EXT_CSD_CARD_TYPE_26)
678 mmc->card_caps |= MMC_MODE_HS;
679
680 return 0;
681 }
682
683 static int mmc_set_capacity(struct mmc *mmc, int part_num)
684 {
685 switch (part_num) {
686 case 0:
687 mmc->capacity = mmc->capacity_user;
688 break;
689 case 1:
690 case 2:
691 mmc->capacity = mmc->capacity_boot;
692 break;
693 case 3:
694 mmc->capacity = mmc->capacity_rpmb;
695 break;
696 case 4:
697 case 5:
698 case 6:
699 case 7:
700 mmc->capacity = mmc->capacity_gp[part_num - 4];
701 break;
702 default:
703 return -1;
704 }
705
706 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
707
708 return 0;
709 }
710
711 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
712 {
713 int ret;
714
715 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
716 (mmc->part_config & ~PART_ACCESS_MASK)
717 | (part_num & PART_ACCESS_MASK));
718
719 /*
720 * Set the capacity if the switch succeeded or was intended
721 * to return to representing the raw device.
722 */
723 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
724 ret = mmc_set_capacity(mmc, part_num);
725 mmc_get_blk_desc(mmc)->hwpart = part_num;
726 }
727
728 return ret;
729 }
730
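/*
 * Configure eMMC hardware partitioning (enhanced user data area and the
 * four general purpose partitions). 'mode' selects whether the layout is
 * only checked, written, or written and marked as completed; a completed
 * configuration only takes effect after a power cycle.
 */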
731 int mmc_hwpart_config(struct mmc *mmc,
732 const struct mmc_hwpart_conf *conf,
733 enum mmc_hwpart_conf_mode mode)
734 {
735 u8 part_attrs = 0;
736 u32 enh_size_mult;
737 u32 enh_start_addr;
738 u32 gp_size_mult[4];
739 u32 max_enh_size_mult;
740 u32 tot_enh_size_mult = 0;
741 u8 wr_rel_set;
742 int i, pidx, err;
743 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
744
745 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
746 return -EINVAL;
747
748 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
749 printf("eMMC >= 4.4 required for enhanced user data area\n");
750 return -EMEDIUMTYPE;
751 }
752
753 if (!(mmc->part_support & PART_SUPPORT)) {
754 printf("Card does not support partitioning\n");
755 return -EMEDIUMTYPE;
756 }
757
758 if (!mmc->hc_wp_grp_size) {
759 printf("Card does not define HC WP group size\n");
760 return -EMEDIUMTYPE;
761 }
762
763 /* check partition alignment and total enhanced size */
764 if (conf->user.enh_size) {
765 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
766 conf->user.enh_start % mmc->hc_wp_grp_size) {
767 printf("User data enhanced area not HC WP group "
768 "size aligned\n");
769 return -EINVAL;
770 }
771 part_attrs |= EXT_CSD_ENH_USR;
772 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
773 if (mmc->high_capacity) {
774 enh_start_addr = conf->user.enh_start;
775 } else {
776 enh_start_addr = (conf->user.enh_start << 9);
777 }
778 } else {
779 enh_size_mult = 0;
780 enh_start_addr = 0;
781 }
782 tot_enh_size_mult += enh_size_mult;
783
784 for (pidx = 0; pidx < 4; pidx++) {
785 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
786 printf("GP%i partition not HC WP group size "
787 "aligned\n", pidx+1);
788 return -EINVAL;
789 }
790 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
791 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
792 part_attrs |= EXT_CSD_ENH_GP(pidx);
793 tot_enh_size_mult += gp_size_mult[pidx];
794 }
795 }
796
797 if (part_attrs && !(mmc->part_support & ENHNCD_SUPPORT)) {
798 printf("Card does not support enhanced attribute\n");
799 return -EMEDIUMTYPE;
800 }
801
802 err = mmc_send_ext_csd(mmc, ext_csd);
803 if (err)
804 return err;
805
806 max_enh_size_mult =
807 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
808 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
809 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
810 if (tot_enh_size_mult > max_enh_size_mult) {
811 printf("Total enhanced size exceeds maximum (%u > %u)\n",
812 tot_enh_size_mult, max_enh_size_mult);
813 return -EMEDIUMTYPE;
814 }
815
816 /* The default value of EXT_CSD_WR_REL_SET is device
817 * dependent, the values can only be changed if the
818 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
819 * changed only once and before partitioning is completed. */
820 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
821 if (conf->user.wr_rel_change) {
822 if (conf->user.wr_rel_set)
823 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
824 else
825 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
826 }
827 for (pidx = 0; pidx < 4; pidx++) {
828 if (conf->gp_part[pidx].wr_rel_change) {
829 if (conf->gp_part[pidx].wr_rel_set)
830 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
831 else
832 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
833 }
834 }
835
836 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
837 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
838 puts("Card does not support host controlled partition write "
839 "reliability settings\n");
840 return -EMEDIUMTYPE;
841 }
842
843 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
844 EXT_CSD_PARTITION_SETTING_COMPLETED) {
845 printf("Card already partitioned\n");
846 return -EPERM;
847 }
848
849 if (mode == MMC_HWPART_CONF_CHECK)
850 return 0;
851
852 /* Partitioning requires high-capacity size definitions */
853 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
854 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
855 EXT_CSD_ERASE_GROUP_DEF, 1);
856
857 if (err)
858 return err;
859
860 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
861
862 /* update erase group size to be high-capacity */
863 mmc->erase_grp_size =
864 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
865
866 }
867
868 /* all OK, write the configuration */
869 for (i = 0; i < 4; i++) {
870 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
871 EXT_CSD_ENH_START_ADDR+i,
872 (enh_start_addr >> (i*8)) & 0xFF);
873 if (err)
874 return err;
875 }
876 for (i = 0; i < 3; i++) {
877 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
878 EXT_CSD_ENH_SIZE_MULT+i,
879 (enh_size_mult >> (i*8)) & 0xFF);
880 if (err)
881 return err;
882 }
883 for (pidx = 0; pidx < 4; pidx++) {
884 for (i = 0; i < 3; i++) {
885 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
886 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
887 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
888 if (err)
889 return err;
890 }
891 }
892 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
893 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
894 if (err)
895 return err;
896
897 if (mode == MMC_HWPART_CONF_SET)
898 return 0;
899
900 /* The WR_REL_SET is a write-once register but shall be
901 * written before setting PART_SETTING_COMPLETED. As it is
902 * write-once we can only write it when completing the
903 * partitioning. */
904 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
905 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
906 EXT_CSD_WR_REL_SET, wr_rel_set);
907 if (err)
908 return err;
909 }
910
911 /* Setting PART_SETTING_COMPLETED confirms the partition
912 * configuration but it only becomes effective after power
913 * cycle, so we do not adjust the partition related settings
914 * in the mmc struct. */
915
916 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
917 EXT_CSD_PARTITION_SETTING,
918 EXT_CSD_PARTITION_SETTING_COMPLETED);
919 if (err)
920 return err;
921
922 return 0;
923 }
924
925 #if !CONFIG_IS_ENABLED(DM_MMC)
926 int mmc_getcd(struct mmc *mmc)
927 {
928 int cd;
929
930 cd = board_mmc_getcd(mmc);
931
932 if (cd < 0) {
933 if (mmc->cfg->ops->getcd)
934 cd = mmc->cfg->ops->getcd(mmc);
935 else
936 cd = 1;
937 }
938
939 return cd;
940 }
941 #endif
942
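/*
 * Issue CMD6 (SWITCH_FUNC) to check or set the value of one SD function
 * group; the 64-byte switch status block is returned in 'resp'.
 */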
943 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
944 {
945 struct mmc_cmd cmd;
946 struct mmc_data data;
947
948 /* Switch the frequency */
949 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
950 cmd.resp_type = MMC_RSP_R1;
951 cmd.cmdarg = (mode << 31) | 0xffffff;
952 cmd.cmdarg &= ~(0xf << (group * 4));
953 cmd.cmdarg |= value << (group * 4);
954
955 data.dest = (char *)resp;
956 data.blocksize = 64;
957 data.blocks = 1;
958 data.flags = MMC_DATA_READ;
959
960 return mmc_send_cmd(mmc, &cmd, &data);
961 }
962
963
964 static int sd_get_capabilities(struct mmc *mmc)
965 {
966 int err;
967 struct mmc_cmd cmd;
968 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
969 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
970 struct mmc_data data;
971 int timeout;
972
973 mmc->card_caps = MMC_MODE_1BIT;
974
975 if (mmc_host_is_spi(mmc))
976 return 0;
977
978 /* Read the SCR to find out if this card supports higher speeds */
979 cmd.cmdidx = MMC_CMD_APP_CMD;
980 cmd.resp_type = MMC_RSP_R1;
981 cmd.cmdarg = mmc->rca << 16;
982
983 err = mmc_send_cmd(mmc, &cmd, NULL);
984
985 if (err)
986 return err;
987
988 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
989 cmd.resp_type = MMC_RSP_R1;
990 cmd.cmdarg = 0;
991
992 timeout = 3;
993
994 retry_scr:
995 data.dest = (char *)scr;
996 data.blocksize = 8;
997 data.blocks = 1;
998 data.flags = MMC_DATA_READ;
999
1000 err = mmc_send_cmd(mmc, &cmd, &data);
1001
1002 if (err) {
1003 if (timeout--)
1004 goto retry_scr;
1005
1006 return err;
1007 }
1008
1009 mmc->scr[0] = __be32_to_cpu(scr[0]);
1010 mmc->scr[1] = __be32_to_cpu(scr[1]);
1011
1012 switch ((mmc->scr[0] >> 24) & 0xf) {
1013 case 0:
1014 mmc->version = SD_VERSION_1_0;
1015 break;
1016 case 1:
1017 mmc->version = SD_VERSION_1_10;
1018 break;
1019 case 2:
1020 mmc->version = SD_VERSION_2;
1021 if ((mmc->scr[0] >> 15) & 0x1)
1022 mmc->version = SD_VERSION_3;
1023 break;
1024 default:
1025 mmc->version = SD_VERSION_1_0;
1026 break;
1027 }
1028
1029 if (mmc->scr[0] & SD_DATA_4BIT)
1030 mmc->card_caps |= MMC_MODE_4BIT;
1031
1032 /* Version 1.0 doesn't support switching */
1033 if (mmc->version == SD_VERSION_1_0)
1034 return 0;
1035
1036 timeout = 4;
1037 while (timeout--) {
1038 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1039 (u8 *)switch_status);
1040
1041 if (err)
1042 return err;
1043
1044 /* The high-speed function is busy. Try again */
1045 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1046 break;
1047 }
1048
1049 /* If high-speed is supported, note the capability */
1050 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1051 mmc->card_caps |= MMC_CAP(SD_HS);
1052
1053 return 0;
1054 }
1055
1056 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1057 {
1058 int err;
1059
1060 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1061
1062 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, 1, (u8 *)switch_status);
1063 if (err)
1064 return err;
1065
1066 if ((__be32_to_cpu(switch_status[4]) & 0x0f000000) != 0x01000000)
1067 return -ENOTSUPP;
1068
1069 return 0;
1070 }
1071
1072 int sd_select_bus_width(struct mmc *mmc, int w)
1073 {
1074 int err;
1075 struct mmc_cmd cmd;
1076
1077 if ((w != 4) && (w != 1))
1078 return -EINVAL;
1079
1080 cmd.cmdidx = MMC_CMD_APP_CMD;
1081 cmd.resp_type = MMC_RSP_R1;
1082 cmd.cmdarg = mmc->rca << 16;
1083
1084 err = mmc_send_cmd(mmc, &cmd, NULL);
1085 if (err)
1086 return err;
1087
1088 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1089 cmd.resp_type = MMC_RSP_R1;
1090 if (w == 4)
1091 cmd.cmdarg = 2;
1092 else if (w == 1)
1093 cmd.cmdarg = 0;
1094 err = mmc_send_cmd(mmc, &cmd, NULL);
1095 if (err)
1096 return err;
1097
1098 return 0;
1099 }
1100
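/*
 * Read the SD Status register (ACMD13) and extract the allocation unit
 * size and the erase timeout/offset parameters.
 */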
1101 static int sd_read_ssr(struct mmc *mmc)
1102 {
1103 int err, i;
1104 struct mmc_cmd cmd;
1105 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1106 struct mmc_data data;
1107 int timeout = 3;
1108 unsigned int au, eo, et, es;
1109
1110 cmd.cmdidx = MMC_CMD_APP_CMD;
1111 cmd.resp_type = MMC_RSP_R1;
1112 cmd.cmdarg = mmc->rca << 16;
1113
1114 err = mmc_send_cmd(mmc, &cmd, NULL);
1115 if (err)
1116 return err;
1117
1118 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1119 cmd.resp_type = MMC_RSP_R1;
1120 cmd.cmdarg = 0;
1121
1122 retry_ssr:
1123 data.dest = (char *)ssr;
1124 data.blocksize = 64;
1125 data.blocks = 1;
1126 data.flags = MMC_DATA_READ;
1127
1128 err = mmc_send_cmd(mmc, &cmd, &data);
1129 if (err) {
1130 if (timeout--)
1131 goto retry_ssr;
1132
1133 return err;
1134 }
1135
1136 for (i = 0; i < 16; i++)
1137 ssr[i] = be32_to_cpu(ssr[i]);
1138
1139 au = (ssr[2] >> 12) & 0xF;
1140 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1141 mmc->ssr.au = sd_au_size[au];
1142 es = (ssr[3] >> 24) & 0xFF;
1143 es |= (ssr[2] & 0xFF) << 8;
1144 et = (ssr[3] >> 18) & 0x3F;
1145 if (es && et) {
1146 eo = (ssr[3] >> 16) & 0x3;
1147 mmc->ssr.erase_timeout = (et * 1000) / es;
1148 mmc->ssr.erase_offset = eo * 1000;
1149 }
1150 } else {
1151 debug("Invalid Allocation Unit Size.\n");
1152 }
1153
1154 return 0;
1155 }
1156
1157 /* frequency bases */
1158 /* divided by 10 to be nice to platforms without floating point */
1159 static const int fbase[] = {
1160 10000,
1161 100000,
1162 1000000,
1163 10000000,
1164 };
1165
1166 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
1167 * to platforms without floating point.
1168 */
1169 static const u8 multipliers[] = {
1170 0, /* reserved */
1171 10,
1172 12,
1173 13,
1174 15,
1175 20,
1176 25,
1177 30,
1178 35,
1179 40,
1180 45,
1181 50,
1182 55,
1183 60,
1184 70,
1185 80,
1186 };
1187
1188 static inline int bus_width(uint cap)
1189 {
1190 if (cap == MMC_MODE_8BIT)
1191 return 8;
1192 if (cap == MMC_MODE_4BIT)
1193 return 4;
1194 if (cap == MMC_MODE_1BIT)
1195 return 1;
1196 printf("invalid bus witdh capability 0x%x\n", cap);
1197 return 0;
1198 }
1199
1200 #if !CONFIG_IS_ENABLED(DM_MMC)
1201 static int mmc_set_ios(struct mmc *mmc)
1202 {
1203 int ret = 0;
1204
1205 if (mmc->cfg->ops->set_ios)
1206 ret = mmc->cfg->ops->set_ios(mmc);
1207
1208 return ret;
1209 }
1210 #endif
1211
1212 int mmc_set_clock(struct mmc *mmc, uint clock)
1213 {
1214 if (clock > mmc->cfg->f_max)
1215 clock = mmc->cfg->f_max;
1216
1217 if (clock < mmc->cfg->f_min)
1218 clock = mmc->cfg->f_min;
1219
1220 mmc->clock = clock;
1221
1222 return mmc_set_ios(mmc);
1223 }
1224
1225 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1226 {
1227 mmc->bus_width = width;
1228
1229 return mmc_set_ios(mmc);
1230 }
1231
1232 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1233 /*
1234 * helper function to display the capabilities in a human
1235 * friendly manner. The capabilities include bus width and
1236 * supported modes.
1237 */
1238 void mmc_dump_capabilities(const char *text, uint caps)
1239 {
1240 enum bus_mode mode;
1241
1242 printf("%s: widths [", text);
1243 if (caps & MMC_MODE_8BIT)
1244 printf("8, ");
1245 if (caps & MMC_MODE_4BIT)
1246 printf("4, ");
1247 if (caps & MMC_MODE_1BIT)
1248 printf("1, ");
1249 printf("\b\b] modes [");
1250 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1251 if (MMC_CAP(mode) & caps)
1252 printf("%s, ", mmc_mode_name(mode));
1253 printf("\b\b]\n");
1254 }
1255 #endif
1256
1257 struct mode_width_tuning {
1258 enum bus_mode mode;
1259 uint widths;
1260 };
1261
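/*
 * Record the requested I/O signal voltage and let the host driver apply
 * it via set_ios(); this is the hook that lets the core select between
 * 3.3V and 1.8V signalling.
 */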
1262 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1263 {
1264 mmc->signal_voltage = signal_voltage;
1265 return mmc_set_ios(mmc);
1266 }
1267
1268 static const struct mode_width_tuning sd_modes_by_pref[] = {
1269 {
1270 .mode = SD_HS,
1271 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1272 },
1273 {
1274 .mode = SD_LEGACY,
1275 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1276 }
1277 };
1278
1279 #define for_each_sd_mode_by_pref(caps, mwt) \
1280 for (mwt = sd_modes_by_pref;\
1281 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1282 mwt++) \
1283 if (caps & MMC_CAP(mwt->mode))
1284
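/*
 * Try the supported SD modes in order of preference and, for each mode,
 * the possible bus widths, falling back to SD legacy on failure.
 */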
1285 static int sd_select_mode_and_width(struct mmc *mmc)
1286 {
1287 int err;
1288 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1289 const struct mode_width_tuning *mwt;
1290
1291 err = sd_get_capabilities(mmc);
1292 if (err)
1293 return err;
1294 /* Restrict card's capabilities by what the host can do */
1295 mmc->card_caps &= (mmc->cfg->host_caps | MMC_MODE_1BIT);
1296
1297 for_each_sd_mode_by_pref(mmc->card_caps, mwt) {
1298 uint *w;
1299
1300 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1301 if (*w & mmc->card_caps & mwt->widths) {
1302 debug("trying mode %s width %d (at %d MHz)\n",
1303 mmc_mode_name(mwt->mode),
1304 bus_width(*w),
1305 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1306
1307 /* configure the bus width (card + host) */
1308 err = sd_select_bus_width(mmc, bus_width(*w));
1309 if (err)
1310 goto error;
1311 mmc_set_bus_width(mmc, bus_width(*w));
1312
1313 /* configure the bus mode (card) */
1314 err = sd_set_card_speed(mmc, mwt->mode);
1315 if (err)
1316 goto error;
1317
1318 /* configure the bus mode (host) */
1319 mmc_select_mode(mmc, mwt->mode);
1320 mmc_set_clock(mmc, mmc->tran_speed);
1321
1322 err = sd_read_ssr(mmc);
1323 if (!err)
1324 return 0;
1325
1326 printf("bad ssr\n");
1327
1328 error:
1329 /* revert to a safer bus speed */
1330 mmc_select_mode(mmc, SD_LEGACY);
1331 mmc_set_clock(mmc, mmc->tran_speed);
1332 }
1333 }
1334 }
1335
1336 printf("unable to select a mode\n");
1337 return -ENOTSUPP;
1338 }
1339
1340 /*
1341 * Read and compare the part of ext csd that is constant.
1342 * This can be used to check that the transfer is working
1343 * as expected.
1344 */
1345 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1346 {
1347 int err;
1348 const u8 *ext_csd = mmc->ext_csd;
1349 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1350
1351 err = mmc_send_ext_csd(mmc, test_csd);
1352 if (err)
1353 return err;
1354
1355 /* Only compare read only fields */
1356 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1357 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1358 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1359 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1360 ext_csd[EXT_CSD_REV]
1361 == test_csd[EXT_CSD_REV] &&
1362 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1363 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1364 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1365 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1366 return 0;
1367
1368 return -EBADMSG;
1369 }
1370
1371 static const struct mode_width_tuning mmc_modes_by_pref[] = {
1372 {
1373 .mode = MMC_HS_200,
1374 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1375 },
1376 {
1377 .mode = MMC_DDR_52,
1378 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1379 },
1380 {
1381 .mode = MMC_HS_52,
1382 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1383 },
1384 {
1385 .mode = MMC_HS,
1386 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1387 },
1388 {
1389 .mode = MMC_LEGACY,
1390 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1391 }
1392 };
1393
1394 #define for_each_mmc_mode_by_pref(caps, mwt) \
1395 for (mwt = mmc_modes_by_pref;\
1396 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1397 mwt++) \
1398 if (caps & MMC_CAP(mwt->mode))
1399
1400 static const struct ext_csd_bus_width {
1401 uint cap;
1402 bool is_ddr;
1403 uint ext_csd_bits;
1404 } ext_csd_bus_width[] = {
1405 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1406 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1407 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1408 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1409 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
1410 };
1411
1412 #define for_each_supported_width(caps, ddr, ecbv) \
1413 for (ecbv = ext_csd_bus_width;\
1414 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
1415 ecbv++) \
1416 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
1417
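/*
 * Try the supported eMMC modes in order of preference; for each candidate,
 * program the bus width and timing on the card, mirror them on the host
 * and verify the configuration with an EXT_CSD read-back.
 */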
1418 static int mmc_select_mode_and_width(struct mmc *mmc)
1419 {
1420 int err;
1421 const struct mode_width_tuning *mwt;
1422 const struct ext_csd_bus_width *ecbw;
1423
1424 err = mmc_get_capabilities(mmc);
1425 if (err)
1426 return err;
1427
1428 /* Restrict card's capabilities by what the host can do */
1429 mmc->card_caps &= (mmc->cfg->host_caps | MMC_MODE_1BIT);
1430
1431 /* Only version 4 of MMC supports wider bus widths */
1432 if (mmc->version < MMC_VERSION_4)
1433 return 0;
1434
1435 if (!mmc->ext_csd) {
1436 debug("No ext_csd found!\n"); /* this should enver happen */
1437 return -ENOTSUPP;
1438 }
1439
1440 for_each_mmc_mode_by_pref(mmc->card_caps, mwt) {
1441 for_each_supported_width(mmc->card_caps & mwt->widths,
1442 mmc_is_mode_ddr(mwt->mode), ecbw) {
1443 debug("trying mode %s width %d (at %d MHz)\n",
1444 mmc_mode_name(mwt->mode),
1445 bus_width(ecbw->cap),
1446 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1447 /* configure the bus width (card + host) */
1448 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1449 EXT_CSD_BUS_WIDTH,
1450 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
1451 if (err)
1452 goto error;
1453 mmc_set_bus_width(mmc, bus_width(ecbw->cap));
1454
1455 /* configure the bus speed (card) */
1456 err = mmc_set_card_speed(mmc, mwt->mode);
1457 if (err)
1458 goto error;
1459
1460 /*
1461 * configure the bus width AND the ddr mode (card)
1462 * The host side will be taken care of in the next step
1463 */
1464 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
1465 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1466 EXT_CSD_BUS_WIDTH,
1467 ecbw->ext_csd_bits);
1468 if (err)
1469 goto error;
1470 }
1471
1472 /* configure the bus mode (host) */
1473 mmc_select_mode(mmc, mwt->mode);
1474 mmc_set_clock(mmc, mmc->tran_speed);
1475
1476 /* do a transfer to check the configuration */
1477 err = mmc_read_and_compare_ext_csd(mmc);
1478 if (!err)
1479 return 0;
1480 error:
1481 /* if an error occurred, revert to a safer bus mode */
1482 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1483 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
1484 mmc_select_mode(mmc, MMC_LEGACY);
1485 mmc_set_bus_width(mmc, 1);
1486 }
1487 }
1488
1489 printf("unable to select a mode\n");
1490
1491 return -ENOTSUPP;
1492 }
1493
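/*
 * For eMMC v4+ devices, read EXT_CSD and derive the device version,
 * capacities, partition configuration and erase/WP group sizes from it.
 */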
1494 static int mmc_startup_v4(struct mmc *mmc)
1495 {
1496 int err, i;
1497 u64 capacity;
1498 bool has_parts = false;
1499 bool part_completed;
1500 u8 *ext_csd;
1501
1502 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
1503 return 0;
1504
1505 ext_csd = malloc_cache_aligned(MMC_MAX_BLOCK_LEN);
1506 if (!ext_csd)
1507 return -ENOMEM;
1508
1509 mmc->ext_csd = ext_csd;
1510
1511 /* check ext_csd version and capacity */
1512 err = mmc_send_ext_csd(mmc, ext_csd);
1513 if (err)
1514 return err;
1515 if (ext_csd[EXT_CSD_REV] >= 2) {
1516 /*
1517 * According to the JEDEC Standard, the value of
1518 * ext_csd's capacity is valid if the value is more
1519 * than 2GB
1520 */
1521 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
1522 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
1523 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
1524 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
1525 capacity *= MMC_MAX_BLOCK_LEN;
1526 if ((capacity >> 20) > 2 * 1024)
1527 mmc->capacity_user = capacity;
1528 }
1529
1530 switch (ext_csd[EXT_CSD_REV]) {
1531 case 1:
1532 mmc->version = MMC_VERSION_4_1;
1533 break;
1534 case 2:
1535 mmc->version = MMC_VERSION_4_2;
1536 break;
1537 case 3:
1538 mmc->version = MMC_VERSION_4_3;
1539 break;
1540 case 5:
1541 mmc->version = MMC_VERSION_4_41;
1542 break;
1543 case 6:
1544 mmc->version = MMC_VERSION_4_5;
1545 break;
1546 case 7:
1547 mmc->version = MMC_VERSION_5_0;
1548 break;
1549 case 8:
1550 mmc->version = MMC_VERSION_5_1;
1551 break;
1552 }
1553
1554 /* The partition data may be non-zero but it is only
1555 * effective if PARTITION_SETTING_COMPLETED is set in
1556 * EXT_CSD, so ignore any data if this bit is not set,
1557 * except for enabling the high-capacity group size
1558 * definition (see below).
1559 */
1560 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
1561 EXT_CSD_PARTITION_SETTING_COMPLETED);
1562
1563 /* store the partition info of emmc */
1564 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
1565 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
1566 ext_csd[EXT_CSD_BOOT_MULT])
1567 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
1568 if (part_completed &&
1569 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
1570 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
1571
1572 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
1573
1574 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
1575
1576 for (i = 0; i < 4; i++) {
1577 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
1578 uint mult = (ext_csd[idx + 2] << 16) +
1579 (ext_csd[idx + 1] << 8) + ext_csd[idx];
1580 if (mult)
1581 has_parts = true;
1582 if (!part_completed)
1583 continue;
1584 mmc->capacity_gp[i] = mult;
1585 mmc->capacity_gp[i] *=
1586 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
1587 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1588 mmc->capacity_gp[i] <<= 19;
1589 }
1590
1591 if (part_completed) {
1592 mmc->enh_user_size =
1593 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
1594 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
1595 ext_csd[EXT_CSD_ENH_SIZE_MULT];
1596 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
1597 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1598 mmc->enh_user_size <<= 19;
1599 mmc->enh_user_start =
1600 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
1601 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
1602 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
1603 ext_csd[EXT_CSD_ENH_START_ADDR];
1604 if (mmc->high_capacity)
1605 mmc->enh_user_start <<= 9;
1606 }
1607
1608 /*
1609 * Host needs to enable ERASE_GRP_DEF bit if device is
1610 * partitioned. This bit will be lost every time after a reset
1611 * or power off. This will affect erase size.
1612 */
1613 if (part_completed)
1614 has_parts = true;
1615 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
1616 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
1617 has_parts = true;
1618 if (has_parts) {
1619 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1620 EXT_CSD_ERASE_GROUP_DEF, 1);
1621
1622 if (err)
1623 return err;
1624
1625 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1626 }
1627
1628 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
1629 /* Read out group size from ext_csd */
1630 mmc->erase_grp_size =
1631 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1632 /*
1633 * if high capacity and partition setting completed
1634 * SEC_COUNT is valid even if it is smaller than 2 GiB
1635 * JEDEC Standard JESD84-B45, 6.2.4
1636 */
1637 if (mmc->high_capacity && part_completed) {
1638 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
1639 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
1640 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
1641 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
1642 capacity *= MMC_MAX_BLOCK_LEN;
1643 mmc->capacity_user = capacity;
1644 }
1645 } else {
1646 /* Calculate the group size from the csd value. */
1647 int erase_gsz, erase_gmul;
1648
1649 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
1650 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
1651 mmc->erase_grp_size = (erase_gsz + 1)
1652 * (erase_gmul + 1);
1653 }
1654
1655 mmc->hc_wp_grp_size = 1024
1656 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1657 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1658
1659 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1660
1661 return 0;
1662 }
1663
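/*
 * Identify the card (CID/CSD), move it to Transfer State and then select
 * the best supported bus mode and width before filling in the block
 * device descriptor.
 */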
1664 static int mmc_startup(struct mmc *mmc)
1665 {
1666 int err, i;
1667 uint mult, freq;
1668 u64 cmult, csize;
1669 struct mmc_cmd cmd;
1670 struct blk_desc *bdesc;
1671
1672 #ifdef CONFIG_MMC_SPI_CRC_ON
1673 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
1674 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
1675 cmd.resp_type = MMC_RSP_R1;
1676 cmd.cmdarg = 1;
1677 err = mmc_send_cmd(mmc, &cmd, NULL);
1678
1679 if (err)
1680 return err;
1681 }
1682 #endif
1683
1684 /* Put the Card in Identify Mode */
1685 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
1686 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
1687 cmd.resp_type = MMC_RSP_R2;
1688 cmd.cmdarg = 0;
1689
1690 err = mmc_send_cmd(mmc, &cmd, NULL);
1691
1692 if (err)
1693 return err;
1694
1695 memcpy(mmc->cid, cmd.response, 16);
1696
1697 /*
1698 * For MMC cards, set the Relative Address.
1699 * For SD cards, get the Relative Address.
1700 * This also puts the cards into Standby State
1701 */
1702 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
1703 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
1704 cmd.cmdarg = mmc->rca << 16;
1705 cmd.resp_type = MMC_RSP_R6;
1706
1707 err = mmc_send_cmd(mmc, &cmd, NULL);
1708
1709 if (err)
1710 return err;
1711
1712 if (IS_SD(mmc))
1713 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
1714 }
1715
1716 /* Get the Card-Specific Data */
1717 cmd.cmdidx = MMC_CMD_SEND_CSD;
1718 cmd.resp_type = MMC_RSP_R2;
1719 cmd.cmdarg = mmc->rca << 16;
1720
1721 err = mmc_send_cmd(mmc, &cmd, NULL);
1722
1723 if (err)
1724 return err;
1725
1726 mmc->csd[0] = cmd.response[0];
1727 mmc->csd[1] = cmd.response[1];
1728 mmc->csd[2] = cmd.response[2];
1729 mmc->csd[3] = cmd.response[3];
1730
1731 if (mmc->version == MMC_VERSION_UNKNOWN) {
1732 int version = (cmd.response[0] >> 26) & 0xf;
1733
1734 switch (version) {
1735 case 0:
1736 mmc->version = MMC_VERSION_1_2;
1737 break;
1738 case 1:
1739 mmc->version = MMC_VERSION_1_4;
1740 break;
1741 case 2:
1742 mmc->version = MMC_VERSION_2_2;
1743 break;
1744 case 3:
1745 mmc->version = MMC_VERSION_3;
1746 break;
1747 case 4:
1748 mmc->version = MMC_VERSION_4;
1749 break;
1750 default:
1751 mmc->version = MMC_VERSION_1_2;
1752 break;
1753 }
1754 }
1755
1756 /* divide frequency by 10, since the mults are 10x bigger */
1757 freq = fbase[(cmd.response[0] & 0x7)];
1758 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
1759
1760 mmc->legacy_speed = freq * mult;
1761 mmc_select_mode(mmc, MMC_LEGACY);
1762
1763 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
1764 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
1765
1766 if (IS_SD(mmc))
1767 mmc->write_bl_len = mmc->read_bl_len;
1768 else
1769 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
1770
1771 if (mmc->high_capacity) {
1772 csize = (mmc->csd[1] & 0x3f) << 16
1773 | (mmc->csd[2] & 0xffff0000) >> 16;
1774 cmult = 8;
1775 } else {
1776 csize = (mmc->csd[1] & 0x3ff) << 2
1777 | (mmc->csd[2] & 0xc0000000) >> 30;
1778 cmult = (mmc->csd[2] & 0x00038000) >> 15;
1779 }
1780
1781 mmc->capacity_user = (csize + 1) << (cmult + 2);
1782 mmc->capacity_user *= mmc->read_bl_len;
1783 mmc->capacity_boot = 0;
1784 mmc->capacity_rpmb = 0;
1785 for (i = 0; i < 4; i++)
1786 mmc->capacity_gp[i] = 0;
1787
1788 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
1789 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
1790
1791 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
1792 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
1793
1794 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
1795 cmd.cmdidx = MMC_CMD_SET_DSR;
1796 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
1797 cmd.resp_type = MMC_RSP_NONE;
1798 if (mmc_send_cmd(mmc, &cmd, NULL))
1799 printf("MMC: SET_DSR failed\n");
1800 }
1801
1802 /* Select the card, and put it into Transfer Mode */
1803 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
1804 cmd.cmdidx = MMC_CMD_SELECT_CARD;
1805 cmd.resp_type = MMC_RSP_R1;
1806 cmd.cmdarg = mmc->rca << 16;
1807 err = mmc_send_cmd(mmc, &cmd, NULL);
1808
1809 if (err)
1810 return err;
1811 }
1812
1813 /*
1814 * For SD, its erase group is always one sector
1815 */
1816 mmc->erase_grp_size = 1;
1817 mmc->part_config = MMCPART_NOAVAILABLE;
1818
1819 err = mmc_startup_v4(mmc);
1820 if (err)
1821 return err;
1822
1823 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
1824 if (err)
1825 return err;
1826
1827 if (IS_SD(mmc))
1828 err = sd_select_mode_and_width(mmc);
1829 else
1830 err = mmc_select_mode_and_width(mmc);
1831
1832 if (err)
1833 return err;
1834
1835
1836 /* Fix the block length for DDR mode */
1837 if (mmc->ddr_mode) {
1838 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
1839 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
1840 }
1841
1842 /* fill in device description */
1843 bdesc = mmc_get_blk_desc(mmc);
1844 bdesc->lun = 0;
1845 bdesc->hwpart = 0;
1846 bdesc->type = 0;
1847 bdesc->blksz = mmc->read_bl_len;
1848 bdesc->log2blksz = LOG2(bdesc->blksz);
1849 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
1850 #if !defined(CONFIG_SPL_BUILD) || \
1851 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
1852 !defined(CONFIG_USE_TINY_PRINTF))
1853 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
1854 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
1855 (mmc->cid[3] >> 16) & 0xffff);
1856 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
1857 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
1858 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
1859 (mmc->cid[2] >> 24) & 0xff);
1860 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
1861 (mmc->cid[2] >> 16) & 0xf);
1862 #else
1863 bdesc->vendor[0] = 0;
1864 bdesc->product[0] = 0;
1865 bdesc->revision[0] = 0;
1866 #endif
1867 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
1868 part_init(bdesc);
1869 #endif
1870
1871 return 0;
1872 }
1873
1874 static int mmc_send_if_cond(struct mmc *mmc)
1875 {
1876 struct mmc_cmd cmd;
1877 int err;
1878
1879 cmd.cmdidx = SD_CMD_SEND_IF_COND;
1880 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
1881 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
1882 cmd.resp_type = MMC_RSP_R7;
1883
1884 err = mmc_send_cmd(mmc, &cmd, NULL);
1885
1886 if (err)
1887 return err;
1888
1889 if ((cmd.response[0] & 0xff) != 0xaa)
1890 return -EOPNOTSUPP;
1891 else
1892 mmc->version = SD_VERSION_2;
1893
1894 return 0;
1895 }
1896
1897 #if !CONFIG_IS_ENABLED(DM_MMC)
1898 /* board-specific MMC power initializations. */
1899 __weak void board_mmc_power_init(void)
1900 {
1901 }
1902 #endif
1903
1904 static int mmc_power_init(struct mmc *mmc)
1905 {
1906 #if CONFIG_IS_ENABLED(DM_MMC)
1907 #if CONFIG_IS_ENABLED(DM_REGULATOR)
1908 int ret;
1909
1910 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
1911 &mmc->vmmc_supply);
1912 if (ret)
1913 debug("%s: No vmmc supply\n", mmc->dev->name);
1914
1915 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
1916 &mmc->vqmmc_supply);
1917 if (ret)
1918 debug("%s: No vqmmc supply\n", mmc->dev->name);
1919
1920 if (mmc->vmmc_supply) {
1921 ret = regulator_set_enable(mmc->vmmc_supply, true);
1922 if (ret) {
1923 puts("Error enabling VMMC supply\n");
1924 return ret;
1925 }
1926 }
1927 #endif
1928 #else /* !CONFIG_DM_MMC */
1929 /*
1930 * Driver model should use a regulator, as above, rather than calling
1931 * out to board code.
1932 */
1933 board_mmc_power_init();
1934 #endif
1935 return 0;
1936 }
1937
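/*
 * First half of card initialization: power up the card, reset it with
 * CMD0 and start the SD/MMC operating-condition negotiation. The rest is
 * finished by mmc_complete_init(), normally via mmc_init().
 */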
1938 int mmc_start_init(struct mmc *mmc)
1939 {
1940 bool no_card;
1941 int err;
1942
1943 /* we pretend there's no card when init is NULL */
1944 no_card = mmc_getcd(mmc) == 0;
1945 #if !CONFIG_IS_ENABLED(DM_MMC)
1946 no_card = no_card || (mmc->cfg->ops->init == NULL);
1947 #endif
1948 if (no_card) {
1949 mmc->has_init = 0;
1950 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
1951 printf("MMC: no card present\n");
1952 #endif
1953 return -ENOMEDIUM;
1954 }
1955
1956 if (mmc->has_init)
1957 return 0;
1958
1959 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
1960 mmc_adapter_card_type_ident();
1961 #endif
1962 err = mmc_power_init(mmc);
1963 if (err)
1964 return err;
1965
1966 #if CONFIG_IS_ENABLED(DM_MMC)
1967 /* The device has already been probed ready for use */
1968 #else
1969 /* made sure it's not NULL earlier */
1970 err = mmc->cfg->ops->init(mmc);
1971 if (err)
1972 return err;
1973 #endif
1974 mmc->ddr_mode = 0;
1975
1976 /* First try to set 3.3V. If it fails set to 1.8V */
1977 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
1978 if (err != 0)
1979 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
1980 if (err != 0)
1981 printf("failed to set signal voltage\n");
1982
1983 mmc_set_bus_width(mmc, 1);
1984 mmc_set_clock(mmc, 1);
1985
1986 /* Reset the Card */
1987 err = mmc_go_idle(mmc);
1988
1989 if (err)
1990 return err;
1991
1992 /* The internal partition is reset to the user partition (0) on every CMD0 */
1993 mmc_get_blk_desc(mmc)->hwpart = 0;
1994
1995 /* Test for SD version 2 */
1996 err = mmc_send_if_cond(mmc);
1997
1998 /* Now try to get the SD card's operating condition */
1999 err = sd_send_op_cond(mmc);
2000
2001 /* If the command timed out, we check for an MMC card */
2002 if (err == -ETIMEDOUT) {
2003 err = mmc_send_op_cond(mmc);
2004
2005 if (err) {
2006 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2007 printf("Card did not respond to voltage select!\n");
2008 #endif
2009 return -EOPNOTSUPP;
2010 }
2011 }
2012
2013 if (!err)
2014 mmc->init_in_progress = 1;
2015
2016 return err;
2017 }
2018
2019 static int mmc_complete_init(struct mmc *mmc)
2020 {
2021 int err = 0;
2022
2023 mmc->init_in_progress = 0;
2024 if (mmc->op_cond_pending)
2025 err = mmc_complete_op_cond(mmc);
2026
2027 if (!err)
2028 err = mmc_startup(mmc);
2029 if (err)
2030 mmc->has_init = 0;
2031 else
2032 mmc->has_init = 1;
2033 return err;
2034 }
2035
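/*
 * Typical usage from board or command code (illustrative sketch only):
 *
 *	struct mmc *mmc = find_mmc_device(dev_num);
 *	if (mmc && !mmc_init(mmc))
 *		blk_dread(mmc_get_blk_desc(mmc), start, blkcnt, buffer);
 */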
2036 int mmc_init(struct mmc *mmc)
2037 {
2038 int err = 0;
2039 __maybe_unused unsigned start;
2040 #if CONFIG_IS_ENABLED(DM_MMC)
2041 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2042
2043 upriv->mmc = mmc;
2044 #endif
2045 if (mmc->has_init)
2046 return 0;
2047
2048 start = get_timer(0);
2049
2050 if (!mmc->init_in_progress)
2051 err = mmc_start_init(mmc);
2052
2053 if (!err)
2054 err = mmc_complete_init(mmc);
2055 if (err)
2056 printf("%s: %d, time %lu\n", __func__, err, get_timer(start));
2057
2058 return err;
2059 }
2060
2061 int mmc_set_dsr(struct mmc *mmc, u16 val)
2062 {
2063 mmc->dsr = val;
2064 return 0;
2065 }
2066
2067 /* CPU-specific MMC initializations */
2068 __weak int cpu_mmc_init(bd_t *bis)
2069 {
2070 return -1;
2071 }
2072
2073 /* board-specific MMC initializations. */
2074 __weak int board_mmc_init(bd_t *bis)
2075 {
2076 return -1;
2077 }
2078
2079 void mmc_set_preinit(struct mmc *mmc, int preinit)
2080 {
2081 mmc->preinit = preinit;
2082 }
2083
2084 #if CONFIG_IS_ENABLED(DM_MMC) && defined(CONFIG_SPL_BUILD)
2085 static int mmc_probe(bd_t *bis)
2086 {
2087 return 0;
2088 }
2089 #elif CONFIG_IS_ENABLED(DM_MMC)
2090 static int mmc_probe(bd_t *bis)
2091 {
2092 int ret, i;
2093 struct uclass *uc;
2094 struct udevice *dev;
2095
2096 ret = uclass_get(UCLASS_MMC, &uc);
2097 if (ret)
2098 return ret;
2099
2100 /*
2101 * Try to add them in sequence order. Really with driver model we
2102 * should allow holes, but the current MMC list does not allow that.
2103 * So if we request 0, 1, 3 we will get 0, 1, 2.
2104 */
2105 for (i = 0; ; i++) {
2106 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
2107 if (ret == -ENODEV)
2108 break;
2109 }
2110 uclass_foreach_dev(dev, uc) {
2111 ret = device_probe(dev);
2112 if (ret)
2113 printf("%s - probe failed: %d\n", dev->name, ret);
2114 }
2115
2116 return 0;
2117 }
2118 #else
2119 static int mmc_probe(bd_t *bis)
2120 {
2121 if (board_mmc_init(bis) < 0)
2122 cpu_mmc_init(bis);
2123
2124 return 0;
2125 }
2126 #endif
2127
2128 int mmc_initialize(bd_t *bis)
2129 {
2130 static int initialized = 0;
2131 int ret;
2132 if (initialized) /* Avoid initializing mmc multiple times */
2133 return 0;
2134 initialized = 1;
2135
2136 #if !CONFIG_IS_ENABLED(BLK)
2137 #if !CONFIG_IS_ENABLED(MMC_TINY)
2138 mmc_list_init();
2139 #endif
2140 #endif
2141 ret = mmc_probe(bis);
2142 if (ret)
2143 return ret;
2144
2145 #ifndef CONFIG_SPL_BUILD
2146 print_mmc_devices(',');
2147 #endif
2148
2149 mmc_do_preinit();
2150 return 0;
2151 }
2152
2153 #ifdef CONFIG_CMD_BKOPS_ENABLE
2154 int mmc_set_bkops_enable(struct mmc *mmc)
2155 {
2156 int err;
2157 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2158
2159 err = mmc_send_ext_csd(mmc, ext_csd);
2160 if (err) {
2161 puts("Could not get ext_csd register values\n");
2162 return err;
2163 }
2164
2165 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
2166 puts("Background operations not supported on device\n");
2167 return -EMEDIUMTYPE;
2168 }
2169
2170 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
2171 puts("Background operations already enabled\n");
2172 return 0;
2173 }
2174
2175 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
2176 if (err) {
2177 puts("Failed to enable manual background operations\n");
2178 return err;
2179 }
2180
2181 puts("Enabled manual background operations\n");
2182
2183 return 0;
2184 }
2185 #endif