1 /*
2 * Copyright 2008, Freescale Semiconductor, Inc
3 * Andy Fleming
4 *
5 * Based vaguely on the Linux code
6 *
7 * SPDX-License-Identifier: GPL-2.0+
8 */
9
10 #include <config.h>
11 #include <common.h>
12 #include <command.h>
13 #include <dm.h>
14 #include <dm/device-internal.h>
15 #include <errno.h>
16 #include <mmc.h>
17 #include <part.h>
18 #include <power/regulator.h>
19 #include <malloc.h>
20 #include <memalign.h>
21 #include <linux/list.h>
22 #include <div64.h>
23 #include "mmc_private.h"
24
25 static const unsigned int sd_au_size[] = {
26 0, SZ_16K / 512, SZ_32K / 512,
27 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
28 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
29 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
30 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512, SZ_64M / 512,
31 };
32
33 #if CONFIG_IS_ENABLED(MMC_TINY)
34 static struct mmc mmc_static;
35 struct mmc *find_mmc_device(int dev_num)
36 {
37 return &mmc_static;
38 }
39
40 void mmc_do_preinit(void)
41 {
42 struct mmc *m = &mmc_static;
43 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
44 mmc_set_preinit(m, 1);
45 #endif
46 if (m->preinit)
47 mmc_start_init(m);
48 }
49
50 struct blk_desc *mmc_get_blk_desc(struct mmc *mmc)
51 {
52 return &mmc->block_dev;
53 }
54 #endif
55
56 #if !CONFIG_IS_ENABLED(DM_MMC)
57 __weak int board_mmc_getwp(struct mmc *mmc)
58 {
59 return -1;
60 }
61
62 int mmc_getwp(struct mmc *mmc)
63 {
64 int wp;
65
66 wp = board_mmc_getwp(mmc);
67
68 if (wp < 0) {
69 if (mmc->cfg->ops->getwp)
70 wp = mmc->cfg->ops->getwp(mmc);
71 else
72 wp = 0;
73 }
74
75 return wp;
76 }
77
78 __weak int board_mmc_getcd(struct mmc *mmc)
79 {
80 return -1;
81 }
82 #endif
83
84 #ifdef CONFIG_MMC_TRACE
85 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
86 {
87 printf("CMD_SEND:%d\n", cmd->cmdidx);
88 printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
89 }
90
91 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
92 {
93 int i;
94 u8 *ptr;
95
96 if (ret) {
97 printf("\t\tRET\t\t\t %d\n", ret);
98 } else {
99 switch (cmd->resp_type) {
100 case MMC_RSP_NONE:
101 printf("\t\tMMC_RSP_NONE\n");
102 break;
103 case MMC_RSP_R1:
104 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
105 cmd->response[0]);
106 break;
107 case MMC_RSP_R1b:
108 printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
109 cmd->response[0]);
110 break;
111 case MMC_RSP_R2:
112 printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
113 cmd->response[0]);
114 printf("\t\t \t\t 0x%08X \n",
115 cmd->response[1]);
116 printf("\t\t \t\t 0x%08X \n",
117 cmd->response[2]);
118 printf("\t\t \t\t 0x%08X \n",
119 cmd->response[3]);
120 printf("\n");
121 printf("\t\t\t\t\tDUMPING DATA\n");
122 for (i = 0; i < 4; i++) {
123 int j;
124 printf("\t\t\t\t\t%03d - ", i*4);
125 ptr = (u8 *)&cmd->response[i];
126 ptr += 3;
127 for (j = 0; j < 4; j++)
128 printf("%02X ", *ptr--);
129 printf("\n");
130 }
131 break;
132 case MMC_RSP_R3:
133 printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
134 cmd->response[0]);
135 break;
136 default:
137 printf("\t\tERROR MMC rsp not supported\n");
138 break;
139 }
140 }
141 }
142
143 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
144 {
145 int status;
146
147 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
148 printf("CURR STATE:%d\n", status);
149 }
150 #endif
151
152 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
153 const char *mmc_mode_name(enum bus_mode mode)
154 {
155 static const char *const names[] = {
156 [MMC_LEGACY] = "MMC legacy",
157 [SD_LEGACY] = "SD Legacy",
158 [MMC_HS] = "MMC High Speed (26MHz)",
159 [SD_HS] = "SD High Speed (50MHz)",
160 [UHS_SDR12] = "UHS SDR12 (25MHz)",
161 [UHS_SDR25] = "UHS SDR25 (50MHz)",
162 [UHS_SDR50] = "UHS SDR50 (100MHz)",
163 [UHS_SDR104] = "UHS SDR104 (208MHz)",
164 [UHS_DDR50] = "UHS DDR50 (50MHz)",
165 [MMC_HS_52] = "MMC High Speed (52MHz)",
166 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
167 [MMC_HS_200] = "HS200 (200MHz)",
168 };
169
170 if (mode >= MMC_MODES_END)
171 return "Unknown mode";
172 else
173 return names[mode];
174 }
175 #endif
176
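/* Return the nominal bus clock (Hz) for a bus mode; MMC_LEGACY uses the speed read from the CSD */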
177 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
178 {
179 static const int freqs[] = {
180 [SD_LEGACY] = 25000000,
181 [MMC_HS] = 26000000,
182 [SD_HS] = 50000000,
183 [UHS_SDR12] = 25000000,
184 [UHS_SDR25] = 50000000,
185 [UHS_SDR50] = 100000000,
186 [UHS_SDR104] = 208000000,
187 [UHS_DDR50] = 50000000,
188 [MMC_HS_52] = 52000000,
189 [MMC_DDR_52] = 52000000,
190 [MMC_HS_200] = 200000000,
191 };
192
193 if (mode == MMC_LEGACY)
194 return mmc->legacy_speed;
195 else if (mode >= MMC_MODES_END)
196 return 0;
197 else
198 return freqs[mode];
199 }
200
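/* Record the selected bus mode and derive the transfer clock and DDR flag from it */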
201 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
202 {
203 mmc->selected_mode = mode;
204 mmc->tran_speed = mmc_mode2freq(mmc, mode);
205 mmc->ddr_mode = mmc_is_mode_ddr(mode);
206 debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
207 mmc->tran_speed / 1000000);
208 return 0;
209 }
210
211 #if !CONFIG_IS_ENABLED(DM_MMC)
212 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
213 {
214 int ret;
215
216 mmmc_trace_before_send(mmc, cmd);
217 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
218 mmmc_trace_after_send(mmc, cmd, ret);
219
220 return ret;
221 }
222 #endif
223
224 int mmc_send_status(struct mmc *mmc, int timeout)
225 {
226 struct mmc_cmd cmd;
227 int err, retries = 5;
228
229 cmd.cmdidx = MMC_CMD_SEND_STATUS;
230 cmd.resp_type = MMC_RSP_R1;
231 if (!mmc_host_is_spi(mmc))
232 cmd.cmdarg = mmc->rca << 16;
233
234 while (1) {
235 err = mmc_send_cmd(mmc, &cmd, NULL);
236 if (!err) {
237 if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
238 (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
239 MMC_STATE_PRG)
240 break;
241
242 if (cmd.response[0] & MMC_STATUS_MASK) {
243 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
244 printf("Status Error: 0x%08X\n",
245 cmd.response[0]);
246 #endif
247 return -ECOMM;
248 }
249 } else if (--retries < 0)
250 return err;
251
252 if (timeout-- <= 0)
253 break;
254
255 udelay(1000);
256 }
257
258 mmc_trace_state(mmc, &cmd);
259 if (timeout <= 0) {
260 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
261 printf("Timeout waiting card ready\n");
262 #endif
263 return -ETIMEDOUT;
264 }
265
266 return 0;
267 }
268
269 int mmc_set_blocklen(struct mmc *mmc, int len)
270 {
271 struct mmc_cmd cmd;
272
273 if (mmc->ddr_mode)
274 return 0;
275
276 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
277 cmd.resp_type = MMC_RSP_R1;
278 cmd.cmdarg = len;
279
280 return mmc_send_cmd(mmc, &cmd, NULL);
281 }
282
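/*
 * Read blkcnt blocks into dst using CMD17/CMD18 (plus CMD12 for multi-block).
 * Returns the number of blocks read, or 0 on error.
 */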
283 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
284 lbaint_t blkcnt)
285 {
286 struct mmc_cmd cmd;
287 struct mmc_data data;
288
289 if (blkcnt > 1)
290 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
291 else
292 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
293
294 if (mmc->high_capacity)
295 cmd.cmdarg = start;
296 else
297 cmd.cmdarg = start * mmc->read_bl_len;
298
299 cmd.resp_type = MMC_RSP_R1;
300
301 data.dest = dst;
302 data.blocks = blkcnt;
303 data.blocksize = mmc->read_bl_len;
304 data.flags = MMC_DATA_READ;
305
306 if (mmc_send_cmd(mmc, &cmd, &data))
307 return 0;
308
309 if (blkcnt > 1) {
310 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
311 cmd.cmdarg = 0;
312 cmd.resp_type = MMC_RSP_R1b;
313 if (mmc_send_cmd(mmc, &cmd, NULL)) {
314 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
315 printf("mmc fail to send stop cmd\n");
316 #endif
317 return 0;
318 }
319 }
320
321 return blkcnt;
322 }
323
324 #if CONFIG_IS_ENABLED(BLK)
325 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
326 #else
327 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
328 void *dst)
329 #endif
330 {
331 #if CONFIG_IS_ENABLED(BLK)
332 struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
333 #endif
334 int dev_num = block_dev->devnum;
335 int err;
336 lbaint_t cur, blocks_todo = blkcnt;
337
338 if (blkcnt == 0)
339 return 0;
340
341 struct mmc *mmc = find_mmc_device(dev_num);
342 if (!mmc)
343 return 0;
344
345 if (CONFIG_IS_ENABLED(MMC_TINY))
346 err = mmc_switch_part(mmc, block_dev->hwpart);
347 else
348 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
349
350 if (err < 0)
351 return 0;
352
353 if ((start + blkcnt) > block_dev->lba) {
354 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
355 printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
356 start + blkcnt, block_dev->lba);
357 #endif
358 return 0;
359 }
360
361 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
362 debug("%s: Failed to set blocklen\n", __func__);
363 return 0;
364 }
365
366 do {
367 cur = (blocks_todo > mmc->cfg->b_max) ?
368 mmc->cfg->b_max : blocks_todo;
369 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
370 debug("%s: Failed to read blocks\n", __func__);
371 return 0;
372 }
373 blocks_todo -= cur;
374 start += cur;
375 dst += cur * mmc->read_bl_len;
376 } while (blocks_todo > 0);
377
378 return blkcnt;
379 }
380
381 static int mmc_go_idle(struct mmc *mmc)
382 {
383 struct mmc_cmd cmd;
384 int err;
385
386 udelay(1000);
387
388 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
389 cmd.cmdarg = 0;
390 cmd.resp_type = MMC_RSP_NONE;
391
392 err = mmc_send_cmd(mmc, &cmd, NULL);
393
394 if (err)
395 return err;
396
397 udelay(2000);
398
399 return 0;
400 }
401
402 static int sd_send_op_cond(struct mmc *mmc)
403 {
404 int timeout = 1000;
405 int err;
406 struct mmc_cmd cmd;
407
408 while (1) {
409 cmd.cmdidx = MMC_CMD_APP_CMD;
410 cmd.resp_type = MMC_RSP_R1;
411 cmd.cmdarg = 0;
412
413 err = mmc_send_cmd(mmc, &cmd, NULL);
414
415 if (err)
416 return err;
417
418 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
419 cmd.resp_type = MMC_RSP_R3;
420
421 /*
422 * Most cards do not answer if some reserved bits
423 * in the OCR are set. However, some controllers
424 * can set bit 7 (reserved for low voltages), but
425 * how to manage low-voltage SD cards is not yet
426 * specified.
427 */
428 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
429 (mmc->cfg->voltages & 0xff8000);
430
431 if (mmc->version == SD_VERSION_2)
432 cmd.cmdarg |= OCR_HCS;
433
434 err = mmc_send_cmd(mmc, &cmd, NULL);
435
436 if (err)
437 return err;
438
439 if (cmd.response[0] & OCR_BUSY)
440 break;
441
442 if (timeout-- <= 0)
443 return -EOPNOTSUPP;
444
445 udelay(1000);
446 }
447
448 if (mmc->version != SD_VERSION_2)
449 mmc->version = SD_VERSION_1_0;
450
451 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
452 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
453 cmd.resp_type = MMC_RSP_R3;
454 cmd.cmdarg = 0;
455
456 err = mmc_send_cmd(mmc, &cmd, NULL);
457
458 if (err)
459 return err;
460 }
461
462 mmc->ocr = cmd.response[0];
463
464 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
465 mmc->rca = 0;
466
467 return 0;
468 }
469
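/* Issue one CMD1 (SEND_OP_COND) iteration and store the OCR returned by the card */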
470 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
471 {
472 struct mmc_cmd cmd;
473 int err;
474
475 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
476 cmd.resp_type = MMC_RSP_R3;
477 cmd.cmdarg = 0;
478 if (use_arg && !mmc_host_is_spi(mmc))
479 cmd.cmdarg = OCR_HCS |
480 (mmc->cfg->voltages &
481 (mmc->ocr & OCR_VOLTAGE_MASK)) |
482 (mmc->ocr & OCR_ACCESS_MODE);
483
484 err = mmc_send_cmd(mmc, &cmd, NULL);
485 if (err)
486 return err;
487 mmc->ocr = cmd.response[0];
488 return 0;
489 }
490
491 static int mmc_send_op_cond(struct mmc *mmc)
492 {
493 int err, i;
494
495 /* Some cards seem to need this */
496 mmc_go_idle(mmc);
497
498 /* Ask the card for its capabilities */
499 for (i = 0; i < 2; i++) {
500 err = mmc_send_op_cond_iter(mmc, i != 0);
501 if (err)
502 return err;
503
504 /* exit if not busy (flag seems to be inverted) */
505 if (mmc->ocr & OCR_BUSY)
506 break;
507 }
508 mmc->op_cond_pending = 1;
509 return 0;
510 }
511
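/*
 * Finish the CMD1 handshake: poll until the card reports power-up completion,
 * then record the OCR, the capacity mode and a default RCA.
 */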
512 static int mmc_complete_op_cond(struct mmc *mmc)
513 {
514 struct mmc_cmd cmd;
515 int timeout = 1000;
516 uint start;
517 int err;
518
519 mmc->op_cond_pending = 0;
520 if (!(mmc->ocr & OCR_BUSY)) {
521 /* Some cards seem to need this */
522 mmc_go_idle(mmc);
523
524 start = get_timer(0);
525 while (1) {
526 err = mmc_send_op_cond_iter(mmc, 1);
527 if (err)
528 return err;
529 if (mmc->ocr & OCR_BUSY)
530 break;
531 if (get_timer(start) > timeout)
532 return -EOPNOTSUPP;
533 udelay(100);
534 }
535 }
536
537 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
538 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
539 cmd.resp_type = MMC_RSP_R3;
540 cmd.cmdarg = 0;
541
542 err = mmc_send_cmd(mmc, &cmd, NULL);
543
544 if (err)
545 return err;
546
547 mmc->ocr = cmd.response[0];
548 }
549
550 mmc->version = MMC_VERSION_UNKNOWN;
551
552 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
553 mmc->rca = 1;
554
555 return 0;
556 }
557
558
559 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
560 {
561 struct mmc_cmd cmd;
562 struct mmc_data data;
563 int err;
564
565 /* Read the card's EXT_CSD register */
566 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
567 cmd.resp_type = MMC_RSP_R1;
568 cmd.cmdarg = 0;
569
570 data.dest = (char *)ext_csd;
571 data.blocks = 1;
572 data.blocksize = MMC_MAX_BLOCK_LEN;
573 data.flags = MMC_DATA_READ;
574
575 err = mmc_send_cmd(mmc, &cmd, &data);
576
577 return err;
578 }
579
580 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
581 {
582 struct mmc_cmd cmd;
583 int timeout = 1000;
584 int retries = 3;
585 int ret;
586
587 cmd.cmdidx = MMC_CMD_SWITCH;
588 cmd.resp_type = MMC_RSP_R1b;
589 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
590 (index << 16) |
591 (value << 8);
592
593 while (retries > 0) {
594 ret = mmc_send_cmd(mmc, &cmd, NULL);
595
596 /* Waiting for the ready status */
597 if (!ret) {
598 ret = mmc_send_status(mmc, timeout);
599 return ret;
600 }
601
602 retries--;
603 }
604
605 return ret;
606
607 }
608
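/*
 * Program the card's HS_TIMING field in the EXT_CSD for the requested mode and,
 * for high-speed modes, read the EXT_CSD back to confirm the switch took effect.
 */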
609 static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode)
610 {
611 int err;
612 int speed_bits;
613
614 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
615
616 switch (mode) {
617 case MMC_HS:
618 case MMC_HS_52:
619 case MMC_DDR_52:
620 speed_bits = EXT_CSD_TIMING_HS;
break;
621 case MMC_LEGACY:
622 speed_bits = EXT_CSD_TIMING_LEGACY;
623 break;
624 default:
625 return -EINVAL;
626 }
627 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
628 speed_bits);
629 if (err)
630 return err;
631
632 if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
633 /* Now check to see that it worked */
634 err = mmc_send_ext_csd(mmc, test_csd);
635 if (err)
636 return err;
637
638 /* No high-speed support */
639 if (!test_csd[EXT_CSD_HS_TIMING])
640 return -ENOTSUPP;
641 }
642
643 return 0;
644 }
645
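/* Derive the (e)MMC card capabilities (bus widths and high-speed modes) from the cached EXT_CSD */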
646 static int mmc_get_capabilities(struct mmc *mmc)
647 {
648 u8 *ext_csd = mmc->ext_csd;
649 char cardtype;
650
651 mmc->card_caps = MMC_MODE_1BIT;
652
653 if (mmc_host_is_spi(mmc))
654 return 0;
655
656 /* Only version 4 supports high-speed */
657 if (mmc->version < MMC_VERSION_4)
658 return 0;
659
660 if (!ext_csd) {
661 printf("No ext_csd found!\n"); /* this should never happen */
662 return -ENOTSUPP;
663 }
664
665 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
666
667 cardtype = ext_csd[EXT_CSD_CARD_TYPE] & 0xf;
668
669 /* High Speed is set, there are two types: 52MHz and 26MHz */
670 if (cardtype & EXT_CSD_CARD_TYPE_52) {
671 if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
672 mmc->card_caps |= MMC_MODE_DDR_52MHz;
673 mmc->card_caps |= MMC_MODE_HS_52MHz;
674 }
675 if (cardtype & EXT_CSD_CARD_TYPE_26)
676 mmc->card_caps |= MMC_MODE_HS;
677
678 return 0;
679 }
680
681 static int mmc_set_capacity(struct mmc *mmc, int part_num)
682 {
683 switch (part_num) {
684 case 0:
685 mmc->capacity = mmc->capacity_user;
686 break;
687 case 1:
688 case 2:
689 mmc->capacity = mmc->capacity_boot;
690 break;
691 case 3:
692 mmc->capacity = mmc->capacity_rpmb;
693 break;
694 case 4:
695 case 5:
696 case 6:
697 case 7:
698 mmc->capacity = mmc->capacity_gp[part_num - 4];
699 break;
700 default:
701 return -1;
702 }
703
704 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
705
706 return 0;
707 }
708
709 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
710 {
711 int ret;
712
713 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
714 (mmc->part_config & ~PART_ACCESS_MASK)
715 | (part_num & PART_ACCESS_MASK));
716
717 /*
718 * Set the capacity if the switch succeeded or was intended
719 * to return to representing the raw device.
720 */
721 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
722 ret = mmc_set_capacity(mmc, part_num);
723 mmc_get_blk_desc(mmc)->hwpart = part_num;
724 }
725
726 return ret;
727 }
728
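/*
 * Check or apply an eMMC hardware partition configuration (enhanced user area,
 * GP partitions, write reliability) by programming the relevant EXT_CSD fields.
 */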
729 int mmc_hwpart_config(struct mmc *mmc,
730 const struct mmc_hwpart_conf *conf,
731 enum mmc_hwpart_conf_mode mode)
732 {
733 u8 part_attrs = 0;
734 u32 enh_size_mult;
735 u32 enh_start_addr;
736 u32 gp_size_mult[4];
737 u32 max_enh_size_mult;
738 u32 tot_enh_size_mult = 0;
739 u8 wr_rel_set;
740 int i, pidx, err;
741 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
742
743 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
744 return -EINVAL;
745
746 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
747 printf("eMMC >= 4.4 required for enhanced user data area\n");
748 return -EMEDIUMTYPE;
749 }
750
751 if (!(mmc->part_support & PART_SUPPORT)) {
752 printf("Card does not support partitioning\n");
753 return -EMEDIUMTYPE;
754 }
755
756 if (!mmc->hc_wp_grp_size) {
757 printf("Card does not define HC WP group size\n");
758 return -EMEDIUMTYPE;
759 }
760
761 /* check partition alignment and total enhanced size */
762 if (conf->user.enh_size) {
763 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
764 conf->user.enh_start % mmc->hc_wp_grp_size) {
765 printf("User data enhanced area not HC WP group "
766 "size aligned\n");
767 return -EINVAL;
768 }
769 part_attrs |= EXT_CSD_ENH_USR;
770 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
771 if (mmc->high_capacity) {
772 enh_start_addr = conf->user.enh_start;
773 } else {
774 enh_start_addr = (conf->user.enh_start << 9);
775 }
776 } else {
777 enh_size_mult = 0;
778 enh_start_addr = 0;
779 }
780 tot_enh_size_mult += enh_size_mult;
781
782 for (pidx = 0; pidx < 4; pidx++) {
783 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
784 printf("GP%i partition not HC WP group size "
785 "aligned\n", pidx+1);
786 return -EINVAL;
787 }
788 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
789 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
790 part_attrs |= EXT_CSD_ENH_GP(pidx);
791 tot_enh_size_mult += gp_size_mult[pidx];
792 }
793 }
794
795 if (part_attrs && !(mmc->part_support & ENHNCD_SUPPORT)) {
796 printf("Card does not support enhanced attribute\n");
797 return -EMEDIUMTYPE;
798 }
799
800 err = mmc_send_ext_csd(mmc, ext_csd);
801 if (err)
802 return err;
803
804 max_enh_size_mult =
805 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
806 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
807 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
808 if (tot_enh_size_mult > max_enh_size_mult) {
809 printf("Total enhanced size exceeds maximum (%u > %u)\n",
810 tot_enh_size_mult, max_enh_size_mult);
811 return -EMEDIUMTYPE;
812 }
813
814 /* The default value of EXT_CSD_WR_REL_SET is device
815 * dependent, the values can only be changed if the
816 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
817 * changed only once and before partitioning is completed. */
818 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
819 if (conf->user.wr_rel_change) {
820 if (conf->user.wr_rel_set)
821 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
822 else
823 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
824 }
825 for (pidx = 0; pidx < 4; pidx++) {
826 if (conf->gp_part[pidx].wr_rel_change) {
827 if (conf->gp_part[pidx].wr_rel_set)
828 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
829 else
830 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
831 }
832 }
833
834 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
835 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
836 puts("Card does not support host controlled partition write "
837 "reliability settings\n");
838 return -EMEDIUMTYPE;
839 }
840
841 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
842 EXT_CSD_PARTITION_SETTING_COMPLETED) {
843 printf("Card already partitioned\n");
844 return -EPERM;
845 }
846
847 if (mode == MMC_HWPART_CONF_CHECK)
848 return 0;
849
850 /* Partitioning requires high-capacity size definitions */
851 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
852 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
853 EXT_CSD_ERASE_GROUP_DEF, 1);
854
855 if (err)
856 return err;
857
858 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
859
860 /* update erase group size to be high-capacity */
861 mmc->erase_grp_size =
862 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
863
864 }
865
866 /* all OK, write the configuration */
867 for (i = 0; i < 4; i++) {
868 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
869 EXT_CSD_ENH_START_ADDR+i,
870 (enh_start_addr >> (i*8)) & 0xFF);
871 if (err)
872 return err;
873 }
874 for (i = 0; i < 3; i++) {
875 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
876 EXT_CSD_ENH_SIZE_MULT+i,
877 (enh_size_mult >> (i*8)) & 0xFF);
878 if (err)
879 return err;
880 }
881 for (pidx = 0; pidx < 4; pidx++) {
882 for (i = 0; i < 3; i++) {
883 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
884 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
885 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
886 if (err)
887 return err;
888 }
889 }
890 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
891 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
892 if (err)
893 return err;
894
895 if (mode == MMC_HWPART_CONF_SET)
896 return 0;
897
898 /* The WR_REL_SET is a write-once register but shall be
899 * written before setting PART_SETTING_COMPLETED. As it is
900 * write-once we can only write it when completing the
901 * partitioning. */
902 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
903 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
904 EXT_CSD_WR_REL_SET, wr_rel_set);
905 if (err)
906 return err;
907 }
908
909 /* Setting PART_SETTING_COMPLETED confirms the partition
910 * configuration but it only becomes effective after power
911 * cycle, so we do not adjust the partition related settings
912 * in the mmc struct. */
913
914 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
915 EXT_CSD_PARTITION_SETTING,
916 EXT_CSD_PARTITION_SETTING_COMPLETED);
917 if (err)
918 return err;
919
920 return 0;
921 }
922
923 #if !CONFIG_IS_ENABLED(DM_MMC)
924 int mmc_getcd(struct mmc *mmc)
925 {
926 int cd;
927
928 cd = board_mmc_getcd(mmc);
929
930 if (cd < 0) {
931 if (mmc->cfg->ops->getcd)
932 cd = mmc->cfg->ops->getcd(mmc);
933 else
934 cd = 1;
935 }
936
937 return cd;
938 }
939 #endif
940
941 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
942 {
943 struct mmc_cmd cmd;
944 struct mmc_data data;
945
946 /* Switch the frequency */
947 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
948 cmd.resp_type = MMC_RSP_R1;
949 cmd.cmdarg = (mode << 31) | 0xffffff;
950 cmd.cmdarg &= ~(0xf << (group * 4));
951 cmd.cmdarg |= value << (group * 4);
952
953 data.dest = (char *)resp;
954 data.blocksize = 64;
955 data.blocks = 1;
956 data.flags = MMC_DATA_READ;
957
958 return mmc_send_cmd(mmc, &cmd, &data);
959 }
960
961
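/* Read the SCR and the CMD6 switch status to work out the SD card's bus width and speed capabilities */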
962 static int sd_get_capabilities(struct mmc *mmc)
963 {
964 int err;
965 struct mmc_cmd cmd;
966 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
967 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
968 struct mmc_data data;
969 int timeout;
970
971 mmc->card_caps = MMC_MODE_1BIT;
972
973 if (mmc_host_is_spi(mmc))
974 return 0;
975
976 /* Read the SCR to find out if this card supports higher speeds */
977 cmd.cmdidx = MMC_CMD_APP_CMD;
978 cmd.resp_type = MMC_RSP_R1;
979 cmd.cmdarg = mmc->rca << 16;
980
981 err = mmc_send_cmd(mmc, &cmd, NULL);
982
983 if (err)
984 return err;
985
986 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
987 cmd.resp_type = MMC_RSP_R1;
988 cmd.cmdarg = 0;
989
990 timeout = 3;
991
992 retry_scr:
993 data.dest = (char *)scr;
994 data.blocksize = 8;
995 data.blocks = 1;
996 data.flags = MMC_DATA_READ;
997
998 err = mmc_send_cmd(mmc, &cmd, &data);
999
1000 if (err) {
1001 if (timeout--)
1002 goto retry_scr;
1003
1004 return err;
1005 }
1006
1007 mmc->scr[0] = __be32_to_cpu(scr[0]);
1008 mmc->scr[1] = __be32_to_cpu(scr[1]);
1009
1010 switch ((mmc->scr[0] >> 24) & 0xf) {
1011 case 0:
1012 mmc->version = SD_VERSION_1_0;
1013 break;
1014 case 1:
1015 mmc->version = SD_VERSION_1_10;
1016 break;
1017 case 2:
1018 mmc->version = SD_VERSION_2;
1019 if ((mmc->scr[0] >> 15) & 0x1)
1020 mmc->version = SD_VERSION_3;
1021 break;
1022 default:
1023 mmc->version = SD_VERSION_1_0;
1024 break;
1025 }
1026
1027 if (mmc->scr[0] & SD_DATA_4BIT)
1028 mmc->card_caps |= MMC_MODE_4BIT;
1029
1030 /* Version 1.0 doesn't support switching */
1031 if (mmc->version == SD_VERSION_1_0)
1032 return 0;
1033
1034 timeout = 4;
1035 while (timeout--) {
1036 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1037 (u8 *)switch_status);
1038
1039 if (err)
1040 return err;
1041
1042 /* The high-speed function is busy. Try again */
1043 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1044 break;
1045 }
1046
1047 /* If high-speed is supported, advertise it in the card capabilities */
1048 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1049 mmc->card_caps |= MMC_CAP(SD_HS);
1050
1051 return 0;
1052 }
1053
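/* Switch the SD card's access mode to high speed via CMD6 and check that the switch took effect */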
1054 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1055 {
1056 int err;
1057
1058 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1059
1060 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, 1, (u8 *)switch_status);
1061 if (err)
1062 return err;
1063
1064 if ((__be32_to_cpu(switch_status[4]) & 0x0f000000) != 0x01000000)
1065 return -ENOTSUPP;
1066
1067 return 0;
1068 }
1069
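/* Set the SD card's bus width (1 or 4 bits) with ACMD6; the host side is configured separately */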
1070 int sd_select_bus_width(struct mmc *mmc, int w)
1071 {
1072 int err;
1073 struct mmc_cmd cmd;
1074
1075 if ((w != 4) && (w != 1))
1076 return -EINVAL;
1077
1078 cmd.cmdidx = MMC_CMD_APP_CMD;
1079 cmd.resp_type = MMC_RSP_R1;
1080 cmd.cmdarg = mmc->rca << 16;
1081
1082 err = mmc_send_cmd(mmc, &cmd, NULL);
1083 if (err)
1084 return err;
1085
1086 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1087 cmd.resp_type = MMC_RSP_R1;
1088 if (w == 4)
1089 cmd.cmdarg = 2;
1090 else if (w == 1)
1091 cmd.cmdarg = 0;
1092 err = mmc_send_cmd(mmc, &cmd, NULL);
1093 if (err)
1094 return err;
1095
1096 return 0;
1097 }
1098
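/* Read the SD Status (SSR) with ACMD13 and extract the AU size and erase timing parameters */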
1099 static int sd_read_ssr(struct mmc *mmc)
1100 {
1101 int err, i;
1102 struct mmc_cmd cmd;
1103 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1104 struct mmc_data data;
1105 int timeout = 3;
1106 unsigned int au, eo, et, es;
1107
1108 cmd.cmdidx = MMC_CMD_APP_CMD;
1109 cmd.resp_type = MMC_RSP_R1;
1110 cmd.cmdarg = mmc->rca << 16;
1111
1112 err = mmc_send_cmd(mmc, &cmd, NULL);
1113 if (err)
1114 return err;
1115
1116 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1117 cmd.resp_type = MMC_RSP_R1;
1118 cmd.cmdarg = 0;
1119
1120 retry_ssr:
1121 data.dest = (char *)ssr;
1122 data.blocksize = 64;
1123 data.blocks = 1;
1124 data.flags = MMC_DATA_READ;
1125
1126 err = mmc_send_cmd(mmc, &cmd, &data);
1127 if (err) {
1128 if (timeout--)
1129 goto retry_ssr;
1130
1131 return err;
1132 }
1133
1134 for (i = 0; i < 16; i++)
1135 ssr[i] = be32_to_cpu(ssr[i]);
1136
1137 au = (ssr[2] >> 12) & 0xF;
1138 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1139 mmc->ssr.au = sd_au_size[au];
1140 es = (ssr[3] >> 24) & 0xFF;
1141 es |= (ssr[2] & 0xFF) << 8;
1142 et = (ssr[3] >> 18) & 0x3F;
1143 if (es && et) {
1144 eo = (ssr[3] >> 16) & 0x3;
1145 mmc->ssr.erase_timeout = (et * 1000) / es;
1146 mmc->ssr.erase_offset = eo * 1000;
1147 }
1148 } else {
1149 debug("Invalid Allocation Unit Size.\n");
1150 }
1151
1152 return 0;
1153 }
1154
1155 /* frequency bases */
1156 /* divided by 10 to be nice to platforms without floating point */
1157 static const int fbase[] = {
1158 10000,
1159 100000,
1160 1000000,
1161 10000000,
1162 };
1163
1164 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
1165 * to platforms without floating point.
1166 */
1167 static const u8 multipliers[] = {
1168 0, /* reserved */
1169 10,
1170 12,
1171 13,
1172 15,
1173 20,
1174 25,
1175 30,
1176 35,
1177 40,
1178 45,
1179 50,
1180 55,
1181 60,
1182 70,
1183 80,
1184 };
1185
1186 static inline int bus_width(uint cap)
1187 {
1188 if (cap == MMC_MODE_8BIT)
1189 return 8;
1190 if (cap == MMC_MODE_4BIT)
1191 return 4;
1192 if (cap == MMC_MODE_1BIT)
1193 return 1;
1194 printf("invalid bus width capability 0x%x\n", cap);
1195 return 0;
1196 }
1197
1198 #if !CONFIG_IS_ENABLED(DM_MMC)
1199 static void mmc_set_ios(struct mmc *mmc)
1200 {
1201 if (mmc->cfg->ops->set_ios)
1202 mmc->cfg->ops->set_ios(mmc);
1203 }
1204 #endif
1205
1206 void mmc_set_clock(struct mmc *mmc, uint clock)
1207 {
1208 if (clock > mmc->cfg->f_max)
1209 clock = mmc->cfg->f_max;
1210
1211 if (clock < mmc->cfg->f_min)
1212 clock = mmc->cfg->f_min;
1213
1214 mmc->clock = clock;
1215
1216 mmc_set_ios(mmc);
1217 }
1218
1219 static void mmc_set_bus_width(struct mmc *mmc, uint width)
1220 {
1221 mmc->bus_width = width;
1222
1223 mmc_set_ios(mmc);
1224 }
1225
1226 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1227 /*
1228 * helper function to display the capabilities in a human
1229 * friendly manner. The capabilities include bus width and
1230 * supported modes.
1231 */
1232 void mmc_dump_capabilities(const char *text, uint caps)
1233 {
1234 enum bus_mode mode;
1235
1236 printf("%s: widths [", text);
1237 if (caps & MMC_MODE_8BIT)
1238 printf("8, ");
1239 if (caps & MMC_MODE_4BIT)
1240 printf("4, ");
1241 if (caps & MMC_MODE_1BIT)
1242 printf("1, ");
1243 printf("\b\b] modes [");
1244 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1245 if (MMC_CAP(mode) & caps)
1246 printf("%s, ", mmc_mode_name(mode));
1247 printf("\b\b]\n");
1248 }
1249 #endif
1250
1251 struct mode_width_tuning {
1252 enum bus_mode mode;
1253 uint widths;
1254 };
1255
1256 static const struct mode_width_tuning sd_modes_by_pref[] = {
1257 {
1258 .mode = SD_HS,
1259 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1260 },
1261 {
1262 .mode = SD_LEGACY,
1263 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1264 }
1265 };
1266
1267 #define for_each_sd_mode_by_pref(caps, mwt) \
1268 for (mwt = sd_modes_by_pref;\
1269 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1270 mwt++) \
1271 if (caps & MMC_CAP(mwt->mode))
1272
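/*
 * Try the SD bus modes in order of preference, configuring the bus width on
 * card and host for each, and fall back to SD_LEGACY if a mode fails.
 */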
1273 static int sd_select_mode_and_width(struct mmc *mmc)
1274 {
1275 int err;
1276 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1277 const struct mode_width_tuning *mwt;
1278
1279 err = sd_get_capabilities(mmc);
1280 if (err)
1281 return err;
1282 /* Restrict card's capabilities by what the host can do */
1283 mmc->card_caps &= (mmc->cfg->host_caps | MMC_MODE_1BIT);
1284
1285 for_each_sd_mode_by_pref(mmc->card_caps, mwt) {
1286 uint *w;
1287
1288 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1289 if (*w & mmc->card_caps & mwt->widths) {
1290 debug("trying mode %s width %d (at %d MHz)\n",
1291 mmc_mode_name(mwt->mode),
1292 bus_width(*w),
1293 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1294
1295 /* configure the bus width (card + host) */
1296 err = sd_select_bus_width(mmc, bus_width(*w));
1297 if (err)
1298 goto error;
1299 mmc_set_bus_width(mmc, bus_width(*w));
1300
1301 /* configure the bus mode (card) */
1302 err = sd_set_card_speed(mmc, mwt->mode);
1303 if (err)
1304 goto error;
1305
1306 /* configure the bus mode (host) */
1307 mmc_select_mode(mmc, mwt->mode);
1308 mmc_set_clock(mmc, mmc->tran_speed);
1309
1310 err = sd_read_ssr(mmc);
1311 if (!err)
1312 return 0;
1313
1314 printf("bad ssr\n");
1315
1316 error:
1317 /* revert to a safer bus speed */
1318 mmc_select_mode(mmc, SD_LEGACY);
1319 mmc_set_clock(mmc, mmc->tran_speed);
1320 }
1321 }
1322 }
1323
1324 printf("unable to select a mode\n");
1325 return -ENOTSUPP;
1326 }
1327
1328 /*
1329 * Read and compare the parts of the ext_csd that are constant.
1330 * This can be used to check that the transfer is working
1331 * as expected.
1332 */
1333 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1334 {
1335 int err;
1336 const u8 *ext_csd = mmc->ext_csd;
1337 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1338
1339 err = mmc_send_ext_csd(mmc, test_csd);
1340 if (err)
1341 return err;
1342
1343 /* Only compare read only fields */
1344 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1345 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1346 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1347 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1348 ext_csd[EXT_CSD_REV]
1349 == test_csd[EXT_CSD_REV] &&
1350 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1351 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1352 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1353 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1354 return 0;
1355
1356 return -EBADMSG;
1357 }
1358
1359 static const struct mode_width_tuning mmc_modes_by_pref[] = {
1360 {
1361 .mode = MMC_HS_200,
1362 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1363 },
1364 {
1365 .mode = MMC_DDR_52,
1366 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1367 },
1368 {
1369 .mode = MMC_HS_52,
1370 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1371 },
1372 {
1373 .mode = MMC_HS,
1374 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1375 },
1376 {
1377 .mode = MMC_LEGACY,
1378 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1379 }
1380 };
1381
1382 #define for_each_mmc_mode_by_pref(caps, mwt) \
1383 for (mwt = mmc_modes_by_pref;\
1384 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1385 mwt++) \
1386 if (caps & MMC_CAP(mwt->mode))
1387
1388 static const struct ext_csd_bus_width {
1389 uint cap;
1390 bool is_ddr;
1391 uint ext_csd_bits;
1392 } ext_csd_bus_width[] = {
1393 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1394 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1395 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1396 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1397 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
1398 };
1399
1400 #define for_each_supported_width(caps, ddr, ecbv) \
1401 for (ecbv = ext_csd_bus_width;\
1402 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
1403 ecbv++) \
1404 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
1405
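/*
 * Try the eMMC bus modes in order of preference: set the bus width and timing
 * on the card, then verify the configuration with an EXT_CSD read before
 * accepting the mode; on failure revert to a 1-bit legacy bus.
 */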
1406 static int mmc_select_mode_and_width(struct mmc *mmc)
1407 {
1408 int err;
1409 const struct mode_width_tuning *mwt;
1410 const struct ext_csd_bus_width *ecbw;
1411
1412 err = mmc_get_capabilities(mmc);
1413 if (err)
1414 return err;
1415
1416 /* Restrict card's capabilities by what the host can do */
1417 mmc->card_caps &= (mmc->cfg->host_caps | MMC_MODE_1BIT);
1418
1419 /* Only version 4 of MMC supports wider bus widths */
1420 if (mmc->version < MMC_VERSION_4)
1421 return 0;
1422
1423 if (!mmc->ext_csd) {
1424 debug("No ext_csd found!\n"); /* this should never happen */
1425 return -ENOTSUPP;
1426 }
1427
1428 for_each_mmc_mode_by_pref(mmc->card_caps, mwt) {
1429 for_each_supported_width(mmc->card_caps & mwt->widths,
1430 mmc_is_mode_ddr(mwt->mode), ecbw) {
1431 debug("trying mode %s width %d (at %d MHz)\n",
1432 mmc_mode_name(mwt->mode),
1433 bus_width(ecbw->cap),
1434 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1435 /* configure the bus width (card + host) */
1436 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1437 EXT_CSD_BUS_WIDTH,
1438 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
1439 if (err)
1440 goto error;
1441 mmc_set_bus_width(mmc, bus_width(ecbw->cap));
1442
1443 /* configure the bus speed (card) */
1444 err = mmc_set_card_speed(mmc, mwt->mode);
1445 if (err)
1446 goto error;
1447
1448 /*
1449 * configure the bus width AND the ddr mode (card)
1450 * The host side will be taken care of in the next step
1451 */
1452 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
1453 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1454 EXT_CSD_BUS_WIDTH,
1455 ecbw->ext_csd_bits);
1456 if (err)
1457 goto error;
1458 }
1459
1460 /* configure the bus mode (host) */
1461 mmc_select_mode(mmc, mwt->mode);
1462 mmc_set_clock(mmc, mmc->tran_speed);
1463
1464 /* do a transfer to check the configuration */
1465 err = mmc_read_and_compare_ext_csd(mmc);
1466 if (!err)
1467 return 0;
1468 error:
1469 /* if an error occurred, revert to a safer bus mode */
1470 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1471 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
1472 mmc_select_mode(mmc, MMC_LEGACY);
1473 mmc_set_bus_width(mmc, 1);
1474 }
1475 }
1476
1477 printf("unable to select a mode\n");
1478
1479 return -ENOTSUPP;
1480 }
1481
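/*
 * For (e)MMC version 4 and above: read the EXT_CSD and derive the device
 * version, capacities, partition information and erase/WP group sizes.
 */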
1482 static int mmc_startup_v4(struct mmc *mmc)
1483 {
1484 int err, i;
1485 u64 capacity;
1486 bool has_parts = false;
1487 bool part_completed;
1488 u8 *ext_csd;
1489
1490 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
1491 return 0;
1492
1493 ext_csd = malloc_cache_aligned(MMC_MAX_BLOCK_LEN);
1494 if (!ext_csd)
1495 return -ENOMEM;
1496
1497 mmc->ext_csd = ext_csd;
1498
1499 /* check ext_csd version and capacity */
1500 err = mmc_send_ext_csd(mmc, ext_csd);
1501 if (err)
1502 return err;
1503 if (ext_csd[EXT_CSD_REV] >= 2) {
1504 /*
1505 * According to the JEDEC Standard, the value of
1506 * ext_csd's capacity is valid if the value is more
1507 * than 2GB
1508 */
1509 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
1510 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
1511 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
1512 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
1513 capacity *= MMC_MAX_BLOCK_LEN;
1514 if ((capacity >> 20) > 2 * 1024)
1515 mmc->capacity_user = capacity;
1516 }
1517
1518 switch (ext_csd[EXT_CSD_REV]) {
1519 case 1:
1520 mmc->version = MMC_VERSION_4_1;
1521 break;
1522 case 2:
1523 mmc->version = MMC_VERSION_4_2;
1524 break;
1525 case 3:
1526 mmc->version = MMC_VERSION_4_3;
1527 break;
1528 case 5:
1529 mmc->version = MMC_VERSION_4_41;
1530 break;
1531 case 6:
1532 mmc->version = MMC_VERSION_4_5;
1533 break;
1534 case 7:
1535 mmc->version = MMC_VERSION_5_0;
1536 break;
1537 case 8:
1538 mmc->version = MMC_VERSION_5_1;
1539 break;
1540 }
1541
1542 /* The partition data may be non-zero but it is only
1543 * effective if PARTITION_SETTING_COMPLETED is set in
1544 * EXT_CSD, so ignore any data if this bit is not set,
1545 * except for enabling the high-capacity group size
1546 * definition (see below).
1547 */
1548 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
1549 EXT_CSD_PARTITION_SETTING_COMPLETED);
1550
1551 /* store the partition info of emmc */
1552 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
1553 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
1554 ext_csd[EXT_CSD_BOOT_MULT])
1555 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
1556 if (part_completed &&
1557 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
1558 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
1559
1560 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
1561
1562 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
1563
1564 for (i = 0; i < 4; i++) {
1565 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
1566 uint mult = (ext_csd[idx + 2] << 16) +
1567 (ext_csd[idx + 1] << 8) + ext_csd[idx];
1568 if (mult)
1569 has_parts = true;
1570 if (!part_completed)
1571 continue;
1572 mmc->capacity_gp[i] = mult;
1573 mmc->capacity_gp[i] *=
1574 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
1575 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1576 mmc->capacity_gp[i] <<= 19;
1577 }
1578
1579 if (part_completed) {
1580 mmc->enh_user_size =
1581 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
1582 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
1583 ext_csd[EXT_CSD_ENH_SIZE_MULT];
1584 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
1585 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1586 mmc->enh_user_size <<= 19;
1587 mmc->enh_user_start =
1588 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
1589 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
1590 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
1591 ext_csd[EXT_CSD_ENH_START_ADDR];
1592 if (mmc->high_capacity)
1593 mmc->enh_user_start <<= 9;
1594 }
1595
1596 /*
1597 * Host needs to enable ERASE_GRP_DEF bit if device is
1598 * partitioned. This bit will be lost every time after a reset
1599 * or power off. This will affect erase size.
1600 */
1601 if (part_completed)
1602 has_parts = true;
1603 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
1604 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
1605 has_parts = true;
1606 if (has_parts) {
1607 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1608 EXT_CSD_ERASE_GROUP_DEF, 1);
1609
1610 if (err)
1611 return err;
1612
1613 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1614 }
1615
1616 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
1617 /* Read out group size from ext_csd */
1618 mmc->erase_grp_size =
1619 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1620 /*
1621 * if high capacity and partition setting completed
1622 * SEC_COUNT is valid even if it is smaller than 2 GiB
1623 * JEDEC Standard JESD84-B45, 6.2.4
1624 */
1625 if (mmc->high_capacity && part_completed) {
1626 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
1627 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
1628 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
1629 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
1630 capacity *= MMC_MAX_BLOCK_LEN;
1631 mmc->capacity_user = capacity;
1632 }
1633 } else {
1634 /* Calculate the group size from the csd value. */
1635 int erase_gsz, erase_gmul;
1636
1637 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
1638 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
1639 mmc->erase_grp_size = (erase_gsz + 1)
1640 * (erase_gmul + 1);
1641 }
1642
1643 mmc->hc_wp_grp_size = 1024
1644 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1645 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1646
1647 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1648
1649 return 0;
1650 }
1651
1652 static int mmc_startup(struct mmc *mmc)
1653 {
1654 int err, i;
1655 uint mult, freq;
1656 u64 cmult, csize;
1657 struct mmc_cmd cmd;
1658 struct blk_desc *bdesc;
1659
1660 #ifdef CONFIG_MMC_SPI_CRC_ON
1661 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
1662 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
1663 cmd.resp_type = MMC_RSP_R1;
1664 cmd.cmdarg = 1;
1665 err = mmc_send_cmd(mmc, &cmd, NULL);
1666
1667 if (err)
1668 return err;
1669 }
1670 #endif
1671
1672 /* Put the Card in Identify Mode */
1673 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
1674 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
1675 cmd.resp_type = MMC_RSP_R2;
1676 cmd.cmdarg = 0;
1677
1678 err = mmc_send_cmd(mmc, &cmd, NULL);
1679
1680 if (err)
1681 return err;
1682
1683 memcpy(mmc->cid, cmd.response, 16);
1684
1685 /*
1686 * For MMC cards, set the Relative Address.
1687 * For SD cards, get the Relative Address.
1688 * This also puts the cards into Standby State
1689 */
1690 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
1691 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
1692 cmd.cmdarg = mmc->rca << 16;
1693 cmd.resp_type = MMC_RSP_R6;
1694
1695 err = mmc_send_cmd(mmc, &cmd, NULL);
1696
1697 if (err)
1698 return err;
1699
1700 if (IS_SD(mmc))
1701 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
1702 }
1703
1704 /* Get the Card-Specific Data */
1705 cmd.cmdidx = MMC_CMD_SEND_CSD;
1706 cmd.resp_type = MMC_RSP_R2;
1707 cmd.cmdarg = mmc->rca << 16;
1708
1709 err = mmc_send_cmd(mmc, &cmd, NULL);
1710
1711 if (err)
1712 return err;
1713
1714 mmc->csd[0] = cmd.response[0];
1715 mmc->csd[1] = cmd.response[1];
1716 mmc->csd[2] = cmd.response[2];
1717 mmc->csd[3] = cmd.response[3];
1718
1719 if (mmc->version == MMC_VERSION_UNKNOWN) {
1720 int version = (cmd.response[0] >> 26) & 0xf;
1721
1722 switch (version) {
1723 case 0:
1724 mmc->version = MMC_VERSION_1_2;
1725 break;
1726 case 1:
1727 mmc->version = MMC_VERSION_1_4;
1728 break;
1729 case 2:
1730 mmc->version = MMC_VERSION_2_2;
1731 break;
1732 case 3:
1733 mmc->version = MMC_VERSION_3;
1734 break;
1735 case 4:
1736 mmc->version = MMC_VERSION_4;
1737 break;
1738 default:
1739 mmc->version = MMC_VERSION_1_2;
1740 break;
1741 }
1742 }
1743
1744 /* divide frequency by 10, since the mults are 10x bigger */
1745 freq = fbase[(cmd.response[0] & 0x7)];
1746 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
1747
1748 mmc->legacy_speed = freq * mult;
1749 mmc_select_mode(mmc, MMC_LEGACY);
1750
1751 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
1752 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
1753
1754 if (IS_SD(mmc))
1755 mmc->write_bl_len = mmc->read_bl_len;
1756 else
1757 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
1758
1759 if (mmc->high_capacity) {
1760 csize = (mmc->csd[1] & 0x3f) << 16
1761 | (mmc->csd[2] & 0xffff0000) >> 16;
1762 cmult = 8;
1763 } else {
1764 csize = (mmc->csd[1] & 0x3ff) << 2
1765 | (mmc->csd[2] & 0xc0000000) >> 30;
1766 cmult = (mmc->csd[2] & 0x00038000) >> 15;
1767 }
1768
1769 mmc->capacity_user = (csize + 1) << (cmult + 2);
1770 mmc->capacity_user *= mmc->read_bl_len;
1771 mmc->capacity_boot = 0;
1772 mmc->capacity_rpmb = 0;
1773 for (i = 0; i < 4; i++)
1774 mmc->capacity_gp[i] = 0;
1775
1776 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
1777 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
1778
1779 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
1780 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
1781
1782 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
1783 cmd.cmdidx = MMC_CMD_SET_DSR;
1784 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
1785 cmd.resp_type = MMC_RSP_NONE;
1786 if (mmc_send_cmd(mmc, &cmd, NULL))
1787 printf("MMC: SET_DSR failed\n");
1788 }
1789
1790 /* Select the card, and put it into Transfer Mode */
1791 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
1792 cmd.cmdidx = MMC_CMD_SELECT_CARD;
1793 cmd.resp_type = MMC_RSP_R1;
1794 cmd.cmdarg = mmc->rca << 16;
1795 err = mmc_send_cmd(mmc, &cmd, NULL);
1796
1797 if (err)
1798 return err;
1799 }
1800
1801 /*
1802 * For SD, its erase group is always one sector
1803 */
1804 mmc->erase_grp_size = 1;
1805 mmc->part_config = MMCPART_NOAVAILABLE;
1806
1807 err = mmc_startup_v4(mmc);
1808 if (err)
1809 return err;
1810
1811 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
1812 if (err)
1813 return err;
1814
1815 if (IS_SD(mmc))
1816 err = sd_select_mode_and_width(mmc);
1817 else
1818 err = mmc_select_mode_and_width(mmc);
1819
1820 if (err)
1821 return err;
1822
1823
1824 /* Fix the block length for DDR mode */
1825 if (mmc->ddr_mode) {
1826 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
1827 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
1828 }
1829
1830 /* fill in device description */
1831 bdesc = mmc_get_blk_desc(mmc);
1832 bdesc->lun = 0;
1833 bdesc->hwpart = 0;
1834 bdesc->type = 0;
1835 bdesc->blksz = mmc->read_bl_len;
1836 bdesc->log2blksz = LOG2(bdesc->blksz);
1837 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
1838 #if !defined(CONFIG_SPL_BUILD) || \
1839 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
1840 !defined(CONFIG_USE_TINY_PRINTF))
1841 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
1842 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
1843 (mmc->cid[3] >> 16) & 0xffff);
1844 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
1845 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
1846 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
1847 (mmc->cid[2] >> 24) & 0xff);
1848 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
1849 (mmc->cid[2] >> 16) & 0xf);
1850 #else
1851 bdesc->vendor[0] = 0;
1852 bdesc->product[0] = 0;
1853 bdesc->revision[0] = 0;
1854 #endif
1855 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
1856 part_init(bdesc);
1857 #endif
1858
1859 return 0;
1860 }
1861
1862 static int mmc_send_if_cond(struct mmc *mmc)
1863 {
1864 struct mmc_cmd cmd;
1865 int err;
1866
1867 cmd.cmdidx = SD_CMD_SEND_IF_COND;
1868 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
1869 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
1870 cmd.resp_type = MMC_RSP_R7;
1871
1872 err = mmc_send_cmd(mmc, &cmd, NULL);
1873
1874 if (err)
1875 return err;
1876
1877 if ((cmd.response[0] & 0xff) != 0xaa)
1878 return -EOPNOTSUPP;
1879 else
1880 mmc->version = SD_VERSION_2;
1881
1882 return 0;
1883 }
1884
1885 #if !CONFIG_IS_ENABLED(DM_MMC)
1886 /* board-specific MMC power initializations. */
1887 __weak void board_mmc_power_init(void)
1888 {
1889 }
1890 #endif
1891
1892 static int mmc_power_init(struct mmc *mmc)
1893 {
1894 #if CONFIG_IS_ENABLED(DM_MMC)
1895 #if CONFIG_IS_ENABLED(DM_REGULATOR)
1896 int ret;
1897
1898 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
1899 &mmc->vmmc_supply);
1900 if (ret)
1901 debug("%s: No vmmc supply\n", mmc->dev->name);
1902
1903 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
1904 &mmc->vqmmc_supply);
1905 if (ret)
1906 debug("%s: No vqmmc supply\n", mmc->dev->name);
1907
1908 if (mmc->vmmc_supply) {
1909 ret = regulator_set_enable(mmc->vmmc_supply, true);
1910 if (ret) {
1911 puts("Error enabling VMMC supply\n");
1912 return ret;
1913 }
1914 }
1915 #endif
1916 #else /* !CONFIG_DM_MMC */
1917 /*
1918 * Driver model should use a regulator, as above, rather than calling
1919 * out to board code.
1920 */
1921 board_mmc_power_init();
1922 #endif
1923 return 0;
1924 }
1925
1926 int mmc_start_init(struct mmc *mmc)
1927 {
1928 bool no_card;
1929 int err;
1930
1931 /* we pretend there's no card when init is NULL */
1932 no_card = mmc_getcd(mmc) == 0;
1933 #if !CONFIG_IS_ENABLED(DM_MMC)
1934 no_card = no_card || (mmc->cfg->ops->init == NULL);
1935 #endif
1936 if (no_card) {
1937 mmc->has_init = 0;
1938 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
1939 printf("MMC: no card present\n");
1940 #endif
1941 return -ENOMEDIUM;
1942 }
1943
1944 if (mmc->has_init)
1945 return 0;
1946
1947 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
1948 mmc_adapter_card_type_ident();
1949 #endif
1950 err = mmc_power_init(mmc);
1951 if (err)
1952 return err;
1953
1954 #if CONFIG_IS_ENABLED(DM_MMC)
1955 /* The device has already been probed ready for use */
1956 #else
1957 /* made sure it's not NULL earlier */
1958 err = mmc->cfg->ops->init(mmc);
1959 if (err)
1960 return err;
1961 #endif
1962 mmc->ddr_mode = 0;
1963 mmc_set_bus_width(mmc, 1);
1964 mmc_set_clock(mmc, 1);
1965
1966 /* Reset the Card */
1967 err = mmc_go_idle(mmc);
1968
1969 if (err)
1970 return err;
1971
1972 /* The internal partition resets to the user partition (0) on every CMD0 */
1973 mmc_get_blk_desc(mmc)->hwpart = 0;
1974
1975 /* Test for SD version 2 */
1976 err = mmc_send_if_cond(mmc);
1977
1978 /* Now try to get the SD card's operating condition */
1979 err = sd_send_op_cond(mmc);
1980
1981 /* If the command timed out, we check for an MMC card */
1982 if (err == -ETIMEDOUT) {
1983 err = mmc_send_op_cond(mmc);
1984
1985 if (err) {
1986 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
1987 printf("Card did not respond to voltage select!\n");
1988 #endif
1989 return -EOPNOTSUPP;
1990 }
1991 }
1992
1993 if (!err)
1994 mmc->init_in_progress = 1;
1995
1996 return err;
1997 }
1998
1999 static int mmc_complete_init(struct mmc *mmc)
2000 {
2001 int err = 0;
2002
2003 mmc->init_in_progress = 0;
2004 if (mmc->op_cond_pending)
2005 err = mmc_complete_op_cond(mmc);
2006
2007 if (!err)
2008 err = mmc_startup(mmc);
2009 if (err)
2010 mmc->has_init = 0;
2011 else
2012 mmc->has_init = 1;
2013 return err;
2014 }
2015
2016 int mmc_init(struct mmc *mmc)
2017 {
2018 int err = 0;
2019 __maybe_unused unsigned start;
2020 #if CONFIG_IS_ENABLED(DM_MMC)
2021 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2022
2023 upriv->mmc = mmc;
2024 #endif
2025 if (mmc->has_init)
2026 return 0;
2027
2028 start = get_timer(0);
2029
2030 if (!mmc->init_in_progress)
2031 err = mmc_start_init(mmc);
2032
2033 if (!err)
2034 err = mmc_complete_init(mmc);
2035 if (err)
2036 printf("%s: %d, time %lu\n", __func__, err, get_timer(start));
2037
2038 return err;
2039 }
2040
2041 int mmc_set_dsr(struct mmc *mmc, u16 val)
2042 {
2043 mmc->dsr = val;
2044 return 0;
2045 }
2046
2047 /* CPU-specific MMC initializations */
2048 __weak int cpu_mmc_init(bd_t *bis)
2049 {
2050 return -1;
2051 }
2052
2053 /* board-specific MMC initializations. */
2054 __weak int board_mmc_init(bd_t *bis)
2055 {
2056 return -1;
2057 }
2058
2059 void mmc_set_preinit(struct mmc *mmc, int preinit)
2060 {
2061 mmc->preinit = preinit;
2062 }
2063
2064 #if CONFIG_IS_ENABLED(DM_MMC) && defined(CONFIG_SPL_BUILD)
2065 static int mmc_probe(bd_t *bis)
2066 {
2067 return 0;
2068 }
2069 #elif CONFIG_IS_ENABLED(DM_MMC)
2070 static int mmc_probe(bd_t *bis)
2071 {
2072 int ret, i;
2073 struct uclass *uc;
2074 struct udevice *dev;
2075
2076 ret = uclass_get(UCLASS_MMC, &uc);
2077 if (ret)
2078 return ret;
2079
2080 /*
2081 * Try to add them in sequence order. Really with driver model we
2082 * should allow holes, but the current MMC list does not allow that.
2083 * So if we request 0, 1, 3 we will get 0, 1, 2.
2084 */
2085 for (i = 0; ; i++) {
2086 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
2087 if (ret == -ENODEV)
2088 break;
2089 }
2090 uclass_foreach_dev(dev, uc) {
2091 ret = device_probe(dev);
2092 if (ret)
2093 printf("%s - probe failed: %d\n", dev->name, ret);
2094 }
2095
2096 return 0;
2097 }
2098 #else
2099 static int mmc_probe(bd_t *bis)
2100 {
2101 if (board_mmc_init(bis) < 0)
2102 cpu_mmc_init(bis);
2103
2104 return 0;
2105 }
2106 #endif
2107
2108 int mmc_initialize(bd_t *bis)
2109 {
2110 static int initialized = 0;
2111 int ret;
2112 if (initialized) /* Avoid initializing mmc multiple times */
2113 return 0;
2114 initialized = 1;
2115
2116 #if !CONFIG_IS_ENABLED(BLK)
2117 #if !CONFIG_IS_ENABLED(MMC_TINY)
2118 mmc_list_init();
2119 #endif
2120 #endif
2121 ret = mmc_probe(bis);
2122 if (ret)
2123 return ret;
2124
2125 #ifndef CONFIG_SPL_BUILD
2126 print_mmc_devices(',');
2127 #endif
2128
2129 mmc_do_preinit();
2130 return 0;
2131 }
2132
2133 #ifdef CONFIG_CMD_BKOPS_ENABLE
2134 int mmc_set_bkops_enable(struct mmc *mmc)
2135 {
2136 int err;
2137 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2138
2139 err = mmc_send_ext_csd(mmc, ext_csd);
2140 if (err) {
2141 puts("Could not get ext_csd register values\n");
2142 return err;
2143 }
2144
2145 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
2146 puts("Background operations not supported on device\n");
2147 return -EMEDIUMTYPE;
2148 }
2149
2150 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
2151 puts("Background operations already enabled\n");
2152 return 0;
2153 }
2154
2155 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
2156 if (err) {
2157 puts("Failed to enable manual background operations\n");
2158 return err;
2159 }
2160
2161 puts("Enabled manual background operations\n");
2162
2163 return 0;
2164 }
2165 #endif