drivers/mmc/mmc.c
1 /*
2 * Copyright 2008, Freescale Semiconductor, Inc
3 * Andy Fleming
4 *
5 * Based vaguely on the Linux code
6 *
7 * SPDX-License-Identifier: GPL-2.0+
8 */
9
10 #include <config.h>
11 #include <common.h>
12 #include <command.h>
13 #include <dm.h>
14 #include <dm/device-internal.h>
15 #include <errno.h>
16 #include <mmc.h>
17 #include <part.h>
18 #include <power/regulator.h>
19 #include <malloc.h>
20 #include <memalign.h>
21 #include <linux/list.h>
22 #include <div64.h>
23 #include "mmc_private.h"
24
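/*
 * SD Status AU_SIZE codes mapped to the allocation unit size in 512-byte
 * sectors, e.g. code 9 -> SZ_4M / 512 = 8192 sectors (4 MiB).  Code 0
 * means the card does not report an AU size.
 */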
25 static const unsigned int sd_au_size[] = {
26 0, SZ_16K / 512, SZ_32K / 512,
27 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
28 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
29 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
30 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512, SZ_64M / 512,
31 };
32
33 #if CONFIG_IS_ENABLED(MMC_TINY)
34 static struct mmc mmc_static;
35 struct mmc *find_mmc_device(int dev_num)
36 {
37 return &mmc_static;
38 }
39
40 void mmc_do_preinit(void)
41 {
42 struct mmc *m = &mmc_static;
43 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
44 mmc_set_preinit(m, 1);
45 #endif
46 if (m->preinit)
47 mmc_start_init(m);
48 }
49
50 struct blk_desc *mmc_get_blk_desc(struct mmc *mmc)
51 {
52 return &mmc->block_dev;
53 }
54 #endif
55
56 #if !CONFIG_IS_ENABLED(DM_MMC)
57 __weak int board_mmc_getwp(struct mmc *mmc)
58 {
59 return -1;
60 }
61
62 int mmc_getwp(struct mmc *mmc)
63 {
64 int wp;
65
66 wp = board_mmc_getwp(mmc);
67
68 if (wp < 0) {
69 if (mmc->cfg->ops->getwp)
70 wp = mmc->cfg->ops->getwp(mmc);
71 else
72 wp = 0;
73 }
74
75 return wp;
76 }
77
78 __weak int board_mmc_getcd(struct mmc *mmc)
79 {
80 return -1;
81 }
82 #endif
83
84 #ifdef CONFIG_MMC_TRACE
85 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
86 {
87 printf("CMD_SEND:%d\n", cmd->cmdidx);
88 printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
89 }
90
91 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
92 {
93 int i;
94 u8 *ptr;
95
96 if (ret) {
97 printf("\t\tRET\t\t\t %d\n", ret);
98 } else {
99 switch (cmd->resp_type) {
100 case MMC_RSP_NONE:
101 printf("\t\tMMC_RSP_NONE\n");
102 break;
103 case MMC_RSP_R1:
104 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
105 cmd->response[0]);
106 break;
107 case MMC_RSP_R1b:
108 printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
109 cmd->response[0]);
110 break;
111 case MMC_RSP_R2:
112 printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
113 cmd->response[0]);
114 printf("\t\t \t\t 0x%08X \n",
115 cmd->response[1]);
116 printf("\t\t \t\t 0x%08X \n",
117 cmd->response[2]);
118 printf("\t\t \t\t 0x%08X \n",
119 cmd->response[3]);
120 printf("\n");
121 printf("\t\t\t\t\tDUMPING DATA\n");
122 for (i = 0; i < 4; i++) {
123 int j;
124 printf("\t\t\t\t\t%03d - ", i*4);
125 ptr = (u8 *)&cmd->response[i];
126 ptr += 3;
127 for (j = 0; j < 4; j++)
128 printf("%02X ", *ptr--);
129 printf("\n");
130 }
131 break;
132 case MMC_RSP_R3:
133 printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
134 cmd->response[0]);
135 break;
136 default:
137 printf("\t\tERROR MMC rsp not supported\n");
138 break;
139 }
140 }
141 }
142
143 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
144 {
145 int status;
146
147 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
148 printf("CURR STATE:%d\n", status);
149 }
150 #endif
151
152 #if !CONFIG_IS_ENABLED(DM_MMC)
153 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
154 {
155 int ret;
156
157 mmmc_trace_before_send(mmc, cmd);
158 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
159 mmmc_trace_after_send(mmc, cmd, ret);
160
161 return ret;
162 }
163 #endif
164
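/*
 * Poll the card with CMD13 (SEND_STATUS) until it reports ready-for-data
 * and has left the programming state, or until the timeout (in ms, polled
 * in 1 ms steps) expires.  Returns 0 on success, -ECOMM on a status error
 * and -ETIMEDOUT on timeout.
 */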
165 int mmc_send_status(struct mmc *mmc, int timeout)
166 {
167 struct mmc_cmd cmd;
168 int err, retries = 5;
169
170 cmd.cmdidx = MMC_CMD_SEND_STATUS;
171 cmd.resp_type = MMC_RSP_R1;
172 if (!mmc_host_is_spi(mmc))
173 cmd.cmdarg = mmc->rca << 16;
174
175 while (1) {
176 err = mmc_send_cmd(mmc, &cmd, NULL);
177 if (!err) {
178 if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
179 (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
180 MMC_STATE_PRG)
181 break;
182 else if (cmd.response[0] & MMC_STATUS_MASK) {
183 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
184 printf("Status Error: 0x%08X\n",
185 cmd.response[0]);
186 #endif
187 return -ECOMM;
188 }
189 } else if (--retries < 0)
190 return err;
191
192 if (timeout-- <= 0)
193 break;
194
195 udelay(1000);
196 }
197
198 mmc_trace_state(mmc, &cmd);
199 if (timeout <= 0) {
200 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
201 printf("Timeout waiting card ready\n");
202 #endif
203 return -ETIMEDOUT;
204 }
205
206 return 0;
207 }
208
209 int mmc_set_blocklen(struct mmc *mmc, int len)
210 {
211 struct mmc_cmd cmd;
212
213 if (mmc->ddr_mode)
214 return 0;
215
216 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
217 cmd.resp_type = MMC_RSP_R1;
218 cmd.cmdarg = len;
219
220 return mmc_send_cmd(mmc, &cmd, NULL);
221 }
222
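/*
 * Read 'blkcnt' blocks starting at 'start' using CMD17/CMD18 and, for
 * multi-block transfers, CMD12 (STOP_TRANSMISSION).  Returns the number
 * of blocks read, or 0 on error.
 */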
223 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
224 lbaint_t blkcnt)
225 {
226 struct mmc_cmd cmd;
227 struct mmc_data data;
228
229 if (blkcnt > 1)
230 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
231 else
232 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
233
234 if (mmc->high_capacity)
235 cmd.cmdarg = start;
236 else
237 cmd.cmdarg = start * mmc->read_bl_len;
238
239 cmd.resp_type = MMC_RSP_R1;
240
241 data.dest = dst;
242 data.blocks = blkcnt;
243 data.blocksize = mmc->read_bl_len;
244 data.flags = MMC_DATA_READ;
245
246 if (mmc_send_cmd(mmc, &cmd, &data))
247 return 0;
248
249 if (blkcnt > 1) {
250 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
251 cmd.cmdarg = 0;
252 cmd.resp_type = MMC_RSP_R1b;
253 if (mmc_send_cmd(mmc, &cmd, NULL)) {
254 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
255 printf("mmc fail to send stop cmd\n");
256 #endif
257 return 0;
258 }
259 }
260
261 return blkcnt;
262 }
263
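/*
 * Block-device read entry point: selects the hardware partition, checks
 * the requested range against the device size, sets the block length and
 * reads in chunks of at most cfg->b_max blocks.  Returns the number of
 * blocks read, or 0 on error.
 */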
264 #if CONFIG_IS_ENABLED(BLK)
265 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
266 #else
267 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
268 void *dst)
269 #endif
270 {
271 #if CONFIG_IS_ENABLED(BLK)
272 struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
273 #endif
274 int dev_num = block_dev->devnum;
275 int err;
276 lbaint_t cur, blocks_todo = blkcnt;
277
278 if (blkcnt == 0)
279 return 0;
280
281 struct mmc *mmc = find_mmc_device(dev_num);
282 if (!mmc)
283 return 0;
284
285 if (CONFIG_IS_ENABLED(MMC_TINY))
286 err = mmc_switch_part(mmc, block_dev->hwpart);
287 else
288 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
289
290 if (err < 0)
291 return 0;
292
293 if ((start + blkcnt) > block_dev->lba) {
294 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
295 printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
296 start + blkcnt, block_dev->lba);
297 #endif
298 return 0;
299 }
300
301 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
302 debug("%s: Failed to set blocklen\n", __func__);
303 return 0;
304 }
305
306 do {
307 cur = (blocks_todo > mmc->cfg->b_max) ?
308 mmc->cfg->b_max : blocks_todo;
309 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
310 debug("%s: Failed to read blocks\n", __func__);
311 return 0;
312 }
313 blocks_todo -= cur;
314 start += cur;
315 dst += cur * mmc->read_bl_len;
316 } while (blocks_todo > 0);
317
318 return blkcnt;
319 }
320
321 static int mmc_go_idle(struct mmc *mmc)
322 {
323 struct mmc_cmd cmd;
324 int err;
325
326 udelay(1000);
327
328 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
329 cmd.cmdarg = 0;
330 cmd.resp_type = MMC_RSP_NONE;
331
332 err = mmc_send_cmd(mmc, &cmd, NULL);
333
334 if (err)
335 return err;
336
337 udelay(2000);
338
339 return 0;
340 }
341
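/*
 * Negotiate the SD operating conditions: send ACMD41 repeatedly until the
 * card reports power-up complete in the OCR.  OCR_HCS is advertised for
 * SD version 2 cards so that SDHC/SDXC cards can respond; for SPI hosts
 * the OCR is then read back with CMD58.
 */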
342 static int sd_send_op_cond(struct mmc *mmc)
343 {
344 int timeout = 1000;
345 int err;
346 struct mmc_cmd cmd;
347
348 while (1) {
349 cmd.cmdidx = MMC_CMD_APP_CMD;
350 cmd.resp_type = MMC_RSP_R1;
351 cmd.cmdarg = 0;
352
353 err = mmc_send_cmd(mmc, &cmd, NULL);
354
355 if (err)
356 return err;
357
358 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
359 cmd.resp_type = MMC_RSP_R3;
360
 361 		/*
 362 		 * Most cards do not answer if some reserved bits
 363 		 * in the OCR are set. However, some controllers
 364 		 * can set bit 7 (reserved for low voltages), but
 365 		 * how to manage low-voltage SD cards is not yet
 366 		 * specified.
 367 		 */
368 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
369 (mmc->cfg->voltages & 0xff8000);
370
371 if (mmc->version == SD_VERSION_2)
372 cmd.cmdarg |= OCR_HCS;
373
374 err = mmc_send_cmd(mmc, &cmd, NULL);
375
376 if (err)
377 return err;
378
379 if (cmd.response[0] & OCR_BUSY)
380 break;
381
382 if (timeout-- <= 0)
383 return -EOPNOTSUPP;
384
385 udelay(1000);
386 }
387
388 if (mmc->version != SD_VERSION_2)
389 mmc->version = SD_VERSION_1_0;
390
391 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
392 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
393 cmd.resp_type = MMC_RSP_R3;
394 cmd.cmdarg = 0;
395
396 err = mmc_send_cmd(mmc, &cmd, NULL);
397
398 if (err)
399 return err;
400 }
401
402 mmc->ocr = cmd.response[0];
403
404 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
405 mmc->rca = 0;
406
407 return 0;
408 }
409
410 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
411 {
412 struct mmc_cmd cmd;
413 int err;
414
415 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
416 cmd.resp_type = MMC_RSP_R3;
417 cmd.cmdarg = 0;
418 if (use_arg && !mmc_host_is_spi(mmc))
419 cmd.cmdarg = OCR_HCS |
420 (mmc->cfg->voltages &
421 (mmc->ocr & OCR_VOLTAGE_MASK)) |
422 (mmc->ocr & OCR_ACCESS_MODE);
423
424 err = mmc_send_cmd(mmc, &cmd, NULL);
425 if (err)
426 return err;
427 mmc->ocr = cmd.response[0];
428 return 0;
429 }
430
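/*
 * Start eMMC operating-condition negotiation with CMD1: the first
 * iteration queries the card's OCR, the second sends the voltage window
 * and access mode.  Polling is finished later in mmc_complete_op_cond(),
 * so op_cond_pending is set here.
 */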
431 static int mmc_send_op_cond(struct mmc *mmc)
432 {
433 int err, i;
434
435 /* Some cards seem to need this */
436 mmc_go_idle(mmc);
437
 439 	/* Ask the card for its capabilities */
439 for (i = 0; i < 2; i++) {
440 err = mmc_send_op_cond_iter(mmc, i != 0);
441 if (err)
442 return err;
443
444 /* exit if not busy (flag seems to be inverted) */
445 if (mmc->ocr & OCR_BUSY)
446 break;
447 }
448 mmc->op_cond_pending = 1;
449 return 0;
450 }
451
452 static int mmc_complete_op_cond(struct mmc *mmc)
453 {
454 struct mmc_cmd cmd;
455 int timeout = 1000;
456 uint start;
457 int err;
458
459 mmc->op_cond_pending = 0;
460 if (!(mmc->ocr & OCR_BUSY)) {
461 /* Some cards seem to need this */
462 mmc_go_idle(mmc);
463
464 start = get_timer(0);
465 while (1) {
466 err = mmc_send_op_cond_iter(mmc, 1);
467 if (err)
468 return err;
469 if (mmc->ocr & OCR_BUSY)
470 break;
471 if (get_timer(start) > timeout)
472 return -EOPNOTSUPP;
473 udelay(100);
474 }
475 }
476
477 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
478 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
479 cmd.resp_type = MMC_RSP_R3;
480 cmd.cmdarg = 0;
481
482 err = mmc_send_cmd(mmc, &cmd, NULL);
483
484 if (err)
485 return err;
486
487 mmc->ocr = cmd.response[0];
488 }
489
490 mmc->version = MMC_VERSION_UNKNOWN;
491
492 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
493 mmc->rca = 1;
494
495 return 0;
496 }
497
498
499 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
500 {
501 struct mmc_cmd cmd;
502 struct mmc_data data;
503 int err;
504
 505 	/* Get the EXT_CSD register */
506 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
507 cmd.resp_type = MMC_RSP_R1;
508 cmd.cmdarg = 0;
509
510 data.dest = (char *)ext_csd;
511 data.blocks = 1;
512 data.blocksize = MMC_MAX_BLOCK_LEN;
513 data.flags = MMC_DATA_READ;
514
515 err = mmc_send_cmd(mmc, &cmd, &data);
516
517 return err;
518 }
519
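/*
 * Write a single EXT_CSD byte with CMD6 (SWITCH), retrying up to three
 * times, and then wait for the card to return to the ready state.
 */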
520 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
521 {
522 struct mmc_cmd cmd;
523 int timeout = 1000;
524 int retries = 3;
525 int ret;
526
527 cmd.cmdidx = MMC_CMD_SWITCH;
528 cmd.resp_type = MMC_RSP_R1b;
529 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
530 (index << 16) |
531 (value << 8);
532
533 while (retries > 0) {
534 ret = mmc_send_cmd(mmc, &cmd, NULL);
535
536 /* Waiting for the ready status */
537 if (!ret) {
538 ret = mmc_send_status(mmc, timeout);
539 return ret;
540 }
541
542 retries--;
543 }
544
545 return ret;
546
547 }
548
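/*
 * Switch an eMMC (version 4 or later) device to high-speed timing: read
 * the EXT_CSD card type, set HS_TIMING, re-read the EXT_CSD to confirm,
 * and record the resulting capabilities (26/52 MHz, DDR).
 */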
549 static int mmc_change_freq(struct mmc *mmc)
550 {
551 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
552 char cardtype;
553 int err;
554
555 mmc->card_caps = 0;
556
557 if (mmc_host_is_spi(mmc))
558 return 0;
559
560 /* Only version 4 supports high-speed */
561 if (mmc->version < MMC_VERSION_4)
562 return 0;
563
564 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
565
566 err = mmc_send_ext_csd(mmc, ext_csd);
567
568 if (err)
569 return err;
570
571 cardtype = ext_csd[EXT_CSD_CARD_TYPE] & 0xf;
572
573 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING, 1);
574
575 if (err)
576 return err;
577
578 /* Now check to see that it worked */
579 err = mmc_send_ext_csd(mmc, ext_csd);
580
581 if (err)
582 return err;
583
584 /* No high-speed support */
585 if (!ext_csd[EXT_CSD_HS_TIMING])
586 return 0;
587
588 /* High Speed is set, there are two types: 52MHz and 26MHz */
589 if (cardtype & EXT_CSD_CARD_TYPE_52) {
590 if (cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
591 mmc->card_caps |= MMC_MODE_DDR_52MHz;
592 mmc->card_caps |= MMC_MODE_HS_52MHz | MMC_MODE_HS;
593 } else {
594 mmc->card_caps |= MMC_MODE_HS;
595 }
596
597 return 0;
598 }
599
600 static int mmc_set_capacity(struct mmc *mmc, int part_num)
601 {
602 switch (part_num) {
603 case 0:
604 mmc->capacity = mmc->capacity_user;
605 break;
606 case 1:
607 case 2:
608 mmc->capacity = mmc->capacity_boot;
609 break;
610 case 3:
611 mmc->capacity = mmc->capacity_rpmb;
612 break;
613 case 4:
614 case 5:
615 case 6:
616 case 7:
617 mmc->capacity = mmc->capacity_gp[part_num - 4];
618 break;
619 default:
620 return -1;
621 }
622
623 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
624
625 return 0;
626 }
627
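/*
 * Select a hardware partition via EXT_CSD PARTITION_CONFIG and update the
 * exported capacity and the block descriptor's hwpart accordingly.
 */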
628 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
629 {
630 int ret;
631
632 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
633 (mmc->part_config & ~PART_ACCESS_MASK)
634 | (part_num & PART_ACCESS_MASK));
635
636 /*
637 * Set the capacity if the switch succeeded or was intended
638 * to return to representing the raw device.
639 */
640 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
641 ret = mmc_set_capacity(mmc, part_num);
642 mmc_get_blk_desc(mmc)->hwpart = part_num;
643 }
644
645 return ret;
646 }
647
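/*
 * Validate and program the eMMC hardware partition layout (enhanced user
 * data area, general purpose partitions and write reliability settings).
 * 'mode' selects whether the configuration is only checked, written, or
 * written and marked as completed.
 */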
648 int mmc_hwpart_config(struct mmc *mmc,
649 const struct mmc_hwpart_conf *conf,
650 enum mmc_hwpart_conf_mode mode)
651 {
652 u8 part_attrs = 0;
653 u32 enh_size_mult;
654 u32 enh_start_addr;
655 u32 gp_size_mult[4];
656 u32 max_enh_size_mult;
657 u32 tot_enh_size_mult = 0;
658 u8 wr_rel_set;
659 int i, pidx, err;
660 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
661
662 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
663 return -EINVAL;
664
665 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
666 printf("eMMC >= 4.4 required for enhanced user data area\n");
667 return -EMEDIUMTYPE;
668 }
669
670 if (!(mmc->part_support & PART_SUPPORT)) {
671 printf("Card does not support partitioning\n");
672 return -EMEDIUMTYPE;
673 }
674
675 if (!mmc->hc_wp_grp_size) {
676 printf("Card does not define HC WP group size\n");
677 return -EMEDIUMTYPE;
678 }
679
680 /* check partition alignment and total enhanced size */
681 if (conf->user.enh_size) {
682 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
683 conf->user.enh_start % mmc->hc_wp_grp_size) {
684 printf("User data enhanced area not HC WP group "
685 "size aligned\n");
686 return -EINVAL;
687 }
688 part_attrs |= EXT_CSD_ENH_USR;
689 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
690 if (mmc->high_capacity) {
691 enh_start_addr = conf->user.enh_start;
692 } else {
693 enh_start_addr = (conf->user.enh_start << 9);
694 }
695 } else {
696 enh_size_mult = 0;
697 enh_start_addr = 0;
698 }
699 tot_enh_size_mult += enh_size_mult;
700
701 for (pidx = 0; pidx < 4; pidx++) {
702 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
703 printf("GP%i partition not HC WP group size "
704 "aligned\n", pidx+1);
705 return -EINVAL;
706 }
707 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
708 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
709 part_attrs |= EXT_CSD_ENH_GP(pidx);
710 tot_enh_size_mult += gp_size_mult[pidx];
711 }
712 }
713
714 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
715 printf("Card does not support enhanced attribute\n");
716 return -EMEDIUMTYPE;
717 }
718
719 err = mmc_send_ext_csd(mmc, ext_csd);
720 if (err)
721 return err;
722
723 max_enh_size_mult =
724 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
725 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
726 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
727 if (tot_enh_size_mult > max_enh_size_mult) {
728 printf("Total enhanced size exceeds maximum (%u > %u)\n",
729 tot_enh_size_mult, max_enh_size_mult);
730 return -EMEDIUMTYPE;
731 }
732
 733 	/* The default value of EXT_CSD_WR_REL_SET is device
 734 	 * dependent; the values can only be changed if the
 735 	 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
 736 	 * changed only once and before partitioning is completed. */
737 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
738 if (conf->user.wr_rel_change) {
739 if (conf->user.wr_rel_set)
740 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
741 else
742 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
743 }
744 for (pidx = 0; pidx < 4; pidx++) {
745 if (conf->gp_part[pidx].wr_rel_change) {
746 if (conf->gp_part[pidx].wr_rel_set)
747 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
748 else
749 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
750 }
751 }
752
753 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
754 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
755 puts("Card does not support host controlled partition write "
756 "reliability settings\n");
757 return -EMEDIUMTYPE;
758 }
759
760 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
761 EXT_CSD_PARTITION_SETTING_COMPLETED) {
762 printf("Card already partitioned\n");
763 return -EPERM;
764 }
765
766 if (mode == MMC_HWPART_CONF_CHECK)
767 return 0;
768
769 /* Partitioning requires high-capacity size definitions */
770 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
771 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
772 EXT_CSD_ERASE_GROUP_DEF, 1);
773
774 if (err)
775 return err;
776
777 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
778
779 /* update erase group size to be high-capacity */
780 mmc->erase_grp_size =
781 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
782
783 }
784
785 /* all OK, write the configuration */
786 for (i = 0; i < 4; i++) {
787 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
788 EXT_CSD_ENH_START_ADDR+i,
789 (enh_start_addr >> (i*8)) & 0xFF);
790 if (err)
791 return err;
792 }
793 for (i = 0; i < 3; i++) {
794 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
795 EXT_CSD_ENH_SIZE_MULT+i,
796 (enh_size_mult >> (i*8)) & 0xFF);
797 if (err)
798 return err;
799 }
800 for (pidx = 0; pidx < 4; pidx++) {
801 for (i = 0; i < 3; i++) {
802 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
803 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
804 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
805 if (err)
806 return err;
807 }
808 }
809 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
810 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
811 if (err)
812 return err;
813
814 if (mode == MMC_HWPART_CONF_SET)
815 return 0;
816
817 /* The WR_REL_SET is a write-once register but shall be
818 * written before setting PART_SETTING_COMPLETED. As it is
819 * write-once we can only write it when completing the
820 * partitioning. */
821 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
822 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
823 EXT_CSD_WR_REL_SET, wr_rel_set);
824 if (err)
825 return err;
826 }
827
828 /* Setting PART_SETTING_COMPLETED confirms the partition
829 * configuration but it only becomes effective after power
830 * cycle, so we do not adjust the partition related settings
831 * in the mmc struct. */
832
833 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
834 EXT_CSD_PARTITION_SETTING,
835 EXT_CSD_PARTITION_SETTING_COMPLETED);
836 if (err)
837 return err;
838
839 return 0;
840 }
841
842 #if !CONFIG_IS_ENABLED(DM_MMC)
843 int mmc_getcd(struct mmc *mmc)
844 {
845 int cd;
846
847 cd = board_mmc_getcd(mmc);
848
849 if (cd < 0) {
850 if (mmc->cfg->ops->getcd)
851 cd = mmc->cfg->ops->getcd(mmc);
852 else
853 cd = 1;
854 }
855
856 return cd;
857 }
858 #endif
859
860 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
861 {
862 struct mmc_cmd cmd;
863 struct mmc_data data;
864
865 /* Switch the frequency */
866 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
867 cmd.resp_type = MMC_RSP_R1;
868 cmd.cmdarg = (mode << 31) | 0xffffff;
869 cmd.cmdarg &= ~(0xf << (group * 4));
870 cmd.cmdarg |= value << (group * 4);
871
872 data.dest = (char *)resp;
873 data.blocksize = 64;
874 data.blocks = 1;
875 data.flags = MMC_DATA_READ;
876
877 return mmc_send_cmd(mmc, &cmd, &data);
878 }
879
880
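/*
 * Read the SCR (ACMD51) to determine the SD version and 4-bit support,
 * then use CMD6 (SWITCH_FUNC) to query and, if both card and host allow
 * it, enable high-speed mode.
 */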
881 static int sd_change_freq(struct mmc *mmc)
882 {
883 int err;
884 struct mmc_cmd cmd;
885 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
886 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
887 struct mmc_data data;
888 int timeout;
889
890 mmc->card_caps = 0;
891
892 if (mmc_host_is_spi(mmc))
893 return 0;
894
895 /* Read the SCR to find out if this card supports higher speeds */
896 cmd.cmdidx = MMC_CMD_APP_CMD;
897 cmd.resp_type = MMC_RSP_R1;
898 cmd.cmdarg = mmc->rca << 16;
899
900 err = mmc_send_cmd(mmc, &cmd, NULL);
901
902 if (err)
903 return err;
904
905 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
906 cmd.resp_type = MMC_RSP_R1;
907 cmd.cmdarg = 0;
908
909 timeout = 3;
910
911 retry_scr:
912 data.dest = (char *)scr;
913 data.blocksize = 8;
914 data.blocks = 1;
915 data.flags = MMC_DATA_READ;
916
917 err = mmc_send_cmd(mmc, &cmd, &data);
918
919 if (err) {
920 if (timeout--)
921 goto retry_scr;
922
923 return err;
924 }
925
926 mmc->scr[0] = __be32_to_cpu(scr[0]);
927 mmc->scr[1] = __be32_to_cpu(scr[1]);
928
929 switch ((mmc->scr[0] >> 24) & 0xf) {
930 case 0:
931 mmc->version = SD_VERSION_1_0;
932 break;
933 case 1:
934 mmc->version = SD_VERSION_1_10;
935 break;
936 case 2:
937 mmc->version = SD_VERSION_2;
938 if ((mmc->scr[0] >> 15) & 0x1)
939 mmc->version = SD_VERSION_3;
940 break;
941 default:
942 mmc->version = SD_VERSION_1_0;
943 break;
944 }
945
946 if (mmc->scr[0] & SD_DATA_4BIT)
947 mmc->card_caps |= MMC_MODE_4BIT;
948
949 /* Version 1.0 doesn't support switching */
950 if (mmc->version == SD_VERSION_1_0)
951 return 0;
952
953 timeout = 4;
954 while (timeout--) {
955 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
956 (u8 *)switch_status);
957
958 if (err)
959 return err;
960
961 /* The high-speed function is busy. Try again */
962 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
963 break;
964 }
965
966 /* If high-speed isn't supported, we return */
967 if (!(__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED))
968 return 0;
969
 970 	/*
 971 	 * If the host doesn't support SD_HIGHSPEED, do not switch the card
 972 	 * to HIGHSPEED mode even if the card supports SD_HIGHSPEED.
 973 	 * This avoids further problems when the card and the host run in
 974 	 * different modes.
 975 	 */
976 if (!((mmc->cfg->host_caps & MMC_MODE_HS_52MHz) &&
977 (mmc->cfg->host_caps & MMC_MODE_HS)))
978 return 0;
979
980 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, 1, (u8 *)switch_status);
981
982 if (err)
983 return err;
984
985 if ((__be32_to_cpu(switch_status[4]) & 0x0f000000) == 0x01000000)
986 mmc->card_caps |= MMC_MODE_HS;
987
988 return 0;
989 }
990
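/*
 * Read the SD Status register (ACMD13) and extract the allocation unit
 * size and the erase timeout/offset used for erase time estimates.
 */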
991 static int sd_read_ssr(struct mmc *mmc)
992 {
993 int err, i;
994 struct mmc_cmd cmd;
995 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
996 struct mmc_data data;
997 int timeout = 3;
998 unsigned int au, eo, et, es;
999
1000 cmd.cmdidx = MMC_CMD_APP_CMD;
1001 cmd.resp_type = MMC_RSP_R1;
1002 cmd.cmdarg = mmc->rca << 16;
1003
1004 err = mmc_send_cmd(mmc, &cmd, NULL);
1005 if (err)
1006 return err;
1007
1008 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1009 cmd.resp_type = MMC_RSP_R1;
1010 cmd.cmdarg = 0;
1011
1012 retry_ssr:
1013 data.dest = (char *)ssr;
1014 data.blocksize = 64;
1015 data.blocks = 1;
1016 data.flags = MMC_DATA_READ;
1017
1018 err = mmc_send_cmd(mmc, &cmd, &data);
1019 if (err) {
1020 if (timeout--)
1021 goto retry_ssr;
1022
1023 return err;
1024 }
1025
1026 for (i = 0; i < 16; i++)
1027 ssr[i] = be32_to_cpu(ssr[i]);
1028
1029 au = (ssr[2] >> 12) & 0xF;
1030 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1031 mmc->ssr.au = sd_au_size[au];
1032 es = (ssr[3] >> 24) & 0xFF;
1033 es |= (ssr[2] & 0xFF) << 8;
1034 et = (ssr[3] >> 18) & 0x3F;
1035 if (es && et) {
1036 eo = (ssr[3] >> 16) & 0x3;
1037 mmc->ssr.erase_timeout = (et * 1000) / es;
1038 mmc->ssr.erase_offset = eo * 1000;
1039 }
1040 } else {
1041 debug("Invalid Allocation Unit Size.\n");
1042 }
1043
1044 return 0;
1045 }
1046
1047 /* frequency bases */
1048 /* divided by 10 to be nice to platforms without floating point */
1049 static const int fbase[] = {
1050 10000,
1051 100000,
1052 1000000,
1053 10000000,
1054 };
1055
1056 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
1057 * to platforms without floating point.
1058 */
1059 static const u8 multipliers[] = {
1060 0, /* reserved */
1061 10,
1062 12,
1063 13,
1064 15,
1065 20,
1066 25,
1067 30,
1068 35,
1069 40,
1070 45,
1071 50,
1072 55,
1073 60,
1074 70,
1075 80,
1076 };
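/*
 * Example decode of CSD TRAN_SPEED: 0x32 selects frequency unit 2
 * (fbase[2] = 1000000) and multiplier index 6 (2.5 -> 25), giving
 * 1000000 * 25 = 25 MHz; 0x5a decodes the same way to 50 MHz.
 */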
1077
1078 #if !CONFIG_IS_ENABLED(DM_MMC)
1079 static void mmc_set_ios(struct mmc *mmc)
1080 {
1081 if (mmc->cfg->ops->set_ios)
1082 mmc->cfg->ops->set_ios(mmc);
1083 }
1084 #endif
1085
1086 void mmc_set_clock(struct mmc *mmc, uint clock)
1087 {
1088 if (clock > mmc->cfg->f_max)
1089 clock = mmc->cfg->f_max;
1090
1091 if (clock < mmc->cfg->f_min)
1092 clock = mmc->cfg->f_min;
1093
1094 mmc->clock = clock;
1095
1096 mmc_set_ios(mmc);
1097 }
1098
1099 static void mmc_set_bus_width(struct mmc *mmc, uint width)
1100 {
1101 mmc->bus_width = width;
1102
1103 mmc_set_ios(mmc);
1104 }
1105
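/*
 * Apply the SD card's negotiated capabilities: switch to 4-bit bus width
 * with ACMD6 when supported, read the SD Status, and set tran_speed to
 * 50 MHz (high-speed) or 25 MHz (default-speed).
 */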
1106 static int sd_select_bus_freq_width(struct mmc *mmc)
1107 {
1108 int err;
1109 struct mmc_cmd cmd;
1110
1111 err = sd_change_freq(mmc);
1112 if (err)
1113 return err;
1114
1115 /* Restrict card's capabilities by what the host can do */
1116 mmc->card_caps &= mmc->cfg->host_caps;
1117
1118 if (mmc->card_caps & MMC_MODE_4BIT) {
1119 cmd.cmdidx = MMC_CMD_APP_CMD;
1120 cmd.resp_type = MMC_RSP_R1;
1121 cmd.cmdarg = mmc->rca << 16;
1122
1123 err = mmc_send_cmd(mmc, &cmd, NULL);
1124 if (err)
1125 return err;
1126
1127 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1128 cmd.resp_type = MMC_RSP_R1;
1129 cmd.cmdarg = 2;
1130 err = mmc_send_cmd(mmc, &cmd, NULL);
1131 if (err)
1132 return err;
1133
1134 mmc_set_bus_width(mmc, 4);
1135 }
1136
1137 err = sd_read_ssr(mmc);
1138 if (err)
1139 return err;
1140
1141 if (mmc->card_caps & MMC_MODE_HS)
1142 mmc->tran_speed = 50000000;
1143 else
1144 mmc->tran_speed = 25000000;
1145
1146 return 0;
1147 }
1148
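/*
 * Probe eMMC bus widths from widest (8-bit DDR) down to 1-bit: switch
 * EXT_CSD BUS_WIDTH, then verify the new width by re-reading the EXT_CSD
 * and comparing read-only fields.  On success the DDR flag and tran_speed
 * (26/52 MHz) are set to match the selected mode.
 */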
1149 static int mmc_select_bus_freq_width(struct mmc *mmc, const u8 *ext_csd)
1150 {
1151 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1152 /* An array of possible bus widths in order of preference */
1153 static const unsigned int ext_csd_bits[] = {
1154 EXT_CSD_DDR_BUS_WIDTH_8,
1155 EXT_CSD_DDR_BUS_WIDTH_4,
1156 EXT_CSD_BUS_WIDTH_8,
1157 EXT_CSD_BUS_WIDTH_4,
1158 EXT_CSD_BUS_WIDTH_1,
1159 };
1160 /* An array to map CSD bus widths to host cap bits */
1161 static const unsigned int ext_to_hostcaps[] = {
1162 [EXT_CSD_DDR_BUS_WIDTH_4] =
1163 MMC_MODE_DDR_52MHz | MMC_MODE_4BIT,
1164 [EXT_CSD_DDR_BUS_WIDTH_8] =
1165 MMC_MODE_DDR_52MHz | MMC_MODE_8BIT,
1166 [EXT_CSD_BUS_WIDTH_4] = MMC_MODE_4BIT,
1167 [EXT_CSD_BUS_WIDTH_8] = MMC_MODE_8BIT,
1168 };
1169 /* An array to map chosen bus width to an integer */
1170 static const unsigned int widths[] = {
1171 8, 4, 8, 4, 1,
1172 };
1173 int err;
1174 int idx;
1175
1176 err = mmc_change_freq(mmc);
1177 if (err)
1178 return err;
1179
1180 /* Restrict card's capabilities by what the host can do */
1181 mmc->card_caps &= mmc->cfg->host_caps;
1182
1183 /* Only version 4 of MMC supports wider bus widths */
1184 if (mmc->version < MMC_VERSION_4)
1185 return 0;
1186
1187 for (idx = 0; idx < ARRAY_SIZE(ext_csd_bits); idx++) {
1188 unsigned int extw = ext_csd_bits[idx];
1189 unsigned int caps = ext_to_hostcaps[extw];
1190 /*
1191 * If the bus width is still not changed,
1192 * don't try to set the default again.
1193 * Otherwise, recover from switch attempts
1194 * by switching to 1-bit bus width.
1195 */
1196 if (extw == EXT_CSD_BUS_WIDTH_1 &&
1197 mmc->bus_width == 1) {
1198 err = 0;
1199 break;
1200 }
1201
1202 /*
1203 * Check to make sure the card and controller support
1204 * these capabilities
1205 */
1206 if ((mmc->card_caps & caps) != caps)
1207 continue;
1208
1209 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1210 EXT_CSD_BUS_WIDTH, extw);
1211
1212 if (err)
1213 continue;
1214
1215 mmc->ddr_mode = (caps & MMC_MODE_DDR_52MHz) ? 1 : 0;
1216 mmc_set_bus_width(mmc, widths[idx]);
1217
1218 err = mmc_send_ext_csd(mmc, test_csd);
1219
1220 if (err)
1221 continue;
1222
1223 /* Only compare read only fields */
1224 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1225 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1226 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1227 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1228 ext_csd[EXT_CSD_REV]
1229 == test_csd[EXT_CSD_REV] &&
1230 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1231 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1232 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1233 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1234 break;
1235
1236 err = -EBADMSG;
1237 }
1238
1239 if (err)
1240 return err;
1241
1242 if (mmc->card_caps & MMC_MODE_HS) {
1243 if (mmc->card_caps & MMC_MODE_HS_52MHz)
1244 mmc->tran_speed = 52000000;
1245 else
1246 mmc->tran_speed = 26000000;
1247 }
1248
1249 return err;
1250 }
1251
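/*
 * Version 4+ eMMC setup: read the EXT_CSD to determine the exact device
 * version, the user/boot/RPMB/GP capacities, partition support and the
 * (high-capacity) erase and write-protect group sizes.
 */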
1252 static int mmc_startup_v4(struct mmc *mmc, u8 *ext_csd)
1253 {
1254 int err, i;
1255 u64 capacity;
1256 bool has_parts = false;
1257 bool part_completed;
1258
1259 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
1260 return 0;
1261
1262 /* check ext_csd version and capacity */
1263 err = mmc_send_ext_csd(mmc, ext_csd);
1264 if (err)
1265 return err;
1266 if (ext_csd[EXT_CSD_REV] >= 2) {
 1267 		/*
 1268 		 * According to the JEDEC Standard, the capacity
 1269 		 * reported in the EXT_CSD (SEC_COUNT) is only valid
 1270 		 * if it is larger than 2 GB.
 1271 		 */
1272 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
1273 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
1274 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
1275 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
1276 capacity *= MMC_MAX_BLOCK_LEN;
1277 if ((capacity >> 20) > 2 * 1024)
1278 mmc->capacity_user = capacity;
1279 }
1280
1281 switch (ext_csd[EXT_CSD_REV]) {
1282 case 1:
1283 mmc->version = MMC_VERSION_4_1;
1284 break;
1285 case 2:
1286 mmc->version = MMC_VERSION_4_2;
1287 break;
1288 case 3:
1289 mmc->version = MMC_VERSION_4_3;
1290 break;
1291 case 5:
1292 mmc->version = MMC_VERSION_4_41;
1293 break;
1294 case 6:
1295 mmc->version = MMC_VERSION_4_5;
1296 break;
1297 case 7:
1298 mmc->version = MMC_VERSION_5_0;
1299 break;
1300 case 8:
1301 mmc->version = MMC_VERSION_5_1;
1302 break;
1303 }
1304
1305 /* The partition data may be non-zero but it is only
1306 * effective if PARTITION_SETTING_COMPLETED is set in
1307 * EXT_CSD, so ignore any data if this bit is not set,
1308 * except for enabling the high-capacity group size
1309 * definition (see below).
1310 */
1311 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
1312 EXT_CSD_PARTITION_SETTING_COMPLETED);
1313
1314 /* store the partition info of emmc */
1315 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
1316 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
1317 ext_csd[EXT_CSD_BOOT_MULT])
1318 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
1319 if (part_completed &&
1320 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
1321 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
1322
1323 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
1324
1325 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
1326
1327 for (i = 0; i < 4; i++) {
1328 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
1329 uint mult = (ext_csd[idx + 2] << 16) +
1330 (ext_csd[idx + 1] << 8) + ext_csd[idx];
1331 if (mult)
1332 has_parts = true;
1333 if (!part_completed)
1334 continue;
1335 mmc->capacity_gp[i] = mult;
1336 mmc->capacity_gp[i] *=
1337 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
1338 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1339 mmc->capacity_gp[i] <<= 19;
1340 }
1341
1342 if (part_completed) {
1343 mmc->enh_user_size =
1344 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
1345 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
1346 ext_csd[EXT_CSD_ENH_SIZE_MULT];
1347 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
1348 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1349 mmc->enh_user_size <<= 19;
1350 mmc->enh_user_start =
1351 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
1352 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
1353 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
1354 ext_csd[EXT_CSD_ENH_START_ADDR];
1355 if (mmc->high_capacity)
1356 mmc->enh_user_start <<= 9;
1357 }
1358
1359 /*
1360 * Host needs to enable ERASE_GRP_DEF bit if device is
1361 * partitioned. This bit will be lost every time after a reset
1362 * or power off. This will affect erase size.
1363 */
1364 if (part_completed)
1365 has_parts = true;
1366 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
1367 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
1368 has_parts = true;
1369 if (has_parts) {
1370 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1371 EXT_CSD_ERASE_GROUP_DEF, 1);
1372
1373 if (err)
1374 return err;
1375
1376 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1377 }
1378
1379 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
1380 /* Read out group size from ext_csd */
1381 mmc->erase_grp_size =
1382 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
 1383 		/*
 1384 		 * If high capacity and partition setting completed,
 1385 		 * SEC_COUNT is valid even if it is smaller than 2 GiB
 1386 		 * (JEDEC Standard JESD84-B45, 6.2.4).
 1387 		 */
1388 if (mmc->high_capacity && part_completed) {
1389 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
1390 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
1391 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
1392 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
1393 capacity *= MMC_MAX_BLOCK_LEN;
1394 mmc->capacity_user = capacity;
1395 }
1396 } else {
1397 /* Calculate the group size from the csd value. */
1398 int erase_gsz, erase_gmul;
1399
1400 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
1401 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
1402 mmc->erase_grp_size = (erase_gsz + 1)
1403 * (erase_gmul + 1);
1404 }
1405
1406 mmc->hc_wp_grp_size = 1024
1407 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1408 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1409
1410 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1411
1412 return 0;
1413 }
1414
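/*
 * Card identification and setup: read the CID (CMD2), assign or fetch the
 * RCA (CMD3), read the CSD (CMD9), select the card (CMD7), run the
 * version 4+ eMMC setup, then pick the bus width and frequency and fill
 * in the block descriptor.
 */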
1415 static int mmc_startup(struct mmc *mmc)
1416 {
1417 int err, i;
1418 uint mult, freq;
1419 u64 cmult, csize;
1420 struct mmc_cmd cmd;
1421 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1422 struct blk_desc *bdesc;
1423
1424 #ifdef CONFIG_MMC_SPI_CRC_ON
1425 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
1426 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
1427 cmd.resp_type = MMC_RSP_R1;
1428 cmd.cmdarg = 1;
1429 err = mmc_send_cmd(mmc, &cmd, NULL);
1430
1431 if (err)
1432 return err;
1433 }
1434 #endif
1435
1436 /* Put the Card in Identify Mode */
1437 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
1438 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
1439 cmd.resp_type = MMC_RSP_R2;
1440 cmd.cmdarg = 0;
1441
1442 err = mmc_send_cmd(mmc, &cmd, NULL);
1443
1444 if (err)
1445 return err;
1446
1447 memcpy(mmc->cid, cmd.response, 16);
1448
1449 /*
1450 * For MMC cards, set the Relative Address.
 1451 	 * For SD cards, get the Relative Address.
 1452 	 * This also puts the card into Standby State.
1453 */
1454 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
1455 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
1456 cmd.cmdarg = mmc->rca << 16;
1457 cmd.resp_type = MMC_RSP_R6;
1458
1459 err = mmc_send_cmd(mmc, &cmd, NULL);
1460
1461 if (err)
1462 return err;
1463
1464 if (IS_SD(mmc))
1465 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
1466 }
1467
1468 /* Get the Card-Specific Data */
1469 cmd.cmdidx = MMC_CMD_SEND_CSD;
1470 cmd.resp_type = MMC_RSP_R2;
1471 cmd.cmdarg = mmc->rca << 16;
1472
1473 err = mmc_send_cmd(mmc, &cmd, NULL);
1474
1475 if (err)
1476 return err;
1477
1478 mmc->csd[0] = cmd.response[0];
1479 mmc->csd[1] = cmd.response[1];
1480 mmc->csd[2] = cmd.response[2];
1481 mmc->csd[3] = cmd.response[3];
1482
1483 if (mmc->version == MMC_VERSION_UNKNOWN) {
1484 int version = (cmd.response[0] >> 26) & 0xf;
1485
1486 switch (version) {
1487 case 0:
1488 mmc->version = MMC_VERSION_1_2;
1489 break;
1490 case 1:
1491 mmc->version = MMC_VERSION_1_4;
1492 break;
1493 case 2:
1494 mmc->version = MMC_VERSION_2_2;
1495 break;
1496 case 3:
1497 mmc->version = MMC_VERSION_3;
1498 break;
1499 case 4:
1500 mmc->version = MMC_VERSION_4;
1501 break;
1502 default:
1503 mmc->version = MMC_VERSION_1_2;
1504 break;
1505 }
1506 }
1507
1508 /* divide frequency by 10, since the mults are 10x bigger */
1509 freq = fbase[(cmd.response[0] & 0x7)];
1510 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
1511
1512 mmc->tran_speed = freq * mult;
1513
1514 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
1515 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
1516
1517 if (IS_SD(mmc))
1518 mmc->write_bl_len = mmc->read_bl_len;
1519 else
1520 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
1521
1522 if (mmc->high_capacity) {
1523 csize = (mmc->csd[1] & 0x3f) << 16
1524 | (mmc->csd[2] & 0xffff0000) >> 16;
1525 cmult = 8;
1526 } else {
1527 csize = (mmc->csd[1] & 0x3ff) << 2
1528 | (mmc->csd[2] & 0xc0000000) >> 30;
1529 cmult = (mmc->csd[2] & 0x00038000) >> 15;
1530 }
1531
1532 mmc->capacity_user = (csize + 1) << (cmult + 2);
1533 mmc->capacity_user *= mmc->read_bl_len;
1534 mmc->capacity_boot = 0;
1535 mmc->capacity_rpmb = 0;
1536 for (i = 0; i < 4; i++)
1537 mmc->capacity_gp[i] = 0;
1538
1539 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
1540 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
1541
1542 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
1543 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
1544
1545 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
1546 cmd.cmdidx = MMC_CMD_SET_DSR;
1547 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
1548 cmd.resp_type = MMC_RSP_NONE;
1549 if (mmc_send_cmd(mmc, &cmd, NULL))
1550 printf("MMC: SET_DSR failed\n");
1551 }
1552
1553 /* Select the card, and put it into Transfer Mode */
1554 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
1555 cmd.cmdidx = MMC_CMD_SELECT_CARD;
1556 cmd.resp_type = MMC_RSP_R1;
1557 cmd.cmdarg = mmc->rca << 16;
1558 err = mmc_send_cmd(mmc, &cmd, NULL);
1559
1560 if (err)
1561 return err;
1562 }
1563
1564 /*
1565 * For SD, its erase group is always one sector
1566 */
1567 mmc->erase_grp_size = 1;
1568 mmc->part_config = MMCPART_NOAVAILABLE;
1569
1570 err = mmc_startup_v4(mmc, ext_csd);
1571 if (err)
1572 return err;
1573
1574 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
1575 if (err)
1576 return err;
1577
1578 if (IS_SD(mmc))
1579 err = sd_select_bus_freq_width(mmc);
1580 else
1581 err = mmc_select_bus_freq_width(mmc, ext_csd);
1582
1583 if (err)
1584 return err;
1585
1586 mmc_set_clock(mmc, mmc->tran_speed);
1587
1588 /* Fix the block length for DDR mode */
1589 if (mmc->ddr_mode) {
1590 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
1591 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
1592 }
1593
1594 /* fill in device description */
1595 bdesc = mmc_get_blk_desc(mmc);
1596 bdesc->lun = 0;
1597 bdesc->hwpart = 0;
1598 bdesc->type = 0;
1599 bdesc->blksz = mmc->read_bl_len;
1600 bdesc->log2blksz = LOG2(bdesc->blksz);
1601 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
1602 #if !defined(CONFIG_SPL_BUILD) || \
1603 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
1604 !defined(CONFIG_USE_TINY_PRINTF))
1605 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
1606 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
1607 (mmc->cid[3] >> 16) & 0xffff);
1608 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
1609 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
1610 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
1611 (mmc->cid[2] >> 24) & 0xff);
1612 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
1613 (mmc->cid[2] >> 16) & 0xf);
1614 #else
1615 bdesc->vendor[0] = 0;
1616 bdesc->product[0] = 0;
1617 bdesc->revision[0] = 0;
1618 #endif
1619 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
1620 part_init(bdesc);
1621 #endif
1622
1623 return 0;
1624 }
1625
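/*
 * Send CMD8 (SEND_IF_COND) with the 0xaa check pattern; a card that
 * echoes the pattern back is an SD version 2.00 (or later) card.
 */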
1626 static int mmc_send_if_cond(struct mmc *mmc)
1627 {
1628 struct mmc_cmd cmd;
1629 int err;
1630
1631 cmd.cmdidx = SD_CMD_SEND_IF_COND;
1632 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
1633 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
1634 cmd.resp_type = MMC_RSP_R7;
1635
1636 err = mmc_send_cmd(mmc, &cmd, NULL);
1637
1638 if (err)
1639 return err;
1640
1641 if ((cmd.response[0] & 0xff) != 0xaa)
1642 return -EOPNOTSUPP;
1643 else
1644 mmc->version = SD_VERSION_2;
1645
1646 return 0;
1647 }
1648
1649 #if !CONFIG_IS_ENABLED(DM_MMC)
1650 /* board-specific MMC power initializations. */
1651 __weak void board_mmc_power_init(void)
1652 {
1653 }
1654 #endif
1655
1656 static int mmc_power_init(struct mmc *mmc)
1657 {
1658 #if CONFIG_IS_ENABLED(DM_MMC)
1659 #if CONFIG_IS_ENABLED(DM_REGULATOR)
1660 int ret;
1661
1662 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
1663 &mmc->vmmc_supply);
1664 if (ret)
1665 debug("%s: No vmmc supply\n", mmc->dev->name);
1666
1667 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
1668 &mmc->vqmmc_supply);
1669 if (ret)
1670 debug("%s: No vqmmc supply\n", mmc->dev->name);
1671
1672 if (mmc->vmmc_supply) {
1673 ret = regulator_set_enable(mmc->vmmc_supply, true);
1674 if (ret) {
1675 puts("Error enabling VMMC supply\n");
1676 return ret;
1677 }
1678 }
1679 #endif
1680 #else /* !CONFIG_DM_MMC */
1681 /*
1682 * Driver model should use a regulator, as above, rather than calling
1683 * out to board code.
1684 */
1685 board_mmc_power_init();
1686 #endif
1687 return 0;
1688 }
1689
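/*
 * First phase of initialization: check card detect, set up power and the
 * host controller, reset the card with CMD0 and start the SD/eMMC
 * operating-condition negotiation.  The sequence is finished later by
 * mmc_complete_init().
 */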
1690 int mmc_start_init(struct mmc *mmc)
1691 {
1692 bool no_card;
1693 int err;
1694
1695 /* we pretend there's no card when init is NULL */
1696 no_card = mmc_getcd(mmc) == 0;
1697 #if !CONFIG_IS_ENABLED(DM_MMC)
1698 no_card = no_card || (mmc->cfg->ops->init == NULL);
1699 #endif
1700 if (no_card) {
1701 mmc->has_init = 0;
1702 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
1703 printf("MMC: no card present\n");
1704 #endif
1705 return -ENOMEDIUM;
1706 }
1707
1708 if (mmc->has_init)
1709 return 0;
1710
1711 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
1712 mmc_adapter_card_type_ident();
1713 #endif
1714 err = mmc_power_init(mmc);
1715 if (err)
1716 return err;
1717
1718 #if CONFIG_IS_ENABLED(DM_MMC)
1719 /* The device has already been probed ready for use */
1720 #else
1721 /* made sure it's not NULL earlier */
1722 err = mmc->cfg->ops->init(mmc);
1723 if (err)
1724 return err;
1725 #endif
1726 mmc->ddr_mode = 0;
1727 mmc_set_bus_width(mmc, 1);
1728 mmc_set_clock(mmc, 1);
1729
1730 /* Reset the Card */
1731 err = mmc_go_idle(mmc);
1732
1733 if (err)
1734 return err;
1735
 1736 	/* The internal partition is reset to the user partition (0) on every CMD0 */
1737 mmc_get_blk_desc(mmc)->hwpart = 0;
1738
1739 /* Test for SD version 2 */
1740 err = mmc_send_if_cond(mmc);
1741
1742 /* Now try to get the SD card's operating condition */
1743 err = sd_send_op_cond(mmc);
1744
1745 /* If the command timed out, we check for an MMC card */
1746 if (err == -ETIMEDOUT) {
1747 err = mmc_send_op_cond(mmc);
1748
1749 if (err) {
1750 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
1751 printf("Card did not respond to voltage select!\n");
1752 #endif
1753 return -EOPNOTSUPP;
1754 }
1755 }
1756
1757 if (!err)
1758 mmc->init_in_progress = 1;
1759
1760 return err;
1761 }
1762
1763 static int mmc_complete_init(struct mmc *mmc)
1764 {
1765 int err = 0;
1766
1767 mmc->init_in_progress = 0;
1768 if (mmc->op_cond_pending)
1769 err = mmc_complete_op_cond(mmc);
1770
1771 if (!err)
1772 err = mmc_startup(mmc);
1773 if (err)
1774 mmc->has_init = 0;
1775 else
1776 mmc->has_init = 1;
1777 return err;
1778 }
1779
1780 int mmc_init(struct mmc *mmc)
1781 {
1782 int err = 0;
1783 __maybe_unused unsigned start;
1784 #if CONFIG_IS_ENABLED(DM_MMC)
1785 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
1786
1787 upriv->mmc = mmc;
1788 #endif
1789 if (mmc->has_init)
1790 return 0;
1791
1792 start = get_timer(0);
1793
1794 if (!mmc->init_in_progress)
1795 err = mmc_start_init(mmc);
1796
1797 if (!err)
1798 err = mmc_complete_init(mmc);
1799 if (err)
1800 printf("%s: %d, time %lu\n", __func__, err, get_timer(start));
1801
1802 return err;
1803 }
1804
1805 int mmc_set_dsr(struct mmc *mmc, u16 val)
1806 {
1807 mmc->dsr = val;
1808 return 0;
1809 }
1810
1811 /* CPU-specific MMC initializations */
1812 __weak int cpu_mmc_init(bd_t *bis)
1813 {
1814 return -1;
1815 }
1816
1817 /* board-specific MMC initializations. */
1818 __weak int board_mmc_init(bd_t *bis)
1819 {
1820 return -1;
1821 }
1822
1823 void mmc_set_preinit(struct mmc *mmc, int preinit)
1824 {
1825 mmc->preinit = preinit;
1826 }
1827
1828 #if CONFIG_IS_ENABLED(DM_MMC) && defined(CONFIG_SPL_BUILD)
1829 static int mmc_probe(bd_t *bis)
1830 {
1831 return 0;
1832 }
1833 #elif CONFIG_IS_ENABLED(DM_MMC)
1834 static int mmc_probe(bd_t *bis)
1835 {
1836 int ret, i;
1837 struct uclass *uc;
1838 struct udevice *dev;
1839
1840 ret = uclass_get(UCLASS_MMC, &uc);
1841 if (ret)
1842 return ret;
1843
1844 /*
1845 * Try to add them in sequence order. Really with driver model we
1846 * should allow holes, but the current MMC list does not allow that.
1847 * So if we request 0, 1, 3 we will get 0, 1, 2.
1848 */
1849 for (i = 0; ; i++) {
1850 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
1851 if (ret == -ENODEV)
1852 break;
1853 }
1854 uclass_foreach_dev(dev, uc) {
1855 ret = device_probe(dev);
1856 if (ret)
1857 printf("%s - probe failed: %d\n", dev->name, ret);
1858 }
1859
1860 return 0;
1861 }
1862 #else
1863 static int mmc_probe(bd_t *bis)
1864 {
1865 if (board_mmc_init(bis) < 0)
1866 cpu_mmc_init(bis);
1867
1868 return 0;
1869 }
1870 #endif
1871
1872 int mmc_initialize(bd_t *bis)
1873 {
1874 static int initialized = 0;
1875 int ret;
1876 if (initialized) /* Avoid initializing mmc multiple times */
1877 return 0;
1878 initialized = 1;
1879
1880 #if !CONFIG_IS_ENABLED(BLK)
1881 #if !CONFIG_IS_ENABLED(MMC_TINY)
1882 mmc_list_init();
1883 #endif
1884 #endif
1885 ret = mmc_probe(bis);
1886 if (ret)
1887 return ret;
1888
1889 #ifndef CONFIG_SPL_BUILD
1890 print_mmc_devices(',');
1891 #endif
1892
1893 mmc_do_preinit();
1894 return 0;
1895 }
1896
1897 #ifdef CONFIG_CMD_BKOPS_ENABLE
1898 int mmc_set_bkops_enable(struct mmc *mmc)
1899 {
1900 int err;
1901 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1902
1903 err = mmc_send_ext_csd(mmc, ext_csd);
1904 if (err) {
1905 puts("Could not get ext_csd register values\n");
1906 return err;
1907 }
1908
1909 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
1910 puts("Background operations not supported on device\n");
1911 return -EMEDIUMTYPE;
1912 }
1913
1914 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
1915 puts("Background operations already enabled\n");
1916 return 0;
1917 }
1918
1919 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
1920 if (err) {
1921 puts("Failed to enable manual background operations\n");
1922 return err;
1923 }
1924
1925 puts("Enabled manual background operations\n");
1926
1927 return 0;
1928 }
1929 #endif