1 /*
2 * Copyright 2008, Freescale Semiconductor, Inc
3 * Andy Fleming
4 *
5 * Based vaguely on the Linux code
6 *
7 * SPDX-License-Identifier: GPL-2.0+
8 */
9
10 #include <config.h>
11 #include <common.h>
12 #include <command.h>
13 #include <dm.h>
14 #include <dm/device-internal.h>
15 #include <errno.h>
16 #include <mmc.h>
17 #include <part.h>
18 #include <power/regulator.h>
19 #include <malloc.h>
20 #include <memalign.h>
21 #include <linux/list.h>
22 #include <div64.h>
23 #include "mmc_private.h"
24
25 static const unsigned int sd_au_size[] = {
26 0, SZ_16K / 512, SZ_32K / 512,
27 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
28 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
29 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
30 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512, SZ_64M / 512,
31 };
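/*
 * Note: sd_read_ssr() below indexes this table with the 4-bit AU_SIZE
 * field it extracts from the SD Status register; the entries are
 * allocation-unit sizes expressed in 512-byte sectors (0 = undefined).
 */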
32
33 #if CONFIG_IS_ENABLED(MMC_TINY)
34 static struct mmc mmc_static;
35 struct mmc *find_mmc_device(int dev_num)
36 {
37 return &mmc_static;
38 }
39
40 void mmc_do_preinit(void)
41 {
42 struct mmc *m = &mmc_static;
43 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
44 mmc_set_preinit(m, 1);
45 #endif
46 if (m->preinit)
47 mmc_start_init(m);
48 }
49
50 struct blk_desc *mmc_get_blk_desc(struct mmc *mmc)
51 {
52 return &mmc->block_dev;
53 }
54 #endif
55
56 #if !CONFIG_IS_ENABLED(DM_MMC)
57 __weak int board_mmc_getwp(struct mmc *mmc)
58 {
59 return -1;
60 }
61
62 int mmc_getwp(struct mmc *mmc)
63 {
64 int wp;
65
66 wp = board_mmc_getwp(mmc);
67
68 if (wp < 0) {
69 if (mmc->cfg->ops->getwp)
70 wp = mmc->cfg->ops->getwp(mmc);
71 else
72 wp = 0;
73 }
74
75 return wp;
76 }
77
78 __weak int board_mmc_getcd(struct mmc *mmc)
79 {
80 return -1;
81 }
82 #endif
83
84 #ifdef CONFIG_MMC_TRACE
85 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
86 {
87 printf("CMD_SEND:%d\n", cmd->cmdidx);
88 printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
89 }
90
91 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
92 {
93 int i;
94 u8 *ptr;
95
96 if (ret) {
97 printf("\t\tRET\t\t\t %d\n", ret);
98 } else {
99 switch (cmd->resp_type) {
100 case MMC_RSP_NONE:
101 printf("\t\tMMC_RSP_NONE\n");
102 break;
103 case MMC_RSP_R1:
104 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
105 cmd->response[0]);
106 break;
107 case MMC_RSP_R1b:
108 printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
109 cmd->response[0]);
110 break;
111 case MMC_RSP_R2:
112 printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
113 cmd->response[0]);
114 printf("\t\t \t\t 0x%08X \n",
115 cmd->response[1]);
116 printf("\t\t \t\t 0x%08X \n",
117 cmd->response[2]);
118 printf("\t\t \t\t 0x%08X \n",
119 cmd->response[3]);
120 printf("\n");
121 printf("\t\t\t\t\tDUMPING DATA\n");
122 for (i = 0; i < 4; i++) {
123 int j;
124 printf("\t\t\t\t\t%03d - ", i*4);
125 ptr = (u8 *)&cmd->response[i];
126 ptr += 3;
127 for (j = 0; j < 4; j++)
128 printf("%02X ", *ptr--);
129 printf("\n");
130 }
131 break;
132 case MMC_RSP_R3:
133 printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
134 cmd->response[0]);
135 break;
136 default:
137 printf("\t\tERROR MMC rsp not supported\n");
138 break;
139 }
140 }
141 }
142
143 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
144 {
145 int status;
146
147 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
148 printf("CURR STATE:%d\n", status);
149 }
150 #endif
151
152 #if !CONFIG_IS_ENABLED(DM_MMC)
153 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
154 {
155 int ret;
156
157 mmmc_trace_before_send(mmc, cmd);
158 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
159 mmmc_trace_after_send(mmc, cmd, ret);
160
161 return ret;
162 }
163 #endif
164
165 int mmc_send_status(struct mmc *mmc, int timeout)
166 {
167 struct mmc_cmd cmd;
168 int err, retries = 5;
169
170 cmd.cmdidx = MMC_CMD_SEND_STATUS;
171 cmd.resp_type = MMC_RSP_R1;
172 if (!mmc_host_is_spi(mmc))
173 cmd.cmdarg = mmc->rca << 16;
174
175 while (1) {
176 err = mmc_send_cmd(mmc, &cmd, NULL);
177 if (!err) {
178 if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
179 (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
180 MMC_STATE_PRG)
181 break;
182 else if (cmd.response[0] & MMC_STATUS_MASK) {
183 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
184 printf("Status Error: 0x%08X\n",
185 cmd.response[0]);
186 #endif
187 return -ECOMM;
188 }
189 } else if (--retries < 0)
190 return err;
191
192 if (timeout-- <= 0)
193 break;
194
195 udelay(1000);
196 }
197
198 mmc_trace_state(mmc, &cmd);
199 if (timeout <= 0) {
200 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
201 		printf("Timeout waiting for card ready\n");
202 #endif
203 return -ETIMEDOUT;
204 }
205
206 return 0;
207 }
208
209 int mmc_set_blocklen(struct mmc *mmc, int len)
210 {
211 struct mmc_cmd cmd;
212
213 if (mmc->ddr_mode)
214 return 0;
215
216 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
217 cmd.resp_type = MMC_RSP_R1;
218 cmd.cmdarg = len;
219
220 return mmc_send_cmd(mmc, &cmd, NULL);
221 }
222
223 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
224 lbaint_t blkcnt)
225 {
226 struct mmc_cmd cmd;
227 struct mmc_data data;
228
229 if (blkcnt > 1)
230 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
231 else
232 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
233
234 if (mmc->high_capacity)
235 cmd.cmdarg = start;
236 else
237 cmd.cmdarg = start * mmc->read_bl_len;
238
239 cmd.resp_type = MMC_RSP_R1;
240
241 data.dest = dst;
242 data.blocks = blkcnt;
243 data.blocksize = mmc->read_bl_len;
244 data.flags = MMC_DATA_READ;
245
246 if (mmc_send_cmd(mmc, &cmd, &data))
247 return 0;
248
249 if (blkcnt > 1) {
250 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
251 cmd.cmdarg = 0;
252 cmd.resp_type = MMC_RSP_R1b;
253 if (mmc_send_cmd(mmc, &cmd, NULL)) {
254 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
255 			printf("mmc failed to send stop cmd\n");
256 #endif
257 return 0;
258 }
259 }
260
261 return blkcnt;
262 }
263
264 #if CONFIG_IS_ENABLED(BLK)
265 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
266 #else
267 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
268 void *dst)
269 #endif
270 {
271 #if CONFIG_IS_ENABLED(BLK)
272 struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
273 #endif
274 int dev_num = block_dev->devnum;
275 int err;
276 lbaint_t cur, blocks_todo = blkcnt;
277
278 if (blkcnt == 0)
279 return 0;
280
281 struct mmc *mmc = find_mmc_device(dev_num);
282 if (!mmc)
283 return 0;
284
285 if (CONFIG_IS_ENABLED(MMC_TINY))
286 err = mmc_switch_part(mmc, block_dev->hwpart);
287 else
288 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
289
290 if (err < 0)
291 return 0;
292
293 if ((start + blkcnt) > block_dev->lba) {
294 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
295 printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
296 start + blkcnt, block_dev->lba);
297 #endif
298 return 0;
299 }
300
301 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
302 debug("%s: Failed to set blocklen\n", __func__);
303 return 0;
304 }
305
306 do {
307 cur = (blocks_todo > mmc->cfg->b_max) ?
308 mmc->cfg->b_max : blocks_todo;
309 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
310 debug("%s: Failed to read blocks\n", __func__);
311 return 0;
312 }
313 blocks_todo -= cur;
314 start += cur;
315 dst += cur * mmc->read_bl_len;
316 } while (blocks_todo > 0);
317
318 return blkcnt;
319 }
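/*
 * Note: mmc_bread() splits each request into chunks of at most
 * mmc->cfg->b_max blocks (the largest transfer the host driver
 * advertises) and hands each chunk to mmc_read_blocks() above.
 * Depending on CONFIG_BLK it is reached either through the block
 * uclass or through the legacy blk_desc operations.
 */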
320
321 static int mmc_go_idle(struct mmc *mmc)
322 {
323 struct mmc_cmd cmd;
324 int err;
325
326 udelay(1000);
327
328 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
329 cmd.cmdarg = 0;
330 cmd.resp_type = MMC_RSP_NONE;
331
332 err = mmc_send_cmd(mmc, &cmd, NULL);
333
334 if (err)
335 return err;
336
337 udelay(2000);
338
339 return 0;
340 }
341
342 static int sd_send_op_cond(struct mmc *mmc)
343 {
344 int timeout = 1000;
345 int err;
346 struct mmc_cmd cmd;
347
348 while (1) {
349 cmd.cmdidx = MMC_CMD_APP_CMD;
350 cmd.resp_type = MMC_RSP_R1;
351 cmd.cmdarg = 0;
352
353 err = mmc_send_cmd(mmc, &cmd, NULL);
354
355 if (err)
356 return err;
357
358 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
359 cmd.resp_type = MMC_RSP_R3;
360
361 /*
362 * Most cards do not answer if some reserved bits
363 		 * in the OCR are set. However, some controllers
364 		 * can set bit 7 (reserved for low voltages), but
365 		 * how to handle low-voltage SD cards is not yet
366 * specified.
367 */
368 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
369 (mmc->cfg->voltages & 0xff8000);
370
371 if (mmc->version == SD_VERSION_2)
372 cmd.cmdarg |= OCR_HCS;
373
374 err = mmc_send_cmd(mmc, &cmd, NULL);
375
376 if (err)
377 return err;
378
379 if (cmd.response[0] & OCR_BUSY)
380 break;
381
382 if (timeout-- <= 0)
383 return -EOPNOTSUPP;
384
385 udelay(1000);
386 }
387
388 if (mmc->version != SD_VERSION_2)
389 mmc->version = SD_VERSION_1_0;
390
391 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
392 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
393 cmd.resp_type = MMC_RSP_R3;
394 cmd.cmdarg = 0;
395
396 err = mmc_send_cmd(mmc, &cmd, NULL);
397
398 if (err)
399 return err;
400 }
401
402 mmc->ocr = cmd.response[0];
403
404 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
405 mmc->rca = 0;
406
407 return 0;
408 }
409
410 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
411 {
412 struct mmc_cmd cmd;
413 int err;
414
415 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
416 cmd.resp_type = MMC_RSP_R3;
417 cmd.cmdarg = 0;
418 if (use_arg && !mmc_host_is_spi(mmc))
419 cmd.cmdarg = OCR_HCS |
420 (mmc->cfg->voltages &
421 (mmc->ocr & OCR_VOLTAGE_MASK)) |
422 (mmc->ocr & OCR_ACCESS_MODE);
423
424 err = mmc_send_cmd(mmc, &cmd, NULL);
425 if (err)
426 return err;
427 mmc->ocr = cmd.response[0];
428 return 0;
429 }
430
431 static int mmc_send_op_cond(struct mmc *mmc)
432 {
433 int err, i;
434
435 /* Some cards seem to need this */
436 mmc_go_idle(mmc);
437
438 	/* Ask the card for its capabilities */
439 for (i = 0; i < 2; i++) {
440 err = mmc_send_op_cond_iter(mmc, i != 0);
441 if (err)
442 return err;
443
444 /* exit if not busy (flag seems to be inverted) */
445 if (mmc->ocr & OCR_BUSY)
446 break;
447 }
448 mmc->op_cond_pending = 1;
449 return 0;
450 }
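/*
 * The op-cond handshake is deliberately split in two: mmc_send_op_cond()
 * above only issues the first CMD1 iterations and sets op_cond_pending,
 * while mmc_complete_op_cond() below keeps polling until the card clears
 * the busy bit in the OCR. This lets mmc_start_init() return early so the
 * rest of card initialisation can be deferred (see mmc_complete_init()).
 */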
451
452 static int mmc_complete_op_cond(struct mmc *mmc)
453 {
454 struct mmc_cmd cmd;
455 int timeout = 1000;
456 uint start;
457 int err;
458
459 mmc->op_cond_pending = 0;
460 if (!(mmc->ocr & OCR_BUSY)) {
461 /* Some cards seem to need this */
462 mmc_go_idle(mmc);
463
464 start = get_timer(0);
465 while (1) {
466 err = mmc_send_op_cond_iter(mmc, 1);
467 if (err)
468 return err;
469 if (mmc->ocr & OCR_BUSY)
470 break;
471 if (get_timer(start) > timeout)
472 return -EOPNOTSUPP;
473 udelay(100);
474 }
475 }
476
477 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
478 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
479 cmd.resp_type = MMC_RSP_R3;
480 cmd.cmdarg = 0;
481
482 err = mmc_send_cmd(mmc, &cmd, NULL);
483
484 if (err)
485 return err;
486
487 mmc->ocr = cmd.response[0];
488 }
489
490 mmc->version = MMC_VERSION_UNKNOWN;
491
492 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
493 mmc->rca = 1;
494
495 return 0;
496 }
497
498
499 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
500 {
501 struct mmc_cmd cmd;
502 struct mmc_data data;
503 int err;
504
505 	/* Read the card's 512-byte EXT_CSD register */
506 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
507 cmd.resp_type = MMC_RSP_R1;
508 cmd.cmdarg = 0;
509
510 data.dest = (char *)ext_csd;
511 data.blocks = 1;
512 data.blocksize = MMC_MAX_BLOCK_LEN;
513 data.flags = MMC_DATA_READ;
514
515 err = mmc_send_cmd(mmc, &cmd, &data);
516
517 return err;
518 }
519
520 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
521 {
522 struct mmc_cmd cmd;
523 int timeout = 1000;
524 int retries = 3;
525 int ret;
526
527 cmd.cmdidx = MMC_CMD_SWITCH;
528 cmd.resp_type = MMC_RSP_R1b;
529 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
530 (index << 16) |
531 (value << 8);
532
533 while (retries > 0) {
534 ret = mmc_send_cmd(mmc, &cmd, NULL);
535
536 /* Waiting for the ready status */
537 if (!ret) {
538 ret = mmc_send_status(mmc, timeout);
539 return ret;
540 }
541
542 retries--;
543 }
544
545 return ret;
546
547 }
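/*
 * mmc_switch() issues CMD6 (SWITCH) in write-byte mode: it writes "value"
 * into EXT_CSD byte "index" and then polls the card status until it has
 * left the programming state. For example, the high-speed timing switch
 * later in this file is simply:
 *
 *	mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING, 1);
 */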
548
549 static int mmc_change_freq(struct mmc *mmc)
550 {
551 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
552 char cardtype;
553 int err;
554
555 mmc->card_caps = 0;
556
557 if (mmc_host_is_spi(mmc))
558 return 0;
559
560 /* Only version 4 supports high-speed */
561 if (mmc->version < MMC_VERSION_4)
562 return 0;
563
564 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
565
566 err = mmc_send_ext_csd(mmc, ext_csd);
567
568 if (err)
569 return err;
570
571 cardtype = ext_csd[EXT_CSD_CARD_TYPE] & 0xf;
572
573 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING, 1);
574
575 if (err)
576 return err;
577
578 /* Now check to see that it worked */
579 err = mmc_send_ext_csd(mmc, ext_csd);
580
581 if (err)
582 return err;
583
584 /* No high-speed support */
585 if (!ext_csd[EXT_CSD_HS_TIMING])
586 return 0;
587
588 /* High Speed is set, there are two types: 52MHz and 26MHz */
589 if (cardtype & EXT_CSD_CARD_TYPE_52) {
590 if (cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
591 mmc->card_caps |= MMC_MODE_DDR_52MHz;
592 mmc->card_caps |= MMC_MODE_HS_52MHz | MMC_MODE_HS;
593 } else {
594 mmc->card_caps |= MMC_MODE_HS;
595 }
596
597 return 0;
598 }
599
600 static int mmc_set_capacity(struct mmc *mmc, int part_num)
601 {
602 switch (part_num) {
603 case 0:
604 mmc->capacity = mmc->capacity_user;
605 break;
606 case 1:
607 case 2:
608 mmc->capacity = mmc->capacity_boot;
609 break;
610 case 3:
611 mmc->capacity = mmc->capacity_rpmb;
612 break;
613 case 4:
614 case 5:
615 case 6:
616 case 7:
617 mmc->capacity = mmc->capacity_gp[part_num - 4];
618 break;
619 default:
620 return -1;
621 }
622
623 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
624
625 return 0;
626 }
627
628 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
629 {
630 int ret;
631
632 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
633 (mmc->part_config & ~PART_ACCESS_MASK)
634 | (part_num & PART_ACCESS_MASK));
635
636 /*
637 * Set the capacity if the switch succeeded or was intended
638 * to return to representing the raw device.
639 */
640 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
641 ret = mmc_set_capacity(mmc, part_num);
642 mmc_get_blk_desc(mmc)->hwpart = part_num;
643 }
644
645 return ret;
646 }
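/*
 * Rough usage sketch (not part of this file): to access eMMC boot
 * partition 1 a caller would typically do something like
 *
 *	if (!mmc_switch_part(mmc, 1)) {
 *		... read or write through the block device ...
 *		mmc_switch_part(mmc, 0);	(back to the user area)
 *	}
 *
 * where the partition numbers follow the encoding handled by
 * mmc_set_capacity() above (0 = user, 1/2 = boot, 3 = RPMB, 4-7 = GP).
 */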
647
648 int mmc_hwpart_config(struct mmc *mmc,
649 const struct mmc_hwpart_conf *conf,
650 enum mmc_hwpart_conf_mode mode)
651 {
652 u8 part_attrs = 0;
653 u32 enh_size_mult;
654 u32 enh_start_addr;
655 u32 gp_size_mult[4];
656 u32 max_enh_size_mult;
657 u32 tot_enh_size_mult = 0;
658 u8 wr_rel_set;
659 int i, pidx, err;
660 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
661
662 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
663 return -EINVAL;
664
665 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
666 printf("eMMC >= 4.4 required for enhanced user data area\n");
667 return -EMEDIUMTYPE;
668 }
669
670 if (!(mmc->part_support & PART_SUPPORT)) {
671 printf("Card does not support partitioning\n");
672 return -EMEDIUMTYPE;
673 }
674
675 if (!mmc->hc_wp_grp_size) {
676 printf("Card does not define HC WP group size\n");
677 return -EMEDIUMTYPE;
678 }
679
680 /* check partition alignment and total enhanced size */
681 if (conf->user.enh_size) {
682 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
683 conf->user.enh_start % mmc->hc_wp_grp_size) {
684 printf("User data enhanced area not HC WP group "
685 "size aligned\n");
686 return -EINVAL;
687 }
688 part_attrs |= EXT_CSD_ENH_USR;
689 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
690 if (mmc->high_capacity) {
691 enh_start_addr = conf->user.enh_start;
692 } else {
693 enh_start_addr = (conf->user.enh_start << 9);
694 }
695 } else {
696 enh_size_mult = 0;
697 enh_start_addr = 0;
698 }
699 tot_enh_size_mult += enh_size_mult;
700
701 for (pidx = 0; pidx < 4; pidx++) {
702 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
703 printf("GP%i partition not HC WP group size "
704 "aligned\n", pidx+1);
705 return -EINVAL;
706 }
707 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
708 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
709 part_attrs |= EXT_CSD_ENH_GP(pidx);
710 tot_enh_size_mult += gp_size_mult[pidx];
711 }
712 }
713
714 	if (part_attrs && !(mmc->part_support & ENHNCD_SUPPORT)) {
715 printf("Card does not support enhanced attribute\n");
716 return -EMEDIUMTYPE;
717 }
718
719 err = mmc_send_ext_csd(mmc, ext_csd);
720 if (err)
721 return err;
722
723 max_enh_size_mult =
724 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
725 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
726 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
727 if (tot_enh_size_mult > max_enh_size_mult) {
728 printf("Total enhanced size exceeds maximum (%u > %u)\n",
729 tot_enh_size_mult, max_enh_size_mult);
730 return -EMEDIUMTYPE;
731 }
732
733 /* The default value of EXT_CSD_WR_REL_SET is device
734 	 * dependent; the values can only be changed if the
735 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
736 * changed only once and before partitioning is completed. */
737 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
738 if (conf->user.wr_rel_change) {
739 if (conf->user.wr_rel_set)
740 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
741 else
742 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
743 }
744 for (pidx = 0; pidx < 4; pidx++) {
745 if (conf->gp_part[pidx].wr_rel_change) {
746 if (conf->gp_part[pidx].wr_rel_set)
747 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
748 else
749 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
750 }
751 }
752
753 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
754 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
755 puts("Card does not support host controlled partition write "
756 "reliability settings\n");
757 return -EMEDIUMTYPE;
758 }
759
760 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
761 EXT_CSD_PARTITION_SETTING_COMPLETED) {
762 printf("Card already partitioned\n");
763 return -EPERM;
764 }
765
766 if (mode == MMC_HWPART_CONF_CHECK)
767 return 0;
768
769 /* Partitioning requires high-capacity size definitions */
770 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
771 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
772 EXT_CSD_ERASE_GROUP_DEF, 1);
773
774 if (err)
775 return err;
776
777 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
778
779 /* update erase group size to be high-capacity */
780 mmc->erase_grp_size =
781 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
782
783 }
784
785 /* all OK, write the configuration */
786 for (i = 0; i < 4; i++) {
787 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
788 EXT_CSD_ENH_START_ADDR+i,
789 (enh_start_addr >> (i*8)) & 0xFF);
790 if (err)
791 return err;
792 }
793 for (i = 0; i < 3; i++) {
794 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
795 EXT_CSD_ENH_SIZE_MULT+i,
796 (enh_size_mult >> (i*8)) & 0xFF);
797 if (err)
798 return err;
799 }
800 for (pidx = 0; pidx < 4; pidx++) {
801 for (i = 0; i < 3; i++) {
802 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
803 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
804 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
805 if (err)
806 return err;
807 }
808 }
809 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
810 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
811 if (err)
812 return err;
813
814 if (mode == MMC_HWPART_CONF_SET)
815 return 0;
816
817 /* The WR_REL_SET is a write-once register but shall be
818 * written before setting PART_SETTING_COMPLETED. As it is
819 * write-once we can only write it when completing the
820 * partitioning. */
821 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
822 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
823 EXT_CSD_WR_REL_SET, wr_rel_set);
824 if (err)
825 return err;
826 }
827
828 /* Setting PART_SETTING_COMPLETED confirms the partition
829 * configuration but it only becomes effective after power
830 * cycle, so we do not adjust the partition related settings
831 * in the mmc struct. */
832
833 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
834 EXT_CSD_PARTITION_SETTING,
835 EXT_CSD_PARTITION_SETTING_COMPLETED);
836 if (err)
837 return err;
838
839 return 0;
840 }
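/*
 * Illustrative sketch (not taken from a real board): callers normally run
 * mmc_hwpart_config() in two passes, first MMC_HWPART_CONF_CHECK to
 * validate the layout against the card, then MMC_HWPART_CONF_COMPLETE to
 * make it permanent. Using only fields referenced above, and assuming
 * sizes are given in 512-byte blocks like hc_wp_grp_size:
 *
 *	struct mmc_hwpart_conf conf = { 0 };
 *
 *	conf.user.enh_start = 0;
 *	conf.user.enh_size = mmc->hc_wp_grp_size;	(one WP group)
 *	if (!mmc_hwpart_config(mmc, &conf, MMC_HWPART_CONF_CHECK))
 *		mmc_hwpart_config(mmc, &conf, MMC_HWPART_CONF_COMPLETE);
 *
 * Remember that PARTITION_SETTING_COMPLETED only takes effect after a
 * power cycle, as noted above the final mmc_switch() call.
 */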
841
842 #if !CONFIG_IS_ENABLED(DM_MMC)
843 int mmc_getcd(struct mmc *mmc)
844 {
845 int cd;
846
847 cd = board_mmc_getcd(mmc);
848
849 if (cd < 0) {
850 if (mmc->cfg->ops->getcd)
851 cd = mmc->cfg->ops->getcd(mmc);
852 else
853 cd = 1;
854 }
855
856 return cd;
857 }
858 #endif
859
860 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
861 {
862 struct mmc_cmd cmd;
863 struct mmc_data data;
864
865 /* Switch the frequency */
866 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
867 cmd.resp_type = MMC_RSP_R1;
868 cmd.cmdarg = (mode << 31) | 0xffffff;
869 cmd.cmdarg &= ~(0xf << (group * 4));
870 cmd.cmdarg |= value << (group * 4);
871
872 data.dest = (char *)resp;
873 data.blocksize = 64;
874 data.blocks = 1;
875 data.flags = MMC_DATA_READ;
876
877 return mmc_send_cmd(mmc, &cmd, &data);
878 }
879
880
881 static int sd_change_freq(struct mmc *mmc)
882 {
883 int err;
884 struct mmc_cmd cmd;
885 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
886 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
887 struct mmc_data data;
888 int timeout;
889
890 mmc->card_caps = 0;
891
892 if (mmc_host_is_spi(mmc))
893 return 0;
894
895 /* Read the SCR to find out if this card supports higher speeds */
896 cmd.cmdidx = MMC_CMD_APP_CMD;
897 cmd.resp_type = MMC_RSP_R1;
898 cmd.cmdarg = mmc->rca << 16;
899
900 err = mmc_send_cmd(mmc, &cmd, NULL);
901
902 if (err)
903 return err;
904
905 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
906 cmd.resp_type = MMC_RSP_R1;
907 cmd.cmdarg = 0;
908
909 timeout = 3;
910
911 retry_scr:
912 data.dest = (char *)scr;
913 data.blocksize = 8;
914 data.blocks = 1;
915 data.flags = MMC_DATA_READ;
916
917 err = mmc_send_cmd(mmc, &cmd, &data);
918
919 if (err) {
920 if (timeout--)
921 goto retry_scr;
922
923 return err;
924 }
925
926 mmc->scr[0] = __be32_to_cpu(scr[0]);
927 mmc->scr[1] = __be32_to_cpu(scr[1]);
928
929 switch ((mmc->scr[0] >> 24) & 0xf) {
930 case 0:
931 mmc->version = SD_VERSION_1_0;
932 break;
933 case 1:
934 mmc->version = SD_VERSION_1_10;
935 break;
936 case 2:
937 mmc->version = SD_VERSION_2;
938 if ((mmc->scr[0] >> 15) & 0x1)
939 mmc->version = SD_VERSION_3;
940 break;
941 default:
942 mmc->version = SD_VERSION_1_0;
943 break;
944 }
945
946 if (mmc->scr[0] & SD_DATA_4BIT)
947 mmc->card_caps |= MMC_MODE_4BIT;
948
949 /* Version 1.0 doesn't support switching */
950 if (mmc->version == SD_VERSION_1_0)
951 return 0;
952
953 timeout = 4;
954 while (timeout--) {
955 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
956 (u8 *)switch_status);
957
958 if (err)
959 return err;
960
961 /* The high-speed function is busy. Try again */
962 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
963 break;
964 }
965
966 /* If high-speed isn't supported, we return */
967 if (!(__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED))
968 return 0;
969
970 /*
971 * If the host doesn't support SD_HIGHSPEED, do not switch card to
972 	 * HIGHSPEED mode even if the card supports SD_HIGHSPEED.
973 	 * This avoids further problems when the card and the host
974 	 * end up running in different modes.
975 */
976 if (!((mmc->cfg->host_caps & MMC_MODE_HS_52MHz) &&
977 (mmc->cfg->host_caps & MMC_MODE_HS)))
978 return 0;
979
980 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, 1, (u8 *)switch_status);
981
982 if (err)
983 return err;
984
985 if ((__be32_to_cpu(switch_status[4]) & 0x0f000000) == 0x01000000)
986 mmc->card_caps |= MMC_MODE_HS;
987
988 return 0;
989 }
990
991 static int sd_read_ssr(struct mmc *mmc)
992 {
993 int err, i;
994 struct mmc_cmd cmd;
995 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
996 struct mmc_data data;
997 int timeout = 3;
998 unsigned int au, eo, et, es;
999
1000 cmd.cmdidx = MMC_CMD_APP_CMD;
1001 cmd.resp_type = MMC_RSP_R1;
1002 cmd.cmdarg = mmc->rca << 16;
1003
1004 err = mmc_send_cmd(mmc, &cmd, NULL);
1005 if (err)
1006 return err;
1007
1008 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1009 cmd.resp_type = MMC_RSP_R1;
1010 cmd.cmdarg = 0;
1011
1012 retry_ssr:
1013 data.dest = (char *)ssr;
1014 data.blocksize = 64;
1015 data.blocks = 1;
1016 data.flags = MMC_DATA_READ;
1017
1018 err = mmc_send_cmd(mmc, &cmd, &data);
1019 if (err) {
1020 if (timeout--)
1021 goto retry_ssr;
1022
1023 return err;
1024 }
1025
1026 for (i = 0; i < 16; i++)
1027 ssr[i] = be32_to_cpu(ssr[i]);
1028
1029 au = (ssr[2] >> 12) & 0xF;
1030 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1031 mmc->ssr.au = sd_au_size[au];
1032 es = (ssr[3] >> 24) & 0xFF;
1033 es |= (ssr[2] & 0xFF) << 8;
1034 et = (ssr[3] >> 18) & 0x3F;
1035 if (es && et) {
1036 eo = (ssr[3] >> 16) & 0x3;
1037 mmc->ssr.erase_timeout = (et * 1000) / es;
1038 mmc->ssr.erase_offset = eo * 1000;
1039 }
1040 } else {
1041 debug("Invalid Allocation Unit Size.\n");
1042 }
1043
1044 return 0;
1045 }
1046
1047 /* frequency bases */
1048 /* divided by 10 to be nice to platforms without floating point */
1049 static const int fbase[] = {
1050 10000,
1051 100000,
1052 1000000,
1053 10000000,
1054 };
1055
1056 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
1057 * to platforms without floating point.
1058 */
1059 static const u8 multipliers[] = {
1060 0, /* reserved */
1061 10,
1062 12,
1063 13,
1064 15,
1065 20,
1066 25,
1067 30,
1068 35,
1069 40,
1070 45,
1071 50,
1072 55,
1073 60,
1074 70,
1075 80,
1076 };
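/*
 * Worked example: the common default TRAN_SPEED value 0x32 decodes to
 * frequency-unit index 2 (10 MHz, stored here as 1000000) and multiplier
 * index 6 (2.5, stored as 25), so mmc_startup() computes
 * 1000000 * 25 = 25000000, i.e. the familiar 25 MHz default SD clock.
 */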
1077
1078 #if !CONFIG_IS_ENABLED(DM_MMC)
1079 static void mmc_set_ios(struct mmc *mmc)
1080 {
1081 if (mmc->cfg->ops->set_ios)
1082 mmc->cfg->ops->set_ios(mmc);
1083 }
1084 #endif
1085
1086 void mmc_set_clock(struct mmc *mmc, uint clock)
1087 {
1088 if (clock > mmc->cfg->f_max)
1089 clock = mmc->cfg->f_max;
1090
1091 if (clock < mmc->cfg->f_min)
1092 clock = mmc->cfg->f_min;
1093
1094 mmc->clock = clock;
1095
1096 mmc_set_ios(mmc);
1097 }
1098
1099 static void mmc_set_bus_width(struct mmc *mmc, uint width)
1100 {
1101 mmc->bus_width = width;
1102
1103 mmc_set_ios(mmc);
1104 }
1105
1106 static int sd_select_bus_freq_width(struct mmc *mmc)
1107 {
1108 int err;
1109 struct mmc_cmd cmd;
1110
1111 err = sd_change_freq(mmc);
1112 if (err)
1113 return err;
1114
1115 /* Restrict card's capabilities by what the host can do */
1116 mmc->card_caps &= mmc->cfg->host_caps;
1117
1118 if (mmc->card_caps & MMC_MODE_4BIT) {
1119 cmd.cmdidx = MMC_CMD_APP_CMD;
1120 cmd.resp_type = MMC_RSP_R1;
1121 cmd.cmdarg = mmc->rca << 16;
1122
1123 err = mmc_send_cmd(mmc, &cmd, NULL);
1124 if (err)
1125 return err;
1126
1127 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1128 cmd.resp_type = MMC_RSP_R1;
1129 cmd.cmdarg = 2;
1130 err = mmc_send_cmd(mmc, &cmd, NULL);
1131 if (err)
1132 return err;
1133
1134 mmc_set_bus_width(mmc, 4);
1135 }
1136
1137 err = sd_read_ssr(mmc);
1138 if (err)
1139 return err;
1140
1141 if (mmc->card_caps & MMC_MODE_HS)
1142 mmc->tran_speed = 50000000;
1143 else
1144 mmc->tran_speed = 25000000;
1145
1146 return 0;
1147 }
1148
1149 static int mmc_select_bus_freq_width(struct mmc *mmc)
1150 {
1151 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1152 const u8 *ext_csd = mmc->ext_csd;
1153 /* An array of possible bus widths in order of preference */
1154 static const unsigned int ext_csd_bits[] = {
1155 EXT_CSD_DDR_BUS_WIDTH_8,
1156 EXT_CSD_DDR_BUS_WIDTH_4,
1157 EXT_CSD_BUS_WIDTH_8,
1158 EXT_CSD_BUS_WIDTH_4,
1159 EXT_CSD_BUS_WIDTH_1,
1160 };
1161 /* An array to map CSD bus widths to host cap bits */
1162 static const unsigned int ext_to_hostcaps[] = {
1163 [EXT_CSD_DDR_BUS_WIDTH_4] =
1164 MMC_MODE_DDR_52MHz | MMC_MODE_4BIT,
1165 [EXT_CSD_DDR_BUS_WIDTH_8] =
1166 MMC_MODE_DDR_52MHz | MMC_MODE_8BIT,
1167 [EXT_CSD_BUS_WIDTH_4] = MMC_MODE_4BIT,
1168 [EXT_CSD_BUS_WIDTH_8] = MMC_MODE_8BIT,
1169 };
1170 /* An array to map chosen bus width to an integer */
1171 static const unsigned int widths[] = {
1172 8, 4, 8, 4, 1,
1173 };
1174 int err;
1175 int idx;
1176
1177 err = mmc_change_freq(mmc);
1178 if (err)
1179 return err;
1180
1181 /* Restrict card's capabilities by what the host can do */
1182 mmc->card_caps &= mmc->cfg->host_caps;
1183
1184 /* Only version 4 of MMC supports wider bus widths */
1185 if (mmc->version < MMC_VERSION_4)
1186 return 0;
1187
1188 if (!mmc->ext_csd) {
1189 		debug("No ext_csd found!\n"); /* this should never happen */
1190 return -ENOTSUPP;
1191 }
1192
1193 for (idx = 0; idx < ARRAY_SIZE(ext_csd_bits); idx++) {
1194 unsigned int extw = ext_csd_bits[idx];
1195 unsigned int caps = ext_to_hostcaps[extw];
1196 /*
1197 * If the bus width is still not changed,
1198 * don't try to set the default again.
1199 * Otherwise, recover from switch attempts
1200 * by switching to 1-bit bus width.
1201 */
1202 if (extw == EXT_CSD_BUS_WIDTH_1 &&
1203 mmc->bus_width == 1) {
1204 err = 0;
1205 break;
1206 }
1207
1208 /*
1209 * Check to make sure the card and controller support
1210 * these capabilities
1211 */
1212 if ((mmc->card_caps & caps) != caps)
1213 continue;
1214
1215 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1216 EXT_CSD_BUS_WIDTH, extw);
1217
1218 if (err)
1219 continue;
1220
1221 mmc->ddr_mode = (caps & MMC_MODE_DDR_52MHz) ? 1 : 0;
1222 mmc_set_bus_width(mmc, widths[idx]);
1223
1224 err = mmc_send_ext_csd(mmc, test_csd);
1225
1226 if (err)
1227 continue;
1228
1229 /* Only compare read only fields */
1230 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1231 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1232 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1233 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1234 ext_csd[EXT_CSD_REV]
1235 == test_csd[EXT_CSD_REV] &&
1236 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1237 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1238 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1239 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1240 break;
1241
1242 err = -EBADMSG;
1243 }
1244
1245 if (err)
1246 return err;
1247
1248 if (mmc->card_caps & MMC_MODE_HS) {
1249 if (mmc->card_caps & MMC_MODE_HS_52MHz)
1250 mmc->tran_speed = 52000000;
1251 else
1252 mmc->tran_speed = 26000000;
1253 }
1254
1255 return err;
1256 }
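/*
 * The loop above verifies every candidate bus width by re-reading the
 * EXT_CSD at that width and comparing a handful of read-only fields with
 * the copy that was taken at 1-bit width; if the wide transfer is
 * unreliable the comparison fails and the next narrower width is tried.
 */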
1257
1258 static int mmc_startup_v4(struct mmc *mmc)
1259 {
1260 int err, i;
1261 u64 capacity;
1262 bool has_parts = false;
1263 bool part_completed;
1264 u8 *ext_csd;
1265
1266 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
1267 return 0;
1268
1269 ext_csd = malloc_cache_aligned(MMC_MAX_BLOCK_LEN);
1270 if (!ext_csd)
1271 return -ENOMEM;
1272
1273 mmc->ext_csd = ext_csd;
1274
1275 /* check ext_csd version and capacity */
1276 err = mmc_send_ext_csd(mmc, ext_csd);
1277 if (err)
1278 return err;
1279 if (ext_csd[EXT_CSD_REV] >= 2) {
1280 /*
1281 		 * According to the JEDEC standard, the capacity reported
1282 		 * in EXT_CSD (SEC_COUNT) is only taken as valid if it
1283 		 * works out to more than 2 GB
1284 */
1285 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
1286 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
1287 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
1288 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
1289 capacity *= MMC_MAX_BLOCK_LEN;
1290 if ((capacity >> 20) > 2 * 1024)
1291 mmc->capacity_user = capacity;
1292 }
1293
1294 switch (ext_csd[EXT_CSD_REV]) {
1295 case 1:
1296 mmc->version = MMC_VERSION_4_1;
1297 break;
1298 case 2:
1299 mmc->version = MMC_VERSION_4_2;
1300 break;
1301 case 3:
1302 mmc->version = MMC_VERSION_4_3;
1303 break;
1304 case 5:
1305 mmc->version = MMC_VERSION_4_41;
1306 break;
1307 case 6:
1308 mmc->version = MMC_VERSION_4_5;
1309 break;
1310 case 7:
1311 mmc->version = MMC_VERSION_5_0;
1312 break;
1313 case 8:
1314 mmc->version = MMC_VERSION_5_1;
1315 break;
1316 }
1317
1318 /* The partition data may be non-zero but it is only
1319 * effective if PARTITION_SETTING_COMPLETED is set in
1320 * EXT_CSD, so ignore any data if this bit is not set,
1321 * except for enabling the high-capacity group size
1322 * definition (see below).
1323 */
1324 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
1325 EXT_CSD_PARTITION_SETTING_COMPLETED);
1326
1327 /* store the partition info of emmc */
1328 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
1329 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
1330 ext_csd[EXT_CSD_BOOT_MULT])
1331 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
1332 if (part_completed &&
1333 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
1334 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
1335
1336 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
1337
1338 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
1339
1340 for (i = 0; i < 4; i++) {
1341 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
1342 uint mult = (ext_csd[idx + 2] << 16) +
1343 (ext_csd[idx + 1] << 8) + ext_csd[idx];
1344 if (mult)
1345 has_parts = true;
1346 if (!part_completed)
1347 continue;
1348 mmc->capacity_gp[i] = mult;
1349 mmc->capacity_gp[i] *=
1350 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
1351 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1352 mmc->capacity_gp[i] <<= 19;
1353 }
1354
1355 if (part_completed) {
1356 mmc->enh_user_size =
1357 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
1358 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
1359 ext_csd[EXT_CSD_ENH_SIZE_MULT];
1360 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
1361 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1362 mmc->enh_user_size <<= 19;
1363 mmc->enh_user_start =
1364 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
1365 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
1366 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
1367 ext_csd[EXT_CSD_ENH_START_ADDR];
1368 if (mmc->high_capacity)
1369 mmc->enh_user_start <<= 9;
1370 }
1371
1372 /*
1373 * Host needs to enable ERASE_GRP_DEF bit if device is
1374 * partitioned. This bit will be lost every time after a reset
1375 * or power off. This will affect erase size.
1376 */
1377 if (part_completed)
1378 has_parts = true;
1379 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
1380 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
1381 has_parts = true;
1382 if (has_parts) {
1383 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1384 EXT_CSD_ERASE_GROUP_DEF, 1);
1385
1386 if (err)
1387 return err;
1388
1389 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1390 }
1391
1392 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
1393 /* Read out group size from ext_csd */
1394 mmc->erase_grp_size =
1395 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1396 /*
1397 * if high capacity and partition setting completed
1398 * SEC_COUNT is valid even if it is smaller than 2 GiB
1399 * JEDEC Standard JESD84-B45, 6.2.4
1400 */
1401 if (mmc->high_capacity && part_completed) {
1402 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
1403 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
1404 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
1405 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
1406 capacity *= MMC_MAX_BLOCK_LEN;
1407 mmc->capacity_user = capacity;
1408 }
1409 } else {
1410 /* Calculate the group size from the csd value. */
1411 int erase_gsz, erase_gmul;
1412
1413 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
1414 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
1415 mmc->erase_grp_size = (erase_gsz + 1)
1416 * (erase_gmul + 1);
1417 }
1418
1419 mmc->hc_wp_grp_size = 1024
1420 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1421 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1422
1423 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1424
1425 return 0;
1426 }
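/*
 * Note that the EXT_CSD image read here is cached in mmc->ext_csd so that
 * later code such as mmc_select_bus_freq_width() can consult it without
 * re-reading it from the card.
 */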
1427
1428 static int mmc_startup(struct mmc *mmc)
1429 {
1430 int err, i;
1431 uint mult, freq;
1432 u64 cmult, csize;
1433 struct mmc_cmd cmd;
1434 struct blk_desc *bdesc;
1435
1436 #ifdef CONFIG_MMC_SPI_CRC_ON
1437 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
1438 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
1439 cmd.resp_type = MMC_RSP_R1;
1440 cmd.cmdarg = 1;
1441 err = mmc_send_cmd(mmc, &cmd, NULL);
1442
1443 if (err)
1444 return err;
1445 }
1446 #endif
1447
1448 /* Put the Card in Identify Mode */
1449 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
1450 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
1451 cmd.resp_type = MMC_RSP_R2;
1452 cmd.cmdarg = 0;
1453
1454 err = mmc_send_cmd(mmc, &cmd, NULL);
1455
1456 if (err)
1457 return err;
1458
1459 memcpy(mmc->cid, cmd.response, 16);
1460
1461 /*
1462 * For MMC cards, set the Relative Address.
1463 	 * For SD cards, get the Relative Address.
1464 	 * This also puts the card into Standby State
1465 */
1466 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
1467 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
1468 cmd.cmdarg = mmc->rca << 16;
1469 cmd.resp_type = MMC_RSP_R6;
1470
1471 err = mmc_send_cmd(mmc, &cmd, NULL);
1472
1473 if (err)
1474 return err;
1475
1476 if (IS_SD(mmc))
1477 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
1478 }
1479
1480 /* Get the Card-Specific Data */
1481 cmd.cmdidx = MMC_CMD_SEND_CSD;
1482 cmd.resp_type = MMC_RSP_R2;
1483 cmd.cmdarg = mmc->rca << 16;
1484
1485 err = mmc_send_cmd(mmc, &cmd, NULL);
1486
1487 if (err)
1488 return err;
1489
1490 mmc->csd[0] = cmd.response[0];
1491 mmc->csd[1] = cmd.response[1];
1492 mmc->csd[2] = cmd.response[2];
1493 mmc->csd[3] = cmd.response[3];
1494
1495 if (mmc->version == MMC_VERSION_UNKNOWN) {
1496 int version = (cmd.response[0] >> 26) & 0xf;
1497
1498 switch (version) {
1499 case 0:
1500 mmc->version = MMC_VERSION_1_2;
1501 break;
1502 case 1:
1503 mmc->version = MMC_VERSION_1_4;
1504 break;
1505 case 2:
1506 mmc->version = MMC_VERSION_2_2;
1507 break;
1508 case 3:
1509 mmc->version = MMC_VERSION_3;
1510 break;
1511 case 4:
1512 mmc->version = MMC_VERSION_4;
1513 break;
1514 default:
1515 mmc->version = MMC_VERSION_1_2;
1516 break;
1517 }
1518 }
1519
1520 /* divide frequency by 10, since the mults are 10x bigger */
1521 freq = fbase[(cmd.response[0] & 0x7)];
1522 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
1523
1524 mmc->tran_speed = freq * mult;
1525
1526 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
1527 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
1528
1529 if (IS_SD(mmc))
1530 mmc->write_bl_len = mmc->read_bl_len;
1531 else
1532 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
1533
1534 if (mmc->high_capacity) {
1535 csize = (mmc->csd[1] & 0x3f) << 16
1536 | (mmc->csd[2] & 0xffff0000) >> 16;
1537 cmult = 8;
1538 } else {
1539 csize = (mmc->csd[1] & 0x3ff) << 2
1540 | (mmc->csd[2] & 0xc0000000) >> 30;
1541 cmult = (mmc->csd[2] & 0x00038000) >> 15;
1542 }
1543
1544 mmc->capacity_user = (csize + 1) << (cmult + 2);
1545 mmc->capacity_user *= mmc->read_bl_len;
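/*
 * Worked example for a standard-capacity CSD: C_SIZE = 4095,
 * C_SIZE_MULT = 7 and READ_BL_LEN = 1024 give (4095 + 1) << (7 + 2) =
 * 2097152 blocks of 1024 bytes, i.e. the classic 2 GB limit of
 * non-high-capacity cards.
 */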
1546 mmc->capacity_boot = 0;
1547 mmc->capacity_rpmb = 0;
1548 for (i = 0; i < 4; i++)
1549 mmc->capacity_gp[i] = 0;
1550
1551 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
1552 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
1553
1554 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
1555 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
1556
1557 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
1558 cmd.cmdidx = MMC_CMD_SET_DSR;
1559 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
1560 cmd.resp_type = MMC_RSP_NONE;
1561 if (mmc_send_cmd(mmc, &cmd, NULL))
1562 printf("MMC: SET_DSR failed\n");
1563 }
1564
1565 /* Select the card, and put it into Transfer Mode */
1566 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
1567 cmd.cmdidx = MMC_CMD_SELECT_CARD;
1568 cmd.resp_type = MMC_RSP_R1;
1569 cmd.cmdarg = mmc->rca << 16;
1570 err = mmc_send_cmd(mmc, &cmd, NULL);
1571
1572 if (err)
1573 return err;
1574 }
1575
1576 /*
1577 	 * For SD, the erase group is always one sector
1578 */
1579 mmc->erase_grp_size = 1;
1580 mmc->part_config = MMCPART_NOAVAILABLE;
1581
1582 err = mmc_startup_v4(mmc);
1583 if (err)
1584 return err;
1585
1586 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
1587 if (err)
1588 return err;
1589
1590 if (IS_SD(mmc))
1591 err = sd_select_bus_freq_width(mmc);
1592 else
1593 err = mmc_select_bus_freq_width(mmc);
1594
1595 if (err)
1596 return err;
1597
1598 mmc_set_clock(mmc, mmc->tran_speed);
1599
1600 /* Fix the block length for DDR mode */
1601 if (mmc->ddr_mode) {
1602 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
1603 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
1604 }
1605
1606 /* fill in device description */
1607 bdesc = mmc_get_blk_desc(mmc);
1608 bdesc->lun = 0;
1609 bdesc->hwpart = 0;
1610 bdesc->type = 0;
1611 bdesc->blksz = mmc->read_bl_len;
1612 bdesc->log2blksz = LOG2(bdesc->blksz);
1613 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
1614 #if !defined(CONFIG_SPL_BUILD) || \
1615 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
1616 !defined(CONFIG_USE_TINY_PRINTF))
1617 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
1618 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
1619 (mmc->cid[3] >> 16) & 0xffff);
1620 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
1621 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
1622 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
1623 (mmc->cid[2] >> 24) & 0xff);
1624 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
1625 (mmc->cid[2] >> 16) & 0xf);
1626 #else
1627 bdesc->vendor[0] = 0;
1628 bdesc->product[0] = 0;
1629 bdesc->revision[0] = 0;
1630 #endif
1631 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
1632 part_init(bdesc);
1633 #endif
1634
1635 return 0;
1636 }
1637
1638 static int mmc_send_if_cond(struct mmc *mmc)
1639 {
1640 struct mmc_cmd cmd;
1641 int err;
1642
1643 cmd.cmdidx = SD_CMD_SEND_IF_COND;
1644 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
1645 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
1646 cmd.resp_type = MMC_RSP_R7;
1647
1648 err = mmc_send_cmd(mmc, &cmd, NULL);
1649
1650 if (err)
1651 return err;
1652
1653 if ((cmd.response[0] & 0xff) != 0xaa)
1654 return -EOPNOTSUPP;
1655 else
1656 mmc->version = SD_VERSION_2;
1657
1658 return 0;
1659 }
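/*
 * CMD8 background: the argument built above is 0x1aa when the host
 * supports 2.7-3.6 V (voltage-supplied field = 1, check pattern 0xaa).
 * A version 2.x card echoes the pattern back, which is what the
 * (cmd.response[0] & 0xff) != 0xaa test verifies; older cards simply do
 * not respond to CMD8.
 */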
1660
1661 #if !CONFIG_IS_ENABLED(DM_MMC)
1662 /* board-specific MMC power initializations. */
1663 __weak void board_mmc_power_init(void)
1664 {
1665 }
1666 #endif
1667
1668 static int mmc_power_init(struct mmc *mmc)
1669 {
1670 #if CONFIG_IS_ENABLED(DM_MMC)
1671 #if CONFIG_IS_ENABLED(DM_REGULATOR)
1672 int ret;
1673
1674 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
1675 &mmc->vmmc_supply);
1676 if (ret)
1677 debug("%s: No vmmc supply\n", mmc->dev->name);
1678
1679 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
1680 &mmc->vqmmc_supply);
1681 if (ret)
1682 debug("%s: No vqmmc supply\n", mmc->dev->name);
1683
1684 if (mmc->vmmc_supply) {
1685 ret = regulator_set_enable(mmc->vmmc_supply, true);
1686 if (ret) {
1687 puts("Error enabling VMMC supply\n");
1688 return ret;
1689 }
1690 }
1691 #endif
1692 #else /* !CONFIG_DM_MMC */
1693 /*
1694 * Driver model should use a regulator, as above, rather than calling
1695 * out to board code.
1696 */
1697 board_mmc_power_init();
1698 #endif
1699 return 0;
1700 }
1701
1702 int mmc_start_init(struct mmc *mmc)
1703 {
1704 bool no_card;
1705 int err;
1706
1707 /* we pretend there's no card when init is NULL */
1708 no_card = mmc_getcd(mmc) == 0;
1709 #if !CONFIG_IS_ENABLED(DM_MMC)
1710 no_card = no_card || (mmc->cfg->ops->init == NULL);
1711 #endif
1712 if (no_card) {
1713 mmc->has_init = 0;
1714 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
1715 printf("MMC: no card present\n");
1716 #endif
1717 return -ENOMEDIUM;
1718 }
1719
1720 if (mmc->has_init)
1721 return 0;
1722
1723 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
1724 mmc_adapter_card_type_ident();
1725 #endif
1726 err = mmc_power_init(mmc);
1727 if (err)
1728 return err;
1729
1730 #if CONFIG_IS_ENABLED(DM_MMC)
1731 /* The device has already been probed ready for use */
1732 #else
1733 /* made sure it's not NULL earlier */
1734 err = mmc->cfg->ops->init(mmc);
1735 if (err)
1736 return err;
1737 #endif
1738 mmc->ddr_mode = 0;
1739 mmc_set_bus_width(mmc, 1);
1740 mmc_set_clock(mmc, 1);
1741
1742 /* Reset the Card */
1743 err = mmc_go_idle(mmc);
1744
1745 if (err)
1746 return err;
1747
1748 	/* The internal partition is reset to the user partition (0) on every CMD0 */
1749 mmc_get_blk_desc(mmc)->hwpart = 0;
1750
1751 /* Test for SD version 2 */
1752 err = mmc_send_if_cond(mmc);
1753
1754 /* Now try to get the SD card's operating condition */
1755 err = sd_send_op_cond(mmc);
1756
1757 /* If the command timed out, we check for an MMC card */
1758 if (err == -ETIMEDOUT) {
1759 err = mmc_send_op_cond(mmc);
1760
1761 if (err) {
1762 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
1763 printf("Card did not respond to voltage select!\n");
1764 #endif
1765 return -EOPNOTSUPP;
1766 }
1767 }
1768
1769 if (!err)
1770 mmc->init_in_progress = 1;
1771
1772 return err;
1773 }
1774
1775 static int mmc_complete_init(struct mmc *mmc)
1776 {
1777 int err = 0;
1778
1779 mmc->init_in_progress = 0;
1780 if (mmc->op_cond_pending)
1781 err = mmc_complete_op_cond(mmc);
1782
1783 if (!err)
1784 err = mmc_startup(mmc);
1785 if (err)
1786 mmc->has_init = 0;
1787 else
1788 mmc->has_init = 1;
1789 return err;
1790 }
1791
1792 int mmc_init(struct mmc *mmc)
1793 {
1794 int err = 0;
1795 __maybe_unused unsigned start;
1796 #if CONFIG_IS_ENABLED(DM_MMC)
1797 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
1798
1799 upriv->mmc = mmc;
1800 #endif
1801 if (mmc->has_init)
1802 return 0;
1803
1804 start = get_timer(0);
1805
1806 if (!mmc->init_in_progress)
1807 err = mmc_start_init(mmc);
1808
1809 if (!err)
1810 err = mmc_complete_init(mmc);
1811 if (err)
1812 printf("%s: %d, time %lu\n", __func__, err, get_timer(start));
1813
1814 return err;
1815 }
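/*
 * Typical call sequence, sketched here rather than taken from this file:
 * board or command code looks the device up and initialises it before
 * touching the block layer, roughly
 *
 *	struct mmc *mmc = find_mmc_device(0);
 *
 *	if (mmc && !mmc_init(mmc))
 *		blk_dread(mmc_get_blk_desc(mmc), 0, 1, buf);
 *
 * where blk_dread() is assumed to be the generic block-read helper; with
 * CONFIG_BLK enabled the read ends up back in mmc_bread() above.
 */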
1816
1817 int mmc_set_dsr(struct mmc *mmc, u16 val)
1818 {
1819 mmc->dsr = val;
1820 return 0;
1821 }
1822
1823 /* CPU-specific MMC initializations */
1824 __weak int cpu_mmc_init(bd_t *bis)
1825 {
1826 return -1;
1827 }
1828
1829 /* board-specific MMC initializations. */
1830 __weak int board_mmc_init(bd_t *bis)
1831 {
1832 return -1;
1833 }
1834
1835 void mmc_set_preinit(struct mmc *mmc, int preinit)
1836 {
1837 mmc->preinit = preinit;
1838 }
1839
1840 #if CONFIG_IS_ENABLED(DM_MMC) && defined(CONFIG_SPL_BUILD)
1841 static int mmc_probe(bd_t *bis)
1842 {
1843 return 0;
1844 }
1845 #elif CONFIG_IS_ENABLED(DM_MMC)
1846 static int mmc_probe(bd_t *bis)
1847 {
1848 int ret, i;
1849 struct uclass *uc;
1850 struct udevice *dev;
1851
1852 ret = uclass_get(UCLASS_MMC, &uc);
1853 if (ret)
1854 return ret;
1855
1856 /*
1857 * Try to add them in sequence order. Really with driver model we
1858 * should allow holes, but the current MMC list does not allow that.
1859 * So if we request 0, 1, 3 we will get 0, 1, 2.
1860 */
1861 for (i = 0; ; i++) {
1862 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
1863 if (ret == -ENODEV)
1864 break;
1865 }
1866 uclass_foreach_dev(dev, uc) {
1867 ret = device_probe(dev);
1868 if (ret)
1869 printf("%s - probe failed: %d\n", dev->name, ret);
1870 }
1871
1872 return 0;
1873 }
1874 #else
1875 static int mmc_probe(bd_t *bis)
1876 {
1877 if (board_mmc_init(bis) < 0)
1878 cpu_mmc_init(bis);
1879
1880 return 0;
1881 }
1882 #endif
1883
1884 int mmc_initialize(bd_t *bis)
1885 {
1886 static int initialized = 0;
1887 int ret;
1888 if (initialized) /* Avoid initializing mmc multiple times */
1889 return 0;
1890 initialized = 1;
1891
1892 #if !CONFIG_IS_ENABLED(BLK)
1893 #if !CONFIG_IS_ENABLED(MMC_TINY)
1894 mmc_list_init();
1895 #endif
1896 #endif
1897 ret = mmc_probe(bis);
1898 if (ret)
1899 return ret;
1900
1901 #ifndef CONFIG_SPL_BUILD
1902 print_mmc_devices(',');
1903 #endif
1904
1905 mmc_do_preinit();
1906 return 0;
1907 }
1908
1909 #ifdef CONFIG_CMD_BKOPS_ENABLE
1910 int mmc_set_bkops_enable(struct mmc *mmc)
1911 {
1912 int err;
1913 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1914
1915 err = mmc_send_ext_csd(mmc, ext_csd);
1916 if (err) {
1917 puts("Could not get ext_csd register values\n");
1918 return err;
1919 }
1920
1921 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
1922 puts("Background operations not supported on device\n");
1923 return -EMEDIUMTYPE;
1924 }
1925
1926 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
1927 puts("Background operations already enabled\n");
1928 return 0;
1929 }
1930
1931 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
1932 if (err) {
1933 puts("Failed to enable manual background operations\n");
1934 return err;
1935 }
1936
1937 puts("Enabled manual background operations\n");
1938
1939 return 0;
1940 }
1941 #endif