]> git.ipfire.org Git - people/ms/u-boot.git/blob - drivers/mmc/mmc.c
mmc: add a library function to send tuning command
[people/ms/u-boot.git] / drivers / mmc / mmc.c
1 /*
2 * Copyright 2008, Freescale Semiconductor, Inc
3 * Andy Fleming
4 *
5 * Based vaguely on the Linux code
6 *
7 * SPDX-License-Identifier: GPL-2.0+
8 */
9
10 #include <config.h>
11 #include <common.h>
12 #include <command.h>
13 #include <dm.h>
14 #include <dm/device-internal.h>
15 #include <errno.h>
16 #include <mmc.h>
17 #include <part.h>
18 #include <power/regulator.h>
19 #include <malloc.h>
20 #include <memalign.h>
21 #include <linux/list.h>
22 #include <div64.h>
23 #include "mmc_private.h"
24
/*
 * Lookup table mapping the AU_SIZE field of the SD status register to
 * an allocation-unit size in 512-byte sectors.  Entry 0 means the card
 * does not report an AU size.
 */
static const unsigned int sd_au_size[] = {
	0,		SZ_16K / 512,		SZ_32K / 512,
	SZ_64K / 512,	SZ_128K / 512,		SZ_256K / 512,
	SZ_512K / 512,	SZ_1M / 512,		SZ_2M / 512,
	SZ_4M / 512,	SZ_8M / 512,		(SZ_8M + SZ_4M) / 512,
	SZ_16M / 512,	(SZ_16M + SZ_8M) / 512,	SZ_32M / 512,	SZ_64M / 512,
};
32
/* Forward declarations for static helpers defined later in this file */
static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
static int mmc_power_cycle(struct mmc *mmc);
static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps);
36
#if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY: a single statically allocated device, no device list */
static struct mmc mmc_static;

/* With MMC_TINY there is only one device, so @dev_num is ignored */
struct mmc *find_mmc_device(int dev_num)
{
	return &mmc_static;
}

/* Start init early for the device if it is flagged for pre-init */
void mmc_do_preinit(void)
{
	struct mmc *m = &mmc_static;
#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	/* This adapter always wants pre-init enabled */
	mmc_set_preinit(m, 1);
#endif
	if (m->preinit)
		mmc_start_init(m);
}

/* Block descriptor is embedded directly in the static device */
struct blk_desc *mmc_get_blk_desc(struct mmc *mmc)
{
	return &mmc->block_dev;
}
#endif
59
60 #if !CONFIG_IS_ENABLED(DM_MMC)
61
62 static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout)
63 {
64 return -ENOSYS;
65 }
66
67 __weak int board_mmc_getwp(struct mmc *mmc)
68 {
69 return -1;
70 }
71
72 int mmc_getwp(struct mmc *mmc)
73 {
74 int wp;
75
76 wp = board_mmc_getwp(mmc);
77
78 if (wp < 0) {
79 if (mmc->cfg->ops->getwp)
80 wp = mmc->cfg->ops->getwp(mmc);
81 else
82 wp = 0;
83 }
84
85 return wp;
86 }
87
88 __weak int board_mmc_getcd(struct mmc *mmc)
89 {
90 return -1;
91 }
92 #endif
93
#ifdef CONFIG_MMC_TRACE
/* Log the command index and argument before the command is sent */
void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
{
	printf("CMD_SEND:%d\n", cmd->cmdidx);
	printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
}

/* Log the response words (or the error code) after a command was sent */
void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
{
	int i;
	u8 *ptr;

	if (ret) {
		printf("\t\tRET\t\t\t %d\n", ret);
	} else {
		switch (cmd->resp_type) {
		case MMC_RSP_NONE:
			printf("\t\tMMC_RSP_NONE\n");
			break;
		case MMC_RSP_R1:
			printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
			       cmd->response[0]);
			break;
		case MMC_RSP_R1b:
			printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
			       cmd->response[0]);
			break;
		case MMC_RSP_R2:
			/* 136-bit response: print all four 32-bit words */
			printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
			       cmd->response[0]);
			printf("\t\t \t\t 0x%08X \n",
			       cmd->response[1]);
			printf("\t\t \t\t 0x%08X \n",
			       cmd->response[2]);
			printf("\t\t \t\t 0x%08X \n",
			       cmd->response[3]);
			printf("\n");
			printf("\t\t\t\t\tDUMPING DATA\n");
			/*
			 * Dump each response word byte by byte, highest
			 * byte first (walks pointers downward from
			 * offset 3 — NOTE(review): assumes a
			 * little-endian word layout)
			 */
			for (i = 0; i < 4; i++) {
				int j;
				printf("\t\t\t\t\t%03d - ", i*4);
				ptr = (u8 *)&cmd->response[i];
				ptr += 3;
				for (j = 0; j < 4; j++)
					printf("%02X ", *ptr--);
				printf("\n");
			}
			break;
		case MMC_RSP_R3:
			printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
			       cmd->response[0]);
			break;
		default:
			printf("\t\tERROR MMC rsp not supported\n");
			break;
		}
	}
}

/* Print the card's current-state field (bits [12:9] of the R1 status) */
void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
{
	int status;

	status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
	printf("CURR STATE:%d\n", status);
}
#endif
161
#if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
/* Return a human-readable name for @mode; compiled only when verbose */
const char *mmc_mode_name(enum bus_mode mode)
{
	static const char *const names[] = {
	      [MMC_LEGACY]	= "MMC legacy",
	      [SD_LEGACY]	= "SD Legacy",
	      [MMC_HS]		= "MMC High Speed (26MHz)",
	      [SD_HS]		= "SD High Speed (50MHz)",
	      [UHS_SDR12]	= "UHS SDR12 (25MHz)",
	      [UHS_SDR25]	= "UHS SDR25 (50MHz)",
	      [UHS_SDR50]	= "UHS SDR50 (100MHz)",
	      [UHS_SDR104]	= "UHS SDR104 (208MHz)",
	      [UHS_DDR50]	= "UHS DDR50 (50MHz)",
	      [MMC_HS_52]	= "MMC High Speed (52MHz)",
	      [MMC_DDR_52]	= "MMC DDR52 (52MHz)",
	      [MMC_HS_200]	= "HS200 (200MHz)",
	};

	if (mode >= MMC_MODES_END)
		return "Unknown mode";
	else
		return names[mode];
}
#endif
186
187 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
188 {
189 static const int freqs[] = {
190 [SD_LEGACY] = 25000000,
191 [MMC_HS] = 26000000,
192 [SD_HS] = 50000000,
193 [UHS_SDR12] = 25000000,
194 [UHS_SDR25] = 50000000,
195 [UHS_SDR50] = 100000000,
196 [UHS_SDR104] = 208000000,
197 [UHS_DDR50] = 50000000,
198 [MMC_HS_52] = 52000000,
199 [MMC_DDR_52] = 52000000,
200 [MMC_HS_200] = 200000000,
201 };
202
203 if (mode == MMC_LEGACY)
204 return mmc->legacy_speed;
205 else if (mode >= MMC_MODES_END)
206 return 0;
207 else
208 return freqs[mode];
209 }
210
/*
 * Record @mode as the selected bus mode and cache its nominal transfer
 * frequency and DDR flag on the device.  Always returns 0.
 */
static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
{
	mmc->selected_mode = mode;
	mmc->tran_speed = mmc_mode2freq(mmc, mode);
	mmc->ddr_mode = mmc_is_mode_ddr(mode);
	debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
	      mmc->tran_speed / 1000000);
	return 0;
}
220
221 #if !CONFIG_IS_ENABLED(DM_MMC)
222 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
223 {
224 int ret;
225
226 mmmc_trace_before_send(mmc, cmd);
227 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
228 mmmc_trace_after_send(mmc, cmd, ret);
229
230 return ret;
231 }
232 #endif
233
/*
 * Poll the card with CMD13 (SEND_STATUS) until it is ready for data
 * and no longer in the programming state, or until @timeout ms elapse
 * (one poll per millisecond).
 *
 * Return: 0 when the card is ready, -ECOMM if a status error bit is
 * set, -ETIMEDOUT on timeout, or the command error after 5 failed
 * retries.
 */
int mmc_send_status(struct mmc *mmc, int timeout)
{
	struct mmc_cmd cmd;
	int err, retries = 5;

	cmd.cmdidx = MMC_CMD_SEND_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	/* NOTE(review): cmdarg is left unset in SPI mode — presumably
	 * ignored by SPI hosts; confirm */
	if (!mmc_host_is_spi(mmc))
		cmd.cmdarg = mmc->rca << 16;

	while (1) {
		err = mmc_send_cmd(mmc, &cmd, NULL);
		if (!err) {
			/* Done once ready and out of the PRG state */
			if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
			    (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
			     MMC_STATE_PRG)
				break;

			/* Any error bit in the status aborts the wait */
			if (cmd.response[0] & MMC_STATUS_MASK) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
				printf("Status Error: 0x%08X\n",
				       cmd.response[0]);
#endif
				return -ECOMM;
			}
		} else if (--retries < 0)
			return err;

		if (timeout-- <= 0)
			break;

		udelay(1000);
	}

	mmc_trace_state(mmc, &cmd);
	if (timeout <= 0) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("Timeout waiting card ready\n");
#endif
		return -ETIMEDOUT;
	}

	return 0;
}
278
/*
 * CMD16: set the block length for subsequent transfers.  A no-op in
 * DDR mode (the command is simply skipped there).
 *
 * Return: 0 on success or the command error.
 */
int mmc_set_blocklen(struct mmc *mmc, int len)
{
	struct mmc_cmd cmd;
	int err;

	if (mmc->ddr_mode)
		return 0;

	cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = len;

	err = mmc_send_cmd(mmc, &cmd, NULL);

#ifdef CONFIG_MMC_QUIRKS
	if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
		int retries = 4;
		/*
		 * It has been seen that SET_BLOCKLEN may fail on the first
		 * attempt, let's try a few more times
		 */
		do {
			err = mmc_send_cmd(mmc, &cmd, NULL);
			if (!err)
				break;
		} while (retries--);
	}
#endif

	return err;
}
310
/*
 * Reference tuning-block patterns compared against the data the card
 * returns for a tuning command (see mmc_send_tuning()): 64 bytes for
 * 4-bit bus width, 128 bytes for 8-bit.
 */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
340
/*
 * Send a tuning command (@opcode, e.g. CMD19/CMD21) and compare the
 * block the card returns against the expected pattern for the current
 * bus width.
 *
 * @cmd_error: NOTE(review): currently unused — command errors are
 *	       reported via the return value instead; confirm callers
 *	       do not rely on it being written.
 * Return: 0 if the pattern matched, -EINVAL for an unsupported bus
 * width, -EIO on pattern mismatch, or the command error.
 */
int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
{
	struct mmc_cmd cmd;
	struct mmc_data data;
	const u8 *tuning_block_pattern;
	int size, err;

	/* Pick the reference pattern matching the bus width */
	if (mmc->bus_width == 8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (mmc->bus_width == 4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else {
		return -EINVAL;
	}

	ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);

	cmd.cmdidx = opcode;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_R1;

	/* Read one block of @size bytes into the aligned buffer */
	data.dest = (void *)data_buf;
	data.blocks = 1;
	data.blocksize = size;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	if (err)
		return err;

	if (memcmp(data_buf, tuning_block_pattern, size))
		return -EIO;

	return 0;
}
378
/*
 * Issue one read transfer of @blkcnt blocks starting at @start
 * (CMD17 for a single block, CMD18 plus CMD12 stop for multiple).
 *
 * Return: number of blocks read, or 0 on any error.
 */
static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
			   lbaint_t blkcnt)
{
	struct mmc_cmd cmd;
	struct mmc_data data;

	if (blkcnt > 1)
		cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
	else
		cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;

	/* High-capacity cards are block addressed, others byte addressed */
	if (mmc->high_capacity)
		cmd.cmdarg = start;
	else
		cmd.cmdarg = start * mmc->read_bl_len;

	cmd.resp_type = MMC_RSP_R1;

	data.dest = dst;
	data.blocks = blkcnt;
	data.blocksize = mmc->read_bl_len;
	data.flags = MMC_DATA_READ;

	if (mmc_send_cmd(mmc, &cmd, &data))
		return 0;

	/* Multi-block reads must be terminated with STOP_TRANSMISSION */
	if (blkcnt > 1) {
		cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
		cmd.cmdarg = 0;
		cmd.resp_type = MMC_RSP_R1b;
		if (mmc_send_cmd(mmc, &cmd, NULL)) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			printf("mmc fail to send stop cmd\n");
#endif
			return 0;
		}
	}

	return blkcnt;
}
419
/*
 * Block-device read entry point: read @blkcnt blocks from @start into
 * @dst, after selecting the descriptor's hw partition.  The transfer
 * is split into chunks of at most cfg->b_max blocks.
 *
 * Return: number of blocks read, or 0 on any error.
 */
#if CONFIG_IS_ENABLED(BLK)
ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
#else
ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
		void *dst)
#endif
{
#if CONFIG_IS_ENABLED(BLK)
	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
#endif
	int dev_num = block_dev->devnum;
	int err;
	lbaint_t cur, blocks_todo = blkcnt;

	if (blkcnt == 0)
		return 0;

	struct mmc *mmc = find_mmc_device(dev_num);
	if (!mmc)
		return 0;

	/* Make sure the requested hw partition is actually selected */
	if (CONFIG_IS_ENABLED(MMC_TINY))
		err = mmc_switch_part(mmc, block_dev->hwpart);
	else
		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);

	if (err < 0)
		return 0;

	/* Reject reads that run past the end of the device */
	if ((start + blkcnt) > block_dev->lba) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
		       start + blkcnt, block_dev->lba);
#endif
		return 0;
	}

	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
		debug("%s: Failed to set blocklen\n", __func__);
		return 0;
	}

	/* Split the transfer into chunks the controller can handle */
	do {
		cur = (blocks_todo > mmc->cfg->b_max) ?
			mmc->cfg->b_max : blocks_todo;
		if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
			debug("%s: Failed to read blocks\n", __func__);
			return 0;
		}
		blocks_todo -= cur;
		start += cur;
		dst += cur * mmc->read_bl_len;
	} while (blocks_todo > 0);

	return blkcnt;
}
476
477 static int mmc_go_idle(struct mmc *mmc)
478 {
479 struct mmc_cmd cmd;
480 int err;
481
482 udelay(1000);
483
484 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
485 cmd.cmdarg = 0;
486 cmd.resp_type = MMC_RSP_NONE;
487
488 err = mmc_send_cmd(mmc, &cmd, NULL);
489
490 if (err)
491 return err;
492
493 udelay(2000);
494
495 return 0;
496 }
497
/*
 * Perform the CMD11 voltage-switch sequence to move the card's
 * signalling to @signal_voltage.  For 3.3V no CMD11 is needed and the
 * host voltage is set directly.  The ordering of the clock gating and
 * DAT0 waits below follows the SD spec and must not be rearranged.
 *
 * Return: 0 on success, -EIO on a card error, -ETIMEDOUT if DAT0 does
 * not reach the expected level, or a command/regulator error.
 */
static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
{
	struct mmc_cmd cmd;
	int err = 0;

	/*
	 * Send CMD11 only if the request is to switch the card to
	 * 1.8V signalling.
	 */
	if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		return mmc_set_signal_voltage(mmc, signal_voltage);

	cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_R1;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;

	if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
		return -EIO;

	/*
	 * The card should drive cmd and dat[0:3] low immediately
	 * after the response of cmd11, but wait 100 us to be sure
	 */
	err = mmc_wait_dat0(mmc, 0, 100);
	if (err == -ENOSYS)
		udelay(100);
	else if (err)
		return -ETIMEDOUT;

	/*
	 * During a signal voltage level switch, the clock must be gated
	 * for 5 ms according to the SD spec
	 */
	mmc_set_clock(mmc, mmc->clock, true);

	err = mmc_set_signal_voltage(mmc, signal_voltage);
	if (err)
		return err;

	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
	mdelay(10);
	mmc_set_clock(mmc, mmc->clock, false);

	/*
	 * Failure to switch is indicated by the card holding
	 * dat[0:3] low. Wait for at least 1 ms according to spec
	 */
	err = mmc_wait_dat0(mmc, 1, 1000);
	if (err == -ENOSYS)
		udelay(1000);
	else if (err)
		return -ETIMEDOUT;

	return 0;
}
557
/*
 * Negotiate the SD operating conditions via ACMD41, polling until the
 * card leaves the busy state (up to ~1s).  Sets mmc->version, ocr,
 * high_capacity and rca, and — when @uhs_en and the card accepted the
 * 1.8V request — performs the voltage switch.
 *
 * Return: 0 on success, -EOPNOTSUPP on timeout, or a command error.
 */
static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
{
	int timeout = 1000;
	int err;
	struct mmc_cmd cmd;

	while (1) {
		/* ACMD41 must be preceded by CMD55 (APP_CMD) */
		cmd.cmdidx = MMC_CMD_APP_CMD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
		cmd.resp_type = MMC_RSP_R3;

		/*
		 * Most cards do not answer if some reserved bits
		 * in the ocr are set. However, Some controller
		 * can set bit 7 (reserved for low voltages), but
		 * how to manage low voltages SD card is not yet
		 * specified.
		 */
		cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
			(mmc->cfg->voltages & 0xff8000);

		if (mmc->version == SD_VERSION_2)
			cmd.cmdarg |= OCR_HCS;

		if (uhs_en)
			cmd.cmdarg |= OCR_S18R;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		if (cmd.response[0] & OCR_BUSY)
			break;

		if (timeout-- <= 0)
			return -EOPNOTSUPP;

		udelay(1000);
	}

	if (mmc->version != SD_VERSION_2)
		mmc->version = SD_VERSION_1_0;

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	mmc->ocr = cmd.response[0];

	/* Card is powered and accepted the 1.8V request: switch voltage */
	if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
	    == 0x41000000) {
		err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
		if (err)
			return err;
	}

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 0;

	return 0;
}
635
636 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
637 {
638 struct mmc_cmd cmd;
639 int err;
640
641 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
642 cmd.resp_type = MMC_RSP_R3;
643 cmd.cmdarg = 0;
644 if (use_arg && !mmc_host_is_spi(mmc))
645 cmd.cmdarg = OCR_HCS |
646 (mmc->cfg->voltages &
647 (mmc->ocr & OCR_VOLTAGE_MASK)) |
648 (mmc->ocr & OCR_ACCESS_MODE);
649
650 err = mmc_send_cmd(mmc, &cmd, NULL);
651 if (err)
652 return err;
653 mmc->ocr = cmd.response[0];
654 return 0;
655 }
656
/*
 * Start the eMMC CMD1 negotiation: probe once without an argument,
 * then once with the echoed OCR.  Completion is deferred to
 * mmc_complete_op_cond() via the op_cond_pending flag.
 */
static int mmc_send_op_cond(struct mmc *mmc)
{
	int err, i;

	/* Some cards seem to need this */
	mmc_go_idle(mmc);

	/* Asking to the card its capabilities */
	for (i = 0; i < 2; i++) {
		err = mmc_send_op_cond_iter(mmc, i != 0);
		if (err)
			return err;

		/* exit if not busy (flag seems to be inverted) */
		if (mmc->ocr & OCR_BUSY)
			break;
	}
	mmc->op_cond_pending = 1;
	return 0;
}
677
/*
 * Finish the CMD1 negotiation started by mmc_send_op_cond(): keep
 * polling until the card reports ready (OCR_BUSY set), with a 1s
 * timeout.  Also reads the OCR over SPI and initializes version,
 * high_capacity and rca for the follow-up identification steps.
 *
 * Return: 0 on success, -EOPNOTSUPP on timeout, or a command error.
 */
static int mmc_complete_op_cond(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int timeout = 1000;
	uint start;
	int err;

	mmc->op_cond_pending = 0;
	if (!(mmc->ocr & OCR_BUSY)) {
		/* Some cards seem to need this */
		mmc_go_idle(mmc);

		start = get_timer(0);
		while (1) {
			err = mmc_send_op_cond_iter(mmc, 1);
			if (err)
				return err;
			if (mmc->ocr & OCR_BUSY)
				break;
			if (get_timer(start) > timeout)
				return -EOPNOTSUPP;
			udelay(100);
		}
	}

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		mmc->ocr = cmd.response[0];
	}

	/* Real version is read from EXT_CSD later in the init sequence */
	mmc->version = MMC_VERSION_UNKNOWN;

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 1;

	return 0;
}
723
724
725 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
726 {
727 struct mmc_cmd cmd;
728 struct mmc_data data;
729 int err;
730
731 /* Get the Card Status Register */
732 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
733 cmd.resp_type = MMC_RSP_R1;
734 cmd.cmdarg = 0;
735
736 data.dest = (char *)ext_csd;
737 data.blocks = 1;
738 data.blocksize = MMC_MAX_BLOCK_LEN;
739 data.flags = MMC_DATA_READ;
740
741 err = mmc_send_cmd(mmc, &cmd, &data);
742
743 return err;
744 }
745
746 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
747 {
748 struct mmc_cmd cmd;
749 int timeout = 1000;
750 int retries = 3;
751 int ret;
752
753 cmd.cmdidx = MMC_CMD_SWITCH;
754 cmd.resp_type = MMC_RSP_R1b;
755 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
756 (index << 16) |
757 (value << 8);
758
759 while (retries > 0) {
760 ret = mmc_send_cmd(mmc, &cmd, NULL);
761
762 /* Waiting for the ready status */
763 if (!ret) {
764 ret = mmc_send_status(mmc, timeout);
765 return ret;
766 }
767
768 retries--;
769 }
770
771 return ret;
772
773 }
774
/*
 * Program the card's HS_TIMING field in EXT_CSD for @mode and, for the
 * plain high-speed modes, read EXT_CSD back to confirm the switch took
 * effect.
 *
 * Return: 0 on success, -EINVAL for an unsupported mode, -ENOTSUPP if
 * the card did not take the high-speed timing, or a command error.
 */
static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode)
{
	int err;
	int speed_bits;

	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);

	switch (mode) {
	case MMC_HS:
	case MMC_HS_52:
	case MMC_DDR_52:
		speed_bits = EXT_CSD_TIMING_HS;
		break;
	case MMC_HS_200:
		speed_bits = EXT_CSD_TIMING_HS200;
		break;
	case MMC_LEGACY:
		speed_bits = EXT_CSD_TIMING_LEGACY;
		break;
	default:
		return -EINVAL;
	}
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
			 speed_bits);
	if (err)
		return err;

	if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
		/* Now check to see that it worked */
		err = mmc_send_ext_csd(mmc, test_csd);
		if (err)
			return err;

		/* No high-speed support */
		if (!test_csd[EXT_CSD_HS_TIMING])
			return -ENOTSUPP;
	}

	return 0;
}
815
/*
 * Derive the eMMC card's capability mask (bus widths and speed modes)
 * from the cached EXT_CSD.  SPI hosts and pre-v4 cards are limited to
 * the default 1-bit capability.
 *
 * Return: 0 on success, -ENOTSUPP if no EXT_CSD was cached.
 */
static int mmc_get_capabilities(struct mmc *mmc)
{
	u8 *ext_csd = mmc->ext_csd;
	char cardtype;

	mmc->card_caps = MMC_MODE_1BIT;

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Only version 4 supports high-speed */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	if (!ext_csd) {
		printf("No ext_csd found!\n"); /* this should never happen */
		return -ENOTSUPP;
	}

	mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;

	/* Only the low 6 bits of DEVICE_TYPE are used here */
	cardtype = ext_csd[EXT_CSD_CARD_TYPE] & 0x3f;
	mmc->cardtype = cardtype;

	if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
			EXT_CSD_CARD_TYPE_HS200_1_8V)) {
		mmc->card_caps |= MMC_MODE_HS200;
	}
	if (cardtype & EXT_CSD_CARD_TYPE_52) {
		if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
			mmc->card_caps |= MMC_MODE_DDR_52MHz;
		mmc->card_caps |= MMC_MODE_HS_52MHz;
	}
	if (cardtype & EXT_CSD_CARD_TYPE_26)
		mmc->card_caps |= MMC_MODE_HS;

	return 0;
}
854
855 static int mmc_set_capacity(struct mmc *mmc, int part_num)
856 {
857 switch (part_num) {
858 case 0:
859 mmc->capacity = mmc->capacity_user;
860 break;
861 case 1:
862 case 2:
863 mmc->capacity = mmc->capacity_boot;
864 break;
865 case 3:
866 mmc->capacity = mmc->capacity_rpmb;
867 break;
868 case 4:
869 case 5:
870 case 6:
871 case 7:
872 mmc->capacity = mmc->capacity_gp[part_num - 4];
873 break;
874 default:
875 return -1;
876 }
877
878 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
879
880 return 0;
881 }
882
/*
 * Verify the currently selected bus mode is allowed for the hw
 * partition about to be selected (HS200 is forbidden while a non-user
 * partition is mapped), and re-negotiate mode/width when it is not, or
 * when a better mode is available again.
 */
static int mmc_boot_part_access_chk(struct mmc *mmc, unsigned int part_num)
{
	int forbidden = 0;
	bool change = false;

	if (part_num & PART_ACCESS_MASK)
		forbidden = MMC_CAP(MMC_HS_200);

	if (MMC_CAP(mmc->selected_mode) & forbidden) {
		debug("selected mode (%s) is forbidden for part %d\n",
		      mmc_mode_name(mmc->selected_mode), part_num);
		change = true;
	} else if (mmc->selected_mode != mmc->best_mode) {
		debug("selected mode is not optimal\n");
		change = true;
	}

	if (change)
		return mmc_select_mode_and_width(mmc,
						 mmc->card_caps & ~forbidden);

	return 0;
}
906
/*
 * Select hw partition @part_num via EXT_CSD PARTITION_CONFIG, after
 * checking the bus mode is compatible, and refresh the cached
 * capacity/descriptor on success.
 */
int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
{
	int ret;

	ret = mmc_boot_part_access_chk(mmc, part_num);
	if (ret)
		return ret;

	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
			 (mmc->part_config & ~PART_ACCESS_MASK)
			 | (part_num & PART_ACCESS_MASK));

	/*
	 * Set the capacity if the switch succeeded or was intended
	 * to return to representing the raw device.
	 */
	if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
		ret = mmc_set_capacity(mmc, part_num);
		mmc_get_blk_desc(mmc)->hwpart = part_num;
	}

	return ret;
}
930
/*
 * Configure eMMC hardware partitioning (enhanced user data area, GP
 * partitions, write-reliability settings) according to @conf.
 *
 * @mode selects how far to go: MMC_HWPART_CONF_CHECK validates only,
 * MMC_HWPART_CONF_SET writes the settings, and
 * MMC_HWPART_CONF_COMPLETE additionally sets
 * PARTITION_SETTING_COMPLETED (which only takes effect after a power
 * cycle, so the in-memory partition state is deliberately left alone).
 *
 * Return: 0 on success; -EINVAL on bad arguments/alignment,
 * -EMEDIUMTYPE when the card lacks a required feature, -EPERM if the
 * card is already partitioned, or an EXT_CSD access error.
 */
int mmc_hwpart_config(struct mmc *mmc,
		      const struct mmc_hwpart_conf *conf,
		      enum mmc_hwpart_conf_mode mode)
{
	u8 part_attrs = 0;
	u32 enh_size_mult;
	u32 enh_start_addr;
	u32 gp_size_mult[4];
	u32 max_enh_size_mult;
	u32 tot_enh_size_mult = 0;
	u8 wr_rel_set;
	int i, pidx, err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
		return -EINVAL;

	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
		printf("eMMC >= 4.4 required for enhanced user data area\n");
		return -EMEDIUMTYPE;
	}

	if (!(mmc->part_support & PART_SUPPORT)) {
		printf("Card does not support partitioning\n");
		return -EMEDIUMTYPE;
	}

	if (!mmc->hc_wp_grp_size) {
		printf("Card does not define HC WP group size\n");
		return -EMEDIUMTYPE;
	}

	/* check partition alignment and total enhanced size */
	if (conf->user.enh_size) {
		if (conf->user.enh_size % mmc->hc_wp_grp_size ||
		    conf->user.enh_start % mmc->hc_wp_grp_size) {
			printf("User data enhanced area not HC WP group "
			       "size aligned\n");
			return -EINVAL;
		}
		part_attrs |= EXT_CSD_ENH_USR;
		enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
		/* byte-addressed cards take the start address in bytes */
		if (mmc->high_capacity) {
			enh_start_addr = conf->user.enh_start;
		} else {
			enh_start_addr = (conf->user.enh_start << 9);
		}
	} else {
		enh_size_mult = 0;
		enh_start_addr = 0;
	}
	tot_enh_size_mult += enh_size_mult;

	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
			printf("GP%i partition not HC WP group size "
			       "aligned\n", pidx+1);
			return -EINVAL;
		}
		gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
		if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
			part_attrs |= EXT_CSD_ENH_GP(pidx);
			tot_enh_size_mult += gp_size_mult[pidx];
		}
	}

	if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
		printf("Card does not support enhanced attribute\n");
		return -EMEDIUMTYPE;
	}

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		return err;

	/* MAX_ENH_SIZE_MULT is a 3-byte little-endian field */
	max_enh_size_mult =
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
		ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
	if (tot_enh_size_mult > max_enh_size_mult) {
		printf("Total enhanced size exceeds maximum (%u > %u)\n",
		       tot_enh_size_mult, max_enh_size_mult);
		return -EMEDIUMTYPE;
	}

	/* The default value of EXT_CSD_WR_REL_SET is device
	 * dependent, the values can only be changed if the
	 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
	 * changed only once and before partitioning is completed. */
	wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
	if (conf->user.wr_rel_change) {
		if (conf->user.wr_rel_set)
			wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
		else
			wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].wr_rel_change) {
			if (conf->gp_part[pidx].wr_rel_set)
				wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
			else
				wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
		}
	}

	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
	    !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
		puts("Card does not support host controlled partition write "
		     "reliability settings\n");
		return -EMEDIUMTYPE;
	}

	if (ext_csd[EXT_CSD_PARTITION_SETTING] &
	    EXT_CSD_PARTITION_SETTING_COMPLETED) {
		printf("Card already partitioned\n");
		return -EPERM;
	}

	if (mode == MMC_HWPART_CONF_CHECK)
		return 0;

	/* Partitioning requires high-capacity size definitions */
	if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1);

		if (err)
			return err;

		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;

		/* update erase group size to be high-capacity */
		mmc->erase_grp_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;

	}

	/* all OK, write the configuration */
	for (i = 0; i < 4; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_START_ADDR+i,
				 (enh_start_addr >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (i = 0; i < 3; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_SIZE_MULT+i,
				 (enh_size_mult >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		for (i = 0; i < 3; i++) {
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
					 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
			if (err)
				return err;
		}
	}
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
	if (err)
		return err;

	if (mode == MMC_HWPART_CONF_SET)
		return 0;

	/* The WR_REL_SET is a write-once register but shall be
	 * written before setting PART_SETTING_COMPLETED. As it is
	 * write-once we can only write it when completing the
	 * partitioning. */
	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_WR_REL_SET, wr_rel_set);
		if (err)
			return err;
	}

	/* Setting PART_SETTING_COMPLETED confirms the partition
	 * configuration but it only becomes effective after power
	 * cycle, so we do not adjust the partition related settings
	 * in the mmc struct. */

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITION_SETTING,
			 EXT_CSD_PARTITION_SETTING_COMPLETED);
	if (err)
		return err;

	return 0;
}
1124
1125 #if !CONFIG_IS_ENABLED(DM_MMC)
1126 int mmc_getcd(struct mmc *mmc)
1127 {
1128 int cd;
1129
1130 cd = board_mmc_getcd(mmc);
1131
1132 if (cd < 0) {
1133 if (mmc->cfg->ops->getcd)
1134 cd = mmc->cfg->ops->getcd(mmc);
1135 else
1136 cd = 1;
1137 }
1138
1139 return cd;
1140 }
1141 #endif
1142
1143 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1144 {
1145 struct mmc_cmd cmd;
1146 struct mmc_data data;
1147
1148 /* Switch the frequency */
1149 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1150 cmd.resp_type = MMC_RSP_R1;
1151 cmd.cmdarg = (mode << 31) | 0xffffff;
1152 cmd.cmdarg &= ~(0xf << (group * 4));
1153 cmd.cmdarg |= value << (group * 4);
1154
1155 data.dest = (char *)resp;
1156 data.blocksize = 64;
1157 data.blocks = 1;
1158 data.flags = MMC_DATA_READ;
1159
1160 return mmc_send_cmd(mmc, &cmd, &data);
1161 }
1162
1163
/*
 * Determine the SD card's capabilities: read the SCR to find the spec
 * version and 4-bit support, then use CMD6 in check mode to discover
 * high-speed and (for SD 3.0+) UHS bus-speed support.  Results are
 * accumulated in mmc->card_caps; mmc->version and mmc->scr are also
 * set.  SPI hosts keep the default 1-bit capability.
 */
static int sd_get_capabilities(struct mmc *mmc)
{
	int err;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
	ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
	struct mmc_data data;
	int timeout;
	u32 sd3_bus_mode;

	mmc->card_caps = MMC_MODE_1BIT;

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Read the SCR to find out if this card supports higher speeds */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SEND_SCR;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	/* The SCR read is retried up to 3 times on failure */
	timeout = 3;

retry_scr:
	data.dest = (char *)scr;
	data.blocksize = 8;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);

	if (err) {
		if (timeout--)
			goto retry_scr;

		return err;
	}

	/* SCR is big-endian on the wire */
	mmc->scr[0] = __be32_to_cpu(scr[0]);
	mmc->scr[1] = __be32_to_cpu(scr[1]);

	/* SD_SPEC field, bits [59:56] of the SCR */
	switch ((mmc->scr[0] >> 24) & 0xf) {
	case 0:
		mmc->version = SD_VERSION_1_0;
		break;
	case 1:
		mmc->version = SD_VERSION_1_10;
		break;
	case 2:
		mmc->version = SD_VERSION_2;
		/* SD_SPEC3 bit promotes the card to SD 3.0 */
		if ((mmc->scr[0] >> 15) & 0x1)
			mmc->version = SD_VERSION_3;
		break;
	default:
		mmc->version = SD_VERSION_1_0;
		break;
	}

	if (mmc->scr[0] & SD_DATA_4BIT)
		mmc->card_caps |= MMC_MODE_4BIT;

	/* Version 1.0 doesn't support switching */
	if (mmc->version == SD_VERSION_1_0)
		return 0;

	timeout = 4;
	while (timeout--) {
		err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
				(u8 *)switch_status);

		if (err)
			return err;

		/* The high-speed function is busy.  Try again */
		if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
			break;
	}

	/* If high-speed isn't supported, we return */
	if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
		mmc->card_caps |= MMC_CAP(SD_HS);

	/* Version before 3.0 don't support UHS modes */
	if (mmc->version < SD_VERSION_3)
		return 0;

	/* Decode the UHS bus-speed support bits from the switch status */
	sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
	if (sd3_bus_mode & SD_MODE_UHS_SDR104)
		mmc->card_caps |= MMC_CAP(UHS_SDR104);
	if (sd3_bus_mode & SD_MODE_UHS_SDR50)
		mmc->card_caps |= MMC_CAP(UHS_SDR50);
	if (sd3_bus_mode & SD_MODE_UHS_SDR25)
		mmc->card_caps |= MMC_CAP(UHS_SDR25);
	if (sd3_bus_mode & SD_MODE_UHS_SDR12)
		mmc->card_caps |= MMC_CAP(UHS_SDR12);
	if (sd3_bus_mode & SD_MODE_UHS_DDR50)
		mmc->card_caps |= MMC_CAP(UHS_DDR50);

	return 0;
}
1272
1273 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1274 {
1275 int err;
1276
1277 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1278 int speed;
1279
1280 switch (mode) {
1281 case SD_LEGACY:
1282 case UHS_SDR12:
1283 speed = UHS_SDR12_BUS_SPEED;
1284 break;
1285 case SD_HS:
1286 case UHS_SDR25:
1287 speed = UHS_SDR25_BUS_SPEED;
1288 break;
1289 case UHS_SDR50:
1290 speed = UHS_SDR50_BUS_SPEED;
1291 break;
1292 case UHS_DDR50:
1293 speed = UHS_DDR50_BUS_SPEED;
1294 break;
1295 case UHS_SDR104:
1296 speed = UHS_SDR104_BUS_SPEED;
1297 break;
1298 default:
1299 return -EINVAL;
1300 }
1301
1302 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
1303 if (err)
1304 return err;
1305
1306 if ((__be32_to_cpu(switch_status[4]) >> 24) != speed)
1307 return -ENOTSUPP;
1308
1309 return 0;
1310 }
1311
1312 int sd_select_bus_width(struct mmc *mmc, int w)
1313 {
1314 int err;
1315 struct mmc_cmd cmd;
1316
1317 if ((w != 4) && (w != 1))
1318 return -EINVAL;
1319
1320 cmd.cmdidx = MMC_CMD_APP_CMD;
1321 cmd.resp_type = MMC_RSP_R1;
1322 cmd.cmdarg = mmc->rca << 16;
1323
1324 err = mmc_send_cmd(mmc, &cmd, NULL);
1325 if (err)
1326 return err;
1327
1328 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1329 cmd.resp_type = MMC_RSP_R1;
1330 if (w == 4)
1331 cmd.cmdarg = 2;
1332 else if (w == 1)
1333 cmd.cmdarg = 0;
1334 err = mmc_send_cmd(mmc, &cmd, NULL);
1335 if (err)
1336 return err;
1337
1338 return 0;
1339 }
1340
/*
 * Read the 64-byte SD Status register (ACMD13) and decode the erase
 * timing fields into mmc->ssr (allocation unit size, erase timeout and
 * erase offset). Retries the data transfer a few times on failure.
 *
 * Return: 0 on success (including when the AU size is invalid, which is
 * only logged), or a negative error from mmc_send_cmd().
 */
static int sd_read_ssr(struct mmc *mmc)
{
	int err, i;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
	struct mmc_data data;
	int timeout = 3;
	unsigned int au, eo, et, es;

	/* ACMD13 must be prefixed with CMD55 (APP_CMD) */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SD_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

retry_ssr:
	data.dest = (char *)ssr;
	data.blocksize = 64;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	if (err) {
		/* up to 4 attempts in total: retry while timeout is non-zero */
		if (timeout--)
			goto retry_ssr;

		return err;
	}

	/* SD status is big-endian on the wire */
	for (i = 0; i < 16; i++)
		ssr[i] = be32_to_cpu(ssr[i]);

	au = (ssr[2] >> 12) & 0xF;
	/* AU codes above 9 are only defined from SD spec 3.0 onwards */
	if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
		mmc->ssr.au = sd_au_size[au];
		es = (ssr[3] >> 24) & 0xFF;
		es |= (ssr[2] & 0xFF) << 8;
		et = (ssr[3] >> 18) & 0x3F;
		if (es && et) {
			eo = (ssr[3] >> 16) & 0x3;
			mmc->ssr.erase_timeout = (et * 1000) / es;
			mmc->ssr.erase_offset = eo * 1000;
		}
	} else {
		debug("Invalid Allocation Unit Size.\n");
	}

	return 0;
}
1396
/*
 * Tables for decoding the CSD TRAN_SPEED field (see mmc_startup():
 * rate = fbase[bits 2:0] * multipliers[bits 6:3]).
 */
/* frequency bases */
/* divided by 10 to be nice to platforms without floating point */
static const int fbase[] = {
	10000,
	100000,
	1000000,
	10000000,
};

/* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
 * to platforms without floating point.
 */
static const u8 multipliers[] = {
	0,	/* reserved */
	10,
	12,
	13,
	15,
	20,
	25,
	30,
	35,
	40,
	45,
	50,
	55,
	60,
	70,
	80,
};
1427
1428 static inline int bus_width(uint cap)
1429 {
1430 if (cap == MMC_MODE_8BIT)
1431 return 8;
1432 if (cap == MMC_MODE_4BIT)
1433 return 4;
1434 if (cap == MMC_MODE_1BIT)
1435 return 1;
1436 printf("invalid bus witdh capability 0x%x\n", cap);
1437 return 0;
1438 }
1439
#if !CONFIG_IS_ENABLED(DM_MMC)
/* Non-DM fallback: tuning is not supported through the legacy ops table */
static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
{
	return -ENOTSUPP;
}

/* Non-DM fallback: no init stream is sent */
static void mmc_send_init_stream(struct mmc *mmc)
{
}

/*
 * Push the current bus parameters (clock, width, mode, voltage) to the
 * host controller driver via its optional set_ios callback.
 */
static int mmc_set_ios(struct mmc *mmc)
{
	int ret = 0;

	if (mmc->cfg->ops->set_ios)
		ret = mmc->cfg->ops->set_ios(mmc);

	return ret;
}
#endif
1460
1461 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1462 {
1463 if (clock > mmc->cfg->f_max)
1464 clock = mmc->cfg->f_max;
1465
1466 if (clock < mmc->cfg->f_min)
1467 clock = mmc->cfg->f_min;
1468
1469 mmc->clock = clock;
1470 mmc->clk_disable = disable;
1471
1472 return mmc_set_ios(mmc);
1473 }
1474
/* Record the new host-side bus width and push it to the host driver. */
static int mmc_set_bus_width(struct mmc *mmc, uint width)
{
	mmc->bus_width = width;

	return mmc_set_ios(mmc);
}
1481
#if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
/*
 * helper function to display the capabilities in a human
 * friendly manner. The capabilities include bus width and
 * supported modes.
 */
void mmc_dump_capabilities(const char *text, uint caps)
{
	enum bus_mode mode;

	printf("%s: widths [", text);
	if (caps & MMC_MODE_8BIT)
		printf("8, ");
	if (caps & MMC_MODE_4BIT)
		printf("4, ");
	if (caps & MMC_MODE_1BIT)
		printf("1, ");
	/* "\b\b" erases the trailing ", " left by the loop above */
	printf("\b\b] modes [");
	for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
		if (MMC_CAP(mode) & caps)
			printf("%s, ", mmc_mode_name(mode));
	printf("\b\b]\n");
}
#endif
1506
/* One entry in a mode-preference table (see sd/mmc_modes_by_pref). */
struct mode_width_tuning {
	enum bus_mode mode;	/* bus mode this entry describes */
	uint widths;		/* MMC_MODE_xBIT widths usable in this mode */
	uint tuning;		/* tuning command opcode, or 0 if none needed */
};
1512
1513 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1514 {
1515 switch (voltage) {
1516 case MMC_SIGNAL_VOLTAGE_000: return 0;
1517 case MMC_SIGNAL_VOLTAGE_330: return 3300;
1518 case MMC_SIGNAL_VOLTAGE_180: return 1800;
1519 case MMC_SIGNAL_VOLTAGE_120: return 1200;
1520 }
1521 return -EINVAL;
1522 }
1523
1524 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1525 {
1526 int err;
1527
1528 if (mmc->signal_voltage == signal_voltage)
1529 return 0;
1530
1531 mmc->signal_voltage = signal_voltage;
1532 err = mmc_set_ios(mmc);
1533 if (err)
1534 debug("unable to set voltage (err %d)\n", err);
1535
1536 return err;
1537 }
1538
/* SD bus modes ordered from most to least preferred (fastest first) */
static const struct mode_width_tuning sd_modes_by_pref[] = {
	{
		.mode = UHS_SDR104,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
		.tuning = MMC_CMD_SEND_TUNING_BLOCK
	},
	{
		.mode = UHS_SDR50,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = UHS_DDR50,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = UHS_SDR25,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = SD_HS,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = UHS_SDR12,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = SD_LEGACY,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	}
};

/*
 * Iterate mwt over sd_modes_by_pref, visiting only the entries whose
 * mode is present in caps.
 */
#define for_each_sd_mode_by_pref(caps, mwt) \
	for (mwt = sd_modes_by_pref;\
	     mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
	     mwt++) \
		if (caps & MMC_CAP(mwt->mode))
1576
/*
 * Pick the fastest SD bus mode/width combination supported by both the
 * card and the host. Walks sd_modes_by_pref (fastest first), configures
 * card and host, runs tuning when required and validates the result
 * with an SD-status read before accepting the configuration.
 *
 * @mmc:	device to configure
 * @card_caps:	capability mask reported by the card
 * Return: 0 on success, -ENOTSUPP if no combination works.
 */
static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
{
	int err;
	uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
	const struct mode_width_tuning *mwt;
	bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
	uint caps;


	/* Restrict card's capabilities by what the host can do */
	caps = card_caps & (mmc->host_caps | MMC_MODE_1BIT);

	/* card did not accept 1.8V signalling, so no UHS modes */
	if (!uhs_en)
		caps &= ~UHS_CAPS;

	for_each_sd_mode_by_pref(caps, mwt) {
		uint *w;

		for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
			if (*w & caps & mwt->widths) {
				debug("trying mode %s width %d (at %d MHz)\n",
				      mmc_mode_name(mwt->mode),
				      bus_width(*w),
				      mmc_mode2freq(mmc, mwt->mode) / 1000000);

				/* configure the bus width (card + host) */
				err = sd_select_bus_width(mmc, bus_width(*w));
				if (err)
					goto error;
				mmc_set_bus_width(mmc, bus_width(*w));

				/* configure the bus mode (card) */
				err = sd_set_card_speed(mmc, mwt->mode);
				if (err)
					goto error;

				/* configure the bus mode (host) */
				mmc_select_mode(mmc, mwt->mode);
				mmc_set_clock(mmc, mmc->tran_speed, false);

				/* execute tuning if needed */
				if (mwt->tuning && !mmc_host_is_spi(mmc)) {
					err = mmc_execute_tuning(mmc,
								 mwt->tuning);
					if (err) {
						debug("tuning failed\n");
						goto error;
					}
				}

				/* SSR read doubles as a transfer sanity check */
				err = sd_read_ssr(mmc);
				if (!err)
					return 0;

				printf("bad ssr\n");

error:
				/* revert to a safer bus speed */
				mmc_select_mode(mmc, SD_LEGACY);
				mmc_set_clock(mmc, mmc->tran_speed, false);
			}
		}
	}

	printf("unable to select a mode\n");
	return -ENOTSUPP;
}
1644
1645 /*
1646 * read the compare the part of ext csd that is constant.
1647 * This can be used to check that the transfer is working
1648 * as expected.
1649 */
1650 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1651 {
1652 int err;
1653 const u8 *ext_csd = mmc->ext_csd;
1654 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1655
1656 err = mmc_send_ext_csd(mmc, test_csd);
1657 if (err)
1658 return err;
1659
1660 /* Only compare read only fields */
1661 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1662 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1663 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1664 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1665 ext_csd[EXT_CSD_REV]
1666 == test_csd[EXT_CSD_REV] &&
1667 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1668 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1669 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1670 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1671 return 0;
1672
1673 return -EBADMSG;
1674 }
1675
/*
 * Select the lowest signalling voltage acceptable to both the card (per
 * its ext_csd card type for the given bus mode) and the host
 * (allowed_mask). Tries candidates lowest-first via ffs().
 *
 * Return: 0 once a voltage is set successfully, -ENOTSUPP if none of
 * the common voltages could be applied.
 */
static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
				  uint32_t allowed_mask)
{
	u32 card_mask = 0;

	switch (mode) {
	case MMC_HS_200:
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_HS200_1_8V)
			card_mask |= MMC_SIGNAL_VOLTAGE_180;
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_HS200_1_2V)
			card_mask |= MMC_SIGNAL_VOLTAGE_120;
		break;
	case MMC_DDR_52:
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
			card_mask |= MMC_SIGNAL_VOLTAGE_330 |
				     MMC_SIGNAL_VOLTAGE_180;
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
			card_mask |= MMC_SIGNAL_VOLTAGE_120;
		break;
	default:
		/* all other modes run at 3.3V */
		card_mask |= MMC_SIGNAL_VOLTAGE_330;
		break;
	}

	while (card_mask & allowed_mask) {
		enum mmc_voltage best_match;

		/* lowest set bit = lowest candidate voltage */
		best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
		if (!mmc_set_signal_voltage(mmc, best_match))
			return 0;

		allowed_mask &= ~best_match;
	}

	return -ENOTSUPP;
}
1712
/* eMMC bus modes ordered from most to least preferred (fastest first) */
static const struct mode_width_tuning mmc_modes_by_pref[] = {
	{
		.mode = MMC_HS_200,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
		.tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
	},
	{
		.mode = MMC_DDR_52,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
	},
	{
		.mode = MMC_HS_52,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = MMC_HS,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = MMC_LEGACY,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	}
};

/*
 * Iterate mwt over mmc_modes_by_pref, visiting only the entries whose
 * mode is present in caps.
 */
#define for_each_mmc_mode_by_pref(caps, mwt) \
	for (mwt = mmc_modes_by_pref;\
	     mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
	     mwt++) \
		if (caps & MMC_CAP(mwt->mode))

/* Maps a width capability (+SDR/DDR) onto the EXT_CSD BUS_WIDTH value */
static const struct ext_csd_bus_width {
	uint cap;		/* MMC_MODE_xBIT capability flag */
	bool is_ddr;		/* true for the DDR encodings */
	uint ext_csd_bits;	/* value to write to EXT_CSD_BUS_WIDTH */
} ext_csd_bus_width[] = {
	{MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
	{MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
	{MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
	{MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
	{MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
};

/*
 * Iterate ecbv over ext_csd_bus_width, visiting only entries matching
 * the requested DDR-ness and present in caps (widest first).
 */
#define for_each_supported_width(caps, ddr, ecbv) \
	for (ecbv = ext_csd_bus_width;\
	     ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
	     ecbv++) \
		if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
1760
/*
 * Pick the fastest eMMC bus mode/width combination supported by both
 * the card and the host. Walks mmc_modes_by_pref (fastest first) and
 * for each mode tries the supported widths (widest first), validating
 * each configuration with an ext_csd read-back before accepting it.
 *
 * @mmc:	device to configure
 * @card_caps:	capability mask reported by the card
 * Return: 0 on success, -ENOTSUPP if no combination works.
 */
static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
{
	int err;
	const struct mode_width_tuning *mwt;
	const struct ext_csd_bus_width *ecbw;

	/* Restrict card's capabilities by what the host can do */
	card_caps &= (mmc->host_caps | MMC_MODE_1BIT);

	/* Only version 4 of MMC supports wider bus widths */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	if (!mmc->ext_csd) {
		debug("No ext_csd found!\n"); /* this should never happen */
		return -ENOTSUPP;
	}

	/* start from the safe legacy speed before probing faster modes */
	mmc_set_clock(mmc, mmc->legacy_speed, false);

	for_each_mmc_mode_by_pref(card_caps, mwt) {
		for_each_supported_width(card_caps & mwt->widths,
					 mmc_is_mode_ddr(mwt->mode), ecbw) {
			enum mmc_voltage old_voltage;
			debug("trying mode %s width %d (at %d MHz)\n",
			      mmc_mode_name(mwt->mode),
			      bus_width(ecbw->cap),
			      mmc_mode2freq(mmc, mwt->mode) / 1000000);
			old_voltage = mmc->signal_voltage;
			err = mmc_set_lowest_voltage(mmc, mwt->mode,
						     MMC_ALL_SIGNAL_VOLTAGE);
			if (err)
				continue;

			/* configure the bus width (card + host) */
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_BUS_WIDTH,
					 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
			if (err)
				goto error;
			mmc_set_bus_width(mmc, bus_width(ecbw->cap));

			/* configure the bus speed (card) */
			err = mmc_set_card_speed(mmc, mwt->mode);
			if (err)
				goto error;

			/*
			 * configure the bus width AND the ddr mode (card)
			 * The host side will be taken care of in the next step
			 */
			if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
				err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
						 EXT_CSD_BUS_WIDTH,
						 ecbw->ext_csd_bits);
				if (err)
					goto error;
			}

			/* configure the bus mode (host) */
			mmc_select_mode(mmc, mwt->mode);
			mmc_set_clock(mmc, mmc->tran_speed, false);

			/* execute tuning if needed */
			if (mwt->tuning) {
				err = mmc_execute_tuning(mmc, mwt->tuning);
				if (err) {
					debug("tuning failed\n");
					goto error;
				}
			}

			/* do a transfer to check the configuration */
			err = mmc_read_and_compare_ext_csd(mmc);
			if (!err)
				return 0;
error:
			mmc_set_signal_voltage(mmc, old_voltage);
			/* if an error occurred, revert to a safer bus mode */
			mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				   EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
			mmc_select_mode(mmc, MMC_LEGACY);
			mmc_set_bus_width(mmc, 1);
		}
	}

	printf("unable to select a mode\n");

	return -ENOTSUPP;
}
1851
/*
 * MMC v4+ specific part of card initialization: read the ext_csd and
 * decode version, capacity, partition layout and erase group geometry
 * into the mmc structure. No-op for SD cards and pre-v4 MMC.
 *
 * Return: 0 on success or a negative error code.
 */
static int mmc_startup_v4(struct mmc *mmc)
{
	int err, i;
	u64 capacity;
	bool has_parts = false;
	bool part_completed;
	u8 *ext_csd;

	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
		return 0;

	/* cached for later comparison by mmc_read_and_compare_ext_csd() */
	ext_csd = malloc_cache_aligned(MMC_MAX_BLOCK_LEN);
	if (!ext_csd)
		return -ENOMEM;

	mmc->ext_csd = ext_csd;

	/* check ext_csd version and capacity */
	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		return err;
	if (ext_csd[EXT_CSD_REV] >= 2) {
		/*
		 * According to the JEDEC Standard, the value of
		 * ext_csd's capacity is valid if the value is more
		 * than 2GB
		 */
		capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
			| ext_csd[EXT_CSD_SEC_CNT + 1] << 8
			| ext_csd[EXT_CSD_SEC_CNT + 2] << 16
			| ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
		capacity *= MMC_MAX_BLOCK_LEN;
		if ((capacity >> 20) > 2 * 1024)
			mmc->capacity_user = capacity;
	}

	/* map EXT_CSD_REV onto the spec version */
	switch (ext_csd[EXT_CSD_REV]) {
	case 1:
		mmc->version = MMC_VERSION_4_1;
		break;
	case 2:
		mmc->version = MMC_VERSION_4_2;
		break;
	case 3:
		mmc->version = MMC_VERSION_4_3;
		break;
	case 5:
		mmc->version = MMC_VERSION_4_41;
		break;
	case 6:
		mmc->version = MMC_VERSION_4_5;
		break;
	case 7:
		mmc->version = MMC_VERSION_5_0;
		break;
	case 8:
		mmc->version = MMC_VERSION_5_1;
		break;
	}

	/* The partition data may be non-zero but it is only
	 * effective if PARTITION_SETTING_COMPLETED is set in
	 * EXT_CSD, so ignore any data if this bit is not set,
	 * except for enabling the high-capacity group size
	 * definition (see below).
	 */
	part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
			    EXT_CSD_PARTITION_SETTING_COMPLETED);

	/* store the partition info of emmc */
	mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
	if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
	    ext_csd[EXT_CSD_BOOT_MULT])
		mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
	if (part_completed &&
	    (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
		mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];

	/* boot/RPMB sizes are given in 128KiB units (<< 17) */
	mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;

	mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;

	/* decode the four general-purpose partition sizes */
	for (i = 0; i < 4; i++) {
		int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
		uint mult = (ext_csd[idx + 2] << 16) +
			(ext_csd[idx + 1] << 8) + ext_csd[idx];
		if (mult)
			has_parts = true;
		if (!part_completed)
			continue;
		mmc->capacity_gp[i] = mult;
		mmc->capacity_gp[i] *=
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
		mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
		mmc->capacity_gp[i] <<= 19;
	}

	if (part_completed) {
		mmc->enh_user_size =
			(ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
			(ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
			ext_csd[EXT_CSD_ENH_SIZE_MULT];
		mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
		mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
		mmc->enh_user_size <<= 19;
		mmc->enh_user_start =
			(ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
			(ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
			(ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
			ext_csd[EXT_CSD_ENH_START_ADDR];
		if (mmc->high_capacity)
			mmc->enh_user_start <<= 9;
	}

	/*
	 * Host needs to enable ERASE_GRP_DEF bit if device is
	 * partitioned. This bit will be lost every time after a reset
	 * or power off. This will affect erase size.
	 */
	if (part_completed)
		has_parts = true;
	if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
	    (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
		has_parts = true;
	if (has_parts) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1);

		if (err)
			return err;

		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
	}

	if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
		/* Read out group size from ext_csd */
		mmc->erase_grp_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
		/*
		 * if high capacity and partition setting completed
		 * SEC_COUNT is valid even if it is smaller than 2 GiB
		 * JEDEC Standard JESD84-B45, 6.2.4
		 */
		if (mmc->high_capacity && part_completed) {
			capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
				(ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
				(ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
				(ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
			capacity *= MMC_MAX_BLOCK_LEN;
			mmc->capacity_user = capacity;
		}
	} else {
		/* Calculate the group size from the csd value. */
		int erase_gsz, erase_gmul;

		erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
		erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
		mmc->erase_grp_size = (erase_gsz + 1)
			* (erase_gmul + 1);
	}

	/* high-capacity write-protect group size, in 512-byte blocks */
	mmc->hc_wp_grp_size = 1024
		* ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
		* ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

	mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];

	return 0;
}
2021
2022 static int mmc_startup(struct mmc *mmc)
2023 {
2024 int err, i;
2025 uint mult, freq;
2026 u64 cmult, csize;
2027 struct mmc_cmd cmd;
2028 struct blk_desc *bdesc;
2029
2030 #ifdef CONFIG_MMC_SPI_CRC_ON
2031 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2032 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2033 cmd.resp_type = MMC_RSP_R1;
2034 cmd.cmdarg = 1;
2035 err = mmc_send_cmd(mmc, &cmd, NULL);
2036 if (err)
2037 return err;
2038 }
2039 #endif
2040
2041 /* Put the Card in Identify Mode */
2042 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2043 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2044 cmd.resp_type = MMC_RSP_R2;
2045 cmd.cmdarg = 0;
2046
2047 err = mmc_send_cmd(mmc, &cmd, NULL);
2048
2049 #ifdef CONFIG_MMC_QUIRKS
2050 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
2051 int retries = 4;
2052 /*
2053 * It has been seen that SEND_CID may fail on the first
2054 * attempt, let's try a few more time
2055 */
2056 do {
2057 err = mmc_send_cmd(mmc, &cmd, NULL);
2058 if (!err)
2059 break;
2060 } while (retries--);
2061 }
2062 #endif
2063
2064 if (err)
2065 return err;
2066
2067 memcpy(mmc->cid, cmd.response, 16);
2068
2069 /*
2070 * For MMC cards, set the Relative Address.
2071 * For SD cards, get the Relatvie Address.
2072 * This also puts the cards into Standby State
2073 */
2074 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2075 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2076 cmd.cmdarg = mmc->rca << 16;
2077 cmd.resp_type = MMC_RSP_R6;
2078
2079 err = mmc_send_cmd(mmc, &cmd, NULL);
2080
2081 if (err)
2082 return err;
2083
2084 if (IS_SD(mmc))
2085 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2086 }
2087
2088 /* Get the Card-Specific Data */
2089 cmd.cmdidx = MMC_CMD_SEND_CSD;
2090 cmd.resp_type = MMC_RSP_R2;
2091 cmd.cmdarg = mmc->rca << 16;
2092
2093 err = mmc_send_cmd(mmc, &cmd, NULL);
2094
2095 if (err)
2096 return err;
2097
2098 mmc->csd[0] = cmd.response[0];
2099 mmc->csd[1] = cmd.response[1];
2100 mmc->csd[2] = cmd.response[2];
2101 mmc->csd[3] = cmd.response[3];
2102
2103 if (mmc->version == MMC_VERSION_UNKNOWN) {
2104 int version = (cmd.response[0] >> 26) & 0xf;
2105
2106 switch (version) {
2107 case 0:
2108 mmc->version = MMC_VERSION_1_2;
2109 break;
2110 case 1:
2111 mmc->version = MMC_VERSION_1_4;
2112 break;
2113 case 2:
2114 mmc->version = MMC_VERSION_2_2;
2115 break;
2116 case 3:
2117 mmc->version = MMC_VERSION_3;
2118 break;
2119 case 4:
2120 mmc->version = MMC_VERSION_4;
2121 break;
2122 default:
2123 mmc->version = MMC_VERSION_1_2;
2124 break;
2125 }
2126 }
2127
2128 /* divide frequency by 10, since the mults are 10x bigger */
2129 freq = fbase[(cmd.response[0] & 0x7)];
2130 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2131
2132 mmc->legacy_speed = freq * mult;
2133 mmc_select_mode(mmc, MMC_LEGACY);
2134
2135 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2136 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2137
2138 if (IS_SD(mmc))
2139 mmc->write_bl_len = mmc->read_bl_len;
2140 else
2141 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
2142
2143 if (mmc->high_capacity) {
2144 csize = (mmc->csd[1] & 0x3f) << 16
2145 | (mmc->csd[2] & 0xffff0000) >> 16;
2146 cmult = 8;
2147 } else {
2148 csize = (mmc->csd[1] & 0x3ff) << 2
2149 | (mmc->csd[2] & 0xc0000000) >> 30;
2150 cmult = (mmc->csd[2] & 0x00038000) >> 15;
2151 }
2152
2153 mmc->capacity_user = (csize + 1) << (cmult + 2);
2154 mmc->capacity_user *= mmc->read_bl_len;
2155 mmc->capacity_boot = 0;
2156 mmc->capacity_rpmb = 0;
2157 for (i = 0; i < 4; i++)
2158 mmc->capacity_gp[i] = 0;
2159
2160 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2161 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2162
2163 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2164 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2165
2166 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2167 cmd.cmdidx = MMC_CMD_SET_DSR;
2168 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2169 cmd.resp_type = MMC_RSP_NONE;
2170 if (mmc_send_cmd(mmc, &cmd, NULL))
2171 printf("MMC: SET_DSR failed\n");
2172 }
2173
2174 /* Select the card, and put it into Transfer Mode */
2175 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2176 cmd.cmdidx = MMC_CMD_SELECT_CARD;
2177 cmd.resp_type = MMC_RSP_R1;
2178 cmd.cmdarg = mmc->rca << 16;
2179 err = mmc_send_cmd(mmc, &cmd, NULL);
2180
2181 if (err)
2182 return err;
2183 }
2184
2185 /*
2186 * For SD, its erase group is always one sector
2187 */
2188 mmc->erase_grp_size = 1;
2189 mmc->part_config = MMCPART_NOAVAILABLE;
2190
2191 err = mmc_startup_v4(mmc);
2192 if (err)
2193 return err;
2194
2195 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2196 if (err)
2197 return err;
2198
2199 if (IS_SD(mmc)) {
2200 err = sd_get_capabilities(mmc);
2201 if (err)
2202 return err;
2203 err = sd_select_mode_and_width(mmc, mmc->card_caps);
2204 } else {
2205 err = mmc_get_capabilities(mmc);
2206 if (err)
2207 return err;
2208 mmc_select_mode_and_width(mmc, mmc->card_caps);
2209 }
2210
2211 if (err)
2212 return err;
2213
2214 mmc->best_mode = mmc->selected_mode;
2215
2216 /* Fix the block length for DDR mode */
2217 if (mmc->ddr_mode) {
2218 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2219 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2220 }
2221
2222 /* fill in device description */
2223 bdesc = mmc_get_blk_desc(mmc);
2224 bdesc->lun = 0;
2225 bdesc->hwpart = 0;
2226 bdesc->type = 0;
2227 bdesc->blksz = mmc->read_bl_len;
2228 bdesc->log2blksz = LOG2(bdesc->blksz);
2229 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2230 #if !defined(CONFIG_SPL_BUILD) || \
2231 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2232 !defined(CONFIG_USE_TINY_PRINTF))
2233 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2234 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2235 (mmc->cid[3] >> 16) & 0xffff);
2236 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2237 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2238 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2239 (mmc->cid[2] >> 24) & 0xff);
2240 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2241 (mmc->cid[2] >> 16) & 0xf);
2242 #else
2243 bdesc->vendor[0] = 0;
2244 bdesc->product[0] = 0;
2245 bdesc->revision[0] = 0;
2246 #endif
2247 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
2248 part_init(bdesc);
2249 #endif
2250
2251 return 0;
2252 }
2253
2254 static int mmc_send_if_cond(struct mmc *mmc)
2255 {
2256 struct mmc_cmd cmd;
2257 int err;
2258
2259 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2260 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2261 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2262 cmd.resp_type = MMC_RSP_R7;
2263
2264 err = mmc_send_cmd(mmc, &cmd, NULL);
2265
2266 if (err)
2267 return err;
2268
2269 if ((cmd.response[0] & 0xff) != 0xaa)
2270 return -EOPNOTSUPP;
2271 else
2272 mmc->version = SD_VERSION_2;
2273
2274 return 0;
2275 }
2276
#if !CONFIG_IS_ENABLED(DM_MMC)
/* board-specific MMC power initializations. */
/* Weak default: boards without special power sequencing do nothing. */
__weak void board_mmc_power_init(void)
{
}
#endif
2283
/*
 * Look up the card's power supplies. With DM_MMC + DM_REGULATOR the
 * vmmc/vqmmc regulators are resolved from the device tree (missing
 * supplies are only logged); otherwise board code is invoked.
 *
 * Return: always 0.
 */
static int mmc_power_init(struct mmc *mmc)
{
#if CONFIG_IS_ENABLED(DM_MMC)
#if CONFIG_IS_ENABLED(DM_REGULATOR)
	int ret;

	ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
					  &mmc->vmmc_supply);
	if (ret)
		debug("%s: No vmmc supply\n", mmc->dev->name);

	ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
					  &mmc->vqmmc_supply);
	if (ret)
		debug("%s: No vqmmc supply\n", mmc->dev->name);
#endif
#else /* !CONFIG_DM_MMC */
	/*
	 * Driver model should use a regulator, as above, rather than calling
	 * out to board code.
	 */
	board_mmc_power_init();
#endif
	return 0;
}
2309
2310 /*
2311 * put the host in the initial state:
2312 * - turn on Vdd (card power supply)
2313 * - configure the bus width and clock to minimal values
2314 */
2315 static void mmc_set_initial_state(struct mmc *mmc)
2316 {
2317 int err;
2318
2319 /* First try to set 3.3V. If it fails set to 1.8V */
2320 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2321 if (err != 0)
2322 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2323 if (err != 0)
2324 printf("mmc: failed to set signal voltage\n");
2325
2326 mmc_select_mode(mmc, MMC_LEGACY);
2327 mmc_set_bus_width(mmc, 1);
2328 mmc_set_clock(mmc, 0, false);
2329 }
2330
/*
 * Enable the card's Vdd supply (vmmc regulator) when driver model
 * regulators are available; otherwise a no-op.
 *
 * Return: 0 on success or the regulator error.
 */
static int mmc_power_on(struct mmc *mmc)
{
#if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
	if (mmc->vmmc_supply) {
		int ret = regulator_set_enable(mmc->vmmc_supply, true);

		if (ret) {
			puts("Error enabling VMMC supply\n");
			return ret;
		}
	}
#endif
	return 0;
}
2345
/*
 * Power the card down: gate the bus clock, then disable the vmmc
 * regulator when driver model regulators are available.
 *
 * Return: 0 on success or the regulator error.
 */
static int mmc_power_off(struct mmc *mmc)
{
	/* 1 Hz with disable=true effectively stops the clock */
	mmc_set_clock(mmc, 1, true);
#if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
	if (mmc->vmmc_supply) {
		int ret = regulator_set_enable(mmc->vmmc_supply, false);

		if (ret) {
			debug("Error disabling VMMC supply\n");
			return ret;
		}
	}
#endif
	return 0;
}
2361
/*
 * Perform a full power cycle of the card: power off, wait, power on.
 *
 * Return: 0 on success or the first power-off/power-on error.
 */
static int mmc_power_cycle(struct mmc *mmc)
{
	int err = mmc_power_off(mmc);

	if (err)
		return err;

	/*
	 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
	 * to be on the safer side.
	 */
	udelay(2000);

	return mmc_power_on(mmc);
}
2376
/*
 * First phase of card initialization: check card presence, power the
 * card, reset it (CMD0) and start the operating-condition negotiation
 * (SD ACMD41 or MMC CMD1). On success init_in_progress is set and
 * mmc_complete_init() finishes the job.
 *
 * Return: 0 on success, -ENOMEDIUM if no card, -EOPNOTSUPP if the card
 * does not respond to voltage select, or another negative error.
 */
int mmc_start_init(struct mmc *mmc)
{
	bool no_card;
	bool uhs_en = supports_uhs(mmc->cfg->host_caps);
	int err;

	mmc->host_caps = mmc->cfg->host_caps;

	/* we pretend there's no card when init is NULL */
	no_card = mmc_getcd(mmc) == 0;
#if !CONFIG_IS_ENABLED(DM_MMC)
	no_card = no_card || (mmc->cfg->ops->init == NULL);
#endif
	if (no_card) {
		mmc->has_init = 0;
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("MMC: no card present\n");
#endif
		return -ENOMEDIUM;
	}

	if (mmc->has_init)
		return 0;

#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	mmc_adapter_card_type_ident();
#endif
	err = mmc_power_init(mmc);
	if (err)
		return err;

#ifdef CONFIG_MMC_QUIRKS
	mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
		      MMC_QUIRK_RETRY_SEND_CID;
#endif

	err = mmc_power_cycle(mmc);
	if (err) {
		/*
		 * if power cycling is not supported, we should not try
		 * to use the UHS modes, because we wouldn't be able to
		 * recover from an error during the UHS initialization.
		 */
		debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
		uhs_en = false;
		mmc->host_caps &= ~UHS_CAPS;
		err = mmc_power_on(mmc);
	}
	if (err)
		return err;

#if CONFIG_IS_ENABLED(DM_MMC)
	/* The device has already been probed ready for use */
#else
	/* made sure it's not NULL earlier */
	err = mmc->cfg->ops->init(mmc);
	if (err)
		return err;
#endif
	mmc->ddr_mode = 0;

retry:
	mmc_set_initial_state(mmc);
	mmc_send_init_stream(mmc);

	/* Reset the Card */
	err = mmc_go_idle(mmc);

	if (err)
		return err;

	/* The internal partition reset to user partition(0) at every CMD0*/
	mmc_get_blk_desc(mmc)->hwpart = 0;

	/* Test for SD version 2 */
	/*
	 * NOTE(review): the CMD8 result is deliberately not acted upon
	 * here — err is overwritten below; presumably pre-v2 cards simply
	 * fail CMD8 and are handled by the op-cond path — confirm.
	 */
	err = mmc_send_if_cond(mmc);

	/* Now try to get the SD card's operating condition */
	err = sd_send_op_cond(mmc, uhs_en);
	if (err && uhs_en) {
		/* retry once without UHS if the UHS negotiation failed */
		uhs_en = false;
		mmc_power_cycle(mmc);
		goto retry;
	}

	/* If the command timed out, we check for an MMC card */
	if (err == -ETIMEDOUT) {
		err = mmc_send_op_cond(mmc);

		if (err) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			printf("Card did not respond to voltage select!\n");
#endif
			return -EOPNOTSUPP;
		}
	}

	if (!err)
		mmc->init_in_progress = 1;

	return err;
}
2479
2480 static int mmc_complete_init(struct mmc *mmc)
2481 {
2482 int err = 0;
2483
2484 mmc->init_in_progress = 0;
2485 if (mmc->op_cond_pending)
2486 err = mmc_complete_op_cond(mmc);
2487
2488 if (!err)
2489 err = mmc_startup(mmc);
2490 if (err)
2491 mmc->has_init = 0;
2492 else
2493 mmc->has_init = 1;
2494 return err;
2495 }
2496
2497 int mmc_init(struct mmc *mmc)
2498 {
2499 int err = 0;
2500 __maybe_unused unsigned start;
2501 #if CONFIG_IS_ENABLED(DM_MMC)
2502 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2503
2504 upriv->mmc = mmc;
2505 #endif
2506 if (mmc->has_init)
2507 return 0;
2508
2509 start = get_timer(0);
2510
2511 if (!mmc->init_in_progress)
2512 err = mmc_start_init(mmc);
2513
2514 if (!err)
2515 err = mmc_complete_init(mmc);
2516 if (err)
2517 printf("%s: %d, time %lu\n", __func__, err, get_timer(start));
2518
2519 return err;
2520 }
2521
2522 int mmc_set_dsr(struct mmc *mmc, u16 val)
2523 {
2524 mmc->dsr = val;
2525 return 0;
2526 }
2527
/*
 * CPU-specific MMC initializations.
 *
 * Weak default that reports failure (-1); SoC code may override it to
 * register its MMC controllers in the legacy (non-DM) probe path.
 */
__weak int cpu_mmc_init(bd_t *bis)
{
	return -1;
}
2533
/*
 * board-specific MMC initializations.
 *
 * Weak default that reports failure (-1); board code may override it to
 * register its MMC controllers.  mmc_probe() falls back to
 * cpu_mmc_init() when this returns a negative value.
 */
__weak int board_mmc_init(bd_t *bis)
{
	return -1;
}
2539
/*
 * Mark (or unmark) @mmc for early initialization: when preinit is set,
 * mmc_do_preinit() starts the card's init sequence during subsystem
 * bring-up instead of waiting for first use.
 */
void mmc_set_preinit(struct mmc *mmc, int preinit)
{
	mmc->preinit = preinit;
}
2544
#if CONFIG_IS_ENABLED(DM_MMC) && defined(CONFIG_SPL_BUILD)
/* SPL with driver model: devices are probed on demand, nothing to do. */
static int mmc_probe(bd_t *bis)
{
	return 0;
}
#elif CONFIG_IS_ENABLED(DM_MMC)
/* Driver model: enumerate and probe every device in the MMC uclass. */
static int mmc_probe(bd_t *bis)
{
	int ret, i;
	struct uclass *uc;
	struct udevice *dev;

	ret = uclass_get(UCLASS_MMC, &uc);
	if (ret)
		return ret;

	/*
	 * Try to add them in sequence order. Really with driver model we
	 * should allow holes, but the current MMC list does not allow that.
	 * So if we request 0, 1, 3 we will get 0, 1, 2.
	 *
	 * NOTE(review): only -ENODEV terminates this loop; any other error
	 * simply advances to the next sequence number — confirm no error
	 * code can recur for every i, which would loop forever.
	 */
	for (i = 0; ; i++) {
		ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
		if (ret == -ENODEV)
			break;
	}
	uclass_foreach_dev(dev, uc) {
		/* Probe failures are reported but do not abort the scan. */
		ret = device_probe(dev);
		if (ret)
			printf("%s - probe failed: %d\n", dev->name, ret);
	}

	return 0;
}
#else
/* Legacy (non-DM): let the board, then the CPU, register controllers. */
static int mmc_probe(bd_t *bis)
{
	if (board_mmc_init(bis) < 0)
		cpu_mmc_init(bis);

	return 0;
}
#endif
2588
/*
 * One-time MMC subsystem bring-up: probe all controllers and devices,
 * print the device list (non-SPL builds), and kick off early init for
 * any device flagged via mmc_set_preinit().
 *
 * Return: 0 on success (including repeat calls, which are no-ops), or
 * the error returned by mmc_probe().
 */
int mmc_initialize(bd_t *bis)
{
	static int initialized = 0;
	int ret;
	if (initialized)	/* Avoid initializing mmc multiple times */
		return 0;
	initialized = 1;

#if !CONFIG_IS_ENABLED(BLK)
#if !CONFIG_IS_ENABLED(MMC_TINY)
	/* Legacy block layer keeps its own list of MMC devices. */
	mmc_list_init();
#endif
#endif
	ret = mmc_probe(bis);
	if (ret)
		return ret;

#ifndef CONFIG_SPL_BUILD
	print_mmc_devices(',');
#endif

	mmc_do_preinit();
	return 0;
}
2613
#ifdef CONFIG_CMD_BKOPS_ENABLE
/*
 * Enable manual background operations (BKOPS) on an eMMC device by
 * setting the BKOPS_EN byte in EXT_CSD.
 *
 * Return: 0 if BKOPS was enabled (or already was), -EMEDIUMTYPE if the
 * device does not advertise BKOPS support, or a negative error from
 * reading EXT_CSD / issuing the SWITCH command.
 */
int mmc_set_bkops_enable(struct mmc *mmc)
{
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
	int ret;

	ret = mmc_send_ext_csd(mmc, ext_csd);
	if (ret) {
		puts("Could not get ext_csd register values\n");
		return ret;
	}

	/* The device must advertise BKOPS support before we switch. */
	if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
		puts("Background operations not supported on device\n");
		return -EMEDIUMTYPE;
	}

	/* Nothing to do if the enable bit is already set. */
	if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
		puts("Background operations already enabled\n");
		return 0;
	}

	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
	if (ret) {
		puts("Failed to enable manual background operations\n");
		return ret;
	}

	puts("Enabled manual background operations\n");

	return 0;
}
#endif