]> git.ipfire.org Git - people/ms/u-boot.git/blob - drivers/mmc/mmc.c
mmc: make UHS and HS200 optional
[people/ms/u-boot.git] / drivers / mmc / mmc.c
1 /*
2 * Copyright 2008, Freescale Semiconductor, Inc
3 * Andy Fleming
4 *
5 * Based vaguely on the Linux code
6 *
7 * SPDX-License-Identifier: GPL-2.0+
8 */
9
10 #include <config.h>
11 #include <common.h>
12 #include <command.h>
13 #include <dm.h>
14 #include <dm/device-internal.h>
15 #include <errno.h>
16 #include <mmc.h>
17 #include <part.h>
18 #include <power/regulator.h>
19 #include <malloc.h>
20 #include <memalign.h>
21 #include <linux/list.h>
22 #include <div64.h>
23 #include "mmc_private.h"
24
/*
 * SD allocation-unit (AU) size lookup table, indexed by the AU_SIZE
 * field of the SD status register; values are in 512-byte sectors.
 * Index 0 means "not defined" per the SD spec.
 */
static const unsigned int sd_au_size[] = {
	0, SZ_16K / 512, SZ_32K / 512,
	SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
	SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
	SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
	SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512, SZ_64M / 512,
};
32
33 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
34 static int mmc_power_cycle(struct mmc *mmc);
35 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps);
36
37 #if CONFIG_IS_ENABLED(MMC_TINY)
38 static struct mmc mmc_static;
39 struct mmc *find_mmc_device(int dev_num)
40 {
41 return &mmc_static;
42 }
43
/*
 * Start early initialization of the single static MMC device if it is
 * flagged for preinit.
 */
void mmc_do_preinit(void)
{
	struct mmc *m = &mmc_static;
#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	/* FSL eSDHC adapter identification needs the device initialized early */
	mmc_set_preinit(m, 1);
#endif
	if (m->preinit)
		mmc_start_init(m);
}
53
54 struct blk_desc *mmc_get_blk_desc(struct mmc *mmc)
55 {
56 return &mmc->block_dev;
57 }
58 #endif
59
60 #if !CONFIG_IS_ENABLED(DM_MMC)
61
62 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/* Non-DM stub: waiting on the DAT0 line is not implemented without DM_MMC. */
static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout)
{
	return -ENOSYS;
}
67 #endif
68
/* Board hook for write-protect detection; -1 means "not handled by board". */
__weak int board_mmc_getwp(struct mmc *mmc)
{
	return -1;
}
73
74 int mmc_getwp(struct mmc *mmc)
75 {
76 int wp;
77
78 wp = board_mmc_getwp(mmc);
79
80 if (wp < 0) {
81 if (mmc->cfg->ops->getwp)
82 wp = mmc->cfg->ops->getwp(mmc);
83 else
84 wp = 0;
85 }
86
87 return wp;
88 }
89
/* Board hook for card-detect; -1 means "not handled by board". */
__weak int board_mmc_getcd(struct mmc *mmc)
{
	return -1;
}
94 #endif
95
96 #ifdef CONFIG_MMC_TRACE
/* Trace helper: log command index and argument before the command is sent. */
void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
{
	printf("CMD_SEND:%d\n", cmd->cmdidx);
	printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
}
102
/*
 * Trace helper: log the outcome of a command, decoding the response
 * words according to the response type. R2 (CID/CSD) responses are
 * additionally hex-dumped byte by byte, most significant byte first.
 */
void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
{
	int i;
	u8 *ptr;

	if (ret) {
		printf("\t\tRET\t\t\t %d\n", ret);
	} else {
		switch (cmd->resp_type) {
		case MMC_RSP_NONE:
			printf("\t\tMMC_RSP_NONE\n");
			break;
		case MMC_RSP_R1:
			printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
				cmd->response[0]);
			break;
		case MMC_RSP_R1b:
			printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
				cmd->response[0]);
			break;
		case MMC_RSP_R2:
			printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
				cmd->response[0]);
			printf("\t\t \t\t 0x%08X \n",
				cmd->response[1]);
			printf("\t\t \t\t 0x%08X \n",
				cmd->response[2]);
			printf("\t\t \t\t 0x%08X \n",
				cmd->response[3]);
			printf("\n");
			printf("\t\t\t\t\tDUMPING DATA\n");
			for (i = 0; i < 4; i++) {
				int j;
				printf("\t\t\t\t\t%03d - ", i*4);
				/* walk each response word from its MSB down */
				ptr = (u8 *)&cmd->response[i];
				ptr += 3;
				for (j = 0; j < 4; j++)
					printf("%02X ", *ptr--);
				printf("\n");
			}
			break;
		case MMC_RSP_R3:
			printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
				cmd->response[0]);
			break;
		default:
			printf("\t\tERROR MMC rsp not supported\n");
			break;
		}
	}
}
154
155 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
156 {
157 int status;
158
159 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
160 printf("CURR STATE:%d\n", status);
161 }
162 #endif
163
164 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
/*
 * Return a human-readable name for a bus mode; used by verbose/debug
 * output only. Modes outside the known range map to "Unknown mode".
 */
const char *mmc_mode_name(enum bus_mode mode)
{
	static const char *const names[] = {
	      [MMC_LEGACY]	= "MMC legacy",
	      [SD_LEGACY]	= "SD Legacy",
	      [MMC_HS]		= "MMC High Speed (26MHz)",
	      [SD_HS]		= "SD High Speed (50MHz)",
	      [UHS_SDR12]	= "UHS SDR12 (25MHz)",
	      [UHS_SDR25]	= "UHS SDR25 (50MHz)",
	      [UHS_SDR50]	= "UHS SDR50 (100MHz)",
	      [UHS_SDR104]	= "UHS SDR104 (208MHz)",
	      [UHS_DDR50]	= "UHS DDR50 (50MHz)",
	      [MMC_HS_52]	= "MMC High Speed (52MHz)",
	      [MMC_DDR_52]	= "MMC DDR52 (52MHz)",
	      [MMC_HS_200]	= "HS200 (200MHz)",
	};

	if (mode >= MMC_MODES_END)
		return "Unknown mode";
	else
		return names[mode];
}
187 #endif
188
/*
 * Return the nominal bus clock in Hz for a bus mode. Legacy MMC uses
 * the frequency read from the card (mmc->legacy_speed); modes outside
 * the known range map to 0. Entries for optional modes are compiled in
 * only when the corresponding support is enabled.
 */
static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
{
	static const int freqs[] = {
	      [SD_LEGACY]	= 25000000,
	      [MMC_HS]		= 26000000,
	      [SD_HS]		= 50000000,
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	      [UHS_SDR12]	= 25000000,
	      [UHS_SDR25]	= 50000000,
	      [UHS_SDR50]	= 100000000,
	      [UHS_DDR50]	= 50000000,
#ifdef MMC_SUPPORTS_TUNING
	      [UHS_SDR104]	= 208000000,
#endif
#endif
	      [MMC_HS_52]	= 52000000,
	      [MMC_DDR_52]	= 52000000,
#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	      [MMC_HS_200]	= 200000000,
#endif
	};

	if (mode == MMC_LEGACY)
		return mmc->legacy_speed;
	else if (mode >= MMC_MODES_END)
		return 0;
	else
		return freqs[mode];
}
218
/*
 * Record the newly selected bus mode and cache its nominal clock
 * frequency and DDR flag in the mmc struct. Always returns 0.
 */
static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
{
	mmc->selected_mode = mode;
	mmc->tran_speed = mmc_mode2freq(mmc, mode);
	mmc->ddr_mode = mmc_is_mode_ddr(mode);
	debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
	      mmc->tran_speed / 1000000);
	return 0;
}
228
229 #if !CONFIG_IS_ENABLED(DM_MMC)
230 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
231 {
232 int ret;
233
234 mmmc_trace_before_send(mmc, cmd);
235 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
236 mmmc_trace_after_send(mmc, cmd, ret);
237
238 return ret;
239 }
240 #endif
241
/*
 * CMD13: poll the card status until it is ready for data and no longer
 * in the programming state, or 'timeout' (ms) expires.
 *
 * Returns 0 on success, -ECOMM on a card-reported status error,
 * -ETIMEDOUT on timeout, or the transport error after 5 consecutive
 * failed command attempts.
 */
int mmc_send_status(struct mmc *mmc, int timeout)
{
	struct mmc_cmd cmd;
	int err, retries = 5;

	cmd.cmdidx = MMC_CMD_SEND_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	if (!mmc_host_is_spi(mmc))
		cmd.cmdarg = mmc->rca << 16;

	while (1) {
		err = mmc_send_cmd(mmc, &cmd, NULL);
		if (!err) {
			/* Done once ready-for-data and out of programming */
			if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
			    (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
			     MMC_STATE_PRG)
				break;

			if (cmd.response[0] & MMC_STATUS_MASK) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
				pr_err("Status Error: 0x%08X\n",
				       cmd.response[0]);
#endif
				return -ECOMM;
			}
		} else if (--retries < 0)
			return err;

		if (timeout-- <= 0)
			break;

		udelay(1000);
	}

	mmc_trace_state(mmc, &cmd);
	if (timeout <= 0) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		pr_err("Timeout waiting card ready\n");
#endif
		return -ETIMEDOUT;
	}

	return 0;
}
286
/*
 * CMD16: set the block length for subsequent data transfers. Skipped in
 * DDR mode, where the command is not permitted. With CONFIG_MMC_QUIRKS,
 * cards flagged with MMC_QUIRK_RETRY_SET_BLOCKLEN get a few retries.
 */
int mmc_set_blocklen(struct mmc *mmc, int len)
{
	struct mmc_cmd cmd;
	int err;

	if (mmc->ddr_mode)
		return 0;

	cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = len;

	err = mmc_send_cmd(mmc, &cmd, NULL);

#ifdef CONFIG_MMC_QUIRKS
	if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
		int retries = 4;
		/*
		 * It has been seen that SET_BLOCKLEN may fail on the first
		 * attempt, let's try a few more time
		 */
		do {
			err = mmc_send_cmd(mmc, &cmd, NULL);
			if (!err)
				break;
		} while (retries--);
	}
#endif

	return err;
}
318
319 #ifdef MMC_SUPPORTS_TUNING
/*
 * Standard tuning block patterns defined by the SD/eMMC specifications.
 * During tuning the card returns one of these blocks and the host
 * compares the received data against the expected pattern to find a
 * working sample point. The 4-bit pattern is 64 bytes, the 8-bit
 * pattern 128 bytes.
 */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
349
/*
 * Issue the tuning command 'opcode' and compare the block returned by
 * the card against the expected tuning pattern for the current bus
 * width (only 4- and 8-bit buses carry tuning blocks).
 *
 * Returns 0 on a pattern match, -EIO on mismatch, -EINVAL for an
 * unsupported bus width, or the transport error from mmc_send_cmd().
 * NOTE(review): cmd_error is never written by this implementation.
 */
int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
{
	struct mmc_cmd cmd;
	struct mmc_data data;
	const u8 *tuning_block_pattern;
	int size, err;

	if (mmc->bus_width == 8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (mmc->bus_width == 4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else {
		return -EINVAL;
	}

	ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);

	cmd.cmdidx = opcode;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_R1;

	data.dest = (void *)data_buf;
	data.blocks = 1;
	data.blocksize = size;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	if (err)
		return err;

	if (memcmp(data_buf, tuning_block_pattern, size))
		return -EIO;

	return 0;
}
387 #endif
388
/*
 * Read 'blkcnt' blocks starting at 'start' into dst with CMD17/CMD18.
 * High-capacity cards are block-addressed, standard-capacity cards
 * byte-addressed. A multi-block read is terminated with CMD12.
 *
 * Returns the number of blocks read; failure is reported as 0, not as
 * a negative error code.
 */
static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
			   lbaint_t blkcnt)
{
	struct mmc_cmd cmd;
	struct mmc_data data;

	if (blkcnt > 1)
		cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
	else
		cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;

	if (mmc->high_capacity)
		cmd.cmdarg = start;
	else
		cmd.cmdarg = start * mmc->read_bl_len;

	cmd.resp_type = MMC_RSP_R1;

	data.dest = dst;
	data.blocks = blkcnt;
	data.blocksize = mmc->read_bl_len;
	data.flags = MMC_DATA_READ;

	if (mmc_send_cmd(mmc, &cmd, &data))
		return 0;

	if (blkcnt > 1) {
		/* CMD12: stop the open-ended multiple-block transfer */
		cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
		cmd.cmdarg = 0;
		cmd.resp_type = MMC_RSP_R1b;
		if (mmc_send_cmd(mmc, &cmd, NULL)) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			pr_err("mmc fail to send stop cmd\n");
#endif
			return 0;
		}
	}

	return blkcnt;
}
429
/*
 * Block-device read entry point: read 'blkcnt' blocks from 'start' into
 * dst, after selecting the requested hardware partition and capping each
 * transfer at the controller's maximum (cfg->b_max).
 *
 * Returns the number of blocks read, or 0 on any error.
 */
#if CONFIG_IS_ENABLED(BLK)
ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
#else
ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
		void *dst)
#endif
{
#if CONFIG_IS_ENABLED(BLK)
	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
#endif
	int dev_num = block_dev->devnum;
	int err;
	lbaint_t cur, blocks_todo = blkcnt;

	if (blkcnt == 0)
		return 0;

	struct mmc *mmc = find_mmc_device(dev_num);
	if (!mmc)
		return 0;

	/* MMC_TINY has no block layer, so switch the partition directly */
	if (CONFIG_IS_ENABLED(MMC_TINY))
		err = mmc_switch_part(mmc, block_dev->hwpart);
	else
		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);

	if (err < 0)
		return 0;

	if ((start + blkcnt) > block_dev->lba) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
		       start + blkcnt, block_dev->lba);
#endif
		return 0;
	}

	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
		debug("%s: Failed to set blocklen\n", __func__);
		return 0;
	}

	/* Split the request into chunks of at most cfg->b_max blocks */
	do {
		cur = (blocks_todo > mmc->cfg->b_max) ?
			mmc->cfg->b_max : blocks_todo;
		if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
			debug("%s: Failed to read blocks\n", __func__);
			return 0;
		}
		blocks_todo -= cur;
		start += cur;
		dst += cur * mmc->read_bl_len;
	} while (blocks_todo > 0);

	return blkcnt;
}
486
487 static int mmc_go_idle(struct mmc *mmc)
488 {
489 struct mmc_cmd cmd;
490 int err;
491
492 udelay(1000);
493
494 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
495 cmd.cmdarg = 0;
496 cmd.resp_type = MMC_RSP_NONE;
497
498 err = mmc_send_cmd(mmc, &cmd, NULL);
499
500 if (err)
501 return err;
502
503 udelay(2000);
504
505 return 0;
506 }
507
508 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/*
 * Perform the SD voltage switch sequence (CMD11) to 1.8V signalling,
 * including the clock gating and DAT0 handshake the SD spec requires.
 * A request for 3.3V simply (re)programs the signal voltage without
 * CMD11. Returns 0 on success or a negative error code.
 */
static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
{
	struct mmc_cmd cmd;
	int err = 0;

	/*
	 * Send CMD11 only if the request is to switch the card to
	 * 1.8V signalling.
	 */
	if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		return mmc_set_signal_voltage(mmc, signal_voltage);

	cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_R1;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;

	if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
		return -EIO;

	/*
	 * The card should drive cmd and dat[0:3] low immediately
	 * after the response of cmd11, but wait 100 us to be sure
	 */
	err = mmc_wait_dat0(mmc, 0, 100);
	if (err == -ENOSYS)
		udelay(100);
	else if (err)
		return -ETIMEDOUT;

	/*
	 * During a signal voltage level switch, the clock must be gated
	 * for 5 ms according to the SD spec
	 */
	mmc_set_clock(mmc, mmc->clock, true);

	err = mmc_set_signal_voltage(mmc, signal_voltage);
	if (err)
		return err;

	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
	mdelay(10);
	mmc_set_clock(mmc, mmc->clock, false);

	/*
	 * Failure to switch is indicated by the card holding
	 * dat[0:3] low. Wait for at least 1 ms according to spec
	 */
	err = mmc_wait_dat0(mmc, 1, 1000);
	if (err == -ENOSYS)
		udelay(1000);
	else if (err)
		return -ETIMEDOUT;

	return 0;
}
568 #endif
569
/*
 * ACMD41 loop: negotiate the operating conditions with an SD card and
 * poll until it leaves busy. When uhs_en is set, 1.8V signalling is
 * requested (S18R) and, if the card accepts, the voltage switch is
 * executed. On success mmc->ocr, high_capacity, version and rca are
 * updated. Returns 0 on success, -EOPNOTSUPP on negotiation timeout,
 * or a transport error.
 */
static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
{
	int timeout = 1000;
	int err;
	struct mmc_cmd cmd;

	while (1) {
		/* ACMD41 must be preceded by CMD55 (APP_CMD) */
		cmd.cmdidx = MMC_CMD_APP_CMD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
		cmd.resp_type = MMC_RSP_R3;

		/*
		 * Most cards do not answer if some reserved bits
		 * in the ocr are set. However, Some controller
		 * can set bit 7 (reserved for low voltages), but
		 * how to manage low voltages SD card is not yet
		 * specified.
		 */
		cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
			(mmc->cfg->voltages & 0xff8000);

		if (mmc->version == SD_VERSION_2)
			cmd.cmdarg |= OCR_HCS;

		if (uhs_en)
			cmd.cmdarg |= OCR_S18R;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		if (cmd.response[0] & OCR_BUSY)
			break;

		if (timeout-- <= 0)
			return -EOPNOTSUPP;

		udelay(1000);
	}

	if (mmc->version != SD_VERSION_2)
		mmc->version = SD_VERSION_1_0;

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	mmc->ocr = cmd.response[0];

#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* Card is ready and accepted 1.8V signalling (S18A + busy set) */
	if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
	    == 0x41000000) {
		err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
		if (err)
			return err;
	}
#endif

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 0;

	return 0;
}
649
/*
 * Send one CMD1 (SEND_OP_COND) iteration. When use_arg is set (and the
 * host is not SPI), the argument advertises sector addressing plus the
 * voltage window and access mode taken from the previously read OCR.
 * On success mmc->ocr is updated from the response.
 */
static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
{
	struct mmc_cmd cmd;
	int err;

	cmd.cmdidx = MMC_CMD_SEND_OP_COND;
	cmd.resp_type = MMC_RSP_R3;
	cmd.cmdarg = 0;
	if (use_arg && !mmc_host_is_spi(mmc))
		cmd.cmdarg = OCR_HCS |
			(mmc->cfg->voltages &
			(mmc->ocr & OCR_VOLTAGE_MASK)) |
			(mmc->ocr & OCR_ACCESS_MODE);

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;
	mmc->ocr = cmd.response[0];
	return 0;
}
670
671 static int mmc_send_op_cond(struct mmc *mmc)
672 {
673 int err, i;
674
675 /* Some cards seem to need this */
676 mmc_go_idle(mmc);
677
678 /* Asking to the card its capabilities */
679 for (i = 0; i < 2; i++) {
680 err = mmc_send_op_cond_iter(mmc, i != 0);
681 if (err)
682 return err;
683
684 /* exit if not busy (flag seems to be inverted) */
685 if (mmc->ocr & OCR_BUSY)
686 break;
687 }
688 mmc->op_cond_pending = 1;
689 return 0;
690 }
691
/*
 * Finish the eMMC CMD1 negotiation started by mmc_send_op_cond(): poll
 * until the card reports ready (OCR_BUSY set, within ~1s), read the OCR
 * over SPI if applicable, and derive high-capacity addressing. Sets
 * mmc->rca to 1 and marks the version unknown until the CID/CSD are read.
 */
static int mmc_complete_op_cond(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int timeout = 1000;
	uint start;
	int err;

	mmc->op_cond_pending = 0;
	if (!(mmc->ocr & OCR_BUSY)) {
		/* Some cards seem to need this */
		mmc_go_idle(mmc);

		start = get_timer(0);
		while (1) {
			err = mmc_send_op_cond_iter(mmc, 1);
			if (err)
				return err;
			if (mmc->ocr & OCR_BUSY)
				break;
			if (get_timer(start) > timeout)
				return -EOPNOTSUPP;
			udelay(100);
		}
	}

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		mmc->ocr = cmd.response[0];
	}

	mmc->version = MMC_VERSION_UNKNOWN;

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 1;

	return 0;
}
737
738
739 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
740 {
741 struct mmc_cmd cmd;
742 struct mmc_data data;
743 int err;
744
745 /* Get the Card Status Register */
746 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
747 cmd.resp_type = MMC_RSP_R1;
748 cmd.cmdarg = 0;
749
750 data.dest = (char *)ext_csd;
751 data.blocks = 1;
752 data.blocksize = MMC_MAX_BLOCK_LEN;
753 data.flags = MMC_DATA_READ;
754
755 err = mmc_send_cmd(mmc, &cmd, &data);
756
757 return err;
758 }
759
760 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
761 {
762 struct mmc_cmd cmd;
763 int timeout = 1000;
764 int retries = 3;
765 int ret;
766
767 cmd.cmdidx = MMC_CMD_SWITCH;
768 cmd.resp_type = MMC_RSP_R1b;
769 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
770 (index << 16) |
771 (value << 8);
772
773 while (retries > 0) {
774 ret = mmc_send_cmd(mmc, &cmd, NULL);
775
776 /* Waiting for the ready status */
777 if (!ret) {
778 ret = mmc_send_status(mmc, timeout);
779 return ret;
780 }
781
782 retries--;
783 }
784
785 return ret;
786
787 }
788
/*
 * Program the eMMC HS_TIMING field in EXT_CSD for the requested bus
 * mode. For the high-speed modes the EXT_CSD is read back to confirm
 * the card actually enabled high-speed timing.
 *
 * Returns 0 on success, -EINVAL for an unsupported mode, -ENOTSUPP when
 * the card refused high-speed, or a transport error.
 */
static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode)
{
	int err;
	int speed_bits;

	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);

	switch (mode) {
	case MMC_HS:
	case MMC_HS_52:
	case MMC_DDR_52:
		speed_bits = EXT_CSD_TIMING_HS;
		break;
	case MMC_HS_200:
		speed_bits = EXT_CSD_TIMING_HS200;
		break;
	case MMC_LEGACY:
		speed_bits = EXT_CSD_TIMING_LEGACY;
		break;
	default:
		return -EINVAL;
	}
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
			 speed_bits);
	if (err)
		return err;

	if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
		/* Now check to see that it worked */
		err = mmc_send_ext_csd(mmc, test_csd);
		if (err)
			return err;

		/* No high-speed support */
		if (!test_csd[EXT_CSD_HS_TIMING])
			return -ENOTSUPP;
	}

	return 0;
}
829
/*
 * Populate mmc->card_caps from the cached EXT_CSD. Wide-bus support is
 * always advertised for v4+ cards (it is probed separately); the speed
 * modes come from the EXT_CSD DEVICE_TYPE field.
 */
static int mmc_get_capabilities(struct mmc *mmc)
{
	u8 *ext_csd = mmc->ext_csd;
	char cardtype;

	mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Only version 4 supports high-speed */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	if (!ext_csd) {
		pr_err("No ext_csd found!\n"); /* this should never happen */
		return -ENOTSUPP;
	}

	mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;

	cardtype = ext_csd[EXT_CSD_CARD_TYPE] & 0x3f;
	mmc->cardtype = cardtype;

	if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
			EXT_CSD_CARD_TYPE_HS200_1_8V)) {
		mmc->card_caps |= MMC_MODE_HS200;
	}
	if (cardtype & EXT_CSD_CARD_TYPE_52) {
		if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
			mmc->card_caps |= MMC_MODE_DDR_52MHz;
		mmc->card_caps |= MMC_MODE_HS_52MHz;
	}
	if (cardtype & EXT_CSD_CARD_TYPE_26)
		mmc->card_caps |= MMC_MODE_HS;

	return 0;
}
868
869 static int mmc_set_capacity(struct mmc *mmc, int part_num)
870 {
871 switch (part_num) {
872 case 0:
873 mmc->capacity = mmc->capacity_user;
874 break;
875 case 1:
876 case 2:
877 mmc->capacity = mmc->capacity_boot;
878 break;
879 case 3:
880 mmc->capacity = mmc->capacity_rpmb;
881 break;
882 case 4:
883 case 5:
884 case 6:
885 case 7:
886 mmc->capacity = mmc->capacity_gp[part_num - 4];
887 break;
888 default:
889 return -1;
890 }
891
892 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
893
894 return 0;
895 }
896
897 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
/*
 * Verify that the currently selected bus mode is allowed for the target
 * hardware partition: HS200 is forbidden when accessing a non-user
 * partition, so re-run mode selection without the forbidden capability
 * (or back to the best mode when returning to the user area).
 */
static int mmc_boot_part_access_chk(struct mmc *mmc, unsigned int part_num)
{
	int forbidden = 0;
	bool change = false;

	if (part_num & PART_ACCESS_MASK)
		forbidden = MMC_CAP(MMC_HS_200);

	if (MMC_CAP(mmc->selected_mode) & forbidden) {
		debug("selected mode (%s) is forbidden for part %d\n",
		      mmc_mode_name(mmc->selected_mode), part_num);
		change = true;
	} else if (mmc->selected_mode != mmc->best_mode) {
		debug("selected mode is not optimal\n");
		change = true;
	}

	if (change)
		return mmc_select_mode_and_width(mmc,
						 mmc->card_caps & ~forbidden);

	return 0;
}
921 #else
/* Without HS200 support no bus mode is forbidden for any partition. */
static inline int mmc_boot_part_access_chk(struct mmc *mmc,
					   unsigned int part_num)
{
	return 0;
}
927 #endif
928
/*
 * Switch the active hardware partition via the EXT_CSD PART_CONF byte.
 * The bus mode is validated for the target partition first; on success
 * (or when falling back to the user area after -ENODEV) the capacity
 * and block descriptor are refreshed.
 */
int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
{
	int ret;

	ret = mmc_boot_part_access_chk(mmc, part_num);
	if (ret)
		return ret;

	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
			 (mmc->part_config & ~PART_ACCESS_MASK)
			 | (part_num & PART_ACCESS_MASK));

	/*
	 * Set the capacity if the switch succeeded or was intended
	 * to return to representing the raw device.
	 */
	if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
		ret = mmc_set_capacity(mmc, part_num);
		mmc_get_blk_desc(mmc)->hwpart = part_num;
	}

	return ret;
}
952
/*
 * Configure eMMC hardware partitioning (enhanced user data area, GP
 * partitions, write reliability) according to 'conf'. 'mode' selects
 * whether to only validate the configuration (CHECK), program the
 * EXT_CSD registers (SET), or program them and mark partitioning
 * completed (COMPLETE). Completion only takes effect after a power
 * cycle. Returns 0 on success or a negative error code.
 */
int mmc_hwpart_config(struct mmc *mmc,
		      const struct mmc_hwpart_conf *conf,
		      enum mmc_hwpart_conf_mode mode)
{
	u8 part_attrs = 0;
	u32 enh_size_mult;
	u32 enh_start_addr;
	u32 gp_size_mult[4];
	u32 max_enh_size_mult;
	u32 tot_enh_size_mult = 0;
	u8 wr_rel_set;
	int i, pidx, err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
		return -EINVAL;

	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
		pr_err("eMMC >= 4.4 required for enhanced user data area\n");
		return -EMEDIUMTYPE;
	}

	if (!(mmc->part_support & PART_SUPPORT)) {
		pr_err("Card does not support partitioning\n");
		return -EMEDIUMTYPE;
	}

	if (!mmc->hc_wp_grp_size) {
		pr_err("Card does not define HC WP group size\n");
		return -EMEDIUMTYPE;
	}

	/* check partition alignment and total enhanced size */
	if (conf->user.enh_size) {
		if (conf->user.enh_size % mmc->hc_wp_grp_size ||
		    conf->user.enh_start % mmc->hc_wp_grp_size) {
			pr_err("User data enhanced area not HC WP group "
			       "size aligned\n");
			return -EINVAL;
		}
		part_attrs |= EXT_CSD_ENH_USR;
		enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
		if (mmc->high_capacity) {
			enh_start_addr = conf->user.enh_start;
		} else {
			/* byte-addressed card: convert sectors to bytes */
			enh_start_addr = (conf->user.enh_start << 9);
		}
	} else {
		enh_size_mult = 0;
		enh_start_addr = 0;
	}
	tot_enh_size_mult += enh_size_mult;

	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
			pr_err("GP%i partition not HC WP group size "
			       "aligned\n", pidx+1);
			return -EINVAL;
		}
		gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
		if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
			part_attrs |= EXT_CSD_ENH_GP(pidx);
			tot_enh_size_mult += gp_size_mult[pidx];
		}
	}

	if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
		pr_err("Card does not support enhanced attribute\n");
		return -EMEDIUMTYPE;
	}

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		return err;

	/* MAX_ENH_SIZE_MULT is a 3-byte little-endian field */
	max_enh_size_mult =
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
		ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
	if (tot_enh_size_mult > max_enh_size_mult) {
		pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
		       tot_enh_size_mult, max_enh_size_mult);
		return -EMEDIUMTYPE;
	}

	/* The default value of EXT_CSD_WR_REL_SET is device
	 * dependent, the values can only be changed if the
	 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
	 * changed only once and before partitioning is completed. */
	wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
	if (conf->user.wr_rel_change) {
		if (conf->user.wr_rel_set)
			wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
		else
			wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].wr_rel_change) {
			if (conf->gp_part[pidx].wr_rel_set)
				wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
			else
				wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
		}
	}

	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
	    !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
		puts("Card does not support host controlled partition write "
		     "reliability settings\n");
		return -EMEDIUMTYPE;
	}

	if (ext_csd[EXT_CSD_PARTITION_SETTING] &
	    EXT_CSD_PARTITION_SETTING_COMPLETED) {
		pr_err("Card already partitioned\n");
		return -EPERM;
	}

	if (mode == MMC_HWPART_CONF_CHECK)
		return 0;

	/* Partitioning requires high-capacity size definitions */
	if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1);

		if (err)
			return err;

		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;

		/* update erase group size to be high-capacity */
		mmc->erase_grp_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;

	}

	/* all OK, write the configuration */
	for (i = 0; i < 4; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_START_ADDR+i,
				 (enh_start_addr >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (i = 0; i < 3; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_SIZE_MULT+i,
				 (enh_size_mult >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		for (i = 0; i < 3; i++) {
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
					 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
			if (err)
				return err;
		}
	}
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
	if (err)
		return err;

	if (mode == MMC_HWPART_CONF_SET)
		return 0;

	/* The WR_REL_SET is a write-once register but shall be
	 * written before setting PART_SETTING_COMPLETED. As it is
	 * write-once we can only write it when completing the
	 * partitioning. */
	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_WR_REL_SET, wr_rel_set);
		if (err)
			return err;
	}

	/* Setting PART_SETTING_COMPLETED confirms the partition
	 * configuration but it only becomes effective after power
	 * cycle, so we do not adjust the partition related settings
	 * in the mmc struct. */

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITION_SETTING,
			 EXT_CSD_PARTITION_SETTING_COMPLETED);
	if (err)
		return err;

	return 0;
}
1146
1147 #if !CONFIG_IS_ENABLED(DM_MMC)
1148 int mmc_getcd(struct mmc *mmc)
1149 {
1150 int cd;
1151
1152 cd = board_mmc_getcd(mmc);
1153
1154 if (cd < 0) {
1155 if (mmc->cfg->ops->getcd)
1156 cd = mmc->cfg->ops->getcd(mmc);
1157 else
1158 cd = 1;
1159 }
1160
1161 return cd;
1162 }
1163 #endif
1164
/*
 * CMD6 (SWITCH_FUNC): check or switch the function 'value' of function
 * group 'group', leaving all other groups at 0xf ("no influence").
 * The 64-byte switch status block is read back into resp.
 */
static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
{
	struct mmc_cmd cmd;
	struct mmc_data data;

	/* Switch the frequency */
	cmd.cmdidx = SD_CMD_SWITCH_FUNC;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = (mode << 31) | 0xffffff;
	cmd.cmdarg &= ~(0xf << (group * 4));
	cmd.cmdarg |= value << (group * 4);

	data.dest = (char *)resp;
	data.blocksize = 64;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	return mmc_send_cmd(mmc, &cmd, &data);
}
1184
1185
/*
 * Read the SCR (and, for SD v1.10+, the CMD6 switch status) to populate
 * mmc->card_caps with the card's supported bus widths and speed modes,
 * and set mmc->version from the SCR's SD spec version fields.
 */
static int sd_get_capabilities(struct mmc *mmc)
{
	int err;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
	ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
	struct mmc_data data;
	int timeout;
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	u32 sd3_bus_mode;
#endif

	mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(SD_LEGACY);

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Read the SCR to find out if this card supports higher speeds */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SEND_SCR;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	timeout = 3;

retry_scr:
	data.dest = (char *)scr;
	data.blocksize = 8;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);

	if (err) {
		if (timeout--)
			goto retry_scr;

		return err;
	}

	mmc->scr[0] = __be32_to_cpu(scr[0]);
	mmc->scr[1] = __be32_to_cpu(scr[1]);

	/* SD spec version is encoded in the SCR's SD_SPEC/SD_SPEC3 fields */
	switch ((mmc->scr[0] >> 24) & 0xf) {
	case 0:
		mmc->version = SD_VERSION_1_0;
		break;
	case 1:
		mmc->version = SD_VERSION_1_10;
		break;
	case 2:
		mmc->version = SD_VERSION_2;
		if ((mmc->scr[0] >> 15) & 0x1)
			mmc->version = SD_VERSION_3;
		break;
	default:
		mmc->version = SD_VERSION_1_0;
		break;
	}

	if (mmc->scr[0] & SD_DATA_4BIT)
		mmc->card_caps |= MMC_MODE_4BIT;

	/* Version 1.0 doesn't support switching */
	if (mmc->version == SD_VERSION_1_0)
		return 0;

	timeout = 4;
	while (timeout--) {
		err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
				(u8 *)switch_status);

		if (err)
			return err;

		/* The high-speed function is busy. Try again */
		if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
			break;
	}

	/* Advertise the high-speed capability when the card supports it */
	if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
		mmc->card_caps |= MMC_CAP(SD_HS);

#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* Version before 3.0 don't support UHS modes */
	if (mmc->version < SD_VERSION_3)
		return 0;

	sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
	if (sd3_bus_mode & SD_MODE_UHS_SDR104)
		mmc->card_caps |= MMC_CAP(UHS_SDR104);
	if (sd3_bus_mode & SD_MODE_UHS_SDR50)
		mmc->card_caps |= MMC_CAP(UHS_SDR50);
	if (sd3_bus_mode & SD_MODE_UHS_SDR25)
		mmc->card_caps |= MMC_CAP(UHS_SDR25);
	if (sd3_bus_mode & SD_MODE_UHS_SDR12)
		mmc->card_caps |= MMC_CAP(UHS_SDR12);
	if (sd3_bus_mode & SD_MODE_UHS_DDR50)
		mmc->card_caps |= MMC_CAP(UHS_DDR50);
#endif

	return 0;
}
1298
1299 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1300 {
1301 int err;
1302
1303 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1304 int speed;
1305
1306 switch (mode) {
1307 case SD_LEGACY:
1308 case UHS_SDR12:
1309 speed = UHS_SDR12_BUS_SPEED;
1310 break;
1311 case SD_HS:
1312 case UHS_SDR25:
1313 speed = UHS_SDR25_BUS_SPEED;
1314 break;
1315 case UHS_SDR50:
1316 speed = UHS_SDR50_BUS_SPEED;
1317 break;
1318 case UHS_DDR50:
1319 speed = UHS_DDR50_BUS_SPEED;
1320 break;
1321 case UHS_SDR104:
1322 speed = UHS_SDR104_BUS_SPEED;
1323 break;
1324 default:
1325 return -EINVAL;
1326 }
1327
1328 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
1329 if (err)
1330 return err;
1331
1332 if ((__be32_to_cpu(switch_status[4]) >> 24) != speed)
1333 return -ENOTSUPP;
1334
1335 return 0;
1336 }
1337
1338 int sd_select_bus_width(struct mmc *mmc, int w)
1339 {
1340 int err;
1341 struct mmc_cmd cmd;
1342
1343 if ((w != 4) && (w != 1))
1344 return -EINVAL;
1345
1346 cmd.cmdidx = MMC_CMD_APP_CMD;
1347 cmd.resp_type = MMC_RSP_R1;
1348 cmd.cmdarg = mmc->rca << 16;
1349
1350 err = mmc_send_cmd(mmc, &cmd, NULL);
1351 if (err)
1352 return err;
1353
1354 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1355 cmd.resp_type = MMC_RSP_R1;
1356 if (w == 4)
1357 cmd.cmdarg = 2;
1358 else if (w == 1)
1359 cmd.cmdarg = 0;
1360 err = mmc_send_cmd(mmc, &cmd, NULL);
1361 if (err)
1362 return err;
1363
1364 return 0;
1365 }
1366
/*
 * Read the 64-byte SD Status register (ACMD13) and derive the
 * allocation unit (AU) size and the erase timeout/offset values,
 * stored in mmc->ssr for the erase code.
 *
 * Returns 0 on success, and also when the reported AU code is out of
 * range (that case is only logged); a negative error code if a
 * command fails after the retries are exhausted.
 */
static int sd_read_ssr(struct mmc *mmc)
{
	int err, i;
	struct mmc_cmd cmd;
	/* 64 bytes, handled as 16 big-endian 32-bit words */
	ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
	struct mmc_data data;
	int timeout = 3;
	unsigned int au, eo, et, es;

	/* CMD55: the next command is an application command */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SD_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

retry_ssr:
	data.dest = (char *)ssr;
	data.blocksize = 64;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	if (err) {
		/*
		 * NOTE(review): the CMD55 prefix is not resent on retry;
		 * some cards may reject the bare ACMD13 — confirm.
		 */
		if (timeout--)
			goto retry_ssr;

		return err;
	}

	/* Convert the whole block to CPU endianness in place */
	for (i = 0; i < 16; i++)
		ssr[i] = be32_to_cpu(ssr[i]);

	au = (ssr[2] >> 12) & 0xF;
	/* AU codes above 9 are only defined in the SD 3.0 table */
	if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
		mmc->ssr.au = sd_au_size[au];
		es = (ssr[3] >> 24) & 0xFF;
		es |= (ssr[2] & 0xFF) << 8;
		et = (ssr[3] >> 18) & 0x3F;
		if (es && et) {
			eo = (ssr[3] >> 16) & 0x3;
			mmc->ssr.erase_timeout = (et * 1000) / es;
			mmc->ssr.erase_offset = eo * 1000;
		}
	} else {
		debug("Invalid Allocation Unit Size.\n");
	}

	return 0;
}
1422
/*
 * CSD TRAN_SPEED frequency-base values, indexed by the low 3 bits of
 * the field. Divided by 10 to be nice to platforms without floating
 * point; multipliers[] below is scaled up by 10 to compensate.
 */
static const int fbase[] = {
	10000,
	100000,
	1000000,
	10000000,
};
1431
/* Multiplier values for TRAN_SPEED (bits 6:3). Multiplied by 10 to be
 * nice to platforms without floating point; fbase[] above is divided
 * by 10 to compensate.
 */
static const u8 multipliers[] = {
	0,	/* reserved */
	10,
	12,
	13,
	15,
	20,
	25,
	30,
	35,
	40,
	45,
	50,
	55,
	60,
	70,
	80,
};
1453
1454 static inline int bus_width(uint cap)
1455 {
1456 if (cap == MMC_MODE_8BIT)
1457 return 8;
1458 if (cap == MMC_MODE_4BIT)
1459 return 4;
1460 if (cap == MMC_MODE_1BIT)
1461 return 1;
1462 pr_warn("invalid bus witdh capability 0x%x\n", cap);
1463 return 0;
1464 }
1465
#if !CONFIG_IS_ENABLED(DM_MMC)
#ifdef MMC_SUPPORTS_TUNING
/* Tuning is not implemented on the non-driver-model code path */
static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
{
	return -ENOTSUPP;
}
#endif

/* No-op placeholder for the non-driver-model code path */
static void mmc_send_init_stream(struct mmc *mmc)
{
}

/*
 * Push the current bus parameters (clock, width, voltage, ...) to the
 * host driver. The set_ios hook is optional; a missing hook counts as
 * success.
 */
static int mmc_set_ios(struct mmc *mmc)
{
	int ret = 0;

	if (mmc->cfg->ops->set_ios)
		ret = mmc->cfg->ops->set_ios(mmc);

	return ret;
}
#endif
1488
1489 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1490 {
1491 if (clock > mmc->cfg->f_max)
1492 clock = mmc->cfg->f_max;
1493
1494 if (clock < mmc->cfg->f_min)
1495 clock = mmc->cfg->f_min;
1496
1497 mmc->clock = clock;
1498 mmc->clk_disable = disable;
1499
1500 return mmc_set_ios(mmc);
1501 }
1502
1503 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1504 {
1505 mmc->bus_width = width;
1506
1507 return mmc_set_ios(mmc);
1508 }
1509
#if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
/*
 * Helper function to display a capability mask in a human-friendly
 * manner: the supported bus widths followed by the supported bus
 * modes.
 *
 * @text: label printed in front of the capability list
 * @caps: OR-mask of MMC_MODE_xBIT width bits and MMC_CAP(mode) bits
 */
void mmc_dump_capabilities(const char *text, uint caps)
{
	enum bus_mode mode;

	printf("%s: widths [", text);
	if (caps & MMC_MODE_8BIT)
		printf("8, ");
	if (caps & MMC_MODE_4BIT)
		printf("4, ");
	if (caps & MMC_MODE_1BIT)
		printf("1, ");
	/* the two backspaces erase the trailing ", " separator */
	printf("\b\b] modes [");
	for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
		if (MMC_CAP(mode) & caps)
			printf("%s, ", mmc_mode_name(mode));
	printf("\b\b]\n");
}
#endif
1534
/* One entry of a mode-negotiation preference table */
struct mode_width_tuning {
	enum bus_mode mode;	/* bus timing mode to try */
	uint widths;		/* OR-mask of MMC_MODE_xBIT widths valid for it */
#ifdef MMC_SUPPORTS_TUNING
	uint tuning;		/* tuning command opcode; 0 when not required */
#endif
};
1542
#if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/* Convert an MMC_SIGNAL_VOLTAGE_* value to millivolts; -EINVAL if unknown */
int mmc_voltage_to_mv(enum mmc_voltage voltage)
{
	switch (voltage) {
	case MMC_SIGNAL_VOLTAGE_000: return 0;
	case MMC_SIGNAL_VOLTAGE_330: return 3300;
	case MMC_SIGNAL_VOLTAGE_180: return 1800;
	case MMC_SIGNAL_VOLTAGE_120: return 1200;
	}
	return -EINVAL;
}

/*
 * Switch the I/O signalling voltage via the host driver, skipping the
 * call when the requested voltage is already active. A failure is
 * logged here but returned for the caller to judge.
 */
static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
{
	int err;

	if (mmc->signal_voltage == signal_voltage)
		return 0;

	mmc->signal_voltage = signal_voltage;
	err = mmc_set_ios(mmc);
	if (err)
		debug("unable to set voltage (err %d)\n", err);

	return err;
}
#else
/* Voltage switching not compiled in: report success unconditionally */
static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
{
	return 0;
}
#endif
1575
/* SD bus modes to try during negotiation, fastest first */
static const struct mode_width_tuning sd_modes_by_pref[] = {
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
#ifdef MMC_SUPPORTS_TUNING
	{
		.mode = UHS_SDR104,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
		.tuning = MMC_CMD_SEND_TUNING_BLOCK
	},
#endif
	{
		.mode = UHS_SDR50,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = UHS_DDR50,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = UHS_SDR25,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#endif
	{
		.mode = SD_HS,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	{
		.mode = UHS_SDR12,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#endif
	{
		.mode = SD_LEGACY,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	}
};

/* Iterate sd_modes_by_pref, visiting only the modes present in caps */
#define for_each_sd_mode_by_pref(caps, mwt) \
	for (mwt = sd_modes_by_pref;\
	     mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
	     mwt++) \
		if (caps & MMC_CAP(mwt->mode))
1619
/*
 * Negotiate the best SD bus mode and width: walk sd_modes_by_pref
 * from fastest to slowest and, for each mode, try 4-bit before 1-bit,
 * keeping the first configuration that survives an SSR read.
 *
 * Returns 0 on success, -ENOTSUPP if no mode could be established.
 */
static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
{
	int err;
	uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
	const struct mode_width_tuning *mwt;
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* UHS requires the card to have accepted the 1.8V switch request */
	bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
#else
	bool uhs_en = false;
#endif
	uint caps;

#ifdef DEBUG
	mmc_dump_capabilities("sd card", card_caps);
	mmc_dump_capabilities("host", mmc->host_caps);
#endif

	/* Restrict card's capabilities by what the host can do */
	caps = card_caps & mmc->host_caps;

	if (!uhs_en)
		caps &= ~UHS_CAPS;

	for_each_sd_mode_by_pref(caps, mwt) {
		uint *w;

		for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
			if (*w & caps & mwt->widths) {
				debug("trying mode %s width %d (at %d MHz)\n",
				      mmc_mode_name(mwt->mode),
				      bus_width(*w),
				      mmc_mode2freq(mmc, mwt->mode) / 1000000);

				/* configure the bus width (card + host) */
				err = sd_select_bus_width(mmc, bus_width(*w));
				if (err)
					goto error;
				mmc_set_bus_width(mmc, bus_width(*w));

				/* configure the bus mode (card) */
				err = sd_set_card_speed(mmc, mwt->mode);
				if (err)
					goto error;

				/* configure the bus mode (host) */
				mmc_select_mode(mmc, mwt->mode);
				mmc_set_clock(mmc, mmc->tran_speed, false);

#ifdef MMC_SUPPORTS_TUNING
				/* execute tuning if needed */
				if (mwt->tuning && !mmc_host_is_spi(mmc)) {
					err = mmc_execute_tuning(mmc,
								 mwt->tuning);
					if (err) {
						debug("tuning failed\n");
						goto error;
					}
				}
#endif

				/* SSR read doubles as a data-transfer check */
				err = sd_read_ssr(mmc);
				if (!err)
					return 0;

				pr_warn("bad ssr\n");

error:
				/*
				 * The label sits inside the loops: after
				 * reverting to a safer bus speed, the next
				 * width/mode candidate is tried.
				 */
				mmc_select_mode(mmc, SD_LEGACY);
				mmc_set_clock(mmc, mmc->tran_speed, false);
			}
		}
	}

	printf("unable to select a mode\n");
	return -ENOTSUPP;
}
1697
/*
 * Read the EXT_CSD again and compare the part of it that is constant
 * against the cached copy. This can be used to check that a newly
 * configured bus transfers data correctly.
 *
 * Returns 0 on match (trivially for pre-v4 cards, which have no
 * EXT_CSD), -EBADMSG on mismatch, or the read error.
 */
static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
{
	int err;
	const u8 *ext_csd = mmc->ext_csd;
	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);

	if (mmc->version < MMC_VERSION_4)
		return 0;

	err = mmc_send_ext_csd(mmc, test_csd);
	if (err)
		return err;

	/* Only compare read only fields */
	if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
		== test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
	    ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
		== test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
	    ext_csd[EXT_CSD_REV]
		== test_csd[EXT_CSD_REV] &&
	    ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
		== test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
	    memcmp(&ext_csd[EXT_CSD_SEC_CNT],
		   &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
		return 0;

	return -EBADMSG;
}
1731
#if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * Choose and apply a signalling voltage for the given bus mode, trying
 * the candidates the card advertises (EXT_CSD card-type bits)
 * intersected with allowed_mask, one by one.
 *
 * Returns 0 once a voltage was applied, -ENOTSUPP if every candidate
 * failed.
 */
static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
				  uint32_t allowed_mask)
{
	u32 card_mask = 0;

	switch (mode) {
	case MMC_HS_200:
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_HS200_1_8V)
			card_mask |= MMC_SIGNAL_VOLTAGE_180;
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_HS200_1_2V)
			card_mask |= MMC_SIGNAL_VOLTAGE_120;
		break;
	case MMC_DDR_52:
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
			card_mask |= MMC_SIGNAL_VOLTAGE_330 |
				     MMC_SIGNAL_VOLTAGE_180;
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
			card_mask |= MMC_SIGNAL_VOLTAGE_120;
		break;
	default:
		card_mask |= MMC_SIGNAL_VOLTAGE_330;
		break;
	}

	while (card_mask & allowed_mask) {
		enum mmc_voltage best_match;

		/*
		 * ffs() selects the lowest-numbered set bit; presumably the
		 * MMC_SIGNAL_VOLTAGE_* bits are ordered so that this is the
		 * lowest voltage — confirm against the enum definition.
		 */
		best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
		if (!mmc_set_signal_voltage(mmc, best_match))
			return 0;

		/* drop the failed candidate and try the next one */
		allowed_mask &= ~best_match;
	}

	return -ENOTSUPP;
}
#else
/* Voltage switching not compiled in: report success unconditionally */
static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
					 uint32_t allowed_mask)
{
	return 0;
}
#endif
1776
/* eMMC bus modes to try during negotiation, fastest first */
static const struct mode_width_tuning mmc_modes_by_pref[] = {
#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	{
		.mode = MMC_HS_200,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
		.tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
	},
#endif
	{
		.mode = MMC_DDR_52,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
	},
	{
		.mode = MMC_HS_52,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = MMC_HS,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = MMC_LEGACY,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	}
};

/* Iterate mmc_modes_by_pref, visiting only the modes present in caps */
#define for_each_mmc_mode_by_pref(caps, mwt) \
	for (mwt = mmc_modes_by_pref;\
	     mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
	     mwt++) \
		if (caps & MMC_CAP(mwt->mode))
1808
/*
 * Bus-width capability flag -> EXT_CSD BUS_WIDTH encoding, widest
 * first; DDR entries carry EXT_CSD_DDR_FLAG in ext_csd_bits.
 */
static const struct ext_csd_bus_width {
	uint cap;		/* MMC_MODE_xBIT capability bit */
	bool is_ddr;		/* entry applies to DDR timing only */
	uint ext_csd_bits;	/* value programmed into EXT_CSD_BUS_WIDTH */
} ext_csd_bus_width[] = {
	{MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
	{MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
	{MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
	{MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
	{MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
};

/* Iterate widths matching both caps and the requested ddr setting */
#define for_each_supported_width(caps, ddr, ecbv) \
	for (ecbv = ext_csd_bus_width;\
	     ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
	     ecbv++) \
		if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
1826
/*
 * Negotiate the best eMMC bus mode and width: walk mmc_modes_by_pref /
 * ext_csd_bus_width from fastest to slowest, program card and host for
 * each candidate and keep the first one that survives an EXT_CSD
 * read-back comparison.
 *
 * Returns 0 on success (trivially for pre-v4 cards), -ENOTSUPP if no
 * mode could be established or no EXT_CSD is cached.
 */
static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
{
	int err;
	const struct mode_width_tuning *mwt;
	const struct ext_csd_bus_width *ecbw;

#ifdef DEBUG
	mmc_dump_capabilities("mmc", card_caps);
	mmc_dump_capabilities("host", mmc->host_caps);
#endif

	/* Restrict card's capabilities by what the host can do */
	card_caps &= mmc->host_caps;

	/* Only version 4 of MMC supports wider bus widths */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	if (!mmc->ext_csd) {
		debug("No ext_csd found!\n"); /* this should never happen */
		return -ENOTSUPP;
	}

	/* drop to the legacy clock before reprogramming the card */
	mmc_set_clock(mmc, mmc->legacy_speed, false);

	for_each_mmc_mode_by_pref(card_caps, mwt) {
		for_each_supported_width(card_caps & mwt->widths,
					 mmc_is_mode_ddr(mwt->mode), ecbw) {
			enum mmc_voltage old_voltage;
			debug("trying mode %s width %d (at %d MHz)\n",
			      mmc_mode_name(mwt->mode),
			      bus_width(ecbw->cap),
			      mmc_mode2freq(mmc, mwt->mode) / 1000000);
			/* remember the voltage so the error path can revert */
			old_voltage = mmc->signal_voltage;
			err = mmc_set_lowest_voltage(mmc, mwt->mode,
						     MMC_ALL_SIGNAL_VOLTAGE);
			if (err)
				continue;

			/* configure the bus width (card + host) */
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				    EXT_CSD_BUS_WIDTH,
				    ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
			if (err)
				goto error;
			mmc_set_bus_width(mmc, bus_width(ecbw->cap));

			/* configure the bus speed (card) */
			err = mmc_set_card_speed(mmc, mwt->mode);
			if (err)
				goto error;

			/*
			 * configure the bus width AND the ddr mode (card)
			 * The host side will be taken care of in the next step
			 */
			if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
				err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
						 EXT_CSD_BUS_WIDTH,
						 ecbw->ext_csd_bits);
				if (err)
					goto error;
			}

			/* configure the bus mode (host) */
			mmc_select_mode(mmc, mwt->mode);
			mmc_set_clock(mmc, mmc->tran_speed, false);
#ifdef MMC_SUPPORTS_TUNING

			/* execute tuning if needed */
			if (mwt->tuning) {
				err = mmc_execute_tuning(mmc, mwt->tuning);
				if (err) {
					debug("tuning failed\n");
					goto error;
				}
			}
#endif

			/* do a transfer to check the configuration */
			err = mmc_read_and_compare_ext_csd(mmc);
			if (!err)
				return 0;
error:
			/*
			 * The label sits inside the loops: after reverting,
			 * the next width/mode candidate is tried.
			 */
			mmc_set_signal_voltage(mmc, old_voltage);
			/* if an error occurred, revert to a safer bus mode */
			mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				   EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
			mmc_select_mode(mmc, MMC_LEGACY);
			mmc_set_bus_width(mmc, 1);
		}
	}

	pr_err("unable to select a mode\n");

	return -ENOTSUPP;
}
1924
1925 static int mmc_startup_v4(struct mmc *mmc)
1926 {
1927 int err, i;
1928 u64 capacity;
1929 bool has_parts = false;
1930 bool part_completed;
1931 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1932
1933 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
1934 return 0;
1935
1936 /* check ext_csd version and capacity */
1937 err = mmc_send_ext_csd(mmc, ext_csd);
1938 if (err)
1939 goto error;
1940
1941 /* store the ext csd for future reference */
1942 if (!mmc->ext_csd)
1943 mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
1944 if (!mmc->ext_csd)
1945 return -ENOMEM;
1946 memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
1947
1948 if (ext_csd[EXT_CSD_REV] >= 2) {
1949 /*
1950 * According to the JEDEC Standard, the value of
1951 * ext_csd's capacity is valid if the value is more
1952 * than 2GB
1953 */
1954 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
1955 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
1956 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
1957 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
1958 capacity *= MMC_MAX_BLOCK_LEN;
1959 if ((capacity >> 20) > 2 * 1024)
1960 mmc->capacity_user = capacity;
1961 }
1962
1963 switch (ext_csd[EXT_CSD_REV]) {
1964 case 1:
1965 mmc->version = MMC_VERSION_4_1;
1966 break;
1967 case 2:
1968 mmc->version = MMC_VERSION_4_2;
1969 break;
1970 case 3:
1971 mmc->version = MMC_VERSION_4_3;
1972 break;
1973 case 5:
1974 mmc->version = MMC_VERSION_4_41;
1975 break;
1976 case 6:
1977 mmc->version = MMC_VERSION_4_5;
1978 break;
1979 case 7:
1980 mmc->version = MMC_VERSION_5_0;
1981 break;
1982 case 8:
1983 mmc->version = MMC_VERSION_5_1;
1984 break;
1985 }
1986
1987 /* The partition data may be non-zero but it is only
1988 * effective if PARTITION_SETTING_COMPLETED is set in
1989 * EXT_CSD, so ignore any data if this bit is not set,
1990 * except for enabling the high-capacity group size
1991 * definition (see below).
1992 */
1993 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
1994 EXT_CSD_PARTITION_SETTING_COMPLETED);
1995
1996 /* store the partition info of emmc */
1997 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
1998 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
1999 ext_csd[EXT_CSD_BOOT_MULT])
2000 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2001 if (part_completed &&
2002 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2003 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
2004
2005 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2006
2007 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
2008
2009 for (i = 0; i < 4; i++) {
2010 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2011 uint mult = (ext_csd[idx + 2] << 16) +
2012 (ext_csd[idx + 1] << 8) + ext_csd[idx];
2013 if (mult)
2014 has_parts = true;
2015 if (!part_completed)
2016 continue;
2017 mmc->capacity_gp[i] = mult;
2018 mmc->capacity_gp[i] *=
2019 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2020 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2021 mmc->capacity_gp[i] <<= 19;
2022 }
2023
2024 if (part_completed) {
2025 mmc->enh_user_size =
2026 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2027 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2028 ext_csd[EXT_CSD_ENH_SIZE_MULT];
2029 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2030 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2031 mmc->enh_user_size <<= 19;
2032 mmc->enh_user_start =
2033 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2034 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2035 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2036 ext_csd[EXT_CSD_ENH_START_ADDR];
2037 if (mmc->high_capacity)
2038 mmc->enh_user_start <<= 9;
2039 }
2040
2041 /*
2042 * Host needs to enable ERASE_GRP_DEF bit if device is
2043 * partitioned. This bit will be lost every time after a reset
2044 * or power off. This will affect erase size.
2045 */
2046 if (part_completed)
2047 has_parts = true;
2048 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2049 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2050 has_parts = true;
2051 if (has_parts) {
2052 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2053 EXT_CSD_ERASE_GROUP_DEF, 1);
2054
2055 if (err)
2056 goto error;
2057
2058 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2059 }
2060
2061 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2062 /* Read out group size from ext_csd */
2063 mmc->erase_grp_size =
2064 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2065 /*
2066 * if high capacity and partition setting completed
2067 * SEC_COUNT is valid even if it is smaller than 2 GiB
2068 * JEDEC Standard JESD84-B45, 6.2.4
2069 */
2070 if (mmc->high_capacity && part_completed) {
2071 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2072 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2073 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2074 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2075 capacity *= MMC_MAX_BLOCK_LEN;
2076 mmc->capacity_user = capacity;
2077 }
2078 } else {
2079 /* Calculate the group size from the csd value. */
2080 int erase_gsz, erase_gmul;
2081
2082 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2083 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2084 mmc->erase_grp_size = (erase_gsz + 1)
2085 * (erase_gmul + 1);
2086 }
2087
2088 mmc->hc_wp_grp_size = 1024
2089 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2090 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2091
2092 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
2093
2094 return 0;
2095 error:
2096 if (mmc->ext_csd) {
2097 free(mmc->ext_csd);
2098 mmc->ext_csd = NULL;
2099 }
2100 return err;
2101 }
2102
2103 static int mmc_startup(struct mmc *mmc)
2104 {
2105 int err, i;
2106 uint mult, freq;
2107 u64 cmult, csize;
2108 struct mmc_cmd cmd;
2109 struct blk_desc *bdesc;
2110
2111 #ifdef CONFIG_MMC_SPI_CRC_ON
2112 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2113 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2114 cmd.resp_type = MMC_RSP_R1;
2115 cmd.cmdarg = 1;
2116 err = mmc_send_cmd(mmc, &cmd, NULL);
2117 if (err)
2118 return err;
2119 }
2120 #endif
2121
2122 /* Put the Card in Identify Mode */
2123 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2124 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2125 cmd.resp_type = MMC_RSP_R2;
2126 cmd.cmdarg = 0;
2127
2128 err = mmc_send_cmd(mmc, &cmd, NULL);
2129
2130 #ifdef CONFIG_MMC_QUIRKS
2131 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
2132 int retries = 4;
2133 /*
2134 * It has been seen that SEND_CID may fail on the first
2135 * attempt, let's try a few more time
2136 */
2137 do {
2138 err = mmc_send_cmd(mmc, &cmd, NULL);
2139 if (!err)
2140 break;
2141 } while (retries--);
2142 }
2143 #endif
2144
2145 if (err)
2146 return err;
2147
2148 memcpy(mmc->cid, cmd.response, 16);
2149
2150 /*
2151 * For MMC cards, set the Relative Address.
2152 * For SD cards, get the Relatvie Address.
2153 * This also puts the cards into Standby State
2154 */
2155 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2156 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2157 cmd.cmdarg = mmc->rca << 16;
2158 cmd.resp_type = MMC_RSP_R6;
2159
2160 err = mmc_send_cmd(mmc, &cmd, NULL);
2161
2162 if (err)
2163 return err;
2164
2165 if (IS_SD(mmc))
2166 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2167 }
2168
2169 /* Get the Card-Specific Data */
2170 cmd.cmdidx = MMC_CMD_SEND_CSD;
2171 cmd.resp_type = MMC_RSP_R2;
2172 cmd.cmdarg = mmc->rca << 16;
2173
2174 err = mmc_send_cmd(mmc, &cmd, NULL);
2175
2176 if (err)
2177 return err;
2178
2179 mmc->csd[0] = cmd.response[0];
2180 mmc->csd[1] = cmd.response[1];
2181 mmc->csd[2] = cmd.response[2];
2182 mmc->csd[3] = cmd.response[3];
2183
2184 if (mmc->version == MMC_VERSION_UNKNOWN) {
2185 int version = (cmd.response[0] >> 26) & 0xf;
2186
2187 switch (version) {
2188 case 0:
2189 mmc->version = MMC_VERSION_1_2;
2190 break;
2191 case 1:
2192 mmc->version = MMC_VERSION_1_4;
2193 break;
2194 case 2:
2195 mmc->version = MMC_VERSION_2_2;
2196 break;
2197 case 3:
2198 mmc->version = MMC_VERSION_3;
2199 break;
2200 case 4:
2201 mmc->version = MMC_VERSION_4;
2202 break;
2203 default:
2204 mmc->version = MMC_VERSION_1_2;
2205 break;
2206 }
2207 }
2208
2209 /* divide frequency by 10, since the mults are 10x bigger */
2210 freq = fbase[(cmd.response[0] & 0x7)];
2211 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2212
2213 mmc->legacy_speed = freq * mult;
2214 mmc_select_mode(mmc, MMC_LEGACY);
2215
2216 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2217 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2218
2219 if (IS_SD(mmc))
2220 mmc->write_bl_len = mmc->read_bl_len;
2221 else
2222 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
2223
2224 if (mmc->high_capacity) {
2225 csize = (mmc->csd[1] & 0x3f) << 16
2226 | (mmc->csd[2] & 0xffff0000) >> 16;
2227 cmult = 8;
2228 } else {
2229 csize = (mmc->csd[1] & 0x3ff) << 2
2230 | (mmc->csd[2] & 0xc0000000) >> 30;
2231 cmult = (mmc->csd[2] & 0x00038000) >> 15;
2232 }
2233
2234 mmc->capacity_user = (csize + 1) << (cmult + 2);
2235 mmc->capacity_user *= mmc->read_bl_len;
2236 mmc->capacity_boot = 0;
2237 mmc->capacity_rpmb = 0;
2238 for (i = 0; i < 4; i++)
2239 mmc->capacity_gp[i] = 0;
2240
2241 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2242 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2243
2244 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2245 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2246
2247 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2248 cmd.cmdidx = MMC_CMD_SET_DSR;
2249 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2250 cmd.resp_type = MMC_RSP_NONE;
2251 if (mmc_send_cmd(mmc, &cmd, NULL))
2252 pr_warn("MMC: SET_DSR failed\n");
2253 }
2254
2255 /* Select the card, and put it into Transfer Mode */
2256 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2257 cmd.cmdidx = MMC_CMD_SELECT_CARD;
2258 cmd.resp_type = MMC_RSP_R1;
2259 cmd.cmdarg = mmc->rca << 16;
2260 err = mmc_send_cmd(mmc, &cmd, NULL);
2261
2262 if (err)
2263 return err;
2264 }
2265
2266 /*
2267 * For SD, its erase group is always one sector
2268 */
2269 mmc->erase_grp_size = 1;
2270 mmc->part_config = MMCPART_NOAVAILABLE;
2271
2272 err = mmc_startup_v4(mmc);
2273 if (err)
2274 return err;
2275
2276 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2277 if (err)
2278 return err;
2279
2280 if (IS_SD(mmc)) {
2281 err = sd_get_capabilities(mmc);
2282 if (err)
2283 return err;
2284 err = sd_select_mode_and_width(mmc, mmc->card_caps);
2285 } else {
2286 err = mmc_get_capabilities(mmc);
2287 if (err)
2288 return err;
2289 mmc_select_mode_and_width(mmc, mmc->card_caps);
2290 }
2291
2292 if (err)
2293 return err;
2294
2295 mmc->best_mode = mmc->selected_mode;
2296
2297 /* Fix the block length for DDR mode */
2298 if (mmc->ddr_mode) {
2299 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2300 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2301 }
2302
2303 /* fill in device description */
2304 bdesc = mmc_get_blk_desc(mmc);
2305 bdesc->lun = 0;
2306 bdesc->hwpart = 0;
2307 bdesc->type = 0;
2308 bdesc->blksz = mmc->read_bl_len;
2309 bdesc->log2blksz = LOG2(bdesc->blksz);
2310 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2311 #if !defined(CONFIG_SPL_BUILD) || \
2312 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2313 !defined(CONFIG_USE_TINY_PRINTF))
2314 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2315 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2316 (mmc->cid[3] >> 16) & 0xffff);
2317 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2318 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2319 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2320 (mmc->cid[2] >> 24) & 0xff);
2321 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2322 (mmc->cid[2] >> 16) & 0xf);
2323 #else
2324 bdesc->vendor[0] = 0;
2325 bdesc->product[0] = 0;
2326 bdesc->revision[0] = 0;
2327 #endif
2328 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
2329 part_init(bdesc);
2330 #endif
2331
2332 return 0;
2333 }
2334
2335 static int mmc_send_if_cond(struct mmc *mmc)
2336 {
2337 struct mmc_cmd cmd;
2338 int err;
2339
2340 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2341 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2342 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2343 cmd.resp_type = MMC_RSP_R7;
2344
2345 err = mmc_send_cmd(mmc, &cmd, NULL);
2346
2347 if (err)
2348 return err;
2349
2350 if ((cmd.response[0] & 0xff) != 0xaa)
2351 return -EOPNOTSUPP;
2352 else
2353 mmc->version = SD_VERSION_2;
2354
2355 return 0;
2356 }
2357
#if !CONFIG_IS_ENABLED(DM_MMC)
/* board-specific MMC power initializations; weak no-op default that
 * boards may override (non-driver-model code path only).
 */
__weak void board_mmc_power_init(void)
{
}
#endif
2364
/*
 * Resolve the card's power supplies. With driver model, look up the
 * vmmc/vqmmc regulators from the device tree (a missing supply is
 * only logged); without it, defer to the board hook.
 *
 * Always returns 0.
 */
static int mmc_power_init(struct mmc *mmc)
{
#if CONFIG_IS_ENABLED(DM_MMC)
#if CONFIG_IS_ENABLED(DM_REGULATOR)
	int ret;

	ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
					  &mmc->vmmc_supply);
	if (ret)
		debug("%s: No vmmc supply\n", mmc->dev->name);

	ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
					  &mmc->vqmmc_supply);
	if (ret)
		debug("%s: No vqmmc supply\n", mmc->dev->name);
#endif
#else /* !CONFIG_DM_MMC */
	/*
	 * Driver model should use a regulator, as above, rather than calling
	 * out to board code.
	 */
	board_mmc_power_init();
#endif
	return 0;
}
2390
2391 /*
2392 * put the host in the initial state:
2393 * - turn on Vdd (card power supply)
2394 * - configure the bus width and clock to minimal values
2395 */
2396 static void mmc_set_initial_state(struct mmc *mmc)
2397 {
2398 int err;
2399
2400 /* First try to set 3.3V. If it fails set to 1.8V */
2401 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2402 if (err != 0)
2403 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2404 if (err != 0)
2405 pr_warn("mmc: failed to set signal voltage\n");
2406
2407 mmc_select_mode(mmc, MMC_LEGACY);
2408 mmc_set_bus_width(mmc, 1);
2409 mmc_set_clock(mmc, 0, false);
2410 }
2411
/* Enable the card's Vdd regulator, if mmc_power_init() found one */
static int mmc_power_on(struct mmc *mmc)
{
#if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
	if (mmc->vmmc_supply) {
		int ret = regulator_set_enable(mmc->vmmc_supply, true);

		if (ret) {
			puts("Error enabling VMMC supply\n");
			return ret;
		}
	}
#endif
	return 0;
}
2426
/*
 * Power the card down: request a minimal, disabled clock (the value 1
 * is clamped up to f_min by mmc_set_clock(), disable=true sets
 * mmc->clk_disable for the host driver) and turn off the Vdd
 * regulator if one exists.
 */
static int mmc_power_off(struct mmc *mmc)
{
	mmc_set_clock(mmc, 1, true);
#if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
	if (mmc->vmmc_supply) {
		int ret = regulator_set_enable(mmc->vmmc_supply, false);

		if (ret) {
			debug("Error disabling VMMC supply\n");
			return ret;
		}
	}
#endif
	return 0;
}
2442
/*
 * Power the card fully off, wait, and power it back on. The SD spec
 * recommends at least 1 ms of off time; 2 ms is used to stay on the
 * safe side.
 */
static int mmc_power_cycle(struct mmc *mmc)
{
	int err = mmc_power_off(mmc);

	if (err)
		return err;

	udelay(2000);

	return mmc_power_on(mmc);
}
2457
/*
 * Start initializing an MMC/SD card.
 *
 * Powers the card up (with a full power cycle when possible), resets
 * it with CMD0 and negotiates operating conditions via the SD and MMC
 * op-cond commands. On success init_in_progress is set and
 * mmc_complete_init() finishes the sequence (including any pending
 * op_cond polling).
 *
 * Return: 0 on success, -ENOMEDIUM when no card is present,
 * -EOPNOTSUPP when the card answers neither the SD nor the MMC
 * op-cond command, or another negative error code.
 */
int mmc_start_init(struct mmc *mmc)
{
	bool no_card;
	/* Only attempt UHS modes when the host advertises UHS capability */
	bool uhs_en = supports_uhs(mmc->cfg->host_caps);
	int err;

	/*
	 * all hosts are capable of 1 bit bus-width and able to use the legacy
	 * timings.
	 */
	mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(SD_LEGACY) |
			 MMC_CAP(MMC_LEGACY) | MMC_MODE_1BIT;

	/* we pretend there's no card when init is NULL */
	no_card = mmc_getcd(mmc) == 0;
#if !CONFIG_IS_ENABLED(DM_MMC)
	no_card = no_card || (mmc->cfg->ops->init == NULL);
#endif
	if (no_card) {
		mmc->has_init = 0;
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("MMC: no card present\n");
#endif
		return -ENOMEDIUM;
	}

	/* Card already fully initialized: nothing to do */
	if (mmc->has_init)
		return 0;

#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	mmc_adapter_card_type_ident();
#endif
	err = mmc_power_init(mmc);
	if (err)
		return err;

#ifdef CONFIG_MMC_QUIRKS
	/* Enable command-retry workarounds for quirky cards */
	mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
		      MMC_QUIRK_RETRY_SEND_CID;
#endif

	err = mmc_power_cycle(mmc);
	if (err) {
		/*
		 * if power cycling is not supported, we should not try
		 * to use the UHS modes, because we wouldn't be able to
		 * recover from an error during the UHS initialization.
		 */
		debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
		uhs_en = false;
		mmc->host_caps &= ~UHS_CAPS;
		err = mmc_power_on(mmc);
	}
	if (err)
		return err;

#if CONFIG_IS_ENABLED(DM_MMC)
	/* The device has already been probed ready for use */
#else
	/* made sure it's not NULL earlier */
	err = mmc->cfg->ops->init(mmc);
	if (err)
		return err;
#endif
	mmc->ddr_mode = 0;

retry:
	mmc_set_initial_state(mmc);
	mmc_send_init_stream(mmc);

	/* Reset the Card */
	err = mmc_go_idle(mmc);

	if (err)
		return err;

	/* The internal partition is reset to the user partition (0) at every CMD0 */
	mmc_get_blk_desc(mmc)->hwpart = 0;

	/* Test for SD version 2 */
	err = mmc_send_if_cond(mmc);

	/*
	 * Now try to get the SD card's operating condition. Note that the
	 * CMD8 result above is deliberately discarded (err is overwritten
	 * here) -- presumably because pre-v2 SD cards do not answer CMD8;
	 * NOTE(review): confirm against the SD physical layer spec.
	 */
	err = sd_send_op_cond(mmc, uhs_en);
	if (err && uhs_en) {
		/*
		 * UHS negotiation failed: retry once without UHS after a
		 * best-effort power cycle (its result is intentionally
		 * ignored). No infinite loop: uhs_en is cleared first.
		 */
		uhs_en = false;
		mmc_power_cycle(mmc);
		goto retry;
	}

	/* If the command timed out, we check for an MMC card */
	if (err == -ETIMEDOUT) {
		err = mmc_send_op_cond(mmc);

		if (err) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			pr_err("Card did not respond to voltage select!\n");
#endif
			return -EOPNOTSUPP;
		}
	}

	if (!err)
		mmc->init_in_progress = 1;

	return err;
}
2565
2566 static int mmc_complete_init(struct mmc *mmc)
2567 {
2568 int err = 0;
2569
2570 mmc->init_in_progress = 0;
2571 if (mmc->op_cond_pending)
2572 err = mmc_complete_op_cond(mmc);
2573
2574 if (!err)
2575 err = mmc_startup(mmc);
2576 if (err)
2577 mmc->has_init = 0;
2578 else
2579 mmc->has_init = 1;
2580 return err;
2581 }
2582
2583 int mmc_init(struct mmc *mmc)
2584 {
2585 int err = 0;
2586 __maybe_unused unsigned start;
2587 #if CONFIG_IS_ENABLED(DM_MMC)
2588 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2589
2590 upriv->mmc = mmc;
2591 #endif
2592 if (mmc->has_init)
2593 return 0;
2594
2595 start = get_timer(0);
2596
2597 if (!mmc->init_in_progress)
2598 err = mmc_start_init(mmc);
2599
2600 if (!err)
2601 err = mmc_complete_init(mmc);
2602 if (err)
2603 printf("%s: %d, time %lu\n", __func__, err, get_timer(start));
2604
2605 return err;
2606 }
2607
/*
 * Cache the 16-bit DSR (Driver Stage Register) value on the mmc
 * struct. Only stores it here; the value is consumed elsewhere
 * (presumably during card startup -- not visible in this chunk).
 *
 * Return: always 0.
 */
int mmc_set_dsr(struct mmc *mmc, u16 val)
{
	mmc->dsr = val;
	return 0;
}
2613
/* CPU-specific MMC initializations */
/*
 * Weak default: no SoC-level MMC controllers to register; returns -1.
 * SoC code may override this. Used by the legacy (non-DM) probe path.
 */
__weak int cpu_mmc_init(bd_t *bis)
{
	return -1;
}
2619
/* board-specific MMC initializations. */
/*
 * Weak default: no board-level MMC controllers to register; returns a
 * negative value so the legacy probe path falls back to
 * cpu_mmc_init(). Board code may override this.
 */
__weak int board_mmc_init(bd_t *bis)
{
	return -1;
}
2625
/*
 * Mark (or unmark) this device for pre-initialization: when set,
 * mmc_do_preinit() starts the card's init early during
 * mmc_initialize(), so the slow bring-up can overlap other boot work.
 */
void mmc_set_preinit(struct mmc *mmc, int preinit)
{
	mmc->preinit = preinit;
}
2630
#if CONFIG_IS_ENABLED(DM_MMC) && defined(CONFIG_SPL_BUILD)
/* SPL with driver model: devices are probed on demand, nothing to do */
static int mmc_probe(bd_t *bis)
{
	return 0;
}
#elif CONFIG_IS_ENABLED(DM_MMC)
/*
 * Driver-model probe: walk the MMC uclass, binding devices in
 * sequence order, then probe each one. Individual probe failures are
 * logged but do not abort the scan.
 *
 * Return: 0, or a negative error if the uclass itself is unavailable.
 */
static int mmc_probe(bd_t *bis)
{
	int ret, i;
	struct uclass *uc;
	struct udevice *dev;

	ret = uclass_get(UCLASS_MMC, &uc);
	if (ret)
		return ret;

	/*
	 * Try to add them in sequence order. Really with driver model we
	 * should allow holes, but the current MMC list does not allow that.
	 * So if we request 0, 1, 3 we will get 0, 1, 2.
	 */
	for (i = 0; ; i++) {
		ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
		if (ret == -ENODEV)
			break;
	}
	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
		if (ret)
			pr_err("%s - probe failed: %d\n", dev->name, ret);
	}

	return 0;
}
#else
/*
 * Legacy (non-DM) probe: give the board code first chance to register
 * hosts; if it declines (negative return), fall back to the CPU hook.
 */
static int mmc_probe(bd_t *bis)
{
	if (board_mmc_init(bis) < 0)
		cpu_mmc_init(bis);

	return 0;
}
#endif
2674
2675 int mmc_initialize(bd_t *bis)
2676 {
2677 static int initialized = 0;
2678 int ret;
2679 if (initialized) /* Avoid initializing mmc multiple times */
2680 return 0;
2681 initialized = 1;
2682
2683 #if !CONFIG_IS_ENABLED(BLK)
2684 #if !CONFIG_IS_ENABLED(MMC_TINY)
2685 mmc_list_init();
2686 #endif
2687 #endif
2688 ret = mmc_probe(bis);
2689 if (ret)
2690 return ret;
2691
2692 #ifndef CONFIG_SPL_BUILD
2693 print_mmc_devices(',');
2694 #endif
2695
2696 mmc_do_preinit();
2697 return 0;
2698 }
2699
#ifdef CONFIG_CMD_BKOPS_ENABLE
/*
 * Enable manual background operations (BKOPS) on an eMMC device by
 * setting bit 0 of the EXT_CSD BKOPS_EN byte.
 *
 * NOTE(review): BKOPS_EN is described as one-time programmable in the
 * eMMC spec -- confirm against the JEDEC revision in use before
 * relying on being able to clear it.
 *
 * Return: 0 if enabled (or already enabled), -EMEDIUMTYPE if the
 * device does not support BKOPS, other negative error on command
 * failure.
 */
int mmc_set_bkops_enable(struct mmc *mmc)
{
	int err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err) {
		puts("Could not get ext_csd register values\n");
		return err;
	}

	/* Bit 0 of BKOPS_SUPPORT: device can perform background ops */
	if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
		puts("Background operations not supported on device\n");
		return -EMEDIUMTYPE;
	}

	/* Bit 0 of BKOPS_EN: manual BKOPS already switched on */
	if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
		puts("Background operations already enabled\n");
		return 0;
	}

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
	if (err) {
		puts("Failed to enable manual background operations\n");
		return err;
	}

	puts("Enabled manual background operations\n");

	return 0;
}
#endif